pax_global_header00006660000000000000000000000064145701657620014527gustar00rootroot0000000000000052 comment=42d0b5734c9ef959a0cab28c76cbb57f7af89ca0 mergerfs-2.40.2/000077500000000000000000000000001457016576200134265ustar00rootroot00000000000000mergerfs-2.40.2/.cirrus.yml000066400000000000000000000215131457016576200155400ustar00rootroot00000000000000freebsd_task: name: "freebsd:11.4" freebsd_instance: image_family: freebsd-11-4 env: ASSUME_ALWAYS_YES: yes script: - buildtools/install-build-pkgs - gmake -j4 freebsd_task: name: "freebsd:12.1" freebsd_instance: image_family: freebsd-12-1 env: ASSUME_ALWAYS_YES: yes script: - buildtools/install-build-pkgs - gmake -j4 freebsd_task: name: "freebsd:12.2" freebsd_instance: image_family: freebsd-12-2 env: ASSUME_ALWAYS_YES: yes script: - buildtools/install-build-pkgs - gmake -j4 freebsd_task: name: "freebsd:13.0" freebsd_instance: image_family: freebsd-13-0 env: ASSUME_ALWAYS_YES: yes script: - buildtools/install-build-pkgs - gmake -j4 #macos_task: # osx_instance: # image: catalina-base # script: # - buildtools/install-build-pkgs # - gmake -j4 linux_task: name: "alpine:3.11" container: image: alpine:3.11 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make STATIC=1 LTO=1 linux_task: name: "alpine:3.12" container: image: alpine:3.12 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make STATIC=1 LTO=1 linux_task: name: "alpine:3.13" container: image: alpine:3.13 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make STATIC=1 LTO=1 linux_task: name: "alpine:3.14" container: image: alpine:3.14 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make STATIC=1 LTO=1 linux_task: name: "alpine:3.15" container: image: alpine:3.15 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make STATIC=1 LTO=1 linux_task: name: "alpine:3.16" container: image: alpine:3.16 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make STATIC=1 LTO=1 linux_task: name: "alpine:3.17" container: image: alpine:3.17 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make STATIC=1 LTO=1 linux_task: name: "alpine:latest" container: image: alpine:latest cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make STATIC=1 LTO=1 linux_task: name: "alpine-arm:latest" arm_container: image: alpine:latest cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make STATIC=1 LTO=1 linux_task: name: "centos:7" container: image: centos:7 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "rocky:8" container: image: rocky:8 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:30" container: image: fedora:30 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:31" container: image: fedora:31 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:32" container: image: fedora:32 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:33" container: image: fedora:33 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:34" container: image: fedora:34 cpu: 4 memory: 4G timeout_in: 15m script: - 
buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:35" container: image: fedora:35 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:36" container: image: fedora:36 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:37" container: image: fedora:37 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:38" container: image: fedora:38 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora:latest" container: image: fedora:latest cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "fedora-arm:latest" arm_container: image: fedora:latest cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - make - make rpm linux_task: name: "ubuntu:14.04" container: image: ubuntu:14.04 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "ubuntu:16.04" container: image: ubuntu:16.04 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "ubuntu:18.04" container: image: ubuntu:18.04 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "ubuntu:20.04" container: image: ubuntu:20.04 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "ubuntu:22.04" container: image: ubuntu:22.04 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "ubuntu:latest" container: image: ubuntu:latest cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "ubuntu-arm:latest" arm_container: image: ubuntu:latest cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "debian:7" container: image: debian:8 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "debian:8" container: image: debian:8 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "debian:9" container: image: debian:9 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "debian:10" container: image: debian:10 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "debian:11" container: image: debian:11 cpu: 4 memory: 4G timeout_in: 15m 
script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "debian:12" container: image: debian:12 cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "debian:latest" container: image: debian:latest cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true linux_task: name: "debian-arm:latest" arm_container: image: debian:latest cpu: 4 memory: 4G timeout_in: 15m script: - buildtools/install-build-pkgs - git fetch - make deb - apt-get -y install fuse - dpkg -i ../*.deb - mergerfs -v || true mergerfs-2.40.2/.github/000077500000000000000000000000001457016576200147665ustar00rootroot00000000000000mergerfs-2.40.2/.github/FUNDING.yml000066400000000000000000000003061457016576200166020ustar00rootroot00000000000000# These are supported funding model platforms github: [trapexit] patreon: trapexit custom: ['https://paypal.me/trapexit','https://buymeacoff.ee/trapexit'] ko_fi: trapexit open_collective: trapexit mergerfs-2.40.2/.github/ISSUE_TEMPLATE/000077500000000000000000000000001457016576200171515ustar00rootroot00000000000000mergerfs-2.40.2/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000030071457016576200216430ustar00rootroot00000000000000--- name: Bug report about: For the reporting of behavior not as described title: '' labels: bug, investigating assignees: '' --- **Describe the bug** A clear and concise description of the unexpected behavior. **Please be sure to use latest release of mergerfs to ensure the issue still exists. Not your distro's latest but the latest official release.** The master branch is **not** to be considered production ready. Feel free to file bug reports but do so indicating clearly that you are testing unreleased code. **To Reproduce** Steps to reproduce the behavior. List **all** steps to reproduce. **All** settings. Please simplify the reproduction as much as possible. - Unless it is dependenat on multiple branches, use a single branch - Reproduce with standard tooling if possible (touch,truncate,rm,rmdir,ln,etc.) Having to install 3rd party software will make debugging more difficult. **Expected behavior** A clear and concise description of the expected behavior. **System information:** - OS, kernel version: `uname -a` - mergerfs version: `mergerfs -V` - mergerfs settings - List of drives, filesystems, & sizes: - `df -h` - `lsblk -f` - A strace of the application having a problem: - `strace -fvTtt -s 256 -o /tmp/app.strace.txt ` - `strace -fvTtt -s 256 -o /tmp/app.strace.txt -p ` - strace of mergerfs while app tried to do it's thing: - `strace -fvTtt -s 256 -p -o /tmp/mergerfs.strace.txt` **Additional context** Add any other context about the problem here. mergerfs-2.40.2/.github/ISSUE_TEMPLATE/feature_request.md000066400000000000000000000011021457016576200226700ustar00rootroot00000000000000--- name: Feature request about: Suggest an idea for this project title: '' labels: feature, investigating assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. **Describe the solution you'd like** A clear and concise description of what you want to happen. 
**Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. mergerfs-2.40.2/.github/ISSUE_TEMPLATE/question.md000066400000000000000000000004551457016576200213460ustar00rootroot00000000000000--- name: Question about: When you're looking for information or general support title: '' labels: investigating, question assignees: '' --- For general questions and support please use [GitHub Discussions](https://github.com/trapexit/mergerfs/discussions) or [Discord](https://discord.gg/MpAr69V). mergerfs-2.40.2/.github/workflows/000077500000000000000000000000001457016576200170235ustar00rootroot00000000000000mergerfs-2.40.2/.github/workflows/codeql.yml000066400000000000000000000016241457016576200210200ustar00rootroot00000000000000name: "CodeQL" on: push: branches: [ "master" ] pull_request: branches: [ "master" ] schedule: - cron: "7 3 * * 6" jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: [ python, cpp ] steps: - name: Checkout uses: actions/checkout@v3 - name: Initialize CodeQL uses: github/codeql-action/init@v2 with: languages: ${{ matrix.language }} queries: +security-and-quality - name: Autobuild uses: github/codeql-action/autobuild@v2 if: ${{ matrix.language == 'python' || matrix.language == 'cpp' }} - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v2 with: category: "/language:${{ matrix.language }}" mergerfs-2.40.2/.gitignore000066400000000000000000000003641457016576200154210ustar00rootroot00000000000000# Compiled Object files *.slo *.lo *.o # Compiled Dynamic libraries *.so *.dylib # Compiled Static libraries *.lai *.la *.a # build artifacts mergerfs obj/ src/version.hpp # Debian files debian/files debian/changelog # RPM files rpmbuild/ mergerfs-2.40.2/DEPENDENCIES000066400000000000000000000006701457016576200152020ustar00rootroot00000000000000## Libraries ### included in repo * libfuse: https://github.com/libfuse/libfuse (heavily modified fork of v2.x) * wyhash: https://github.com/wangyi-fudan/wyhash * ghc::filesystem: https://github.com/gulrak/filesystem * nonstd::optional: https://github.com/martinmoene/optional-lite * fmt: https://github.com/fmtlib/fmt * concurrentqueue: https://github.com/cameron314/concurrentqueue * scope_guard: https://github.com/Neargye/scope_guard mergerfs-2.40.2/LICENSE000066400000000000000000000014431457016576200144350ustar00rootroot00000000000000/* ISC License Copyright (c) 2024, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ mergerfs-2.40.2/Makefile000066400000000000000000000155141457016576200150740ustar00rootroot00000000000000# Copyright (c) 2024, Antonio SJ Musumeci # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. GIT = git TAR = tar MKDIR = mkdir TOUCH = touch CP = cp RM = rm LN = ln FIND = find INSTALL = install MKTEMP = mktemp STRIP = strip PANDOC = pandoc SED = sed RPMBUILD = rpmbuild GIT2DEBCL = ./buildtools/git2debcl PKGCONFIG = pkg-config GIT_REPO = 0 ifneq ($(shell $(GIT) --version 2> /dev/null),) ifeq ($(shell test -e .git; echo $$?),0) GIT_REPO = 1 endif endif USE_XATTR = 1 UGID_USE_RWLOCK = 0 ifeq ($(DEBUG),1) OPT_FLAGS := -O0 -g -fsanitize=undefined else OPT_FLAGS := -O2 -DNDEBUG endif ifeq ($(STATIC),1) STATIC_FLAGS := -static else STATIC_FLAGS := endif ifeq ($(LTO),1) LTO_FLAGS := -flto else LTO_FLAGS := endif SRC = $(wildcard src/*.cpp) OBJS = $(SRC:src/%.cpp=build/.src/%.o) DEPS = $(SRC:src/%.cpp=build/.src/%.d) TESTS = $(wildcard tests/*.cpp) TESTS_OBJS = $(filter-out build/.src/mergerfs.o,$(OBJS)) TESTS_OBJS += $(TESTS:tests/%.cpp=build/.tests/%.o) TESTS_DEPS = $(TESTS:tests/%.cpp=build/.tests/%.d) TESTS_DEPS += $(DEPS) MANPAGE = mergerfs.1 CFLAGS ?= ${OPT_FLAGS} CFLAGS := ${CFLAGS} \ -Wall \ -Wno-unused-result CXXFLAGS ?= ${OPT_FLAGS} CXXFLAGS := \ ${CXXFLAGS} \ -std=c++11 \ $(STATIC_FLAGS) \ $(LTO_FLAGS) \ -Wall \ -Wno-unused-result \ -MMD FUSE_FLAGS = \ -Ilibfuse/include \ -D_FILE_OFFSET_BITS=64 MFS_FLAGS = \ -DUSE_XATTR=$(USE_XATTR) \ -DUGID_USE_RWLOCK=$(UGID_USE_RWLOCK) TESTS_FLAGS = \ -Isrc \ -DTESTS LDFLAGS := \ ${LDFLAGS} \ -pthread \ -lrt # https://www.gnu.org/prep/standards/html_node/Directory-Variables.html DESTDIR = PREFIX = /usr/local EXEC_PREFIX = $(PREFIX) DATAROOTDIR = $(PREFIX)/share DATADIR = $(DATAROOTDIR) BINDIR = $(EXEC_PREFIX)/bin SBINDIR = $(EXEC_PREFIX)/sbin LIBDIR = $(EXEC_PREFIX)/lib MANDIR = $(DATAROOTDIR)/man MAN1DIR = $(MANDIR)/man1 INSTALLBINDIR = $(DESTDIR)$(BINDIR) INSTALLSBINDIR = $(DESTDIR)$(SBINDIR) INSTALLLIBDIR = $(DESTDIR)$(LIBDIR)/mergerfs INSTALLMAN1DIR = $(DESTDIR)$(MAN1DIR) .PHONY: all all: mergerfs .PHONY: help help: @echo "usage: make\n" @echo "make USE_XATTR=0 - build program without xattrs functionality" @echo "make STATIC=1 - build static binary" @echo "make LTO=1 - build with link time optimization" objects: version build/stamp $(MAKE) $(OBJS) tests-objects: $(MAKE) $(TESTS_OBJS) build/mergerfs: libfuse objects $(CXX) $(CXXFLAGS) $(FUSE_FLAGS) $(MFS_FLAGS) $(CPPFLAGS) $(OBJS) -o $@ libfuse/build/libfuse.a $(LDFLAGS) build/tests: build/mergerfs tests-objects $(CXX) $(CXXFLAGS) $(TESTS_FLAGS) $(FUSE_FLAGS) $(MFS_FLAGS) $(CPPFLAGS) $(TESTS_OBJS) -o $@ libfuse/build/libfuse.a $(LDFLAGS) mergerfs: build/mergerfs tests: build/tests changelog: ifeq ($(GIT_REPO),1) $(GIT2DEBCL) --name mergerfs > ChangeLog else @echo "WARNING: need git repo to generate ChangeLog" endif .PHONY: version 
version: ./buildtools/update-version build/stamp: $(MKDIR) -p build/.src build/.tests $(TOUCH) $@ build/.src/%.o: src/%.cpp $(CXX) $(CXXFLAGS) $(FUSE_FLAGS) $(MFS_FLAGS) $(CPPFLAGS) -c $< -o $@ build/.tests/%.o: tests/%.cpp $(CXX) $(CXXFLAGS) $(TESTS_FLAGS) $(FUSE_FLAGS) $(MFS_FLAGS) $(CPPFLAGS) -c $< -o $@ build/preload.so: build/stamp tools/preload.c $(CC) -shared -fPIC $(CFLAGS) $(CPPFLAGS) -o $@ tools/preload.c preload: build/preload.so .PHONY: clean clean: rpm-clean $(RM) -rf build $(FIND) . -name "*~" -delete $(MAKE) -C libfuse clean distclean: clean ifeq ($(GIT_REPO),1) $(GIT) clean -xfd endif .PHONY: install install: install-base install-mount-tools install-preload install-man install-base: build/mergerfs $(MKDIR) -p "$(INSTALLBINDIR)" $(INSTALL) -v -m 0755 build/mergerfs "$(INSTALLBINDIR)/mergerfs" install-mount-tools: install-base $(MKDIR) -p "$(INSTALLBINDIR)" $(MAKE) -C libfuse install install-man: $(MANPAGE) $(MKDIR) -p "$(INSTALLMAN1DIR)" $(INSTALL) -v -m 0644 "man/$(MANPAGE)" "$(INSTALLMAN1DIR)/$(MANPAGE)" install-preload: preload $(MKDIR) -p "$(INSTALLLIBDIR)" $(INSTALL) -v -m 444 "build/preload.so" "$(INSTALLLIBDIR)/preload.so" install-strip: install-base $(STRIP) "$(INSTALLBINDIR)/mergerfs" .PHONY: uninstall uninstall: uninstall-base uninstall-mount.mergerfs uninstall-man uninstall-base: $(RM) -f "$(INSTALLBINDIR)/mergerfs" uninstall-mount.mergerfs: $(RM) -f "$(INSTALLBINDIR)/mount.mergerfs" uninstall-man: $(RM) -f "$(INSTALLMAN1DIR)/$(MANPAGE)" $(MANPAGE): README.md ifneq ($(shell $(PANDOC) --version 2> /dev/null),) $(PANDOC) -s -t man -o "man/$(MANPAGE)" README.md else $(warning "pandoc does not appear available: unable to build manpage") endif man: $(MANPAGE) .PHONY: tarball tarball: man changelog version $(eval VERSION := $(shell cat VERSION)) $(eval VERSION := $(subst -,_,$(VERSION))) $(eval FILENAME := mergerfs-$(VERSION)) $(eval TMPDIR := $(shell $(MKTEMP) --tmpdir -d .$(FILENAME).XXXXXXXX)) $(MKDIR) $(TMPDIR)/$(FILENAME) $(CP) -ar . $(TMPDIR)/$(FILENAME) $(TAR) --exclude=.git -cz -C $(TMPDIR) -f $(FILENAME).tar.gz $(FILENAME) $(RM) -rf $(TMPDIR) debian-changelog: ifeq ($(GIT_REPO),1) $(GIT2DEBCL) --name mergerfs > debian/changelog else cp ChangeLog debian/changelog endif signed-deb: $(MAKE) distclean $(MAKE) debian-changelog # dpkg-source -b . dpkg-buildpackage -nc deb: $(MAKE) distclean $(MAKE) debian-changelog # dpkg-source -b . dpkg-buildpackage -nc -uc -us .PHONY: rpm-clean rpm-clean: $(RM) -rf rpmbuild rpm: tarball $(eval VERSION := $(shell cat VERSION)) $(eval VERSION := $(subst -,_,$(VERSION))) $(MKDIR) -p rpmbuild/BUILD rpmbuild/RPMS rpmbuild/SOURCES $(SED) 's/__VERSION__/$(VERSION)/g' mergerfs.spec > \ rpmbuild/SOURCES/mergerfs.spec cp -ar mergerfs-$(VERSION).tar.gz rpmbuild/SOURCES $(RPMBUILD) -ba rpmbuild/SOURCES/mergerfs.spec \ --define "_topdir $(CURDIR)/rpmbuild" .PHONY: install-build-pkgs install-build-pkgs: ./buildtools/install-build-pkgs .PHONY: libfuse libfuse: $(MAKE) DEBUG=$(DEBUG) -C libfuse -include $(DEPS) mergerfs-2.40.2/README.md000066400000000000000000003432101457016576200147100ustar00rootroot00000000000000% mergerfs(1) mergerfs user manual # NAME mergerfs - a featureful union filesystem # SYNOPSIS mergerfs -o<options> <branches> <mountpoint> # DESCRIPTION **mergerfs** is a union filesystem geared towards simplifying storage and management of files across numerous commodity storage devices. It is similar to **mhddfs**, **unionfs**, and **aufs**. 
# FEATURES

* Configurable behaviors / file placement
* Ability to add or remove filesystems at will
* Resistance to individual filesystem failure
* Support for extended attributes (xattrs)
* Support for file attributes (chattr)
* Runtime configurable (via xattrs)
* Works with heterogeneous filesystem types
* Moving of file when filesystem runs out of space while writing
* Ignore read-only filesystems when creating files
* Turn read-only files into symlinks to underlying file
* Hard link copy-on-write / CoW
* Support for POSIX ACLs
* Misc other things


# HOW IT WORKS

mergerfs logically merges multiple paths together. Think a union of sets. The file/s or directory/s acted on or presented through mergerfs are based on the policy chosen for that particular action. Read more about policies below.

```
A                +       B                =       C
/disk1                   /disk2                   /merged
|                        |                        |
+-- /dir1                +-- /dir1                +-- /dir1
|   |                    |   |                    |   |
|   +-- file1            |   +-- file2            |   +-- file1
|                        |   +-- file3            |   +-- file2
+-- /dir2                |                        |   +-- file3
|   |                    +-- /dir3                |
|   +-- file4                |                    +-- /dir2
|                            +-- file5            |   |
+-- file6                                         |   +-- file4
                                                  |
                                                  +-- /dir3
                                                  |   |
                                                  |   +-- file5
                                                  |
                                                  +-- file6
```

mergerfs does **not** support the copy-on-write (CoW) or whiteout behaviors found in **aufs** and **overlayfs**. You can **not** mount a read-only filesystem and write to it. However, mergerfs will ignore read-only filesystems when creating new files so you can mix read-write and read-only filesystems. It also does **not** split data across filesystems. It is not RAID0 / striping. It is simply a union of other filesystems.


# TERMINOLOGY

* branch: A base path used in the pool.
* pool: The mergerfs mount. The union of the branches.
* relative path: The path in the pool relative to the branch and mount.
* function: A filesystem call (open, unlink, create, getattr, rmdir, etc.)
* category: A collection of functions based on basic behavior (action, create, search).
* policy: The algorithm used to select a file when performing a function.
* path preservation: Aspect of some policies which includes checking the path for which a file would be created.


# BASIC SETUP

If you don't already know that you have a special use case then just start with one of the following option sets.

#### You need `mmap` (used by rtorrent and many sqlite3 based software)

`cache.files=partial,dropcacheonclose=true,category.create=mfs`

#### You don't need `mmap`

`cache.files=off,dropcacheonclose=true,category.create=mfs`

### Command Line

`mergerfs -o cache.files=partial,dropcacheonclose=true,category.create=mfs /mnt/hdd0:/mnt/hdd1 /media`

### /etc/fstab

`/mnt/hdd0:/mnt/hdd1 /media mergerfs cache.files=partial,dropcacheonclose=true,category.create=mfs 0 0`

### systemd mount

https://github.com/trapexit/mergerfs/wiki/systemd

```
[Unit]
Description=mergerfs service

[Service]
Type=simple
KillMode=none
ExecStart=/usr/bin/mergerfs \
  -f \
  -o cache.files=partial \
  -o dropcacheonclose=true \
  -o category.create=mfs \
  /mnt/hdd0:/mnt/hdd1 \
  /media
ExecStop=/bin/fusermount -uz /media
Restart=on-failure

[Install]
WantedBy=default.target
```

See the mergerfs [wiki for real world deployments](https://github.com/trapexit/mergerfs/wiki/Real-World-Deployments) for comparisons / ideas.


# OPTIONS

These options are the same regardless of whether you use them with the `mergerfs` commandline program, in fstab, or in a config file.

### mount options

* **config**: Path to a config file. Same arguments as below in key=val / ini style format.
* **branches**: Colon delimited list of branches.
* **minfreespace=SIZE**: The minimum space value used for creation policies.
  Can be overridden by branch specific option. Understands 'K', 'M', and 'G' to represent kilobyte, megabyte, and gigabyte respectively. (default: 4G)
* **moveonenospc=BOOL|POLICY**: When enabled if a **write** fails with **ENOSPC** (no space left on device) or **EDQUOT** (disk quota exceeded) the policy selected will run to find a new location for the file. An attempt to move the file to that branch will occur (keeping all metadata possible) and if successful the original is unlinked and the write retried. (default: false, true = mfs)
* **inodecalc=passthrough|path-hash|devino-hash|hybrid-hash**: Selects the inode calculation algorithm. (default: hybrid-hash)
* **dropcacheonclose=BOOL**: When a file is requested to be closed call `posix_fadvise` on it first to instruct the kernel that we no longer need the data and it can drop its cache. Recommended when **cache.files=partial|full|auto-full|per-process** to limit double caching. (default: false)
* **symlinkify=BOOL**: When enabled and a file is not writable and its mtime or ctime is older than **symlinkify_timeout** files will be reported as symlinks to the original files. Please read more below before using. (default: false)
* **symlinkify_timeout=UINT**: Time to wait, in seconds, to activate the **symlinkify** behavior. (default: 3600)
* **nullrw=BOOL**: Turns reads and writes into no-ops. The request will succeed but do nothing. Useful for benchmarking mergerfs. (default: false)
* **lazy-umount-mountpoint=BOOL**: mergerfs will attempt to "lazy umount" the mountpoint before mounting itself. Useful when performing live upgrades of mergerfs. (default: false)
* **ignorepponrename=BOOL**: Ignore path preserving on rename. Typically rename and link act differently depending on the policy of `create` (read below). Enabling this will cause rename and link to always use the non-path preserving behavior. This means files, when renamed or linked, will stay on the same filesystem. (default: false)
* **export-support=BOOL**: Sets a low-level FUSE feature intended to indicate the filesystem can support being exported via NFS. (default: true)
* **security_capability=BOOL**: If false return ENOATTR when xattr security.capability is queried. (default: true)
* **xattr=passthrough|noattr|nosys**: Runtime control of xattrs. Default is to passthrough xattr requests. 'noattr' will short circuit as if nothing exists. 'nosys' will respond with ENOSYS as if xattrs are not supported or disabled. (default: passthrough)
* **link_cow=BOOL**: When enabled if a regular file is opened which has a link count > 1 it will copy the file to a temporary file and rename over the original. Breaking the link and providing a basic copy-on-write function similar to cow-shell. (default: false)
* **statfs=base|full**: Controls how statfs works. 'base' means it will always use all branches in statfs calculations. 'full' is in effect path preserving and only includes branches where the path exists. (default: base)
* **statfs_ignore=none|ro|nc**: 'ro' will cause statfs calculations to ignore available space for branches mounted or tagged as 'read-only' or 'no create'. 'nc' will ignore available space for branches tagged as 'no create'. (default: none)
* **nfsopenhack=off|git|all**: A workaround for exporting mergerfs over NFS where there are issues with creating files for write while setting the mode to read-only. (default: off)
* **branches-mount-timeout=UINT**: Number of seconds to wait at startup for branches to be a mount other than the mountpoint's filesystem.
  (default: 0)
* **follow-symlinks=never|directory|regular|all**: Turns symlinks into what they point to. (default: never)
* **link-exdev=passthrough|rel-symlink|abs-base-symlink|abs-pool-symlink**: When a link fails with EXDEV optionally create a symlink to the file instead.
* **rename-exdev=passthrough|rel-symlink|abs-symlink**: When a rename fails with EXDEV optionally move the file to a special directory and symlink to it.
* **readahead=UINT**: Set readahead (in kilobytes) for mergerfs and branches if greater than 0. (default: 0)
* **posix_acl=BOOL**: Enable POSIX ACL support (if supported by kernel and underlying filesystem). (default: false)
* **async_read=BOOL**: Perform reads asynchronously. If disabled or unavailable the kernel will ensure there is at most one pending read request per file handle and will attempt to order requests by offset. (default: true)
* **fuse_msg_size=UINT**: Set the max number of pages per FUSE message. Only available on Linux >= 4.20 and ignored otherwise. (min: 1; max: 256; default: 256)
* **threads=INT**: Number of threads to use. When used alone (`process-thread-count=-1`) it sets the number of threads reading and processing FUSE messages. When used together it sets the number of threads reading from FUSE. When set to zero it will attempt to discover and use the number of logical cores. If the thread count is set negative it will look up the number of cores then divide by the absolute value. ie. threads=-2 on an 8 core machine will result in 8 / 2 = 4 threads. There will always be at least 1 thread. If set to -1 in combination with `process-thread-count` then it will try to pick reasonable values based on CPU thread count. NOTE: higher number of threads increases parallelism but usually decreases throughput. (default: 0)
* **read-thread-count=INT**: Alias for `threads`.
* **process-thread-count=INT**: Enables separate thread pool to asynchronously process FUSE requests. In this mode `read-thread-count` refers to the number of threads reading FUSE messages which are dispatched to process threads. -1 means disabled otherwise acts like `read-thread-count`. (default: -1)
* **process-thread-queue-depth=UINT**: Sets the number of requests any single process thread can have queued up at one time. Meaning the total memory usage of the queues is queue depth multiplied by the number of process threads plus read thread count. 0 sets the depth to the same as the process thread count. (default: 0)
* **pin-threads=STR**: Selects a strategy to pin threads to CPUs (default: unset)
* **flush-on-close=never|always|opened-for-write**: Flush data cache on file close. Mostly for when writeback is enabled or merging network filesystems. (default: opened-for-write)
* **scheduling-priority=INT**: Set mergerfs' scheduling priority. Valid values range from -20 to 19. See `setpriority` man page for more details. (default: -10)
* **fsname=STR**: Sets the name of the filesystem as seen in **mount**, **df**, etc. Defaults to a list of the source paths concatenated together with the longest common prefix removed.
* **func.FUNC=POLICY**: Sets the specific FUSE function's policy. See below for the list of value types. Example: **func.getattr=newest**
* **func.readdir=seq|cosr|cor|cosr:INT|cor:INT**: Sets `readdir` policy. INT value sets the number of threads to use for concurrency. (default: seq)
* **category.action=POLICY**: Sets policy of all FUSE functions in the action category. (default: epall)
* **category.create=POLICY**: Sets policy of all FUSE functions in the create category.
  (default: epmfs)
* **category.search=POLICY**: Sets policy of all FUSE functions in the search category. (default: ff)
* **cache.open=UINT**: 'open' policy cache timeout in seconds. (default: 0)
* **cache.statfs=UINT**: 'statfs' cache timeout in seconds. (default: 0)
* **cache.attr=UINT**: File attribute cache timeout in seconds. (default: 1)
* **cache.entry=UINT**: File name lookup cache timeout in seconds. (default: 1)
* **cache.negative_entry=UINT**: Negative file name lookup cache timeout in seconds. (default: 0)
* **cache.files=libfuse|off|partial|full|auto-full|per-process**: File page caching mode (default: libfuse)
* **cache.files.process-names=LIST**: A pipe | delimited list of process [comm](https://man7.org/linux/man-pages/man5/proc.5.html) names to enable page caching for when `cache.files=per-process`. (default: "rtorrent|qbittorrent-nox")
* **cache.writeback=BOOL**: Enable kernel writeback caching (default: false)
* **cache.symlinks=BOOL**: Cache symlinks (if supported by kernel) (default: false)
* **cache.readdir=BOOL**: Cache readdir (if supported by kernel) (default: false)
* **parallel-direct-writes=BOOL**: Allow the kernel to dispatch multiple, parallel (non-extending) write requests for files opened with `cache.files=per-process` (if the process is not in `process-names`) or `cache.files=off`. (This requires kernel support, and was added in v6.2)
* **direct_io**: deprecated - Bypass page cache. Use `cache.files=off` instead. (default: false)
* **kernel_cache**: deprecated - Do not invalidate data cache on file open. Use `cache.files=full` instead. (default: false)
* **auto_cache**: deprecated - Invalidate data cache if file mtime or size change. Use `cache.files=auto-full` instead. (default: false)
* **async_read**: deprecated - Perform reads asynchronously. Use `async_read=true` instead.
* **sync_read**: deprecated - Perform reads synchronously. Use `async_read=false` instead.
* **splice_read**: deprecated - Does nothing.
* **splice_write**: deprecated - Does nothing.
* **splice_move**: deprecated - Does nothing.
* **allow_other**: deprecated - mergerfs v2.35.0 and newer sets this FUSE option automatically if running as root.
* **use_ino**: deprecated - mergerfs should always control inode calculation so this is enabled all the time.

**NOTE:** Options are evaluated in the order listed so if the options are **func.rmdir=rand,category.action=ff** the **action** category setting will override the **rmdir** setting.

**NOTE:** Always look at the documentation for the version of mergerfs you're using. Not all features are available in older releases. Use `man mergerfs` or find the docs as linked in the release.

#### Value Types

* BOOL = 'true' | 'false'
* INT = [MIN_INT,MAX_INT]
* UINT = [0,MAX_INT]
* SIZE = 'NNM'; NN = INT, M = 'K' | 'M' | 'G' | 'T'
* STR = string (may refer to an enumerated value, see details of argument)
* FUNC = filesystem function
* CATEGORY = function category
* POLICY = mergerfs function policy

### branches

The 'branches' argument is a colon (':') delimited list of paths to be pooled together. It does not matter if the paths are on the same or different filesystems nor does it matter the filesystem type (within reason). Used and available space will not be duplicated for paths on the same filesystem and any features which aren't supported by the underlying filesystem (such as file attributes or extended attributes) will return the appropriate errors.

Branches currently have two options which can be set.
A type which impacts whether or not the branch is included in a policy calculation and an individual minfreespace value. The values are set by appending an `=` to a branch designation and using commas as delimiters. Example: `/mnt/drive=RW,1234`

#### branch mode

* RW: (read/write) - Default behavior. Will be eligible in all policy categories.
* RO: (read-only) - Will be excluded from `create` and `action` policies. Same as a read-only mounted filesystem would be (though faster to process).
* NC: (no-create) - Will be excluded from `create` policies. You can't create on that branch but you can change or delete.

#### minfreespace

Same purpose and syntax as the global option but specific to the branch. If not set the global value is used.

#### globbing

To make it easier to include multiple branches mergerfs supports [globbing](http://linux.die.net/man/7/glob). **The globbing tokens MUST be escaped when using via the shell else the shell itself will apply the glob itself.**

```
# mergerfs /mnt/hdd\*:/mnt/ssd /media
```

The above line will use all mount points in /mnt prefixed with **hdd** and **ssd**.

To have the pool mounted at boot or otherwise accessible from related tools use **/etc/fstab**.

```
# /mnt/hdd*:/mnt/ssd /media mergerfs minfreespace=16G 0 0
```

**NOTE:** the globbing is done at mount or when updated using the runtime API. If a new directory is added matching the glob after the fact it will not be automatically included.

**NOTE:** for mounting via **fstab** to work you must have **mount.fuse** installed. For Ubuntu/Debian it is included in the **fuse** package.

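As an illustration of combining branch modes with a per-branch minfreespace, here is a hypothetical `/etc/fstab` entry (the paths and sizes are examples only, not a recommendation):

```
/mnt/hdd0=RW:/mnt/hdd1=RW,100G:/mnt/backup=NC /media mergerfs cache.files=off,category.create=mfs 0 0
```

In this sketch `/mnt/hdd1` must keep at least 100G free to be eligible for create policies and `/mnt/backup` can be read and modified but will never be chosen for new files.
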
### inodecalc

Inodes (st_ino) are unique identifiers within a filesystem. Each mounted filesystem has device ID (st_dev) as well and together they can uniquely identify a file on the whole of the system. Entries on the same device with the same inode are in fact references to the same underlying file. It is a many to one relationship between names and an inode. Directories, however, do not have multiple links on most systems due to the complexity they add.

FUSE allows the server (mergerfs) to set inode values but not device IDs. Creating an inode value is somewhat complex in mergerfs' case as files aren't really in its control. If a policy changes what directory or file is to be selected or something changes out of band it becomes unclear what value should be used. Most software does not care what the values are but those that do often break if a value changes unexpectedly. The tool `find` will abort a directory walk if it sees a directory inode change. NFS can return stale handle errors if the inode changes out of band. File dedup tools will usually leverage device ids and inodes as a shortcut in searching for duplicate files and would resort to full file comparisons should it find different inode values.

mergerfs offers multiple ways to calculate the inode in hopes of covering different use cases.

* passthrough: Passes through the underlying inode value. Mostly intended for testing as using this does not address any of the problems mentioned above and could confuse file deduplication software as inodes from different filesystems can be the same.
* path-hash: Hashes the relative path of the entry in question. The underlying file's values are completely ignored. This means the inode value will always be the same for that file path. This is useful when using NFS and you make changes out of band such as copy data between branches. This also means that entries that do point to the same file will not be recognizable via inodes. That **does not** mean hard links don't work. They will.
* path-hash32: 32bit version of path-hash.
* devino-hash: Hashes the device id and inode of the underlying entry. This won't prevent issues with NFS should the policy pick a different file or files move out of band but will present the same inode for underlying files that do too.
* devino-hash32: 32bit version of devino-hash.
* hybrid-hash: Performs `path-hash` on directories and `devino-hash` on other file types. Since directories can't have hard links the static value won't make a difference and the files will get values useful for finding duplicates. Probably the best to use if not using NFS. As such it is the default.
* hybrid-hash32: 32bit version of hybrid-hash.

32bit versions are provided as there is some software which does not handle 64bit inodes well.

While there is a risk of hash collision in tests of a couple million entries there were zero collisions. Unlike a typical filesystem FUSE filesystems can reuse inodes and not refer to the same entry. The internal identifier used to reference a file in FUSE is different from the inode value presented. The former is the `nodeid` and is actually a tuple of 2 64bit values: `nodeid` and `generation`. This tuple is not client facing. The inode that is presented to the client is passed through the kernel uninterpreted.

From FUSE docs for `use_ino`:

```
Honor the st_ino field in the functions getattr() and fill_dir().
This value is used to fill in the st_ino field in the stat(2),
lstat(2), fstat(2) functions and the d_ino field in the readdir(2)
function. The filesystem does not have to guarantee uniqueness,
however some applications rely on this value being unique for the
whole filesystem. Note that this does *not* affect the inode that
libfuse and the kernel use internally (also called the "nodeid").
```

As of version 2.35.0 the `use_ino` option has been removed. mergerfs should always be managing inode values.

### pin-threads

Simple strategies for pinning read and/or process threads. If process threads are not enabled then the strategy simply works on the read threads. Invalid values are ignored.

* R1L: All read threads pinned to a single logical CPU.
* R1P: All read threads pinned to a single physical CPU.
* RP1L: All read and process threads pinned to a single logical CPU.
* RP1P: All read and process threads pinned to a single physical CPU.
* R1LP1L: All read threads pinned to a single logical CPU, all process threads pinned to a (if possible) different logical CPU.
* R1PP1P: All read threads pinned to a single physical CPU, all process threads pinned to a (if possible) different logical CPU.
* RPSL: All read and process threads are spread across all logical CPUs.
* RPSP: All read and process threads are spread across all physical CPUs.
* R1PPSP: All read threads are pinned to a single physical CPU while process threads are spread across all other physical CPUs.

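As a sketch of how the threading options above might be combined (the mount paths are placeholders), the following enables a separate process thread pool and spreads both pools across physical CPUs:

```
mergerfs -o threads=4,process-thread-count=4,pin-threads=RPSP /mnt/hdd0:/mnt/hdd1 /media
```

Whether pinning helps depends heavily on the hardware and workload, so treat this as a starting point for benchmarking rather than a recommended setting.
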
### fuse_msg_size

FUSE applications communicate with the kernel over a special character device: `/dev/fuse`.

A large portion of the overhead associated with FUSE is the cost of going back and forth from user space and kernel space over that device. Generally speaking the fewer trips needed the better the performance will be. Reducing the number of trips can be done a number of ways. Kernel level caching and increasing message sizes being two significant ones. When it comes to reads and writes if the message size is doubled the number of trips is approximately halved.

In Linux 4.20 a new feature was added allowing the negotiation of the max message size. Since the size is in multiples of [pages](https://en.wikipedia.org/wiki/Page_(computer_memory)) the feature is called `max_pages`. There is a maximum `max_pages` value of 256 (1MiB) and minimum of 1 (4KiB). The default used by Linux >=4.20, and hardcoded value used before 4.20, is 32 (128KiB). In mergerfs it is referred to as `fuse_msg_size` to make it clear what it impacts and provide some abstraction.

Since there should be no downsides to increasing `fuse_msg_size` / `max_pages`, outside a minor bump in RAM usage due to larger message buffers, mergerfs defaults the value to 256. On kernels before 4.20 the value has no effect. The reason the value is configurable is to enable experimentation and benchmarking. See the BENCHMARKING section for examples.

### follow-symlinks

This feature, when enabled, will cause symlinks to be interpreted by mergerfs as their target (depending on the mode).

When there is a getattr/stat request for a file mergerfs will check if the file is a symlink and depending on the `follow-symlinks` setting will replace the information about the symlink with that of that which it points to.

When unlink'ing or rmdir'ing the followed symlink it will remove the symlink itself and not that which it points to.

* never: Behave as normal. Symlinks are treated as such.
* directory: Resolve symlinks only which point to directories.
* regular: Resolve symlinks only which point to regular files.
* all: Resolve all symlinks to that which they point to. Symlinks which do not point to anything are left as is.

WARNING: This feature works but there might be edge cases yet found. If you find any odd behaviors please file a ticket on [github](https://github.com/trapexit/mergerfs/issues).

### link-exdev

If using path preservation and a `link` fails with EXDEV make a call to `symlink` where the `target` is the `oldlink` and the `linkpath` is the `newpath`. The `target` value is determined by the value of `link-exdev`.

* passthrough: Return EXDEV as normal.
* rel-symlink: A relative path from the `newpath`.
* abs-base-symlink: An absolute value using the underlying branch.
* abs-pool-symlink: An absolute value using the mergerfs mount point.

NOTE: It is possible that some applications check the file they link. In those cases it is possible it will error or complain.

### rename-exdev

If using path preservation and a `rename` fails with EXDEV:

1. Move file from **/branch/a/b/c** to **/branch/.mergerfs_rename_exdev/a/b/c**.
2. symlink the rename's `newpath` to the moved file.

The `target` value is determined by the value of `rename-exdev`.

* passthrough: Return EXDEV as normal.
* rel-symlink: A relative path from the `newpath`.
* abs-symlink: An absolute value using the mergerfs mount point.

NOTE: It is possible that some applications check the file they rename. In those cases it is possible it will error or complain.

NOTE: The reason `abs-symlink` is not split into two like `link-exdev` is due to the complexities in managing absolute base symlinks when multiple `oldpaths` exist.

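As a sketch of how these EXDEV fallbacks might be combined with a path preserving create policy (mount paths are hypothetical):

```
mergerfs -o category.create=epmfs,link-exdev=rel-symlink,rename-exdev=rel-symlink /mnt/hdd0:/mnt/hdd1 /media
```

With such a setup, when a `link` or `rename` cannot be satisfied within a single branch, mergerfs falls back to the relative symlink behaviors described above instead of returning EXDEV to the application.
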
### symlinkify

Due to the levels of indirection introduced by mergerfs and the underlying technology FUSE there can be varying levels of performance degradation. This feature will turn non-directories which are not writable into symlinks to the original file found by the `readlink` policy after the mtime and ctime are older than the timeout.

**WARNING:** The current implementation has a known issue in which if the file is open and being used when the file is converted to a symlink then the application which has that file open will receive an error when using it. This is unlikely to occur in practice but is something to keep in mind.

**WARNING:** Some backup solutions, such as CrashPlan, do not back up the target of a symlink. If using this feature it will be necessary to point any backup software to the original filesystems or configure the software to follow symlinks if such an option is available. Alternatively create two mounts. One for backup and one for general consumption.

### nullrw

Due to how FUSE works there is an overhead to all requests made to a FUSE filesystem that wouldn't exist for an in kernel one. Meaning that even a simple passthrough will have some slowdown. However, generally the overhead is minimal in comparison to the cost of the underlying I/O. By disabling the underlying I/O we can test the theoretical performance boundaries.

By enabling `nullrw` mergerfs will work as it always does **except** that all reads and writes will be no-ops. A write will succeed (the size of the write will be returned as if it were successful) but mergerfs does nothing with the data it was given. Similarly a read will return the size requested but won't touch the buffer.

See the BENCHMARKING section for suggestions on how to test.

### xattr

Runtime extended attribute support can be managed via the `xattr` option. By default it will passthrough any xattr calls. Given xattr support is rarely used and can have significant performance implications mergerfs allows it to be disabled at runtime. The performance problems mostly come when file caching is enabled. The kernel will send a `getxattr` for `security.capability` *before every single write*. It doesn't cache the responses to any `getxattr`. This might be addressed in the future but for now mergerfs can really only offer the following workarounds.

`noattr` will cause mergerfs to short circuit all xattr calls and return ENOATTR where appropriate. mergerfs still gets all the requests but they will not be forwarded on to the underlying filesystems. The runtime control will still function in this mode.

`nosys` will cause mergerfs to return ENOSYS for any xattr call. The difference with `noattr` is that the kernel will cache this fact and itself short circuit future calls. This is more efficient than `noattr` but will cause mergerfs' runtime control via the hidden file to stop working.

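For instance, a hypothetical mount that keeps page caching enabled but short circuits xattr calls to avoid the repeated `security.capability` lookups could look like this (paths are placeholders):

```
mergerfs -o cache.files=partial,xattr=nosys /mnt/hdd0:/mnt/hdd1 /media
```

Keep in mind, as noted above, that `xattr=nosys` also stops mergerfs' runtime control via the hidden file from working, so `xattr=noattr` may be the safer choice if that interface is needed.
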
### nfsopenhack

NFS is not fully POSIX compliant and historically certain behaviors, such as opening files with O_EXCL, are not or not well supported.

When mergerfs (or any FUSE filesystem) is exported over NFS some of these issues come up due to how NFS and FUSE interact.

This hack addresses the issue where a file is created with a read-only mode but opened with a read/write or write only flag. Normally this is perfectly valid but NFS chops the one open call into multiple calls. Exactly how it is translated depends on the configuration and versions of the NFS server and clients but it results in a permission error because a normal user is not allowed to open a read-only file as writable.

Even though it's a more niche situation this hack breaks normal security and behavior and as such is `off` by default. If set to `git` it will only perform the hack when the path in question includes `/.git/`. `all` will result in it applying any time a read-only file which is empty is opened for writing.

### export-support

In theory this flag should not be exposed to the end user. It is a low-level FUSE flag which indicates whether or not the kernel can send certain kinds of messages to it for the purposes of using with NFS.

mergerfs does support these messages but due to bugs and quirks found in the kernel and mergerfs this option is provided just in case it is needed for debugging.

Given that this flag is set when the FUSE connection is first initiated it is not possible to change during run time.


# FUNCTIONS, CATEGORIES and POLICIES

The POSIX filesystem API is made up of a number of functions. **creat**, **stat**, **chown**, etc. For ease of configuration in mergerfs most of the core functions are grouped into 3 categories: **action**, **create**, and **search**. These functions and categories can be assigned a policy which dictates which branch is chosen when performing that function.

Some functions, listed in the category `N/A` below, can not be assigned the normal policies. These functions work with file handles, rather than file paths, which were created by `open` or `create`. That said many times the current FUSE kernel driver will not always provide the file handle when a client calls `fgetattr`, `fchown`, `fchmod`, `futimens`, `ftruncate`, etc. This means it will call the regular, path based, versions. `statfs`'s behavior can be modified via other options.

When using policies which are based on a branch's available space the base path provided is used. Not the full path to the file in question. Meaning that mounts in the branch won't be considered in the space calculations. The reason is that it doesn't really work for non-path preserving policies and can lead to non-obvious behaviors.

NOTE: While any policy can be assigned to a function or category, some may not be very useful in practice. For instance: **rand** (random) may be useful for file creation (create) but could lead to very odd behavior if used for `chmod` if there were more than one copy of the file.

### Functions and their Category classifications

| Category | FUSE Functions |
|----------|----------------|
| action   | chmod, chown, link, removexattr, rename, rmdir, setxattr, truncate, unlink, utimens |
| create   | create, mkdir, mknod, symlink |
| search   | access, getattr, getxattr, ioctl (directories), listxattr, open, readlink |
| N/A      | fchmod, fchown, futimens, ftruncate, fallocate, fgetattr, fsync, ioctl (files), read, readdir, release, statfs, write, copy_file_range |

In cases where something may be searched for (such as a path to clone) **getattr** will usually be used.

### Policies

A policy is the algorithm used to choose a branch or branches for a function to work on or generally how the function behaves.

Any function in the `create` category will clone the relative path if needed. Some other functions (`rename`,`link`,`ioctl`) have special requirements or behaviors which you can read more about below.

#### Filtering

Most policies basically search branches and create a list of files / paths for functions to work on. The policy is responsible for filtering and sorting the branches. Filters include **minfreespace**, whether or not a branch is mounted read-only, and the branch tagging (RO,NC,RW). These filters are applied across most policies.

* No **search** function policies filter.
* All **action** function policies filter out branches which are mounted **read-only** or tagged as **RO (read-only)**.
* All **create** function policies filter out branches which are mounted **read-only**, tagged **RO (read-only)** or **NC (no create)**, or have available space less than `minfreespace`.

Policies may have their own additional filtering such as those that require existing paths to be present.

If all branches are filtered an error will be returned. Typically **EROFS** (read-only filesystem) or **ENOSPC** (no space left on device) depending on the most recent reason for filtering a branch. **ENOENT** will be returned if no eligible branch is found.

If **create**, **mkdir**, **mknod**, or **symlink** fail with `EROFS` or other fundamental errors then mergerfs will mark any branch found to be read-only as such (IE will set the mode `RO`) and will rerun the policy and try again. This is mostly for `ext4` filesystems that can suddenly become read-only when it encounters an error.

#### Path Preservation

Policies, as described below, are of two basic classifications. `path preserving` and `non-path preserving`.

All policies which start with `ep` (**epff**, **eplfs**, **eplus**, **epmfs**, **eprand**) are `path preserving`. `ep` stands for `existing path`.

A path preserving policy will only consider branches where the relative path being accessed already exists.

When using non-path preserving policies paths will be cloned to target branches as necessary.

With the `msp` or `most shared path` policies they are defined as `path preserving` for the purpose of controlling `link` and `rename`'s behaviors since `ignorepponrename` is available to disable that behavior.

#### Policy descriptions

A policy's behavior differs, as mentioned above, based on the function it is used with. Sometimes it really might not make sense to even offer certain policies because they are literally the same as others but it makes things a bit more uniform.

| Policy | Description |
|------------------|------------------------------------------------------------|
| all | Search: For **mkdir**, **mknod**, and **symlink** it will apply to all branches. **create** works like **ff**. |
| epall (existing path, all) | For **mkdir**, **mknod**, and **symlink** it will apply to all found. **create** works like **epff** (but more expensive because it doesn't stop after finding a valid branch). |
| epff (existing path, first found) | Given the order of the branches, as defined at mount time or configured at runtime, act on the first one found where the relative path exists. |
| eplfs (existing path, least free space) | Of all the branches on which the relative path exists choose the branch with the least free space. |
| eplus (existing path, least used space) | Of all the branches on which the relative path exists choose the branch with the least used space. |
| epmfs (existing path, most free space) | Of all the branches on which the relative path exists choose the branch with the most free space. |
| eppfrd (existing path, percentage free random distribution) | Like **pfrd** but limited to existing paths. |
| eprand (existing path, random) | Calls **epall** and then randomizes. Returns 1. |
| ff (first found) | Given the order of the branches, as defined at mount time or configured at runtime, act on the first one found. |
| lfs (least free space) | Pick the branch with the least available free space. |
| lus (least used space) | Pick the branch with the least used space. |
| mfs (most free space) | Pick the branch with the most available free space. |
| msplfs (most shared path, least free space) | Like **eplfs** but if it fails to find a branch it will try again with the parent directory. Continues this pattern till finding one. |
| msplus (most shared path, least used space) | Like **eplus** but if it fails to find a branch it will try again with the parent directory. Continues this pattern till finding one. |
| mspmfs (most shared path, most free space) | Like **epmfs** but if it fails to find a branch it will try again with the parent directory. Continues this pattern till finding one. |
| msppfrd (most shared path, percentage free random distribution) | Like **eppfrd** but if it fails to find a branch it will try again with the parent directory. Continues this pattern till finding one. |
| newest | Pick the file / directory with the largest mtime. |
| pfrd (percentage free random distribution) | Chooses a branch at random with the likelihood of selection based on a branch's available space relative to the total. |
| rand (random) | Calls **all** and then randomizes. Returns 1 branch. |

**NOTE:** If you are using an underlying filesystem that reserves blocks such as ext2, ext3, or ext4 be aware that mergerfs respects the reservation by using `f_bavail` (number of free blocks for unprivileged users) rather than `f_bfree` (number of free blocks) in policy calculations. **df** does NOT use `f_bavail`, it uses `f_bfree`, so direct comparisons between **df** output and mergerfs' policies are not appropriate.

#### Defaults

| Category | Policy |
|----------|--------|
| action   | epall  |
| create   | epmfs  |
| search   | ff     |

#### func.readdir

examples: `func.readdir=seq`, `func.readdir=cor:4`

`readdir` has policies to control how it manages reading directory content.

| Policy | Description |
|--------|-------------|
| seq | "sequential" : Iterate over branches in the order defined. This is the default and traditional behavior found prior to the readdir policy introduction. |
| cosr | "concurrent open, sequential read" : Concurrently open branch directories using a thread pool and process them in order of definition. This keeps memory and CPU usage low while also reducing the time spent waiting on branches to respond. Number of threads defaults to the number of logical cores. Can be overwritten via the syntax `func.readdir=cosr:N` where `N` is the number of threads. |
| cor | "concurrent open and read" : Concurrently open branch directories and immediately start reading their contents using a thread pool. This will result in slightly higher memory and CPU usage but reduced latency. Particularly when using higher latency / slower speed network filesystem branches. Unlike `seq` and `cosr` the order of files could change due to the async nature of the thread pool. Number of threads defaults to the number of logical cores. Can be overwritten via the syntax `func.readdir=cor:N` where `N` is the number of threads. |

Keep in mind that `readdir` mostly just provides a list of file names in a directory and possibly some basic metadata about said files. To know details about the files, as one would see from commands like `find` or `ls`, it is required to call `stat` on the file which is controlled by `func.getattr`.

#### ioctl

When `ioctl` is used with an open file then it will use the file handle which was created at the original `open` call. However, when using `ioctl` with a directory mergerfs will use the `open` policy to find the directory to act on.
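
To tie the policy system together, here is a hypothetical configuration (branch paths are placeholders) that spreads new files by available space, acts on all copies for metadata changes, and uses concurrent directory opens for `readdir`:

```
mergerfs -o category.create=mfs,category.action=epall,category.search=ff,func.readdir=cosr:4 /mnt/hdd0:/mnt/hdd1 /media
```

Remember that options are evaluated in order, so a later `func.*` setting can override an earlier `category.*` one and vice versa.
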
#### rename & link #### **NOTE:** If you're receiving errors from software when files are moved / renamed / linked then you should consider changing the create policy to one which is **not** path preserving, enabling `ignorepponrename`, or contacting the author of the offending software and requesting that `EXDEV` (cross device / improper link) be properly handled. `rename` and `link` are tricky functions in a union filesystem. `rename` only works within a single filesystem or device. If a rename can't be done atomically due to the source and destination paths existing on different mount points it will return **-1** with **errno = EXDEV** (cross device / improper link). So if a `rename`'s source and target are on different filesystems within the pool it creates an issue. Originally mergerfs would return EXDEV whenever a rename was requested which was cross directory in any way. This made the code simple and was technically compliant with POSIX requirements. However, many applications fail to handle EXDEV at all and treat it as a normal error or otherwise handle it poorly. Such apps include: gvfsd-fuse v1.20.3 and prior, Finder / CIFS/SMB client in Apple OSX 10.9+, NZBGet, Samba's recycling bin feature. As a result a compromise was made in order to get most software to work while still obeying mergerfs' policies. Below is the basic logic. * If using a **create** policy which tries to preserve directory paths (epff,eplfs,eplus,epmfs) * Using the **rename** policy get the list of files to rename * For each file attempt rename: * If failure with ENOENT (no such file or directory) run **create** policy * If create policy returns the same branch as currently evaluating then clone the path * Re-attempt rename * If **any** of the renames succeed the higher level rename is considered a success * If **no** renames succeed the first error encountered will be returned * On success: * Remove the target from all branches with no source file * Remove the source from all branches which failed to rename * If using a **create** policy which does **not** try to preserve directory paths * Using the **rename** policy get the list of files to rename * Using the **getattr** policy get the target path * For each file attempt rename: * If the source branch != target branch: * Clone target path from target branch to source branch * Rename * If **any** of the renames succeed the higher level rename is considered a success * If **no** renames succeed the first error encountered will be returned * On success: * Remove the target from all branches with no source file * Remove the source from all branches which failed to rename The the removals are subject to normal entitlement checks. The above behavior will help minimize the likelihood of EXDEV being returned but it will still be possible. **link** uses the same strategy but without the removals. #### statfs / statvfs #### [statvfs](http://linux.die.net/man/2/statvfs) normalizes the source filesystems based on the fragment size and sums the number of adjusted blocks and inodes. This means you will see the combined space of all sources. Total, used, and free. The sources however are dedupped based on the filesystem so multiple sources on the same drive will not result in double counting its space. Other filesystems mounted further down the tree of the branch will not be included when checking the mount's stats. The options `statfs` and `statfs_ignore` can be used to modify `statfs` behavior. 
#### flush-on-close

https://lkml.kernel.org/linux-fsdevel/20211024132607.1636952-1-amir73il@gmail.com/T/

By default FUSE would issue a flush before the release of a file descriptor. This was considered a bit aggressive and a feature was added to give the FUSE server the ability to choose when that happens.

Options:

* always
* never
* opened-for-write

For now it defaults to "opened-for-write" which is less aggressive than the behavior before this feature was added. It should not be a problem because the flush is really only relevant when a file is written to. Given that flush is irrelevant for many filesystems, in the future a branch specific flag may be added so only files opened on a specific branch would be flushed on close.

# ERROR HANDLING

POSIX filesystem functions offer a single return code meaning that there is some complication regarding the handling of multiple branches as mergerfs does. It tries to handle errors in a way that would generally return meaningful values for that particular function.

### chmod, chown, removexattr, setxattr, truncate, utimens

1) if no error: return 0 (success)
2) if no successes: return first error
3) if one of the files acted on was the same as the related search function: return its value
4) return 0 (success)

While doing this increases the complexity and cost of error handling, particularly step 3, this provides probably the most reasonable return value.

### unlink, rmdir

1) if no errors: return 0 (success)
2) return first error

Older versions of mergerfs would return success if any success occurred but for unlink and rmdir there are downstream assumptions that, while not impossible to occur, can confuse some software.

### others

For search functions there is always a single thing acted on and as such whatever return value comes from the single function call is returned.

For create functions `mkdir`, `mknod`, and `symlink` which don't return a file descriptor and therefore can have `all` or `epall` policies it will return success if any of the calls succeed and an error otherwise.

# INSTALL

https://github.com/trapexit/mergerfs/releases

If your distribution's package manager includes mergerfs check if the version is up to date. If out of date it is recommended to use the latest release found on the release page. Details for common distros are below.

#### Debian

Most Debian installs are of a stable branch and therefore do not have the most up to date software. While mergerfs is available via `apt` it is suggested that users install the most recent version available from the [releases page](https://github.com/trapexit/mergerfs/releases).

#### prebuilt deb

```
wget https://github.com/trapexit/mergerfs/releases/download//mergerfs_.debian-_.deb
dpkg -i mergerfs_.debian-_.deb
```

#### apt

```
sudo apt install -y mergerfs
```

#### Ubuntu

Most Ubuntu installs are of a stable branch and therefore do not have the most up to date software. While mergerfs is available via `apt` it is suggested that users install the most recent version available from the [releases page](https://github.com/trapexit/mergerfs/releases).

#### prebuilt deb

```
wget https://github.com/trapexit/mergerfs/releases/download//mergerfs_.ubuntu-_.deb
dpkg -i mergerfs_.ubuntu-_.deb
```

#### apt

```
sudo apt install -y mergerfs
```

#### Raspberry Pi OS

Effectively the same as Debian or Ubuntu.
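Before deciding between the distro package and a GitHub release it can help to compare versions. A hedged example using standard package tooling (the `apt-cache` form applies to Debian, Ubuntu, and Raspberry Pi OS; the `dnf` form to Fedora and related distros):

```
# version currently installed, if any
mergerfs --version

# version your distro's repositories would install
apt-cache policy mergerfs   # Debian / Ubuntu / Raspberry Pi OS
dnf info mergerfs           # Fedora / Rocky
```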
#### Fedora

```
wget https://github.com/trapexit/mergerfs/releases/download//mergerfs-.fc..rpm
sudo rpm -i mergerfs-.fc..rpm
```

#### CentOS / Rocky

```
wget https://github.com/trapexit/mergerfs/releases/download//mergerfs-.el..rpm
sudo rpm -i mergerfs-.el..rpm
```

#### ArchLinux

1. Setup AUR
2. Install `mergerfs`

#### Other

Static binaries are provided for situations where native packages are unavailable.

```
wget https://github.com/trapexit/mergerfs/releases/download//mergerfs-static-linux_.tar.gz
sudo tar xvf mergerfs-static-linux_.tar.gz -C /
```

# BUILD

**NOTE:** Prebuilt packages, recommended for most users, can be found at: https://github.com/trapexit/mergerfs/releases

**NOTE:** Only tagged releases are supported. `master` and other branches should be considered works in progress.

First get the code from [github](https://github.com/trapexit/mergerfs).

```
$ git clone https://github.com/trapexit/mergerfs.git
$ # or
$ wget https://github.com/trapexit/mergerfs/releases/download//mergerfs-.tar.gz
```

#### Debian / Ubuntu

```
$ cd mergerfs
$ sudo tools/install-build-pkgs
$ make deb
$ sudo dpkg -i ../mergerfs__.deb
```

#### RHEL / CentOS / Rocky / Fedora

```
$ su -
# cd mergerfs
# tools/install-build-pkgs
# make rpm
# rpm -i rpmbuild/RPMS//mergerfs-..rpm
```

#### Generic

Have git, g++, make, python installed.

```
$ cd mergerfs
$ make
$ sudo make install
```

#### Build options

```
$ make help
usage: make

make USE_XATTR=0 - build program without xattrs functionality
make STATIC=1    - build static binary
make LTO=1       - build with link time optimization
```

# UPGRADE

mergerfs can be upgraded live by mounting on top of the previous instance. Simply install the new version of mergerfs and follow the instructions below.

Run mergerfs again or, if using `/etc/fstab`, call for it to mount again. Existing open files and such will continue to work fine though they won't see runtime changes since any such change would be the new mount. If you plan on changing settings with the new mount you should / could apply those before mounting the new version.

```
$ sudo mount /mnt/mergerfs
$ mount | grep mergerfs
media on /mnt/mergerfs type mergerfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other)
media on /mnt/mergerfs type mergerfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other)
```

A problem with this approach is that the underlying instance will continue to run even if the software using it stops or is restarted. To work around this you can use a "lazy umount". Before mounting over top of the mount point with the new instance of mergerfs issue: `umount -l `. Or you can let mergerfs do it by setting the option `lazy-umount-mountpoint=true`.

# RUNTIME INTERFACES

## RUNTIME CONFIG

#### .mergerfs pseudo file ####

```
/.mergerfs
```

There is a pseudo file available at the mount point which allows for the runtime modification of certain **mergerfs** options. The file will not show up in **readdir** but can be **stat**'ed and manipulated via [{list,get,set}xattrs](http://linux.die.net/man/2/listxattr) calls.

Any changes made at runtime are **not** persisted. If you wish for values to persist they must be included as options wherever you configure the mounting of mergerfs (/etc/fstab).

##### Keys #####

Use `getfattr -d /mountpoint/.mergerfs` or `xattr -l /mountpoint/.mergerfs` to see all supported keys. Some are informational and therefore read-only. `setxattr` will return EINVAL (invalid argument) on read-only keys.

##### Values #####

Same as the command line.
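For example, a hedged sketch of reading and changing a key at runtime; the mount point is a placeholder and any writable key listed by `getfattr -d` can be set the same way.

```
# read the current create policy
getfattr -n user.mergerfs.category.create /mnt/pool/.mergerfs

# change it for the running mount (not persisted across remounts)
setfattr -n user.mergerfs.category.create -v mfs /mnt/pool/.mergerfs
```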
###### user.mergerfs.branches ######

Used to query or modify the list of branches. When modifying there are several shortcuts to ease manipulation of the list.

| Value | Description |
|--------------|-------------|
| [list] | set |
| +<[list] | prepend |
| +>[list] | append |
| -[list] | remove all values provided |
| -< | remove first in list |
| -> | remove last in list |

`xattr -w user.mergerfs.branches +`

#### entry & attribute caching

To limit the number of kernel <-> userspace round trips there are kernel side caches for file entries and attributes. The entry cache limits the `lookup` calls to mergerfs which ask if a file exists. The attribute cache limits the need to make `getattr` calls to mergerfs which provide file attributes (mode, size, type, etc.). As with the page cache these should not be used if the underlying filesystems are being manipulated at the same time as it could lead to odd behavior or data corruption. The options for setting these are `cache.entry` and `cache.negative_entry` for the entry cache and `cache.attr` for the attributes cache. `cache.negative_entry` refers to the timeout for negative responses to lookups (non-existent files).

#### writeback caching

When `cache.files` is enabled the default is for it to perform writethrough caching. This behavior won't help improve performance as each write still goes one for one through the filesystem. By enabling the FUSE writeback cache small writes may be aggregated by the kernel and then sent to mergerfs as one larger request. This can greatly improve the throughput for apps which write to files inefficiently. The amount the kernel can aggregate is limited by the size of a FUSE message. Read the `fuse_msg_size` section for more details. There is a small side effect as a result of enabling writeback caching. Underlying files won't ever be opened with O_APPEND or O_WRONLY. The former because the kernel then manages append mode and the latter because the kernel may request file data from mergerfs to populate the write cache. The O_APPEND change means that if a file is changed outside of mergerfs it could lead to corruption as the kernel won't know the end of the file has changed. That said, any time you use caching you should refrain from using the same file outside of mergerfs at the same time.

Note that if an application is properly sizing writes then writeback caching will have little or no effect. It will only help with writes of sizes below the FUSE message size (128K on older kernels, 1M on newer).

#### statfs caching

Of the syscalls used by mergerfs in policies the `statfs` / `statvfs` call is perhaps the most expensive. It's used to find out the available space of a filesystem and whether it is mounted read-only. Depending on the setup and usage pattern these queries can be relatively costly. When `cache.statfs` is enabled all calls to `statfs` by a policy will be cached for the number of seconds it's set to.

Example: If the create policy is `mfs` and the timeout is 60 then for that 60 seconds the same filesystem will be returned as the target for creates because the available space won't be updated for that time.

#### symlink caching

As of version 4.20 Linux supports symlink caching. Significant performance increases can be had in workloads which use a lot of symlinks. Setting `cache.symlinks=true` will result in requesting symlink caching from the kernel only if supported. As a result it's safe to enable it on systems prior to 4.20. That said it is disabled by default for now.
You can see if caching is enabled by querying the xattr `user.mergerfs.cache.symlinks` but given it must be requested at startup you can not change it at runtime.

#### readdir caching

As of version 4.20 Linux supports readdir caching. This can have a significant impact on directory traversal. Especially when combined with entry (`cache.entry`) and attribute (`cache.attr`) caching. Setting `cache.readdir=true` will result in requesting readdir caching from the kernel on each `opendir`. If the kernel doesn't support readdir caching setting the option to `true` has no effect. This option is configurable at runtime via the xattr `user.mergerfs.cache.readdir`.

#### tiered caching

Some storage technologies support what some call "tiered" caching. The placing of usually smaller, faster storage as a transparent cache to larger, slower storage. NVMe, SSD, Optane in front of traditional HDDs for instance.

mergerfs does not natively support any sort of tiered caching. Most users have no use for such a feature and its inclusion would complicate the code. However, there are a few situations where a cache filesystem could help with a typical mergerfs setup.

1. Fast network, slow filesystems, many readers: You've a 10+Gbps network with many readers and your regular filesystems can't keep up.
2. Fast network, slow filesystems, small'ish bursty writes: You have a 10+Gbps network and wish to transfer amounts of data less than your cache filesystem but wish to do so quickly.

With #1 it's arguable if you should be using mergerfs at all. RAID would probably be the better solution. If you're going to use mergerfs there are other tactics that may help: spreading the data across filesystems (see the mergerfs.dup tool) and setting `func.open=rand`, using `symlinkify`, or using dm-cache or a similar technology to add a tiered cache to the underlying device.

With #2 one could use dm-cache as well but there is another solution which requires only mergerfs and a cronjob.

1. Create 2 mergerfs pools. One which includes just the slow devices and one which has both the fast devices (SSD, NVMe, etc.) and slow devices.
2. The 'cache' pool should have the cache filesystems listed first.
3. The best `create` policies to use for the 'cache' pool would probably be `ff`, `epff`, `lfs`, or `eplfs`. The latter two under the assumption that the cache filesystem(s) are far smaller than the backing filesystems. If using path preserving policies remember that you'll need to manually create the core directories of those paths you wish to be cached. Be sure the permissions are in sync. Use `mergerfs.fsck` to check / correct them. You could also set the slow filesystems' mode to `NC` though that'd mean if the cache filesystems fill you'd get "out of space" errors.
4. Enable `moveonenospc` and set `minfreespace` appropriately. To make sure there is enough room on the "slow" pool you might want to set `minfreespace` to at least as large as the size of the largest cache filesystem if not larger. This way in the worst case the whole of the cache filesystem(s) can be moved to the other drives.
5. Set your programs to use the cache pool.
6. Save one of the below scripts or create your own.
7. Use `cron` (as root) to schedule the command at whatever frequency is appropriate for your workflow.

##### time based expiring

Move files from cache to backing pool based only on the last time the file was accessed. Replace `-atime` with `-amin` if you want minutes rather than days.
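A minimal sketch of the idea (not the linked tool below): it assumes `/mnt/cache-fs` is the cache **filesystem** and `/mnt/slow-pool` is the backing pool, both placeholders, and moves anything not accessed in the last N days while preserving relative paths.

```
#!/bin/sh
# hypothetical paths; adjust to your layout
CACHE_FS="/mnt/cache-fs"
BACKING_POOL="/mnt/slow-pool"
DAYS=7

# find files on the cache filesystem not accessed in $DAYS days and
# move them, preserving relative paths, then remove the originals
find "${CACHE_FS}" -type f -atime "+${DAYS}" -printf '%P\n' \
  | rsync -aq --files-from=- --remove-source-files "${CACHE_FS}/" "${BACKING_POOL}/"
```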
You may want to use the `fadvise` / `--drop-cache` version of rsync or run rsync with the tool "nocache".

*NOTE:* The arguments to these scripts include the cache **filesystem** itself, not the pool with the cache filesystem. You could have data loss if the source is the cache pool.

[mergerfs.time-based-mover](tools/mergerfs.time-based-mover?raw=1)

##### percentage full expiring

Move the oldest file from the cache to the backing pool. Continue till below the percentage threshold.

*NOTE:* The arguments to these scripts include the cache **filesystem** itself, not the pool with the cache filesystem. You could have data loss if the source is the cache pool.

[mergerfs.percent-full-mover](tools/mergerfs.percent-full-mover?raw=1)

# PERFORMANCE

mergerfs is at its core just a proxy and therefore its theoretical max performance is that of the underlying devices. However, given it is a FUSE filesystem working from userspace there is an increase in overhead relative to kernel based solutions. That said the performance can match the theoretical max but it depends greatly on the system's configuration. Especially when adding network filesystems into the mix there are many variables which can impact performance. Device speeds and latency, network speeds and latency, general concurrency, read/write sizes, etc.

Unfortunately, given the number of variables it has been difficult to find a single set of settings which provide optimal performance. If you're having performance issues please look over the suggestions below (including the benchmarking section.)

NOTE: be sure to read about these features before changing them to understand what behaviors they may impact

* disable `security_capability` and/or `xattr`
* increase cache timeouts `cache.attr`, `cache.entry`, `cache.negative_entry`
* enable (or disable) page caching (`cache.files`)
* enable `parallel-direct-writes`
* enable `cache.writeback`
* enable `cache.statfs`
* enable `cache.symlinks`
* enable `cache.readdir`
* change the number of worker threads
* disable `posix_acl`
* disable `async_read`
* test theoretical performance using `nullrw` or mounting a ram disk
* use `symlinkify` if your data is largely static and read-only
* use tiered cache devices
* use LVM and LVM cache to place a SSD in front of your HDDs
* increase readahead: `readahead=1024`

If you come across a setting that significantly impacts performance please contact trapexit so he may investigate further. Please test against your normal setup, a singular branch, and with `nullrw=true`.

# BENCHMARKING

Filesystems are complicated. They do many things and many of those are interconnected. Additionally, the OS, drivers, hardware, etc. all can impact performance. Therefore, when benchmarking, it is **necessary** that the test focus as narrowly as possible.

For most, throughput is the key benchmark. To test throughput `dd` is useful but **must** be used with the correct settings in order to ensure the filesystem or device is actually being tested. The OS can and will cache data. Without forcing synchronous reads and writes and/or disabling caching the values returned will not be representative of the device's true performance.

When benchmarking through mergerfs ensure you only use 1 branch to remove any possibility of the policies complicating the situation. Benchmark the underlying filesystem first and then mount mergerfs over it and test again. If you're experiencing speeds below your expectations you will need to narrow down precisely which component is leading to the slowdown.
Preferably test the following in the order listed (but not combined).

1. Enable `nullrw` mode with `nullrw=true`. This will effectively make reads and writes no-ops. Removing the underlying device / filesystem from the equation. This will give us the top theoretical speeds.
2. Mount mergerfs over `tmpfs`. `tmpfs` is a RAM disk. Extremely high speed and very low latency. This is a more realistic best case scenario. Example: `mount -t tmpfs -o size=2G tmpfs /tmp/tmpfs`
3. Mount mergerfs over a local device. NVMe, SSD, HDD, etc. If you have more than one I'd suggest testing each of them as drives and/or controllers (their drivers) could impact performance.
4. Finally, if you intend to use mergerfs with a network filesystem, either as the source of data or to combine with another through mergerfs, test each of those alone as above.

Once you find the component which has the performance issue you can do further testing with different options to see if they impact performance. For reads and writes the most relevant would be: `cache.files`, `async_read`. Less likely but relevant when using NFS or with certain filesystems would be `security_capability`, `xattr`, and `posix_acl`. If you find a specific system, device, filesystem, controller, etc. that performs poorly contact trapexit so he may investigate further.

Sometimes the problem is really the application accessing or writing data through mergerfs. Some software use small buffer sizes which can lead to more requests and therefore greater overhead. You can test this out yourself by replacing `bs=1M` in the examples below with `ibs` or `obs` and using a size of `512` instead of `1M`. In one example test using `nullrw` the write speed dropped from 4.9GB/s to 69.7MB/s when moving from `1M` to `512`. Similar results were had when testing reads. Small write overhead may be improved by leveraging a write cache but in casual tests little gain was found. More tests will need to be done before this feature would become available. If you have an app that appears slow with mergerfs it could be due to this. Contact trapexit so he may investigate further.

### write benchmark

```
$ dd if=/dev/zero of=/mnt/mergerfs/1GB.file bs=1M count=1024 oflag=nocache conv=fdatasync status=progress
```

### read benchmark

```
$ dd if=/mnt/mergerfs/1GB.file of=/dev/null bs=1M count=1024 iflag=nocache conv=fdatasync status=progress
```

### other benchmarks

If you are attempting to benchmark other behaviors you must ensure you clear kernel caches before runs. In fact it would be a good idea to run this before the read and write benchmarks as well just in case.

```
sync
echo 3 | sudo tee /proc/sys/vm/drop_caches
```

# TIPS / NOTES

* This document is literal and thorough. If a suspected feature isn't mentioned it doesn't exist. If certain libfuse arguments aren't listed they probably shouldn't be used.
* Ensure you're using the latest version.
* Run mergerfs as `root`. mergerfs is designed and intended to be run as `root` and may exhibit incorrect behavior if run otherwise.
* If you don't see some directories and files you expect, policies seem to skip branches, you get strange permission errors, etc. be sure the underlying filesystems' permissions are all the same. Use `mergerfs.fsck` to audit the filesystem for out of sync permissions.
* If you still have permission issues be sure you are using POSIX ACL compliant filesystems. mergerfs doesn't generally make exceptions for FAT, NTFS, or other non-POSIX filesystems.
* Do **not** use `cache.files=off` if you expect applications (such as rtorrent) to use [mmap](http://linux.die.net/man/2/mmap) files. Shared mmap is not currently supported in FUSE w/ page caching disabled. Enabling `dropcacheonclose` is recommended when `cache.files=partial|full|auto-full`.
* [Kodi](http://kodi.tv), [Plex](http://plex.tv), [Subsonic](http://subsonic.org), etc. can use directory [mtime](http://linux.die.net/man/2/stat) to more efficiently determine whether to scan for new content rather than simply performing a full scan. If using the default **getattr** policy of **ff** it's possible those programs will miss an update on account of it returning the first directory found's **stat** info and it's a later directory on another mount which had the **mtime** recently updated. To fix this you will want to set **func.getattr=newest**. Remember though that this is just **stat**. If the file is later **open**'ed or **unlink**'ed and the policy is different for those then a completely different file or directory could be acted on.
* Some policies mixed with some functions may result in strange behaviors. Not that some of these behaviors and race conditions couldn't happen outside **mergerfs** but that they are far more likely to occur on account of the attempt to merge together multiple sources of data which could be out of sync due to the different policies.
* For consistency it's generally best to set **category** wide policies rather than individual **func**'s. This will help limit the confusion of tools such as [rsync](http://linux.die.net/man/1/rsync). However, the flexibility is there if needed.

# KNOWN ISSUES / BUGS

#### kernel issues & bugs

[https://github.com/trapexit/mergerfs/wiki/Kernel-Issues-&-Bugs](https://github.com/trapexit/mergerfs/wiki/Kernel-Issues-&-Bugs)

#### directory mtime is not being updated

Remember that the default policy for `getattr` is `ff`. The information for the first directory found will be returned. If it wasn't the directory which had been updated then it will appear outdated.

The reason this is the default is because any other policy would be more expensive and for many applications it is unnecessary. To always return the directory with the most recent mtime or a faked value based on all found would require a scan of all filesystems.

If you always want the directory information from the one with the most recent mtime then use the `newest` policy for `getattr`.

#### 'mv /mnt/pool/foo /mnt/disk1/foo' removes 'foo'

This is not a bug.

Run in verbose mode to better understand what's happening:

```
$ mv -v /mnt/pool/foo /mnt/disk1/foo
copied '/mnt/pool/foo' -> '/mnt/disk1/foo'
removed '/mnt/pool/foo'
$ ls /mnt/pool/foo
ls: cannot access '/mnt/pool/foo': No such file or directory
```

`mv`, when working across devices, is copying the source to target and then removing the source. Since the source **is** the target in this case, depending on the unlink policy, it will remove the just copied file and other files across the branches.

If you want to move files to one filesystem just copy them there and use mergerfs.dedup to clean up the old paths or manually remove them from the branches directly.

#### cached memory appears greater than it should be

Use `cache.files=off` and/or `dropcacheonclose=true`. See the section on page caching.

#### NFS clients returning ESTALE / Stale file handle

NFS generally does not like out of band changes. Take a look at the NFS portion of the [remote filesystems section](#remote-filesystems) for more details.
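If you are exporting a mergerfs mount over NFS, below is a hedged sketch of the settings that section recommends. The mount point, branches, UUID, and client range are all placeholders; consult the remote filesystems section for the reasoning behind each value.

```
# mergerfs mount options recommended for NFS exports
mergerfs -o noforget,inodecalc=path-hash /mnt/disk1:/mnt/disk2 /mnt/pool

# /etc/exports entry; generate the UUID with `uuidgen`
/mnt/pool 192.168.1.0/24(rw,fsid=<UUID>,no_root_squash,no_subtree_check)
```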
#### rtorrent fails with ENODEV (No such device)

Be sure to set `cache.files=partial|full|auto-full|per-process` or turn off `direct_io`. rtorrent and some other applications use [mmap](http://linux.die.net/man/2/mmap) to read and write to files and offer no fallback to traditional methods. FUSE does not currently support mmap while using `direct_io`. There may be a performance penalty on writes with `direct_io` off as well as the problem of double caching but it's the only way to get such applications to work. If the performance loss is too high for other apps you can mount mergerfs twice: once with `direct_io` enabled and once without it. Be sure to set `dropcacheonclose=true` if not using `direct_io`.

#### Plex doesn't work with mergerfs

It does. If you're trying to put Plex's config / metadata / database on mergerfs you can't set `cache.files=off` because Plex is using sqlite3 with mmap enabled. Shared mmap is not supported by Linux's FUSE implementation when page caching is disabled. To fix this place the data elsewhere (preferable) or enable `cache.files` (with `dropcacheonclose=true`). Sqlite3 does not need mmap but the developer needs to fall back to standard IO if mmap fails.

This applies to other software: Radarr, Sonarr, Lidarr, Jellyfin, etc. I would recommend reaching out to the developers of the software you're having troubles with and asking them to add a fallback to regular file IO when mmap is unavailable.

If the issue is that scanning doesn't seem to pick up media then be sure to set `func.getattr=newest` though generally a full scan will pick up all media anyway.

#### When a program tries to move or rename a file it fails

Please read the section above regarding [rename & link](#rename--link).

The problem is that many applications do not properly handle `EXDEV` errors which `rename` and `link` may return even though they are perfectly valid situations which do not indicate actual device, filesystem, or OS errors. The error will only be returned by mergerfs if using a path preserving policy as described in the policy section above. If you do not care about path preservation simply change the mergerfs policy to the non-path preserving version. For example: `-o category.create=mfs`

Ideally the offending software would be fixed and it is recommended that if you run into this problem you contact the software's author and request proper handling of `EXDEV` errors.

#### my 32bit software has problems

Some software has problems with 64bit inode values. The symptoms can include EOVERFLOW errors when trying to list files. You can address this by setting `inodecalc` to one of the 32bit based algos as described in the relevant section.

#### Samba: Moving files / directories fails

Workaround: Copy the file/directory and then remove the original rather than move.

This isn't an issue with Samba but with some SMB clients. GVFS-fuse v1.20.3 and prior (found in Ubuntu 14.04 among others) failed to handle certain error codes correctly. Particularly **STATUS_NOT_SAME_DEVICE** which comes from the **EXDEV** which is returned by **rename** when the call is crossing mount points. When a program gets an **EXDEV** it needs to explicitly take an alternate action to accomplish its goal. In the case of **mv** or similar it tries **rename** and on **EXDEV** falls back to a manual copying of data between the two locations and unlinking the source. In these older versions of GVFS-fuse if it received **EXDEV** it would translate that into **EIO**.
This would cause **mv** or most any application attempting to move files around on that SMB share to fail with an IO error.

[GVFS-fuse v1.22.0](https://bugzilla.gnome.org/show_bug.cgi?id=734568) and above fixed this issue but a large number of systems use the older release. On Ubuntu the version can be checked by issuing `apt-cache showpkg gvfs-fuse`. Most distros released in 2015 seem to have the updated release and will work fine but older systems may not. Upgrading gvfs-fuse or the distro in general will address the problem.

In Apple's MacOSX 10.9 they replaced Samba (client and server) with their own product. It appears their new client does not handle **EXDEV** either and responds similarly to older releases of gvfs on Linux.

#### Trashing files occasionally fails

This is the same issue as with Samba. `rename` returns `EXDEV` (in our case that will really only happen with path preserving policies like `epmfs`) and the software doesn't handle the situation well. This is unfortunately a common failure of software which moves files around. The standard indicates that an implementation `MAY` choose to support non-user home directory trashing of files (which is a `MUST`). The implementation `MAY` also support "top directory trashes" which many probably do.

To create a `$topdir/.Trash` directory as defined in the standard use the [mergerfs-tools](https://github.com/trapexit/mergerfs-tools) tool `mergerfs.mktrash`.

#### Supplemental user groups

Due to the overhead of [getgroups/setgroups](http://linux.die.net/man/2/setgroups) mergerfs utilizes a cache. This cache is opportunistic and per thread. Each thread will query the supplemental groups for a user when that particular thread needs to change credentials and will keep that data for the lifetime of the thread. This means that if a user is added to a group it may not be picked up without the restart of mergerfs. However, since the high level FUSE API's (at least the standard version) thread pool dynamically grows and shrinks it's possible that over time a thread will be killed and later a new thread with no cache will start and query the new data.

The gid cache uses fixed storage to simplify the design and be compatible with older systems which may not have C++11 compilers. There is enough storage for 256 users' supplemental groups. Each user is allowed up to 32 supplemental groups. Linux >= 2.6.3 allows up to 65535 groups per user but most other *nixs allow far fewer, with NFS allowing only 16. The system does handle overflow gracefully. If the user has more than 32 supplemental groups only the first 32 will be used. If more than 256 users are using the system when an uncached user is found it will evict an existing user's cache at random. So long as there aren't more than 256 active users this should be fine. If either value is too low for your needs you will have to modify `gidcache.hpp` to increase the values. Note that doing so will increase the memory needed by each thread.

While not a bug some users have found when using containers that supplemental groups defined inside the container don't work properly with regard to permissions. This is expected as mergerfs lives outside the container and therefore is querying the host's group database. There might be a hack to work around this (make mergerfs read the /etc/group file in the container) but it is not yet implemented and would be limited to Linux and the /etc/group DB.
Preferably users would mount in the host group file into the containers or use a standard shared user & groups technology like NIS or LDAP. # Remote Filesystems Many users ask about compatibility with remote filesystems. This section is to describe any known issues or quirks when using mergerfs with common remote filesystems. Keep in mind that, like with caching, it is not a good idea to change the contents of the remote filesystem [out-of-band](https://en.wikipedia.org/wiki/Out-of-band). Meaning that you really shouldn't change the contents of the underlying filesystems or mergerfs on the server hosting the remote filesystem. Doing so can lead to weird behavior, inconsistency, errors, and even data corruption should multiple programs try to write or read the same data at the same time. This isn't to say you can't do it or that data corruption is likely but it *could* happen. It is better to always use the remote filesystem. Even on the machine serving it. ## NFS [NFS](https://en.wikipedia.org/wiki/Network_File_System) is a common remote filesystem on Unix/POSIX systems. Due to how NFS works there are some settings which need to be set in order for mergerfs to work with it. It should be noted that NFS and FUSE (the technology mergerfs uses) do not work perfectly with one another due to certain design choices in FUSE (and mergerfs.) Due to these issues it is generally recommended to use SMB when possible till situations change. That said mergerfs should generally work as an export of NFS and issues discovered should still be reported. To ensure compatibility between mergerfs and NFS use the following settings. mergerfs settings: * noforget * inodecalc=path-hash NFS export settings: * fsid=UUID * no\_root\_squash `noforget` is needed because NFS uses the `name_to_handle_at` and `open_by_handle_at` functions which allow a program to keep a reference to a file without technically having it open in the typical sense. The problem is that FUSE has no way to know that NFS has a handle that it will later use to open the file again. As a result it is possible for the kernel to tell mergerfs to forget about the node and should NFS ever ask for that node's details in the future it would have nothing to respond with. Keeping nodes around forever is not ideal but at the moment the only way to manage the situation. `inodecalc=path-hash` is needed because NFS is sensitive to out-of-band changes. FUSE doesn't care if a file's inode value changes but NFS, being stateful, does. So if you used the default inode calculation algorithm then it is possible that if you changed a file or updated a directory the file mergerfs will use will be on a different branch and therefore the inode would change. This isn't an ideal solution and others are being considered but it works for most situations. `fsid=UUID` is needed because FUSE filesystems don't have different `st_dev` values which can cause issues when exporting. The easiest thing to do is set each mergerfs export `fsid` to some random value. An easy way to generate a random value is to use the command line tool `uuid` or `uuidgen` or through a website such as [uuidgenerator.net](https://www.uuidgenerator.net/). `no_root_squash` is not strictly necessary but can lead to confusing permission and ownership issues if root squashing is enabled. ## SMB / CIFS [SMB](https://en.wikipedia.org/wiki/Server_Message_Block) is a protocol most used by Microsoft Windows systems to share file shares, printers, etc. 
However, due to the popularity of Windows, it is also supported on many other platforms including Linux. The most popular way of supporting SMB on Linux is via the software Samba.

[Samba](https://en.wikipedia.org/wiki/Samba_(software)), and other ways of serving Linux filesystems via SMB, should work fine with mergerfs. The services do not tend to use the same technologies which NFS uses and therefore don't have the same issues. There should not be any special settings required to use mergerfs with Samba. However, [CIFSD](https://en.wikipedia.org/wiki/CIFSD) and other programs have not been extensively tested. If you use mergerfs with CIFSD or other SMB servers please submit your experiences so these docs can be updated.

## SSHFS

[SSHFS](https://en.wikipedia.org/wiki/SSHFS) is a FUSE filesystem leveraging SSH as the connection and transport layer. While often simpler to set up when compared to NFS or Samba the performance can be lacking and the project is very much in maintenance mode.

There are no known issues using sshfs with mergerfs. You may want to use the following arguments to improve performance but your mileage may vary.

* `-o Ciphers=arcfour`
* `-o Compression=no`

More info can be found [here](https://ideatrash.net/2016/08/odds-and-ends-optimizing-sshfs-moving.html).

## Other

There are other remote filesystems but none popularly used to serve mergerfs. If you use something not listed above feel free to reach out and I will add it to the list.

# FAQ

#### How well does mergerfs scale? Is it "production ready?"

Users have reported running mergerfs on everything from a Raspberry Pi to dual socket Xeon systems with >20 cores. I'm aware of at least a few companies which use mergerfs in production. [Open Media Vault](https://www.openmediavault.org) includes mergerfs as its sole solution for pooling filesystems. The author of mergerfs had it running for over 300 days managing 16+ devices with reasonably heavy 24/7 read and write usage, stopping only after the machine's power supply died.

Most serious issues (crashes or data corruption) have been due to [kernel bugs](https://github.com/trapexit/mergerfs/wiki/Kernel-Issues-&-Bugs), all of which are fixed in stable releases.

#### Can mergerfs be used with filesystems which already have data / are in use?

Yes. mergerfs is really just a proxy and does **NOT** interfere with the normal form or function of the filesystems / mounts / paths it manages. It is just another userland application that is acting as a man-in-the-middle. It can't do anything that any other random piece of software can't do.

mergerfs is **not** a traditional filesystem that takes control over the underlying block device. mergerfs is **not** RAID. It does **not** manipulate the data that passes through it. It does **not** shard data across filesystems. It merely shards some **behavior** and aggregates others.

#### Can drives/filesystems be removed from the pool at will?

Yes. See the previous question's answer.

#### Can mergerfs be removed without affecting the data?

Yes. See the previous question's answer.

#### Can drives/filesystems be moved to another pool?

Yes. See the previous question's answer.

#### How do I migrate data into or out of the pool when adding/removing drives/filesystems?

You don't need to. See the previous question's answer.

#### How do I remove a drive/filesystem but keep the data in the pool?

Nothing special needs to be done. Remove the branch from mergerfs' config and copy (rsync) the data from the removed filesystem into the pool.
Effectively the same as if it were you transferring data from one filesystem to another. If you wish to continue using the pool while performing the transfer simply create another, temporary pool without the filesystem in question and then copy the data. It would probably be a good idea to set the branch to `RO` prior to doing this to ensure no new content is written to the filesystem while performing the copy.

#### What policies should I use?

Unless you're doing something more niche the average user is probably best off using `mfs` for `category.create`. It will spread files out across your branches based on available space. Use `mspmfs` if you want to try to colocate the data a bit more. You may want to use `lus` if you prefer a slightly different distribution of data when you have a mix of smaller and larger filesystems. Generally though `mfs`, `lus`, or even `rand` are good for the general use case. If you are starting with an imbalanced pool you can use the tool **mergerfs.balance** to redistribute files across the pool.

If you really wish to try to colocate files based on directory you can set `func.create` to `epmfs` or similar and `func.mkdir` to `rand` or `eprand` depending on if you just want to colocate generally or on specific branches. Either way the *need* to colocate is rare. For instance: if you wish to remove the device regularly and want the data to predictably be on that device or if you don't use backup at all and don't wish to replace that data piecemeal. In which case using path preservation can help but will require some manual attention. Colocating after the fact can be accomplished using the **mergerfs.consolidate** tool. If you don't need the strict colocation which the `ep` policies provide then you can use the `msp` based policies which will walk back the path till finding a branch that works.

Ultimately there is no correct answer. It is a preference or based on some particular need. mergerfs is very easy to test and experiment with. I suggest creating a test setup and experimenting to get a sense of what you want.

`epmfs` is the default `category.create` policy because `ep` policies are not going to change the general layout of the branches. It won't place files/dirs on branches that don't already have the relative path. So it keeps the system in a known state. It's much easier to stop using `epmfs` or redistribute files around the filesystem than it is to consolidate them back.

#### What settings should I use?

Depends on what features you want. Generally speaking there are no "wrong" settings. All settings are performance or feature related. The best bet is to read over the available options and choose what fits your situation. If something isn't clear from the documentation please reach out and the documentation will be improved.

That said, for the average person, the following should be fine:

`cache.files=off,dropcacheonclose=true,category.create=mfs`

#### Why are all my files ending up on 1 filesystem?!

Did you start with empty filesystems? Did you explicitly configure a `category.create` policy? Are you using an `existing path` / `path preserving` policy?

The default create policy is `epmfs`. That is a path preserving algorithm. With such a policy for `mkdir` and `create` with a set of empty filesystems it will select only 1 filesystem when the first directory is created. Anything, files or directories, created in that first directory will be placed on the same branch because it is preserving paths.
This catches a lot of new users off guard but changing the default would break the setup for many existing users and this policy is the safest policy as it will not change the general layout of the existing filesystems. If you do not care about path preservation and wish your files to be spread across all your filesystems change to `mfs` or similar policy as described above. If you do want path preservation you'll need to perform the manual act of creating paths on the filesystems you want the data to land on before transferring your data. Setting `func.mkdir=epall` can simplify managing path preservation for `create`. Or use `func.mkdir=rand` if you're interested in just grouping together directory content by filesystem. #### Do hardlinks work? Yes. See also the option `inodecalc` for how inode values are calculated. What mergerfs does not do is fake hard links across branches. Read the section "rename & link" for how it works. Remember that hardlinks will NOT work across devices. That includes between the original filesystem and a mergerfs pool, between two separate pools of the same underlying filesystems, or bind mounts of paths within the mergerfs pool. The latter is common when using Docker or Podman. Multiple volumes (bind mounts) to the same underlying filesystem are considered different devices. There is no way to link between them. You should mount in the highest directory in the mergerfs pool that includes all the paths you need if you want links to work. #### Does FICLONE or FICLONERANGE work? Unfortunately not. FUSE, the technology mergerfs is based on, does not support the `clone_file_range` feature needed for it to work. mergerfs won't even know such a request is made. The kernel will simply return an error back to the application making the request. Should FUSE gain the ability mergerfs will be updated to support it. #### Can I use mergerfs without SnapRAID? SnapRAID without mergerfs? Yes. They are completely unrelated pieces of software. #### Can mergerfs run via Docker, Podman, Kubernetes, etc. Yes. With Docker you'll need to include `--cap-add=SYS_ADMIN --device=/dev/fuse --security-opt=apparmor:unconfined` or similar with other container runtimes. You should also be running it as root or given sufficient caps to change user and group identity as well as have root like filesystem permissions. Keep in mind that you **MUST** consider identity when using containers. For example: supplemental groups will be picked up from the container unless you properly manage users and groups by sharing relevant /etc files or by using some other means to share identity across containers. Similarly if you use "rootless" containers and user namespaces to do uid/gid translations you **MUST** consider that while managing shared files. Also, as mentioned by [hotio](https://hotio.dev/containers/mergerfs), with Docker you should probably be mounting with `bind-propagation` set to `slave`. #### Does mergerfs support CoW / copy-on-write / writes to read-only filesystems? Not in the sense of a filesystem like BTRFS or ZFS nor in the overlayfs or aufs sense. It does offer a [cow-shell](http://manpages.ubuntu.com/manpages/bionic/man1/cow-shell.1.html) like hard link breaking (copy to temp file then rename over original) which can be useful when wanting to save space by hardlinking duplicate files but wish to treat each name as if it were a unique and separate file. If you want to write to a read-only filesystem you should look at overlayfs. 
You can always include the overlayfs mount into a mergerfs pool.

#### Why can't I see my files / directories?

It's almost always a permissions issue. Unlike mhddfs and unionfs-fuse, which run as root and attempt to access content as such, mergerfs always changes its credentials to that of the caller. This means that if the user does not have access to a file or directory then neither will mergerfs. However, because mergerfs is creating a union of paths it may be able to read some files and directories on one filesystem but not another resulting in an incomplete set.

Whenever you run into a split permission issue (seeing some but not all files) try using the [mergerfs.fsck](https://github.com/trapexit/mergerfs-tools) tool to check for and fix the mismatch. If you aren't seeing anything at all be sure that the basic permissions are correct. The user and group values are correct and that directories have their executable bit set. A common mistake by users new to Linux is to `chmod -R 644` when they should have used `chmod -R u=rwX,go=rX`.

If using a network filesystem such as NFS or SMB (Samba) be sure to pay close attention to anything regarding permissioning and users. Root squashing and user translation for instance have bitten a few mergerfs users. Some of these also affect the use of mergerfs from container platforms such as Docker.

#### Why use FUSE? Why not a kernel based solution?

As with any solution to a problem there are advantages and disadvantages to each one.

A FUSE based solution has all the downsides of FUSE:

* Higher IO latency due to the trips in and out of kernel space
* Higher general overhead due to trips in and out of kernel space
* Double caching when using page caching
* Misc limitations due to FUSE's design

But FUSE also has a lot of upsides:

* Easier to offer a cross platform solution
* Easier forward and backward compatibility
* Easier updates for users
* Easier and faster release cadence
* Allows more flexibility in design and features
* Overall easier to write, secure, and maintain
* Much lower barrier to entry (getting code into the kernel takes a lot of time and effort initially)

FUSE was chosen because of all the advantages listed above. The negatives of FUSE do not outweigh the positives.

#### Is my OS's libfuse needed for mergerfs to work?

No. Normally `mount.fuse` is needed to get mergerfs (or any FUSE filesystem) to mount using the `mount` command but in vendoring the libfuse library the `mount.fuse` app has been renamed to `mount.mergerfs` meaning the filesystem type in `fstab` can simply be `mergerfs`. That said there should be no harm in having it installed and continuing to use `fuse.mergerfs` as the type in `/etc/fstab`.

If `mergerfs` doesn't work as a type it could be due to how the `mount.mergerfs` tool was installed. It must be in `/sbin/` with proper permissions.

#### Why was splice support removed?

After a lot of testing over the years splicing always appeared to at best provide equivalent performance and in some cases worse performance. Splice is not supported on other platforms forcing a traditional read/write fallback to be provided. The splice code was removed to simplify the codebase.

#### What should mergerfs NOT be used for?

* databases: Even if the database stored data in separate files (mergerfs wouldn't offer much otherwise) the higher latency of the indirection will kill performance. If it is a lightly used SQLITE database then it may be fine but you'll need to test.
* VM images: For the same reasons as databases.
VM images are accessed very aggressively and mergerfs will introduce too much latency (if it works at all).

* As a replacement for RAID: mergerfs is just for pooling branches. If you need that kind of device performance aggregation or high availability you should stick with RAID.

#### Can filesystems be written to directly? Outside of mergerfs while pooled?

Yes, however it's not recommended to use the same file from within the pool and from without at the same time (particularly writing). Especially if using caching of any kind (cache.files, cache.entry, cache.attr, cache.negative_entry, cache.symlinks, cache.readdir, etc.) as there could be a conflict between the cached data and the actual data.

#### Why do I get an "out of space" / "no space left on device" / ENOSPC error even though there appears to be lots of space available?

First make sure you've read the sections above about policies, path preservation, branch filtering, and the options **minfreespace**, **moveonenospc**, **statfs**, and **statfs_ignore**.

mergerfs is simply presenting a union of the content within multiple branches. The reported free space is an aggregate of space available within the pool (behavior modified by **statfs** and **statfs_ignore**). It does not represent a contiguous space. In the same way that read-only filesystems, those with quotas, or those with reserved space report the full theoretical space available.

Due to path preservation, branch tagging, read-only status, and **minfreespace** settings it is perfectly valid that `ENOSPC` / "out of space" / "no space left on device" be returned. It is doing what was asked of it: filtering possible branches due to those settings. Only one error can be returned and if one of the reasons for filtering a branch was **minfreespace** then it will be returned as such. **moveonenospc** is only relevant to writing a file which is too large for the filesystem it's currently on.

It is also possible that the filesystem selected has run out of inodes. Use `df -i` to list the total and available inodes per filesystem.

If you don't care about path preservation then simply change the `create` policy to one which isn't. `mfs` is probably what most are looking for. The reason it's not the default is because it was originally set to `epmfs` and changing it now would change people's setups. Such a setting change will likely occur in mergerfs 3.

#### Why does the total available space in mergerfs not equal outside?

Are you using ext2/3/4? With reserve for root? mergerfs uses available space for statfs calculations. If you've reserved space for root then it won't show up.

You can remove the reserve by running: `tune2fs -m 0 `

#### I notice massive slowdowns of writes when enabling cache.files.

When file caching is enabled in any form (`cache.files!=off`) it will issue `getxattr` requests for `security.capability` prior to *every single write*. This will usually result in a performance degradation, especially when using a network filesystem (such as NFS or SMB.) Unfortunately at this moment the kernel is not caching the response.

To work around this situation mergerfs offers a few solutions.

1. Set `security_capability=false`. It will short circuit any call and return `ENOATTR`. This still means though that mergerfs will receive the request before every write but at least it doesn't get passed through to the underlying filesystem.
2. Set `xattr=noattr`. Same as above but applies to *all* calls to getxattr. Not just `security.capability`.
This will not be cached by the kernel either but mergerfs' runtime config system will still function.

3. Set `xattr=nosys`. Results in mergerfs returning `ENOSYS` which *will* be cached by the kernel. No future xattr calls will be forwarded to mergerfs. The downside is that this also means the xattr based config and query functionality won't work either.
4. Disable file caching. If you aren't using applications which use `mmap` it's probably simpler to just disable it altogether. The kernel won't send the requests when caching is disabled.

#### It's mentioned that there are some security issues with mhddfs. What are they? How does mergerfs address them?

[mhddfs](https://github.com/trapexit/mhddfs) manages running as **root** by calling [getuid()](https://github.com/trapexit/mhddfs/blob/cae96e6251dd91e2bdc24800b4a18a74044f6672/src/main.c#L319) and if it returns **0** then it will [chown](http://linux.die.net/man/1/chown) the file. Not only is that a race condition but it doesn't handle other situations. Rather than attempting to simulate POSIX ACL behavior the proper way to manage this is to use [seteuid](http://linux.die.net/man/2/seteuid) and [setegid](http://linux.die.net/man/2/setegid), in effect becoming the user making the original call, and perform the action as them. This is what mergerfs does and why mergerfs should always run as root.

In Linux setreuid syscalls apply only to the thread. GLIBC hides this away by using realtime signals to inform all threads to change credentials. Taking after **Samba**, mergerfs uses **syscall(SYS_setreuid,...)** to set the caller's credentials for that thread only, jumping back to **root** as necessary should escalated privileges be needed (for instance: to clone paths between filesystems).

For non-Linux systems mergerfs uses a read-write lock and changes credentials only when necessary. If multiple threads are to be user X then only the first one will need to change the process's credentials. So long as the other threads need to be user X they will take a readlock allowing multiple threads to share the credentials. Once a request comes in to run as user Y that thread will attempt a write lock and change to Y's credentials when it can. If the ability to give writers priority is supported then that flag will be used so threads trying to change credentials don't starve. This isn't the best solution but should work reasonably well assuming there are few users.

# mergerfs versus X

#### mhddfs

mhddfs has not been maintained for some time and has some known stability and security issues. mergerfs provides a superset of mhddfs' features and should offer the same or better performance.

Below is an example of mhddfs and mergerfs setup to work similarly.

`mhddfs -o mlimit=4G,allow_other /mnt/drive1,/mnt/drive2 /mnt/pool`

`mergerfs -o minfreespace=4G,category.create=ff /mnt/drive1:/mnt/drive2 /mnt/pool`

#### aufs

aufs is mostly abandoned and no longer available in most Linux distros. While aufs can offer better peak performance mergerfs provides more configurability and is generally easier to use. mergerfs however does not offer the overlay / copy-on-write (CoW) features which aufs has.

#### unionfs-fuse

unionfs-fuse is more like aufs than mergerfs in that it offers overlay / copy-on-write (CoW) features. If you're just looking to create a union of filesystems and want flexibility in file/directory placement then mergerfs offers that whereas unionfs is more for overlaying read/write filesystems over read-only ones.
#### overlayfs

overlayfs is similar to aufs and unionfs-fuse in that it too is primarily used to layer a read/write filesystem over one or more read-only filesystems. It does not have the ability to spread files/directories across numerous filesystems.

#### RAID0, JBOD, drive concatenation, striping

With simple JBOD / drive concatenation / striping / RAID0 a single drive failure will result in full pool failure. mergerfs performs a similar function without the possibility of catastrophic failure and the difficulties of recovery. Drives may fail, but all other filesystems and their data will continue to be accessible.

The main practical difference with mergerfs is that you don't actually have a contiguous space as large as if you used those other technologies, meaning you can't create a 2TB file on a pool of two 1TB filesystems. When combined with something like [SnapRaid](http://www.snapraid.it) and/or an offsite backup solution you can have the flexibility of JBOD without the single point of failure.

#### UnRAID

UnRAID is a full OS and its storage layer, as I understand it, is proprietary and closed source. Users who have experience with both have often said they prefer the flexibility offered by mergerfs, and for some the fact that it is open source is important.

There are a number of UnRAID users who use mergerfs as well, though I'm not entirely familiar with the use case. For semi-static data mergerfs + [SnapRaid](http://www.snapraid.it) provides a similar solution.

#### ZFS

mergerfs is very different from ZFS. mergerfs is intended to provide flexible pooling of arbitrary filesystems (local or remote) of arbitrary sizes, primarily for `write once, read many` use cases such as bulk media storage where data integrity and backup are managed in other ways. In those use cases ZFS can introduce a number of costs and limitations as described [here](http://louwrentius.com/the-hidden-cost-of-using-zfs-for-your-home-nas.html), [here](https://markmcb.com/2020/01/07/five-years-of-btrfs/), and [here](https://utcc.utoronto.ca/~cks/space/blog/solaris/ZFSWhyNoRealReshaping).

#### StableBit's DrivePool

DrivePool works only on Windows, so it is not as common an alternative as other Linux solutions. If you want to use Windows then DrivePool is a good option.

Functionally the two projects work a bit differently. DrivePool always writes to the filesystem with the most free space and later rebalances. mergerfs does not offer rebalancing but chooses a branch at file/directory create time. DrivePool's rebalancing can be configured differently per directory and has file pattern matching to further customize the behavior. mergerfs, not having rebalancing, does not have these features, but similar features are planned for mergerfs v3. DrivePool has builtin file duplication which mergerfs does not natively support (but which can be done via an external script).

There are a lot of miscellaneous differences between the two projects, but most features in DrivePool can be replicated with external tools in combination with mergerfs. Additionally, DrivePool is a closed source commercial product whereas mergerfs is an ISC licensed OSS project.

# SUPPORT

Filesystems are complex and difficult to debug. mergerfs, while being just a proxy of sorts, can be difficult to debug given the large number of possible settings it can have and the number of environments it can run in. When reporting a suspected issue **please** include as much of the below information as possible, otherwise it will be difficult or impossible to diagnose.
Also please read the above documentation as it provides details on many previously encountered questions/issues.

**Please make sure you are using the [latest release](https://github.com/trapexit/mergerfs/releases) or have tried it in comparison. Old versions, which are often included in distros like Debian and Ubuntu, are never going to be updated and the issue you are encountering may have been addressed already.**

**For commercial support or feature requests please [contact me directly.](mailto:support@spawn.link)**

#### Information to include in bug reports

(A small convenience sketch that gathers several of these items appears at the end of this section.)

* [Information about the broader problem along with any attempted solutions.](https://xyproblem.info)
* Solutions already ruled out and why.
* Version of mergerfs: `mergerfs --version`
* mergerfs settings / arguments: from fstab, systemd unit, command line, OMV plugin, etc.
* Version of the OS: `uname -a` and `lsb_release -a`
* List of branches, their filesystem types, sizes (before and after the issue): `df -h`
* **All** information about the relevant paths and files: permissions, ownership, etc.
* **All** information about the client app making the requests: version, uid/gid
* Runtime environment:
  * Is mergerfs running within a container?
  * Are the client apps using mergerfs running in a container?
* A `strace` of the app having problems:
  * `strace -fvTtt -s 256 -o /tmp/app.strace.txt <cmd>`
* A `strace` of mergerfs while the program is trying to do whatever it is failing to do:
  * `strace -fvTtt -s 256 -p <mergerfs pid> -o /tmp/mergerfs.strace.txt`
* **Precise** directions on replicating the issue. Do not leave **anything** out.
* Try to recreate the problem in the simplest way using standard programs: `ln`, `mv`, `cp`, `ls`, `dd`, etc.

#### Contact / Issue submission

* github.com: https://github.com/trapexit/mergerfs/issues
* discord: https://discord.gg/MpAr69V
* reddit: https://www.reddit.com/r/mergerfs

#### Donations

https://github.com/trapexit/support

Development and support of a project like mergerfs requires a significant amount of time and effort. The software is released under the very liberal ISC license and is therefore free to use for personal or commercial purposes.

If you are a personal user and find mergerfs and its support valuable and would like to support the project financially, it would be very much appreciated.

If you are using mergerfs commercially, please consider sponsoring the project to ensure it continues to be maintained and receive updates. If custom features are needed, feel free to [contact me directly](mailto:support@spawn.link).
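For convenience, here is a minimal sketch of a script that collects several of the items requested in the bug report list above into a single file. The output path is an assumption; all of the commands used are the standard tools already mentioned in that list.

```sh
#!/bin/sh
# Sketch only: gathers basic diagnostic info requested in the list above.
OUT=/tmp/mergerfs-debug.txt
{
  echo "== mergerfs version ==";  mergerfs --version
  echo "== OS ==";                uname -a; lsb_release -a 2>/dev/null
  echo "== mergerfs mounts ==";   grep mergerfs /etc/fstab /proc/mounts 2>/dev/null
  echo "== branch space ==";      df -h
  echo "== branch inodes ==";     df -i
} > "$OUT" 2>&1
echo "wrote $OUT"
```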
# LINKS * https://spawn.link * https://github.com/trapexit/mergerfs * https://github.com/trapexit/mergerfs/wiki * https://github.com/trapexit/mergerfs-tools * https://github.com/trapexit/scorch * https://github.com/trapexit/bbf mergerfs-2.40.2/buildtools/000077500000000000000000000000001457016576200156065ustar00rootroot00000000000000mergerfs-2.40.2/buildtools/create-branches000077500000000000000000000004711457016576200205640ustar00rootroot00000000000000#!/bin/sh set -x BASEPATH="/tmp" for x in $(seq -w 2) do FILEPATH="${BASEPATH}/mergerfs-${x}.img" MOUNTPOINT="${BASEPATH}/mergerfs-${x}" truncate -s 1G "${FILEPATH}" mkdir -p "${MOUNTPOINT}" mkfs.ext4 -m0 -L "mergerfs${x}" "${FILEPATH}" sudo mount "${FILEPATH}" "${MOUNTPOINT}" done mergerfs-2.40.2/buildtools/git2debcl000077500000000000000000000067451457016576200174070ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (c) 2016, Antonio SJ Musumeci # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import sys import subprocess import argparse def call(args): return subprocess.Popen(args,stdout=subprocess.PIPE).communicate()[0].decode() def git_tags(): args = ["git", "tag", '-l'] tags = call(args) tags = [[int(X) for X in tag.split(".")] for tag in tags.split()] tags.sort() tags.reverse() tags = [".".join([str(X) for X in tag]) for tag in tags] return tags def git_log(fromtag,totag): args = ['git','log','--no-merges','--oneline',fromtag+'...'+totag] return call(args).strip().split('\n') def git_author_and_time(tag): args = ['git','log','-1','--format=-- %an <%ae> %cD',tag] return call(args).strip() def git_version(): args = ['git','describe','--always','--tags','--dirty'] return call(args).strip() def guess_distro(): try: args = ['lsb_release','-i','-s'] return call(args).strip().lower() except: return 'unknown' def guess_codename(): try: args = ['lsb_release','-c','-s'] return call(args).strip().lower() except: return 'unknown' def main(): parser = argparse.ArgumentParser(description='Generated debian/changelog from git log') parser.add_argument('--name',type=str,help='Name of package',required=True) parser.add_argument('--version',type=str,help='Place in git history to include upto',default='::guess::') parser.add_argument('--distro',type=str,help='Distribution name',default='::guess::') parser.add_argument('--codename',type=str,help='Distribution codename',default='::guess::') parser.add_argument('--urgency',type=str,help='Urgency',default='medium') args = parser.parse_args() if args.distro == '::guess::': args.distro = guess_distro() if args.codename == '::guess::': args.codename = guess_codename() versuffix = "~"+args.distro+"-"+args.codename if args.version == '::guess::': args.version = git_version() tags = git_tags() if args.version in tags: idx = tags.index(args.version) tags = tags[idx:] tags = list(zip(tags,tags)) else: tags = list(zip(tags,tags)) 
tags.insert(0,(args.version,'HEAD')) for i in range(0,len(tags)): tags[i] = (tags[i][0] + versuffix,tags[i][1]) tag = tags[0] for prev in tags[1:]: lines = git_log(tag[1],prev[1]) if lines == ['']: tag = prev continue print('{0} ({1}) {2}; urgency={3}\n'.format(args.name,tag[0],args.codename,args.urgency)) for line in lines: print(" * " + line) authorandtime = git_author_and_time(tag[1]) print(' {0}\n'.format(authorandtime)) tag = prev if __name__ == "__main__": main() mergerfs-2.40.2/buildtools/install-build-pkgs000077500000000000000000000025451457016576200212470ustar00rootroot00000000000000#!/bin/sh if [ -e /usr/bin/apt-get ]; then export DEBIAN_FRONTEND=noninteractive apt-get -qy update apt-get -qy --force-yes --no-install-suggests --no-install-recommends \ install \ ca-certificates \ build-essential \ git \ g++ \ debhelper \ automake \ fakeroot \ libtool \ lsb-release apt-get -qy --no-install-suggests --no-install-recommends install python apt-get -qy --no-install-suggests --no-install-recommends install python3 elif [ -e /usr/bin/dnf ]; then dnf -y update dnf -y install \ git rpm-build gcc-c++ make which python3 automake \ libtool gettext-devel elif [ -e /usr/bin/yum ]; then yum -y update yum -y install \ git rpm-build gcc-c++ make which \ python python-argparse \ automake libtool gettext-devel elif [ -e /sbin/apk ]; then apk add \ abuild git gcc g++ make autoconf \ automake libtool gettext-dev linux-headers elif [ -e /usr/sbin/pkg ]; then pkg install \ git gmake gcc autoconf automake libtool \ gettext-tools fi if [ ! -e /usr/bin/python ]; then if [ -e /usr/bin/python3 ]; then ln -s /usr/bin/python3 /usr/bin/python elif [ -e /usr/bin/python2 ]; then ln -s /usr/bin/python2 /usr/bin/python fi fi mergerfs-2.40.2/buildtools/update-version000077500000000000000000000006161457016576200205040ustar00rootroot00000000000000#!/bin/sh VERSION=$(git describe --always --tags --dirty) if [ $? -ne 0 ]; then VERSION=$(cat VERSION) fi if [ "${VERSION}" = "" ]; then VERSION="unknown" fi echo -n "${VERSION}" > VERSION grep -q \"${VERSION}\" src/version.hpp RV=$? if [ $RV -ne 0 ]; then echo "#pragma once" > src/version.hpp echo "static const char MERGERFS_VERSION[] = \"${VERSION}\";" >> src/version.hpp fi mergerfs-2.40.2/debian/000077500000000000000000000000001457016576200146505ustar00rootroot00000000000000mergerfs-2.40.2/debian/compat000066400000000000000000000000021457016576200160460ustar00rootroot000000000000009 mergerfs-2.40.2/debian/control000066400000000000000000000010621457016576200162520ustar00rootroot00000000000000Source: mergerfs Section: utils Priority: optional Maintainer: Antonio SJ Musumeci Build-Depends: debhelper (>= 8.0.0) Standards-Version: 3.9.4 Homepage: http://github.com/trapexit/mergerfs Package: mergerfs Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, fuse [linux-any] | fuse4bsd [kfreebsd-any] Description: another FUSE union filesystem mergerfs is a union filesystem geared towards simplifying storage and management of files across numerous commodity storage devices. It is similar to mhddfs, unionfs, and aufs. 
mergerfs-2.40.2/debian/copyright000066400000000000000000000020351457016576200166030ustar00rootroot00000000000000Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: mergerfs-1.1.0 Source: http://github.com/trapexit/mergerfs Files: * Copyright: 2014 Antonio SJ Musumeci License: ISC Files: debian/* Copyright: 2014 Antonio SJ Musumeci License: ISC License: ISC Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. . THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. n mergerfs-2.40.2/debian/docs000066400000000000000000000000121457016576200155140ustar00rootroot00000000000000README.md mergerfs-2.40.2/debian/mergerfs.postinst000077500000000000000000000001611457016576200202700ustar00rootroot00000000000000#!/bin/sh set -e case "${1}" in configure) chmod 4755 /usr/bin/mergerfs-fusermount ;; esac mergerfs-2.40.2/debian/rules000077500000000000000000000003251457016576200157300ustar00rootroot00000000000000#!/usr/bin/make -f # -*- makefile -*- # Uncomment this to turn on verbose mode. export DH_VERBOSE=1 %: dh $@ --parallel override_dh_auto_install: $(MAKE) DESTDIR=$(CURDIR)/debian/mergerfs PREFIX=/usr install mergerfs-2.40.2/libfuse/000077500000000000000000000000001457016576200150575ustar00rootroot00000000000000mergerfs-2.40.2/libfuse/AUTHORS000066400000000000000000000035101457016576200161260ustar00rootroot00000000000000Current Maintainer ------------------ Nikolaus Rath Past Maintainers ---------------- Miklos Szeredi (until 12/2015) Contributors ------------ CUSE has been written by Tejun Heo . Furthermore, the following people have contributed patches (autogenerated list): Anatol Pomozov Antonio SJ Musumeci Christopher Harrison Csaba Henk cvs2git <> Dalvik Khertel Daniel Thau David McNab David Sheets Emmanuel Dreyfus Enke Chen Eric Engestrom Eric Wong Fabrice Bauzac Feng Shuo Hendrik Brueckner Ikey Doherty Jan Blumschein Joachim Schiele Joachim Schiele John Muir Laszlo Papp Madan Valluri Mark Glines Max Krasnyansky Michael Grigoriev Miklos Szeredi Miklos Szeredi mkmm@gmx-topmail.de Natanael Copa Nikolaus Rath Olivier Blin Ratna_Bolla@dell.com Reuben Hawkins Richard W.M. Jones Riku Voipio Roland Bauerschmidt Sam Stuewe Sebastian Pipping therealneworld@gmail.com Winfried Koehler mergerfs-2.40.2/libfuse/COPYING000066400000000000000000000432541457016576200161220ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. 
This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. mergerfs-2.40.2/libfuse/COPYING.LIB000066400000000000000000000636421457016576200165320ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. 
Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. 
A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. 
If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. 
It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! 
mergerfs-2.40.2/libfuse/Makefile000066400000000000000000000057401457016576200165250ustar00rootroot00000000000000VERSION = 2.9.7-mergerfs_2.30.0 OS := $(shell uname -s) ifeq ($(OS),Linux) UTILS := utils INSTALLUTILS := install-utils else UTILS := INSTALLUTILS := endif ifeq ($(DEBUG),1) OPT_FLAGS := -O0 -g -fsanitize=undefined else OPT_FLAGS := -O2 endif ifeq ($(LTO),1) LTO_FLAGS := -flto else LTO_FLAGS := endif DESTDIR = PREFIX = /usr/local EXEC_PREFIX = $(PREFIX) DATAROOTDIR = $(PREFIX)/share DATADIR = $(DATAROOTDIR) BINDIR = $(EXEC_PREFIX)/bin SBINDIR = /sbin MANDIR = $(DATAROOTDIR)/man MAN1DIR = $(MANDIR)/man1 INSTALLBINDIR = $(DESTDIR)$(BINDIR) INSTALLSBINDIR = $(DESTDIR)$(SBINDIR) INSTALLMAN1DIR = $(DESTDIR)$(MAN1DIR) AR ?= ar SRC_C = \ lib/buffer.c \ lib/crc32b.c \ lib/debug.c \ lib/fuse.c \ lib/fuse_dirents.c \ lib/fuse_lowlevel.c \ lib/node.c \ lib/fuse_node.c \ lib/fuse_opt.c \ lib/fuse_session.c \ lib/fuse_signals.c \ lib/helper.c \ lib/mount.c SRC_CPP = \ lib/format.cpp \ lib/os.cpp \ lib/cpu.cpp \ lib/fuse_config.cpp \ lib/fuse_loop.cpp \ lib/fuse_msgbuf.cpp OBJS_C = $(SRC_C:lib/%.c=build/%.o) OBJS_CPP = $(SRC_CPP:lib/%.cpp=build/%.o) DEPS_C = $(SRC_C:lib/%.c=build/%.d) DEPS_CPP = $(SRC_CPP:lib/%.cpp=build/%.d) CFLAGS ?= \ $(OPT_FLAGS) CFLAGS := \ ${CFLAGS} \ $(LTO_FLAGS) \ -std=gnu99 \ -Wall \ -pipe \ -MMD CXXFLAGS := \ ${CXXFLAGS} \ $(LTO_FLAGS) \ -std=c++11 \ -Wall \ -pipe \ -MMD FUSERMOUNT_DIR = $(BINDIR) FUSE_FLAGS = \ -Iinclude \ -Ibuild \ -D_REENTRANT \ -D_FILE_OFFSET_BITS=64 \ -DPACKAGE_VERSION=\"$(VERSION)\" \ -DFUSERMOUNT_DIR=\"$(FUSERMOUNT_DIR)\" LDFLAGS := \ ${LDFLAGS} \ -lrt \ -pthread all: build/libfuse.a $(UTILS) build/config.h: build/stamp ecfd/build | tee build/config.h build/stamp: mkdir -p build touch $@ objects: build/config.h $(MAKE) $(OBJS_C) $(OBJS_CPP) build/libfuse.a: objects ${AR} rcs build/libfuse.a $(OBJS_C) $(OBJS_CPP) utils: mergerfs-fusermount mount.mergerfs build/mergerfs-fusermount: build/config.h util/fusermount.c lib/mount_util.c $(CC) $(CFLAGS) $(FUSE_FLAGS) -Ilib -o build/mergerfs-fusermount util/fusermount.c lib/mount_util.c mergerfs-fusermount: build/mergerfs-fusermount build/mount.mergerfs: build/libfuse.a util/mount.mergerfs.c $(CC) $(CFLAGS) $(FUSE_FLAGS) -o build/mount.mergerfs util/mount.mergerfs.c build/libfuse.a $(LDFLAGS) mount.mergerfs: build/mount.mergerfs build/%.o: lib/%.c $(CC) $(CFLAGS) $(FUSE_FLAGS) -c $< -o $@ build/%.o: lib/%.cpp $(CXX) $(CXXFLAGS) $(FUSE_FLAGS) -c $< -o $@ clean: rm -rf build distclean: clean strip: strip --strip-all build/mount.mergerfs strip --strip-all build/mergerfs-fusermount install-utils: mergerfs-fusermount mount.mergerfs strip install -D build/mergerfs-fusermount "$(INSTALLBINDIR)/mergerfs-fusermount" install -D build/mount.mergerfs "$(INSTALLSBINDIR)/mount.mergerfs" chown root "$(INSTALLBINDIR)/mergerfs-fusermount" chmod u+s "$(INSTALLBINDIR)/mergerfs-fusermount" install: $(INSTALLUTILS) .PHONY: objects strip utils install install-utils -include $(DEPS_C) $(DEPS_CPP) mergerfs-2.40.2/libfuse/NEWS000066400000000000000000000170241457016576200155620ustar00rootroot00000000000000What is new in 2.9 - Add "zero copy" support for kernel 2.6.35 or newer - Make maximum background requests tunable on kernel 2.6.32 or newer - Require --no-canonicalize in (u)mount (util-linux version 2.18 or newer) to fix security problems with fusermount - Use dynamically sized hash tables in high level library - Memory use of filesystem daemon can shrink more easily - Add "auto_unmount" option - Add "remember" option - Add 
man pages for fusermount, mount.fuse and ulockmgr_server - API changes: o Introduce "store" and "retrieve" for accessing kernel buffers on kernel 2.6.36 or newer o Introduce abstract buffer for zero copy operations o Allow path calculation to be omitted on certain operations o Allow batching forget requests o Add "flock" method o Add support for ioctl on directories o Add delete notification o Add fallocate operation (linux kernel 3.5 or newer) - Bug fixes and small improvements ============================================================================ What is new in 2.8 - More scalable directory tree locking - Atomic open(O_TRUNC) support - Support big write requests on kernels 2.6.26 and newer - Out-of-tree fuse module removed - Better NFS exporting support - New ioctl and poll requests - New CUSE (Character Device in Userspace) interface - Allow umask processing in userspace - Added cache invalidation notifications - Bugfixes and small improvements ============================================================================ What is new in 2.7 - Stacking support for the high level API - Add filename charset conversion module - Improved mounting ============================================================================ What is new in 2.6 - Improved read characteristics (asynchronous reads) - Support for aborting filesystem connection - POSIX file locking support - Request interruption support - Building module for Linux kernels earlier than 2.6.9 not supported - Allow block device based filesystems to support swap files - Several bugs fixed, including a rare system hang on SMP ============================================================================ What is new in 2.5 - Merge library part of FreeBSD port - New atomic create+open, access and ftruncate operations - On filesystems implementing the new create+open operation, and running on Linux kernels 2.6.15 or later, the 'cp' operation will work correctly when copying read-only files. - New option parsing interface added to the library - Lots of minor improvements and fixes ============================================================================ What is new in 2.4 - Simplify device opening. Now '/dev/fuse' is a requirement - Allow module auto-loading if user has access to '/dev/fuse' - Allow mounting over a regular file for unprivileged users - Allow mounting of arbitrary FUSE filesystems from /etc/fstab - New mount options: 'umask=M', 'uid=N', 'gid=N' - Check for non-empty mountpoint, and refuse mount by default. New mount option: 'nonempty' - Low level (inode based) API added - Allow 'direct_io' and 'keep_cache' options to be set on a case-by-case basis on open. - Add 'attr_timeout' and 'entry_timeout' mount options to the high-level library. Until now these timeouts were fixed at 1 sec. - Some bugfixes ============================================================================ What is new in 2.3 - Add new directory related operations: opendir(), readdir(), releasedir() and fsyncdir() - Add init() and destroy() operations which are called before the event loop is started and after it has exited - Update kernel ABI so that on dual architectures (e.g. AMD64) 32bit binaries work under a 64bit kernel - Bugfixes ============================================================================ What is new in 2.2 Userspace changes: - Add fuse_file_info structure to file operations, this allows the filesystem to return a file handle in open() which is passed to read(), write(), flush(), fsync() and release(). 
- Add source compatibility with 2.1 and 1.4 releases - Binary compatibility with 2.1 release is retained Kernel changes: - Make requests interruptible. This prevents the filesystem from going into an unbreakable deadlock with itself. - Make readpages() synchronous. Asynchronous requests are deadlock prone, since they cannot be interrupted (see above) - Remove shared-writeable mapping support, which could deadlock the machine - Remove INVALIDATE userspace initiated request - Update ABI to be independent of sizeof(long), so dual-size archs don't cause problems - Remove /sys/fs/fuse/version. Version checking is now done through the fuse device - Replace directory reading method on the kernel interface. Instead of passing an open file descriptor to the kernel, send data through the FUSE device, like all other operations. ============================================================================ What is new in 2.1 * Bug fixes * Improved support for filesystems implementing a custom event-loop * Add 'pkg-config' support * Kernel module can be compiled separately ============================================================================ What is new in 1.9 * Lots of bugs fixed * Minor modifications to the library API * Improvements to the kernel/userspace interface * Mounting by non-root made more secure * Build shared library in addition to the static one * Consolidated mount options * Optimized reading under 2.6 kernels * Direct I/O support * Support file I/O on deleted files * Extended attributes support ============================================================================ What is new in 1.3 * Thanks to user bugreports and stress testing with LTP and sfx-linux a number of bugs were fixed, some quite serious. * Fix compile problems with recent SuSE kernels ============================================================================ What is new in 1.2 * Fix mount problems on recent 2.6 kernels with SELinux enabled * Fixed writing files larger than 2GBytes * Other bugfixes ============================================================================ What is new in 1.1 * Support for the 2.6 kernels * Support for exporting filesystem over NFS in 2.6 kernels * Read efficiency improvements: read in 64k blocks instead of 4k (Michael Grigoriev). Can be turned on with '-l' option of fusermount * Lazy automatic unmount * Added 'fsync()' VFS call to the FUSE interface * Bugfixes ============================================================================ What is new in 1.0 * Cleanups and bugfixes * Added 'release()' VFS call to the FUSE interface * 64 bit file offsets (handling of > 4 GByte files) * libfuse is now under LGPL * New 'statfs' call (Mark Glines) * Cleaned up mount procedure (mostly by Mark Glines) NOTE: Binaries linked with a previous version of libavfs may not work with the new version of the fusermount program. In such a case, recompile the program after installing the new libavfs library. * Fix for problems under linux kernel 2.4.19 ============================================================================ What is new in 0.95 * Optimized read/write operations.
Raw throughput has increased to about 60Mbyte/s on a Celeron/360 * Python bindings by Jeff Epler * Perl bindings by Mark Glines * Improved multithreaded operation * Simplified library interface * Bugfixes ============================================================================ What is new in 0.9: * Everything mergerfs-2.40.2/libfuse/README.NFS000066400000000000000000000025461457016576200163730ustar00rootroot00000000000000NFS exporting is supported in Linux kernels 2.6.27 or later. You need to add an fsid=NNN option to /etc/exports to make exporting a FUSE directory work. Filesystem support ------------------ NFS exporting works to some extent on all fuse filesystems, but not perfectly. This is due to the stateless nature of the protocol, the server has no way of knowing whether the client is keeping a reference to a file or not, and hence that file may be removed from the server's cache. In that case there has to be a way to look up that object using the inode number, otherwise an ESTALE error will be returned. 1) low-level interface Filesystems need to implement special lookups for the names "." and "..". The former may be requested on any inode, including non-directories, while the latter is only requested for directories. Otherwise these special lookups should behave identically to ordinary lookups. 2) high-level interface Because the high-level interface is path based, it is not possible to delegate looking up by inode to the filesystem. To work around this, currently a "noforget" option is provided, which makes the library remember nodes forever. This will make the NFS server happy, but also results in an ever growing memory footprint for the filesystem. For this reason if the filesystem is large (or the memory is small), then this option is not recommended. mergerfs-2.40.2/libfuse/README.md000066400000000000000000000077561457016576200163550ustar00rootroot00000000000000libfuse ======= Warning: unresolved security issue ---------------------------------- Be aware that FUSE has an unresolved security bug ([bug #15](https://github.com/libfuse/libfuse/issues/15)): the permission check for accessing a cached directory is only done once when the directory entry is first loaded into the cache. Subsequent accesses will re-use the results of the first check, even if the directory permissions have since changed, and even if the subsequent access is made by a different user. This bug needs to be fixed in the Linux kernel and has been known since 2006 but unfortunately no fix has been applied yet. If you depend on correct permission handling for FUSE file systems, the only workaround is to completely disable caching of directory entries. Alternatively, the severity of the bug can be somewhat reduced by not using the `allow_other` mount option. About ----- FUSE (Filesystem in Userspace) is an interface for userspace programs to export a filesystem to the Linux kernel. The FUSE project consists of two components: the *fuse* kernel module (maintained in the regular kernel repositories) and the *libfuse* userspace library (maintained in this repository). libfuse provides the reference implementation for communicating with the FUSE kernel module. A FUSE file system is typically implemented as a standalone application that links with libfuse. libfuse provides functions to mount the file system, unmount it, read requests from the kernel, and send responses back. libfuse offers two APIs: a "high-level", synchronous API, and a "low-level" asynchronous API. 
In both cases, incoming requests from the kernel are passed to the main program using callbacks. When using the high-level API, the callbacks may work with file names and paths instead of inodes, and processing of a request finishes when the callback function returns. When using the low-level API, the callbacks must work with inodes and responses must be sent explicitly using a separate set of API functions. Installation ------------ ./configure make -j8 make install You may also need to add `/usr/local/lib` to `/etc/ld.so.conf` and/or run *ldconfig*. If you're building from the git repository (instead of using a release tarball), you also need to run `./makeconf.sh` to create the `configure` script. You'll also need a fuse kernel module (Linux kernels 2.6.14 or later contain FUSE support). For more details see the file `INSTALL` Security implications --------------------- If you run `make install`, the *fusermount* program is installed set-user-id to root. This is done to allow normal users to mount their own filesystem implementations. There must however be some limitations, in order to prevent Bad User from doing nasty things. Currently those limitations are: - The user can only mount on a mountpoint, for which it has write permission - The mountpoint is not a sticky directory which isn't owned by the user (like /tmp usually is) - No other user (including root) can access the contents of the mounted filesystem (though this can be relaxed by allowing the use of the `allow_other` and `allow_root` mount options in `fuse.conf`) Building your own filesystem ------------------------------ FUSE comes with several example file systems in the `examples` directory. For example, the *fusexmp* example mirrors the contents of the root directory under the mountpoint. Start from there and adapt the code! The documentation of the API functions and necessary callbacks is mostly contained in the files `include/fuse.h` (for the high-level API) and `include/fuse_lowlevel.h` (for the low-level API). An autogenerated html version of the API is available in the `doc/html` directory and at http://libfuse.github.io/doxygen. Getting Help ------------ If you need help, please ask on the mailing list (subscribe at https://lists.sourceforge.net/lists/listinfo/fuse-devel). Please report any bugs on the GitHub issue tracker at https://github.com/libfuse/main/issues. mergerfs-2.40.2/libfuse/ecfd/000077500000000000000000000000001457016576200157605ustar00rootroot00000000000000mergerfs-2.40.2/libfuse/ecfd/LICENSE000066400000000000000000000014431457016576200167670ustar00rootroot00000000000000/* ISC License Copyright (c) 2018, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ mergerfs-2.40.2/libfuse/ecfd/README.md000066400000000000000000000016001457016576200172360ustar00rootroot00000000000000# ecfd: embeddable C feature discovery This is a simple script and set of tests to help with what would traditionally be the `configure` step of most C/C++ projects. It is intended to be simple to embed into a project and only depends on the C compiler and /bin/sh. ## USAGE First copy `ecfd` into your project. Next simply call the build script which will run the tests and output a `config.h` style file to `stdout`. Compile errors will be output to `stderr`, which can be used to understand why tests aren't working. ``` $ ./ecfd/build > include/config.h ``` ## TESTS Each test is in a single module. The build script will try to compile each module and on success will execute the program. If anything is printed to `stdout` it will be appended to the `#define` printed to the build script's `stdout`. If the compilation fails nothing will be printed. Look at the tests directory for examples. mergerfs-2.40.2/libfuse/ecfd/build000077500000000000000000000010611457016576200170030ustar00rootroot00000000000000#!/bin/sh PWD=$(pwd) BASEPATH="${PWD}/${0%/*}" CC=${CC:-cc} OUTPUT="/dev/null" echo "#ifndef CONFIG_H_INCLUDED" echo "#define CONFIG_H_INCLUDED" echo IFS= for file in "${BASEPATH}/tests/"*.c do binary="${file%.c}" basename=$(basename ${binary}) ${CC} -o "${binary}" "${file}" 1>&2 if [ $? -eq 0 ]; then STDOUT=$(${binary}) if [ "${STDOUT}" != "" ]; then echo "#define ${basename} ${STDOUT}" else echo "#define ${basename}" fi rm -f "${binary}" fi done echo echo "#endif" mergerfs-2.40.2/libfuse/ecfd/tests/000077500000000000000000000000001457016576200171225ustar00rootroot00000000000000mergerfs-2.40.2/libfuse/ecfd/tests/HAVE_FORK.c000066400000000000000000000001341457016576200206300ustar00rootroot00000000000000#include #include int main(void) { (void)fork; return 0; } mergerfs-2.40.2/libfuse/ecfd/tests/HAVE_MALLOC_TRIM.c000066400000000000000000000000661457016576200216750ustar00rootroot00000000000000#include int main() { malloc_trim(0); } mergerfs-2.40.2/libfuse/ecfd/tests/HAVE_STRUCT_STAT_ST_ATIM.c000066400000000000000000000002421457016576200231660ustar00rootroot00000000000000#include #include #include int main(int argc, char *argv[]) { struct stat st; (void)st.st_atim; return 0; } mergerfs-2.40.2/libfuse/ecfd/tests/HAVE_UTIMENSAT.c000066400000000000000000000001661457016576200214450ustar00rootroot00000000000000#include #include int main(int argc, char *argv[]) { (void)utimensat; return 0; } mergerfs-2.40.2/libfuse/ecfd/tests/static_assert.h000066400000000000000000000001331457016576200221400ustar00rootroot00000000000000#pragma once #define STATIC_ASSERT(condition) \ ((void)sizeof(char[1 - 2*!(condition)])) mergerfs-2.40.2/libfuse/include/000077500000000000000000000000001457016576200165025ustar00rootroot00000000000000mergerfs-2.40.2/libfuse/include/extern_c.h000066400000000000000000000002251457016576200204610ustar00rootroot00000000000000#pragma once #ifdef __cplusplus #define EXTERN_C_BEGIN extern "C" { #define EXTERN_C_END } #else #define EXTERN_C_BEGIN #define EXTERN_C_END #endif mergerfs-2.40.2/libfuse/include/fuse.h000066400000000000000000000605621457016576200176260ustar00rootroot00000000000000/* FUSE: Filesystem in Userspace Copyright (C) 2001-2007 Miklos Szeredi This program can be distributed under the terms of the GNU LGPLv2. See the file COPYING.LIB.
*/ #ifndef _FUSE_H_ #define _FUSE_H_ #include "extern_c.h" #include "fuse_common.h" #include #include #include #include #include #include #include EXTERN_C_BEGIN /* ----------------------------------------------------------- * * Basic FUSE API * * ----------------------------------------------------------- */ /** Handle for a FUSE filesystem */ struct fuse; /** Structure containing a raw command */ struct fuse_cmd; struct fuse_dirents_t; typedef struct fuse_dirents_t fuse_dirents_t; /** * The file system operations: * * Most of these should work very similarly to the well known UNIX * file system operations. A major exception is that instead of * returning an error in 'errno', the operation should return the * negated error value (-errno) directly. * * All methods are optional, but some are essential for a useful * filesystem (e.g. getattr). Open, flush, release, fsync, opendir, * releasedir, fsyncdir, access, create, ftruncate, fgetattr, lock, * init and destroy are special purpose methods, without which a full * featured filesystem can still be implemented. * * Almost all operations take a path which can be of any length. * * Changed in fuse 2.8.0 (regardless of API version) * Previously, paths were limited to a length of PATH_MAX. * * See http://fuse.sourceforge.net/wiki/ for more information. There * is also a snapshot of the relevant wiki pages in the doc/ folder. */ struct fuse_operations { /** Get file attributes. * * Similar to stat(). The 'st_dev' and 'st_blksize' fields are * ignored. The 'st_ino' field is ignored except if the 'use_ino' * mount option is given. */ int (*getattr) (const char *, struct stat *, fuse_timeouts_t *); /** Read the target of a symbolic link * * The buffer should be filled with a null terminated string. The * buffer size argument includes the space for the terminating * null character. If the linkname is too long to fit in the * buffer, it should be truncated. The return value should be 0 * for success. */ int (*readlink) (const char *, char *, size_t); /** Create a file node * * This is called for creation of all non-directory, non-symlink * nodes. If the filesystem defines a create() method, then for * regular files that will be called instead. */ int (*mknod) (const char *, mode_t, dev_t); /** Create a directory * * Note that the mode argument may not have the type specification * bits set, i.e. S_ISDIR(mode) can be false. To obtain the * correct directory type bits use mode|S_IFDIR * */ int (*mkdir) (const char *, mode_t); /** Hide files unlinked / renamed over * * Allows storing of a file handle when a file is unlinked * while open. Helps manage the fact the kernel usually does * not send fh with getattr requests. 
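 *
 * A minimal illustrative sketch (hypothetical names, not from the original
 * documentation; assumes a simple passthrough layout where the path maps
 * directly to an underlying file): prepare_hide() could open the file and
 * hand the descriptor back through fh_, with free_hide() closing it later:
 *
 *   static int example_prepare_hide(const char *name_, uint64_t *fh_)
 *   {
 *     int fd = open(name_, O_RDONLY);  // requires <fcntl.h>
 *     if (fd == -1)
 *       return -errno;                 // requires <errno.h>
 *     *fh_ = fd;
 *     return 0;
 *   }
 *
 *   static int example_free_hide(const uint64_t fh_)
 *   {
 *     return (close(fh_) == -1) ? -errno : 0;  // requires <unistd.h>
 *   }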
*/ int (*prepare_hide)(const char *name_, uint64_t *fh_); int (*free_hide)(const uint64_t fh_); /** Remove a file */ int (*unlink) (const char *); /** Remove a directory */ int (*rmdir) (const char *); /** Create a symbolic link */ int (*symlink) (const char *, const char *, struct stat *, fuse_timeouts_t *); /** Rename a file */ int (*rename) (const char *, const char *); /** Create a hard link to a file */ int (*link) (const char *, const char *, struct stat *, fuse_timeouts_t *); /** Change the permission bits of a file */ int (*chmod) (const char *, mode_t); int (*fchmod)(const fuse_file_info_t *, const mode_t); /** Change the owner and group of a file */ int (*chown) (const char *, uid_t, gid_t); int (*fchown)(const fuse_file_info_t *, const uid_t, const gid_t); /** Change the size of a file */ int (*truncate) (const char *, off_t); /** File open operation * * No creation (O_CREAT, O_EXCL) and by default also no * truncation (O_TRUNC) flags will be passed to open(). If an * application specifies O_TRUNC, fuse first calls truncate() * and then open(). Only if 'atomic_o_trunc' has been * specified and kernel version is 2.6.24 or later, O_TRUNC is * passed on to open. * * Unless the 'default_permissions' mount option is given, * open should check if the operation is permitted for the * given flags. Optionally open may also return an arbitrary * filehandle in the fuse_file_info structure, which will be * passed to all file operations. * * Changed in version 2.2 */ int (*open) (const char *, fuse_file_info_t *); /** Get file system statistics * * The 'f_frsize', 'f_favail', 'f_fsid' and 'f_flag' fields are ignored * * Replaced 'struct statfs' parameter with 'struct statvfs' in * version 2.5 */ int (*statfs) (const char *, struct statvfs *); /** Possibly flush cached data * * BIG NOTE: This is not equivalent to fsync(). It's not a * request to sync dirty data. * * Flush is called on each close() of a file descriptor. So if a * filesystem wants to return write errors in close() and the file * has cached dirty data, this is a good place to write back data * and return any errors. Since many applications ignore close() * errors this is not always useful. * * NOTE: The flush() method may be called more than once for each * open(). This happens if more than one file descriptor refers * to an opened file due to dup(), dup2() or fork() calls. It is * not possible to determine if a flush is final, so each flush * should be treated equally. Multiple write-flush sequences are * relatively rare, so this shouldn't be a problem. * * Filesystems shouldn't assume that flush will always be called * after some writes, or that if will be called at all. * * Changed in version 2.2 */ int (*flush) (const fuse_file_info_t *); /** Release an open file * * Release is called when there are no more references to an open * file: all file descriptors are closed and all memory mappings * are unmapped. * * For every open() call there will be exactly one release() call * with the same flags and file descriptor. It is possible to * have a file opened more than once, in which case only the last * release will mean, that no more reads/writes will happen on the * file. The return value of release is ignored. * * Changed in version 2.2 */ int (*release) (const fuse_file_info_t *); /** Synchronize file contents * * If the datasync parameter is non-zero, then only the user data * should be flushed, not the meta data. 
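 *
 * A minimal sketch (hypothetical name; assumes the filesystem stored a
 * plain file descriptor in fh during open()):
 *
 *   static int example_fsync(const fuse_file_info_t *ffi_, int datasync_)
 *   {
 *     // honor datasync: skip flushing metadata when only data is requested
 *     int rv = datasync_ ? fdatasync(ffi_->fh) : fsync(ffi_->fh);
 *     return (rv == -1) ? -errno : 0;  // requires <unistd.h> and <errno.h>
 *   }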
* * Changed in version 2.2 */ int (*fsync) (const fuse_file_info_t *, int); /** Set extended attributes */ int (*setxattr) (const char *, const char *, const char *, size_t, int); /** Get extended attributes */ int (*getxattr) (const char *, const char *, char *, size_t); /** List extended attributes */ int (*listxattr) (const char *, char *, size_t); /** Remove extended attributes */ int (*removexattr) (const char *, const char *); /** Open directory * * Unless the 'default_permissions' mount option is given, * this method should check if opendir is permitted for this * directory. Optionally opendir may also return an arbitrary * filehandle in the fuse_file_info structure, which will be * passed to readdir, closedir and fsyncdir. * * Introduced in version 2.3 */ int (*opendir) (const char *, fuse_file_info_t *); /** Read directory * * This supersedes the old getdir() interface. New applications * should use this. * * The filesystem may choose between two modes of operation: * * 1) The readdir implementation ignores the offset parameter, and * passes zero to the filler function's offset. The filler * function will not return '1' (unless an error happens), so the * whole directory is read in a single readdir operation. This * works just like the old getdir() method. * * 2) The readdir implementation keeps track of the offsets of the * directory entries. It uses the offset parameter and always * passes non-zero offset to the filler function. When the buffer * is full (or an error happens) the filler function will return * '1'. * * Introduced in version 2.3 */ int (*readdir)(const fuse_file_info_t *, fuse_dirents_t *); int (*readdir_plus)(const fuse_file_info_t *, fuse_dirents_t *); /** Release directory * * Introduced in version 2.3 */ int (*releasedir) (const fuse_file_info_t *); /** Synchronize directory contents * * If the datasync parameter is non-zero, then only the user data * should be flushed, not the meta data * * Introduced in version 2.3 */ int (*fsyncdir) (const fuse_file_info_t *, int); /** * Initialize filesystem * * The return value will passed in the private_data field of * fuse_context to all file operations and as a parameter to the * destroy() method. * * Introduced in version 2.3 * Changed in version 2.6 */ void *(*init) (struct fuse_conn_info *conn); /** * Clean up filesystem * * Called on filesystem exit. * * Introduced in version 2.3 */ void (*destroy) (void); /** * Check file access permissions * * This will be called for the access() system call. If the * 'default_permissions' mount option is given, this method is not * called. * * This method is not called under Linux kernel versions 2.4.x * * Introduced in version 2.5 */ int (*access) (const char *, int); /** * Create and open a file * * If the file does not exist, first create it with the specified * mode, and then open it. * * If this method is not implemented or under Linux kernel * versions earlier than 2.6.15, the mknod() and open() methods * will be called instead. * * Introduced in version 2.5 */ int (*create) (const char *, mode_t, fuse_file_info_t *); /** * Change the size of an open file * * This method is called instead of the truncate() method if the * truncation was invoked from an ftruncate() system call. * * If this method is not implemented or under Linux kernel * versions earlier than 2.6.15, the truncate() method will be * called instead. 
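 *
 * A minimal sketch (hypothetical name; assumes fh holds an open file
 * descriptor for the file being truncated):
 *
 *   static int example_ftruncate(const fuse_file_info_t *ffi_, off_t size_)
 *   {
 *     // truncate via the already-open descriptor instead of the path
 *     return (ftruncate(ffi_->fh, size_) == -1) ? -errno : 0;
 *   }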
* * Introduced in version 2.5 */ int (*ftruncate) (const fuse_file_info_t *, off_t); /** * Get attributes from an open file * * This method is called instead of the getattr() method if the * file information is available. * * Currently this is only called after the create() method if that * is implemented (see above). Later it may be called for * invocations of fstat() too. * * Introduced in version 2.5 */ int (*fgetattr) (const fuse_file_info_t *, struct stat *, fuse_timeouts_t *); /** * Perform POSIX file locking operation * * The cmd argument will be either F_GETLK, F_SETLK or F_SETLKW. * * For the meaning of fields in 'struct flock' see the man page * for fcntl(2). The l_whence field will always be set to * SEEK_SET. * * For checking lock ownership, the 'fuse_file_info->owner' * argument must be used. * * For F_GETLK operation, the library will first check currently * held locks, and if a conflicting lock is found it will return * information without calling this method. This ensures, that * for local locks the l_pid field is correctly filled in. The * results may not be accurate in case of race conditions and in * the presence of hard links, but it's unlikely that an * application would rely on accurate GETLK results in these * cases. If a conflicting lock is not found, this method will be * called, and the filesystem may fill out l_pid by a meaningful * value, or it may leave this field zero. * * For F_SETLK and F_SETLKW the l_pid field will be set to the pid * of the process performing the locking operation. * * Note: if this method is not implemented, the kernel will still * allow file locking to work locally. Hence it is only * interesting for network filesystems and similar. * * Introduced in version 2.6 */ int (*lock) (const fuse_file_info_t *, int cmd, struct flock *); /** * Change the access and modification times of a file with * nanosecond resolution * * This supersedes the old utime() interface. New applications * should use this. * * See the utimensat(2) man page for details. * * Introduced in version 2.6 */ int (*utimens)(const char *, const struct timespec tv[2]); int (*futimens)(const fuse_file_info_t *ffi_, const struct timespec tv_[2]); /** * Map block index within file to block index within device * * Note: This makes sense only for block device backed filesystems * mounted with the 'blkdev' option * * Introduced in version 2.6 */ int (*bmap) (const char *, size_t blocksize, uint64_t *idx); /** * Ioctl * * flags will have FUSE_IOCTL_COMPAT set for 32bit ioctls in * 64bit environment. The size and direction of data is * determined by _IOC_*() decoding of cmd. For _IOC_NONE, * data will be NULL, for _IOC_WRITE data is out area, for * _IOC_READ in area and if both are set in/out area. In all * non-NULL cases, the area is of _IOC_SIZE(cmd) bytes. * * If flags has FUSE_IOCTL_DIR then the fuse_file_info refers to a * directory file handle. * * Introduced in version 2.8 */ int (*ioctl) (const fuse_file_info_t *ffi, unsigned long cmd, void *arg, unsigned int flags, void *data, uint32_t *out_bufsz); /** * Poll for IO readiness events * * Note: If ph is non-NULL, the client should notify * when IO readiness events occur by calling * fuse_notify_poll() with the specified ph. * * Regardless of the number of times poll with a non-NULL ph * is received, single notification is enough to clear all. * Notifying more times incurs overhead but doesn't harm * correctness. * * The callee is responsible for destroying ph with * fuse_pollhandle_destroy() when no longer in use. 
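 *
 * A minimal sketch (hypothetical; models a filesystem whose files are
 * always ready for I/O, so the handle is never needed for a later
 * fuse_notify_poll() and can be destroyed immediately):
 *
 *   static int example_poll(const fuse_file_info_t *ffi_,
 *                           fuse_pollhandle_t *ph_, unsigned *reventsp_)
 *   {
 *     *reventsp_ = POLLIN | POLLOUT;   // requires <poll.h>
 *     if (ph_)
 *       fuse_pollhandle_destroy(ph_);  // no later notification will be sent
 *     return 0;
 *   }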
* * Introduced in version 2.8 */ int (*poll) (const fuse_file_info_t *ffi, fuse_pollhandle_t *ph, unsigned *reventsp); /** Write contents of buffer to an open file * * Similar to the write() method, but data is supplied in a * generic buffer. Use fuse_buf_copy() to transfer data to * the destination. * * Introduced in version 2.9 */ int (*write) (const fuse_file_info_t *ffi, const char *data, size_t size, off_t off); /** Store data from an open file in a buffer * * Similar to the read() method, but data is stored and * returned in a generic buffer. * * No actual copying of data has to take place, the source * file descriptor may simply be stored in the buffer for * later data transfer. * * The buffer must be allocated dynamically and stored at the * location pointed to by bufp. If the buffer contains memory * regions, they too must be allocated using malloc(). The * allocated memory will be freed by the caller. * * Introduced in version 2.9 */ int (*read)(const fuse_file_info_t *ffi, char *buf, size_t size, off_t off); /** * Perform BSD file locking operation * * The op argument will be either LOCK_SH, LOCK_EX or LOCK_UN * * Nonblocking requests will be indicated by ORing LOCK_NB to * the above operations * * For more information see the flock(2) manual page. * * Additionally fi->owner will be set to a value unique to * this open file. This same value will be supplied to * ->release() when the file is released. * * Note: if this method is not implemented, the kernel will still * allow file locking to work locally. Hence it is only * interesting for network filesystems and similar. * * Introduced in version 2.9 */ int (*flock) (const fuse_file_info_t *, int op); /** * Allocates space for an open file * * This function ensures that required space is allocated for specified * file. If this function returns success then any subsequent write * request to specified range is guaranteed not to fail because of lack * of space on the file system media. * * Introduced in version 2.9.1 */ int (*fallocate) (const fuse_file_info_t *, int, off_t, off_t); /** * Copy a range of data from one file to another * * Performs an optimized copy between two file descriptors without * the additional cost of transferring data through the FUSE kernel * module to user space (glibc) and then back into the FUSE filesystem * again. * * In case this method is not implemented, glibc falls back to * reading data from the source and writing to the * destination. Effectively doing an inefficient copy of the * data. */ ssize_t (*copy_file_range)(const fuse_file_info_t *fi_in, off_t offset_in, const fuse_file_info_t *fi_out, off_t offset_out, size_t size, int flags); ssize_t (*setupmapping)(uint64_t *fh_, uint64_t foffset_, uint64_t len_, uint64_t flags_, uint64_t moffset_); int (*removemapping)(); int (*syncfs)(); int (*tmpfile)(const char *, mode_t, fuse_file_info_t *); }; /** Extra context that may be needed by some filesystems * * The uid, gid and pid fields are not filled in case of a writepage * operation. */ struct fuse_context { /** Pointer to the fuse object */ struct fuse *fuse; /** User ID of the calling process */ uid_t uid; /** Group ID of the calling process */ gid_t gid; /** Thread ID of the calling process */ pid_t pid; /** Umask of the calling process (introduced in version 2.8) */ mode_t umask; }; /** * Main function of FUSE. * * This is for the lazy. This is all that has to be called from the * main() function. 
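 *
 * For instance, a skeletal filesystem could reduce to the following
 * (hypothetical sketch; a real filesystem would populate the callback
 * members it actually implements):
 *
 *   static struct fuse_operations example_ops = {0}; // set .getattr, .readdir, ... here
 *
 *   int main(int argc, char *argv[])
 *   {
 *     return fuse_main(argc, argv, &example_ops);
 *   }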
* * This function does the following: * - parses command line options (-d -s and -h) * - passes relevant mount options to the fuse_mount() * - installs signal handlers for INT, HUP, TERM and PIPE * - registers an exit handler to unmount the filesystem on program exit * - creates a fuse handle * - registers the operations * - calls either the single-threaded or the multi-threaded event loop * * Note: this is currently implemented as a macro. * * @param argc the argument counter passed to the main() function * @param argv the argument vector passed to the main() function * @param op the file system operation * @return 0 on success, nonzero on failure */ /* int fuse_main(int argc, char *argv[], const struct fuse_operations *op); */ #define fuse_main(argc, argv, op) \ fuse_main_real(argc, argv, op, sizeof(*(op))) /* ----------------------------------------------------------- * * More detailed API * * ----------------------------------------------------------- */ /** * Create a new FUSE filesystem. * * @param ch the communication channel * @param args argument vector * @param op the filesystem operations * @param op_size the size of the fuse_operations structure * @return the created FUSE handle */ struct fuse *fuse_new(struct fuse_chan *ch, struct fuse_args *args, const struct fuse_operations *op, size_t op_size); /** * Destroy the FUSE handle. * * The communication channel attached to the handle is also destroyed. * * NOTE: This function does not unmount the filesystem. If this is * needed, call fuse_unmount() before calling this function. * * @param f the FUSE handle */ void fuse_destroy(struct fuse *f); /** * Exit from event loop * * @param f the FUSE handle */ void fuse_exit(struct fuse *f); int fuse_config_read_thread_count(const struct fuse *f); int fuse_config_process_thread_count(const struct fuse *f); int fuse_config_process_thread_queue_depth(const struct fuse *f); const char* fuse_config_pin_threads(const struct fuse *f); /** * FUSE event loop with multiple threads * * Requests from the kernel are processed, and the appropriate * operations are called. Request are processed in parallel by * distributing them between multiple threads. * * Calling this function requires the pthreads library to be linked to * the application. * * @param f the FUSE handle * @return 0 if no error occurred, -1 otherwise */ int fuse_loop_mt(struct fuse *f); /** * Get the current context * * The context is only valid for the duration of a filesystem * operation, and thus must not be stored and used later. 
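 *
 * A typical use (illustrative only; the helper name is hypothetical) is to
 * consult the caller's credentials from inside a callback:
 *
 *   static int example_caller_is_root(void)
 *   {
 *     const struct fuse_context *ctx = fuse_get_context();
 *     return (ctx->uid == 0);
 *   }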
* * @return the context */ struct fuse_context *fuse_get_context(void); /** * Check if the current request has already been interrupted * * @return 1 if the request has been interrupted, 0 otherwise */ int fuse_interrupted(void); /** * Obsolete, doesn't do anything * * @return -EINVAL */ int fuse_invalidate(struct fuse *f, const char *path); /* Deprecated, don't use */ int fuse_is_lib_option(const char *opt); /** * The real main function * * Do not call this directly, use fuse_main() */ int fuse_main_real(int argc, char *argv[], const struct fuse_operations *op, size_t op_size); int fuse_start_maintenance_thread(struct fuse *fuse); void fuse_stop_maintenance_thread(struct fuse *fuse); int fuse_log_metrics_get(void); void fuse_log_metrics_set(int enabled); /** * Iterate over cache removing stale entries * use in conjunction with "-oremember" * * NOTE: This is already done for the standard sessions * * @param fuse struct fuse pointer for fuse instance * @return the number of seconds until the next cleanup */ int fuse_clean_cache(struct fuse *fuse); /* * Stacking API */ /** * Fuse filesystem object * * This is opaque object represents a filesystem layer */ struct fuse_fs; /* * These functions call the relevant filesystem operation, and return * the result. * * If the operation is not defined, they return -ENOSYS, with the * exception of fuse_fs_open, fuse_fs_release, fuse_fs_opendir, * fuse_fs_releasedir and fuse_fs_statfs, which return 0. */ int fuse_notify_poll(fuse_pollhandle_t *ph); /** * Create a new fuse filesystem object * * This is usually called from the factory of a fuse module to create * a new instance of a filesystem. * * @param op the filesystem operations * @param op_size the size of the fuse_operations structure * @return a new filesystem object */ struct fuse_fs *fuse_fs_new(const struct fuse_operations *op, size_t op_size); /* ----------------------------------------------------------- * * Advanced API for event handling, don't worry about this... * * ----------------------------------------------------------- */ /* NOTE: the following functions are deprecated, and will be removed from the 3.0 API. 
Use the lowlevel session functions instead */ /** Function type used to process commands */ typedef void (*fuse_processor_t)(struct fuse *, struct fuse_cmd *, void *); /** This is the part of fuse_main() before the event loop */ struct fuse *fuse_setup(int argc, char *argv[], const struct fuse_operations *op, size_t op_size, char **mountpoint); /** This is the part of fuse_main() after the event loop */ void fuse_teardown(struct fuse *fuse, char *mountpoint); /** Multi threaded event loop, which calls the custom command processor function */ int fuse_loop_mt_proc(struct fuse *f, fuse_processor_t proc, void *data); /** Return the exited flag, which indicates if fuse_exit() has been called */ int fuse_exited(struct fuse *f); /** This function is obsolete and implemented as a no-op */ void fuse_set_getcontext_func(struct fuse_context *(*func)(void)); /** Get session from fuse object */ struct fuse_session *fuse_get_session(struct fuse *f); void fuse_gc1(); void fuse_gc(); void fuse_invalidate_all_nodes(); EXTERN_C_END #endif /* _FUSE_H_ */ mergerfs-2.40.2/libfuse/include/fuse_attr.h000066400000000000000000000006141457016576200206500ustar00rootroot00000000000000#pragma once #include typedef struct fuse_attr_s fuse_attr_t; struct fuse_attr_s { uint64_t ino; uint64_t size; uint64_t blocks; uint64_t atime; uint64_t mtime; uint64_t ctime; uint32_t atimensec; uint32_t mtimensec; uint32_t ctimensec; uint32_t mode; uint32_t nlink; uint32_t uid; uint32_t gid; uint32_t rdev; uint32_t blksize; uint32_t _padding; }; mergerfs-2.40.2/libfuse/include/fuse_common.h000066400000000000000000000300041457016576200211620ustar00rootroot00000000000000/* FUSE: Filesystem in Userspace Copyright (C) 2001-2007 Miklos Szeredi This program can be distributed under the terms of the GNU LGPLv2. See the file COPYING.LIB. */ /** @file */ #if !defined(_FUSE_H_) && !defined(_FUSE_LOWLEVEL_H_) #error "Never include directly; use or instead." #endif #ifndef _FUSE_COMMON_H_ #define _FUSE_COMMON_H_ #include "extern_c.h" #include "fuse_opt.h" #include "fuse_timeouts.h" #include #include /** Major version of FUSE library interface */ #define FUSE_MAJOR_VERSION 2 /** Minor version of FUSE library interface */ #define FUSE_MINOR_VERSION 9 #define FUSE_MAKE_VERSION(maj, min) ((maj) * 10 + (min)) #define FUSE_VERSION FUSE_MAKE_VERSION(FUSE_MAJOR_VERSION, FUSE_MINOR_VERSION) /* This interface uses 64 bit off_t */ #if _FILE_OFFSET_BITS != 64 #error Please add -D_FILE_OFFSET_BITS=64 to your compile flags! #endif #define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32 #define FUSE_MAX_MAX_PAGES 256 EXTERN_C_BEGIN /** * Information about open files * * Changed in version 2.5 */ typedef struct fuse_file_info_t fuse_file_info_t; struct fuse_file_info_t { /** Open flags. Available in open() and release() */ int flags; /** In case of a write operation indicates if this was caused by a writepage */ uint32_t writepage : 1; /** Can be filled in by open, to use direct I/O on this file. Introduced in version 2.4 */ uint32_t direct_io : 1; /** Can be filled in by open, to indicate, that cached file data need not be invalidated. Introduced in version 2.4 */ uint32_t keep_cache : 1; /** Indicates a flush operation. Set in flush operation, also maybe set in highlevel lock operation and lowlevel release operation. Introduced in version 2.6 */ uint32_t flush : 1; /** Can be filled in by open, to indicate that the file is not seekable. Introduced in version 2.8 */ uint32_t nonseekable : 1; /* Indicates that flock locks for this file should be released. 
If set, lock_owner shall contain a valid value. May only be set in ->release(). Introduced in version 2.9 */ uint32_t flock_release : 1; /* Requests the kernel to cache entries returned by readdir */ uint32_t cache_readdir : 1; uint32_t auto_cache : 1; uint32_t parallel_direct_writes:1; uint32_t noflush:1; /** File handle. May be filled in by filesystem in open(). Available in all other file operations */ uint64_t fh; /** Lock owner id. Available in locking operations and flush */ uint64_t lock_owner; }; /** * Capability bits for 'fuse_conn_info.capable' and 'fuse_conn_info.want' * * FUSE_CAP_ASYNC_READ: filesystem supports asynchronous read requests * FUSE_CAP_POSIX_LOCKS: filesystem supports "remote" locking * FUSE_CAP_ATOMIC_O_TRUNC: filesystem handles the O_TRUNC open flag * FUSE_CAP_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." * FUSE_CAP_BIG_WRITES: filesystem can handle write size larger than 4kB * FUSE_CAP_DONT_MASK: don't apply umask to file mode on create operations * FUSE_CAP_IOCTL_DIR: ioctl support on directories * FUSE_CAP_CACHE_SYMLINKS: cache READLINK responses */ #define FUSE_CAP_ASYNC_READ (1 << 0) #define FUSE_CAP_POSIX_LOCKS (1 << 1) #define FUSE_CAP_ATOMIC_O_TRUNC (1 << 3) #define FUSE_CAP_EXPORT_SUPPORT (1 << 4) #define FUSE_CAP_BIG_WRITES (1 << 5) #define FUSE_CAP_DONT_MASK (1 << 6) #define FUSE_CAP_FLOCK_LOCKS (1 << 10) #define FUSE_CAP_IOCTL_DIR (1 << 11) #define FUSE_CAP_READDIR_PLUS (1 << 13) #define FUSE_CAP_READDIR_PLUS_AUTO (1 << 14) #define FUSE_CAP_ASYNC_DIO (1 << 15) #define FUSE_CAP_WRITEBACK_CACHE (1 << 16) #define FUSE_CAP_PARALLEL_DIROPS (1 << 18) #define FUSE_CAP_POSIX_ACL (1 << 19) #define FUSE_CAP_CACHE_SYMLINKS (1 << 20) #define FUSE_CAP_MAX_PAGES (1 << 21) #define FUSE_CAP_SETXATTR_EXT (1 << 22) /** * Ioctl flags * * FUSE_IOCTL_COMPAT: 32bit compat ioctl on 64bit machine * FUSE_IOCTL_UNRESTRICTED: not restricted to well-formed ioctls, retry allowed * FUSE_IOCTL_RETRY: retry with new iovecs * FUSE_IOCTL_DIR: is a directory * * FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs */ #define FUSE_IOCTL_COMPAT (1 << 0) #define FUSE_IOCTL_UNRESTRICTED (1 << 1) #define FUSE_IOCTL_RETRY (1 << 2) #define FUSE_IOCTL_DIR (1 << 4) #define FUSE_IOCTL_MAX_IOV 256 /** * Connection information, passed to the ->init() method * * Some of the elements are read-write, these can be changed to * indicate the value requested by the filesystem. The requested * value must usually be smaller than the indicated value. */ struct fuse_conn_info { /** * Major version of the protocol (read-only) */ unsigned proto_major; /** * Minor version of the protocol (read-only) */ unsigned proto_minor; /** * Maximum size of the write buffer */ unsigned max_write; /** * Maximum readahead */ unsigned max_readahead; /** * Capability flags, that the kernel supports */ unsigned capable; /** * Capability flags, that the filesystem wants to enable */ unsigned want; /** * Maximum number of backgrounded requests */ unsigned max_background; /** * Kernel congestion threshold parameter */ unsigned congestion_threshold; /** * Max pages */ uint16_t max_pages; /** * For future use. 
*/ unsigned reserved[22]; }; struct fuse_session; struct fuse_chan; struct fuse_pollhandle_t; typedef struct fuse_pollhandle_t fuse_pollhandle_t; /** * Create a FUSE mountpoint * * Returns a control file descriptor suitable for passing to * fuse_new() * * @param mountpoint the mount point path * @param args argument vector * @return the communication channel on success, NULL on failure */ struct fuse_chan *fuse_mount(const char *mountpoint, struct fuse_args *args); /** * Umount a FUSE mountpoint * * @param mountpoint the mount point path * @param ch the communication channel */ void fuse_unmount(const char *mountpoint, struct fuse_chan *ch); /** * Parse common options * * The following options are parsed: * * '-f' foreground * '-d' '-odebug' foreground, but keep the debug option * '-h' '--help' help * '-ho' help without header * '-ofsname=..' file system name, if not present, then set to the program * name * * All parameters may be NULL * * @param args argument vector * @param mountpoint the returned mountpoint, should be freed after use * @param foreground set to 1 if one of the relevant options is present * @return 0 on success, -1 on failure */ int fuse_parse_cmdline(struct fuse_args *args, char **mountpoint, int *foreground); /** * Go into the background * * @param foreground if true, stay in the foreground * @return 0 on success, -1 on failure */ int fuse_daemonize(int foreground); /** * Get the version of the library * * @return the version */ int fuse_version(void); /** * Destroy poll handle * * @param ph the poll handle */ void fuse_pollhandle_destroy(fuse_pollhandle_t *ph); /* ----------------------------------------------------------- * * Data buffer * * ----------------------------------------------------------- */ /** * Buffer flags */ enum fuse_buf_flags { /** * Buffer contains a file descriptor * * If this flag is set, the .fd field is valid, otherwise the * .mem fields is valid. */ FUSE_BUF_IS_FD = (1 << 1), /** * Seek on the file descriptor * * If this flag is set then the .pos field is valid and is * used to seek to the given offset before performing * operation on file descriptor. */ FUSE_BUF_FD_SEEK = (1 << 2), /** * Retry operation on file descriptor * * If this flag is set then retry operation on file descriptor * until .size bytes have been copied or an error or EOF is * detected. */ FUSE_BUF_FD_RETRY = (1 << 3), }; /** * Buffer copy flags */ enum fuse_buf_copy_flags { /** * Don't use splice(2) * * Always fall back to using read and write instead of * splice(2) to copy data from one file descriptor to another. * * If this flag is not set, then only fall back if splice is * unavailable. */ FUSE_BUF_NO_SPLICE = (1 << 1), /** * Force splice * * Always use splice(2) to copy data from one file descriptor * to another. If splice is not available, return -EINVAL. */ FUSE_BUF_FORCE_SPLICE = (1 << 2), /** * Try to move data with splice. * * If splice is used, try to move pages from the source to the * destination instead of copying. See documentation of * SPLICE_F_MOVE in splice(2) man page. */ FUSE_BUF_SPLICE_MOVE = (1 << 3), /** * Don't block on the pipe when copying data with splice * * Makes the operations on the pipe non-blocking (if the pipe * is full or empty). See SPLICE_F_NONBLOCK in the splice(2) * man page. */ FUSE_BUF_SPLICE_NONBLOCK= (1 << 4), }; /** * Single data buffer * * Generic data buffer for I/O, extended attributes, etc... 
Data may * be supplied as a memory pointer or as a file descriptor */ struct fuse_buf { /** * Size of data in bytes */ size_t size; /** * Buffer flags */ enum fuse_buf_flags flags; /** * Memory pointer * * Used unless FUSE_BUF_IS_FD flag is set. */ void *mem; /** * File descriptor * * Used if FUSE_BUF_IS_FD flag is set. */ int fd; /** * File position * * Used if FUSE_BUF_FD_SEEK flag is set. */ off_t pos; }; /** * Data buffer vector * * An array of data buffers, each containing a memory pointer or a * file descriptor. * * Allocate dynamically to add more than one buffer. */ struct fuse_bufvec { /** * Number of buffers in the array */ size_t count; /** * Index of current buffer within the array */ size_t idx; /** * Current offset within the current buffer */ size_t off; /** * Array of buffers */ struct fuse_buf buf[1]; }; /* Initialize bufvec with a single buffer of given size */ #define FUSE_BUFVEC_INIT(size__) \ ((struct fuse_bufvec) { \ /* .count= */ 1, \ /* .idx = */ 0, \ /* .off = */ 0, \ /* .buf = */ { /* [0] = */ { \ /* .size = */ (size__), \ /* .flags = */ (enum fuse_buf_flags) 0, \ /* .mem = */ NULL, \ /* .fd = */ -1, \ /* .pos = */ 0, \ } } \ } ) /** * Get total size of data in a fuse buffer vector * * @param bufv buffer vector * @return size of data */ size_t fuse_buf_size(const struct fuse_bufvec *bufv); /** * Copy data from one buffer vector to another * * @param dst destination buffer vector * @param src source buffer vector * @param flags flags controlling the copy * @return actual number of bytes copied or -errno on error */ ssize_t fuse_buf_copy(struct fuse_bufvec *dst, struct fuse_bufvec *src, enum fuse_buf_copy_flags flags); /* ----------------------------------------------------------- * * Signal handling * * ----------------------------------------------------------- */ /** * Exit session on HUP, TERM and INT signals and ignore PIPE signal * * Stores session in a global variable. May only be called once per * process until fuse_remove_signal_handlers() is called. * * @param se the session to exit * @return 0 on success, -1 on failure */ int fuse_set_signal_handlers(struct fuse_session *se); /** * Restore default signal handlers * * Resets global session. After this fuse_set_signal_handlers() may * be called again. * * @param se the same session as given in fuse_set_signal_handlers() */ void fuse_remove_signal_handlers(struct fuse_session *se); EXTERN_C_END #endif /* _FUSE_COMMON_H_ */ mergerfs-2.40.2/libfuse/include/fuse_config.hpp000066400000000000000000000024061457016576200215040ustar00rootroot00000000000000/* ISC License Copyright (c) 2023, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #pragma once #include int fuse_config_get_read_thread_count(); int fuse_config_get_process_thread_count(); int fuse_config_get_process_thread_queue_depth(); std::string fuse_config_get_pin_threads(); void fuse_config_set_read_thread_count(int const); void fuse_config_set_process_thread_count(int const); void fuse_config_set_process_thread_queue_depth(int const); void fuse_config_set_pin_threads(std::string const); mergerfs-2.40.2/libfuse/include/fuse_dirent.h000066400000000000000000000017451457016576200211710ustar00rootroot00000000000000/* ISC License Copyright (c) 2019, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #pragma once #include typedef struct fuse_dirent_s fuse_dirent_t; struct fuse_dirent_s { uint64_t ino; uint64_t off; uint32_t namelen; uint32_t type; char name[]; }; mergerfs-2.40.2/libfuse/include/fuse_direntplus.h000066400000000000000000000020221457016576200220620ustar00rootroot00000000000000/* ISC License Copyright (c) 2019, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #pragma once #include "fuse_attr.h" #include "fuse_dirent.h" #include "fuse_entry.h" typedef struct fuse_direntplus_s fuse_direntplus_t; struct fuse_direntplus_s { fuse_entry_t entry; fuse_attr_t attr; fuse_dirent_t dirent; }; mergerfs-2.40.2/libfuse/include/fuse_dirents.h000066400000000000000000000050151457016576200213460ustar00rootroot00000000000000/* ISC License Copyright (c) 2019, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #pragma once #include "extern_c.h" EXTERN_C_BEGIN #include "kvec.h" #include "fuse_dirent.h" #include "fuse_direntplus.h" #include "fuse_entry.h" #include "linux_dirent64.h" #include <dirent.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <sys/stat.h> #include <sys/types.h> enum fuse_dirents_type_e { UNSET = 0, NORMAL, PLUS }; typedef enum fuse_dirents_type_e fuse_dirents_type_t; typedef struct fuse_dirents_t fuse_dirents_t; struct fuse_dirents_t { kvec_t(char) data; kvec_t(uint32_t) offs; fuse_dirents_type_t type; }; int fuse_dirents_init(fuse_dirents_t *d); void fuse_dirents_free(fuse_dirents_t *d); void fuse_dirents_reset(fuse_dirents_t *d); int fuse_dirents_add(fuse_dirents_t *d, const struct dirent *de, const uint64_t namelen); int fuse_dirents_add_plus(fuse_dirents_t *d, const struct dirent *de, const uint64_t namelen, const fuse_entry_t *entry, const struct stat *st); int fuse_dirents_add_linux(fuse_dirents_t *d, const linux_dirent64_t *de, const uint64_t namelen); int fuse_dirents_add_linux_plus(fuse_dirents_t *d, const linux_dirent64_t *de, const uint64_t namelen, const fuse_entry_t *entry, const struct stat *st); void *fuse_dirents_find(fuse_dirents_t *d, const uint64_t ino); int fuse_dirents_convert_plus2normal(fuse_dirents_t *d); EXTERN_C_END mergerfs-2.40.2/libfuse/include/fuse_entry.h000066400000000000000000000003701457016576200210360ustar00rootroot00000000000000#pragma once #include <stdint.h> typedef struct fuse_entry_s fuse_entry_t; struct fuse_entry_s { uint64_t nodeid; uint64_t generation; uint64_t entry_valid; uint64_t attr_valid; uint32_t entry_valid_nsec; uint32_t attr_valid_nsec; }; mergerfs-2.40.2/libfuse/include/fuse_kernel.h000066400000000000000000000661361457016576200211670ustar00rootroot00000000000000/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ /* This file defines the kernel interface of FUSE Copyright (C) 2001-2008 Miklos Szeredi This program can be distributed under the terms of the GNU GPL. See the file COPYING. This -- and only this -- header file may also be distributed under the terms of the BSD Licence as follows: Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ /* * This file defines the kernel interface of FUSE * * Protocol changelog: * * 7.1: * - add the following messages: * FUSE_SETATTR, FUSE_SYMLINK, FUSE_MKNOD, FUSE_MKDIR, FUSE_UNLINK, * FUSE_RMDIR, FUSE_RENAME, FUSE_LINK, FUSE_OPEN, FUSE_READ, FUSE_WRITE, * FUSE_RELEASE, FUSE_FSYNC, FUSE_FLUSH, FUSE_SETXATTR, FUSE_GETXATTR, * FUSE_LISTXATTR, FUSE_REMOVEXATTR, FUSE_OPENDIR, FUSE_READDIR, * FUSE_RELEASEDIR * - add padding to messages to accommodate 32-bit servers on 64-bit kernels * * 7.2: * - add FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE flags * - add FUSE_FSYNCDIR message * * 7.3: * - add FUSE_ACCESS message * - add FUSE_CREATE message * - add filehandle to fuse_setattr_in * * 7.4: * - add frsize to fuse_kstatfs * - clean up request size limit checking * * 7.5: * - add flags and max_write to fuse_init_out * * 7.6: * - add max_readahead to fuse_init_in and fuse_init_out * * 7.7: * - add FUSE_INTERRUPT message * - add POSIX file lock support * * 7.8: * - add lock_owner and flags fields to fuse_release_in * - add FUSE_BMAP message * - add FUSE_DESTROY message * * 7.9: * - new fuse_getattr_in input argument of GETATTR * - add lk_flags in fuse_lk_in * - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in * - add blksize field to fuse_attr * - add file flags field to fuse_read_in and fuse_write_in * - Add ATIME_NOW and MTIME_NOW flags to fuse_setattr_in * * 7.10 * - add nonseekable open flag * * 7.11 * - add IOCTL message * - add unsolicited notification support * - add POLL message and NOTIFY_POLL notification * * 7.12 * - add umask flag to input argument of create, mknod and mkdir * - add notification messages for invalidation of inodes and * directory entries * * 7.13 * - make max number of background requests and congestion threshold * tunables * * 7.14 * - add splice support to fuse device * * 7.15 * - add store notify * - add retrieve notify * * 7.16 * - add BATCH_FORGET request * - FUSE_IOCTL_UNRESTRICTED shall now return with array of 'struct * fuse_ioctl_iovec' instead of ambiguous 'struct iovec' * - add FUSE_IOCTL_32BIT flag * * 7.17 * - add FUSE_FLOCK_LOCKS and FUSE_RELEASE_FLOCK_UNLOCK * * 7.18 * - add FUSE_IOCTL_DIR flag * - add FUSE_NOTIFY_DELETE * * 7.19 * - add FUSE_FALLOCATE * * 7.20 * - add FUSE_AUTO_INVAL_DATA * * 7.21 * - add FUSE_READDIRPLUS * - send the requested events in POLL request * * 7.22 * - add FUSE_ASYNC_DIO * * 7.23 * - add FUSE_WRITEBACK_CACHE * - add time_gran to fuse_init_out * - add reserved space to fuse_init_out * - add FATTR_CTIME * - add ctime and ctimensec to fuse_setattr_in * - add FUSE_RENAME2 request * - add FUSE_NO_OPEN_SUPPORT flag * * 7.24 * - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support * * 7.25 * - add FUSE_PARALLEL_DIROPS * * 7.26 * - add FUSE_HANDLE_KILLPRIV * - add FUSE_POSIX_ACL * * 7.27 * - add FUSE_ABORT_ERROR * * 7.28 * - add FUSE_COPY_FILE_RANGE * - add FOPEN_CACHE_DIR * - add FUSE_MAX_PAGES, add max_pages to init_out * - add FUSE_CACHE_SYMLINKS * * 7.29 * - add FUSE_NO_OPENDIR_SUPPORT flag * * 7.30 * - add FUSE_EXPLICIT_INVAL_DATA * - add FUSE_IOCTL_COMPAT_X32 * * 7.31 * - add FUSE_WRITE_KILL_PRIV flag * - add FUSE_SETUPMAPPING and FUSE_REMOVEMAPPING * - add map_alignment to fuse_init_out, add FUSE_MAP_ALIGNMENT flag * * 7.32 * - add flags to fuse_attr, add FUSE_ATTR_SUBMOUNT, add FUSE_SUBMOUNTS * * 7.33 * - add FUSE_HANDLE_KILLPRIV_V2, FUSE_WRITE_KILL_SUIDGID, FATTR_KILL_SUIDGID * - add FUSE_OPEN_KILL_SUIDGID * - extend fuse_setxattr_in, add FUSE_SETXATTR_EXT * - add FUSE_SETXATTR_ACL_KILL_SGID * * 7.34 * - add 
FUSE_SYNCFS * * 7.35 * - add FOPEN_NOFLUSH * * 7.36 * - extend fuse_init_in with reserved fields, add FUSE_INIT_EXT init flag * - add flags2 to fuse_init_in and fuse_init_out * - add FUSE_SECURITY_CTX init flag * - add security context to create, mkdir, symlink, and mknod requests * - add FUSE_HAS_INODE_DAX, FUSE_ATTR_DAX * * 7.37 * - add FUSE_TMPFILE * * 7.38 * - add FUSE_EXPIRE_ONLY flag to fuse_notify_inval_entry * - add FOPEN_PARALLEL_DIRECT_WRITES * - add total_extlen to fuse_in_header * - add FUSE_MAX_NR_SECCTX * - add extension header * - add FUSE_EXT_GROUPS * - add FUSE_CREATE_SUPP_GROUP * - add FUSE_HAS_EXPIRE_ONLY * * 7.39 * - add FUSE_DIRECT_IO_ALLOW_MMAP * - add FUSE_STATX and related structures */ #ifndef _LINUX_FUSE_H #define _LINUX_FUSE_H #ifdef __KERNEL__ #include <linux/types.h> #else #include <stdint.h> #endif /* * Version negotiation: * * Both the kernel and userspace send the version they support in the * INIT request and reply respectively. * * If the major versions match then both shall use the smallest * of the two minor versions for communication. * * If the kernel supports a larger major version, then userspace shall * reply with the major version it supports, ignore the rest of the * INIT message and expect a new INIT message from the kernel with a * matching major version. * * If the library supports a larger major version, then it shall fall * back to the major protocol version sent by the kernel for * communication and reply with that major version (and an arbitrary * supported minor version). */ /** Version number of this interface */ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ #define FUSE_KERNEL_MINOR_VERSION 39 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 /* Make sure all structures are padded to 64bit boundary, so 32bit userspace works under 64bit kernels */ struct fuse_attr { uint64_t ino; uint64_t size; uint64_t blocks; uint64_t atime; uint64_t mtime; uint64_t ctime; uint32_t atimensec; uint32_t mtimensec; uint32_t ctimensec; uint32_t mode; uint32_t nlink; uint32_t uid; uint32_t gid; uint32_t rdev; uint32_t blksize; uint32_t flags; }; /* * The following structures are bit-for-bit compatible with the statx(2) ABI in * Linux.
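 *
 * Illustrative sketch (not part of the kernel ABI; the helper name is
 * hypothetical): a userspace server typically fills the struct fuse_attr
 * above from a Linux struct stat along these lines:
 *
 *   #include <sys/stat.h>
 *
 *   static void
 *   stat_to_fuse_attr(const struct stat *st, struct fuse_attr *attr)
 *   {
 *     attr->ino       = st->st_ino;
 *     attr->size      = st->st_size;
 *     attr->blocks    = st->st_blocks;
 *     attr->atime     = st->st_atim.tv_sec;
 *     attr->atimensec = st->st_atim.tv_nsec;
 *     attr->mtime     = st->st_mtim.tv_sec;
 *     attr->mtimensec = st->st_mtim.tv_nsec;
 *     attr->ctime     = st->st_ctim.tv_sec;
 *     attr->ctimensec = st->st_ctim.tv_nsec;
 *     attr->mode      = st->st_mode;
 *     attr->nlink     = st->st_nlink;
 *     attr->uid       = st->st_uid;
 *     attr->gid       = st->st_gid;
 *     attr->rdev      = st->st_rdev;
 *     attr->blksize   = st->st_blksize;
 *   }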
*/ struct fuse_sx_time { int64_t tv_sec; uint32_t tv_nsec; int32_t __reserved; }; struct fuse_statx { uint32_t mask; uint32_t blksize; uint64_t attributes; uint32_t nlink; uint32_t uid; uint32_t gid; uint16_t mode; uint16_t __spare0[1]; uint64_t ino; uint64_t size; uint64_t blocks; uint64_t attributes_mask; struct fuse_sx_time atime; struct fuse_sx_time btime; struct fuse_sx_time ctime; struct fuse_sx_time mtime; uint32_t rdev_major; uint32_t rdev_minor; uint32_t dev_major; uint32_t dev_minor; uint64_t __spare2[14]; }; struct fuse_kstatfs { uint64_t blocks; uint64_t bfree; uint64_t bavail; uint64_t files; uint64_t ffree; uint32_t bsize; uint32_t namelen; uint32_t frsize; uint32_t padding; uint32_t spare[6]; }; struct fuse_file_lock { uint64_t start; uint64_t end; uint32_t type; uint32_t pid; /* tgid */ }; /** * Bitmasks for fuse_setattr_in.valid */ #define FATTR_MODE (1 << 0) #define FATTR_UID (1 << 1) #define FATTR_GID (1 << 2) #define FATTR_SIZE (1 << 3) #define FATTR_ATIME (1 << 4) #define FATTR_MTIME (1 << 5) #define FATTR_FH (1 << 6) #define FATTR_ATIME_NOW (1 << 7) #define FATTR_MTIME_NOW (1 << 8) #define FATTR_LOCKOWNER (1 << 9) #define FATTR_CTIME (1 << 10) #define FATTR_KILL_SUIDGID (1 << 11) /** * Flags returned by the OPEN request * * FOPEN_DIRECT_IO: bypass page cache for this open file * FOPEN_KEEP_CACHE: don't invalidate the data cache on open * FOPEN_NONSEEKABLE: the file is not seekable * FOPEN_CACHE_DIR: allow caching this directory * FOPEN_STREAM: the file is stream-like (no file position at all) * FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE) * FOPEN_PARALLEL_DIRECT_WRITES: Allow concurrent direct writes on the same inode */ #define FOPEN_DIRECT_IO (1 << 0) #define FOPEN_KEEP_CACHE (1 << 1) #define FOPEN_NONSEEKABLE (1 << 2) #define FOPEN_CACHE_DIR (1 << 3) #define FOPEN_STREAM (1 << 4) #define FOPEN_NOFLUSH (1 << 5) #define FOPEN_PARALLEL_DIRECT_WRITES (1 << 6) /** * INIT request/reply flags * * FUSE_ASYNC_READ: asynchronous read requests * FUSE_POSIX_LOCKS: remote locking for POSIX file locks * FUSE_FILE_OPS: kernel sends file handle for fstat, etc... (not yet supported) * FUSE_ATOMIC_O_TRUNC: handles the O_TRUNC open flag in the filesystem * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." 
* FUSE_BIG_WRITES: filesystem can handle write size larger than 4kB * FUSE_DONT_MASK: don't apply umask to file mode on create operations * FUSE_SPLICE_WRITE: kernel supports splice write on the device * FUSE_SPLICE_MOVE: kernel supports splice move on the device * FUSE_SPLICE_READ: kernel supports splice read on the device * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks * FUSE_HAS_IOCTL_DIR: kernel supports ioctl on directories * FUSE_AUTO_INVAL_DATA: automatically invalidate cached pages * FUSE_DO_READDIRPLUS: do READDIRPLUS (READDIR+LOOKUP in one) * FUSE_READDIRPLUS_AUTO: adaptive readdirplus * FUSE_ASYNC_DIO: asynchronous direct I/O submission * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir * FUSE_HANDLE_KILLPRIV: fs handles killing suid/sgid/cap on write/chown/trunc * FUSE_POSIX_ACL: filesystem supports posix acls * FUSE_ABORT_ERROR: reading the device after abort returns ECONNABORTED * FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages * FUSE_CACHE_SYMLINKS: cache READLINK responses * FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir * FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request * FUSE_MAP_ALIGNMENT: init_out.map_alignment contains log2(byte alignment) for * foffset and moffset fields in struct * fuse_setupmapping_out and fuse_removemapping_one. * FUSE_SUBMOUNTS: kernel supports auto-mounting directory submounts * FUSE_HANDLE_KILLPRIV_V2: fs kills suid/sgid/cap on write/chown/trunc. * Upon write/truncate suid/sgid is only killed if caller * does not have CAP_FSETID. Additionally upon * write/truncate sgid is killed only if file has group * execute permission. (Same as Linux VFS behavior). * FUSE_SETXATTR_EXT: Server supports extended struct fuse_setxattr_in * FUSE_INIT_EXT: extended fuse_init_in request * FUSE_INIT_RESERVED: reserved, do not use * FUSE_SECURITY_CTX: add security context to create, mkdir, symlink, and * mknod * FUSE_HAS_INODE_DAX: use per inode DAX * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir, * symlink and mknod (single group that matches parent) * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. 
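 *
 * Illustrative sketch (not part of the kernel ABI; 'want' and 'init_out'
 * are hypothetical locals): because the flag set has grown past 32 bits,
 * a server using extended INIT (FUSE_INIT_EXT) splits a 64-bit flag word
 * across the 'flags' and 'flags2' fields of fuse_init_out, mirroring how
 * it reads fuse_init_in:
 *
 *   uint64_t want = FUSE_ASYNC_READ | FUSE_MAX_PAGES | FUSE_SECURITY_CTX;
 *   struct fuse_init_out init_out = {0};
 *   init_out.flags  = (uint32_t)(want & 0xffffffff);
 *   init_out.flags2 = (uint32_t)(want >> 32);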
*/ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) #define FUSE_FILE_OPS (1 << 2) #define FUSE_ATOMIC_O_TRUNC (1 << 3) #define FUSE_EXPORT_SUPPORT (1 << 4) #define FUSE_BIG_WRITES (1 << 5) #define FUSE_DONT_MASK (1 << 6) #define FUSE_SPLICE_WRITE (1 << 7) #define FUSE_SPLICE_MOVE (1 << 8) #define FUSE_SPLICE_READ (1 << 9) #define FUSE_FLOCK_LOCKS (1 << 10) #define FUSE_HAS_IOCTL_DIR (1 << 11) #define FUSE_AUTO_INVAL_DATA (1 << 12) #define FUSE_DO_READDIRPLUS (1 << 13) #define FUSE_READDIRPLUS_AUTO (1 << 14) #define FUSE_ASYNC_DIO (1 << 15) #define FUSE_WRITEBACK_CACHE (1 << 16) #define FUSE_NO_OPEN_SUPPORT (1 << 17) #define FUSE_PARALLEL_DIROPS (1 << 18) #define FUSE_HANDLE_KILLPRIV (1 << 19) #define FUSE_POSIX_ACL (1 << 20) #define FUSE_ABORT_ERROR (1 << 21) #define FUSE_MAX_PAGES (1 << 22) #define FUSE_CACHE_SYMLINKS (1 << 23) #define FUSE_NO_OPENDIR_SUPPORT (1 << 24) #define FUSE_EXPLICIT_INVAL_DATA (1 << 25) #define FUSE_MAP_ALIGNMENT (1 << 26) #define FUSE_SUBMOUNTS (1 << 27) #define FUSE_HANDLE_KILLPRIV_V2 (1 << 28) #define FUSE_SETXATTR_EXT (1 << 29) #define FUSE_INIT_EXT (1 << 30) #define FUSE_INIT_RESERVED (1 << 31) /* bits 32..63 get shifted down 32 bits into the flags2 field */ #define FUSE_SECURITY_CTX (1ULL << 32) #define FUSE_HAS_INODE_DAX (1ULL << 33) #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) #define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP /** * CUSE INIT request/reply flags * * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl */ #define CUSE_UNRESTRICTED_IOCTL (1 << 0) /** * Release flags */ #define FUSE_RELEASE_FLUSH (1 << 0) #define FUSE_RELEASE_FLOCK_UNLOCK (1 << 1) /** * Getattr flags */ #define FUSE_GETATTR_FH (1 << 0) /** * Lock flags */ #define FUSE_LK_FLOCK (1 << 0) /** * WRITE flags * * FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed * FUSE_WRITE_LOCKOWNER: lock_owner field is valid * FUSE_WRITE_KILL_SUIDGID: kill suid and sgid bits */ #define FUSE_WRITE_CACHE (1 << 0) #define FUSE_WRITE_LOCKOWNER (1 << 1) #define FUSE_WRITE_KILL_SUIDGID (1 << 2) /* Obsolete alias; this flag implies killing suid/sgid only. 
*/ #define FUSE_WRITE_KILL_PRIV FUSE_WRITE_KILL_SUIDGID /** * Read flags */ #define FUSE_READ_LOCKOWNER (1 << 1) /** * Ioctl flags * * FUSE_IOCTL_COMPAT: 32bit compat ioctl on 64bit machine * FUSE_IOCTL_UNRESTRICTED: not restricted to well-formed ioctls, retry allowed * FUSE_IOCTL_RETRY: retry with new iovecs * FUSE_IOCTL_32BIT: 32bit ioctl * FUSE_IOCTL_DIR: is a directory * FUSE_IOCTL_COMPAT_X32: x32 compat ioctl on 64bit machine (64bit time_t) * * FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs */ #define FUSE_IOCTL_COMPAT (1 << 0) #define FUSE_IOCTL_UNRESTRICTED (1 << 1) #define FUSE_IOCTL_RETRY (1 << 2) #define FUSE_IOCTL_32BIT (1 << 3) #define FUSE_IOCTL_DIR (1 << 4) #define FUSE_IOCTL_COMPAT_X32 (1 << 5) #define FUSE_IOCTL_MAX_IOV 256 /** * Poll flags * * FUSE_POLL_SCHEDULE_NOTIFY: request poll notify */ #define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0) /** * Fsync flags * * FUSE_FSYNC_FDATASYNC: Sync data only, not metadata */ #define FUSE_FSYNC_FDATASYNC (1 << 0) /** * fuse_attr flags * * FUSE_ATTR_SUBMOUNT: Object is a submount root * FUSE_ATTR_DAX: Enable DAX for this file in per inode DAX mode */ #define FUSE_ATTR_SUBMOUNT (1 << 0) #define FUSE_ATTR_DAX (1 << 1) /** * Open flags * FUSE_OPEN_KILL_SUIDGID: Kill suid and sgid if executable */ #define FUSE_OPEN_KILL_SUIDGID (1 << 0) /** * setxattr flags * FUSE_SETXATTR_ACL_KILL_SGID: Clear SGID when system.posix_acl_access is set */ #define FUSE_SETXATTR_ACL_KILL_SGID (1 << 0) /** * notify_inval_entry flags * FUSE_EXPIRE_ONLY */ #define FUSE_EXPIRE_ONLY (1 << 0) /** * extension type * FUSE_MAX_NR_SECCTX: maximum value of &fuse_secctx_header.nr_secctx * FUSE_EXT_GROUPS: &fuse_supp_groups extension */ enum fuse_ext_type { /* Types 0..31 are reserved for fuse_secctx_header */ FUSE_MAX_NR_SECCTX = 31, FUSE_EXT_GROUPS = 32, }; enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ FUSE_GETATTR = 3, FUSE_SETATTR = 4, FUSE_READLINK = 5, FUSE_SYMLINK = 6, FUSE_MKNOD = 8, FUSE_MKDIR = 9, FUSE_UNLINK = 10, FUSE_RMDIR = 11, FUSE_RENAME = 12, FUSE_LINK = 13, FUSE_OPEN = 14, FUSE_READ = 15, FUSE_WRITE = 16, FUSE_STATFS = 17, FUSE_RELEASE = 18, FUSE_FSYNC = 20, FUSE_SETXATTR = 21, FUSE_GETXATTR = 22, FUSE_LISTXATTR = 23, FUSE_REMOVEXATTR = 24, FUSE_FLUSH = 25, FUSE_INIT = 26, FUSE_OPENDIR = 27, FUSE_READDIR = 28, FUSE_RELEASEDIR = 29, FUSE_FSYNCDIR = 30, FUSE_GETLK = 31, FUSE_SETLK = 32, FUSE_SETLKW = 33, FUSE_ACCESS = 34, FUSE_CREATE = 35, FUSE_INTERRUPT = 36, FUSE_BMAP = 37, FUSE_DESTROY = 38, FUSE_IOCTL = 39, FUSE_POLL = 40, FUSE_NOTIFY_REPLY = 41, FUSE_BATCH_FORGET = 42, FUSE_FALLOCATE = 43, FUSE_READDIRPLUS = 44, FUSE_RENAME2 = 45, FUSE_LSEEK = 46, FUSE_COPY_FILE_RANGE = 47, FUSE_SETUPMAPPING = 48, FUSE_REMOVEMAPPING = 49, FUSE_SYNCFS = 50, FUSE_TMPFILE = 51, FUSE_STATX = 52, /* CUSE specific operations */ CUSE_INIT = 4096, /* Reserved opcodes: helpful to detect structure endian-ness */ CUSE_INIT_BSWAP_RESERVED = 1048576, /* CUSE_INIT << 8 */ FUSE_INIT_BSWAP_RESERVED = 436207616, /* FUSE_INIT << 24 */ }; enum fuse_notify_code { FUSE_NOTIFY_POLL = 1, FUSE_NOTIFY_INVAL_INODE = 2, FUSE_NOTIFY_INVAL_ENTRY = 3, FUSE_NOTIFY_STORE = 4, FUSE_NOTIFY_RETRIEVE = 5, FUSE_NOTIFY_DELETE = 6, FUSE_NOTIFY_CODE_MAX, }; /* The read buffer is required to be at least 8k, but may be much larger */ #define FUSE_MIN_READ_BUFFER 8192 #define FUSE_COMPAT_ENTRY_OUT_SIZE 120 struct fuse_entry_out { uint64_t nodeid; /* Inode ID */ uint64_t generation; /* Inode generation: nodeid:gen must be unique for the fs's lifetime */ uint64_t entry_valid; /* Cache 
timeout for the name */ uint64_t attr_valid; /* Cache timeout for the attributes */ uint32_t entry_valid_nsec; uint32_t attr_valid_nsec; struct fuse_attr attr; }; struct fuse_forget_in { uint64_t nlookup; }; struct fuse_forget_one { uint64_t nodeid; uint64_t nlookup; }; struct fuse_batch_forget_in { uint32_t count; uint32_t dummy; }; struct fuse_getattr_in { uint32_t getattr_flags; uint32_t dummy; uint64_t fh; }; #define FUSE_COMPAT_ATTR_OUT_SIZE 96 struct fuse_attr_out { uint64_t attr_valid; /* Cache timeout for the attributes */ uint32_t attr_valid_nsec; uint32_t dummy; struct fuse_attr attr; }; struct fuse_statx_in { uint32_t getattr_flags; uint32_t reserved; uint64_t fh; uint32_t sx_flags; uint32_t sx_mask; }; struct fuse_statx_out { uint64_t attr_valid; /* Cache timeout for the attributes */ uint32_t attr_valid_nsec; uint32_t flags; uint64_t spare[2]; struct fuse_statx stat; }; #define FUSE_COMPAT_MKNOD_IN_SIZE 8 struct fuse_mknod_in { uint32_t mode; uint32_t rdev; uint32_t umask; uint32_t padding; }; struct fuse_mkdir_in { uint32_t mode; uint32_t umask; }; struct fuse_rename_in { uint64_t newdir; }; struct fuse_rename2_in { uint64_t newdir; uint32_t flags; uint32_t padding; }; struct fuse_link_in { uint64_t oldnodeid; }; struct fuse_setattr_in { uint32_t valid; uint32_t padding; uint64_t fh; uint64_t size; uint64_t lock_owner; uint64_t atime; uint64_t mtime; uint64_t ctime; uint32_t atimensec; uint32_t mtimensec; uint32_t ctimensec; uint32_t mode; uint32_t unused4; uint32_t uid; uint32_t gid; uint32_t unused5; }; struct fuse_open_in { uint32_t flags; uint32_t open_flags; /* FUSE_OPEN_... */ }; struct fuse_create_in { uint32_t flags; uint32_t mode; uint32_t umask; uint32_t open_flags; /* FUSE_OPEN_... */ }; struct fuse_open_out { uint64_t fh; uint32_t open_flags; uint32_t padding; }; struct fuse_release_in { uint64_t fh; uint32_t flags; uint32_t release_flags; uint64_t lock_owner; }; struct fuse_flush_in { uint64_t fh; uint32_t unused; uint32_t padding; uint64_t lock_owner; }; struct fuse_read_in { uint64_t fh; uint64_t offset; uint32_t size; uint32_t read_flags; uint64_t lock_owner; uint32_t flags; uint32_t padding; }; #define FUSE_COMPAT_WRITE_IN_SIZE 24 struct fuse_write_in { uint64_t fh; uint64_t offset; uint32_t size; uint32_t write_flags; uint64_t lock_owner; uint32_t flags; uint32_t padding; }; struct fuse_write_out { uint32_t size; uint32_t padding; }; #define FUSE_COMPAT_STATFS_SIZE 48 struct fuse_statfs_out { struct fuse_kstatfs st; }; struct fuse_fsync_in { uint64_t fh; uint32_t fsync_flags; uint32_t padding; }; #define FUSE_COMPAT_SETXATTR_IN_SIZE 8 struct fuse_setxattr_in { uint32_t size; uint32_t flags; uint32_t setxattr_flags; uint32_t padding; }; struct fuse_getxattr_in { uint32_t size; uint32_t padding; }; struct fuse_getxattr_out { uint32_t size; uint32_t padding; }; struct fuse_lk_in { uint64_t fh; uint64_t owner; struct fuse_file_lock lk; uint32_t lk_flags; uint32_t padding; }; struct fuse_lk_out { struct fuse_file_lock lk; }; struct fuse_access_in { uint32_t mask; uint32_t padding; }; struct fuse_init_in { uint32_t major; uint32_t minor; uint32_t max_readahead; uint32_t flags; uint32_t flags2; uint32_t unused[11]; }; #define FUSE_COMPAT_INIT_OUT_SIZE 8 #define FUSE_COMPAT_22_INIT_OUT_SIZE 24 struct fuse_init_out { uint32_t major; uint32_t minor; uint32_t max_readahead; uint32_t flags; uint16_t max_background; uint16_t congestion_threshold; uint32_t max_write; uint32_t time_gran; uint16_t max_pages; uint16_t map_alignment; uint32_t flags2; uint32_t unused[7]; }; 
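/*
 * Illustrative sketch (not normative; variable names are hypothetical):
 * following the version negotiation rules described near the top of this
 * header, a userspace server answering FUSE_INIT from a fuse_init_in 'in'
 * might fill its fuse_init_out reply like this:
 *
 *   struct fuse_init_out out = {0};
 *
 *   out.major = FUSE_KERNEL_VERSION;
 *   if (in.major == FUSE_KERNEL_VERSION)
 *     out.minor = (in.minor < FUSE_KERNEL_MINOR_VERSION)
 *               ? in.minor : FUSE_KERNEL_MINOR_VERSION;
 *   else
 *     out.minor = FUSE_KERNEL_MINOR_VERSION;
 *
 *   out.max_readahead = in.max_readahead;
 *   out.max_write     = 1024 * 1024;
 *   out.max_pages     = 256;                 // honored when FUSE_MAX_PAGES is set
 *   out.flags         = FUSE_ASYNC_READ | FUSE_MAX_PAGES;
 */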
#define CUSE_INIT_INFO_MAX 4096 struct cuse_init_in { uint32_t major; uint32_t minor; uint32_t unused; uint32_t flags; }; struct cuse_init_out { uint32_t major; uint32_t minor; uint32_t unused; uint32_t flags; uint32_t max_read; uint32_t max_write; uint32_t dev_major; /* chardev major */ uint32_t dev_minor; /* chardev minor */ uint32_t spare[10]; }; struct fuse_interrupt_in { uint64_t unique; }; struct fuse_bmap_in { uint64_t block; uint32_t blocksize; uint32_t padding; }; struct fuse_bmap_out { uint64_t block; }; struct fuse_ioctl_in { uint64_t fh; uint32_t flags; uint32_t cmd; uint64_t arg; uint32_t in_size; uint32_t out_size; }; struct fuse_ioctl_iovec { uint64_t base; uint64_t len; }; struct fuse_ioctl_out { int32_t result; uint32_t flags; uint32_t in_iovs; uint32_t out_iovs; }; struct fuse_poll_in { uint64_t fh; uint64_t kh; uint32_t flags; uint32_t events; }; struct fuse_poll_out { uint32_t revents; uint32_t padding; }; struct fuse_notify_poll_wakeup_out { uint64_t kh; }; struct fuse_fallocate_in { uint64_t fh; uint64_t offset; uint64_t length; uint32_t mode; uint32_t padding; }; struct fuse_in_header { uint32_t len; uint32_t opcode; uint64_t unique; uint64_t nodeid; uint32_t uid; uint32_t gid; uint32_t pid; uint16_t total_extlen; /* length of extensions in 8byte units */ uint16_t padding; }; struct fuse_out_header { uint32_t len; int32_t error; uint64_t unique; }; struct fuse_dirent { uint64_t ino; uint64_t off; uint32_t namelen; uint32_t type; char name[]; }; /* Align variable length records to 64bit boundary */ #define FUSE_REC_ALIGN(x) \ (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1)) #define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name) #define FUSE_DIRENT_ALIGN(x) FUSE_REC_ALIGN(x) #define FUSE_DIRENT_SIZE(d) \ FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) struct fuse_direntplus { struct fuse_entry_out entry_out; struct fuse_dirent dirent; }; #define FUSE_NAME_OFFSET_DIRENTPLUS \ offsetof(struct fuse_direntplus, dirent.name) #define FUSE_DIRENTPLUS_SIZE(d) \ FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen) struct fuse_notify_inval_inode_out { uint64_t ino; int64_t off; int64_t len; }; struct fuse_notify_inval_entry_out { uint64_t parent; uint32_t namelen; uint32_t flags; }; struct fuse_notify_delete_out { uint64_t parent; uint64_t child; uint32_t namelen; uint32_t padding; }; struct fuse_notify_store_out { uint64_t nodeid; uint64_t offset; uint32_t size; uint32_t padding; }; struct fuse_notify_retrieve_out { uint64_t notify_unique; uint64_t nodeid; uint64_t offset; uint32_t size; uint32_t padding; }; /* Matches the size of fuse_write_in */ struct fuse_notify_retrieve_in { uint64_t dummy1; uint64_t offset; uint32_t size; uint32_t dummy2; uint64_t dummy3; uint64_t dummy4; }; /* Device ioctls: */ #define FUSE_DEV_IOC_MAGIC 229 #define FUSE_DEV_IOC_CLONE _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t) struct fuse_lseek_in { uint64_t fh; uint64_t offset; uint32_t whence; uint32_t padding; }; struct fuse_lseek_out { uint64_t offset; }; struct fuse_copy_file_range_in { uint64_t fh_in; uint64_t off_in; uint64_t nodeid_out; uint64_t fh_out; uint64_t off_out; uint64_t len; uint64_t flags; }; #define FUSE_SETUPMAPPING_FLAG_WRITE (1ull << 0) #define FUSE_SETUPMAPPING_FLAG_READ (1ull << 1) struct fuse_setupmapping_in { /* An already open handle */ uint64_t fh; /* Offset into the file to start the mapping */ uint64_t foffset; /* Length of mapping required */ uint64_t len; /* Flags, FUSE_SETUPMAPPING_FLAG_* */ uint64_t flags; /* Offset in Memory Window */ 
uint64_t moffset; }; struct fuse_removemapping_in { /* number of fuse_removemapping_one follows */ uint32_t count; }; struct fuse_removemapping_one { /* Offset into the dax window start the unmapping */ uint64_t moffset; /* Length of mapping required */ uint64_t len; }; #define FUSE_REMOVEMAPPING_MAX_ENTRY \ (PAGE_SIZE / sizeof(struct fuse_removemapping_one)) struct fuse_syncfs_in { uint64_t padding; }; /* * For each security context, send fuse_secctx with size of security context * fuse_secctx will be followed by security context name and this in turn * will be followed by actual context label. * fuse_secctx, name, context */ struct fuse_secctx { uint32_t size; uint32_t padding; }; /* * Contains the information about how many fuse_secctx structures are being * sent and what's the total size of all security contexts (including * size of fuse_secctx_header). * */ struct fuse_secctx_header { uint32_t size; uint32_t nr_secctx; }; /** * struct fuse_ext_header - extension header * @size: total size of this extension including this header * @type: type of extension * * This is made compatible with fuse_secctx_header by using type values > * FUSE_MAX_NR_SECCTX */ struct fuse_ext_header { uint32_t size; uint32_t type; }; /** * struct fuse_supp_groups - Supplementary group extension * @nr_groups: number of supplementary groups * @groups: flexible array of group IDs */ struct fuse_supp_groups { uint32_t nr_groups; uint32_t groups[]; }; #endif /* _LINUX_FUSE_H */ mergerfs-2.40.2/libfuse/include/fuse_lowlevel.h000066400000000000000000001310351457016576200215310ustar00rootroot00000000000000/* FUSE: Filesystem in Userspace Copyright (C) 2001-2007 Miklos Szeredi This program can be distributed under the terms of the GNU LGPLv2. See the file COPYING.LIB. */ #ifndef _FUSE_LOWLEVEL_H_ #define _FUSE_LOWLEVEL_H_ #include "extern_c.h" #include "fuse_common.h" #include "fuse_kernel.h" #include <fcntl.h> #include <stdint.h> #include <sys/stat.h> #include <sys/statvfs.h> #include <sys/types.h> #include <sys/uio.h> #include <utime.h> EXTERN_C_BEGIN /* ----------------------------------------------------------- * * Miscellaneous definitions * * ----------------------------------------------------------- */ /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 /** Request pointer type */ typedef struct fuse_req *fuse_req_t; /** * Session * * This provides hooks for processing requests, and exiting */ struct fuse_session; /** * Channel * * A communication channel, providing hooks for sending and receiving * messages */ struct fuse_chan; /** Directory entry parameters supplied to fuse_reply_entry() */ struct fuse_entry_param { /** Unique inode number * * In lookup, zero means negative entry (from version 2.5) * Returning ENOENT also means negative entry, but by setting zero * ino the kernel may cache negative entries for entry_timeout * seconds. */ uint64_t ino; /** Generation number for this entry. * * If the file system will be exported over NFS, the * ino/generation pairs need to be unique over the file * system's lifetime (rather than just the mount time). So if * the file system reuses an inode after it has been deleted, * it must assign a new, previously unused generation number * to the inode at the same time. * * The generation must be non-zero, otherwise FUSE will treat * it as an error. * */ uint64_t generation; /** Inode attributes. * * Even if attr_timeout == 0, attr must be correct. For example, * for open(), FUSE uses attr.st_size from lookup() to determine * how many bytes to request. If this value is not correct, * incorrect data will be returned.
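 *
 * Illustrative sketch (not normative; assumes a Linux 'struct stat st'
 * obtained by the filesystem, error handling omitted): a typical lookup
 * reply fills this structure and hands it to fuse_reply_entry():
 *
 *   struct fuse_entry_param e = {0};
 *
 *   e.ino        = st.st_ino;   // zero would mean a negative entry
 *   e.generation = 1;           // must be non-zero (see above)
 *   e.attr       = st;          // must be accurate even with a zero timeout
 *   fuse_reply_entry(req, &e);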
*/ struct stat attr; fuse_timeouts_t timeout; }; /** Additional context associated with requests */ struct fuse_ctx { /** User ID of the calling process */ uid_t uid; /** Group ID of the calling process */ gid_t gid; /** Thread ID of the calling process */ pid_t pid; /** Umask of the calling process (introduced in version 2.8) */ mode_t umask; }; /* ----------------------------------------------------------- * * Request methods and replies * * ----------------------------------------------------------- */ /** * Low level filesystem operations * * Most of the methods (with the exception of init and destroy) * receive a request handle (fuse_req_t) as their first argument. * This handle must be passed to one of the specified reply functions. * * This may be done inside the method invocation, or after the call * has returned. The request handle is valid until one of the reply * functions is called. * * Other pointer arguments (name, fuse_file_info, etc) are not valid * after the call has returned, so if they are needed later, their * contents have to be copied. * * The filesystem sometimes needs to handle a return value of -ENOENT * from the reply function, which means that the request was * interrupted and the reply discarded. For example, if * fuse_reply_open() returns -ENOENT, the release method for * this file will not be called. */ struct fuse_lowlevel_ops { /** * Initialize filesystem * * Called before any other filesystem method * * There's no reply to this function * * @param userdata the user data passed to fuse_lowlevel_new() */ void (*init)(void *userdata, struct fuse_conn_info *conn); /** * Clean up filesystem * * Called on filesystem exit * * There's no reply to this function * * @param userdata the user data passed to fuse_lowlevel_new() */ void (*destroy)(void *userdata); /** * Look up a directory entry by name and get its attributes. * * Valid replies: * fuse_reply_entry * fuse_reply_err * * @param req request handle * @param parent inode number of the parent directory * @param name the name to look up */ void (*lookup)(fuse_req_t req, struct fuse_in_header *hdr); /** * Forget about an inode * * This function is called when the kernel removes an inode * from its internal caches. * * The inode's lookup count increases by one for every call to * fuse_reply_entry and fuse_reply_create. The nlookup parameter * indicates by how much the lookup count should be decreased. * * Inodes with a non-zero lookup count may receive requests from * the kernel even after calls to unlink, rmdir or (when * overwriting an existing file) rename. Filesystems must handle * such requests properly and it is recommended to defer removal * of the inode until the lookup count reaches zero. Calls to * unlink, rmdir or rename will be followed closely by forget * unless the file or directory is open, in which case the * kernel issues forget only after the release or releasedir * calls. * * Note that if a file system will be exported over NFS the * inode's lifetime must extend even beyond forget. See the * generation field in struct fuse_entry_param above. * * On unmount the lookup count for all inodes implicitly drops * to zero. It is not guaranteed that the file system will * receive corresponding forget messages for the affected * inodes.
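 *
 * Illustrative sketch (not normative; the node type and free_node()
 * helper are hypothetical): the bookkeeping described above usually
 * amounts to a per-inode counter that fuse_reply_entry()/
 * fuse_reply_create() increment and forget decrements:
 *
 *   struct node { uint64_t nlookup; int unlinked; };
 *
 *   static void
 *   forget_one(struct node *n, uint64_t nlookup)
 *   {
 *     n->nlookup -= nlookup;
 *     if ((n->nlookup == 0) && n->unlinked)
 *       free_node(n);
 *   }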
* * Valid replies: * fuse_reply_none * * @param req request handle * @param ino the inode number * @param nlookup the number of lookups to forget */ void (*forget)(fuse_req_t req, struct fuse_in_header *hdr); /** * Get file attributes * * Valid replies: * fuse_reply_attr * fuse_reply_err */ void (*getattr)(fuse_req_t req, struct fuse_in_header *hdr); /** * Set file attributes * * In the 'attr' argument only members indicated by the 'to_set' * bitmask contain valid values. Other members contain undefined * values. * * If the setattr was invoked from the ftruncate() system call * under Linux kernel versions 2.6.15 or later, the fi->fh will * contain the value set by the open method or will be undefined * if the open method didn't set any value. Otherwise (not * ftruncate call, or kernel version earlier than 2.6.15) the fi * parameter will be NULL. * * Valid replies: * fuse_reply_attr * fuse_reply_err * * @param req request handle * @param ino the inode number * @param attr the attributes * @param to_set bit mask of attributes which should be set * @param fi file information, or NULL * * Changed in version 2.5: * file information filled in for ftruncate */ void (*setattr)(fuse_req_t req, struct fuse_in_header *hdr); /** * Read symbolic link * * Valid replies: * fuse_reply_readlink * fuse_reply_err * * @param req request handle * @param ino the inode number */ void (*readlink)(fuse_req_t req, struct fuse_in_header *hdr); /** * Create file node * * Create a regular file, character device, block device, fifo or * socket node. * * Valid replies: * fuse_reply_entry * fuse_reply_err * * @param req request handle * @param parent inode number of the parent directory * @param name to create * @param mode file type and mode with which to create the new file * @param rdev the device number (only valid if created file is a device) */ void (*mknod)(fuse_req_t req, struct fuse_in_header *hdr); /** * Create a directory * * Valid replies: * fuse_reply_entry * fuse_reply_err * * @param req request handle * @param parent inode number of the parent directory * @param name to create * @param mode with which to create the new file */ void (*mkdir)(fuse_req_t req, struct fuse_in_header *hdr); /** * Remove a file * * If the file's inode's lookup count is non-zero, the file * system is expected to postpone any removal of the inode * until the lookup count reaches zero (see description of the * forget function). * * Valid replies: * fuse_reply_err * * @param req request handle * @param parent inode number of the parent directory * @param name to remove */ void (*unlink)(fuse_req_t req, struct fuse_in_header *hdr); /** * Remove a directory * * If the directory's inode's lookup count is non-zero, the * file system is expected to postpone any removal of the * inode until the lookup count reaches zero (see description * of the forget function). * * Valid replies: * fuse_reply_err * * @param req request handle * @param parent inode number of the parent directory * @param name to remove */ void (*rmdir)(fuse_req_t req, struct fuse_in_header *hdr); /** * Create a symbolic link * * Valid replies: * fuse_reply_entry * fuse_reply_err * * @param req request handle * @param link the contents of the symbolic link * @param parent inode number of the parent directory * @param name to create */ void (*symlink)(fuse_req_t req, struct fuse_in_header *hdr); /** Rename a file * * If the target exists it should be atomically replaced. 
If * the target's inode's lookup count is non-zero, the file * system is expected to postpone any removal of the inode * until the lookup count reaches zero (see description of the * forget function). * * Valid replies: * fuse_reply_err * * @param req request handle * @param parent inode number of the old parent directory * @param name old name * @param newparent inode number of the new parent directory * @param newname new name */ void (*rename)(fuse_req_t req, struct fuse_in_header *hdr); /** * Create a hard link * * Valid replies: * fuse_reply_entry * fuse_reply_err * * @param req request handle * @param ino the old inode number * @param newparent inode number of the new parent directory * @param newname new name to create */ void (*link)(fuse_req_t req, struct fuse_in_header *hdr); /** * Open a file * * Open flags (with the exception of O_CREAT, O_EXCL, O_NOCTTY and * O_TRUNC) are available in fi->flags. * * Filesystem may store an arbitrary file handle (pointer, index, * etc) in fi->fh, and use this in other all other file operations * (read, write, flush, release, fsync). * * Filesystem may also implement stateless file I/O and not store * anything in fi->fh. * * There are also some flags (direct_io, keep_cache) which the * filesystem may set in fi, to change the way the file is opened. * See fuse_file_info structure in for more details. * * Valid replies: * fuse_reply_open * fuse_reply_err * * @param req request handle * @param ino the inode number * @param fi file information */ void (*open)(fuse_req_t req, struct fuse_in_header *hdr); /** * Read data * * Read should send exactly the number of bytes requested except * on EOF or error, otherwise the rest of the data will be * substituted with zeroes. An exception to this is when the file * has been opened in 'direct_io' mode, in which case the return * value of the read system call will reflect the return value of * this operation. * * fi->fh will contain the value set by the open method, or will * be undefined if the open method didn't set any value. * * Valid replies: * fuse_reply_buf * fuse_reply_iov * fuse_reply_data * fuse_reply_err * * @param req request handle * @param ino the inode number * @param size number of bytes to read * @param off offset to read from * @param fi file information */ void (*read)(fuse_req_t req, struct fuse_in_header *hdr); /** * Write data * * Write should return exactly the number of bytes requested * except on error. An exception to this is when the file has * been opened in 'direct_io' mode, in which case the return value * of the write system call will reflect the return value of this * operation. * * fi->fh will contain the value set by the open method, or will * be undefined if the open method didn't set any value. * * Valid replies: * fuse_reply_write * fuse_reply_err * * @param req request handle * @param ino the inode number * @param buf data to write * @param size number of bytes to write * @param off offset to write to * @param fi file information */ void (*write)(fuse_req_t req, struct fuse_in_header *hdr); /** * Flush method * * This is called on each close() of the opened file. * * Since file descriptors can be duplicated (dup, dup2, fork), for * one open call there may be many flush calls. * * Filesystems shouldn't assume that flush will always be called * after some writes, or that if will be called at all. * * fi->fh will contain the value set by the open method, or will * be undefined if the open method didn't set any value. 
* * NOTE: the name of the method is misleading, since (unlike * fsync) the filesystem is not forced to flush pending writes. * One reason to flush data, is if the filesystem wants to return * write errors. * * If the filesystem supports file locking operations (setlk, * getlk) it should remove all locks belonging to 'fi->owner'. * * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param fi file information */ void (*flush)(fuse_req_t req, struct fuse_in_header *hdr); /** * Release an open file * * Release is called when there are no more references to an open * file: all file descriptors are closed and all memory mappings * are unmapped. * * For every open call there will be exactly one release call. * * The filesystem may reply with an error, but error values are * not returned to close() or munmap() which triggered the * release. * * fi->fh will contain the value set by the open method, or will * be undefined if the open method didn't set any value. * fi->flags will contain the same flags as for open. * * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param fi file information */ void (*release)(fuse_req_t req, struct fuse_in_header *hdr); /** * Synchronize file contents * * If the datasync parameter is non-zero, then only the user data * should be flushed, not the meta data. * * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param datasync flag indicating if only data should be flushed * @param fi file information */ void (*fsync)(fuse_req_t req, struct fuse_in_header *hdr); /** * Open a directory * * Filesystem may store an arbitrary file handle (pointer, index, * etc) in fi->fh, and use this in other all other directory * stream operations (readdir, releasedir, fsyncdir). * * Filesystem may also implement stateless directory I/O and not * store anything in fi->fh, though that makes it impossible to * implement standard conforming directory stream operations in * case the contents of the directory can change between opendir * and releasedir. * * Valid replies: * fuse_reply_open * fuse_reply_err * * @param req request handle * @param ino the inode number * @param fi file information */ void (*opendir)(fuse_req_t req, struct fuse_in_header *hdr); /** * Read directory * * Send a buffer filled using fuse_add_direntry(), with size not * exceeding the requested size. Send an empty buffer on end of * stream. * * fi->fh will contain the value set by the opendir method, or * will be undefined if the opendir method didn't set any value. * * Valid replies: * fuse_reply_buf * fuse_reply_data * fuse_reply_err * * @param req request handle * @param ino the inode number * @param size maximum number of bytes to send * @param off offset to continue reading the directory stream * @param fi file information */ void (*readdir)(fuse_req_t req, struct fuse_in_header *hdr); void (*readdir_plus)(fuse_req_t req, struct fuse_in_header *hdr); /** * Release an open directory * * For every opendir call there will be exactly one releasedir * call. * * fi->fh will contain the value set by the opendir method, or * will be undefined if the opendir method didn't set any value. 
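 *
 * Illustrative sketch (not normative; the dirhandle type is hypothetical
 * and <dirent.h>, <stdint.h> and <stdlib.h> are assumed): a common
 * pattern is for opendir to store a heap-allocated handle in fi->fh and
 * for releasedir to tear it down:
 *
 *   struct dirhandle { DIR *dp; };
 *
 *   struct dirhandle *dh = (struct dirhandle *)(uintptr_t)fi->fh;
 *   closedir(dh->dp);
 *   free(dh);
 *   fuse_reply_err(req, 0);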
* * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param fi file information */ void (*releasedir)(fuse_req_t req, struct fuse_in_header *hdr); /** * Synchronize directory contents * * If the datasync parameter is non-zero, then only the directory * contents should be flushed, not the meta data. * * fi->fh will contain the value set by the opendir method, or * will be undefined if the opendir method didn't set any value. * * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param datasync flag indicating if only data should be flushed * @param fi file information */ void (*fsyncdir)(fuse_req_t req, struct fuse_in_header *hdr); /** * Get file system statistics * * Valid replies: * fuse_reply_statfs * fuse_reply_err * * @param req request handle * @param ino the inode number, zero means "undefined" */ void (*statfs)(fuse_req_t req, struct fuse_in_header *hdr); /** * Set an extended attribute * * Valid replies: * fuse_reply_err */ void (*setxattr)(fuse_req_t req, struct fuse_in_header *hdr); /** * Get an extended attribute * * If size is zero, the size of the value should be sent with * fuse_reply_xattr. * * If the size is non-zero, and the value fits in the buffer, the * value should be sent with fuse_reply_buf. * * If the size is too small for the value, the ERANGE error should * be sent. * * Valid replies: * fuse_reply_buf * fuse_reply_data * fuse_reply_xattr * fuse_reply_err * * @param req request handle * @param ino the inode number * @param name of the extended attribute * @param size maximum size of the value to send */ void (*getxattr)(fuse_req_t req, struct fuse_in_header *hdr); /** * List extended attribute names * * If size is zero, the total size of the attribute list should be * sent with fuse_reply_xattr. * * If the size is non-zero, and the null character separated * attribute list fits in the buffer, the list should be sent with * fuse_reply_buf. * * If the size is too small for the list, the ERANGE error should * be sent. * * Valid replies: * fuse_reply_buf * fuse_reply_data * fuse_reply_xattr * fuse_reply_err * * @param req request handle * @param ino the inode number * @param size maximum size of the list to send */ void (*listxattr)(fuse_req_t req, struct fuse_in_header *hdr); /** * Remove an extended attribute * * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param name of the extended attribute */ void (*removexattr)(fuse_req_t req, const struct fuse_in_header *hdr); /** * Check file access permissions * * This will be called for the access() system call. If the * 'default_permissions' mount option is given, this method is not * called. * * This method is not called under Linux kernel versions 2.4.x * * Introduced in version 2.5 * * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param mask requested access mode */ void (*access)(fuse_req_t req, struct fuse_in_header *hdr); /** * Create and open a file * * If the file does not exist, first create it with the specified * mode, and then open it. * * Open flags (with the exception of O_NOCTTY) are available in * fi->flags. * * Filesystem may store an arbitrary file handle (pointer, index, * etc) in fi->fh, and use this in other all other file operations * (read, write, flush, release, fsync). * * There are also some flags (direct_io, keep_cache) which the * filesystem may set in fi, to change the way the file is opened. 
* See fuse_file_info structure in for more details. * * If this method is not implemented or under Linux kernel * versions earlier than 2.6.15, the mknod() and open() methods * will be called instead. * * Introduced in version 2.5 * * Valid replies: * fuse_reply_create * fuse_reply_err * * @param req request handle * @param parent inode number of the parent directory * @param name to create * @param mode file type and mode with which to create the new file * @param fi file information */ void (*create)(fuse_req_t req, struct fuse_in_header *hdr); /** * Test for a POSIX file lock * * Introduced in version 2.6 * * Valid replies: * fuse_reply_lock * fuse_reply_err * * @param req request handle * @param ino the inode number * @param fi file information * @param lock the region/type to test */ void (*getlk)(fuse_req_t req, const struct fuse_in_header *hdr); /** * Acquire, modify or release a POSIX file lock * * For POSIX threads (NPTL) there's a 1-1 relation between pid and * owner, but otherwise this is not always the case. For checking * lock ownership, 'fi->owner' must be used. The l_pid field in * 'struct flock' should only be used to fill in this field in * getlk(). * * Note: if the locking methods are not implemented, the kernel * will still allow file locking to work locally. Hence these are * only interesting for network filesystems and similar. * * Introduced in version 2.6 * * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param fi file information * @param lock the region/type to set * @param sleep locking operation may sleep */ void (*setlk)(fuse_req_t req, uint64_t ino, fuse_file_info_t *fi, struct flock *lock, int sleep); /** * Map block index within file to block index within device * * Note: This makes sense only for block device backed filesystems * mounted with the 'blkdev' option * * Introduced in version 2.6 * * Valid replies: * fuse_reply_bmap * fuse_reply_err * * @param req request handle * @param ino the inode number * @param blocksize unit of block index * @param idx block index within file */ void (*bmap)(fuse_req_t req, const struct fuse_in_header *hdr); /** * Ioctl * * Note: For unrestricted ioctls (not allowed for FUSE * servers), data in and out areas can be discovered by giving * iovs and setting FUSE_IOCTL_RETRY in @flags. For * restricted ioctls, kernel prepares in/out data area * according to the information encoded in cmd. * * Introduced in version 2.8 * * Valid replies: * fuse_reply_ioctl_retry * fuse_reply_ioctl * fuse_reply_ioctl_iov * fuse_reply_err * * @param req request handle * @param ino the inode number * @param cmd ioctl command * @param arg ioctl argument * @param fi file information * @param flags for FUSE_IOCTL_* flags * @param in_buf data fetched from the caller * @param in_bufsz number of fetched bytes * @param out_bufsz maximum size of output data */ void (*ioctl)(fuse_req_t req, const struct fuse_in_header *hdr); /** * Poll for IO readiness * * Introduced in version 2.8 * * Note: If ph is non-NULL, the client should notify * when IO readiness events occur by calling * fuse_lowelevel_notify_poll() with the specified ph. * * Regardless of the number of times poll with a non-NULL ph * is received, single notification is enough to clear all. * Notifying more times incurs overhead but doesn't harm * correctness. * * The callee is responsible for destroying ph with * fuse_pollhandle_destroy() when no longer in use. 
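 *
 * Illustrative sketch (not normative; 'saved_ph' is a hypothetical
 * fuse_pollhandle_t pointer stashed by a previous poll request): when IO
 * becomes ready the filesystem wakes the kernel and releases the handle:
 *
 *   if (saved_ph != NULL)
 *     {
 *       fuse_lowlevel_notify_poll(saved_ph);
 *       fuse_pollhandle_destroy(saved_ph);
 *       saved_ph = NULL;
 *     }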
* * Valid replies: * fuse_reply_poll * fuse_reply_err * * @param req request handle * @param ino the inode number * @param fi file information * @param ph poll handle to be used for notification */ void (*poll)(fuse_req_t req, const struct fuse_in_header *hdr); /** * Callback function for the retrieve request * * Introduced in version 2.9 * * Valid replies: * fuse_reply_none * * @param req request handle * @param cookie user data supplied to fuse_lowlevel_notify_retrieve() * @param ino the inode number supplied to fuse_lowlevel_notify_retrieve() * @param offset the offset supplied to fuse_lowlevel_notify_retrieve() * @param bufv the buffer containing the returned data */ void (*retrieve_reply)(fuse_req_t req, void *cookie, uint64_t ino, off_t offset); /** * Forget about multiple inodes * * See description of the forget function for more * information. * * Introduced in version 2.9 * * Valid replies: * fuse_reply_none * * @param req request handle */ void (*forget_multi)(fuse_req_t req, struct fuse_in_header *hdr); /** * Acquire, modify or release a BSD file lock * * Note: if the locking methods are not implemented, the kernel * will still allow file locking to work locally. Hence these are * only interesting for network filesystems and similar. * * Introduced in version 2.9 * * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param fi file information * @param op the locking operation, see flock(2) */ void (*flock)(fuse_req_t req, uint64_t ino, fuse_file_info_t *fi, int op); /** * Allocate requested space. If this function returns success then * subsequent writes to the specified range shall not fail due to the lack * of free space on the file system storage media. * * Introduced in version 2.9 * * Valid replies: * fuse_reply_err * * @param req request handle * @param ino the inode number * @param offset starting point for allocated region * @param length size of allocated region * @param mode determines the operation to be performed on the given range, * see fallocate(2) */ void (*fallocate)(fuse_req_t req, const struct fuse_in_header *hdr); /** * Copy a range of data from one file to another * * Performs an optimized copy between two file descriptors without * the * additional cost of transferring data through the FUSE kernel * module * to user space (glibc) and then back into the FUSE filesystem * again. * * In case this method is not implemented, glibc falls back to * reading * data from the source and writing to the destination. Effectively * doing an inefficient copy of the data. * * If this request is answered with an error code of ENOSYS, this is * treated as a permanent failure with error code EOPNOTSUPP, * i.e. all * future copy_file_range() requests will fail with EOPNOTSUPP * without * being send to the filesystem process. 
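 *
 * Illustrative sketch (not normative): a filesystem that does not support
 * this operation can opt out permanently with a single reply, as
 * described above:
 *
 *   fuse_reply_err(req, ENOSYS);   // kernel stops sending the request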
* * Valid replies: * fuse_reply_write * fuse_reply_err * * @param req request handle * @param ino_in the inode number of the source file * @param off_in starting point from were the data should be read * @param fi_in file information of the source file * @param ino_out the inode number of the destination file * @param off_out starting point where the data should be written * @param fi_out file information of the destination file * @param len maximum size of the data to copy * @param flags passed along with the copy_file_range() syscall */ void (*copy_file_range)(fuse_req_t req, const struct fuse_in_header *hdr); void (*setupmapping)(fuse_req_t req, const struct fuse_in_header *hdr); void (*removemapping)(fuse_req_t req, const struct fuse_in_header *hdr); void (*syncfs)(fuse_req_t req, const struct fuse_in_header *hdr); void (*tmpfile)(fuse_req_t req, const struct fuse_in_header *hdr); }; /** * Reply with an error code or success * * Possible requests: * all except forget * * unlink, rmdir, rename, flush, release, fsync, fsyncdir, setxattr, * removexattr and setlk may send a zero code * * @param req request handle * @param err the positive error value, or zero for success * @return zero for success, -errno for failure to send reply */ int fuse_reply_err(fuse_req_t req, int err); /** * Don't send reply * * Possible requests: * forget * * @param req request handle */ void fuse_reply_none(fuse_req_t req); /** * Reply with a directory entry * * Possible requests: * lookup, mknod, mkdir, symlink, link * * Side effects: * increments the lookup count on success * * @param req request handle * @param e the entry parameters * @return zero for success, -errno for failure to send reply */ int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e); /** * Reply with a directory entry and open parameters * * currently the following members of 'fi' are used: * fh, direct_io, keep_cache * * Possible requests: * create * * Side effects: * increments the lookup count on success * * @param req request handle * @param e the entry parameters * @param fi file information * @return zero for success, -errno for failure to send reply */ int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e, const fuse_file_info_t *fi); /** * Reply with attributes * * Possible requests: * getattr, setattr * * @param req request handle * @param attr the attributes * @param attr_timeout validity timeout (in seconds) for the attributes * @return zero for success, -errno for failure to send reply */ int fuse_reply_attr(fuse_req_t req, const struct stat *attr, const uint64_t timeout); /** * Reply with the contents of a symbolic link * * Possible requests: * readlink * * @param req request handle * @param link symbolic link contents * @return zero for success, -errno for failure to send reply */ int fuse_reply_readlink(fuse_req_t req, const char *link); /** * Reply with open parameters * * currently the following members of 'fi' are used: * fh, direct_io, keep_cache * * Possible requests: * open, opendir * * @param req request handle * @param fi file information * @return zero for success, -errno for failure to send reply */ int fuse_reply_open(fuse_req_t req, const fuse_file_info_t *fi); /** * Reply with number of bytes written * * Possible requests: * write * * @param req request handle * @param count the number of bytes written * @return zero for success, -errno for failure to send reply */ int fuse_reply_write(fuse_req_t req, size_t count); /** * Reply with data * * Possible requests: * read, readdir, 
getxattr, listxattr * * @param req request handle * @param buf buffer containing data * @param size the size of data in bytes * @return zero for success, -errno for failure to send reply */ int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size); int fuse_reply_data(fuse_req_t req, char *buf, size_t bufsize); /** * Reply with data vector * * Possible requests: * read, readdir, getxattr, listxattr * * @param req request handle * @param iov the vector containing the data * @param count the size of vector * @return zero for success, -errno for failure to send reply */ int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count); /** * Reply with filesystem statistics * * Possible requests: * statfs * * @param req request handle * @param stbuf filesystem statistics * @return zero for success, -errno for failure to send reply */ int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf); /** * Reply with needed buffer size * * Possible requests: * getxattr, listxattr * * @param req request handle * @param count the buffer size needed in bytes * @return zero for success, -errno for failure to send reply */ int fuse_reply_xattr(fuse_req_t req, size_t count); /** * Reply with file lock information * * Possible requests: * getlk * * @param req request handle * @param lock the lock information * @return zero for success, -errno for failure to send reply */ int fuse_reply_lock(fuse_req_t req, const struct flock *lock); /** * Reply with block index * * Possible requests: * bmap * * @param req request handle * @param idx block index within device * @return zero for success, -errno for failure to send reply */ int fuse_reply_bmap(fuse_req_t req, uint64_t idx); /** * Reply to ask for data fetch and output buffer preparation. ioctl * will be retried with the specified input data fetched and output * buffer prepared. * * Possible requests: * ioctl * * @param req request handle * @param in_iov iovec specifying data to fetch from the caller * @param in_count number of entries in in_iov * @param out_iov iovec specifying addresses to write output to * @param out_count number of entries in out_iov * @return zero for success, -errno for failure to send reply */ int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov, size_t in_count, const struct iovec *out_iov, size_t out_count); /** * Reply to finish ioctl * * Possible requests: * ioctl * * @param req request handle * @param result result to be passed to the caller * @param buf buffer containing output data * @param size length of output data */ int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, uint32_t size); /** * Reply to finish ioctl with iov buffer * * Possible requests: * ioctl * * @param req request handle * @param result result to be passed to the caller * @param iov the vector containing the data * @param count the size of vector */ int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov, int count); /** * Reply with poll result event mask * * @param req request handle * @param revents poll result event mask */ int fuse_reply_poll(fuse_req_t req, unsigned revents); /* ----------------------------------------------------------- * * Notification * * ----------------------------------------------------------- */ /** * Notify IO readiness event * * For more information, please read comment for poll operation. 
* * @param ph poll handle to notify IO readiness event for */ int fuse_lowlevel_notify_poll(fuse_pollhandle_t *ph); /** * Notify to invalidate cache for an inode * * @param ch the channel through which to send the invalidation * @param ino the inode number * @param off the offset in the inode where to start invalidating * or negative to invalidate attributes only * @param len the amount of cache to invalidate or 0 for all * @return zero for success, -errno for failure */ int fuse_lowlevel_notify_inval_inode(struct fuse_chan *ch, uint64_t ino, off_t off, off_t len); /** * Notify to invalidate parent attributes and the dentry matching * parent/name * * To avoid a deadlock don't call this function from a filesystem operation and * don't call it with a lock held that can also be held by a filesystem * operation. * * @param ch the channel through which to send the invalidation * @param parent inode number * @param name file name * @param namelen strlen() of file name * @return zero for success, -errno for failure */ int fuse_lowlevel_notify_inval_entry(struct fuse_chan *ch, uint64_t parent, const char *name, size_t namelen); /** * Notify to invalidate parent attributes and delete the dentry matching * parent/name if the dentry's inode number matches child (otherwise it * will invalidate the matching dentry). * * To avoid a deadlock don't call this function from a filesystem operation and * don't call it with a lock held that can also be held by a filesystem * operation. * * @param ch the channel through which to send the notification * @param parent inode number * @param child inode number * @param name file name * @param namelen strlen() of file name * @return zero for success, -errno for failure */ int fuse_lowlevel_notify_delete(struct fuse_chan *ch, uint64_t parent, uint64_t child, const char *name, size_t namelen); /** * Store data to the kernel buffers * * Synchronously store data in the kernel buffers belonging to the * given inode. The stored data is marked up-to-date (no read will be * performed against it, unless it's invalidated or evicted from the * cache). * * If the stored data overflows the current file size, then the size * is extended, similarly to a write(2) on the filesystem. * * If this function returns an error, then the store wasn't fully * completed, but it may have been partially completed. * * @param ch the channel through which to send the invalidation * @param ino the inode number * @param offset the starting offset into the file to store to * @param bufv buffer vector * @param flags flags controlling the copy * @return zero for success, -errno for failure */ int fuse_lowlevel_notify_store(struct fuse_chan *ch, uint64_t ino, off_t offset, struct fuse_bufvec *bufv, enum fuse_buf_copy_flags flags); /** * Retrieve data from the kernel buffers * * Retrieve data in the kernel buffers belonging to the given inode. * If successful then the retrieve_reply() method will be called with * the returned data. * * Only present pages are returned in the retrieve reply. Retrieving * stops when it finds a non-present page and only data prior to that is * returned. * * If this function returns an error, then the retrieve will not be * completed and no reply will be sent. * * This function doesn't change the dirty state of pages in the kernel * buffer. For dirty pages the write() method will be called * regardless of having been retrieved previously. 
* * @param ch the channel through which to send the invalidation * @param ino the inode number * @param size the number of bytes to retrieve * @param offset the starting offset into the file to retrieve from * @param cookie user data to supply to the reply callback * @return zero for success, -errno for failure */ int fuse_lowlevel_notify_retrieve(struct fuse_chan *ch, uint64_t ino, size_t size, off_t offset, void *cookie); /* ----------------------------------------------------------- * * Utility functions * * ----------------------------------------------------------- */ /** * Get the userdata from the request * * @param req request handle * @return the user data passed to fuse_lowlevel_new() */ void *fuse_req_userdata(fuse_req_t req); /** * Get the context from the request * * The pointer returned by this function will only be valid for the * request's lifetime * * @param req request handle * @return the context structure */ const struct fuse_ctx *fuse_req_ctx(fuse_req_t req); /** * Get the current supplementary group IDs for the specified request * * Similar to the getgroups(2) system call, except the return value is * always the total number of group IDs, even if it is larger than the * specified size. * * The current fuse kernel module in linux (as of 2.6.30) doesn't pass * the group list to userspace, hence this function needs to parse * "/proc/$TID/task/$TID/status" to get the group IDs. * * This feature may not be supported on all operating systems. In * such a case this function will return -ENOSYS. * * @param req request handle * @param size size of given array * @param list array of group IDs to be filled in * @return the total number of supplementary group IDs or -errno on failure */ int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[]); /* ----------------------------------------------------------- * * Filesystem setup * * ----------------------------------------------------------- */ /* Deprecated, don't use */ int fuse_lowlevel_is_lib_option(const char *opt); /** * Create a low level session * * @param args argument vector * @param op the low level filesystem operations * @param op_size sizeof(struct fuse_lowlevel_ops) * @param userdata user data * @return the created session object, or NULL on failure */ struct fuse_session *fuse_lowlevel_new(struct fuse_args *args, const struct fuse_lowlevel_ops *op, size_t op_size, void *userdata); /* ----------------------------------------------------------- * * Session interface * * ----------------------------------------------------------- */ struct fuse_session *fuse_session_new(void *data, void *receive_buf, void *process_buf, void *destroy); void fuse_session_add_chan(struct fuse_session *se, struct fuse_chan *ch); void fuse_session_remove_chan(struct fuse_chan *ch); void fuse_session_destroy(struct fuse_session *se); void fuse_session_exit(struct fuse_session *se); int fuse_session_exited(struct fuse_session *se); void fuse_session_reset(struct fuse_session *se); void *fuse_session_data(struct fuse_session *se); int fuse_session_receive(struct fuse_session *se, struct fuse_buf *buf); void fuse_session_process(struct fuse_session *se, const void *buf, const size_t bufsize); int fuse_session_loop_mt(struct fuse_session *se, const int read_thread_count, const int process_thread_count, const int process_thread_queue_depth, const char *pin_threads_type); /* ----------------------------------------------------------- * * Channel interface * * ----------------------------------------------------------- */ struct fuse_chan 
*fuse_chan_new(int fd, size_t bufsize); /** * Query the file descriptor of the channel * * @param ch the channel * @return the file descriptor passed to fuse_chan_new() */ int fuse_chan_fd(struct fuse_chan *ch); /** * Query the minimal receive buffer size * * @param ch the channel * @return the buffer size passed to fuse_chan_new() */ size_t fuse_chan_bufsize(struct fuse_chan *ch); /** * Query the user data * * @param ch the channel * @return the user data passed to fuse_chan_new() */ void *fuse_chan_data(struct fuse_chan *ch); /** * Query the session to which this channel is assigned * * @param ch the channel * @return the session, or NULL if the channel is not assigned */ struct fuse_session *fuse_chan_session(struct fuse_chan *ch); void fuse_chan_destroy(struct fuse_chan *ch); EXTERN_C_END #endif /* _FUSE_LOWLEVEL_H_ */ mergerfs-2.40.2/libfuse/include/fuse_msgbuf.h000066400000000000000000000002141457016576200211550ustar00rootroot00000000000000#pragma once #include typedef struct fuse_msgbuf_t fuse_msgbuf_t; struct fuse_msgbuf_t { uint32_t size; char *mem; }; mergerfs-2.40.2/libfuse/include/fuse_opt.h000066400000000000000000000165321457016576200205060ustar00rootroot00000000000000/* FUSE: Filesystem in Userspace Copyright (C) 2001-2007 Miklos Szeredi This program can be distributed under the terms of the GNU LGPLv2. See the file COPYING.LIB. */ #ifndef _FUSE_OPT_H_ #define _FUSE_OPT_H_ #include "extern_c.h" /** @file * * This file defines the option parsing interface of FUSE */ EXTERN_C_BEGIN /** * Option description * * This structure describes a single option, and action associated * with it, in case it matches. * * More than one such match may occur, in which case the action for * each match is executed. * * There are three possible actions in case of a match: * * i) An integer (int or unsigned) variable determined by 'offset' is * set to 'value' * * ii) The processing function is called, with 'value' as the key * * iii) An integer (any) or string (char *) variable determined by * 'offset' is set to the value of an option parameter * * 'offset' should normally be either set to * * - 'offsetof(struct foo, member)' actions i) and iii) * * - -1 action ii) * * The 'offsetof()' macro is defined in the header. * * The template determines which options match, and also have an * effect on the action. Normally the action is either i) or ii), but * if a format is present in the template, then action iii) is * performed. * * The types of templates are: * * 1) "-x", "-foo", "--foo", "--foo-bar", etc. These match only * themselves. Invalid values are "--" and anything beginning * with "-o" * * 2) "foo", "foo-bar", etc. These match "-ofoo", "-ofoo-bar" or * the relevant option in a comma separated option list * * 3) "bar=", "--foo=", etc. These are variations of 1) and 2) * which have a parameter * * 4) "bar=%s", "--foo=%lu", etc. Same matching as above but perform * action iii). * * 5) "-x ", etc. Matches either "-xparam" or "-x param" as * two separate arguments * * 6) "-x %s", etc. Combination of 4) and 5) * * If the format is "%s", memory is allocated for the string unlike * with scanf(). */ struct fuse_opt { /** Matching template and optional parameter formatting */ const char *templ; /** * Offset of variable within 'data' parameter of fuse_opt_parse() * or -1 */ unsigned long offset; /** * Value to set the variable to, or to be passed as 'key' to the * processing function. Ignored if template has a format */ int value; }; /** * Key option. 
In case of a match, the processing function will be * called with the specified key. */ #define FUSE_OPT_KEY(templ, key) { templ, -1U, key } /** * Last option. An array of 'struct fuse_opt' must end with a NULL * template value */ #define FUSE_OPT_END { NULL, 0, 0 } /** * Argument list */ struct fuse_args { /** Argument count */ int argc; /** Argument vector. NULL terminated */ char **argv; /** Is 'argv' allocated? */ int allocated; }; /** * Initializer for 'struct fuse_args' */ #define FUSE_ARGS_INIT(argc, argv) { argc, argv, 0 } /** * Key value passed to the processing function if an option did not * match any template */ #define FUSE_OPT_KEY_OPT -1 /** * Key value passed to the processing function for all non-options * * Non-options are the arguments beginning with a character other than * '-' or all arguments after the special '--' option */ #define FUSE_OPT_KEY_NONOPT -2 /** * Special key value for options to keep * * Argument is not passed to processing function, but behave as if the * processing function returned 1 */ #define FUSE_OPT_KEY_KEEP -3 /** * Special key value for options to discard * * Argument is not passed to processing function, but behave as if the * processing function returned zero */ #define FUSE_OPT_KEY_DISCARD -4 /** * Processing function * * This function is called if * - option did not match any 'struct fuse_opt' * - argument is a non-option * - option did match and offset was set to -1 * * The 'arg' parameter will always contain the whole argument or * option including the parameter if exists. A two-argument option * ("-x foo") is always converted to single argument option of the * form "-xfoo" before this function is called. * * Options of the form '-ofoo' are passed to this function without the * '-o' prefix. * * The return value of this function determines whether this argument * is to be inserted into the output argument vector, or discarded. 
* * @param data is the user data passed to the fuse_opt_parse() function * @param arg is the whole argument or option * @param key determines why the processing function was called * @param outargs the current output argument list * @return -1 on error, 0 if arg is to be discarded, 1 if arg should be kept */ typedef int (*fuse_opt_proc_t)(void *data, const char *arg, int key, struct fuse_args *outargs); /** * Option parsing function * * If 'args' was returned from a previous call to fuse_opt_parse() or * it was constructed from * * A NULL 'args' is equivalent to an empty argument vector * * A NULL 'opts' is equivalent to an 'opts' array containing a single * end marker * * A NULL 'proc' is equivalent to a processing function always * returning '1' * * @param args is the input and output argument list * @param data is the user data * @param opts is the option description array * @param proc is the processing function * @return -1 on error, 0 on success */ int fuse_opt_parse(struct fuse_args *args, void *data, const struct fuse_opt opts[], fuse_opt_proc_t proc); /** * Add an option to a comma separated option list * * @param opts is a pointer to an option list, may point to a NULL value * @param opt is the option to add * @return -1 on allocation error, 0 on success */ int fuse_opt_add_opt(char **opts, const char *opt); /** * Add an option, escaping commas, to a comma separated option list * * @param opts is a pointer to an option list, may point to a NULL value * @param opt is the option to add * @return -1 on allocation error, 0 on success */ int fuse_opt_add_opt_escaped(char **opts, const char *opt); /** * Add an argument to a NULL terminated argument vector * * @param args is the structure containing the current argument list * @param arg is the new argument to add * @return -1 on allocation error, 0 on success */ int fuse_opt_add_arg(struct fuse_args *args, const char *arg); /** * Add an argument at the specified position in a NULL terminated * argument vector * * Adds the argument to the N-th position. This is useful for adding * options at the beginning of the array which must not come after the * special '--' option. 
* * @param args is the structure containing the current argument list * @param pos is the position at which to add the argument * @param arg is the new argument to add * @return -1 on allocation error, 0 on success */ int fuse_opt_insert_arg(struct fuse_args *args, int pos, const char *arg); /** * Free the contents of argument list * * The structure itself is not freed * * @param args is the structure containing the argument list */ void fuse_opt_free_args(struct fuse_args *args); /** * Check if an option matches * * @param opts is the option description array * @param opt is the option to match * @return 1 if a match is found, 0 if not */ int fuse_opt_match(const struct fuse_opt opts[], const char *opt); EXTERN_C_END #endif /* _FUSE_OPT_H_ */ mergerfs-2.40.2/libfuse/include/fuse_pollhandle.h000066400000000000000000000003241457016576200220160ustar00rootroot00000000000000#pragma once #include struct fuse_chan; struct fuse_ll; typedef struct fuse_pollhandle_t fuse_pollhandle_t; struct fuse_pollhandle_t { uint64_t kh; struct fuse_chan *ch; struct fuse_ll *f; }; mergerfs-2.40.2/libfuse/include/fuse_timeouts.h000066400000000000000000000002221457016576200215420ustar00rootroot00000000000000#pragma once #include typedef struct fuse_timeouts_s fuse_timeouts_t; struct fuse_timeouts_s { uint64_t entry; uint64_t attr; }; mergerfs-2.40.2/libfuse/include/kvec.h000066400000000000000000000066451457016576200176160ustar00rootroot00000000000000/* The MIT License Copyright (c) 2008, by Attractive Chaos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* An example: #include "kvec.h" int main() { kvec_t(int) array; kv_init(array); kv_push(int, array, 10); // append kv_a(int, array, 20) = 5; // dynamic kv_A(array, 20) = 4; // static kv_destroy(array); return 0; } */ /* 2008-09-22 (0.1.0): * The initial version. 
*/ #ifndef AC_KVEC_H #define AC_KVEC_H #include #define kv_roundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x)) #define kvec_t(type) struct { size_t n, m; type *a; } #define kv_init(v) ((v).n = (v).m = 0, (v).a = 0) #define kv_destroy(v) free((v).a) #define kv_A(v, i) ((v).a[(i)]) #define kv_pop(v) ((v).a[--(v).n]) #define kv_size(v) ((v).n) #define kv_max(v) ((v).m) #define kv_first(v) (kv_A(v,0)) #define kv_last(v) (kv_A(v,kv_size(v)-1)) #define kv_end(v) (kv_A(v,kv_size(v))) #define kv_delete(v,i) (kv_A(v,i) = kv_pop(v)) #define kv_resize(type, v, s) ((v).m = (s), (v).a = (type*)realloc((v).a, sizeof(type) * (v).m)) #define kv_copy(type, v1, v0) do { \ if ((v1).m < (v0).n) kv_resize(type, v1, (v0).n); \ (v1).n = (v0).n; \ memcpy((v1).a, (v0).a, sizeof(type) * (v0).n); \ } while (0) \ #define kv_push(type, v, x) do { \ if ((v).n == (v).m) { \ (v).m = (v).m? (v).m<<1 : 2; \ (v).a = (type*)realloc((v).a, sizeof(type) * (v).m); \ } \ (v).a[(v).n++] = (x); \ } while (0) #define kv_pushp(type, v) (((v).n == (v).m)? \ ((v).m = ((v).m? (v).m<<1 : 2), \ (v).a = (type*)realloc((v).a, sizeof(type) * (v).m), 0) \ : 0), ((v).a + ((v).n++)) #define kv_a(type, v, i) (((v).m <= (size_t)(i)? \ ((v).m = (v).n = (i) + 1, kv_roundup32((v).m), \ (v).a = (type*)realloc((v).a, sizeof(type) * (v).m), 0) \ : (v).n <= (size_t)(i)? (v).n = (i) + 1 \ : 0), (v).a[(i)]) #endif mergerfs-2.40.2/libfuse/include/linux_dirent64.h000066400000000000000000000004141457016576200215300ustar00rootroot00000000000000#pragma once #include #include #define DIRENT_NAMELEN(X) (strlen((X)->name)) typedef struct linux_dirent64_t linux_dirent64_t; struct linux_dirent64_t { uint64_t ino; int64_t off; uint16_t reclen; uint8_t type; char name[]; }; mergerfs-2.40.2/libfuse/include/moodycamel/000077500000000000000000000000001457016576200206335ustar00rootroot00000000000000mergerfs-2.40.2/libfuse/include/moodycamel/blockingconcurrentqueue.h000066400000000000000000000520141457016576200257460ustar00rootroot00000000000000// Provides an efficient blocking version of moodycamel::ConcurrentQueue. // ©2015-2020 Cameron Desrochers. Distributed under the terms of the simplified // BSD license, available at the top of concurrentqueue.h. // Also dual-licensed under the Boost Software License (see LICENSE.md) // Uses Jeff Preshing's semaphore implementation (under the terms of its // separate zlib license, see lightweightsemaphore.h). #pragma once #include "concurrentqueue.h" #include "lightweightsemaphore.h" #include #include #include #include #include namespace moodycamel { // This is a blocking version of the queue. It has an almost identical interface to // the normal non-blocking version, with the addition of various wait_dequeue() methods // and the removal of producer-specific dequeue methods. 
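// Illustrative usage sketch (not part of this header); the element type,
// values and timeout below are assumptions chosen only for the example:
//
//   moodycamel::BlockingConcurrentQueue<int> q;
//
//   // producer thread
//   q.enqueue(42);
//
//   // consumer thread: blocks until an item arrives or ~100ms elapse
//   int item;
//   if (q.wait_dequeue_timed(item, std::chrono::milliseconds(100))) {
//     // process `item`
//   }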
template class BlockingConcurrentQueue { private: typedef ::moodycamel::ConcurrentQueue ConcurrentQueue; typedef ::moodycamel::LightweightSemaphore LightweightSemaphore; public: typedef typename ConcurrentQueue::producer_token_t producer_token_t; typedef typename ConcurrentQueue::consumer_token_t consumer_token_t; typedef typename ConcurrentQueue::index_t index_t; typedef typename ConcurrentQueue::size_t size_t; typedef typename std::make_signed::type ssize_t; static const size_t BLOCK_SIZE = ConcurrentQueue::BLOCK_SIZE; static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = ConcurrentQueue::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD; static const size_t EXPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::EXPLICIT_INITIAL_INDEX_SIZE; static const size_t IMPLICIT_INITIAL_INDEX_SIZE = ConcurrentQueue::IMPLICIT_INITIAL_INDEX_SIZE; static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = ConcurrentQueue::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = ConcurrentQueue::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE; static const size_t MAX_SUBQUEUE_SIZE = ConcurrentQueue::MAX_SUBQUEUE_SIZE; public: // Creates a queue with at least `capacity` element slots; note that the // actual number of elements that can be inserted without additional memory // allocation depends on the number of producers and the block size (e.g. if // the block size is equal to `capacity`, only a single block will be allocated // up-front, which means only a single producer will be able to enqueue elements // without an extra allocation -- blocks aren't shared between producers). // This method is not thread safe -- it is up to the user to ensure that the // queue is fully constructed before it starts being used by other threads (this // includes making the memory effects of construction visible, possibly with a // memory barrier). explicit BlockingConcurrentQueue(size_t capacity = 6 * BLOCK_SIZE) : inner(capacity), sema(create(0, (int)Traits::MAX_SEMA_SPINS), &BlockingConcurrentQueue::template destroy) { assert(reinterpret_cast((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member"); if (!sema) { MOODYCAMEL_THROW(std::bad_alloc()); } } BlockingConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers) : inner(minCapacity, maxExplicitProducers, maxImplicitProducers), sema(create(0, (int)Traits::MAX_SEMA_SPINS), &BlockingConcurrentQueue::template destroy) { assert(reinterpret_cast((BlockingConcurrentQueue*)1) == &((BlockingConcurrentQueue*)1)->inner && "BlockingConcurrentQueue must have ConcurrentQueue as its first member"); if (!sema) { MOODYCAMEL_THROW(std::bad_alloc()); } } // Disable copying and copy assignment BlockingConcurrentQueue(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; BlockingConcurrentQueue& operator=(BlockingConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; // Moving is supported, but note that it is *not* a thread-safe operation. // Nobody can use the queue while it's being moved, and the memory effects // of that move must be propagated to other threads before they can use it. // Note: When a queue is moved, its tokens are still valid but can only be // used with the destination queue (i.e. semantically they are moved along // with the queue itself). 
BlockingConcurrentQueue(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT : inner(std::move(other.inner)), sema(std::move(other.sema)) { } inline BlockingConcurrentQueue& operator=(BlockingConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT { return swap_internal(other); } // Swaps this queue's state with the other's. Not thread-safe. // Swapping two queues does not invalidate their tokens, however // the tokens that were created for one queue must be used with // only the swapped queue (i.e. the tokens are tied to the // queue's movable state, not the object itself). inline void swap(BlockingConcurrentQueue& other) MOODYCAMEL_NOEXCEPT { swap_internal(other); } private: BlockingConcurrentQueue& swap_internal(BlockingConcurrentQueue& other) { if (this == &other) { return *this; } inner.swap(other.inner); sema.swap(other.sema); return *this; } public: // Enqueues a single item (by copying it). // Allocates memory if required. Only fails if memory allocation fails (or implicit // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Thread-safe. inline bool enqueue(T const& item) { if ((details::likely)(inner.enqueue(item))) { sema->signal(); return true; } return false; } // Enqueues a single item (by moving it, if possible). // Allocates memory if required. Only fails if memory allocation fails (or implicit // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Thread-safe. inline bool enqueue(T&& item) { if ((details::likely)(inner.enqueue(std::move(item)))) { sema->signal(); return true; } return false; } // Enqueues a single item (by copying it) using an explicit producer token. // Allocates memory if required. Only fails if memory allocation fails (or // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Thread-safe. inline bool enqueue(producer_token_t const& token, T const& item) { if ((details::likely)(inner.enqueue(token, item))) { sema->signal(); return true; } return false; } // Enqueues a single item (by moving it, if possible) using an explicit producer token. // Allocates memory if required. Only fails if memory allocation fails (or // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Thread-safe. inline bool enqueue(producer_token_t const& token, T&& item) { if ((details::likely)(inner.enqueue(token, std::move(item)))) { sema->signal(); return true; } return false; } // Enqueues several items. // Allocates memory if required. Only fails if memory allocation fails (or // implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE // is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Note: Use std::make_move_iterator if the elements should be moved instead of copied. // Thread-safe. template inline bool enqueue_bulk(It itemFirst, size_t count) { if ((details::likely)(inner.enqueue_bulk(std::forward(itemFirst), count))) { sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); return true; } return false; } // Enqueues several items using an explicit producer token. // Allocates memory if required. Only fails if memory allocation fails // (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Note: Use std::make_move_iterator if the elements should be moved // instead of copied. // Thread-safe. 
template inline bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) { if ((details::likely)(inner.enqueue_bulk(token, std::forward(itemFirst), count))) { sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); return true; } return false; } // Enqueues a single item (by copying it). // Does not allocate memory. Fails if not enough room to enqueue (or implicit // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE // is 0). // Thread-safe. inline bool try_enqueue(T const& item) { if (inner.try_enqueue(item)) { sema->signal(); return true; } return false; } // Enqueues a single item (by moving it, if possible). // Does not allocate memory (except for one-time implicit producer). // Fails if not enough room to enqueue (or implicit production is // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). // Thread-safe. inline bool try_enqueue(T&& item) { if (inner.try_enqueue(std::move(item))) { sema->signal(); return true; } return false; } // Enqueues a single item (by copying it) using an explicit producer token. // Does not allocate memory. Fails if not enough room to enqueue. // Thread-safe. inline bool try_enqueue(producer_token_t const& token, T const& item) { if (inner.try_enqueue(token, item)) { sema->signal(); return true; } return false; } // Enqueues a single item (by moving it, if possible) using an explicit producer token. // Does not allocate memory. Fails if not enough room to enqueue. // Thread-safe. inline bool try_enqueue(producer_token_t const& token, T&& item) { if (inner.try_enqueue(token, std::move(item))) { sema->signal(); return true; } return false; } // Enqueues several items. // Does not allocate memory (except for one-time implicit producer). // Fails if not enough room to enqueue (or implicit production is // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). // Note: Use std::make_move_iterator if the elements should be moved // instead of copied. // Thread-safe. template inline bool try_enqueue_bulk(It itemFirst, size_t count) { if (inner.try_enqueue_bulk(std::forward(itemFirst), count)) { sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); return true; } return false; } // Enqueues several items using an explicit producer token. // Does not allocate memory. Fails if not enough room to enqueue. // Note: Use std::make_move_iterator if the elements should be moved // instead of copied. // Thread-safe. template inline bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) { if (inner.try_enqueue_bulk(token, std::forward(itemFirst), count)) { sema->signal((LightweightSemaphore::ssize_t)(ssize_t)count); return true; } return false; } // Attempts to dequeue from the queue. // Returns false if all producer streams appeared empty at the time they // were checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe. template inline bool try_dequeue(U& item) { if (sema->tryWait()) { while (!inner.try_dequeue(item)) { continue; } return true; } return false; } // Attempts to dequeue from the queue using an explicit consumer token. // Returns false if all producer streams appeared empty at the time they // were checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe. 
template inline bool try_dequeue(consumer_token_t& token, U& item) { if (sema->tryWait()) { while (!inner.try_dequeue(token, item)) { continue; } return true; } return false; } // Attempts to dequeue several elements from the queue. // Returns the number of items actually dequeued. // Returns 0 if all producer streams appeared empty at the time they // were checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe. template inline size_t try_dequeue_bulk(It itemFirst, size_t max) { size_t count = 0; max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max); while (count != max) { count += inner.template try_dequeue_bulk(itemFirst, max - count); } return count; } // Attempts to dequeue several elements from the queue using an explicit consumer token. // Returns the number of items actually dequeued. // Returns 0 if all producer streams appeared empty at the time they // were checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe. template inline size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) { size_t count = 0; max = (size_t)sema->tryWaitMany((LightweightSemaphore::ssize_t)(ssize_t)max); while (count != max) { count += inner.template try_dequeue_bulk(token, itemFirst, max - count); } return count; } // Blocks the current thread until there's something to dequeue, then // dequeues it. // Never allocates. Thread-safe. template inline void wait_dequeue(U& item) { while (!sema->wait()) { continue; } while (!inner.try_dequeue(item)) { continue; } } // Blocks the current thread until either there's something to dequeue // or the timeout (specified in microseconds) expires. Returns false // without setting `item` if the timeout expires, otherwise assigns // to `item` and returns true. // Using a negative timeout indicates an indefinite timeout, // and is thus functionally equivalent to calling wait_dequeue. // Never allocates. Thread-safe. template inline bool wait_dequeue_timed(U& item, std::int64_t timeout_usecs) { if (!sema->wait(timeout_usecs)) { return false; } while (!inner.try_dequeue(item)) { continue; } return true; } // Blocks the current thread until either there's something to dequeue // or the timeout expires. Returns false without setting `item` if the // timeout expires, otherwise assigns to `item` and returns true. // Never allocates. Thread-safe. template inline bool wait_dequeue_timed(U& item, std::chrono::duration const& timeout) { return wait_dequeue_timed(item, std::chrono::duration_cast(timeout).count()); } // Blocks the current thread until there's something to dequeue, then // dequeues it using an explicit consumer token. // Never allocates. Thread-safe. template inline void wait_dequeue(consumer_token_t& token, U& item) { while (!sema->wait()) { continue; } while (!inner.try_dequeue(token, item)) { continue; } } // Blocks the current thread until either there's something to dequeue // or the timeout (specified in microseconds) expires. Returns false // without setting `item` if the timeout expires, otherwise assigns // to `item` and returns true. // Using a negative timeout indicates an indefinite timeout, // and is thus functionally equivalent to calling wait_dequeue. // Never allocates. Thread-safe. 
template inline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::int64_t timeout_usecs) { if (!sema->wait(timeout_usecs)) { return false; } while (!inner.try_dequeue(token, item)) { continue; } return true; } // Blocks the current thread until either there's something to dequeue // or the timeout expires. Returns false without setting `item` if the // timeout expires, otherwise assigns to `item` and returns true. // Never allocates. Thread-safe. template inline bool wait_dequeue_timed(consumer_token_t& token, U& item, std::chrono::duration const& timeout) { return wait_dequeue_timed(token, item, std::chrono::duration_cast(timeout).count()); } // Attempts to dequeue several elements from the queue. // Returns the number of items actually dequeued, which will // always be at least one (this method blocks until the queue // is non-empty) and at most max. // Never allocates. Thread-safe. template inline size_t wait_dequeue_bulk(It itemFirst, size_t max) { size_t count = 0; max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max); while (count != max) { count += inner.template try_dequeue_bulk(itemFirst, max - count); } return count; } // Attempts to dequeue several elements from the queue. // Returns the number of items actually dequeued, which can // be 0 if the timeout expires while waiting for elements, // and at most max. // Using a negative timeout indicates an indefinite timeout, // and is thus functionally equivalent to calling wait_dequeue_bulk. // Never allocates. Thread-safe. template inline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::int64_t timeout_usecs) { size_t count = 0; max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs); while (count != max) { count += inner.template try_dequeue_bulk(itemFirst, max - count); } return count; } // Attempts to dequeue several elements from the queue. // Returns the number of items actually dequeued, which can // be 0 if the timeout expires while waiting for elements, // and at most max. // Never allocates. Thread-safe. template inline size_t wait_dequeue_bulk_timed(It itemFirst, size_t max, std::chrono::duration const& timeout) { return wait_dequeue_bulk_timed(itemFirst, max, std::chrono::duration_cast(timeout).count()); } // Attempts to dequeue several elements from the queue using an explicit consumer token. // Returns the number of items actually dequeued, which will // always be at least one (this method blocks until the queue // is non-empty) and at most max. // Never allocates. Thread-safe. template inline size_t wait_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) { size_t count = 0; max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max); while (count != max) { count += inner.template try_dequeue_bulk(token, itemFirst, max - count); } return count; } // Attempts to dequeue several elements from the queue using an explicit consumer token. // Returns the number of items actually dequeued, which can // be 0 if the timeout expires while waiting for elements, // and at most max. // Using a negative timeout indicates an indefinite timeout, // and is thus functionally equivalent to calling wait_dequeue_bulk. // Never allocates. Thread-safe. 
template inline size_t wait_dequeue_bulk_timed(consumer_token_t& token, It itemFirst, size_t max, std::int64_t timeout_usecs) { size_t count = 0; max = (size_t)sema->waitMany((LightweightSemaphore::ssize_t)(ssize_t)max, timeout_usecs); while (count != max) { count += inner.template try_dequeue_bulk(token, itemFirst, max - count); } return count; } // Attempts to dequeue several elements from the queue using an explicit consumer token. // Returns the number of items actually dequeued, which can // be 0 if the timeout expires while waiting for elements, // and at most max. // Never allocates. Thread-safe. template inline size_t wait_dequeue_bulk_timed(consumer_token_t& token, It itemFirst, size_t max, std::chrono::duration const& timeout) { return wait_dequeue_bulk_timed(token, itemFirst, max, std::chrono::duration_cast(timeout).count()); } // Returns an estimate of the total number of elements currently in the queue. This // estimate is only accurate if the queue has completely stabilized before it is called // (i.e. all enqueue and dequeue operations have completed and their memory effects are // visible on the calling thread, and no further operations start while this method is // being called). // Thread-safe. inline size_t size_approx() const { return (size_t)sema->availableApprox(); } // Returns true if the underlying atomic variables used by // the queue are lock-free (they should be on most platforms). // Thread-safe. static constexpr bool is_lock_free() { return ConcurrentQueue::is_lock_free(); } private: template static inline U* create(A1&& a1, A2&& a2) { void* p = (Traits::malloc)(sizeof(U)); return p != nullptr ? new (p) U(std::forward(a1), std::forward(a2)) : nullptr; } template static inline void destroy(U* p) { if (p != nullptr) { p->~U(); } (Traits::free)(p); } private: ConcurrentQueue inner; std::unique_ptr sema; }; template inline void swap(BlockingConcurrentQueue& a, BlockingConcurrentQueue& b) MOODYCAMEL_NOEXCEPT { a.swap(b); } } // end namespace moodycamel mergerfs-2.40.2/libfuse/include/moodycamel/concurrentqueue.h000066400000000000000000004524571457016576200242540ustar00rootroot00000000000000// Provides a C++11 implementation of a multi-producer, multi-consumer lock-free queue. // An overview, including benchmark results, is provided here: // http://moodycamel.com/blog/2014/a-fast-general-purpose-lock-free-queue-for-c++ // The full design is also described in excruciating detail at: // http://moodycamel.com/blog/2014/detailed-design-of-a-lock-free-queue // Simplified BSD license: // Copyright (c) 2013-2020, Cameron Desrochers. // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // - Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // - Redistributions in binary form must reproduce the above copyright notice, this list of // conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL // THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT // OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Also dual-licensed under the Boost Software License (see LICENSE.md) #pragma once #if defined(__GNUC__) && !defined(__INTEL_COMPILER) // Disable -Wconversion warnings (spuriously triggered when Traits::size_t and // Traits::index_t are set to < 32 bits, causing integer promotion, causing warnings // upon assigning any computed values) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #ifdef MCDBGQ_USE_RELACY #pragma GCC diagnostic ignored "-Wint-to-pointer-cast" #endif #endif #if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17) // VS2019 with /W4 warns about constant conditional expressions but unless /std=c++17 or higher // does not support `if constexpr`, so we have no choice but to simply disable the warning #pragma warning(push) #pragma warning(disable: 4127) // conditional expression is constant #endif #if defined(__APPLE__) #include "TargetConditionals.h" #endif #ifdef MCDBGQ_USE_RELACY #include "relacy/relacy_std.hpp" #include "relacy_shims.h" // We only use malloc/free anyway, and the delete macro messes up `= delete` method declarations. // We'll override the default trait malloc ourselves without a macro. #undef new #undef delete #undef malloc #undef free #else #include // Requires C++11. Sorry VS2010. #include #endif #include // for max_align_t #include #include #include #include #include #include #include // for CHAR_BIT #include #include // partly for __WINPTHREADS_VERSION if on MinGW-w64 w/ POSIX threading #include // used for thread exit synchronization // Platform-specific definitions of a numeric thread ID type and an invalid value namespace moodycamel { namespace details { template struct thread_id_converter { typedef thread_id_t thread_id_numeric_size_t; typedef thread_id_t thread_id_hash_t; static thread_id_hash_t prehash(thread_id_t const& x) { return x; } }; } } #if defined(MCDBGQ_USE_RELACY) namespace moodycamel { namespace details { typedef std::uint32_t thread_id_t; static const thread_id_t invalid_thread_id = 0xFFFFFFFFU; static const thread_id_t invalid_thread_id2 = 0xFFFFFFFEU; static inline thread_id_t thread_id() { return rl::thread_index(); } } } #elif defined(_WIN32) || defined(__WINDOWS__) || defined(__WIN32__) // No sense pulling in windows.h in a header, we'll manually declare the function // we use and rely on backwards-compatibility for this not to break extern "C" __declspec(dllimport) unsigned long __stdcall GetCurrentThreadId(void); namespace moodycamel { namespace details { static_assert(sizeof(unsigned long) == sizeof(std::uint32_t), "Expected size of unsigned long to be 32 bits on Windows"); typedef std::uint32_t thread_id_t; static const thread_id_t invalid_thread_id = 0; // See http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx static const thread_id_t invalid_thread_id2 = 0xFFFFFFFFU; // Not technically guaranteed to be invalid, but is never used in practice. Note that all Win32 thread IDs are presently multiples of 4. 
static inline thread_id_t thread_id() { return static_cast(::GetCurrentThreadId()); } } } #elif defined(__arm__) || defined(_M_ARM) || defined(__aarch64__) || (defined(__APPLE__) && TARGET_OS_IPHONE) || defined(__MVS__) || defined(MOODYCAMEL_NO_THREAD_LOCAL) namespace moodycamel { namespace details { static_assert(sizeof(std::thread::id) == 4 || sizeof(std::thread::id) == 8, "std::thread::id is expected to be either 4 or 8 bytes"); typedef std::thread::id thread_id_t; static const thread_id_t invalid_thread_id; // Default ctor creates invalid ID // Note we don't define a invalid_thread_id2 since std::thread::id doesn't have one; it's // only used if MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is defined anyway, which it won't // be. static inline thread_id_t thread_id() { return std::this_thread::get_id(); } template struct thread_id_size { }; template<> struct thread_id_size<4> { typedef std::uint32_t numeric_t; }; template<> struct thread_id_size<8> { typedef std::uint64_t numeric_t; }; template<> struct thread_id_converter { typedef thread_id_size::numeric_t thread_id_numeric_size_t; #ifndef __APPLE__ typedef std::size_t thread_id_hash_t; #else typedef thread_id_numeric_size_t thread_id_hash_t; #endif static thread_id_hash_t prehash(thread_id_t const& x) { #ifndef __APPLE__ return std::hash()(x); #else return *reinterpret_cast(&x); #endif } }; } } #else // Use a nice trick from this answer: http://stackoverflow.com/a/8438730/21475 // In order to get a numeric thread ID in a platform-independent way, we use a thread-local // static variable's address as a thread identifier :-) #if defined(__GNUC__) || defined(__INTEL_COMPILER) #define MOODYCAMEL_THREADLOCAL __thread #elif defined(_MSC_VER) #define MOODYCAMEL_THREADLOCAL __declspec(thread) #else // Assume C++11 compliant compiler #define MOODYCAMEL_THREADLOCAL thread_local #endif namespace moodycamel { namespace details { typedef std::uintptr_t thread_id_t; static const thread_id_t invalid_thread_id = 0; // Address can't be nullptr static const thread_id_t invalid_thread_id2 = 1; // Member accesses off a null pointer are also generally invalid. Plus it's not aligned. inline thread_id_t thread_id() { static MOODYCAMEL_THREADLOCAL int x; return reinterpret_cast(&x); } } } #endif // Constexpr if #ifndef MOODYCAMEL_CONSTEXPR_IF #if (defined(_MSC_VER) && defined(_HAS_CXX17) && _HAS_CXX17) || __cplusplus > 201402L #define MOODYCAMEL_CONSTEXPR_IF if constexpr #define MOODYCAMEL_MAYBE_UNUSED [[maybe_unused]] #else #define MOODYCAMEL_CONSTEXPR_IF if #define MOODYCAMEL_MAYBE_UNUSED #endif #endif // Exceptions #ifndef MOODYCAMEL_EXCEPTIONS_ENABLED #if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || (!defined(_MSC_VER) && !defined(__GNUC__)) #define MOODYCAMEL_EXCEPTIONS_ENABLED #endif #endif #ifdef MOODYCAMEL_EXCEPTIONS_ENABLED #define MOODYCAMEL_TRY try #define MOODYCAMEL_CATCH(...) catch(__VA_ARGS__) #define MOODYCAMEL_RETHROW throw #define MOODYCAMEL_THROW(expr) throw (expr) #else #define MOODYCAMEL_TRY MOODYCAMEL_CONSTEXPR_IF (true) #define MOODYCAMEL_CATCH(...) 
else MOODYCAMEL_CONSTEXPR_IF (false) #define MOODYCAMEL_RETHROW #define MOODYCAMEL_THROW(expr) #endif #ifndef MOODYCAMEL_NOEXCEPT #if !defined(MOODYCAMEL_EXCEPTIONS_ENABLED) #define MOODYCAMEL_NOEXCEPT #define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) true #define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) true #elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1800 // VS2012's std::is_nothrow_[move_]constructible is broken and returns true when it shouldn't :-( // We have to assume *all* non-trivial constructors may throw on VS2012! #define MOODYCAMEL_NOEXCEPT _NOEXCEPT #define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference::value && std::is_move_constructible::value ? std::is_trivially_move_constructible::value : std::is_trivially_copy_constructible::value) #define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference::value && std::is_move_assignable::value ? std::is_trivially_move_assignable::value || std::is_nothrow_move_assignable::value : std::is_trivially_copy_assignable::value || std::is_nothrow_copy_assignable::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr)) #elif defined(_MSC_VER) && defined(_NOEXCEPT) && _MSC_VER < 1900 #define MOODYCAMEL_NOEXCEPT _NOEXCEPT #define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) (std::is_rvalue_reference::value && std::is_move_constructible::value ? std::is_trivially_move_constructible::value || std::is_nothrow_move_constructible::value : std::is_trivially_copy_constructible::value || std::is_nothrow_copy_constructible::value) #define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) ((std::is_rvalue_reference::value && std::is_move_assignable::value ? std::is_trivially_move_assignable::value || std::is_nothrow_move_assignable::value : std::is_trivially_copy_assignable::value || std::is_nothrow_copy_assignable::value) && MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr)) #else #define MOODYCAMEL_NOEXCEPT noexcept #define MOODYCAMEL_NOEXCEPT_CTOR(type, valueType, expr) noexcept(expr) #define MOODYCAMEL_NOEXCEPT_ASSIGN(type, valueType, expr) noexcept(expr) #endif #endif #ifndef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED #ifdef MCDBGQ_USE_RELACY #define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED #else // VS2013 doesn't support `thread_local`, and MinGW-w64 w/ POSIX threading has a crippling bug: http://sourceforge.net/p/mingw-w64/bugs/445 // g++ <=4.7 doesn't support thread_local either. // Finally, iOS/ARM doesn't have support for it either, and g++/ARM allows it to compile but it's unconfirmed to actually work #if (!defined(_MSC_VER) || _MSC_VER >= 1900) && (!defined(__MINGW32__) && !defined(__MINGW64__) || !defined(__WINPTHREADS_VERSION)) && (!defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) && (!defined(__APPLE__) || !TARGET_OS_IPHONE) && !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(__MVS__) // Assume `thread_local` is fully supported in all other C++11 compilers/platforms #define MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED // tentatively enabled for now; years ago several users report having problems with it on #endif #endif #endif // VS2012 doesn't support deleted functions. // In this case, we declare the function normally but don't define it. A link error will be generated if the function is called. 
#ifndef MOODYCAMEL_DELETE_FUNCTION #if defined(_MSC_VER) && _MSC_VER < 1800 #define MOODYCAMEL_DELETE_FUNCTION #else #define MOODYCAMEL_DELETE_FUNCTION = delete #endif #endif namespace moodycamel { namespace details { #ifndef MOODYCAMEL_ALIGNAS // VS2013 doesn't support alignas or alignof, and align() requires a constant literal #if defined(_MSC_VER) && _MSC_VER <= 1800 #define MOODYCAMEL_ALIGNAS(alignment) __declspec(align(alignment)) #define MOODYCAMEL_ALIGNOF(obj) __alignof(obj) #define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) typename details::Vs2013Aligned::value, T>::type template struct Vs2013Aligned { }; // default, unsupported alignment template struct Vs2013Aligned<1, T> { typedef __declspec(align(1)) T type; }; template struct Vs2013Aligned<2, T> { typedef __declspec(align(2)) T type; }; template struct Vs2013Aligned<4, T> { typedef __declspec(align(4)) T type; }; template struct Vs2013Aligned<8, T> { typedef __declspec(align(8)) T type; }; template struct Vs2013Aligned<16, T> { typedef __declspec(align(16)) T type; }; template struct Vs2013Aligned<32, T> { typedef __declspec(align(32)) T type; }; template struct Vs2013Aligned<64, T> { typedef __declspec(align(64)) T type; }; template struct Vs2013Aligned<128, T> { typedef __declspec(align(128)) T type; }; template struct Vs2013Aligned<256, T> { typedef __declspec(align(256)) T type; }; #else template struct identity { typedef T type; }; #define MOODYCAMEL_ALIGNAS(alignment) alignas(alignment) #define MOODYCAMEL_ALIGNOF(obj) alignof(obj) #define MOODYCAMEL_ALIGNED_TYPE_LIKE(T, obj) alignas(alignof(obj)) typename details::identity::type #endif #endif } } // TSAN can false report races in lock-free code. To enable TSAN to be used from projects that use this one, // we can apply per-function compile-time suppression. // See https://clang.llvm.org/docs/ThreadSanitizer.html#has-feature-thread-sanitizer #define MOODYCAMEL_NO_TSAN #if defined(__has_feature) #if __has_feature(thread_sanitizer) #undef MOODYCAMEL_NO_TSAN #define MOODYCAMEL_NO_TSAN __attribute__((no_sanitize("thread"))) #endif // TSAN #endif // TSAN // Compiler-specific likely/unlikely hints namespace moodycamel { namespace details { #if defined(__GNUC__) static inline bool (likely)(bool x) { return __builtin_expect((x), true); } static inline bool (unlikely)(bool x) { return __builtin_expect((x), false); } #else static inline bool (likely)(bool x) { return x; } static inline bool (unlikely)(bool x) { return x; } #endif } } #ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG #include "internal/concurrentqueue_internal_debug.h" #endif namespace moodycamel { namespace details { template struct const_numeric_max { static_assert(std::is_integral::value, "const_numeric_max can only be used with integers"); static const T value = std::numeric_limits::is_signed ? (static_cast(1) << (sizeof(T) * CHAR_BIT - 1)) - static_cast(1) : static_cast(-1); }; #if defined(__GLIBCXX__) typedef ::max_align_t std_max_align_t; // libstdc++ forgot to add it to std:: for a while #else typedef std::max_align_t std_max_align_t; // Others (e.g. MSVC) insist it can *only* be accessed via std:: #endif // Some platforms have incorrectly set max_align_t to a type with <8 bytes alignment even while supporting // 8-byte aligned scalar values (*cough* 32-bit iOS). Work around this with our own union. See issue #64. typedef union { std_max_align_t x; long long y; void* z; } max_align_t; } // Default traits for the ConcurrentQueue. 
To change some of the // traits without re-implementing all of them, inherit from this // struct and shadow the declarations you wish to be different; // since the traits are used as a template type parameter, the // shadowed declarations will be used where defined, and the defaults // otherwise. struct ConcurrentQueueDefaultTraits { // General-purpose size type. std::size_t is strongly recommended. typedef std::size_t size_t; // The type used for the enqueue and dequeue indices. Must be at least as // large as size_t. Should be significantly larger than the number of elements // you expect to hold at once, especially if you have a high turnover rate; // for example, on 32-bit x86, if you expect to have over a hundred million // elements or pump several million elements through your queue in a very // short space of time, using a 32-bit type *may* trigger a race condition. // A 64-bit int type is recommended in that case, and in practice will // prevent a race condition no matter the usage of the queue. Note that // whether the queue is lock-free with a 64-bit int type depends on whether // std::atomic<std::uint64_t> is lock-free, which is platform-specific. typedef std::size_t index_t; // Internally, all elements are enqueued and dequeued from multi-element // blocks; this is the smallest controllable unit. If you expect few elements // but many producers, a smaller block size should be favoured. For few producers // and/or many elements, a larger block size is preferred. A sane default // is provided. Must be a power of 2. static const size_t BLOCK_SIZE = 32; // For explicit producers (i.e. when using a producer token), the block is // checked for being empty by iterating through a list of flags, one per element. // For large block sizes, this is too inefficient, and switching to an atomic // counter-based approach is faster. The switch is made for block sizes strictly // larger than this threshold. static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = 32; // How many full blocks can be expected for a single explicit producer? This should // reflect that number's maximum for optimal performance. Must be a power of 2. static const size_t EXPLICIT_INITIAL_INDEX_SIZE = 32; // How many full blocks can be expected for a single implicit producer? This should // reflect that number's maximum for optimal performance. Must be a power of 2. static const size_t IMPLICIT_INITIAL_INDEX_SIZE = 32; // The initial size of the hash table mapping thread IDs to implicit producers. // Note that the hash is resized every time it becomes half full. // Must be a power of two, and either 0 or at least 1. If 0, implicit production // (using the enqueue methods without an explicit producer token) is disabled. static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = 32; // Controls the number of items that an explicit consumer (i.e. one with a token) // must consume before it causes all consumers to rotate and move on to the next // internal queue. static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = 256; // The maximum number of elements (inclusive) that can be enqueued to a sub-queue. // Enqueue operations that would cause this limit to be surpassed will fail. Note // that this limit is enforced at the block level (for performance reasons), i.e. // it's rounded up to the nearest block size. static const size_t MAX_SUBQUEUE_SIZE = details::const_numeric_max<size_t>::value; // The number of times to spin before sleeping when waiting on a semaphore.
// Recommended values are on the order of 1000-10000 unless the number of // consumer threads exceeds the number of idle cores (in which case try 0-100). // Only affects instances of the BlockingConcurrentQueue. static const int MAX_SEMA_SPINS = 10000; // Whether to recycle dynamically-allocated blocks into an internal free list or // not. If false, only pre-allocated blocks (controlled by the constructor // arguments) will be recycled, and all others will be `free`d back to the heap. // Note that blocks consumed by explicit producers are only freed on destruction // of the queue (not following destruction of the token) regardless of this trait. static const bool RECYCLE_ALLOCATED_BLOCKS = false; #ifndef MCDBGQ_USE_RELACY // Memory allocation can be customized if needed. // malloc should return nullptr on failure, and handle alignment like std::malloc. #if defined(malloc) || defined(free) // Gah, this is 2015, stop defining macros that break standard code already! // Work around malloc/free being special macros: static inline void* WORKAROUND_malloc(size_t size) { return malloc(size); } static inline void WORKAROUND_free(void* ptr) { return free(ptr); } static inline void* (malloc)(size_t size) { return WORKAROUND_malloc(size); } static inline void (free)(void* ptr) { return WORKAROUND_free(ptr); } #else static inline void* malloc(size_t size) { return std::malloc(size); } static inline void free(void* ptr) { return std::free(ptr); } #endif #else // Debug versions when running under the Relacy race detector (ignore // these in user code) static inline void* malloc(size_t size) { return rl::rl_malloc(size, $); } static inline void free(void* ptr) { return rl::rl_free(ptr, $); } #endif }; // When producing or consuming many elements, the most efficient way is to: // 1) Use one of the bulk-operation methods of the queue with a token // 2) Failing that, use the bulk-operation methods without a token // 3) Failing that, create a token and use that with the single-item methods // 4) Failing that, use the single-parameter methods of the queue // Having said that, don't create tokens willy-nilly -- ideally there should be // a maximum of one token per thread (of each kind). 
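// Editor's illustrative sketch (not part of the upstream header): the traits-shadowing
// and token/bulk patterns described above, written as they might appear in application
// code that includes this header. `BigBlockTraits` and `example_bulk_usage` are
// hypothetical names; the snippet assumes only this header and <cstddef> are needed.
struct BigBlockTraits : public moodycamel::ConcurrentQueueDefaultTraits
{
    // Shadow only the members to change; the defaults fill in the rest.
    static const size_t BLOCK_SIZE = 256;                  // few producers, many elements
    static const size_t MAX_SUBQUEUE_SIZE = 1024 * 1024;   // cap each producer's sub-queue
};

inline void example_bulk_usage()
{
    moodycamel::ConcurrentQueue<int, BigBlockTraits> q;

    // 1) Best: bulk operations with tokens
    moodycamel::ProducerToken ptok(q);
    moodycamel::ConsumerToken ctok(q);
    int in[100];
    for (int i = 0; i != 100; ++i)
        in[i] = i;
    q.enqueue_bulk(ptok, in, 100);
    int out[100];
    std::size_t n = q.try_dequeue_bulk(ctok, out, 100);

    // 2) Next: bulk operations without tokens
    q.enqueue_bulk(in, 100);
    n = q.try_dequeue_bulk(out, 100);

    // 3) Then: single-item operations with a token
    q.enqueue(ptok, 7);

    // 4) Last resort: single-item operations without a token
    q.enqueue(42);
    int item;
    q.try_dequeue(item);
    (void)n; (void)item;
}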
struct ProducerToken; struct ConsumerToken; template class ConcurrentQueue; template class BlockingConcurrentQueue; class ConcurrentQueueTests; namespace details { struct ConcurrentQueueProducerTypelessBase { ConcurrentQueueProducerTypelessBase* next; std::atomic inactive; ProducerToken* token; ConcurrentQueueProducerTypelessBase() : next(nullptr), inactive(false), token(nullptr) { } }; template struct _hash_32_or_64 { static inline std::uint32_t hash(std::uint32_t h) { // MurmurHash3 finalizer -- see https://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp // Since the thread ID is already unique, all we really want to do is propagate that // uniqueness evenly across all the bits, so that we can use a subset of the bits while // reducing collisions significantly h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; return h ^ (h >> 16); } }; template<> struct _hash_32_or_64<1> { static inline std::uint64_t hash(std::uint64_t h) { h ^= h >> 33; h *= 0xff51afd7ed558ccd; h ^= h >> 33; h *= 0xc4ceb9fe1a85ec53; return h ^ (h >> 33); } }; template struct hash_32_or_64 : public _hash_32_or_64<(size > 4)> { }; static inline size_t hash_thread_id(thread_id_t id) { static_assert(sizeof(thread_id_t) <= 8, "Expected a platform where thread IDs are at most 64-bit values"); return static_cast(hash_32_or_64::thread_id_hash_t)>::hash( thread_id_converter::prehash(id))); } template static inline bool circular_less_than(T a, T b) { static_assert(std::is_integral::value && !std::numeric_limits::is_signed, "circular_less_than is intended to be used only with unsigned integer types"); return static_cast(a - b) > static_cast(static_cast(1) << (static_cast(sizeof(T) * CHAR_BIT - 1))); // Note: extra parens around rhs of operator<< is MSVC bug: https://developercommunity2.visualstudio.com/t/C4554-triggers-when-both-lhs-and-rhs-is/10034931 // silencing the bug requires #pragma warning(disable: 4554) around the calling code and has no effect when done here. 
} template static inline char* align_for(char* ptr) { const std::size_t alignment = std::alignment_of::value; return ptr + (alignment - (reinterpret_cast(ptr) % alignment)) % alignment; } template static inline T ceil_to_pow_2(T x) { static_assert(std::is_integral::value && !std::numeric_limits::is_signed, "ceil_to_pow_2 is intended to be used only with unsigned integer types"); // Adapted from http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; for (std::size_t i = 1; i < sizeof(T); i <<= 1) { x |= x >> (i << 3); } ++x; return x; } template static inline void swap_relaxed(std::atomic& left, std::atomic& right) { T temp = std::move(left.load(std::memory_order_relaxed)); left.store(std::move(right.load(std::memory_order_relaxed)), std::memory_order_relaxed); right.store(std::move(temp), std::memory_order_relaxed); } template static inline T const& nomove(T const& x) { return x; } template struct nomove_if { template static inline T const& eval(T const& x) { return x; } }; template<> struct nomove_if { template static inline auto eval(U&& x) -> decltype(std::forward(x)) { return std::forward(x); } }; template static inline auto deref_noexcept(It& it) MOODYCAMEL_NOEXCEPT -> decltype(*it) { return *it; } #if defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) template struct is_trivially_destructible : std::is_trivially_destructible { }; #else template struct is_trivially_destructible : std::has_trivial_destructor { }; #endif #ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED #ifdef MCDBGQ_USE_RELACY typedef RelacyThreadExitListener ThreadExitListener; typedef RelacyThreadExitNotifier ThreadExitNotifier; #else class ThreadExitNotifier; struct ThreadExitListener { typedef void (*callback_t)(void*); callback_t callback; void* userData; ThreadExitListener* next; // reserved for use by the ThreadExitNotifier ThreadExitNotifier* chain; // reserved for use by the ThreadExitNotifier }; class ThreadExitNotifier { public: static void subscribe(ThreadExitListener* listener) { auto& tlsInst = instance(); std::lock_guard guard(mutex()); listener->next = tlsInst.tail; listener->chain = &tlsInst; tlsInst.tail = listener; } static void unsubscribe(ThreadExitListener* listener) { std::lock_guard guard(mutex()); if (!listener->chain) { return; // race with ~ThreadExitNotifier } auto& tlsInst = *listener->chain; listener->chain = nullptr; ThreadExitListener** prev = &tlsInst.tail; for (auto ptr = tlsInst.tail; ptr != nullptr; ptr = ptr->next) { if (ptr == listener) { *prev = ptr->next; break; } prev = &ptr->next; } } private: ThreadExitNotifier() : tail(nullptr) { } ThreadExitNotifier(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION; ThreadExitNotifier& operator=(ThreadExitNotifier const&) MOODYCAMEL_DELETE_FUNCTION; ~ThreadExitNotifier() { // This thread is about to exit, let everyone know! assert(this == &instance() && "If this assert fails, you likely have a buggy compiler! 
Change the preprocessor conditions such that MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED is no longer defined."); std::lock_guard guard(mutex()); for (auto ptr = tail; ptr != nullptr; ptr = ptr->next) { ptr->chain = nullptr; ptr->callback(ptr->userData); } } // Thread-local static inline ThreadExitNotifier& instance() { static thread_local ThreadExitNotifier notifier; return notifier; } static inline std::mutex& mutex() { // Must be static because the ThreadExitNotifier could be destroyed while unsubscribe is called static std::mutex mutex; return mutex; } private: ThreadExitListener* tail; }; #endif #endif template struct static_is_lock_free_num { enum { value = 0 }; }; template<> struct static_is_lock_free_num { enum { value = ATOMIC_CHAR_LOCK_FREE }; }; template<> struct static_is_lock_free_num { enum { value = ATOMIC_SHORT_LOCK_FREE }; }; template<> struct static_is_lock_free_num { enum { value = ATOMIC_INT_LOCK_FREE }; }; template<> struct static_is_lock_free_num { enum { value = ATOMIC_LONG_LOCK_FREE }; }; template<> struct static_is_lock_free_num { enum { value = ATOMIC_LLONG_LOCK_FREE }; }; template struct static_is_lock_free : static_is_lock_free_num::type> { }; template<> struct static_is_lock_free { enum { value = ATOMIC_BOOL_LOCK_FREE }; }; template struct static_is_lock_free { enum { value = ATOMIC_POINTER_LOCK_FREE }; }; } struct ProducerToken { template explicit ProducerToken(ConcurrentQueue& queue); template explicit ProducerToken(BlockingConcurrentQueue& queue); ProducerToken(ProducerToken&& other) MOODYCAMEL_NOEXCEPT : producer(other.producer) { other.producer = nullptr; if (producer != nullptr) { producer->token = this; } } inline ProducerToken& operator=(ProducerToken&& other) MOODYCAMEL_NOEXCEPT { swap(other); return *this; } void swap(ProducerToken& other) MOODYCAMEL_NOEXCEPT { std::swap(producer, other.producer); if (producer != nullptr) { producer->token = this; } if (other.producer != nullptr) { other.producer->token = &other; } } // A token is always valid unless: // 1) Memory allocation failed during construction // 2) It was moved via the move constructor // (Note: assignment does a swap, leaving both potentially valid) // 3) The associated queue was destroyed // Note that if valid() returns true, that only indicates // that the token is valid for use with a specific queue, // but not which one; that's up to the user to track. 
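// Editor's illustrative sketch (not part of the upstream header): checking token
// validity after a move, per the note above, using the valid() accessor defined just
// below. `example_token_validity` is a hypothetical name; assumes <cassert> and
// <utility> are included alongside this header.
inline void example_token_validity()
{
    moodycamel::ConcurrentQueue<int> q;
    moodycamel::ProducerToken a(q);
    assert(a.valid());

    moodycamel::ProducerToken b(std::move(a));  // `a` was moved from, so it is now invalid
    assert(!a.valid() && b.valid());

    q.enqueue(b, 7);  // only the valid token may be used, and only with its own queue
}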
inline bool valid() const { return producer != nullptr; } ~ProducerToken() { if (producer != nullptr) { producer->token = nullptr; producer->inactive.store(true, std::memory_order_release); } } // Disable copying and assignment ProducerToken(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION; ProducerToken& operator=(ProducerToken const&) MOODYCAMEL_DELETE_FUNCTION; private: template friend class ConcurrentQueue; friend class ConcurrentQueueTests; protected: details::ConcurrentQueueProducerTypelessBase* producer; }; struct ConsumerToken { template explicit ConsumerToken(ConcurrentQueue& q); template explicit ConsumerToken(BlockingConcurrentQueue& q); ConsumerToken(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT : initialOffset(other.initialOffset), lastKnownGlobalOffset(other.lastKnownGlobalOffset), itemsConsumedFromCurrent(other.itemsConsumedFromCurrent), currentProducer(other.currentProducer), desiredProducer(other.desiredProducer) { } inline ConsumerToken& operator=(ConsumerToken&& other) MOODYCAMEL_NOEXCEPT { swap(other); return *this; } void swap(ConsumerToken& other) MOODYCAMEL_NOEXCEPT { std::swap(initialOffset, other.initialOffset); std::swap(lastKnownGlobalOffset, other.lastKnownGlobalOffset); std::swap(itemsConsumedFromCurrent, other.itemsConsumedFromCurrent); std::swap(currentProducer, other.currentProducer); std::swap(desiredProducer, other.desiredProducer); } // Disable copying and assignment ConsumerToken(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION; ConsumerToken& operator=(ConsumerToken const&) MOODYCAMEL_DELETE_FUNCTION; private: template friend class ConcurrentQueue; friend class ConcurrentQueueTests; private: // but shared with ConcurrentQueue std::uint32_t initialOffset; std::uint32_t lastKnownGlobalOffset; std::uint32_t itemsConsumedFromCurrent; details::ConcurrentQueueProducerTypelessBase* currentProducer; details::ConcurrentQueueProducerTypelessBase* desiredProducer; }; // Need to forward-declare this swap because it's in a namespace. // See http://stackoverflow.com/questions/4492062/why-does-a-c-friend-class-need-a-forward-declaration-only-in-other-namespaces template inline void swap(typename ConcurrentQueue::ImplicitProducerKVP& a, typename ConcurrentQueue::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT; template class ConcurrentQueue { public: typedef ::moodycamel::ProducerToken producer_token_t; typedef ::moodycamel::ConsumerToken consumer_token_t; typedef typename Traits::index_t index_t; typedef typename Traits::size_t size_t; static const size_t BLOCK_SIZE = static_cast(Traits::BLOCK_SIZE); static const size_t EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD = static_cast(Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD); static const size_t EXPLICIT_INITIAL_INDEX_SIZE = static_cast(Traits::EXPLICIT_INITIAL_INDEX_SIZE); static const size_t IMPLICIT_INITIAL_INDEX_SIZE = static_cast(Traits::IMPLICIT_INITIAL_INDEX_SIZE); static const size_t INITIAL_IMPLICIT_PRODUCER_HASH_SIZE = static_cast(Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE); static const std::uint32_t EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE = static_cast(Traits::EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE); #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable: 4307) // + integral constant overflow (that's what the ternary expression is for!) #pragma warning(disable: 4309) // static_cast: Truncation of constant value #endif static const size_t MAX_SUBQUEUE_SIZE = (details::const_numeric_max::value - static_cast(Traits::MAX_SUBQUEUE_SIZE) < BLOCK_SIZE) ? 
details::const_numeric_max::value : ((static_cast(Traits::MAX_SUBQUEUE_SIZE) + (BLOCK_SIZE - 1)) / BLOCK_SIZE * BLOCK_SIZE); #ifdef _MSC_VER #pragma warning(pop) #endif static_assert(!std::numeric_limits::is_signed && std::is_integral::value, "Traits::size_t must be an unsigned integral type"); static_assert(!std::numeric_limits::is_signed && std::is_integral::value, "Traits::index_t must be an unsigned integral type"); static_assert(sizeof(index_t) >= sizeof(size_t), "Traits::index_t must be at least as wide as Traits::size_t"); static_assert((BLOCK_SIZE > 1) && !(BLOCK_SIZE & (BLOCK_SIZE - 1)), "Traits::BLOCK_SIZE must be a power of 2 (and at least 2)"); static_assert((EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD > 1) && !(EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD & (EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD - 1)), "Traits::EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD must be a power of 2 (and greater than 1)"); static_assert((EXPLICIT_INITIAL_INDEX_SIZE > 1) && !(EXPLICIT_INITIAL_INDEX_SIZE & (EXPLICIT_INITIAL_INDEX_SIZE - 1)), "Traits::EXPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)"); static_assert((IMPLICIT_INITIAL_INDEX_SIZE > 1) && !(IMPLICIT_INITIAL_INDEX_SIZE & (IMPLICIT_INITIAL_INDEX_SIZE - 1)), "Traits::IMPLICIT_INITIAL_INDEX_SIZE must be a power of 2 (and greater than 1)"); static_assert((INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) || !(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE & (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE - 1)), "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be a power of 2"); static_assert(INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0 || INITIAL_IMPLICIT_PRODUCER_HASH_SIZE >= 1, "Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE must be at least 1 (or 0 to disable implicit enqueueing)"); public: // Creates a queue with at least `capacity` element slots; note that the // actual number of elements that can be inserted without additional memory // allocation depends on the number of producers and the block size (e.g. if // the block size is equal to `capacity`, only a single block will be allocated // up-front, which means only a single producer will be able to enqueue elements // without an extra allocation -- blocks aren't shared between producers). // This method is not thread safe -- it is up to the user to ensure that the // queue is fully constructed before it starts being used by other threads (this // includes making the memory effects of construction visible, possibly with a // memory barrier). explicit ConcurrentQueue(size_t capacity = 32 * BLOCK_SIZE) : producerListTail(nullptr), producerCount(0), initialBlockPoolIndex(0), nextExplicitConsumerId(0), globalExplicitConsumerOffset(0) { implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); populate_initial_implicit_producer_hash(); populate_initial_block_list(capacity / BLOCK_SIZE + ((capacity & (BLOCK_SIZE - 1)) == 0 ? 0 : 1)); #ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG // Track all the producers using a fully-resolved typed list for // each kind; this makes it possible to debug them starting from // the root queue object (otherwise wacky casts are needed that // don't compile in the debugger's expression evaluator). explicitProducers.store(nullptr, std::memory_order_relaxed); implicitProducers.store(nullptr, std::memory_order_relaxed); #endif } // Computes the correct amount of pre-allocated blocks for you based // on the minimum number of elements you want available at any given // time, and the maximum concurrent number of each type of producer. 
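// Editor's illustrative sketch (not part of the upstream header): using the sizing
// constructor declared just below to pre-allocate for a known workload so that the
// try_* methods do not need to allocate. `example_preallocated` is a hypothetical name.
inline void example_preallocated()
{
    // At least 4096 element slots, sized for up to 2 explicit and 4 implicit producers.
    moodycamel::ConcurrentQueue<int> q(4096, 2, 4);

    // With enough pre-allocated room, try_enqueue succeeds without touching the heap
    // (aside from the one-time implicit producer registration).
    bool ok = q.try_enqueue(123);
    (void)ok;
}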
ConcurrentQueue(size_t minCapacity, size_t maxExplicitProducers, size_t maxImplicitProducers) : producerListTail(nullptr), producerCount(0), initialBlockPoolIndex(0), nextExplicitConsumerId(0), globalExplicitConsumerOffset(0) { implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); populate_initial_implicit_producer_hash(); size_t blocks = (((minCapacity + BLOCK_SIZE - 1) / BLOCK_SIZE) - 1) * (maxExplicitProducers + 1) + 2 * (maxExplicitProducers + maxImplicitProducers); populate_initial_block_list(blocks); #ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG explicitProducers.store(nullptr, std::memory_order_relaxed); implicitProducers.store(nullptr, std::memory_order_relaxed); #endif } // Note: The queue should not be accessed concurrently while it's // being deleted. It's up to the user to synchronize this. // This method is not thread safe. ~ConcurrentQueue() { // Destroy producers auto ptr = producerListTail.load(std::memory_order_relaxed); while (ptr != nullptr) { auto next = ptr->next_prod(); if (ptr->token != nullptr) { ptr->token->producer = nullptr; } destroy(ptr); ptr = next; } // Destroy implicit producer hash tables MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE != 0) { auto hash = implicitProducerHash.load(std::memory_order_relaxed); while (hash != nullptr) { auto prev = hash->prev; if (prev != nullptr) { // The last hash is part of this object and was not allocated dynamically for (size_t i = 0; i != hash->capacity; ++i) { hash->entries[i].~ImplicitProducerKVP(); } hash->~ImplicitProducerHash(); (Traits::free)(hash); } hash = prev; } } // Destroy global free list auto block = freeList.head_unsafe(); while (block != nullptr) { auto next = block->freeListNext.load(std::memory_order_relaxed); if (block->dynamicallyAllocated) { destroy(block); } block = next; } // Destroy initial free list destroy_array(initialBlockPool, initialBlockPoolSize); } // Disable copying and copy assignment ConcurrentQueue(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; ConcurrentQueue& operator=(ConcurrentQueue const&) MOODYCAMEL_DELETE_FUNCTION; // Moving is supported, but note that it is *not* a thread-safe operation. // Nobody can use the queue while it's being moved, and the memory effects // of that move must be propagated to other threads before they can use it. // Note: When a queue is moved, its tokens are still valid but can only be // used with the destination queue (i.e. semantically they are moved along // with the queue itself). 
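// Editor's illustrative sketch (not part of the upstream header): the move semantics
// described above, exercised via the move constructor declared just below. Moving is
// not thread-safe, and existing tokens follow the queue's state to the destination.
// `example_move` is a hypothetical name; assumes <utility> is included.
inline void example_move()
{
    moodycamel::ConcurrentQueue<int> a;
    moodycamel::ProducerToken tok(a);
    a.enqueue(tok, 1);

    // No other thread may touch `a` or `b` while the move happens.
    moodycamel::ConcurrentQueue<int> b(std::move(a));

    // The token is still valid, but from here on it belongs to `b`, not `a`.
    b.enqueue(tok, 2);

    int item;
    while (b.try_dequeue(item)) { /* drains 1 and 2 */ }
}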
ConcurrentQueue(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT : producerListTail(other.producerListTail.load(std::memory_order_relaxed)), producerCount(other.producerCount.load(std::memory_order_relaxed)), initialBlockPoolIndex(other.initialBlockPoolIndex.load(std::memory_order_relaxed)), initialBlockPool(other.initialBlockPool), initialBlockPoolSize(other.initialBlockPoolSize), freeList(std::move(other.freeList)), nextExplicitConsumerId(other.nextExplicitConsumerId.load(std::memory_order_relaxed)), globalExplicitConsumerOffset(other.globalExplicitConsumerOffset.load(std::memory_order_relaxed)) { // Move the other one into this, and leave the other one as an empty queue implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); populate_initial_implicit_producer_hash(); swap_implicit_producer_hashes(other); other.producerListTail.store(nullptr, std::memory_order_relaxed); other.producerCount.store(0, std::memory_order_relaxed); other.nextExplicitConsumerId.store(0, std::memory_order_relaxed); other.globalExplicitConsumerOffset.store(0, std::memory_order_relaxed); #ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG explicitProducers.store(other.explicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed); other.explicitProducers.store(nullptr, std::memory_order_relaxed); implicitProducers.store(other.implicitProducers.load(std::memory_order_relaxed), std::memory_order_relaxed); other.implicitProducers.store(nullptr, std::memory_order_relaxed); #endif other.initialBlockPoolIndex.store(0, std::memory_order_relaxed); other.initialBlockPoolSize = 0; other.initialBlockPool = nullptr; reown_producers(); } inline ConcurrentQueue& operator=(ConcurrentQueue&& other) MOODYCAMEL_NOEXCEPT { return swap_internal(other); } // Swaps this queue's state with the other's. Not thread-safe. // Swapping two queues does not invalidate their tokens, however // the tokens that were created for one queue must be used with // only the swapped queue (i.e. the tokens are tied to the // queue's movable state, not the object itself). inline void swap(ConcurrentQueue& other) MOODYCAMEL_NOEXCEPT { swap_internal(other); } private: ConcurrentQueue& swap_internal(ConcurrentQueue& other) { if (this == &other) { return *this; } details::swap_relaxed(producerListTail, other.producerListTail); details::swap_relaxed(producerCount, other.producerCount); details::swap_relaxed(initialBlockPoolIndex, other.initialBlockPoolIndex); std::swap(initialBlockPool, other.initialBlockPool); std::swap(initialBlockPoolSize, other.initialBlockPoolSize); freeList.swap(other.freeList); details::swap_relaxed(nextExplicitConsumerId, other.nextExplicitConsumerId); details::swap_relaxed(globalExplicitConsumerOffset, other.globalExplicitConsumerOffset); swap_implicit_producer_hashes(other); reown_producers(); other.reown_producers(); #ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG details::swap_relaxed(explicitProducers, other.explicitProducers); details::swap_relaxed(implicitProducers, other.implicitProducers); #endif return *this; } public: // Enqueues a single item (by copying it). // Allocates memory if required. Only fails if memory allocation fails (or implicit // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Thread-safe. inline bool enqueue(T const& item) { MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; else return inner_enqueue(item); } // Enqueues a single item (by moving it, if possible). 
// Allocates memory if required. Only fails if memory allocation fails (or implicit // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0, // or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Thread-safe. inline bool enqueue(T&& item) { MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; else return inner_enqueue(std::move(item)); } // Enqueues a single item (by copying it) using an explicit producer token. // Allocates memory if required. Only fails if memory allocation fails (or // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Thread-safe. inline bool enqueue(producer_token_t const& token, T const& item) { return inner_enqueue(token, item); } // Enqueues a single item (by moving it, if possible) using an explicit producer token. // Allocates memory if required. Only fails if memory allocation fails (or // Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Thread-safe. inline bool enqueue(producer_token_t const& token, T&& item) { return inner_enqueue(token, std::move(item)); } // Enqueues several items. // Allocates memory if required. Only fails if memory allocation fails (or // implicit production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE // is 0, or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Note: Use std::make_move_iterator if the elements should be moved instead of copied. // Thread-safe. template bool enqueue_bulk(It itemFirst, size_t count) { MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; else return inner_enqueue_bulk(itemFirst, count); } // Enqueues several items using an explicit producer token. // Allocates memory if required. Only fails if memory allocation fails // (or Traits::MAX_SUBQUEUE_SIZE has been defined and would be surpassed). // Note: Use std::make_move_iterator if the elements should be moved // instead of copied. // Thread-safe. template bool enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) { return inner_enqueue_bulk(token, itemFirst, count); } // Enqueues a single item (by copying it). // Does not allocate memory. Fails if not enough room to enqueue (or implicit // production is disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE // is 0). // Thread-safe. inline bool try_enqueue(T const& item) { MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; else return inner_enqueue(item); } // Enqueues a single item (by moving it, if possible). // Does not allocate memory (except for one-time implicit producer). // Fails if not enough room to enqueue (or implicit production is // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). // Thread-safe. inline bool try_enqueue(T&& item) { MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; else return inner_enqueue(std::move(item)); } // Enqueues a single item (by copying it) using an explicit producer token. // Does not allocate memory. Fails if not enough room to enqueue. // Thread-safe. inline bool try_enqueue(producer_token_t const& token, T const& item) { return inner_enqueue(token, item); } // Enqueues a single item (by moving it, if possible) using an explicit producer token. // Does not allocate memory. Fails if not enough room to enqueue. // Thread-safe. inline bool try_enqueue(producer_token_t const& token, T&& item) { return inner_enqueue(token, std::move(item)); } // Enqueues several items. 
// Does not allocate memory (except for one-time implicit producer). // Fails if not enough room to enqueue (or implicit production is // disabled because Traits::INITIAL_IMPLICIT_PRODUCER_HASH_SIZE is 0). // Note: Use std::make_move_iterator if the elements should be moved // instead of copied. // Thread-safe. template bool try_enqueue_bulk(It itemFirst, size_t count) { MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) return false; else return inner_enqueue_bulk(itemFirst, count); } // Enqueues several items using an explicit producer token. // Does not allocate memory. Fails if not enough room to enqueue. // Note: Use std::make_move_iterator if the elements should be moved // instead of copied. // Thread-safe. template bool try_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) { return inner_enqueue_bulk(token, itemFirst, count); } // Attempts to dequeue from the queue. // Returns false if all producer streams appeared empty at the time they // were checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe. template bool try_dequeue(U& item) { // Instead of simply trying each producer in turn (which could cause needless contention on the first // producer), we score them heuristically. size_t nonEmptyCount = 0; ProducerBase* best = nullptr; size_t bestSize = 0; for (auto ptr = producerListTail.load(std::memory_order_acquire); nonEmptyCount < 3 && ptr != nullptr; ptr = ptr->next_prod()) { auto size = ptr->size_approx(); if (size > 0) { if (size > bestSize) { bestSize = size; best = ptr; } ++nonEmptyCount; } } // If there was at least one non-empty queue but it appears empty at the time // we try to dequeue from it, we need to make sure every queue's been tried if (nonEmptyCount > 0) { if ((details::likely)(best->dequeue(item))) { return true; } for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { if (ptr != best && ptr->dequeue(item)) { return true; } } } return false; } // Attempts to dequeue from the queue. // Returns false if all producer streams appeared empty at the time they // were checked (so, the queue is likely but not guaranteed to be empty). // This differs from the try_dequeue(item) method in that this one does // not attempt to reduce contention by interleaving the order that producer // streams are dequeued from. So, using this method can reduce overall throughput // under contention, but will give more predictable results in single-threaded // consumer scenarios. This is mostly only useful for internal unit tests. // Never allocates. Thread-safe. template bool try_dequeue_non_interleaved(U& item) { for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { if (ptr->dequeue(item)) { return true; } } return false; } // Attempts to dequeue from the queue using an explicit consumer token. // Returns false if all producer streams appeared empty at the time they // were checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe. 
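// Editor's illustrative sketch (not part of the upstream header): the token-based
// try_dequeue defined just below, driven from a dedicated consumer thread. Assumes
// <thread> is available (this header already pulls it in); `example_token_consumer`
// is a hypothetical name.
inline void example_token_consumer()
{
    moodycamel::ConcurrentQueue<int> q;

    std::thread producer([&] {
        moodycamel::ProducerToken ptok(q);
        for (int i = 0; i != 1000; ++i)
            q.enqueue(ptok, i);
    });

    std::thread consumer([&] {
        moodycamel::ConsumerToken ctok(q);
        int item;
        int received = 0;
        while (received != 1000) {
            if (q.try_dequeue(ctok, item))
                ++received;
        }
    });

    producer.join();
    consumer.join();
}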
template<typename U> bool try_dequeue(consumer_token_t& token, U& item) { // The idea is roughly as follows: // Every 256 items from one producer, make everyone rotate (increase the global offset) -> this means the highest efficiency consumer dictates the rotation speed of everyone else, more or less // If you see that the global offset has changed, you must reset your consumption counter and move to your designated place // If there are no items where you're supposed to be, keep moving until you find a producer with some items // If the global offset has not changed but you've run out of items to consume, move over from your current position until you find a producer with something in it if (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) { if (!update_current_producer_after_rotation(token)) { return false; } } // If there was at least one non-empty queue but it appears empty at the time // we try to dequeue from it, we need to make sure every queue's been tried if (static_cast<ProducerBase*>(token.currentProducer)->dequeue(item)) { if (++token.itemsConsumedFromCurrent == EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) { globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed); } return true; } auto tail = producerListTail.load(std::memory_order_acquire); auto ptr = static_cast<ProducerBase*>(token.currentProducer)->next_prod(); if (ptr == nullptr) { ptr = tail; } while (ptr != static_cast<ProducerBase*>(token.currentProducer)) { if (ptr->dequeue(item)) { token.currentProducer = ptr; token.itemsConsumedFromCurrent = 1; return true; } ptr = ptr->next_prod(); if (ptr == nullptr) { ptr = tail; } } return false; } // Attempts to dequeue several elements from the queue. // Returns the number of items actually dequeued. // Returns 0 if all producer streams appeared empty at the time they // were checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe. template<typename It> size_t try_dequeue_bulk(It itemFirst, size_t max) { size_t count = 0; for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { count += ptr->dequeue_bulk(itemFirst, max - count); if (count == max) { break; } } return count; } // Attempts to dequeue several elements from the queue using an explicit consumer token. // Returns the number of items actually dequeued. // Returns 0 if all producer streams appeared empty at the time they // were checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe.
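// Editor's illustrative sketch (not part of the upstream header): draining the queue
// in batches via the token-based try_dequeue_bulk defined just below. Once a consumer
// pulls EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE items from one producer it
// rotates to the next, as explained above. `example_bulk_drain` is a hypothetical name.
inline std::size_t example_bulk_drain(moodycamel::ConcurrentQueue<int>& q)
{
    moodycamel::ConsumerToken ctok(q);
    int buf[64];
    std::size_t total = 0;

    // Keep pulling batches until every producer stream appears empty.
    for (;;) {
        std::size_t n = q.try_dequeue_bulk(ctok, buf, 64);
        if (n == 0)
            break;
        total += n;  // process buf[0..n) here
    }
    return total;
}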
template size_t try_dequeue_bulk(consumer_token_t& token, It itemFirst, size_t max) { if (token.desiredProducer == nullptr || token.lastKnownGlobalOffset != globalExplicitConsumerOffset.load(std::memory_order_relaxed)) { if (!update_current_producer_after_rotation(token)) { return 0; } } size_t count = static_cast(token.currentProducer)->dequeue_bulk(itemFirst, max); if (count == max) { if ((token.itemsConsumedFromCurrent += static_cast(max)) >= EXPLICIT_CONSUMER_CONSUMPTION_QUOTA_BEFORE_ROTATE) { globalExplicitConsumerOffset.fetch_add(1, std::memory_order_relaxed); } return max; } token.itemsConsumedFromCurrent += static_cast(count); max -= count; auto tail = producerListTail.load(std::memory_order_acquire); auto ptr = static_cast(token.currentProducer)->next_prod(); if (ptr == nullptr) { ptr = tail; } while (ptr != static_cast(token.currentProducer)) { auto dequeued = ptr->dequeue_bulk(itemFirst, max); count += dequeued; if (dequeued != 0) { token.currentProducer = ptr; token.itemsConsumedFromCurrent = static_cast(dequeued); } if (dequeued == max) { break; } max -= dequeued; ptr = ptr->next_prod(); if (ptr == nullptr) { ptr = tail; } } return count; } // Attempts to dequeue from a specific producer's inner queue. // If you happen to know which producer you want to dequeue from, this // is significantly faster than using the general-case try_dequeue methods. // Returns false if the producer's queue appeared empty at the time it // was checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe. template inline bool try_dequeue_from_producer(producer_token_t const& producer, U& item) { return static_cast(producer.producer)->dequeue(item); } // Attempts to dequeue several elements from a specific producer's inner queue. // Returns the number of items actually dequeued. // If you happen to know which producer you want to dequeue from, this // is significantly faster than using the general-case try_dequeue methods. // Returns 0 if the producer's queue appeared empty at the time it // was checked (so, the queue is likely but not guaranteed to be empty). // Never allocates. Thread-safe. template inline size_t try_dequeue_bulk_from_producer(producer_token_t const& producer, It itemFirst, size_t max) { return static_cast(producer.producer)->dequeue_bulk(itemFirst, max); } // Returns an estimate of the total number of elements currently in the queue. This // estimate is only accurate if the queue has completely stabilized before it is called // (i.e. all enqueue and dequeue operations have completed and their memory effects are // visible on the calling thread, and no further operations start while this method is // being called). // Thread-safe. size_t size_approx() const { size_t size = 0; for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { size += ptr->size_approx(); } return size; } // Returns true if the underlying atomic variables used by // the queue are lock-free (they should be on most platforms). // Thread-safe. 
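// Editor's illustrative sketch (not part of the upstream header): the lock-free query
// declared just below, together with the producer-specific dequeue helper and
// size_approx defined earlier in this class. `example_known_producer` is a
// hypothetical name; whether is_lock_free() reports true depends on the platform.
inline void example_known_producer(moodycamel::ConcurrentQueue<int>& q,
                                   moodycamel::ProducerToken& ptok)
{
    bool lockFree = moodycamel::ConcurrentQueue<int>::is_lock_free();
    (void)lockFree;

    // When the consumer knows exactly which producer it is paired with, dequeueing
    // straight from that producer's inner queue skips the producer search entirely.
    int item;
    while (q.try_dequeue_from_producer(ptok, item)) {
        // process item
    }

    // Rough element count; only exact once all operations have quiesced.
    std::size_t remaining = q.size_approx();
    (void)remaining;
}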
static constexpr bool is_lock_free() { return details::static_is_lock_free::value == 2 && details::static_is_lock_free::value == 2 && details::static_is_lock_free::value == 2 && details::static_is_lock_free::value == 2 && details::static_is_lock_free::value == 2 && details::static_is_lock_free::thread_id_numeric_size_t>::value == 2; } private: friend struct ProducerToken; friend struct ConsumerToken; struct ExplicitProducer; friend struct ExplicitProducer; struct ImplicitProducer; friend struct ImplicitProducer; friend class ConcurrentQueueTests; enum AllocationMode { CanAlloc, CannotAlloc }; /////////////////////////////// // Queue methods /////////////////////////////// template inline bool inner_enqueue(producer_token_t const& token, U&& element) { return static_cast(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue(std::forward(element)); } template inline bool inner_enqueue(U&& element) { auto producer = get_or_add_implicit_producer(); return producer == nullptr ? false : producer->ConcurrentQueue::ImplicitProducer::template enqueue(std::forward(element)); } template inline bool inner_enqueue_bulk(producer_token_t const& token, It itemFirst, size_t count) { return static_cast(token.producer)->ConcurrentQueue::ExplicitProducer::template enqueue_bulk(itemFirst, count); } template inline bool inner_enqueue_bulk(It itemFirst, size_t count) { auto producer = get_or_add_implicit_producer(); return producer == nullptr ? false : producer->ConcurrentQueue::ImplicitProducer::template enqueue_bulk(itemFirst, count); } inline bool update_current_producer_after_rotation(consumer_token_t& token) { // Ah, there's been a rotation, figure out where we should be! auto tail = producerListTail.load(std::memory_order_acquire); if (token.desiredProducer == nullptr && tail == nullptr) { return false; } auto prodCount = producerCount.load(std::memory_order_relaxed); auto globalOffset = globalExplicitConsumerOffset.load(std::memory_order_relaxed); if ((details::unlikely)(token.desiredProducer == nullptr)) { // Aha, first time we're dequeueing anything. // Figure out our local position // Note: offset is from start, not end, but we're traversing from end -- subtract from count first std::uint32_t offset = prodCount - 1 - (token.initialOffset % prodCount); token.desiredProducer = tail; for (std::uint32_t i = 0; i != offset; ++i) { token.desiredProducer = static_cast(token.desiredProducer)->next_prod(); if (token.desiredProducer == nullptr) { token.desiredProducer = tail; } } } std::uint32_t delta = globalOffset - token.lastKnownGlobalOffset; if (delta >= prodCount) { delta = delta % prodCount; } for (std::uint32_t i = 0; i != delta; ++i) { token.desiredProducer = static_cast(token.desiredProducer)->next_prod(); if (token.desiredProducer == nullptr) { token.desiredProducer = tail; } } token.lastKnownGlobalOffset = globalOffset; token.currentProducer = token.desiredProducer; token.itemsConsumedFromCurrent = 0; return true; } /////////////////////////// // Free list /////////////////////////// template struct FreeListNode { FreeListNode() : freeListRefs(0), freeListNext(nullptr) { } std::atomic freeListRefs; std::atomic freeListNext; }; // A simple CAS-based lock-free free list. Not the fastest thing in the world under heavy contention, but // simple and correct (assuming nodes are never freed until after the free list is destroyed), and fairly // speedy under low contention. 
template // N must inherit FreeListNode or have the same fields (and initialization of them) struct FreeList { FreeList() : freeListHead(nullptr) { } FreeList(FreeList&& other) : freeListHead(other.freeListHead.load(std::memory_order_relaxed)) { other.freeListHead.store(nullptr, std::memory_order_relaxed); } void swap(FreeList& other) { details::swap_relaxed(freeListHead, other.freeListHead); } FreeList(FreeList const&) MOODYCAMEL_DELETE_FUNCTION; FreeList& operator=(FreeList const&) MOODYCAMEL_DELETE_FUNCTION; inline void add(N* node) { #ifdef MCDBGQ_NOLOCKFREE_FREELIST debug::DebugLock lock(mutex); #endif // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to // set it using a fetch_add if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST, std::memory_order_acq_rel) == 0) { // Oh look! We were the last ones referencing this node, and we know // we want to add it to the free list, so let's do it! add_knowing_refcount_is_zero(node); } } inline N* try_get() { #ifdef MCDBGQ_NOLOCKFREE_FREELIST debug::DebugLock lock(mutex); #endif auto head = freeListHead.load(std::memory_order_acquire); while (head != nullptr) { auto prevHead = head; auto refs = head->freeListRefs.load(std::memory_order_relaxed); if ((refs & REFS_MASK) == 0 || !head->freeListRefs.compare_exchange_strong(refs, refs + 1, std::memory_order_acquire, std::memory_order_relaxed)) { head = freeListHead.load(std::memory_order_acquire); continue; } // Good, reference count has been incremented (it wasn't at zero), which means we can read the // next and not worry about it changing between now and the time we do the CAS auto next = head->freeListNext.load(std::memory_order_relaxed); if (freeListHead.compare_exchange_strong(head, next, std::memory_order_acquire, std::memory_order_relaxed)) { // Yay, got the node. This means it was on the list, which means shouldBeOnFreeList must be false no // matter the refcount (because nobody else knows it's been taken off yet, it can't have been put back on). assert((head->freeListRefs.load(std::memory_order_relaxed) & SHOULD_BE_ON_FREELIST) == 0); // Decrease refcount twice, once for our ref, and once for the list's ref head->freeListRefs.fetch_sub(2, std::memory_order_release); return head; } // OK, the head must have changed on us, but we still need to decrease the refcount we increased. // Note that we don't need to release any memory effects, but we do need to ensure that the reference // count decrement happens-after the CAS on the head. refs = prevHead->freeListRefs.fetch_sub(1, std::memory_order_acq_rel); if (refs == SHOULD_BE_ON_FREELIST + 1) { add_knowing_refcount_is_zero(prevHead); } } return nullptr; } // Useful for traversing the list when there's no contention (e.g. to destroy remaining nodes) N* head_unsafe() const { return freeListHead.load(std::memory_order_relaxed); } private: inline void add_knowing_refcount_is_zero(N* node) { // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we run // only one copy of this method per node at a time, i.e. 
the single thread case), then we know // we can safely change the next pointer of the node; however, once the refcount is back above // zero, then other threads could increase it (happens under heavy contention, when the refcount // goes to zero in between a load and a refcount increment of a node in try_get, then back up to // something non-zero, then the refcount increment is done by the other thread) -- so, if the CAS // to add the node to the actual list fails, decrease the refcount and leave the add operation to // the next thread who puts the refcount back at zero (which could be us, hence the loop). auto head = freeListHead.load(std::memory_order_relaxed); while (true) { node->freeListNext.store(head, std::memory_order_relaxed); node->freeListRefs.store(1, std::memory_order_release); if (!freeListHead.compare_exchange_strong(head, node, std::memory_order_release, std::memory_order_relaxed)) { // Hmm, the add failed, but we can only try again when the refcount goes back to zero if (node->freeListRefs.fetch_add(SHOULD_BE_ON_FREELIST - 1, std::memory_order_release) == 1) { continue; } } return; } } private: // Implemented like a stack, but where node order doesn't matter (nodes are inserted out of order under contention) std::atomic freeListHead; static const std::uint32_t REFS_MASK = 0x7FFFFFFF; static const std::uint32_t SHOULD_BE_ON_FREELIST = 0x80000000; #ifdef MCDBGQ_NOLOCKFREE_FREELIST debug::DebugMutex mutex; #endif }; /////////////////////////// // Block /////////////////////////// enum InnerQueueContext { implicit_context = 0, explicit_context = 1 }; struct Block { Block() : next(nullptr), elementsCompletelyDequeued(0), freeListRefs(0), freeListNext(nullptr), dynamicallyAllocated(true) { #ifdef MCDBGQ_TRACKMEM owner = nullptr; #endif } template inline bool is_empty() const { MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { // Check flags for (size_t i = 0; i < BLOCK_SIZE; ++i) { if (!emptyFlags[i].load(std::memory_order_relaxed)) { return false; } } // Aha, empty; make sure we have all other memory effects that happened before the empty flags were set std::atomic_thread_fence(std::memory_order_acquire); return true; } else { // Check counter if (elementsCompletelyDequeued.load(std::memory_order_relaxed) == BLOCK_SIZE) { std::atomic_thread_fence(std::memory_order_acquire); return true; } assert(elementsCompletelyDequeued.load(std::memory_order_relaxed) <= BLOCK_SIZE); return false; } } // Returns true if the block is now empty (does not apply in explicit context) template inline bool set_empty(MOODYCAMEL_MAYBE_UNUSED index_t i) { MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { // Set flag assert(!emptyFlags[BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1))].load(std::memory_order_relaxed)); emptyFlags[BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1))].store(true, std::memory_order_release); return false; } else { // Increment counter auto prevVal = elementsCompletelyDequeued.fetch_add(1, std::memory_order_release); assert(prevVal < BLOCK_SIZE); return prevVal == BLOCK_SIZE - 1; } } // Sets multiple contiguous item statuses to 'empty' (assumes no wrapping and count > 0). // Returns true if the block is now empty (does not apply in explicit context). 
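// Editor's illustrative sketch (not upstream code): a toy, single-block model of the
// counter-based emptiness tracking used by set_empty above and set_many_empty just
// below (the path taken when BLOCK_SIZE exceeds EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD,
// or for implicit producers). `ToyBlock` is a hypothetical type; assumes <atomic> and
// <cstddef> are included, which this header already does.
struct ToyBlock
{
    static const std::size_t SIZE = 64;
    std::atomic<std::size_t> dequeuedCount;

    ToyBlock() : dequeuedCount(0) { }

    // Mark one slot consumed; true means the whole block has now been drained.
    bool set_empty()
    {
        return dequeuedCount.fetch_add(1, std::memory_order_release) == SIZE - 1;
    }

    // Mark `count` contiguous slots consumed at once (the bulk-dequeue path).
    bool set_many_empty(std::size_t count)
    {
        std::size_t prev = dequeuedCount.fetch_add(count, std::memory_order_release);
        return prev + count == SIZE;
    }

    bool is_empty() const
    {
        return dequeuedCount.load(std::memory_order_relaxed) == SIZE;
    }
};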
template inline bool set_many_empty(MOODYCAMEL_MAYBE_UNUSED index_t i, size_t count) { MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { // Set flags std::atomic_thread_fence(std::memory_order_release); i = BLOCK_SIZE - 1 - static_cast(i & static_cast(BLOCK_SIZE - 1)) - count + 1; for (size_t j = 0; j != count; ++j) { assert(!emptyFlags[i + j].load(std::memory_order_relaxed)); emptyFlags[i + j].store(true, std::memory_order_relaxed); } return false; } else { // Increment counter auto prevVal = elementsCompletelyDequeued.fetch_add(count, std::memory_order_release); assert(prevVal + count <= BLOCK_SIZE); return prevVal + count == BLOCK_SIZE; } } template inline void set_all_empty() { MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { // Set all flags for (size_t i = 0; i != BLOCK_SIZE; ++i) { emptyFlags[i].store(true, std::memory_order_relaxed); } } else { // Reset counter elementsCompletelyDequeued.store(BLOCK_SIZE, std::memory_order_relaxed); } } template inline void reset_empty() { MOODYCAMEL_CONSTEXPR_IF (context == explicit_context && BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD) { // Reset flags for (size_t i = 0; i != BLOCK_SIZE; ++i) { emptyFlags[i].store(false, std::memory_order_relaxed); } } else { // Reset counter elementsCompletelyDequeued.store(0, std::memory_order_relaxed); } } inline T* operator[](index_t idx) MOODYCAMEL_NOEXCEPT { return static_cast(static_cast(elements)) + static_cast(idx & static_cast(BLOCK_SIZE - 1)); } inline T const* operator[](index_t idx) const MOODYCAMEL_NOEXCEPT { return static_cast(static_cast(elements)) + static_cast(idx & static_cast(BLOCK_SIZE - 1)); } private: static_assert(std::alignment_of::value <= sizeof(T), "The queue does not support types with an alignment greater than their size at this time"); MOODYCAMEL_ALIGNED_TYPE_LIKE(char[sizeof(T) * BLOCK_SIZE], T) elements; public: Block* next; std::atomic elementsCompletelyDequeued; std::atomic emptyFlags[BLOCK_SIZE <= EXPLICIT_BLOCK_EMPTY_COUNTER_THRESHOLD ? 
BLOCK_SIZE : 1]; public: std::atomic freeListRefs; std::atomic freeListNext; bool dynamicallyAllocated; // Perhaps a better name for this would be 'isNotPartOfInitialBlockPool' #ifdef MCDBGQ_TRACKMEM void* owner; #endif }; static_assert(std::alignment_of::value >= std::alignment_of::value, "Internal error: Blocks must be at least as aligned as the type they are wrapping"); #ifdef MCDBGQ_TRACKMEM public: struct MemStats; private: #endif /////////////////////////// // Producer base /////////////////////////// struct ProducerBase : public details::ConcurrentQueueProducerTypelessBase { ProducerBase(ConcurrentQueue* parent_, bool isExplicit_) : tailIndex(0), headIndex(0), dequeueOptimisticCount(0), dequeueOvercommit(0), tailBlock(nullptr), isExplicit(isExplicit_), parent(parent_) { } virtual ~ProducerBase() { } template inline bool dequeue(U& element) { if (isExplicit) { return static_cast(this)->dequeue(element); } else { return static_cast(this)->dequeue(element); } } template inline size_t dequeue_bulk(It& itemFirst, size_t max) { if (isExplicit) { return static_cast(this)->dequeue_bulk(itemFirst, max); } else { return static_cast(this)->dequeue_bulk(itemFirst, max); } } inline ProducerBase* next_prod() const { return static_cast(next); } inline size_t size_approx() const { auto tail = tailIndex.load(std::memory_order_relaxed); auto head = headIndex.load(std::memory_order_relaxed); return details::circular_less_than(head, tail) ? static_cast(tail - head) : 0; } inline index_t getTail() const { return tailIndex.load(std::memory_order_relaxed); } protected: std::atomic tailIndex; // Where to enqueue to next std::atomic headIndex; // Where to dequeue from next std::atomic dequeueOptimisticCount; std::atomic dequeueOvercommit; Block* tailBlock; public: bool isExplicit; ConcurrentQueue* parent; protected: #ifdef MCDBGQ_TRACKMEM friend struct MemStats; #endif }; /////////////////////////// // Explicit queue /////////////////////////// struct ExplicitProducer : public ProducerBase { explicit ExplicitProducer(ConcurrentQueue* parent_) : ProducerBase(parent_, true), blockIndex(nullptr), pr_blockIndexSlotsUsed(0), pr_blockIndexSize(EXPLICIT_INITIAL_INDEX_SIZE >> 1), pr_blockIndexFront(0), pr_blockIndexEntries(nullptr), pr_blockIndexRaw(nullptr) { size_t poolBasedIndexSize = details::ceil_to_pow_2(parent_->initialBlockPoolSize) >> 1; if (poolBasedIndexSize > pr_blockIndexSize) { pr_blockIndexSize = poolBasedIndexSize; } new_block_index(0); // This creates an index with double the number of current entries, i.e. EXPLICIT_INITIAL_INDEX_SIZE } ~ExplicitProducer() { // Destruct any elements not yet dequeued. // Since we're in the destructor, we can assume all elements // are either completely dequeued or completely not (no halfways). 
if (this->tailBlock != nullptr) { // Note this means there must be a block index too // First find the block that's partially dequeued, if any Block* halfDequeuedBlock = nullptr; if ((this->headIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)) != 0) { // The head's not on a block boundary, meaning a block somewhere is partially dequeued // (or the head block is the tail block and was fully dequeued, but the head/tail are still not on a boundary) size_t i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & (pr_blockIndexSize - 1); while (details::circular_less_than(pr_blockIndexEntries[i].base + BLOCK_SIZE, this->headIndex.load(std::memory_order_relaxed))) { i = (i + 1) & (pr_blockIndexSize - 1); } assert(details::circular_less_than(pr_blockIndexEntries[i].base, this->headIndex.load(std::memory_order_relaxed))); halfDequeuedBlock = pr_blockIndexEntries[i].block; } // Start at the head block (note the first line in the loop gives us the head from the tail on the first iteration) auto block = this->tailBlock; do { block = block->next; if (block->ConcurrentQueue::Block::template is_empty()) { continue; } size_t i = 0; // Offset into block if (block == halfDequeuedBlock) { i = static_cast(this->headIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)); } // Walk through all the items in the block; if this is the tail block, we need to stop when we reach the tail index auto lastValidIndex = (this->tailIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)) == 0 ? BLOCK_SIZE : static_cast(this->tailIndex.load(std::memory_order_relaxed) & static_cast(BLOCK_SIZE - 1)); while (i != BLOCK_SIZE && (block != this->tailBlock || i != lastValidIndex)) { (*block)[i++]->~T(); } } while (block != this->tailBlock); } // Destroy all blocks that we own if (this->tailBlock != nullptr) { auto block = this->tailBlock; do { auto nextBlock = block->next; this->parent->add_block_to_free_list(block); block = nextBlock; } while (block != this->tailBlock); } // Destroy the block indices auto header = static_cast(pr_blockIndexRaw); while (header != nullptr) { auto prev = static_cast(header->prev); header->~BlockIndexHeader(); (Traits::free)(header); header = prev; } } template inline bool enqueue(U&& element) { index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed); index_t newTailIndex = 1 + currentTailIndex; if ((currentTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { // We reached the end of a block, start a new one auto startBlock = this->tailBlock; auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed; if (this->tailBlock != nullptr && this->tailBlock->next->ConcurrentQueue::Block::template is_empty()) { // We can re-use the block ahead of us, it's empty! this->tailBlock = this->tailBlock->next; this->tailBlock->ConcurrentQueue::Block::template reset_empty(); // We'll put the block on the block index (guaranteed to be room since we're conceptually removing the // last block from it first -- except instead of removing then adding, we can just overwrite). // Note that there must be a valid block index here, since even if allocation failed in the ctor, // it would have been re-attempted when adding the first block to the queue; since there is such // a block, a block index must have been successfully allocated. } else { // Whatever head value we see here is >= the last value we saw here (relatively), // and <= its current value. Since we have the most recent tail, the head must be // <= to it. 
auto head = this->headIndex.load(std::memory_order_relaxed); assert(!details::circular_less_than(currentTailIndex, head)); if (!details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) { // We can't enqueue in another block because there's not enough leeway -- the // tail could surpass the head by the time the block fills up! (Or we'll exceed // the size limit, if the second part of the condition was true.) return false; } // We're going to need a new block; check that the block index has room if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize) { // Hmm, the circular block index is already full -- we'll need // to allocate a new index. Note pr_blockIndexRaw can only be nullptr if // the initial allocation failed in the constructor. MOODYCAMEL_CONSTEXPR_IF (allocMode == CannotAlloc) { return false; } else if (!new_block_index(pr_blockIndexSlotsUsed)) { return false; } } // Insert a new block in the circular linked list auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); if (newBlock == nullptr) { return false; } #ifdef MCDBGQ_TRACKMEM newBlock->owner = this; #endif newBlock->ConcurrentQueue::Block::template reset_empty(); if (this->tailBlock == nullptr) { newBlock->next = newBlock; } else { newBlock->next = this->tailBlock->next; this->tailBlock->next = newBlock; } this->tailBlock = newBlock; ++pr_blockIndexSlotsUsed; } MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast(nullptr)) T(std::forward(element)))) { // The constructor may throw. We want the element not to appear in the queue in // that case (without corrupting the queue): MOODYCAMEL_TRY { new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); } MOODYCAMEL_CATCH (...) { // Revert change to the current block, but leave the new block available // for next time pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; this->tailBlock = startBlock == nullptr ? this->tailBlock : startBlock; MOODYCAMEL_RETHROW; } } else { (void)startBlock; (void)originalBlockIndexSlotsUsed; } // Add block to block index auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; entry.base = currentTailIndex; entry.block = this->tailBlock; blockIndex.load(std::memory_order_relaxed)->front.store(pr_blockIndexFront, std::memory_order_release); pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast(nullptr)) T(std::forward(element)))) { this->tailIndex.store(newTailIndex, std::memory_order_release); return true; } } // Enqueue new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); this->tailIndex.store(newTailIndex, std::memory_order_release); return true; } template bool dequeue(U& element) { auto tail = this->tailIndex.load(std::memory_order_relaxed); auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); if (details::circular_less_than(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) { // Might be something to dequeue, let's give it a try // Note that this if is purely for performance purposes in the common case when the queue is // empty and the values are eventually consistent -- we may enter here spuriously. 
// Note that whatever the values of overcommit and tail are, they are not going to change (unless we // change them) and must be the same value at this point (inside the if) as when the if condition was // evaluated. // We insert an acquire fence here to synchronize-with the release upon incrementing dequeueOvercommit below. // This ensures that whatever the value we got loaded into overcommit, the load of dequeueOptisticCount in // the fetch_add below will result in a value at least as recent as that (and therefore at least as large). // Note that I believe a compiler (signal) fence here would be sufficient due to the nature of fetch_add (all // read-modify-write operations are guaranteed to work on the latest value in the modification order), but // unfortunately that can't be shown to be correct using only the C++11 standard. // See http://stackoverflow.com/questions/18223161/what-are-the-c11-memory-ordering-guarantees-in-this-corner-case std::atomic_thread_fence(std::memory_order_acquire); // Increment optimistic counter, then check if it went over the boundary auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed); // Note that since dequeueOvercommit must be <= dequeueOptimisticCount (because dequeueOvercommit is only ever // incremented after dequeueOptimisticCount -- this is enforced in the `else` block below), and since we now // have a version of dequeueOptimisticCount that is at least as recent as overcommit (due to the release upon // incrementing dequeueOvercommit and the acquire above that synchronizes with it), overcommit <= myDequeueCount. // However, we can't assert this since both dequeueOptimisticCount and dequeueOvercommit may (independently) // overflow; in such a case, though, the logic still holds since the difference between the two is maintained. // Note that we reload tail here in case it changed; it will be the same value as before or greater, since // this load is sequenced after (happens after) the earlier load above. This is supported by read-read // coherency (as defined in the standard), explained here: http://en.cppreference.com/w/cpp/atomic/memory_order tail = this->tailIndex.load(std::memory_order_acquire); if ((details::likely)(details::circular_less_than(myDequeueCount - overcommit, tail))) { // Guaranteed to be at least one element to dequeue! // Get the index. Note that since there's guaranteed to be at least one element, this // will never exceed tail. We need to do an acquire-release fence here since it's possible // that whatever condition got us to this point was for an earlier enqueued element (that // we already see the memory effects for), but that by the time we increment somebody else // has incremented it, and we need to see the memory effects for *that* element, which is // in such a case is necessarily visible on the thread that incremented it in the first // place with the more current condition (they must have acquired a tail that is at least // as recent). auto index = this->headIndex.fetch_add(1, std::memory_order_acq_rel); // Determine which block the element is in auto localBlockIndex = blockIndex.load(std::memory_order_acquire); auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire); // We need to be careful here about subtracting and dividing because of index wrap-around. 
// When an index wraps, we need to preserve the sign of the offset when dividing it by the // block size (in order to get a correct signed block count offset in all cases): auto headBase = localBlockIndex->entries[localBlockIndexHead].base; auto blockBaseIndex = index & ~static_cast(BLOCK_SIZE - 1); auto offset = static_cast(static_cast::type>(blockBaseIndex - headBase) / static_cast::type>(BLOCK_SIZE)); auto block = localBlockIndex->entries[(localBlockIndexHead + offset) & (localBlockIndex->size - 1)].block; // Dequeue auto& el = *((*block)[index]); if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) { // Make sure the element is still fully dequeued and destroyed even if the assignment // throws struct Guard { Block* block; index_t index; ~Guard() { (*block)[index]->~T(); block->ConcurrentQueue::Block::template set_empty(index); } } guard = { block, index }; element = std::move(el); // NOLINT } else { element = std::move(el); // NOLINT el.~T(); // NOLINT block->ConcurrentQueue::Block::template set_empty(index); } return true; } else { // Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent this->dequeueOvercommit.fetch_add(1, std::memory_order_release); // Release so that the fetch_add on dequeueOptimisticCount is guaranteed to happen before this write } } return false; } template bool MOODYCAMEL_NO_TSAN enqueue_bulk(It itemFirst, size_t count) { // First, we need to make sure we have enough room to enqueue all of the elements; // this means pre-allocating blocks and putting them in the block index (but only if // all the allocations succeeded). index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed); auto startBlock = this->tailBlock; auto originalBlockIndexFront = pr_blockIndexFront; auto originalBlockIndexSlotsUsed = pr_blockIndexSlotsUsed; Block* firstAllocatedBlock = nullptr; // Figure out how many blocks we'll need to allocate, and do so size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1)); index_t currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); if (blockBaseDiff > 0) { // Allocate as many blocks as possible from ahead while (blockBaseDiff > 0 && this->tailBlock != nullptr && this->tailBlock->next != firstAllocatedBlock && this->tailBlock->next->ConcurrentQueue::Block::template is_empty()) { blockBaseDiff -= static_cast(BLOCK_SIZE); currentTailIndex += static_cast(BLOCK_SIZE); this->tailBlock = this->tailBlock->next; firstAllocatedBlock = firstAllocatedBlock == nullptr ? 
this->tailBlock : firstAllocatedBlock; auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; entry.base = currentTailIndex; entry.block = this->tailBlock; pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); } // Now allocate as many blocks as necessary from the block pool while (blockBaseDiff > 0) { blockBaseDiff -= static_cast(BLOCK_SIZE); currentTailIndex += static_cast(BLOCK_SIZE); auto head = this->headIndex.load(std::memory_order_relaxed); assert(!details::circular_less_than(currentTailIndex, head)); bool full = !details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head)); if (pr_blockIndexRaw == nullptr || pr_blockIndexSlotsUsed == pr_blockIndexSize || full) { MOODYCAMEL_CONSTEXPR_IF (allocMode == CannotAlloc) { // Failed to allocate, undo changes (but keep injected blocks) pr_blockIndexFront = originalBlockIndexFront; pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock; return false; } else if (full || !new_block_index(originalBlockIndexSlotsUsed)) { // Failed to allocate, undo changes (but keep injected blocks) pr_blockIndexFront = originalBlockIndexFront; pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock; return false; } // pr_blockIndexFront is updated inside new_block_index, so we need to // update our fallback value too (since we keep the new index even if we // later fail) originalBlockIndexFront = originalBlockIndexSlotsUsed; } // Insert a new block in the circular linked list auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); if (newBlock == nullptr) { pr_blockIndexFront = originalBlockIndexFront; pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; this->tailBlock = startBlock == nullptr ? firstAllocatedBlock : startBlock; return false; } #ifdef MCDBGQ_TRACKMEM newBlock->owner = this; #endif newBlock->ConcurrentQueue::Block::template set_all_empty(); if (this->tailBlock == nullptr) { newBlock->next = newBlock; } else { newBlock->next = this->tailBlock->next; this->tailBlock->next = newBlock; } this->tailBlock = newBlock; firstAllocatedBlock = firstAllocatedBlock == nullptr ? this->tailBlock : firstAllocatedBlock; ++pr_blockIndexSlotsUsed; auto& entry = blockIndex.load(std::memory_order_relaxed)->entries[pr_blockIndexFront]; entry.base = currentTailIndex; entry.block = this->tailBlock; pr_blockIndexFront = (pr_blockIndexFront + 1) & (pr_blockIndexSize - 1); } // Excellent, all allocations succeeded. 
Reset each block's emptiness before we fill them up, and // publish the new block index front auto block = firstAllocatedBlock; while (true) { block->ConcurrentQueue::Block::template reset_empty(); if (block == this->tailBlock) { break; } block = block->next; } MOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast(nullptr)) T(details::deref_noexcept(itemFirst)))) { blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release); } } // Enqueue, one block at a time index_t newTailIndex = startTailIndex + static_cast(count); currentTailIndex = startTailIndex; auto endBlock = this->tailBlock; this->tailBlock = startBlock; assert((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0); if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) { this->tailBlock = firstAllocatedBlock; } while (true) { index_t stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); if (details::circular_less_than(newTailIndex, stopIndex)) { stopIndex = newTailIndex; } MOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast(nullptr)) T(details::deref_noexcept(itemFirst)))) { while (currentTailIndex != stopIndex) { new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++); } } else { MOODYCAMEL_TRY { while (currentTailIndex != stopIndex) { // Must use copy constructor even if move constructor is available // because we may have to revert if there's an exception. // Sorry about the horrible templated next line, but it was the only way // to disable moving *at compile time*, which is important because a type // may only define a (noexcept) move constructor, and so calls to the // cctor will not compile, even if they are in an if branch that will never // be executed new ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if(nullptr)) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst)); ++currentTailIndex; ++itemFirst; } } MOODYCAMEL_CATCH (...) { // Oh dear, an exception's been thrown -- destroy the elements that // were enqueued so far and revert the entire bulk operation (we'll keep // any allocated blocks in our linked list for later, though). auto constructedStopIndex = currentTailIndex; auto lastBlockEnqueued = this->tailBlock; pr_blockIndexFront = originalBlockIndexFront; pr_blockIndexSlotsUsed = originalBlockIndexSlotsUsed; this->tailBlock = startBlock == nullptr ? 
firstAllocatedBlock : startBlock; if (!details::is_trivially_destructible::value) { auto block = startBlock; if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { block = firstAllocatedBlock; } currentTailIndex = startTailIndex; while (true) { stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); if (details::circular_less_than(constructedStopIndex, stopIndex)) { stopIndex = constructedStopIndex; } while (currentTailIndex != stopIndex) { (*block)[currentTailIndex++]->~T(); } if (block == lastBlockEnqueued) { break; } block = block->next; } } MOODYCAMEL_RETHROW; } } if (this->tailBlock == endBlock) { assert(currentTailIndex == newTailIndex); break; } this->tailBlock = this->tailBlock->next; } MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast(nullptr)) T(details::deref_noexcept(itemFirst)))) { if (firstAllocatedBlock != nullptr) blockIndex.load(std::memory_order_relaxed)->front.store((pr_blockIndexFront - 1) & (pr_blockIndexSize - 1), std::memory_order_release); } this->tailIndex.store(newTailIndex, std::memory_order_release); return true; } template size_t dequeue_bulk(It& itemFirst, size_t max) { auto tail = this->tailIndex.load(std::memory_order_relaxed); auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); auto desiredCount = static_cast(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit)); if (details::circular_less_than(0, desiredCount)) { desiredCount = desiredCount < max ? desiredCount : max; std::atomic_thread_fence(std::memory_order_acquire); auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed); tail = this->tailIndex.load(std::memory_order_acquire); auto actualCount = static_cast(tail - (myDequeueCount - overcommit)); if (details::circular_less_than(0, actualCount)) { actualCount = desiredCount < actualCount ? desiredCount : actualCount; if (actualCount < desiredCount) { this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release); } // Get the first index. Note that since there's guaranteed to be at least actualCount elements, this // will never exceed tail. auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel); // Determine which block the first element is in auto localBlockIndex = blockIndex.load(std::memory_order_acquire); auto localBlockIndexHead = localBlockIndex->front.load(std::memory_order_acquire); auto headBase = localBlockIndex->entries[localBlockIndexHead].base; auto firstBlockBaseIndex = firstIndex & ~static_cast(BLOCK_SIZE - 1); auto offset = static_cast(static_cast::type>(firstBlockBaseIndex - headBase) / static_cast::type>(BLOCK_SIZE)); auto indexIndex = (localBlockIndexHead + offset) & (localBlockIndex->size - 1); // Iterate the blocks and dequeue auto index = firstIndex; do { auto firstIndexInBlock = index; index_t endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? 
firstIndex + static_cast(actualCount) : endIndex; auto block = localBlockIndex->entries[indexIndex].block; if (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) { while (index != endIndex) { auto& el = *((*block)[index]); *itemFirst++ = std::move(el); el.~T(); ++index; } } else { MOODYCAMEL_TRY { while (index != endIndex) { auto& el = *((*block)[index]); *itemFirst = std::move(el); ++itemFirst; el.~T(); ++index; } } MOODYCAMEL_CATCH (...) { // It's too late to revert the dequeue, but we can make sure that all // the dequeued objects are properly destroyed and the block index // (and empty count) are properly updated before we propagate the exception do { block = localBlockIndex->entries[indexIndex].block; while (index != endIndex) { (*block)[index++]->~T(); } block->ConcurrentQueue::Block::template set_many_empty(firstIndexInBlock, static_cast(endIndex - firstIndexInBlock)); indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1); firstIndexInBlock = index; endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? firstIndex + static_cast(actualCount) : endIndex; } while (index != firstIndex + actualCount); MOODYCAMEL_RETHROW; } } block->ConcurrentQueue::Block::template set_many_empty(firstIndexInBlock, static_cast(endIndex - firstIndexInBlock)); indexIndex = (indexIndex + 1) & (localBlockIndex->size - 1); } while (index != firstIndex + actualCount); return actualCount; } else { // Wasn't anything to dequeue after all; make the effective dequeue count eventually consistent this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release); } } return 0; } private: struct BlockIndexEntry { index_t base; Block* block; }; struct BlockIndexHeader { size_t size; std::atomic front; // Current slot (not next, like pr_blockIndexFront) BlockIndexEntry* entries; void* prev; }; bool new_block_index(size_t numberOfFilledSlotsToExpose) { auto prevBlockSizeMask = pr_blockIndexSize - 1; // Create the new block pr_blockIndexSize <<= 1; auto newRawPtr = static_cast((Traits::malloc)(sizeof(BlockIndexHeader) + std::alignment_of::value - 1 + sizeof(BlockIndexEntry) * pr_blockIndexSize)); if (newRawPtr == nullptr) { pr_blockIndexSize >>= 1; // Reset to allow graceful retry return false; } auto newBlockIndexEntries = reinterpret_cast(details::align_for(newRawPtr + sizeof(BlockIndexHeader))); // Copy in all the old indices, if any size_t j = 0; if (pr_blockIndexSlotsUsed != 0) { auto i = (pr_blockIndexFront - pr_blockIndexSlotsUsed) & prevBlockSizeMask; do { newBlockIndexEntries[j++] = pr_blockIndexEntries[i]; i = (i + 1) & prevBlockSizeMask; } while (i != pr_blockIndexFront); } // Update everything auto header = new (newRawPtr) BlockIndexHeader; header->size = pr_blockIndexSize; header->front.store(numberOfFilledSlotsToExpose - 1, std::memory_order_relaxed); header->entries = newBlockIndexEntries; header->prev = pr_blockIndexRaw; // we link the new block to the old one so we can free it later pr_blockIndexFront = j; pr_blockIndexEntries = newBlockIndexEntries; pr_blockIndexRaw = newRawPtr; blockIndex.store(header, std::memory_order_release); return true; } private: std::atomic blockIndex; // To be used by producer only -- consumer must use the ones in referenced by blockIndex size_t pr_blockIndexSlotsUsed; size_t pr_blockIndexSize; size_t pr_blockIndexFront; // Next slot (not current) BlockIndexEntry* pr_blockIndexEntries; void* 
pr_blockIndexRaw; #ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG public: ExplicitProducer* nextExplicitProducer; private: #endif #ifdef MCDBGQ_TRACKMEM friend struct MemStats; #endif }; ////////////////////////////////// // Implicit queue ////////////////////////////////// struct ImplicitProducer : public ProducerBase { ImplicitProducer(ConcurrentQueue* parent_) : ProducerBase(parent_, false), nextBlockIndexCapacity(IMPLICIT_INITIAL_INDEX_SIZE), blockIndex(nullptr) { new_block_index(); } ~ImplicitProducer() { // Note that since we're in the destructor we can assume that all enqueue/dequeue operations // completed already; this means that all undequeued elements are placed contiguously across // contiguous blocks, and that only the first and last remaining blocks can be only partially // empty (all other remaining blocks must be completely full). #ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED // Unregister ourselves for thread termination notification if (!this->inactive.load(std::memory_order_relaxed)) { details::ThreadExitNotifier::unsubscribe(&threadExitListener); } #endif // Destroy all remaining elements! auto tail = this->tailIndex.load(std::memory_order_relaxed); auto index = this->headIndex.load(std::memory_order_relaxed); Block* block = nullptr; assert(index == tail || details::circular_less_than(index, tail)); bool forceFreeLastBlock = index != tail; // If we enter the loop, then the last (tail) block will not be freed while (index != tail) { if ((index & static_cast(BLOCK_SIZE - 1)) == 0 || block == nullptr) { if (block != nullptr) { // Free the old block this->parent->add_block_to_free_list(block); } block = get_block_index_entry_for_index(index)->value.load(std::memory_order_relaxed); } ((*block)[index])->~T(); ++index; } // Even if the queue is empty, there's still one block that's not on the free list // (unless the head index reached the end of it, in which case the tail will be poised // to create a new block). 
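// Illustrative sketch (editor's note, not part of the original source), with a hypothetical
// BLOCK_SIZE of 32: if tail == 96 and every element was already dequeued, the final block
// ended exactly on a boundary (96 & 31 == 0); the dequeue path saw all 32 of its slots marked
// empty and already returned it to the free list, so there is nothing left to release here.
// If instead tail == 100, the tail block holds only 4 used slots (100 & 31 == 4); a block is
// only recycled once all BLOCK_SIZE slots are marked empty, so the dequeue path never freed
// it and the check below must hand it back. forceFreeLastBlock covers the remaining case
// where the loop above had to destroy leftover elements out of that same tail block.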
if (this->tailBlock != nullptr && (forceFreeLastBlock || (tail & static_cast(BLOCK_SIZE - 1)) != 0)) { this->parent->add_block_to_free_list(this->tailBlock); } // Destroy block index auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); if (localBlockIndex != nullptr) { for (size_t i = 0; i != localBlockIndex->capacity; ++i) { localBlockIndex->index[i]->~BlockIndexEntry(); } do { auto prev = localBlockIndex->prev; localBlockIndex->~BlockIndexHeader(); (Traits::free)(localBlockIndex); localBlockIndex = prev; } while (localBlockIndex != nullptr); } } template inline bool enqueue(U&& element) { index_t currentTailIndex = this->tailIndex.load(std::memory_order_relaxed); index_t newTailIndex = 1 + currentTailIndex; if ((currentTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { // We reached the end of a block, start a new one auto head = this->headIndex.load(std::memory_order_relaxed); assert(!details::circular_less_than(currentTailIndex, head)); if (!details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head))) { return false; } #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX debug::DebugLock lock(mutex); #endif // Find out where we'll be inserting this block in the block index BlockIndexEntry* idxEntry; if (!insert_block_index_entry(idxEntry, currentTailIndex)) { return false; } // Get ahold of a new block auto newBlock = this->parent->ConcurrentQueue::template requisition_block(); if (newBlock == nullptr) { rewind_block_index_tail(); idxEntry->value.store(nullptr, std::memory_order_relaxed); return false; } #ifdef MCDBGQ_TRACKMEM newBlock->owner = this; #endif newBlock->ConcurrentQueue::Block::template reset_empty(); MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast(nullptr)) T(std::forward(element)))) { // May throw, try to insert now before we publish the fact that we have this new block MOODYCAMEL_TRY { new ((*newBlock)[currentTailIndex]) T(std::forward(element)); } MOODYCAMEL_CATCH (...) 
{ rewind_block_index_tail(); idxEntry->value.store(nullptr, std::memory_order_relaxed); this->parent->add_block_to_free_list(newBlock); MOODYCAMEL_RETHROW; } } // Insert the new block into the index idxEntry->value.store(newBlock, std::memory_order_relaxed); this->tailBlock = newBlock; MOODYCAMEL_CONSTEXPR_IF (!MOODYCAMEL_NOEXCEPT_CTOR(T, U, new (static_cast(nullptr)) T(std::forward(element)))) { this->tailIndex.store(newTailIndex, std::memory_order_release); return true; } } // Enqueue new ((*this->tailBlock)[currentTailIndex]) T(std::forward(element)); this->tailIndex.store(newTailIndex, std::memory_order_release); return true; } template bool dequeue(U& element) { // See ExplicitProducer::dequeue for rationale and explanation index_t tail = this->tailIndex.load(std::memory_order_relaxed); index_t overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); if (details::circular_less_than(this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit, tail)) { std::atomic_thread_fence(std::memory_order_acquire); index_t myDequeueCount = this->dequeueOptimisticCount.fetch_add(1, std::memory_order_relaxed); tail = this->tailIndex.load(std::memory_order_acquire); if ((details::likely)(details::circular_less_than(myDequeueCount - overcommit, tail))) { index_t index = this->headIndex.fetch_add(1, std::memory_order_acq_rel); // Determine which block the element is in auto entry = get_block_index_entry_for_index(index); // Dequeue auto block = entry->value.load(std::memory_order_relaxed); auto& el = *((*block)[index]); if (!MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, element = std::move(el))) { #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX // Note: Acquiring the mutex with every dequeue instead of only when a block // is released is very sub-optimal, but it is, after all, purely debug code. debug::DebugLock lock(producer->mutex); #endif struct Guard { Block* block; index_t index; BlockIndexEntry* entry; ConcurrentQueue* parent; ~Guard() { (*block)[index]->~T(); if (block->ConcurrentQueue::Block::template set_empty(index)) { entry->value.store(nullptr, std::memory_order_relaxed); parent->add_block_to_free_list(block); } } } guard = { block, index, entry, this->parent }; element = std::move(el); // NOLINT } else { element = std::move(el); // NOLINT el.~T(); // NOLINT if (block->ConcurrentQueue::Block::template set_empty(index)) { { #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX debug::DebugLock lock(mutex); #endif // Add the block back into the global free pool (and remove from block index) entry->value.store(nullptr, std::memory_order_relaxed); } this->parent->add_block_to_free_list(block); // releases the above store } } return true; } else { this->dequeueOvercommit.fetch_add(1, std::memory_order_release); } } return false; } #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable: 4706) // assignment within conditional expression #endif template bool enqueue_bulk(It itemFirst, size_t count) { // First, we need to make sure we have enough room to enqueue all of the elements; // this means pre-allocating blocks and putting them in the block index (but only if // all the allocations succeeded). // Note that the tailBlock we start off with may not be owned by us any more; // this happens if it was filled up exactly to the top (setting tailIndex to // the first index of the next block which is not yet allocated), then dequeued // completely (putting it on the free list) before we enqueue again. 
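// Illustrative sketch (editor's note, not part of the original source), with a hypothetical
// BLOCK_SIZE of 4: enqueue items 0..3 -- the block fills exactly and tailIndex becomes 4,
// the first slot of a block that has not been allocated yet. A consumer then dequeues all
// four items; the final set_empty() sees a completely empty block, nulls its index entry and
// pushes the block onto the free list. this->tailBlock still points at that block, but any
// producer may now requisition it, which is why, when startTailIndex sits on a block
// boundary, the code below starts writing at firstAllocatedBlock and never links the new
// chain through (or stores into) the stale tailBlock.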
index_t startTailIndex = this->tailIndex.load(std::memory_order_relaxed); auto startBlock = this->tailBlock; Block* firstAllocatedBlock = nullptr; auto endBlock = this->tailBlock; // Figure out how many blocks we'll need to allocate, and do so size_t blockBaseDiff = ((startTailIndex + count - 1) & ~static_cast(BLOCK_SIZE - 1)) - ((startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1)); index_t currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); if (blockBaseDiff > 0) { #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX debug::DebugLock lock(mutex); #endif do { blockBaseDiff -= static_cast(BLOCK_SIZE); currentTailIndex += static_cast(BLOCK_SIZE); // Find out where we'll be inserting this block in the block index BlockIndexEntry* idxEntry = nullptr; // initialization here unnecessary but compiler can't always tell Block* newBlock; bool indexInserted = false; auto head = this->headIndex.load(std::memory_order_relaxed); assert(!details::circular_less_than(currentTailIndex, head)); bool full = !details::circular_less_than(head, currentTailIndex + BLOCK_SIZE) || (MAX_SUBQUEUE_SIZE != details::const_numeric_max::value && (MAX_SUBQUEUE_SIZE == 0 || MAX_SUBQUEUE_SIZE - BLOCK_SIZE < currentTailIndex - head)); if (full || !(indexInserted = insert_block_index_entry(idxEntry, currentTailIndex)) || (newBlock = this->parent->ConcurrentQueue::template requisition_block()) == nullptr) { // Index allocation or block allocation failed; revert any other allocations // and index insertions done so far for this operation if (indexInserted) { rewind_block_index_tail(); idxEntry->value.store(nullptr, std::memory_order_relaxed); } currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) { currentTailIndex += static_cast(BLOCK_SIZE); idxEntry = get_block_index_entry_for_index(currentTailIndex); idxEntry->value.store(nullptr, std::memory_order_relaxed); rewind_block_index_tail(); } this->parent->add_blocks_to_free_list(firstAllocatedBlock); this->tailBlock = startBlock; return false; } #ifdef MCDBGQ_TRACKMEM newBlock->owner = this; #endif newBlock->ConcurrentQueue::Block::template reset_empty(); newBlock->next = nullptr; // Insert the new block into the index idxEntry->value.store(newBlock, std::memory_order_relaxed); // Store the chain of blocks so that we can undo if later allocations fail, // and so that we can find the blocks when we do the actual enqueueing if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr) { assert(this->tailBlock != nullptr); this->tailBlock->next = newBlock; } this->tailBlock = newBlock; endBlock = newBlock; firstAllocatedBlock = firstAllocatedBlock == nullptr ? 
newBlock : firstAllocatedBlock; } while (blockBaseDiff > 0); } // Enqueue, one block at a time index_t newTailIndex = startTailIndex + static_cast(count); currentTailIndex = startTailIndex; this->tailBlock = startBlock; assert((startTailIndex & static_cast(BLOCK_SIZE - 1)) != 0 || firstAllocatedBlock != nullptr || count == 0); if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0 && firstAllocatedBlock != nullptr) { this->tailBlock = firstAllocatedBlock; } while (true) { index_t stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); if (details::circular_less_than(newTailIndex, stopIndex)) { stopIndex = newTailIndex; } MOODYCAMEL_CONSTEXPR_IF (MOODYCAMEL_NOEXCEPT_CTOR(T, decltype(*itemFirst), new (static_cast(nullptr)) T(details::deref_noexcept(itemFirst)))) { while (currentTailIndex != stopIndex) { new ((*this->tailBlock)[currentTailIndex++]) T(*itemFirst++); } } else { MOODYCAMEL_TRY { while (currentTailIndex != stopIndex) { new ((*this->tailBlock)[currentTailIndex]) T(details::nomove_if(nullptr)) T(details::deref_noexcept(itemFirst)))>::eval(*itemFirst)); ++currentTailIndex; ++itemFirst; } } MOODYCAMEL_CATCH (...) { auto constructedStopIndex = currentTailIndex; auto lastBlockEnqueued = this->tailBlock; if (!details::is_trivially_destructible::value) { auto block = startBlock; if ((startTailIndex & static_cast(BLOCK_SIZE - 1)) == 0) { block = firstAllocatedBlock; } currentTailIndex = startTailIndex; while (true) { stopIndex = (currentTailIndex & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); if (details::circular_less_than(constructedStopIndex, stopIndex)) { stopIndex = constructedStopIndex; } while (currentTailIndex != stopIndex) { (*block)[currentTailIndex++]->~T(); } if (block == lastBlockEnqueued) { break; } block = block->next; } } currentTailIndex = (startTailIndex - 1) & ~static_cast(BLOCK_SIZE - 1); for (auto block = firstAllocatedBlock; block != nullptr; block = block->next) { currentTailIndex += static_cast(BLOCK_SIZE); auto idxEntry = get_block_index_entry_for_index(currentTailIndex); idxEntry->value.store(nullptr, std::memory_order_relaxed); rewind_block_index_tail(); } this->parent->add_blocks_to_free_list(firstAllocatedBlock); this->tailBlock = startBlock; MOODYCAMEL_RETHROW; } } if (this->tailBlock == endBlock) { assert(currentTailIndex == newTailIndex); break; } this->tailBlock = this->tailBlock->next; } this->tailIndex.store(newTailIndex, std::memory_order_release); return true; } #ifdef _MSC_VER #pragma warning(pop) #endif template size_t dequeue_bulk(It& itemFirst, size_t max) { auto tail = this->tailIndex.load(std::memory_order_relaxed); auto overcommit = this->dequeueOvercommit.load(std::memory_order_relaxed); auto desiredCount = static_cast(tail - (this->dequeueOptimisticCount.load(std::memory_order_relaxed) - overcommit)); if (details::circular_less_than(0, desiredCount)) { desiredCount = desiredCount < max ? desiredCount : max; std::atomic_thread_fence(std::memory_order_acquire); auto myDequeueCount = this->dequeueOptimisticCount.fetch_add(desiredCount, std::memory_order_relaxed); tail = this->tailIndex.load(std::memory_order_acquire); auto actualCount = static_cast(tail - (myDequeueCount - overcommit)); if (details::circular_less_than(0, actualCount)) { actualCount = desiredCount < actualCount ? desiredCount : actualCount; if (actualCount < desiredCount) { this->dequeueOvercommit.fetch_add(desiredCount - actualCount, std::memory_order_release); } // Get the first index. 
Note that since there's guaranteed to be at least actualCount elements, this // will never exceed tail. auto firstIndex = this->headIndex.fetch_add(actualCount, std::memory_order_acq_rel); // Iterate the blocks and dequeue auto index = firstIndex; BlockIndexHeader* localBlockIndex; auto indexIndex = get_block_index_index_for_index(index, localBlockIndex); do { auto blockStartIndex = index; index_t endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? firstIndex + static_cast(actualCount) : endIndex; auto entry = localBlockIndex->index[indexIndex]; auto block = entry->value.load(std::memory_order_relaxed); if (MOODYCAMEL_NOEXCEPT_ASSIGN(T, T&&, details::deref_noexcept(itemFirst) = std::move((*(*block)[index])))) { while (index != endIndex) { auto& el = *((*block)[index]); *itemFirst++ = std::move(el); el.~T(); ++index; } } else { MOODYCAMEL_TRY { while (index != endIndex) { auto& el = *((*block)[index]); *itemFirst = std::move(el); ++itemFirst; el.~T(); ++index; } } MOODYCAMEL_CATCH (...) { do { entry = localBlockIndex->index[indexIndex]; block = entry->value.load(std::memory_order_relaxed); while (index != endIndex) { (*block)[index++]->~T(); } if (block->ConcurrentQueue::Block::template set_many_empty(blockStartIndex, static_cast(endIndex - blockStartIndex))) { #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX debug::DebugLock lock(mutex); #endif entry->value.store(nullptr, std::memory_order_relaxed); this->parent->add_block_to_free_list(block); } indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1); blockStartIndex = index; endIndex = (index & ~static_cast(BLOCK_SIZE - 1)) + static_cast(BLOCK_SIZE); endIndex = details::circular_less_than(firstIndex + static_cast(actualCount), endIndex) ? firstIndex + static_cast(actualCount) : endIndex; } while (index != firstIndex + actualCount); MOODYCAMEL_RETHROW; } } if (block->ConcurrentQueue::Block::template set_many_empty(blockStartIndex, static_cast(endIndex - blockStartIndex))) { { #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX debug::DebugLock lock(mutex); #endif // Note that the set_many_empty above did a release, meaning that anybody who acquires the block // we're about to free can use it safely since our writes (and reads!) will have happened-before then. 
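// Rough synchronization sketch (editor's note, not part of the original source):
//   consumer: element destructors run -> set_many_empty(...) publishes with a release
//             operation -> index entry nulled -> block pushed onto the free list
//   producer: later pops that same block from the free list with an acquire operation
//             -> placement-new into the same slots
// The release/acquire pairing (the precise fences live inside Block and FreeList) means the
// producer's reuse of the memory happens-after the destructions performed here.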
entry->value.store(nullptr, std::memory_order_relaxed); } this->parent->add_block_to_free_list(block); // releases the above store } indexIndex = (indexIndex + 1) & (localBlockIndex->capacity - 1); } while (index != firstIndex + actualCount); return actualCount; } else { this->dequeueOvercommit.fetch_add(desiredCount, std::memory_order_release); } } return 0; } private: // The block size must be > 1, so any number with the low bit set is an invalid block base index static const index_t INVALID_BLOCK_BASE = 1; struct BlockIndexEntry { std::atomic key; std::atomic value; }; struct BlockIndexHeader { size_t capacity; std::atomic tail; BlockIndexEntry* entries; BlockIndexEntry** index; BlockIndexHeader* prev; }; template inline bool insert_block_index_entry(BlockIndexEntry*& idxEntry, index_t blockStartIndex) { auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); // We're the only writer thread, relaxed is OK if (localBlockIndex == nullptr) { return false; // this can happen if new_block_index failed in the constructor } size_t newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1); idxEntry = localBlockIndex->index[newTail]; if (idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE || idxEntry->value.load(std::memory_order_relaxed) == nullptr) { idxEntry->key.store(blockStartIndex, std::memory_order_relaxed); localBlockIndex->tail.store(newTail, std::memory_order_release); return true; } // No room in the old block index, try to allocate another one! MOODYCAMEL_CONSTEXPR_IF (allocMode == CannotAlloc) { return false; } else if (!new_block_index()) { return false; } else { localBlockIndex = blockIndex.load(std::memory_order_relaxed); newTail = (localBlockIndex->tail.load(std::memory_order_relaxed) + 1) & (localBlockIndex->capacity - 1); idxEntry = localBlockIndex->index[newTail]; assert(idxEntry->key.load(std::memory_order_relaxed) == INVALID_BLOCK_BASE); idxEntry->key.store(blockStartIndex, std::memory_order_relaxed); localBlockIndex->tail.store(newTail, std::memory_order_release); return true; } } inline void rewind_block_index_tail() { auto localBlockIndex = blockIndex.load(std::memory_order_relaxed); localBlockIndex->tail.store((localBlockIndex->tail.load(std::memory_order_relaxed) - 1) & (localBlockIndex->capacity - 1), std::memory_order_relaxed); } inline BlockIndexEntry* get_block_index_entry_for_index(index_t index) const { BlockIndexHeader* localBlockIndex; auto idx = get_block_index_index_for_index(index, localBlockIndex); return localBlockIndex->index[idx]; } inline size_t get_block_index_index_for_index(index_t index, BlockIndexHeader*& localBlockIndex) const { #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX debug::DebugLock lock(mutex); #endif index &= ~static_cast(BLOCK_SIZE - 1); localBlockIndex = blockIndex.load(std::memory_order_acquire); auto tail = localBlockIndex->tail.load(std::memory_order_acquire); auto tailBase = localBlockIndex->index[tail]->key.load(std::memory_order_relaxed); assert(tailBase != INVALID_BLOCK_BASE); // Note: Must use division instead of shift because the index may wrap around, causing a negative // offset, whose negativity we want to preserve auto offset = static_cast(static_cast::type>(index - tailBase) / static_cast::type>(BLOCK_SIZE)); size_t idx = (tail + offset) & (localBlockIndex->capacity - 1); assert(localBlockIndex->index[idx]->key.load(std::memory_order_relaxed) == index && localBlockIndex->index[idx]->value.load(std::memory_order_relaxed) != nullptr); return idx; } 
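// Worked example for the signed-offset arithmetic above (editor's note, not part of the
// original source); the concrete numbers are purely illustrative, with BLOCK_SIZE == 32 and
// a 32-bit index_t:
//   - dequeueing behind the newest block: index base == 64, tailBase == 128. The unsigned
//     difference 64 - 128 wraps, but reinterpreted as the signed counterpart it is -64, and
//     -64 / 32 == -2, so we step two slots backwards from tail (modulo capacity).
//   - index_t wrap-around: tailBase == 0 (the tail block was created just after the index
//     wrapped) and index base == 0xFFFFFFE0. The unsigned difference is 0xFFFFFFE0, but as a
//     signed value it is -32, i.e. exactly one block behind tail, which is the right answer;
//     treating it as unsigned would give a huge bogus forward offset.
// Shifting the unsigned difference right would discard the sign (and right-shifting a
// negative signed value is implementation-defined before C++20), which is why the code casts
// to a signed type and divides.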
bool new_block_index() { auto prev = blockIndex.load(std::memory_order_relaxed); size_t prevCapacity = prev == nullptr ? 0 : prev->capacity; auto entryCount = prev == nullptr ? nextBlockIndexCapacity : prevCapacity; auto raw = static_cast((Traits::malloc)( sizeof(BlockIndexHeader) + std::alignment_of::value - 1 + sizeof(BlockIndexEntry) * entryCount + std::alignment_of::value - 1 + sizeof(BlockIndexEntry*) * nextBlockIndexCapacity)); if (raw == nullptr) { return false; } auto header = new (raw) BlockIndexHeader; auto entries = reinterpret_cast(details::align_for(raw + sizeof(BlockIndexHeader))); auto index = reinterpret_cast(details::align_for(reinterpret_cast(entries) + sizeof(BlockIndexEntry) * entryCount)); if (prev != nullptr) { auto prevTail = prev->tail.load(std::memory_order_relaxed); auto prevPos = prevTail; size_t i = 0; do { prevPos = (prevPos + 1) & (prev->capacity - 1); index[i++] = prev->index[prevPos]; } while (prevPos != prevTail); assert(i == prevCapacity); } for (size_t i = 0; i != entryCount; ++i) { new (entries + i) BlockIndexEntry; entries[i].key.store(INVALID_BLOCK_BASE, std::memory_order_relaxed); index[prevCapacity + i] = entries + i; } header->prev = prev; header->entries = entries; header->index = index; header->capacity = nextBlockIndexCapacity; header->tail.store((prevCapacity - 1) & (nextBlockIndexCapacity - 1), std::memory_order_relaxed); blockIndex.store(header, std::memory_order_release); nextBlockIndexCapacity <<= 1; return true; } private: size_t nextBlockIndexCapacity; std::atomic blockIndex; #ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED public: details::ThreadExitListener threadExitListener; private: #endif #ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG public: ImplicitProducer* nextImplicitProducer; private: #endif #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODBLOCKINDEX mutable debug::DebugMutex mutex; #endif #ifdef MCDBGQ_TRACKMEM friend struct MemStats; #endif }; ////////////////////////////////// // Block pool manipulation ////////////////////////////////// void populate_initial_block_list(size_t blockCount) { initialBlockPoolSize = blockCount; if (initialBlockPoolSize == 0) { initialBlockPool = nullptr; return; } initialBlockPool = create_array(blockCount); if (initialBlockPool == nullptr) { initialBlockPoolSize = 0; } for (size_t i = 0; i < initialBlockPoolSize; ++i) { initialBlockPool[i].dynamicallyAllocated = false; } } inline Block* try_get_block_from_initial_pool() { if (initialBlockPoolIndex.load(std::memory_order_relaxed) >= initialBlockPoolSize) { return nullptr; } auto index = initialBlockPoolIndex.fetch_add(1, std::memory_order_relaxed); return index < initialBlockPoolSize ? 
(initialBlockPool + index) : nullptr; } inline void add_block_to_free_list(Block* block) { #ifdef MCDBGQ_TRACKMEM block->owner = nullptr; #endif if (!Traits::RECYCLE_ALLOCATED_BLOCKS && block->dynamicallyAllocated) { destroy(block); } else { freeList.add(block); } } inline void add_blocks_to_free_list(Block* block) { while (block != nullptr) { auto next = block->next; add_block_to_free_list(block); block = next; } } inline Block* try_get_block_from_free_list() { return freeList.try_get(); } // Gets a free block from one of the memory pools, or allocates a new one (if applicable) template Block* requisition_block() { auto block = try_get_block_from_initial_pool(); if (block != nullptr) { return block; } block = try_get_block_from_free_list(); if (block != nullptr) { return block; } MOODYCAMEL_CONSTEXPR_IF (canAlloc == CanAlloc) { return create(); } else { return nullptr; } } #ifdef MCDBGQ_TRACKMEM public: struct MemStats { size_t allocatedBlocks; size_t usedBlocks; size_t freeBlocks; size_t ownedBlocksExplicit; size_t ownedBlocksImplicit; size_t implicitProducers; size_t explicitProducers; size_t elementsEnqueued; size_t blockClassBytes; size_t queueClassBytes; size_t implicitBlockIndexBytes; size_t explicitBlockIndexBytes; friend class ConcurrentQueue; private: static MemStats getFor(ConcurrentQueue* q) { MemStats stats = { 0 }; stats.elementsEnqueued = q->size_approx(); auto block = q->freeList.head_unsafe(); while (block != nullptr) { ++stats.allocatedBlocks; ++stats.freeBlocks; block = block->freeListNext.load(std::memory_order_relaxed); } for (auto ptr = q->producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { bool implicit = dynamic_cast(ptr) != nullptr; stats.implicitProducers += implicit ? 1 : 0; stats.explicitProducers += implicit ? 
0 : 1; if (implicit) { auto prod = static_cast(ptr); stats.queueClassBytes += sizeof(ImplicitProducer); auto head = prod->headIndex.load(std::memory_order_relaxed); auto tail = prod->tailIndex.load(std::memory_order_relaxed); auto hash = prod->blockIndex.load(std::memory_order_relaxed); if (hash != nullptr) { for (size_t i = 0; i != hash->capacity; ++i) { if (hash->index[i]->key.load(std::memory_order_relaxed) != ImplicitProducer::INVALID_BLOCK_BASE && hash->index[i]->value.load(std::memory_order_relaxed) != nullptr) { ++stats.allocatedBlocks; ++stats.ownedBlocksImplicit; } } stats.implicitBlockIndexBytes += hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry); for (; hash != nullptr; hash = hash->prev) { stats.implicitBlockIndexBytes += sizeof(typename ImplicitProducer::BlockIndexHeader) + hash->capacity * sizeof(typename ImplicitProducer::BlockIndexEntry*); } } for (; details::circular_less_than(head, tail); head += BLOCK_SIZE) { //auto block = prod->get_block_index_entry_for_index(head); ++stats.usedBlocks; } } else { auto prod = static_cast(ptr); stats.queueClassBytes += sizeof(ExplicitProducer); auto tailBlock = prod->tailBlock; bool wasNonEmpty = false; if (tailBlock != nullptr) { auto block = tailBlock; do { ++stats.allocatedBlocks; if (!block->ConcurrentQueue::Block::template is_empty() || wasNonEmpty) { ++stats.usedBlocks; wasNonEmpty = wasNonEmpty || block != tailBlock; } ++stats.ownedBlocksExplicit; block = block->next; } while (block != tailBlock); } auto index = prod->blockIndex.load(std::memory_order_relaxed); while (index != nullptr) { stats.explicitBlockIndexBytes += sizeof(typename ExplicitProducer::BlockIndexHeader) + index->size * sizeof(typename ExplicitProducer::BlockIndexEntry); index = static_cast(index->prev); } } } auto freeOnInitialPool = q->initialBlockPoolIndex.load(std::memory_order_relaxed) >= q->initialBlockPoolSize ? 0 : q->initialBlockPoolSize - q->initialBlockPoolIndex.load(std::memory_order_relaxed); stats.allocatedBlocks += freeOnInitialPool; stats.freeBlocks += freeOnInitialPool; stats.blockClassBytes = sizeof(Block) * stats.allocatedBlocks; stats.queueClassBytes += sizeof(ConcurrentQueue); return stats; } }; // For debugging only. Not thread-safe. MemStats getMemStats() { return MemStats::getFor(this); } private: friend struct MemStats; #endif ////////////////////////////////// // Producer list manipulation ////////////////////////////////// ProducerBase* recycle_or_create_producer(bool isExplicit) { #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH debug::DebugLock lock(implicitProdMutex); #endif // Try to re-use one first for (auto ptr = producerListTail.load(std::memory_order_acquire); ptr != nullptr; ptr = ptr->next_prod()) { if (ptr->inactive.load(std::memory_order_relaxed) && ptr->isExplicit == isExplicit) { bool expected = true; if (ptr->inactive.compare_exchange_strong(expected, /* desired */ false, std::memory_order_acquire, std::memory_order_relaxed)) { // We caught one! It's been marked as activated, the caller can have it return ptr; } } } return add_producer(isExplicit ? 
static_cast(create(this)) : create(this)); } ProducerBase* add_producer(ProducerBase* producer) { // Handle failed memory allocation if (producer == nullptr) { return nullptr; } producerCount.fetch_add(1, std::memory_order_relaxed); // Add it to the lock-free list auto prevTail = producerListTail.load(std::memory_order_relaxed); do { producer->next = prevTail; } while (!producerListTail.compare_exchange_weak(prevTail, producer, std::memory_order_release, std::memory_order_relaxed)); #ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG if (producer->isExplicit) { auto prevTailExplicit = explicitProducers.load(std::memory_order_relaxed); do { static_cast(producer)->nextExplicitProducer = prevTailExplicit; } while (!explicitProducers.compare_exchange_weak(prevTailExplicit, static_cast(producer), std::memory_order_release, std::memory_order_relaxed)); } else { auto prevTailImplicit = implicitProducers.load(std::memory_order_relaxed); do { static_cast(producer)->nextImplicitProducer = prevTailImplicit; } while (!implicitProducers.compare_exchange_weak(prevTailImplicit, static_cast(producer), std::memory_order_release, std::memory_order_relaxed)); } #endif return producer; } void reown_producers() { // After another instance is moved-into/swapped-with this one, all the // producers we stole still think their parents are the other queue. // So fix them up! for (auto ptr = producerListTail.load(std::memory_order_relaxed); ptr != nullptr; ptr = ptr->next_prod()) { ptr->parent = this; } } ////////////////////////////////// // Implicit producer hash ////////////////////////////////// struct ImplicitProducerKVP { std::atomic key; ImplicitProducer* value; // No need for atomicity since it's only read by the thread that sets it in the first place ImplicitProducerKVP() : value(nullptr) { } ImplicitProducerKVP(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT { key.store(other.key.load(std::memory_order_relaxed), std::memory_order_relaxed); value = other.value; } inline ImplicitProducerKVP& operator=(ImplicitProducerKVP&& other) MOODYCAMEL_NOEXCEPT { swap(other); return *this; } inline void swap(ImplicitProducerKVP& other) MOODYCAMEL_NOEXCEPT { if (this != &other) { details::swap_relaxed(key, other.key); std::swap(value, other.value); } } }; template friend void moodycamel::swap(typename ConcurrentQueue::ImplicitProducerKVP&, typename ConcurrentQueue::ImplicitProducerKVP&) MOODYCAMEL_NOEXCEPT; struct ImplicitProducerHash { size_t capacity; ImplicitProducerKVP* entries; ImplicitProducerHash* prev; }; inline void populate_initial_implicit_producer_hash() { MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) { return; } else { implicitProducerHashCount.store(0, std::memory_order_relaxed); auto hash = &initialImplicitProducerHash; hash->capacity = INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; hash->entries = &initialImplicitProducerHashEntries[0]; for (size_t i = 0; i != INITIAL_IMPLICIT_PRODUCER_HASH_SIZE; ++i) { initialImplicitProducerHashEntries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed); } hash->prev = nullptr; implicitProducerHash.store(hash, std::memory_order_relaxed); } } void swap_implicit_producer_hashes(ConcurrentQueue& other) { MOODYCAMEL_CONSTEXPR_IF (INITIAL_IMPLICIT_PRODUCER_HASH_SIZE == 0) { return; } else { // Swap (assumes our implicit producer hash is initialized) initialImplicitProducerHashEntries.swap(other.initialImplicitProducerHashEntries); initialImplicitProducerHash.entries = &initialImplicitProducerHashEntries[0]; other.initialImplicitProducerHash.entries = 
&other.initialImplicitProducerHashEntries[0]; details::swap_relaxed(implicitProducerHashCount, other.implicitProducerHashCount); details::swap_relaxed(implicitProducerHash, other.implicitProducerHash); if (implicitProducerHash.load(std::memory_order_relaxed) == &other.initialImplicitProducerHash) { implicitProducerHash.store(&initialImplicitProducerHash, std::memory_order_relaxed); } else { ImplicitProducerHash* hash; for (hash = implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &other.initialImplicitProducerHash; hash = hash->prev) { continue; } hash->prev = &initialImplicitProducerHash; } if (other.implicitProducerHash.load(std::memory_order_relaxed) == &initialImplicitProducerHash) { other.implicitProducerHash.store(&other.initialImplicitProducerHash, std::memory_order_relaxed); } else { ImplicitProducerHash* hash; for (hash = other.implicitProducerHash.load(std::memory_order_relaxed); hash->prev != &initialImplicitProducerHash; hash = hash->prev) { continue; } hash->prev = &other.initialImplicitProducerHash; } } } // Only fails (returns nullptr) if memory allocation fails ImplicitProducer* get_or_add_implicit_producer() { // Note that since the data is essentially thread-local (key is thread ID), // there's a reduced need for fences (memory ordering is already consistent // for any individual thread), except for the current table itself. // Start by looking for the thread ID in the current and all previous hash tables. // If it's not found, it must not be in there yet, since this same thread would // have added it previously to one of the tables that we traversed. // Code and algorithm adapted from http://preshing.com/20130605/the-worlds-simplest-lock-free-hash-table #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH debug::DebugLock lock(implicitProdMutex); #endif auto id = details::thread_id(); auto hashedId = details::hash_thread_id(id); auto mainHash = implicitProducerHash.load(std::memory_order_acquire); assert(mainHash != nullptr); // silence clang-tidy and MSVC warnings (hash cannot be null) for (auto hash = mainHash; hash != nullptr; hash = hash->prev) { // Look for the id in this hash auto index = hashedId; while (true) { // Not an infinite loop because at least one slot is free in the hash table index &= hash->capacity - 1u; auto probedKey = hash->entries[index].key.load(std::memory_order_relaxed); if (probedKey == id) { // Found it! If we had to search several hashes deep, though, we should lazily add it // to the current main hash table to avoid the extended search next time. // Note there's guaranteed to be room in the current hash table since every subsequent // table implicitly reserves space for all previous tables (there's only one // implicitProducerHashCount). 
auto value = hash->entries[index].value; if (hash != mainHash) { index = hashedId; while (true) { index &= mainHash->capacity - 1u; auto empty = details::invalid_thread_id; #ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED auto reusable = details::invalid_thread_id2; if (mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_seq_cst, std::memory_order_relaxed) || mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_seq_cst, std::memory_order_relaxed)) { #else if (mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_seq_cst, std::memory_order_relaxed)) { #endif mainHash->entries[index].value = value; break; } ++index; } } return value; } if (probedKey == details::invalid_thread_id) { break; // Not in this hash table } ++index; } } // Insert! auto newCount = 1 + implicitProducerHashCount.fetch_add(1, std::memory_order_relaxed); while (true) { // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) if (newCount >= (mainHash->capacity >> 1) && !implicitProducerHashResizeInProgress.test_and_set(std::memory_order_acquire)) { // We've acquired the resize lock, try to allocate a bigger hash table. // Note the acquire fence synchronizes with the release fence at the end of this block, and hence when // we reload implicitProducerHash it must be the most recent version (it only gets changed within this // locked block). mainHash = implicitProducerHash.load(std::memory_order_acquire); if (newCount >= (mainHash->capacity >> 1)) { size_t newCapacity = mainHash->capacity << 1; while (newCount >= (newCapacity >> 1)) { newCapacity <<= 1; } auto raw = static_cast((Traits::malloc)(sizeof(ImplicitProducerHash) + std::alignment_of::value - 1 + sizeof(ImplicitProducerKVP) * newCapacity)); if (raw == nullptr) { // Allocation failed implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed); implicitProducerHashResizeInProgress.clear(std::memory_order_relaxed); return nullptr; } auto newHash = new (raw) ImplicitProducerHash; newHash->capacity = static_cast(newCapacity); newHash->entries = reinterpret_cast(details::align_for(raw + sizeof(ImplicitProducerHash))); for (size_t i = 0; i != newCapacity; ++i) { new (newHash->entries + i) ImplicitProducerKVP; newHash->entries[i].key.store(details::invalid_thread_id, std::memory_order_relaxed); } newHash->prev = mainHash; implicitProducerHash.store(newHash, std::memory_order_release); implicitProducerHashResizeInProgress.clear(std::memory_order_release); mainHash = newHash; } else { implicitProducerHashResizeInProgress.clear(std::memory_order_release); } } // If it's < three-quarters full, add to the old one anyway so that we don't have to wait for the next table // to finish being allocated by another thread (and if we just finished allocating above, the condition will // always be true) if (newCount < (mainHash->capacity >> 1) + (mainHash->capacity >> 2)) { auto producer = static_cast(recycle_or_create_producer(false)); if (producer == nullptr) { implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed); return nullptr; } #ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED producer->threadExitListener.callback = &ConcurrentQueue::implicit_producer_thread_exited_callback; producer->threadExitListener.userData = producer; details::ThreadExitNotifier::subscribe(&producer->threadExitListener); #endif auto index = hashedId; while (true) { index &= mainHash->capacity - 1u; auto empty = details::invalid_thread_id; #ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED auto reusable = 
details::invalid_thread_id2; if (mainHash->entries[index].key.compare_exchange_strong(reusable, id, std::memory_order_seq_cst, std::memory_order_relaxed)) { implicitProducerHashCount.fetch_sub(1, std::memory_order_relaxed); // already counted as a used slot mainHash->entries[index].value = producer; break; } #endif if (mainHash->entries[index].key.compare_exchange_strong(empty, id, std::memory_order_seq_cst, std::memory_order_relaxed)) { mainHash->entries[index].value = producer; break; } ++index; } return producer; } // Hmm, the old hash is quite full and somebody else is busy allocating a new one. // We need to wait for the allocating thread to finish (if it succeeds, we add, if not, // we try to allocate ourselves). mainHash = implicitProducerHash.load(std::memory_order_acquire); } } #ifdef MOODYCAMEL_CPP11_THREAD_LOCAL_SUPPORTED void implicit_producer_thread_exited(ImplicitProducer* producer) { // Remove from hash #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH debug::DebugLock lock(implicitProdMutex); #endif auto hash = implicitProducerHash.load(std::memory_order_acquire); assert(hash != nullptr); // The thread exit listener is only registered if we were added to a hash in the first place auto id = details::thread_id(); auto hashedId = details::hash_thread_id(id); details::thread_id_t probedKey; // We need to traverse all the hashes just in case other threads aren't on the current one yet and are // trying to add an entry thinking there's a free slot (because they reused a producer) for (; hash != nullptr; hash = hash->prev) { auto index = hashedId; do { index &= hash->capacity - 1u; probedKey = id; if (hash->entries[index].key.compare_exchange_strong(probedKey, details::invalid_thread_id2, std::memory_order_seq_cst, std::memory_order_relaxed)) { break; } ++index; } while (probedKey != details::invalid_thread_id); // Can happen if the hash has changed but we weren't put back in it yet, or if we weren't added to this hash in the first place } // Mark the queue as being recyclable producer->inactive.store(true, std::memory_order_release); } static void implicit_producer_thread_exited_callback(void* userData) { auto producer = static_cast(userData); auto queue = producer->parent; queue->implicit_producer_thread_exited(producer); } #endif ////////////////////////////////// // Utility functions ////////////////////////////////// template static inline void* aligned_malloc(size_t size) { MOODYCAMEL_CONSTEXPR_IF (std::alignment_of::value <= std::alignment_of::value) return (Traits::malloc)(size); else { size_t alignment = std::alignment_of::value; void* raw = (Traits::malloc)(size + alignment - 1 + sizeof(void*)); if (!raw) return nullptr; char* ptr = details::align_for(reinterpret_cast(raw) + sizeof(void*)); *(reinterpret_cast(ptr) - 1) = raw; return ptr; } } template static inline void aligned_free(void* ptr) { MOODYCAMEL_CONSTEXPR_IF (std::alignment_of::value <= std::alignment_of::value) return (Traits::free)(ptr); else (Traits::free)(ptr ? *(reinterpret_cast(ptr) - 1) : nullptr); } template static inline U* create_array(size_t count) { assert(count > 0); U* p = static_cast(aligned_malloc(sizeof(U) * count)); if (p == nullptr) return nullptr; for (size_t i = 0; i != count; ++i) new (p + i) U(); return p; } template static inline void destroy_array(U* p, size_t count) { if (p != nullptr) { assert(count > 0); for (size_t i = count; i != 0; ) (p + --i)->~U(); } aligned_free(p); } template static inline U* create() { void* p = aligned_malloc(sizeof(U)); return p != nullptr ? 
new (p) U : nullptr; } template static inline U* create(A1&& a1) { void* p = aligned_malloc(sizeof(U)); return p != nullptr ? new (p) U(std::forward(a1)) : nullptr; } template static inline void destroy(U* p) { if (p != nullptr) p->~U(); aligned_free(p); } private: std::atomic producerListTail; std::atomic producerCount; std::atomic initialBlockPoolIndex; Block* initialBlockPool; size_t initialBlockPoolSize; #ifndef MCDBGQ_USEDEBUGFREELIST FreeList freeList; #else debug::DebugFreeList freeList; #endif std::atomic implicitProducerHash; std::atomic implicitProducerHashCount; // Number of slots logically used ImplicitProducerHash initialImplicitProducerHash; std::array initialImplicitProducerHashEntries; std::atomic_flag implicitProducerHashResizeInProgress; std::atomic nextExplicitConsumerId; std::atomic globalExplicitConsumerOffset; #ifdef MCDBGQ_NOLOCKFREE_IMPLICITPRODHASH debug::DebugMutex implicitProdMutex; #endif #ifdef MOODYCAMEL_QUEUE_INTERNAL_DEBUG std::atomic explicitProducers; std::atomic implicitProducers; #endif }; template ProducerToken::ProducerToken(ConcurrentQueue& queue) : producer(queue.recycle_or_create_producer(true)) { if (producer != nullptr) { producer->token = this; } } template ProducerToken::ProducerToken(BlockingConcurrentQueue& queue) : producer(reinterpret_cast*>(&queue)->recycle_or_create_producer(true)) { if (producer != nullptr) { producer->token = this; } } template ConsumerToken::ConsumerToken(ConcurrentQueue& queue) : itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr) { initialOffset = queue.nextExplicitConsumerId.fetch_add(1, std::memory_order_release); lastKnownGlobalOffset = static_cast(-1); } template ConsumerToken::ConsumerToken(BlockingConcurrentQueue& queue) : itemsConsumedFromCurrent(0), currentProducer(nullptr), desiredProducer(nullptr) { initialOffset = reinterpret_cast*>(&queue)->nextExplicitConsumerId.fetch_add(1, std::memory_order_release); lastKnownGlobalOffset = static_cast(-1); } template inline void swap(ConcurrentQueue& a, ConcurrentQueue& b) MOODYCAMEL_NOEXCEPT { a.swap(b); } inline void swap(ProducerToken& a, ProducerToken& b) MOODYCAMEL_NOEXCEPT { a.swap(b); } inline void swap(ConsumerToken& a, ConsumerToken& b) MOODYCAMEL_NOEXCEPT { a.swap(b); } template inline void swap(typename ConcurrentQueue::ImplicitProducerKVP& a, typename ConcurrentQueue::ImplicitProducerKVP& b) MOODYCAMEL_NOEXCEPT { a.swap(b); } } #if defined(_MSC_VER) && (!defined(_HAS_CXX17) || !_HAS_CXX17) #pragma warning(pop) #endif #if defined(__GNUC__) && !defined(__INTEL_COMPILER) #pragma GCC diagnostic pop #endif mergerfs-2.40.2/libfuse/include/moodycamel/lightweightsemaphore.h000066400000000000000000000270671457016576200252430ustar00rootroot00000000000000// Provides an efficient implementation of a semaphore (LightweightSemaphore). // This is an extension of Jeff Preshing's sempahore implementation (licensed // under the terms of its separate zlib license) that has been adapted and // extended by Cameron Desrochers. #pragma once #include // For std::size_t #include #include // For std::make_signed #if defined(_WIN32) // Avoid including windows.h in a header; we only need a handful of // items, so we'll redeclare them here (this is relatively safe since // the API generally has to remain stable between Windows versions). // I know this is an ugly hack but it still beats polluting the global // namespace with thousands of generic names or adding a .cpp for nothing. 
extern "C" { struct _SECURITY_ATTRIBUTES; __declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName); __declspec(dllimport) int __stdcall CloseHandle(void* hObject); __declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds); __declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount); } #elif defined(__MACH__) #include #elif defined(__MVS__) #include #elif defined(__unix__) #include #if defined(__GLIBC_PREREQ) && defined(_GNU_SOURCE) #if __GLIBC_PREREQ(2,30) #define MOODYCAMEL_LIGHTWEIGHTSEMAPHORE_MONOTONIC #endif #endif #endif namespace moodycamel { namespace details { // Code in the mpmc_sema namespace below is an adaptation of Jeff Preshing's // portable + lightweight semaphore implementations, originally from // https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h // LICENSE: // Copyright (c) 2015 Jeff Preshing // // This software is provided 'as-is', without any express or implied // warranty. In no event will the authors be held liable for any damages // arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it // freely, subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; you must not // claim that you wrote the original software. If you use this software // in a product, an acknowledgement in the product documentation would be // appreciated but is not required. // 2. Altered source versions must be plainly marked as such, and must not be // misrepresented as being the original software. // 3. This notice may not be removed or altered from any source distribution. 
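//---------------------------------------------------------
// Minimal usage sketch of the public LightweightSemaphore defined at the
// bottom of this header (illustrative only; the token counts, the timeout
// value and the consume_one() call are assumptions for the example, not
// part of the API):
//
//   moodycamel::LightweightSemaphore items(0); // starts with zero tokens
//
//   // producer side: make one unit of work visible
//   items.signal();                            // or items.signal(n)
//
//   // consumer side: block until at least one token is available
//   if (items.wait())                          // wait(usecs) adds a timeout
//     consume_one();
//
//   // batched consumer: greedily take up to 4 tokens in one call
//   auto taken = items.waitMany(4);
//
// The platform-specific Semaphore classes that follow supply the blocking
// primitive; LightweightSemaphore layers an atomic count and a short spin
// loop on top so uncontended operations can avoid entering the kernel.
//---------------------------------------------------------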
#if defined(_WIN32) class Semaphore { private: void* m_hSema; Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; public: Semaphore(int initialCount = 0) { assert(initialCount >= 0); const long maxLong = 0x7fffffff; m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr); assert(m_hSema); } ~Semaphore() { CloseHandle(m_hSema); } bool wait() { const unsigned long infinite = 0xffffffff; return WaitForSingleObject(m_hSema, infinite) == 0; } bool try_wait() { return WaitForSingleObject(m_hSema, 0) == 0; } bool timed_wait(std::uint64_t usecs) { return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) == 0; } void signal(int count = 1) { while (!ReleaseSemaphore(m_hSema, count, nullptr)); } }; #elif defined(__MACH__) //--------------------------------------------------------- // Semaphore (Apple iOS and OSX) // Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html //--------------------------------------------------------- class Semaphore { private: semaphore_t m_sema; Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; public: Semaphore(int initialCount = 0) { assert(initialCount >= 0); kern_return_t rc = semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount); assert(rc == KERN_SUCCESS); (void)rc; } ~Semaphore() { semaphore_destroy(mach_task_self(), m_sema); } bool wait() { return semaphore_wait(m_sema) == KERN_SUCCESS; } bool try_wait() { return timed_wait(0); } bool timed_wait(std::uint64_t timeout_usecs) { mach_timespec_t ts; ts.tv_sec = static_cast(timeout_usecs / 1000000); ts.tv_nsec = static_cast((timeout_usecs % 1000000) * 1000); // added in OSX 10.10: https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html kern_return_t rc = semaphore_timedwait(m_sema, ts); return rc == KERN_SUCCESS; } void signal() { while (semaphore_signal(m_sema) != KERN_SUCCESS); } void signal(int count) { while (count-- > 0) { while (semaphore_signal(m_sema) != KERN_SUCCESS); } } }; #elif defined(__unix__) || defined(__MVS__) //--------------------------------------------------------- // Semaphore (POSIX, Linux, zOS) //--------------------------------------------------------- class Semaphore { private: sem_t m_sema; Semaphore(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; Semaphore& operator=(const Semaphore& other) MOODYCAMEL_DELETE_FUNCTION; public: Semaphore(int initialCount = 0) { assert(initialCount >= 0); int rc = sem_init(&m_sema, 0, static_cast(initialCount)); assert(rc == 0); (void)rc; } ~Semaphore() { sem_destroy(&m_sema); } bool wait() { // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error int rc; do { rc = sem_wait(&m_sema); } while (rc == -1 && errno == EINTR); return rc == 0; } bool try_wait() { int rc; do { rc = sem_trywait(&m_sema); } while (rc == -1 && errno == EINTR); return rc == 0; } bool timed_wait(std::uint64_t usecs) { struct timespec ts; const int usecs_in_1_sec = 1000000; const int nsecs_in_1_sec = 1000000000; #ifdef MOODYCAMEL_LIGHTWEIGHTSEMAPHORE_MONOTONIC clock_gettime(CLOCK_MONOTONIC, &ts); #else clock_gettime(CLOCK_REALTIME, &ts); #endif ts.tv_sec += (time_t)(usecs / usecs_in_1_sec); ts.tv_nsec += (long)(usecs % usecs_in_1_sec) * 1000; // sem_timedwait bombs if you have more than 1e9 in tv_nsec // so we have to clean 
things up before passing it in if (ts.tv_nsec >= nsecs_in_1_sec) { ts.tv_nsec -= nsecs_in_1_sec; ++ts.tv_sec; } int rc; do { #ifdef MOODYCAMEL_LIGHTWEIGHTSEMAPHORE_MONOTONIC rc = sem_clockwait(&m_sema, CLOCK_MONOTONIC, &ts); #else rc = sem_timedwait(&m_sema, &ts); #endif } while (rc == -1 && errno == EINTR); return rc == 0; } void signal() { while (sem_post(&m_sema) == -1); } void signal(int count) { while (count-- > 0) { while (sem_post(&m_sema) == -1); } } }; #else #error Unsupported platform! (No semaphore wrapper available) #endif } // end namespace details //--------------------------------------------------------- // LightweightSemaphore //--------------------------------------------------------- class LightweightSemaphore { public: typedef std::make_signed::type ssize_t; private: std::atomic m_count; details::Semaphore m_sema; int m_maxSpins; bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) { ssize_t oldCount; int spin = m_maxSpins; while (--spin >= 0) { oldCount = m_count.load(std::memory_order_relaxed); if ((oldCount > 0) && m_count.compare_exchange_strong(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) return true; std::atomic_signal_fence(std::memory_order_acquire); // Prevent the compiler from collapsing the loop. } oldCount = m_count.fetch_sub(1, std::memory_order_acquire); if (oldCount > 0) return true; if (timeout_usecs < 0) { if (m_sema.wait()) return true; } if (timeout_usecs > 0 && m_sema.timed_wait((std::uint64_t)timeout_usecs)) return true; // At this point, we've timed out waiting for the semaphore, but the // count is still decremented indicating we may still be waiting on // it. So we have to re-adjust the count, but only if the semaphore // wasn't signaled enough times for us too since then. If it was, we // need to release the semaphore too. while (true) { oldCount = m_count.load(std::memory_order_acquire); if (oldCount >= 0 && m_sema.try_wait()) return true; if (oldCount < 0 && m_count.compare_exchange_strong(oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed)) return false; } } ssize_t waitManyWithPartialSpinning(ssize_t max, std::int64_t timeout_usecs = -1) { assert(max > 0); ssize_t oldCount; int spin = m_maxSpins; while (--spin >= 0) { oldCount = m_count.load(std::memory_order_relaxed); if (oldCount > 0) { ssize_t newCount = oldCount > max ? 
oldCount - max : 0; if (m_count.compare_exchange_strong(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) return oldCount - newCount; } std::atomic_signal_fence(std::memory_order_acquire); } oldCount = m_count.fetch_sub(1, std::memory_order_acquire); if (oldCount <= 0) { if ((timeout_usecs == 0) || (timeout_usecs < 0 && !m_sema.wait()) || (timeout_usecs > 0 && !m_sema.timed_wait((std::uint64_t)timeout_usecs))) { while (true) { oldCount = m_count.load(std::memory_order_acquire); if (oldCount >= 0 && m_sema.try_wait()) break; if (oldCount < 0 && m_count.compare_exchange_strong(oldCount, oldCount + 1, std::memory_order_relaxed, std::memory_order_relaxed)) return 0; } } } if (max > 1) return 1 + tryWaitMany(max - 1); return 1; } public: LightweightSemaphore(ssize_t initialCount = 0, int maxSpins = 10000) : m_count(initialCount), m_maxSpins(maxSpins) { assert(initialCount >= 0); assert(maxSpins >= 0); } bool tryWait() { ssize_t oldCount = m_count.load(std::memory_order_relaxed); while (oldCount > 0) { if (m_count.compare_exchange_weak(oldCount, oldCount - 1, std::memory_order_acquire, std::memory_order_relaxed)) return true; } return false; } bool wait() { return tryWait() || waitWithPartialSpinning(); } bool wait(std::int64_t timeout_usecs) { return tryWait() || waitWithPartialSpinning(timeout_usecs); } // Acquires between 0 and (greedily) max, inclusive ssize_t tryWaitMany(ssize_t max) { assert(max >= 0); ssize_t oldCount = m_count.load(std::memory_order_relaxed); while (oldCount > 0) { ssize_t newCount = oldCount > max ? oldCount - max : 0; if (m_count.compare_exchange_weak(oldCount, newCount, std::memory_order_acquire, std::memory_order_relaxed)) return oldCount - newCount; } return 0; } // Acquires at least one, and (greedily) at most max ssize_t waitMany(ssize_t max, std::int64_t timeout_usecs) { assert(max >= 0); ssize_t result = tryWaitMany(max); if (result == 0 && max > 0) result = waitManyWithPartialSpinning(max, timeout_usecs); return result; } ssize_t waitMany(ssize_t max) { ssize_t result = waitMany(max, -1); assert(result > 0); return result; } void signal(ssize_t count = 1) { assert(count >= 0); ssize_t oldCount = m_count.fetch_add(count, std::memory_order_release); ssize_t toRelease = -oldCount < count ? -oldCount : count; if (toRelease > 0) { m_sema.signal((int)toRelease); } } std::size_t availableApprox() const { ssize_t count = m_count.load(std::memory_order_relaxed); return count > 0 ? 
static_cast(count) : 0; } }; } // end namespace moodycamel mergerfs-2.40.2/libfuse/include/stat_utils.h000066400000000000000000000010021457016576200210370ustar00rootroot00000000000000#pragma once #include "config.h" #ifdef HAVE_STRUCT_STAT_ST_ATIM #define ST_ATIM_NSEC(ST) ((ST)->st_atim.tv_nsec) #define ST_CTIM_NSEC(ST) ((ST)->st_ctim.tv_nsec) #define ST_MTIM_NSEC(ST) ((ST)->st_mtim.tv_nsec) #elif defined HAVE_STRUCT_STAT_ST_ATIMESPEC #define ST_ATIM_NSEC(ST) ((ST)->st_atimespec.tv_nsec) #define ST_CTIM_NSEC(ST) ((ST)->st_ctimespec.tv_nsec) #define ST_MTIM_NSEC(ST) ((ST)->st_mtimespec.tv_nsec) #else #define ST_ATIM_NSEC(ST) 0 #define ST_CTIM_NSEC(ST) 0 #define ST_MTIM_NSEC(ST) 0 #endif mergerfs-2.40.2/libfuse/include/thread_pool.hpp000066400000000000000000000154411457016576200215200ustar00rootroot00000000000000#pragma once #include "moodycamel/blockingconcurrentqueue.h" #include #include #include #include #include #include #include #include #include #include #include #include struct ThreadPoolTraits : public moodycamel::ConcurrentQueueDefaultTraits { static const int MAX_SEMA_SPINS = 1; }; class ThreadPool { private: using Func = std::function; using Queue = moodycamel::BlockingConcurrentQueue; public: explicit ThreadPool(unsigned const thread_count_ = std::thread::hardware_concurrency(), unsigned const max_queue_depth_ = std::thread::hardware_concurrency(), std::string const name_ = {}) : _queue(), _queue_depth(0), _max_queue_depth(std::max(thread_count_,max_queue_depth_)), _name(name_) { syslog(LOG_DEBUG, "threadpool (%s): spawning %u threads w/ max queue depth %u%s", _name.c_str(), thread_count_, _max_queue_depth, ((_max_queue_depth != max_queue_depth_) ? " (adjusted)" : "")); sigset_t oldset; sigset_t newset; sigfillset(&newset); pthread_sigmask(SIG_BLOCK,&newset,&oldset); _threads.reserve(thread_count_); for(std::size_t i = 0; i < thread_count_; ++i) { int rv; pthread_t t; rv = pthread_create(&t,NULL,ThreadPool::start_routine,this); if(rv != 0) { syslog(LOG_WARNING, "threadpool (%s): error spawning thread - %d (%s)", _name.c_str(), rv, strerror(rv)); continue; } if(!_name.empty()) pthread_setname_np(t,_name.c_str()); _threads.push_back(t); } pthread_sigmask(SIG_SETMASK,&oldset,NULL); if(_threads.empty()) throw std::runtime_error("threadpool: failed to spawn any threads"); } ~ThreadPool() { syslog(LOG_DEBUG, "threadpool (%s): destroying %lu threads", _name.c_str(), _threads.size()); auto func = []() { pthread_exit(NULL); }; for(std::size_t i = 0; i < _threads.size(); i++) _queue.enqueue(func); for(auto t : _threads) pthread_cancel(t); for(auto t : _threads) pthread_join(t,NULL); } private: static void* start_routine(void *arg_) { ThreadPool *btp = static_cast(arg_); ThreadPool::Func func; ThreadPool::Queue &q = btp->_queue; std::atomic &queue_depth = btp->_queue_depth; moodycamel::ConsumerToken ctok(btp->_queue); while(true) { q.wait_dequeue(ctok,func); func(); queue_depth.fetch_sub(1,std::memory_order_release); } return NULL; } public: int add_thread(std::string const name_ = {}) { int rv; pthread_t t; sigset_t oldset; sigset_t newset; std::string name; name = (name_.empty() ? 
_name : name_); sigfillset(&newset); pthread_sigmask(SIG_BLOCK,&newset,&oldset); rv = pthread_create(&t,NULL,ThreadPool::start_routine,this); pthread_sigmask(SIG_SETMASK,&oldset,NULL); if(rv != 0) { syslog(LOG_WARNING, "threadpool (%s): error spawning thread - %d (%s)", _name.c_str(), rv, strerror(rv)); return -rv; } if(!name.empty()) pthread_setname_np(t,name.c_str()); { std::lock_guard lg(_threads_mutex); _threads.push_back(t); } syslog(LOG_DEBUG, "threadpool (%s): 1 thread added named '%s'", _name.c_str(), name.c_str()); return 0; } int remove_thread(void) { { std::lock_guard lg(_threads_mutex); if(_threads.size() <= 1) return -EINVAL; } std::promise promise; auto func = [&]() { pthread_t t; t = pthread_self(); promise.set_value(t); { std::lock_guard lg(_threads_mutex); for(auto i = _threads.begin(); i != _threads.end(); ++i) { if(*i != t) continue; _threads.erase(i); break; } } syslog(LOG_DEBUG, "threadpool (%s): 1 thread removed", _name.c_str()); pthread_exit(NULL); }; enqueue_work(func); pthread_join(promise.get_future().get(),NULL); return 0; } int set_threads(std::size_t const count_) { int diff; { std::lock_guard lg(_threads_mutex); diff = ((int)count_ - (int)_threads.size()); } for(auto i = diff; i > 0; --i) add_thread(); for(auto i = diff; i < 0; ++i) remove_thread(); return diff; } public: template void enqueue_work(moodycamel::ProducerToken &ptok_, FuncType &&f_) { timespec ts = {0,1000}; for(unsigned i = 0; i < 1000000; i++) { if(_queue_depth.load(std::memory_order_acquire) < _max_queue_depth) break; ::nanosleep(&ts,NULL); } _queue.enqueue(ptok_,f_); _queue_depth.fetch_add(1,std::memory_order_release); } template void enqueue_work(FuncType &&f_) { timespec ts = {0,1000}; for(unsigned i = 0; i < 1000000; i++) { if(_queue_depth.load(std::memory_order_acquire) < _max_queue_depth) break; ::nanosleep(&ts,NULL); } _queue.enqueue(f_); _queue_depth.fetch_add(1,std::memory_order_release); } template [[nodiscard]] std::future::type> enqueue_task(FuncType&& f_) { using TaskReturnType = typename std::result_of::type; using Promise = std::promise; auto promise = std::make_shared(); auto future = promise->get_future(); auto work = [=]() { auto rv = f_(); promise->set_value(rv); }; timespec ts = {0,1000}; for(unsigned i = 0; i < 1000000; i++) { if(_queue_depth.load(std::memory_order_acquire) < _max_queue_depth) break; ::nanosleep(&ts,NULL); } _queue.enqueue(work); _queue_depth.fetch_add(1,std::memory_order_release); return future; } public: std::vector threads() const { std::lock_guard lg(_threads_mutex); return _threads; } moodycamel::ProducerToken ptoken() { return moodycamel::ProducerToken(_queue); } private: Queue _queue; std::atomic _queue_depth; unsigned const _max_queue_depth; private: std::string const _name; std::vector _threads; mutable std::mutex _threads_mutex; }; mergerfs-2.40.2/libfuse/lib/000077500000000000000000000000001457016576200156255ustar00rootroot00000000000000mergerfs-2.40.2/libfuse/lib/buffer.c000066400000000000000000000153311457016576200172450ustar00rootroot00000000000000/* FUSE: Filesystem in Userspace Copyright (C) 2010 Miklos Szeredi This program can be distributed under the terms of the GNU LGPLv2. 
See the file COPYING.LIB */ #define _GNU_SOURCE #include "config.h" #include "fuse_i.h" #include "fuse_lowlevel.h" #include #include #include #include size_t fuse_buf_size(const struct fuse_bufvec *bufv) { size_t i; size_t size = 0; for (i = 0; i < bufv->count; i++) { if (bufv->buf[i].size == SIZE_MAX) size = SIZE_MAX; else size += bufv->buf[i].size; } return size; } static size_t min_size(size_t s1, size_t s2) { return s1 < s2 ? s1 : s2; } static ssize_t fuse_buf_write(const struct fuse_buf *dst, size_t dst_off, const struct fuse_buf *src, size_t src_off, size_t len) { ssize_t res = 0; size_t copied = 0; while (len) { if (dst->flags & FUSE_BUF_FD_SEEK) { res = pwrite(dst->fd, src->mem + src_off, len, dst->pos + dst_off); } else { res = write(dst->fd, src->mem + src_off, len); } if (res == -1) { if (!copied) return -errno; break; } if (res == 0) break; copied += res; if (!(dst->flags & FUSE_BUF_FD_RETRY)) break; src_off += res; dst_off += res; len -= res; } return copied; } static ssize_t fuse_buf_read(const struct fuse_buf *dst, size_t dst_off, const struct fuse_buf *src, size_t src_off, size_t len) { ssize_t res = 0; size_t copied = 0; while (len) { if (src->flags & FUSE_BUF_FD_SEEK) { res = pread(src->fd, dst->mem + dst_off, len, src->pos + src_off); } else { res = read(src->fd, dst->mem + dst_off, len); } if (res == -1) { if (!copied) return -errno; break; } if (res == 0) break; copied += res; if (!(src->flags & FUSE_BUF_FD_RETRY)) break; dst_off += res; src_off += res; len -= res; } return copied; } static ssize_t fuse_buf_fd_to_fd(const struct fuse_buf *dst, size_t dst_off, const struct fuse_buf *src, size_t src_off, size_t len) { char buf[4096]; struct fuse_buf tmp = { .size = sizeof(buf), .flags = 0, }; ssize_t res; size_t copied = 0; tmp.mem = buf; while (len) { size_t this_len = min_size(tmp.size, len); size_t read_len; res = fuse_buf_read(&tmp, 0, src, src_off, this_len); if (res < 0) { if (!copied) return res; break; } if (res == 0) break; read_len = res; res = fuse_buf_write(dst, dst_off, &tmp, 0, read_len); if (res < 0) { if (!copied) return res; break; } if (res == 0) break; copied += res; if (res < this_len) break; dst_off += res; src_off += res; len -= res; } return copied; } #ifdef HAVE_SPLICE static ssize_t fuse_buf_splice(const struct fuse_buf *dst, size_t dst_off, const struct fuse_buf *src, size_t src_off, size_t len, enum fuse_buf_copy_flags flags) { int splice_flags = 0; off_t *srcpos = NULL; off_t *dstpos = NULL; off_t srcpos_val; off_t dstpos_val; ssize_t res; size_t copied = 0; if (flags & FUSE_BUF_SPLICE_MOVE) splice_flags |= SPLICE_F_MOVE; if (flags & FUSE_BUF_SPLICE_NONBLOCK) splice_flags |= SPLICE_F_NONBLOCK; if (src->flags & FUSE_BUF_FD_SEEK) { srcpos_val = src->pos + src_off; srcpos = &srcpos_val; } if (dst->flags & FUSE_BUF_FD_SEEK) { dstpos_val = dst->pos + dst_off; dstpos = &dstpos_val; } while (len) { res = splice(src->fd, srcpos, dst->fd, dstpos, len, splice_flags); if (res == -1) { if (copied) break; if (errno != EINVAL || (flags & FUSE_BUF_FORCE_SPLICE)) return -errno; /* Maybe splice is not supported for this combination */ return fuse_buf_fd_to_fd(dst, dst_off, src, src_off, len); } if (res == 0) break; copied += res; if (!(src->flags & FUSE_BUF_FD_RETRY) && !(dst->flags & FUSE_BUF_FD_RETRY)) { break; } len -= res; } return copied; } #else static ssize_t fuse_buf_splice(const struct fuse_buf *dst, size_t dst_off, const struct fuse_buf *src, size_t src_off, size_t len, enum fuse_buf_copy_flags flags) { (void) flags; return fuse_buf_fd_to_fd(dst, 
dst_off, src, src_off, len); } #endif static ssize_t fuse_buf_copy_one(const struct fuse_buf *dst, size_t dst_off, const struct fuse_buf *src, size_t src_off, size_t len, enum fuse_buf_copy_flags flags) { int src_is_fd = src->flags & FUSE_BUF_IS_FD; int dst_is_fd = dst->flags & FUSE_BUF_IS_FD; if (!src_is_fd && !dst_is_fd) { char *dstmem = dst->mem + dst_off; char *srcmem = src->mem + src_off; if (dstmem != srcmem) { if (dstmem + len <= srcmem || srcmem + len <= dstmem) memcpy(dstmem, srcmem, len); else memmove(dstmem, srcmem, len); } return len; } else if (!src_is_fd) { return fuse_buf_write(dst, dst_off, src, src_off, len); } else if (!dst_is_fd) { return fuse_buf_read(dst, dst_off, src, src_off, len); } else if (flags & FUSE_BUF_NO_SPLICE) { return fuse_buf_fd_to_fd(dst, dst_off, src, src_off, len); } else { return fuse_buf_splice(dst, dst_off, src, src_off, len, flags); } } static const struct fuse_buf *fuse_bufvec_current(struct fuse_bufvec *bufv) { if (bufv->idx < bufv->count) return &bufv->buf[bufv->idx]; else return NULL; } static int fuse_bufvec_advance(struct fuse_bufvec *bufv, size_t len) { const struct fuse_buf *buf = fuse_bufvec_current(bufv); bufv->off += len; assert(bufv->off <= buf->size); if (bufv->off == buf->size) { assert(bufv->idx < bufv->count); bufv->idx++; if (bufv->idx == bufv->count) return 0; bufv->off = 0; } return 1; } ssize_t fuse_buf_copy(struct fuse_bufvec *dstv, struct fuse_bufvec *srcv, enum fuse_buf_copy_flags flags) { size_t copied = 0; if (dstv == srcv) return fuse_buf_size(dstv); for (;;) { const struct fuse_buf *src = fuse_bufvec_current(srcv); const struct fuse_buf *dst = fuse_bufvec_current(dstv); size_t src_len; size_t dst_len; size_t len; ssize_t res; if (src == NULL || dst == NULL) break; src_len = src->size - srcv->off; dst_len = dst->size - dstv->off; len = min_size(src_len, dst_len); res = fuse_buf_copy_one(dst, dstv->off, src, srcv->off, len, flags); if (res < 0) { if (!copied) return res; break; } copied += res; if (!fuse_bufvec_advance(srcv, res) || !fuse_bufvec_advance(dstv, res)) break; if (res < len) break; } return copied; } mergerfs-2.40.2/libfuse/lib/cpu.cpp000066400000000000000000000051631457016576200171250ustar00rootroot00000000000000#ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "cpu.hpp" #include "ghc/filesystem.hpp" #include "fmt/core.h" #include #include int CPU::getaffinity(const pthread_t thread_id_, cpu_set_t *cpuset_) { CPU_ZERO(cpuset_); return sched_getaffinity(thread_id_, sizeof(cpu_set_t), cpuset_); } int CPU::setaffinity(const pthread_t thread_id_, cpu_set_t *cpuset_) { return pthread_setaffinity_np(thread_id_, sizeof(cpu_set_t), cpuset_); } int CPU::setaffinity(const pthread_t thread_id_, const int cpu_) { cpu_set_t cpuset; CPU_ZERO(&cpuset); CPU_SET(cpu_,&cpuset); return CPU::setaffinity(thread_id_,&cpuset); } int CPU::setaffinity(const pthread_t thread_id_, const std::set cpus_) { cpu_set_t cpuset; CPU_ZERO(&cpuset); for(auto const cpu : cpus_) CPU_SET(cpu,&cpuset); return CPU::setaffinity(thread_id_,&cpuset); } static ghc::filesystem::path generate_cpu_core_id_path(const int cpu_id_) { const ghc::filesystem::path basepath{"/sys/devices/system/cpu"}; return basepath / fmt::format("cpu{}",cpu_id_) / "topology" / "core_id"; } int CPU::count() { int rv; cpu_set_t cpuset; rv = CPU::getaffinity(0,&cpuset); if(rv < 0) return rv; return CPU_COUNT(&cpuset); } CPU::CPUVec CPU::cpus() { cpu_set_t cpuset; CPU::CPUVec cpuvec; CPU::getaffinity(0,&cpuset); for(int i = 0; i < CPU_SETSIZE; i++) { if(!CPU_ISSET(i,&cpuset)) 
continue; cpuvec.push_back(i); } return cpuvec; } CPU::CPU2CoreMap CPU::cpu2core() { cpu_set_t cpuset; CPU::CPU2CoreMap c2c; CPU::getaffinity(0,&cpuset); for(int i = 0; i < CPU_SETSIZE; i++) { int core_id; std::ifstream ifs; ghc::filesystem::path path; if(!CPU_ISSET(i,&cpuset)) continue; path = ::generate_cpu_core_id_path(i); ifs.open(path); if(!ifs) break; ifs >> core_id; c2c[i] = core_id; ifs.close(); } return c2c; } CPU::Core2CPUsMap CPU::core2cpus() { cpu_set_t cpuset; CPU::Core2CPUsMap c2c; CPU::getaffinity(0,&cpuset); for(int i = 0; i < CPU_SETSIZE; i++) { int core_id; std::ifstream ifs; ghc::filesystem::path path; if(!CPU_ISSET(i,&cpuset)) continue; path = ::generate_cpu_core_id_path(i); ifs.open(path); if(!ifs) break; ifs >> core_id; c2c[core_id].insert(i); ifs.close(); } return c2c; } mergerfs-2.40.2/libfuse/lib/cpu.hpp000066400000000000000000000014521457016576200171270ustar00rootroot00000000000000#pragma once #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include #include #include #include #include class CPU { public: typedef std::vector ThreadIdVec; typedef std::vector CPUVec; typedef std::unordered_map CPU2CoreMap; typedef std::unordered_map> Core2CPUsMap; public: static int count(); static int getaffinity(const pthread_t thread_id, cpu_set_t *cpuset); static int setaffinity(const pthread_t thread_id, cpu_set_t *cpuset); static int setaffinity(const pthread_t thread_id, const int cpu); static int setaffinity(const pthread_t thread_id, const std::set cpus); static CPU::CPUVec cpus(); static CPU::CPU2CoreMap cpu2core(); static CPU::Core2CPUsMap core2cpus(); }; mergerfs-2.40.2/libfuse/lib/crc32b.c000066400000000000000000000111651457016576200170530ustar00rootroot00000000000000/* ISC License Copyright (c) 2021, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "crc32b.h" static const crc32b_t CRC32BTABLE[256] = { 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D }; crc32b_t crc32b_start(void) { return 0xFFFFFFFF; } crc32b_t crc32b_continue(const void *buf_, const crc32b_t len_, const crc32b_t crc_) { int i; char *buf; crc32b_t crc; crc = crc_; buf = (char*)buf_; for(i = 0; i < len_; i++) { crc = (CRC32BTABLE[(crc ^ buf[i]) & 0xFFL] ^ (crc >> 8)); } return crc; } crc32b_t crc32b_finish(const crc32b_t crc_) { return (crc_ ^ 0xFFFFFFFF); } crc32b_t crc32b(const void *buf_, 
const crc32b_t len_) { crc32b_t crc; crc = crc32b_start(); crc = crc32b_continue(buf_,len_,crc); crc = crc32b_finish(crc); return crc; } mergerfs-2.40.2/libfuse/lib/crc32b.h000066400000000000000000000022061457016576200170540ustar00rootroot00000000000000/* ISC License Copyright (c) 2021, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef CRC32B_H_INCLUDED #define CRC32B_H_INCLUDED typedef unsigned int crc32b_t; crc32b_t crc32b_start(void); crc32b_t crc32b_continue(const void *buf, const crc32b_t len, const crc32b_t crc); crc32b_t crc32b_finish(const crc32b_t crc); crc32b_t crc32b(const void *buf, crc32b_t len); #endif mergerfs-2.40.2/libfuse/lib/debug.c000066400000000000000000000662171457016576200170730ustar00rootroot00000000000000/* ISC License Copyright (c) 2021, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #define _GNU_SOURCE #include "fuse_kernel.h" #include #include #include #include #include static FILE *g_OUTPUT = NULL; #define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg))) static int debug_set_output_null() { g_OUTPUT = stderr; setvbuf(g_OUTPUT,NULL,_IOLBF,0); return 0; } static int debug_set_output_filepath(const char *filepath_) { FILE *tmp; tmp = fopen(filepath_,"a"); if(tmp == NULL) return -errno; g_OUTPUT = tmp; setvbuf(g_OUTPUT,NULL,_IOLBF,0); return 0; } int debug_set_output(const char *filepath_) { if(filepath_ == NULL) return debug_set_output_null(); return debug_set_output_filepath(filepath_); } static __attribute__((constructor)) void debug_constructor(void) { debug_set_output(NULL); } static const char* open_accmode_to_str(const int flags_) { switch(flags_ & O_ACCMODE) { case O_RDWR: return "O_RDWR"; case O_RDONLY: return "O_RDONLY"; case O_WRONLY: return "O_WRONLY"; } return ""; } #define FUSE_WRITE_FLAG_CASE(X) case FUSE_WRITE_##X: return #X static const char* fuse_write_flag_to_str(const uint32_t offset_) { switch(1 << offset_) { FUSE_WRITE_FLAG_CASE(CACHE); FUSE_WRITE_FLAG_CASE(LOCKOWNER); FUSE_WRITE_FLAG_CASE(KILL_PRIV); } return NULL; } #undef FUSE_WRITE_FLAG_CASE #define OPEN_FLAG_CASE(X) case X: return #X static const char* open_flag_to_str(const uint64_t offset_) { switch(1 << offset_) { OPEN_FLAG_CASE(O_APPEND); OPEN_FLAG_CASE(O_ASYNC); OPEN_FLAG_CASE(O_CLOEXEC); OPEN_FLAG_CASE(O_CREAT); OPEN_FLAG_CASE(O_DIRECT); OPEN_FLAG_CASE(O_DIRECTORY); #ifdef O_DSYNC OPEN_FLAG_CASE(O_DSYNC); #endif OPEN_FLAG_CASE(O_EXCL); #ifdef O_LARGEFILE OPEN_FLAG_CASE(O_LARGEFILE); #endif #ifdef O_NOATIME OPEN_FLAG_CASE(O_NOATIME); #endif OPEN_FLAG_CASE(O_NOCTTY); OPEN_FLAG_CASE(O_NOFOLLOW); OPEN_FLAG_CASE(O_NONBLOCK); #ifdef O_PATH OPEN_FLAG_CASE(O_PATH); #endif OPEN_FLAG_CASE(O_SYNC); #ifdef O_TMPFILE OPEN_FLAG_CASE(O_TMPFILE); #endif OPEN_FLAG_CASE(O_TRUNC); } return NULL; } #undef OPEN_FLAG_CASE #define FUSE_INIT_FLAG_CASE(X) case FUSE_##X: return #X static const char* fuse_flag_to_str(const uint32_t offset_) { switch(1 << offset_) { FUSE_INIT_FLAG_CASE(ASYNC_READ); FUSE_INIT_FLAG_CASE(POSIX_LOCKS); FUSE_INIT_FLAG_CASE(FILE_OPS); FUSE_INIT_FLAG_CASE(ATOMIC_O_TRUNC); FUSE_INIT_FLAG_CASE(EXPORT_SUPPORT); FUSE_INIT_FLAG_CASE(BIG_WRITES); FUSE_INIT_FLAG_CASE(DONT_MASK); FUSE_INIT_FLAG_CASE(SPLICE_WRITE); FUSE_INIT_FLAG_CASE(SPLICE_MOVE); FUSE_INIT_FLAG_CASE(SPLICE_READ); FUSE_INIT_FLAG_CASE(FLOCK_LOCKS); FUSE_INIT_FLAG_CASE(HAS_IOCTL_DIR); FUSE_INIT_FLAG_CASE(AUTO_INVAL_DATA); FUSE_INIT_FLAG_CASE(DO_READDIRPLUS); FUSE_INIT_FLAG_CASE(READDIRPLUS_AUTO); FUSE_INIT_FLAG_CASE(ASYNC_DIO); FUSE_INIT_FLAG_CASE(WRITEBACK_CACHE); FUSE_INIT_FLAG_CASE(NO_OPEN_SUPPORT); FUSE_INIT_FLAG_CASE(PARALLEL_DIROPS); FUSE_INIT_FLAG_CASE(HANDLE_KILLPRIV); FUSE_INIT_FLAG_CASE(POSIX_ACL); FUSE_INIT_FLAG_CASE(ABORT_ERROR); FUSE_INIT_FLAG_CASE(MAX_PAGES); FUSE_INIT_FLAG_CASE(CACHE_SYMLINKS); FUSE_INIT_FLAG_CASE(NO_OPENDIR_SUPPORT); FUSE_INIT_FLAG_CASE(EXPLICIT_INVAL_DATA); FUSE_INIT_FLAG_CASE(MAP_ALIGNMENT); } return NULL; } #undef FUSE_INIT_FLAG_CASE static void debug_open_flags(const uint32_t flags_) { fprintf(stderr,"%s,",open_accmode_to_str(flags_)); for(int i = 0; i < (sizeof(flags_) * 8); i++) { const char *str; if(!(flags_ & (1 << i))) continue; str = open_flag_to_str(i); if(str == NULL) continue; fprintf(stderr,"%s,",str); } } #define FOPEN_FLAG_CASE(X) case FOPEN_##X: return #X static const char* fuse_fopen_flag_to_str(const uint32_t offset_) { switch(1 << offset_) { FOPEN_FLAG_CASE(DIRECT_IO); 
FOPEN_FLAG_CASE(KEEP_CACHE); FOPEN_FLAG_CASE(NONSEEKABLE); FOPEN_FLAG_CASE(CACHE_DIR); FOPEN_FLAG_CASE(STREAM); } return NULL; } #undef FOPEN_FLAG_CASE void debug_fuse_open_out(const struct fuse_open_out *arg_) { fprintf(stderr, "fuse_open_out:" " fh=0x%"PRIx64";" " open_flags=0x%X (", arg_->fh, arg_->open_flags); for(int i = 0; i < (sizeof(arg_->open_flags) * 8); i++) { const char *str; if(!(arg_->open_flags & (1 << i))) continue; str = fuse_fopen_flag_to_str(i); if(str == NULL) continue; fprintf(stderr,"%s,",str); } fprintf(stderr,");\n"); } static void debug_fuse_lookup(const void *arg_) { const char *name = arg_; fprintf(g_OUTPUT, "fuse_lookup:" " name=%s;" "\n" , name); } static void debug_fuse_getattr_in(const void *arg_) { const struct fuse_getattr_in *arg = arg_; fprintf(g_OUTPUT, "fuse_getattr_in:" " getattr_flags=0x%08X;" " fh=0x%"PRIx64";\n", arg->getattr_flags, arg->fh); } static void debug_fuse_setattr_in(const void *arg_) { const struct fuse_setattr_in *arg = arg_; fprintf(g_OUTPUT, "fuse_setattr_in:" " valid=%u;" " fh=0x%"PRIx64";" " size=%zu;" " lock_owner=%zu;" " atime=%zu;" " atimensec=%u;" " mtime=%zu;" " mtimensec=%u;" " ctime=%zu;" " ctimensec=%u;" " mode=%o;" " uid=%u;" " gid=%u;" "\n" , arg->valid, arg->fh, arg->size, arg->lock_owner, arg->atime, arg->atimensec, arg->mtime, arg->mtimensec, arg->ctime, arg->ctimensec, arg->mode, arg->uid, arg->gid); } static void debug_fuse_access_in(const void *arg_) { const struct fuse_access_in *arg = arg_; fprintf(g_OUTPUT, "fuse_access_in:" " mask=0x%08X;" "\n" , arg->mask); } static void debug_fuse_mknod_in(const void *arg_) { const struct fuse_mknod_in *arg = arg_; fprintf(g_OUTPUT, "fuse_mknod_in:" " mode=%o;" " rdev=0x%08X;" " umask=%o;" "\n" , arg->mode, arg->rdev, arg->umask); } static void debug_fuse_mkdir_in(const void *arg_) { const struct fuse_mkdir_in *arg = arg_; fprintf(g_OUTPUT, "fuse_mkdir_in:" " mode=%o;" " umask=%o;" " name=%s;" "\n" , arg->mode, arg->umask, PARAM(arg)); } static void debug_fuse_unlink(const void *arg_) { const char *name = arg_; fprintf(g_OUTPUT, "fuse_unlink:" " name=%s;" "\n" , name); } static void debug_fuse_rmdir(const void *arg_) { const char *name = arg_; fprintf(g_OUTPUT, "fuse_mkdir:" " name=%s;" "\n" , name); } static void debug_fuse_symlink(const void *arg_) { const char *name; const char *linkname; name = arg_; linkname = (name + (strlen(name) + 1)); fprintf(g_OUTPUT, "fuse_mkdir:" " linkname=%s;" " name=%s;" "\n" , linkname, name); } static void debug_fuse_rename_in(const void *arg_) { const char *oldname; const char *newname; const struct fuse_rename_in *arg = arg_; oldname = PARAM(arg); newname = (oldname + strlen(oldname) + 1); fprintf(g_OUTPUT, "fuse_rename_in:" " oldname=%s;" " newdir=%zu;" " newname=%s;" "\n" , oldname, arg->newdir, newname); } static void debug_fuse_link_in(const void *arg_) { const char *name; const struct fuse_link_in *arg = arg_; name = PARAM(arg); fprintf(g_OUTPUT, "fuse_link_in:" " oldnodeid=%zu;" " name=%s;" "\n" , arg->oldnodeid, name); } static void debug_fuse_create_in(const void *arg_) { const char *name; const struct fuse_create_in *arg = arg_; name = PARAM(arg); fprintf(g_OUTPUT, "fuse_create_in:" " mode=%o;" " umask=%o;" " name=%s;" " flags=0x%X (", arg->mode, arg->umask, name, arg->flags); debug_open_flags(arg->flags); fprintf(g_OUTPUT,");\n"); } static void debug_fuse_open_in(const void *arg_) { const struct fuse_open_in *arg = arg_; fprintf(g_OUTPUT, "fuse_open_in:" " flags=0x%08X (", arg->flags); debug_open_flags(arg->flags); 
fprintf(g_OUTPUT,");\n"); } static void debug_fuse_read_in(const void *arg_) { const struct fuse_read_in *arg = arg_; fprintf(g_OUTPUT, "fuse_read_in:" " fh=0x%"PRIx64";" " offset=%zu;" " size=%u;" " read_flags=%X;" " lock_owner=0x%"PRIx64";" " flags=0x%X (" , arg->fh, arg->offset, arg->size, arg->read_flags, arg->lock_owner, arg->flags); debug_open_flags(arg->flags); fprintf(g_OUTPUT,");\n"); } static void debug_fuse_write_in(const void *arg_) { const struct fuse_write_in *arg = arg_; fprintf(g_OUTPUT, "fuse_write_in:" " fh=0x%"PRIx64";" " offset=%zu;" " size=%u;" " lock_owner=0x%"PRIx64";" " flags=0x%X (" , arg->fh, arg->offset, arg->size, arg->lock_owner, arg->flags); debug_open_flags(arg->flags); fprintf(g_OUTPUT, "); write_flags=0x%X (", arg->write_flags); for(int i = 0; i < (sizeof(arg->write_flags) * 8); i++) { const char *str; if(!(arg->write_flags & (1 << i))) continue; str = fuse_write_flag_to_str(i); if(str == NULL) continue; fprintf(g_OUTPUT,"%s,",str); } fprintf(g_OUTPUT,");\n"); } static void debug_fuse_flush_in(const void *arg_) { const struct fuse_flush_in *arg = arg_; fprintf(g_OUTPUT, "fuse_flush_in:" " fh=0x%"PRIx64";" " lock_owner=0x%"PRIx64";" "\n" , arg->fh, arg->lock_owner); } static void debug_fuse_release_in(const void *arg_) { const struct fuse_release_in *arg = arg_; fprintf(g_OUTPUT, "fuse_release_in:" " fh=0x%"PRIx64";" " release_flags=0x%X;" " lock_owner=0x%"PRIx64";" " flags=0x%X (" , arg->fh, arg->release_flags, arg->lock_owner, arg->flags); debug_open_flags(arg->flags); fprintf(g_OUTPUT,");\n"); } static void debug_fuse_fsync_in(const void *arg_) { const struct fuse_fsync_in *arg = arg_; fprintf(g_OUTPUT, "fuse_fsync_in:" " fh=0x%"PRIx64";" " fsync_flags=0x%X;" "\n" , arg->fh, arg->fsync_flags); } static void debug_fuse_setxattr_in(const void *arg_) { const char *name; const char *value; const struct fuse_setxattr_in *arg = arg_; name = PARAM(arg); value = (name + strlen(name) + 1); fprintf(g_OUTPUT, "fuse_setxattr_in:" " size=%u;" " flags=0x%X;" " name=%s;" " value=%s;" "\n" , arg->size, arg->flags, name, value); } static void debug_fuse_getxattr_in(const void *arg_) { const char *name; const struct fuse_getxattr_in *arg = arg_; name = PARAM(arg); fprintf(g_OUTPUT, "fuse_getxattr_in:" " size=%u;" " name=%s;" "\n" , arg->size, name); } static void debug_fuse_listxattr(const void *arg_) { const struct fuse_getxattr_in *arg = arg_; fprintf(g_OUTPUT, "fuse_listxattr:" " size=%u;" "\n" , arg->size); } static void debug_fuse_removexattr(const void *arg_) { const char *name = arg_; fprintf(g_OUTPUT, "fuse_removexattr:" " name=%s;" "\n" , name); } static void debug_fuse_fallocate_in(const void *arg_) { const struct fuse_fallocate_in *arg = arg_; fprintf(g_OUTPUT, "fuse_fallocate_in:" " fh=0x%"PRIx64";" " offset=%zu;" " length=%zu;" " mode=%o;" "\n" , arg->fh, arg->offset, arg->length, arg->mode); } void debug_fuse_init_in(const struct fuse_init_in *arg_) { fprintf(g_OUTPUT, "FUSE_INIT_IN: " " major=%u;" " minor=%u;" " max_readahead=%u;" " flags=0x%08X (", arg_->major, arg_->minor, arg_->max_readahead, arg_->flags); for(uint64_t i = 0; i < (sizeof(arg_->flags)*8); i++) { const char *str; if(!(arg_->flags & (1ULL << i))) continue; str = fuse_flag_to_str(i); if(str == NULL) continue; fprintf(g_OUTPUT,"%s,",str); } fprintf(g_OUTPUT,")\n"); } void debug_fuse_init_out(const uint64_t unique_, const struct fuse_init_out *arg_, const uint64_t argsize_) { const struct fuse_init_out *arg = arg_; fprintf(g_OUTPUT, /* "unique=0x%016"PRIx64";" */ /* " opcode=RESPONSE;" */ /* " 
error=0 (Success);" */ /* " len=%"PRIu64"; || " */ "FUSE_INIT_OUT:" " major=%u;" " minor=%u;" " max_readahead=%u;" " flags=0x%08X (" , /* unique_, */ /* sizeof(struct fuse_out_header) + argsize_, */ arg->major, arg->minor, arg->max_readahead, arg->flags); for(uint64_t i = 0; i < (sizeof(arg->flags)*8); i++) { const char *str; if(!(arg->flags & (1ULL << i))) continue; str = fuse_flag_to_str(i); if(str == NULL) continue; fprintf(g_OUTPUT,"%s,",str); } fprintf(g_OUTPUT, "); max_background=%u;" " congestion_threshold=%u;" " max_write=%u;" " time_gran=%u;" " max_pages=%u;" " map_alignment=%u;" "\n", arg->max_background, arg->congestion_threshold, arg->max_write, arg->time_gran, arg->max_pages, arg->map_alignment); } static void debug_fuse_attr(const struct fuse_attr *attr_) { fprintf(g_OUTPUT, "attr:" " ino=0x%016"PRIx64";" " size=%"PRIu64";" " blocks=%"PRIu64";" " atime=%"PRIu64";" " atimensec=%u;" " mtime=%"PRIu64";" " mtimensec=%u;" " ctime=%"PRIu64";" " ctimesec=%u;" " mode=%o;" " nlink=%u;" " uid=%u;" " gid=%u;" " rdev=%u;" " blksize=%u;" , attr_->ino, attr_->size, attr_->blocks, attr_->atime, attr_->atimensec, attr_->mtime, attr_->mtimensec, attr_->ctime, attr_->ctimensec, attr_->mode, attr_->nlink, attr_->uid, attr_->gid, attr_->rdev, attr_->blksize); } static void debug_fuse_entry(const struct fuse_entry_out *entry_) { fprintf(g_OUTPUT, " fuse_entry_out:" " nodeid=0x%016"PRIx64";" " generation=0x%016"PRIx64";" " entry_valid=%"PRIu64";" " entry_valid_nsec=%u;" " attr_valid=%"PRIu64";" " attr_valid_nsec=%u;" " ", entry_->nodeid, entry_->generation, entry_->entry_valid, entry_->entry_valid_nsec, entry_->attr_valid, entry_->attr_valid_nsec); debug_fuse_attr(&entry_->attr); } void debug_fuse_entry_out(const uint64_t unique_, const struct fuse_entry_out *arg_, const uint64_t argsize_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=0 (Success);" " len=%"PRIu64"; || " , unique_, sizeof(struct fuse_out_header) + argsize_); debug_fuse_entry(arg_); } void debug_fuse_attr_out(const uint64_t unique_, const struct fuse_attr_out *arg_, const uint64_t argsize_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=0 (Success);" " len=%"PRIu64"; || " "fuse_attr_out:" " attr_valid=%zu;" " attr_valid_nsec=%u;" " ino=%zu;" " size=%zu;" " blocks=%zu;" " atime=%zu;" " atimensec=%u;" " mtime=%zu;" " mtimensec=%u;" " ctime=%zu;" " ctimensec=%u;" " mode=%o;" " nlink=%u;" " uid=%u;" " gid=%u;" " rdev=%u;" " blksize=%u;" "\n", unique_, sizeof(struct fuse_out_header) + argsize_, arg_->attr_valid, arg_->attr_valid_nsec, arg_->attr.ino, arg_->attr.size, arg_->attr.blocks, arg_->attr.atime, arg_->attr.atimensec, arg_->attr.mtime, arg_->attr.mtimensec, arg_->attr.ctime, arg_->attr.ctimensec, arg_->attr.mode, arg_->attr.nlink, arg_->attr.uid, arg_->attr.gid, arg_->attr.rdev, arg_->attr.blksize); } static void debug_fuse_interrupt_in(const void *arg_) { const struct fuse_interrupt_in *arg = arg_; fprintf(g_OUTPUT, "fuse_interrupt_in:" " unique=0x%016"PRIx64";" "\n" , arg->unique); } static const char* opcode_name(enum fuse_opcode op_) { static const char *names[] = { [FUSE_LOOKUP] = "LOOKUP", [FUSE_FORGET] = "FORGET", [FUSE_GETATTR] = "GETATTR", [FUSE_SETATTR] = "SETATTR", [FUSE_READLINK] = "READLINK", [FUSE_SYMLINK] = "SYMLINK", [FUSE_MKNOD] = "MKNOD", [FUSE_MKDIR] = "MKDIR", [FUSE_UNLINK] = "UNLINK", [FUSE_RMDIR] = "RMDIR", [FUSE_RENAME] = "RENAME", [FUSE_LINK] = "LINK", [FUSE_OPEN] = "OPEN", [FUSE_READ] = "READ", [FUSE_WRITE] = "WRITE", [FUSE_STATFS] = "STATFS", 
[FUSE_RELEASE] = "RELEASE", [FUSE_FSYNC] = "FSYNC", [FUSE_SETXATTR] = "SETXATTR", [FUSE_GETXATTR] = "GETXATTR", [FUSE_LISTXATTR] = "LISTXATTR", [FUSE_REMOVEXATTR] = "REMOVEXATTR", [FUSE_FLUSH] = "FLUSH", [FUSE_INIT] = "INIT", [FUSE_OPENDIR] = "OPENDIR", [FUSE_READDIR] = "READDIR", [FUSE_RELEASEDIR] = "RELEASEDIR", [FUSE_FSYNCDIR] = "FSYNCDIR", [FUSE_GETLK] = "GETLK", [FUSE_SETLK] = "SETLK", [FUSE_SETLKW] = "SETLKW", [FUSE_ACCESS] = "ACCESS", [FUSE_CREATE] = "CREATE", [FUSE_INTERRUPT] = "INTERRUPT", [FUSE_BMAP] = "BMAP", [FUSE_DESTROY] = "DESTROY", [FUSE_IOCTL] = "IOCTL", [FUSE_POLL] = "POLL", [FUSE_NOTIFY_REPLY] = "NOTIFY_REPLY", [FUSE_BATCH_FORGET] = "BATCH_FORGET", [FUSE_FALLOCATE] = "FALLOCATE", [FUSE_READDIRPLUS] = "READDIRPLUS", [FUSE_RENAME2] = "RENAME2", [FUSE_LSEEK] = "LSEEK", [FUSE_COPY_FILE_RANGE] = "COPY_FILE_RANGE", [FUSE_SETUPMAPPING] = "SETUPMAPPING", [FUSE_REMOVEMAPPING] = "REMOVEMAPPING" }; if(op_ >= (sizeof(names) / sizeof(names[0]))) return "::UNKNOWN::"; return names[op_]; } void debug_fuse_in_header(const struct fuse_in_header *hdr_) { const void *arg = &hdr_[1]; fprintf(stderr, "unique=0x%016"PRIx64";" " opcode=%s (%u);" " nodeid=%zu;" " uid=%u;" " gid=%u;" " pid=%u; || ", hdr_->unique, opcode_name(hdr_->opcode), hdr_->opcode, hdr_->nodeid, hdr_->uid, hdr_->gid, hdr_->pid); switch(hdr_->opcode) { case FUSE_LOOKUP: debug_fuse_lookup(arg); break; case FUSE_INIT: debug_fuse_init_in(arg); break; case FUSE_GETATTR: debug_fuse_getattr_in(arg); break; case FUSE_SETATTR: debug_fuse_setattr_in(arg); break; case FUSE_ACCESS: debug_fuse_access_in(arg); break; case FUSE_MKNOD: debug_fuse_mknod_in(arg); break; case FUSE_MKDIR: debug_fuse_mkdir_in(arg); break; case FUSE_UNLINK: debug_fuse_unlink(arg); break; case FUSE_RMDIR: debug_fuse_rmdir(arg); break; case FUSE_SYMLINK: debug_fuse_symlink(arg); break; case FUSE_RENAME: debug_fuse_rename_in(arg); break; case FUSE_LINK: debug_fuse_link_in(arg); break; case FUSE_CREATE: debug_fuse_create_in(arg); break; case FUSE_OPEN: debug_fuse_open_in(arg); break; case FUSE_OPENDIR: debug_fuse_open_in(arg); break; case FUSE_READ: debug_fuse_read_in(arg); break; case FUSE_READDIR: debug_fuse_read_in(arg); break; case FUSE_READDIRPLUS: debug_fuse_read_in(arg); break; case FUSE_WRITE: debug_fuse_write_in(arg); break; case FUSE_RELEASE: debug_fuse_release_in(arg); break; case FUSE_RELEASEDIR: debug_fuse_release_in(arg); break; case FUSE_FSYNCDIR: debug_fuse_fsync_in(arg); break; case FUSE_GETXATTR: debug_fuse_getxattr_in(arg); break; case FUSE_LISTXATTR: debug_fuse_listxattr(arg); break; case FUSE_SETXATTR: debug_fuse_setxattr_in(arg); break; case FUSE_REMOVEXATTR: debug_fuse_removexattr(arg); break; case FUSE_FALLOCATE: debug_fuse_fallocate_in(arg); break; case FUSE_FLUSH: debug_fuse_flush_in(arg); break; case FUSE_INTERRUPT: debug_fuse_interrupt_in(arg); break; default: fprintf(g_OUTPUT,"FIXME\n"); break; } } void debug_fuse_out_header(const struct fuse_out_header *hdr_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=%d (%s);" " len=%"PRIu64";" , hdr_->unique, hdr_->error, strerror(-hdr_->error), sizeof(struct fuse_out_header)); } void debug_fuse_entry_open_out(const uint64_t unique_, const struct fuse_entry_out *entry_, const struct fuse_open_out *open_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=0 (Success);" " len=%"PRIu64"; || " , unique_, sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)); debug_fuse_entry(entry_); } void debug_fuse_readlink(const uint64_t unique_, const char 
*linkname_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=0 (Success);" " len=%"PRIu64"; || " "readlink: linkname=%s" "\n" , unique_, (sizeof(struct fuse_out_header) + strlen(linkname_)), linkname_); } void debug_fuse_write_out(const uint64_t unique_, const struct fuse_write_out *arg_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=0 (Success);" " len=%"PRIu64"; || " " fuse_write_out:" " size=%u" "\n" , unique_, sizeof(struct fuse_write_out), arg_->size); } void debug_fuse_statfs_out(const uint64_t unique_, const struct fuse_statfs_out *arg_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=0 (Success);" " len=%"PRIu64"; || " " fuse_statfs_out:" " blocks=%"PRIu64";" " bfree=%"PRIu64";" " bavail=%"PRIu64";" " files=%"PRIu64";" " ffree=%"PRIu64";" " bsize=%u;" " namelen=%u;" " frsize=%u;" "\n" , unique_, sizeof(struct fuse_statfs_out), arg_->st.blocks, arg_->st.bfree, arg_->st.bavail, arg_->st.files, arg_->st.ffree, arg_->st.bsize, arg_->st.namelen, arg_->st.frsize); } void debug_fuse_getxattr_out(const uint64_t unique_, const struct fuse_getxattr_out *arg_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=0 (Success);" " len=%"PRIu64"; || " " fuse_getxattr_out:" " size=%u;" "\n" , unique_, sizeof(struct fuse_out_header) + sizeof(struct fuse_getxattr_out), arg_->size); } void debug_fuse_lk_out(const uint64_t unique_, const struct fuse_lk_out *arg_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=0 (Success);" " len=%"PRIu64"; || " " fuse_file_lock:" " start=%"PRIu64";" " end=%"PRIu64";" " type=%u;" " pid=%u;" "\n" , unique_, sizeof(struct fuse_out_header) + sizeof(struct fuse_lk_out), arg_->lk.start, arg_->lk.end, arg_->lk.type, arg_->lk.pid); } void debug_fuse_bmap_out(const uint64_t unique_, const struct fuse_bmap_out *arg_) { fprintf(g_OUTPUT, "unique=0x%016"PRIx64";" " opcode=RESPONSE;" " error=0 (Success);" " len=%"PRIu64"; || " " fuse_bmap_out:" " block=%"PRIu64";" "\n" , unique_, sizeof(struct fuse_out_header) + sizeof(struct fuse_bmap_out), arg_->block); } mergerfs-2.40.2/libfuse/lib/debug.h000066400000000000000000000050721457016576200170700ustar00rootroot00000000000000/* ISC License Copyright (c) 2021, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #pragma once #include "fuse_kernel.h" void debug_fuse_open_out(const uint64_t unique, const struct fuse_open_out *arg, const uint64_t argsize); void debug_fuse_init_in(const struct fuse_init_in *arg); void debug_fuse_init_out(const uint64_t unique, const struct fuse_init_out *arg, const uint64_t argsize); void debug_fuse_entry_out(const uint64_t unique, const struct fuse_entry_out *arg, const uint64_t argsize); void debug_fuse_attr_out(const uint64_t unique, const struct fuse_attr_out *arg, const uint64_t argsize); void debug_fuse_entry_open_out(const uint64_t unique, const struct fuse_entry_out *earg, const struct fuse_open_out *oarg); void debug_fuse_readlink(const uint64_t unique, const char *linkname); void debug_fuse_write_out(const uint64_t unique, const struct fuse_write_out *arg); void debug_fuse_statfs_out(const uint64_t unique, const struct fuse_statfs_out *arg); void debug_fuse_getxattr_out(const uint64_t unique, const struct fuse_getxattr_out *arg); void debug_fuse_lk_out(const uint64_t unique, const struct fuse_lk_out *arg); void debug_fuse_bmap_out(const uint64_t unique, const struct fuse_bmap_out *arg); void debug_fuse_in_header(const struct fuse_in_header *hdr); mergerfs-2.40.2/libfuse/lib/fmp.h000066400000000000000000000147561457016576200165750ustar00rootroot00000000000000/* ISC License Copyright (c) 2021, Antonio SJ Musumeci Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #pragma once #include "kvec.h" #include #include #include #include #include #include #define ROUND_UP(N,S) ((((N) + (S) - 1) / (S)) * (S)) typedef kvec_t(void*) slab_kvec_t; typedef struct mem_stack_t mem_stack_t; struct mem_stack_t { mem_stack_t *next; }; typedef struct fmp_t fmp_t; struct fmp_t { mem_stack_t *objs; slab_kvec_t slabs; uint64_t avail_objs; uint64_t obj_size; uint64_t page_size; uint64_t slab_size; }; static inline uint64_t fmp_page_size() { return sysconf(_SC_PAGESIZE); } static inline void fmp_init(fmp_t *fmp_, const uint64_t obj_size_, const uint64_t page_multiple_) { kv_init(fmp_->slabs); fmp_->objs = NULL; fmp_->avail_objs = 0; fmp_->obj_size = ROUND_UP(obj_size_,sizeof(void*)); fmp_->page_size = fmp_page_size(); fmp_->slab_size = (fmp_->page_size * page_multiple_); } static inline uint64_t fmp_slab_count(fmp_t *fmp_) { return kv_size(fmp_->slabs); } static inline void* fmp_slab_alloc_posix_memalign(fmp_t *fmp_) { int rv; void *mem; const size_t alignment = fmp_->page_size; const size_t size = fmp_->slab_size; rv = posix_memalign(&mem,alignment,size); if(rv != 0) return NULL; return NULL; } static inline void* fmp_slab_alloc_mmap(fmp_t *fmp_) { void *mem; void *address = NULL; const size_t length = fmp_->slab_size; const int protect = PROT_READ|PROT_WRITE; const int flags = MAP_PRIVATE|MAP_ANONYMOUS; const int filedes = -1; const off_t offset = 0; mem = mmap(address,length,protect,flags,filedes,offset); if(mem == MAP_FAILED) return NULL; return mem; } static inline void fmp_slab_free_posix_memalign(fmp_t* fmp_, void *mem_) { (void)fmp_; free(mem_); } static inline void fmp_slab_free_mmap(fmp_t* fmp_, void *mem_) { void *addr = mem_; size_t length = fmp_->slab_size; (void)munmap(addr,length); } static inline int fmp_slab_alloc(fmp_t *fmp_) { char *i; void *mem; mem = fmp_slab_alloc_mmap(fmp_); if(mem == NULL) return -ENOMEM; kv_push(void*,fmp_->slabs,mem); i = ((char*)mem + fmp_->slab_size - fmp_->obj_size); while(i >= (char*)mem) { mem_stack_t *obj = (mem_stack_t*)i; obj->next = fmp_->objs; fmp_->objs = obj; fmp_->avail_objs++; i -= fmp_->obj_size; } return 0; } static inline void* fmp_alloc(fmp_t *fmp_) { void *rv; if(fmp_->objs == NULL) fmp_slab_alloc(fmp_); if(fmp_->objs == NULL) return NULL; rv = fmp_->objs; fmp_->objs = fmp_->objs->next; fmp_->avail_objs--; return rv; } static inline void* fmp_calloc(fmp_t *fmp_) { void *obj; obj = fmp_alloc(fmp_); if(obj == NULL) return NULL; memset(obj,0,fmp_->obj_size); return obj; } static inline void fmp_free(fmp_t *fmp_, void *obj_) { mem_stack_t *obj = (mem_stack_t*)obj_; obj->next = fmp_->objs; fmp_->objs = obj; fmp_->avail_objs++; } static inline void fmp_clear(fmp_t *fmp_) { while(kv_size(fmp_->slabs)) { void *slab = kv_pop(fmp_->slabs); fmp_slab_free_mmap(fmp_,slab); } fmp_->objs = NULL; fmp_->avail_objs = 0; } static inline void fmp_destroy(fmp_t *fmp_) { fmp_clear(fmp_); kv_destroy(fmp_->slabs); } static inline uint64_t fmp_avail_objs(fmp_t *fmp_) { return fmp_->avail_objs; } static inline uint64_t fmp_objs_per_slab(fmp_t *fmp_) { return (fmp_->slab_size / fmp_->obj_size); } static inline uint64_t fmp_objs_in_slab(fmp_t *fmp_, void *slab_) { char *slab; uint64_t objs_per_slab; uint64_t objs_in_slab; slab = (char*)slab_; objs_in_slab = 0; objs_per_slab = fmp_objs_per_slab(fmp_); for(mem_stack_t *stack = fmp_->objs; stack != NULL; stack = stack->next) { char *obj = (char*)stack; if((obj >= slab) && (obj < (slab + fmp_->slab_size))) objs_in_slab++; if(objs_in_slab >= objs_per_slab) break; } return objs_in_slab; } 
static inline void fmp_remove_objs_in_slab(fmp_t *fmp_, void *slab_) { char *slab; uint64_t objs_per_slab; uint64_t objs_in_slab; mem_stack_t **p; p = &fmp_->objs; slab = (char*)slab_; objs_in_slab = 0; objs_per_slab = fmp_objs_per_slab(fmp_); while((*p) != NULL) { char *obj = (char*)*p; if((obj >= slab) && (obj < (slab + fmp_->slab_size))) { objs_in_slab++; *p = (*p)->next; fmp_->avail_objs--; if(objs_in_slab >= objs_per_slab) break; continue; } p = &(*p)->next; } } static inline int fmp_gc_slab(fmp_t *fmp_, uint64_t slab_idx_) { char *slab; uint64_t objs_in_slab; uint64_t objs_per_slab; slab_idx_ = (slab_idx_ % kv_size(fmp_->slabs)); slab = kv_A(fmp_->slabs,slab_idx_); objs_per_slab = fmp_objs_per_slab(fmp_); objs_in_slab = fmp_objs_in_slab(fmp_,slab); if(objs_in_slab != objs_per_slab) return 0; fmp_remove_objs_in_slab(fmp_,slab); kv_delete(fmp_->slabs,slab_idx_); fmp_slab_free_mmap(fmp_,slab); return 1; } static inline int fmp_gc(fmp_t *fmp_) { uint64_t slab_idx; slab_idx = rand(); return fmp_gc_slab(fmp_,slab_idx); } static inline double fmp_slab_usage_ratio(fmp_t *fmp_) { double avail_objs; double objs_per_slab; double nums_of_slabs; avail_objs = fmp_->avail_objs; objs_per_slab = fmp_objs_per_slab(fmp_); nums_of_slabs = kv_size(fmp_->slabs); return (avail_objs / (objs_per_slab * nums_of_slabs)); } static inline uint64_t fmp_total_allocated_memory(fmp_t *fmp_) { return (fmp_->slab_size * kv_size(fmp_->slabs)); } mergerfs-2.40.2/libfuse/lib/fmt/000077500000000000000000000000001457016576200164135ustar00rootroot00000000000000mergerfs-2.40.2/libfuse/lib/fmt/args.h000066400000000000000000000163741457016576200175330ustar00rootroot00000000000000// Formatting library for C++ - dynamic format arguments // // Copyright (c) 2012 - present, Victor Zverovich // All rights reserved. // // For the license information refer to format.h. #ifndef FMT_ARGS_H_ #define FMT_ARGS_H_ #include // std::reference_wrapper #include // std::unique_ptr #include #include "core.h" FMT_BEGIN_NAMESPACE namespace detail { template struct is_reference_wrapper : std::false_type {}; template struct is_reference_wrapper> : std::true_type {}; template const T& unwrap(const T& v) { return v; } template const T& unwrap(const std::reference_wrapper& v) { return static_cast(v); } class dynamic_arg_list { // Workaround for clang's -Wweak-vtables. Unlike for regular classes, for // templates it doesn't complain about inability to deduce single translation // unit for placing vtable. So storage_node_base is made a fake template. template struct node { virtual ~node() = default; std::unique_ptr> next; }; template struct typed_node : node<> { T value; template FMT_CONSTEXPR typed_node(const Arg& arg) : value(arg) {} template FMT_CONSTEXPR typed_node(const basic_string_view& arg) : value(arg.data(), arg.size()) {} }; std::unique_ptr> head_; public: template const T& push(const Arg& arg) { auto new_node = std::unique_ptr>(new typed_node(arg)); auto& value = new_node->value; new_node->next = std::move(head_); head_ = std::move(new_node); return value; } }; } // namespace detail /** \rst A dynamic version of `fmt::format_arg_store`. It's equipped with a storage to potentially temporary objects which lifetimes could be shorter than the format arguments object. It can be implicitly converted into `~fmt::basic_format_args` for passing into type-erased formatting functions such as `~fmt::vformat`. 
\endrst */ template class dynamic_format_arg_store #if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 // Workaround a GCC template argument substitution bug. : public basic_format_args #endif { private: using char_type = typename Context::char_type; template struct need_copy { static constexpr detail::type mapped_type = detail::mapped_type_constant::value; enum { value = !(detail::is_reference_wrapper::value || std::is_same>::value || std::is_same>::value || (mapped_type != detail::type::cstring_type && mapped_type != detail::type::string_type && mapped_type != detail::type::custom_type)) }; }; template using stored_type = conditional_t< std::is_convertible>::value && !detail::is_reference_wrapper::value, std::basic_string, T>; // Storage of basic_format_arg must be contiguous. std::vector> data_; std::vector> named_info_; // Storage of arguments not fitting into basic_format_arg must grow // without relocation because items in data_ refer to it. detail::dynamic_arg_list dynamic_args_; friend class basic_format_args; unsigned long long get_types() const { return detail::is_unpacked_bit | data_.size() | (named_info_.empty() ? 0ULL : static_cast(detail::has_named_args_bit)); } const basic_format_arg* data() const { return named_info_.empty() ? data_.data() : data_.data() + 1; } template void emplace_arg(const T& arg) { data_.emplace_back(detail::make_arg(arg)); } template void emplace_arg(const detail::named_arg& arg) { if (named_info_.empty()) { constexpr const detail::named_arg_info* zero_ptr{nullptr}; data_.insert(data_.begin(), {zero_ptr, 0}); } data_.emplace_back(detail::make_arg(detail::unwrap(arg.value))); auto pop_one = [](std::vector>* data) { data->pop_back(); }; std::unique_ptr>, decltype(pop_one)> guard{&data_, pop_one}; named_info_.push_back({arg.name, static_cast(data_.size() - 2u)}); data_[0].value_.named_args = {named_info_.data(), named_info_.size()}; guard.release(); } public: constexpr dynamic_format_arg_store() = default; /** \rst Adds an argument into the dynamic store for later passing to a formatting function. Note that custom types and string types (but not string views) are copied into the store dynamically allocating memory if necessary. **Example**:: fmt::dynamic_format_arg_store store; store.push_back(42); store.push_back("abc"); store.push_back(1.5f); std::string result = fmt::vformat("{} and {} and {}", store); \endrst */ template void push_back(const T& arg) { if (detail::const_check(need_copy::value)) emplace_arg(dynamic_args_.push>(arg)); else emplace_arg(detail::unwrap(arg)); } /** \rst Adds a reference to the argument into the dynamic store for later passing to a formatting function. **Example**:: fmt::dynamic_format_arg_store store; char band[] = "Rolling Stones"; store.push_back(std::cref(band)); band[9] = 'c'; // Changing str affects the output. std::string result = fmt::vformat("{}", store); // result == "Rolling Scones" \endrst */ template void push_back(std::reference_wrapper arg) { static_assert( need_copy::value, "objects of built-in types and string views are always copied"); emplace_arg(arg.get()); } /** Adds named argument into the dynamic store for later passing to a formatting function. ``std::reference_wrapper`` is supported to avoid copying of the argument. The name is always copied into the store. 
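    **Example** (illustrative)::

      fmt::dynamic_format_arg_store<fmt::format_context> store;
      store.push_back(fmt::arg("answer", 42));
      std::string result = fmt::vformat("the answer is {answer}", store);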
*/ template void push_back(const detail::named_arg& arg) { const char_type* arg_name = dynamic_args_.push>(arg.name).c_str(); if (detail::const_check(need_copy::value)) { emplace_arg( fmt::arg(arg_name, dynamic_args_.push>(arg.value))); } else { emplace_arg(fmt::arg(arg_name, arg.value)); } } /** Erase all elements from the store */ void clear() { data_.clear(); named_info_.clear(); dynamic_args_ = detail::dynamic_arg_list(); } /** \rst Reserves space to store at least *new_cap* arguments including *new_cap_named* named arguments. \endrst */ void reserve(size_t new_cap, size_t new_cap_named) { FMT_ASSERT(new_cap >= new_cap_named, "Set of arguments includes set of named arguments"); data_.reserve(new_cap); named_info_.reserve(new_cap_named); } }; FMT_END_NAMESPACE #endif // FMT_ARGS_H_ mergerfs-2.40.2/libfuse/lib/fmt/chrono.h000066400000000000000000002047541457016576200200700ustar00rootroot00000000000000// Formatting library for C++ - chrono support // // Copyright (c) 2012 - present, Victor Zverovich // All rights reserved. // // For the license information refer to format.h. #ifndef FMT_CHRONO_H_ #define FMT_CHRONO_H_ #include #include #include // std::isfinite #include // std::memcpy #include #include #include #include #include #include "format.h" FMT_BEGIN_NAMESPACE // Enable tzset. #ifndef FMT_USE_TZSET // UWP doesn't provide _tzset. # if FMT_HAS_INCLUDE("winapifamily.h") # include # endif # if defined(_WIN32) && (!defined(WINAPI_FAMILY) || \ (WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP)) # define FMT_USE_TZSET 1 # else # define FMT_USE_TZSET 0 # endif #endif // Enable safe chrono durations, unless explicitly disabled. #ifndef FMT_SAFE_DURATION_CAST # define FMT_SAFE_DURATION_CAST 1 #endif #if FMT_SAFE_DURATION_CAST // For conversion between std::chrono::durations without undefined // behaviour or erroneous results. // This is a stripped down version of duration_cast, for inclusion in fmt. // See https://github.com/pauldreik/safe_duration_cast // // Copyright Paul Dreik 2019 namespace safe_duration_cast { template ::value && std::numeric_limits::is_signed == std::numeric_limits::is_signed)> FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { ec = 0; using F = std::numeric_limits; using T = std::numeric_limits; static_assert(F::is_integer, "From must be integral"); static_assert(T::is_integer, "To must be integral"); // A and B are both signed, or both unsigned. if (detail::const_check(F::digits <= T::digits)) { // From fits in To without any problem. } else { // From does not always fit in To, resort to a dynamic check. if (from < (T::min)() || from > (T::max)()) { // outside range. ec = 1; return {}; } } return static_cast(from); } /** * converts From to To, without loss. If the dynamic value of from * can't be converted to To without loss, ec is set. */ template ::value && std::numeric_limits::is_signed != std::numeric_limits::is_signed)> FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { ec = 0; using F = std::numeric_limits; using T = std::numeric_limits; static_assert(F::is_integer, "From must be integral"); static_assert(T::is_integer, "To must be integral"); if (detail::const_check(F::is_signed && !T::is_signed)) { // From may be negative, not allowed! if (fmt::detail::is_negative(from)) { ec = 1; return {}; } // From is positive. Can it always fit in To? 
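  // (illustrative: with From = int64_t and To = uint32_t, a value such as
  // int64_t(1) << 40 is positive yet exceeds the 32-bit maximum, so the
  // check below sets ec)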
if (detail::const_check(F::digits > T::digits) && from > static_cast(detail::max_value())) { ec = 1; return {}; } } if (detail::const_check(!F::is_signed && T::is_signed && F::digits >= T::digits) && from > static_cast(detail::max_value())) { ec = 1; return {}; } return static_cast(from); // Lossless conversion. } template ::value)> FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { ec = 0; return from; } // function // clang-format off /** * converts From to To if possible, otherwise ec is set. * * input | output * ---------------------------------|--------------- * NaN | NaN * Inf | Inf * normal, fits in output | converted (possibly lossy) * normal, does not fit in output | ec is set * subnormal | best effort * -Inf | -Inf */ // clang-format on template ::value)> FMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) { ec = 0; using T = std::numeric_limits; static_assert(std::is_floating_point::value, "From must be floating"); static_assert(std::is_floating_point::value, "To must be floating"); // catch the only happy case if (std::isfinite(from)) { if (from >= T::lowest() && from <= (T::max)()) { return static_cast(from); } // not within range. ec = 1; return {}; } // nan and inf will be preserved return static_cast(from); } // function template ::value)> FMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) { ec = 0; static_assert(std::is_floating_point::value, "From must be floating"); return from; } /** * safe duration cast between integral durations */ template ::value), FMT_ENABLE_IF(std::is_integral::value)> To safe_duration_cast(std::chrono::duration from, int& ec) { using From = std::chrono::duration; ec = 0; // the basic idea is that we need to convert from count() in the from type // to count() in the To type, by multiplying it with this: struct Factor : std::ratio_divide {}; static_assert(Factor::num > 0, "num must be positive"); static_assert(Factor::den > 0, "den must be positive"); // the conversion is like this: multiply from.count() with Factor::num // /Factor::den and convert it to To::rep, all this without // overflow/underflow. let's start by finding a suitable type that can hold // both To, From and Factor::num using IntermediateRep = typename std::common_type::type; // safe conversion to IntermediateRep IntermediateRep count = lossless_integral_conversion(from.count(), ec); if (ec) return {}; // multiply with Factor::num without overflow or underflow if (detail::const_check(Factor::num != 1)) { const auto max1 = detail::max_value() / Factor::num; if (count > max1) { ec = 1; return {}; } const auto min1 = (std::numeric_limits::min)() / Factor::num; if (!std::is_unsigned::value && count < min1) { ec = 1; return {}; } count *= Factor::num; } if (detail::const_check(Factor::den != 1)) count /= Factor::den; auto tocount = lossless_integral_conversion(count, ec); return ec ? To() : To(tocount); } /** * safe duration_cast between floating point durations */ template ::value), FMT_ENABLE_IF(std::is_floating_point::value)> To safe_duration_cast(std::chrono::duration from, int& ec) { using From = std::chrono::duration; ec = 0; if (std::isnan(from.count())) { // nan in, gives nan out. easy. return To{std::numeric_limits::quiet_NaN()}; } // maybe we should also check if from is denormal, and decide what to do about // it. // +-inf should be preserved. 
if (std::isinf(from.count())) { return To{from.count()}; } // the basic idea is that we need to convert from count() in the from type // to count() in the To type, by multiplying it with this: struct Factor : std::ratio_divide {}; static_assert(Factor::num > 0, "num must be positive"); static_assert(Factor::den > 0, "den must be positive"); // the conversion is like this: multiply from.count() with Factor::num // /Factor::den and convert it to To::rep, all this without // overflow/underflow. let's start by finding a suitable type that can hold // both To, From and Factor::num using IntermediateRep = typename std::common_type::type; // force conversion of From::rep -> IntermediateRep to be safe, // even if it will never happen be narrowing in this context. IntermediateRep count = safe_float_conversion(from.count(), ec); if (ec) { return {}; } // multiply with Factor::num without overflow or underflow if (detail::const_check(Factor::num != 1)) { constexpr auto max1 = detail::max_value() / static_cast(Factor::num); if (count > max1) { ec = 1; return {}; } constexpr auto min1 = std::numeric_limits::lowest() / static_cast(Factor::num); if (count < min1) { ec = 1; return {}; } count *= static_cast(Factor::num); } // this can't go wrong, right? den>0 is checked earlier. if (detail::const_check(Factor::den != 1)) { using common_t = typename std::common_type::type; count /= static_cast(Factor::den); } // convert to the to type, safely using ToRep = typename To::rep; const ToRep tocount = safe_float_conversion(count, ec); if (ec) { return {}; } return To{tocount}; } } // namespace safe_duration_cast #endif // Prevents expansion of a preceding token as a function-style macro. // Usage: f FMT_NOMACRO() #define FMT_NOMACRO namespace detail { template struct null {}; inline null<> localtime_r FMT_NOMACRO(...) { return null<>(); } inline null<> localtime_s(...) { return null<>(); } inline null<> gmtime_r(...) { return null<>(); } inline null<> gmtime_s(...) { return null<>(); } inline const std::locale& get_classic_locale() { static const auto& locale = std::locale::classic(); return locale; } template struct codecvt_result { static constexpr const size_t max_size = 32; CodeUnit buf[max_size]; CodeUnit* end; }; template constexpr const size_t codecvt_result::max_size; template void write_codecvt(codecvt_result& out, string_view in_buf, const std::locale& loc) { #if FMT_CLANG_VERSION # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdeprecated" auto& f = std::use_facet>(loc); # pragma clang diagnostic pop #else auto& f = std::use_facet>(loc); #endif auto mb = std::mbstate_t(); const char* from_next = nullptr; auto result = f.in(mb, in_buf.begin(), in_buf.end(), from_next, std::begin(out.buf), std::end(out.buf), out.end); if (result != std::codecvt_base::ok) FMT_THROW(format_error("failed to format time")); } template auto write_encoded_tm_str(OutputIt out, string_view in, const std::locale& loc) -> OutputIt { if (detail::is_utf8() && loc != get_classic_locale()) { // char16_t and char32_t codecvts are broken in MSVC (linkage errors) and // gcc-4. #if FMT_MSC_VERSION != 0 || \ (defined(__GLIBCXX__) && !defined(_GLIBCXX_USE_DUAL_ABI)) // The _GLIBCXX_USE_DUAL_ABI macro is always defined in libstdc++ from gcc-5 // and newer. using code_unit = wchar_t; #else using code_unit = char32_t; #endif using unit_t = codecvt_result; unit_t unit; write_codecvt(unit, in, loc); // In UTF-8 is used one to four one-byte code units. 
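  // (code points below 0x80 use one byte, below 0x800 two, the rest of the
  // Basic Multilingual Plane three, and code points up to 0x10ffff four,
  // matching the branches below)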
auto&& buf = basic_memory_buffer(); for (code_unit* p = unit.buf; p != unit.end; ++p) { uint32_t c = static_cast(*p); if (sizeof(code_unit) == 2 && c >= 0xd800 && c <= 0xdfff) { // surrogate pair ++p; if (p == unit.end || (c & 0xfc00) != 0xd800 || (*p & 0xfc00) != 0xdc00) { FMT_THROW(format_error("failed to format time")); } c = (c << 10) + static_cast(*p) - 0x35fdc00; } if (c < 0x80) { buf.push_back(static_cast(c)); } else if (c < 0x800) { buf.push_back(static_cast(0xc0 | (c >> 6))); buf.push_back(static_cast(0x80 | (c & 0x3f))); } else if ((c >= 0x800 && c <= 0xd7ff) || (c >= 0xe000 && c <= 0xffff)) { buf.push_back(static_cast(0xe0 | (c >> 12))); buf.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); buf.push_back(static_cast(0x80 | (c & 0x3f))); } else if (c >= 0x10000 && c <= 0x10ffff) { buf.push_back(static_cast(0xf0 | (c >> 18))); buf.push_back(static_cast(0x80 | ((c & 0x3ffff) >> 12))); buf.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); buf.push_back(static_cast(0x80 | (c & 0x3f))); } else { FMT_THROW(format_error("failed to format time")); } } return copy_str(buf.data(), buf.data() + buf.size(), out); } return copy_str(in.data(), in.data() + in.size(), out); } template ::value)> auto write_tm_str(OutputIt out, string_view sv, const std::locale& loc) -> OutputIt { codecvt_result unit; write_codecvt(unit, sv, loc); return copy_str(unit.buf, unit.end, out); } template ::value)> auto write_tm_str(OutputIt out, string_view sv, const std::locale& loc) -> OutputIt { return write_encoded_tm_str(out, sv, loc); } template inline void do_write(buffer& buf, const std::tm& time, const std::locale& loc, char format, char modifier) { auto&& format_buf = formatbuf>(buf); auto&& os = std::basic_ostream(&format_buf); os.imbue(loc); using iterator = std::ostreambuf_iterator; const auto& facet = std::use_facet>(loc); auto end = facet.put(os, os, Char(' '), &time, format, modifier); if (end.failed()) FMT_THROW(format_error("failed to format time")); } template ::value)> auto write(OutputIt out, const std::tm& time, const std::locale& loc, char format, char modifier = 0) -> OutputIt { auto&& buf = get_buffer(out); do_write(buf, time, loc, format, modifier); return buf.out(); } template ::value)> auto write(OutputIt out, const std::tm& time, const std::locale& loc, char format, char modifier = 0) -> OutputIt { auto&& buf = basic_memory_buffer(); do_write(buf, time, loc, format, modifier); return write_encoded_tm_str(out, string_view(buf.data(), buf.size()), loc); } } // namespace detail FMT_MODULE_EXPORT_BEGIN /** Converts given time since epoch as ``std::time_t`` value into calendar time, expressed in local time. Unlike ``std::localtime``, this function is thread-safe on most platforms. */ inline std::tm localtime(std::time_t time) { struct dispatcher { std::time_t time_; std::tm tm_; dispatcher(std::time_t t) : time_(t) {} bool run() { using namespace fmt::detail; return handle(localtime_r(&time_, &tm_)); } bool handle(std::tm* tm) { return tm != nullptr; } bool handle(detail::null<>) { using namespace fmt::detail; return fallback(localtime_s(&tm_, &time_)); } bool fallback(int res) { return res == 0; } #if !FMT_MSC_VERSION bool fallback(detail::null<>) { using namespace fmt::detail; std::tm* tm = std::localtime(&time_); if (tm) tm_ = *tm; return tm != nullptr; } #endif }; dispatcher lt(time); // Too big time values may be unsupported. 
if (!lt.run()) FMT_THROW(format_error("time_t value out of range")); return lt.tm_; } inline std::tm localtime( std::chrono::time_point time_point) { return localtime(std::chrono::system_clock::to_time_t(time_point)); } /** Converts given time since epoch as ``std::time_t`` value into calendar time, expressed in Coordinated Universal Time (UTC). Unlike ``std::gmtime``, this function is thread-safe on most platforms. */ inline std::tm gmtime(std::time_t time) { struct dispatcher { std::time_t time_; std::tm tm_; dispatcher(std::time_t t) : time_(t) {} bool run() { using namespace fmt::detail; return handle(gmtime_r(&time_, &tm_)); } bool handle(std::tm* tm) { return tm != nullptr; } bool handle(detail::null<>) { using namespace fmt::detail; return fallback(gmtime_s(&tm_, &time_)); } bool fallback(int res) { return res == 0; } #if !FMT_MSC_VERSION bool fallback(detail::null<>) { std::tm* tm = std::gmtime(&time_); if (tm) tm_ = *tm; return tm != nullptr; } #endif }; dispatcher gt(time); // Too big time values may be unsupported. if (!gt.run()) FMT_THROW(format_error("time_t value out of range")); return gt.tm_; } inline std::tm gmtime( std::chrono::time_point time_point) { return gmtime(std::chrono::system_clock::to_time_t(time_point)); } FMT_BEGIN_DETAIL_NAMESPACE // Writes two-digit numbers a, b and c separated by sep to buf. // The method by Pavel Novikov based on // https://johnnylee-sde.github.io/Fast-unsigned-integer-to-time-string/. inline void write_digit2_separated(char* buf, unsigned a, unsigned b, unsigned c, char sep) { unsigned long long digits = a | (b << 24) | (static_cast(c) << 48); // Convert each value to BCD. // We have x = a * 10 + b and we want to convert it to BCD y = a * 16 + b. // The difference is // y - x = a * 6 // a can be found from x: // a = floor(x / 10) // then // y = x + a * 6 = x + floor(x / 10) * 6 // floor(x / 10) is (x * 205) >> 11 (needs 16 bits). digits += (((digits * 205) >> 11) & 0x000f00000f00000f) * 6; // Put low nibbles to high bytes and high nibbles to low bytes. digits = ((digits & 0x00f00000f00000f0) >> 4) | ((digits & 0x000f00000f00000f) << 8); auto usep = static_cast(sep); // Add ASCII '0' to each digit byte and insert separators. digits |= 0x3030003030003030 | (usep << 16) | (usep << 40); constexpr const size_t len = 8; if (const_check(is_big_endian())) { char tmp[len]; std::memcpy(tmp, &digits, len); std::reverse_copy(tmp, tmp + len, buf); } else { std::memcpy(buf, &digits, len); } } template FMT_CONSTEXPR inline const char* get_units() { if (std::is_same::value) return "as"; if (std::is_same::value) return "fs"; if (std::is_same::value) return "ps"; if (std::is_same::value) return "ns"; if (std::is_same::value) return "µs"; if (std::is_same::value) return "ms"; if (std::is_same::value) return "cs"; if (std::is_same::value) return "ds"; if (std::is_same>::value) return "s"; if (std::is_same::value) return "das"; if (std::is_same::value) return "hs"; if (std::is_same::value) return "ks"; if (std::is_same::value) return "Ms"; if (std::is_same::value) return "Gs"; if (std::is_same::value) return "Ts"; if (std::is_same::value) return "Ps"; if (std::is_same::value) return "Es"; if (std::is_same>::value) return "m"; if (std::is_same>::value) return "h"; return nullptr; } enum class numeric_system { standard, // Alternative numeric system, e.g. 十二 instead of 12 in ja_JP locale. alternative }; // Parses a put_time-like format string and invokes handler actions. 
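// For example (an illustrative walk-through): parsing "%H:%M:%S" invokes, in
// order, handler.on_24_hour(numeric_system::standard), handler.on_text() for
// the literal ":", handler.on_minute(numeric_system::standard), on_text() for
// ":" again and handler.on_second(numeric_system::standard), while a modified
// spec such as "%OS" maps to on_second(numeric_system::alternative).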
template FMT_CONSTEXPR const Char* parse_chrono_format(const Char* begin, const Char* end, Handler&& handler) { auto ptr = begin; while (ptr != end) { auto c = *ptr; if (c == '}') break; if (c != '%') { ++ptr; continue; } if (begin != ptr) handler.on_text(begin, ptr); ++ptr; // consume '%' if (ptr == end) FMT_THROW(format_error("invalid format")); c = *ptr++; switch (c) { case '%': handler.on_text(ptr - 1, ptr); break; case 'n': { const Char newline[] = {'\n'}; handler.on_text(newline, newline + 1); break; } case 't': { const Char tab[] = {'\t'}; handler.on_text(tab, tab + 1); break; } // Year: case 'Y': handler.on_year(numeric_system::standard); break; case 'y': handler.on_short_year(numeric_system::standard); break; case 'C': handler.on_century(numeric_system::standard); break; case 'G': handler.on_iso_week_based_year(); break; case 'g': handler.on_iso_week_based_short_year(); break; // Day of the week: case 'a': handler.on_abbr_weekday(); break; case 'A': handler.on_full_weekday(); break; case 'w': handler.on_dec0_weekday(numeric_system::standard); break; case 'u': handler.on_dec1_weekday(numeric_system::standard); break; // Month: case 'b': case 'h': handler.on_abbr_month(); break; case 'B': handler.on_full_month(); break; case 'm': handler.on_dec_month(numeric_system::standard); break; // Day of the year/month: case 'U': handler.on_dec0_week_of_year(numeric_system::standard); break; case 'W': handler.on_dec1_week_of_year(numeric_system::standard); break; case 'V': handler.on_iso_week_of_year(numeric_system::standard); break; case 'j': handler.on_day_of_year(); break; case 'd': handler.on_day_of_month(numeric_system::standard); break; case 'e': handler.on_day_of_month_space(numeric_system::standard); break; // Hour, minute, second: case 'H': handler.on_24_hour(numeric_system::standard); break; case 'I': handler.on_12_hour(numeric_system::standard); break; case 'M': handler.on_minute(numeric_system::standard); break; case 'S': handler.on_second(numeric_system::standard); break; // Other: case 'c': handler.on_datetime(numeric_system::standard); break; case 'x': handler.on_loc_date(numeric_system::standard); break; case 'X': handler.on_loc_time(numeric_system::standard); break; case 'D': handler.on_us_date(); break; case 'F': handler.on_iso_date(); break; case 'r': handler.on_12_hour_time(); break; case 'R': handler.on_24_hour_time(); break; case 'T': handler.on_iso_time(); break; case 'p': handler.on_am_pm(); break; case 'Q': handler.on_duration_value(); break; case 'q': handler.on_duration_unit(); break; case 'z': handler.on_utc_offset(); break; case 'Z': handler.on_tz_name(); break; // Alternative representation: case 'E': { if (ptr == end) FMT_THROW(format_error("invalid format")); c = *ptr++; switch (c) { case 'Y': handler.on_year(numeric_system::alternative); break; case 'y': handler.on_offset_year(); break; case 'C': handler.on_century(numeric_system::alternative); break; case 'c': handler.on_datetime(numeric_system::alternative); break; case 'x': handler.on_loc_date(numeric_system::alternative); break; case 'X': handler.on_loc_time(numeric_system::alternative); break; default: FMT_THROW(format_error("invalid format")); } break; } case 'O': if (ptr == end) FMT_THROW(format_error("invalid format")); c = *ptr++; switch (c) { case 'y': handler.on_short_year(numeric_system::alternative); break; case 'm': handler.on_dec_month(numeric_system::alternative); break; case 'U': handler.on_dec0_week_of_year(numeric_system::alternative); break; case 'W': 
handler.on_dec1_week_of_year(numeric_system::alternative); break; case 'V': handler.on_iso_week_of_year(numeric_system::alternative); break; case 'd': handler.on_day_of_month(numeric_system::alternative); break; case 'e': handler.on_day_of_month_space(numeric_system::alternative); break; case 'w': handler.on_dec0_weekday(numeric_system::alternative); break; case 'u': handler.on_dec1_weekday(numeric_system::alternative); break; case 'H': handler.on_24_hour(numeric_system::alternative); break; case 'I': handler.on_12_hour(numeric_system::alternative); break; case 'M': handler.on_minute(numeric_system::alternative); break; case 'S': handler.on_second(numeric_system::alternative); break; default: FMT_THROW(format_error("invalid format")); } break; default: FMT_THROW(format_error("invalid format")); } begin = ptr; } if (begin != ptr) handler.on_text(begin, ptr); return ptr; } template struct null_chrono_spec_handler { FMT_CONSTEXPR void unsupported() { static_cast(this)->unsupported(); } FMT_CONSTEXPR void on_year(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_short_year(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_offset_year() { unsupported(); } FMT_CONSTEXPR void on_century(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_iso_week_based_year() { unsupported(); } FMT_CONSTEXPR void on_iso_week_based_short_year() { unsupported(); } FMT_CONSTEXPR void on_abbr_weekday() { unsupported(); } FMT_CONSTEXPR void on_full_weekday() { unsupported(); } FMT_CONSTEXPR void on_dec0_weekday(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_dec1_weekday(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_abbr_month() { unsupported(); } FMT_CONSTEXPR void on_full_month() { unsupported(); } FMT_CONSTEXPR void on_dec_month(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_dec0_week_of_year(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_dec1_week_of_year(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_iso_week_of_year(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_day_of_year() { unsupported(); } FMT_CONSTEXPR void on_day_of_month(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_day_of_month_space(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_24_hour(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_12_hour(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_minute(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_second(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_datetime(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_loc_date(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_loc_time(numeric_system) { unsupported(); } FMT_CONSTEXPR void on_us_date() { unsupported(); } FMT_CONSTEXPR void on_iso_date() { unsupported(); } FMT_CONSTEXPR void on_12_hour_time() { unsupported(); } FMT_CONSTEXPR void on_24_hour_time() { unsupported(); } FMT_CONSTEXPR void on_iso_time() { unsupported(); } FMT_CONSTEXPR void on_am_pm() { unsupported(); } FMT_CONSTEXPR void on_duration_value() { unsupported(); } FMT_CONSTEXPR void on_duration_unit() { unsupported(); } FMT_CONSTEXPR void on_utc_offset() { unsupported(); } FMT_CONSTEXPR void on_tz_name() { unsupported(); } }; struct tm_format_checker : null_chrono_spec_handler { FMT_NORETURN void unsupported() { FMT_THROW(format_error("no format")); } template FMT_CONSTEXPR void on_text(const Char*, const Char*) {} FMT_CONSTEXPR void on_year(numeric_system) {} FMT_CONSTEXPR void on_short_year(numeric_system) {} FMT_CONSTEXPR void on_offset_year() 
{} FMT_CONSTEXPR void on_century(numeric_system) {} FMT_CONSTEXPR void on_iso_week_based_year() {} FMT_CONSTEXPR void on_iso_week_based_short_year() {} FMT_CONSTEXPR void on_abbr_weekday() {} FMT_CONSTEXPR void on_full_weekday() {} FMT_CONSTEXPR void on_dec0_weekday(numeric_system) {} FMT_CONSTEXPR void on_dec1_weekday(numeric_system) {} FMT_CONSTEXPR void on_abbr_month() {} FMT_CONSTEXPR void on_full_month() {} FMT_CONSTEXPR void on_dec_month(numeric_system) {} FMT_CONSTEXPR void on_dec0_week_of_year(numeric_system) {} FMT_CONSTEXPR void on_dec1_week_of_year(numeric_system) {} FMT_CONSTEXPR void on_iso_week_of_year(numeric_system) {} FMT_CONSTEXPR void on_day_of_year() {} FMT_CONSTEXPR void on_day_of_month(numeric_system) {} FMT_CONSTEXPR void on_day_of_month_space(numeric_system) {} FMT_CONSTEXPR void on_24_hour(numeric_system) {} FMT_CONSTEXPR void on_12_hour(numeric_system) {} FMT_CONSTEXPR void on_minute(numeric_system) {} FMT_CONSTEXPR void on_second(numeric_system) {} FMT_CONSTEXPR void on_datetime(numeric_system) {} FMT_CONSTEXPR void on_loc_date(numeric_system) {} FMT_CONSTEXPR void on_loc_time(numeric_system) {} FMT_CONSTEXPR void on_us_date() {} FMT_CONSTEXPR void on_iso_date() {} FMT_CONSTEXPR void on_12_hour_time() {} FMT_CONSTEXPR void on_24_hour_time() {} FMT_CONSTEXPR void on_iso_time() {} FMT_CONSTEXPR void on_am_pm() {} FMT_CONSTEXPR void on_utc_offset() {} FMT_CONSTEXPR void on_tz_name() {} }; inline const char* tm_wday_full_name(int wday) { static constexpr const char* full_name_list[] = { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}; return wday >= 0 && wday <= 6 ? full_name_list[wday] : "?"; } inline const char* tm_wday_short_name(int wday) { static constexpr const char* short_name_list[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}; return wday >= 0 && wday <= 6 ? short_name_list[wday] : "???"; } inline const char* tm_mon_full_name(int mon) { static constexpr const char* full_name_list[] = { "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}; return mon >= 0 && mon <= 11 ? full_name_list[mon] : "?"; } inline const char* tm_mon_short_name(int mon) { static constexpr const char* short_name_list[] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", }; return mon >= 0 && mon <= 11 ? 
short_name_list[mon] : "???"; } template struct has_member_data_tm_gmtoff : std::false_type {}; template struct has_member_data_tm_gmtoff> : std::true_type {}; template struct has_member_data_tm_zone : std::false_type {}; template struct has_member_data_tm_zone> : std::true_type {}; #if FMT_USE_TZSET inline void tzset_once() { static bool init = []() -> bool { _tzset(); return true; }(); ignore_unused(init); } #endif template class tm_writer { private: static constexpr int days_per_week = 7; const std::locale& loc_; const bool is_classic_; OutputIt out_; const std::tm& tm_; auto tm_sec() const noexcept -> int { FMT_ASSERT(tm_.tm_sec >= 0 && tm_.tm_sec <= 61, ""); return tm_.tm_sec; } auto tm_min() const noexcept -> int { FMT_ASSERT(tm_.tm_min >= 0 && tm_.tm_min <= 59, ""); return tm_.tm_min; } auto tm_hour() const noexcept -> int { FMT_ASSERT(tm_.tm_hour >= 0 && tm_.tm_hour <= 23, ""); return tm_.tm_hour; } auto tm_mday() const noexcept -> int { FMT_ASSERT(tm_.tm_mday >= 1 && tm_.tm_mday <= 31, ""); return tm_.tm_mday; } auto tm_mon() const noexcept -> int { FMT_ASSERT(tm_.tm_mon >= 0 && tm_.tm_mon <= 11, ""); return tm_.tm_mon; } auto tm_year() const noexcept -> long long { return 1900ll + tm_.tm_year; } auto tm_wday() const noexcept -> int { FMT_ASSERT(tm_.tm_wday >= 0 && tm_.tm_wday <= 6, ""); return tm_.tm_wday; } auto tm_yday() const noexcept -> int { FMT_ASSERT(tm_.tm_yday >= 0 && tm_.tm_yday <= 365, ""); return tm_.tm_yday; } auto tm_hour12() const noexcept -> int { const auto h = tm_hour(); const auto z = h < 12 ? h : h - 12; return z == 0 ? 12 : z; } // POSIX and the C Standard are unclear or inconsistent about what %C and %y // do if the year is negative or exceeds 9999. Use the convention that %C // concatenated with %y yields the same output as %Y, and that %Y contains at // least 4 characters, with more only if necessary. auto split_year_lower(long long year) const noexcept -> int { auto l = year % 100; if (l < 0) l = -l; // l in [0, 99] return static_cast(l); } // Algorithm: // https://en.wikipedia.org/wiki/ISO_week_date#Calculating_the_week_number_from_a_month_and_day_of_the_month_or_ordinal_date auto iso_year_weeks(long long curr_year) const noexcept -> int { const auto prev_year = curr_year - 1; const auto curr_p = (curr_year + curr_year / 4 - curr_year / 100 + curr_year / 400) % days_per_week; const auto prev_p = (prev_year + prev_year / 4 - prev_year / 100 + prev_year / 400) % days_per_week; return 52 + ((curr_p == 4 || prev_p == 3) ? 1 : 0); } auto iso_week_num(int tm_yday, int tm_wday) const noexcept -> int { return (tm_yday + 11 - (tm_wday == 0 ? days_per_week : tm_wday)) / days_per_week; } auto tm_iso_week_year() const noexcept -> long long { const auto year = tm_year(); const auto w = iso_week_num(tm_yday(), tm_wday()); if (w < 1) return year - 1; if (w > iso_year_weeks(year)) return year + 1; return year; } auto tm_iso_week_of_year() const noexcept -> int { const auto year = tm_year(); const auto w = iso_week_num(tm_yday(), tm_wday()); if (w < 1) return iso_year_weeks(year - 1); if (w > iso_year_weeks(year)) return 1; return w; } void write1(int value) { *out_++ = static_cast('0' + to_unsigned(value) % 10); } void write2(int value) { const char* d = digits2(to_unsigned(value) % 100); *out_++ = *d++; *out_++ = *d; } void write_year_extended(long long year) { // At least 4 characters. 
int width = 4; if (year < 0) { *out_++ = '-'; year = 0 - year; --width; } uint32_or_64_or_128_t n = to_unsigned(year); const int num_digits = count_digits(n); if (width > num_digits) out_ = std::fill_n(out_, width - num_digits, '0'); out_ = format_decimal(out_, n, num_digits).end; } void write_year(long long year) { if (year >= 0 && year < 10000) { write2(static_cast(year / 100)); write2(static_cast(year % 100)); } else { write_year_extended(year); } } void write_utc_offset(long offset) { if (offset < 0) { *out_++ = '-'; offset = -offset; } else { *out_++ = '+'; } offset /= 60; write2(static_cast(offset / 60)); write2(static_cast(offset % 60)); } template ::value)> void format_utc_offset_impl(const T& tm) { write_utc_offset(tm.tm_gmtoff); } template ::value)> void format_utc_offset_impl(const T& tm) { #if defined(_WIN32) && defined(_UCRT) # if FMT_USE_TZSET tzset_once(); # endif long offset = 0; _get_timezone(&offset); if (tm.tm_isdst) { long dstbias = 0; _get_dstbias(&dstbias); offset += dstbias; } write_utc_offset(-offset); #else ignore_unused(tm); format_localized('z'); #endif } template ::value)> void format_tz_name_impl(const T& tm) { if (is_classic_) out_ = write_tm_str(out_, tm.tm_zone, loc_); else format_localized('Z'); } template ::value)> void format_tz_name_impl(const T&) { format_localized('Z'); } void format_localized(char format, char modifier = 0) { out_ = write(out_, tm_, loc_, format, modifier); } public: tm_writer(const std::locale& loc, OutputIt out, const std::tm& tm) : loc_(loc), is_classic_(loc_ == get_classic_locale()), out_(out), tm_(tm) {} OutputIt out() const { return out_; } FMT_CONSTEXPR void on_text(const Char* begin, const Char* end) { out_ = copy_str(begin, end, out_); } void on_abbr_weekday() { if (is_classic_) out_ = write(out_, tm_wday_short_name(tm_wday())); else format_localized('a'); } void on_full_weekday() { if (is_classic_) out_ = write(out_, tm_wday_full_name(tm_wday())); else format_localized('A'); } void on_dec0_weekday(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write1(tm_wday()); format_localized('w', 'O'); } void on_dec1_weekday(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) { auto wday = tm_wday(); write1(wday == 0 ? days_per_week : wday); } else { format_localized('u', 'O'); } } void on_abbr_month() { if (is_classic_) out_ = write(out_, tm_mon_short_name(tm_mon())); else format_localized('b'); } void on_full_month() { if (is_classic_) out_ = write(out_, tm_mon_full_name(tm_mon())); else format_localized('B'); } void on_datetime(numeric_system ns) { if (is_classic_) { on_abbr_weekday(); *out_++ = ' '; on_abbr_month(); *out_++ = ' '; on_day_of_month_space(numeric_system::standard); *out_++ = ' '; on_iso_time(); *out_++ = ' '; on_year(numeric_system::standard); } else { format_localized('c', ns == numeric_system::standard ? '\0' : 'E'); } } void on_loc_date(numeric_system ns) { if (is_classic_) on_us_date(); else format_localized('x', ns == numeric_system::standard ? '\0' : 'E'); } void on_loc_time(numeric_system ns) { if (is_classic_) on_iso_time(); else format_localized('X', ns == numeric_system::standard ? 
'\0' : 'E'); } void on_us_date() { char buf[8]; write_digit2_separated(buf, to_unsigned(tm_mon() + 1), to_unsigned(tm_mday()), to_unsigned(split_year_lower(tm_year())), '/'); out_ = copy_str(std::begin(buf), std::end(buf), out_); } void on_iso_date() { auto year = tm_year(); char buf[10]; size_t offset = 0; if (year >= 0 && year < 10000) { copy2(buf, digits2(static_cast(year / 100))); } else { offset = 4; write_year_extended(year); year = 0; } write_digit2_separated(buf + 2, static_cast(year % 100), to_unsigned(tm_mon() + 1), to_unsigned(tm_mday()), '-'); out_ = copy_str(std::begin(buf) + offset, std::end(buf), out_); } void on_utc_offset() { format_utc_offset_impl(tm_); } void on_tz_name() { format_tz_name_impl(tm_); } void on_year(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write_year(tm_year()); format_localized('Y', 'E'); } void on_short_year(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write2(split_year_lower(tm_year())); format_localized('y', 'O'); } void on_offset_year() { if (is_classic_) return write2(split_year_lower(tm_year())); format_localized('y', 'E'); } void on_century(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) { auto year = tm_year(); auto upper = year / 100; if (year >= -99 && year < 0) { // Zero upper on negative year. *out_++ = '-'; *out_++ = '0'; } else if (upper >= 0 && upper < 100) { write2(static_cast(upper)); } else { out_ = write(out_, upper); } } else { format_localized('C', 'E'); } } void on_dec_month(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write2(tm_mon() + 1); format_localized('m', 'O'); } void on_dec0_week_of_year(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write2((tm_yday() + days_per_week - tm_wday()) / days_per_week); format_localized('U', 'O'); } void on_dec1_week_of_year(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) { auto wday = tm_wday(); write2((tm_yday() + days_per_week - (wday == 0 ? (days_per_week - 1) : (wday - 1))) / days_per_week); } else { format_localized('W', 'O'); } } void on_iso_week_of_year(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write2(tm_iso_week_of_year()); format_localized('V', 'O'); } void on_iso_week_based_year() { write_year(tm_iso_week_year()); } void on_iso_week_based_short_year() { write2(split_year_lower(tm_iso_week_year())); } void on_day_of_year() { auto yday = tm_yday() + 1; write1(yday / 100); write2(yday % 100); } void on_day_of_month(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write2(tm_mday()); format_localized('d', 'O'); } void on_day_of_month_space(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) { auto mday = to_unsigned(tm_mday()) % 100; const char* d2 = digits2(mday); *out_++ = mday < 10 ? 
' ' : d2[0]; *out_++ = d2[1]; } else { format_localized('e', 'O'); } } void on_24_hour(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write2(tm_hour()); format_localized('H', 'O'); } void on_12_hour(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write2(tm_hour12()); format_localized('I', 'O'); } void on_minute(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write2(tm_min()); format_localized('M', 'O'); } void on_second(numeric_system ns) { if (is_classic_ || ns == numeric_system::standard) return write2(tm_sec()); format_localized('S', 'O'); } void on_12_hour_time() { if (is_classic_) { char buf[8]; write_digit2_separated(buf, to_unsigned(tm_hour12()), to_unsigned(tm_min()), to_unsigned(tm_sec()), ':'); out_ = copy_str(std::begin(buf), std::end(buf), out_); *out_++ = ' '; on_am_pm(); } else { format_localized('r'); } } void on_24_hour_time() { write2(tm_hour()); *out_++ = ':'; write2(tm_min()); } void on_iso_time() { char buf[8]; write_digit2_separated(buf, to_unsigned(tm_hour()), to_unsigned(tm_min()), to_unsigned(tm_sec()), ':'); out_ = copy_str(std::begin(buf), std::end(buf), out_); } void on_am_pm() { if (is_classic_) { *out_++ = tm_hour() < 12 ? 'A' : 'P'; *out_++ = 'M'; } else { format_localized('p'); } } // These apply to chrono durations but not tm. void on_duration_value() {} void on_duration_unit() {} }; struct chrono_format_checker : null_chrono_spec_handler { FMT_NORETURN void unsupported() { FMT_THROW(format_error("no date")); } template FMT_CONSTEXPR void on_text(const Char*, const Char*) {} FMT_CONSTEXPR void on_24_hour(numeric_system) {} FMT_CONSTEXPR void on_12_hour(numeric_system) {} FMT_CONSTEXPR void on_minute(numeric_system) {} FMT_CONSTEXPR void on_second(numeric_system) {} FMT_CONSTEXPR void on_12_hour_time() {} FMT_CONSTEXPR void on_24_hour_time() {} FMT_CONSTEXPR void on_iso_time() {} FMT_CONSTEXPR void on_am_pm() {} FMT_CONSTEXPR void on_duration_value() {} FMT_CONSTEXPR void on_duration_unit() {} }; template ::value)> inline bool isfinite(T) { return true; } // Converts value to Int and checks that it's in the range [0, upper). template ::value)> inline Int to_nonnegative_int(T value, Int upper) { FMT_ASSERT(std::is_unsigned::value || (value >= 0 && to_unsigned(value) <= to_unsigned(upper)), "invalid value"); (void)upper; return static_cast(value); } template ::value)> inline Int to_nonnegative_int(T value, Int upper) { if (value < 0 || value > static_cast(upper)) FMT_THROW(format_error("invalid value")); return static_cast(value); } template ::value)> inline T mod(T x, int y) { return x % static_cast(y); } template ::value)> inline T mod(T x, int y) { return std::fmod(x, static_cast(y)); } // If T is an integral type, maps T to its unsigned counterpart, otherwise // leaves it unchanged (unlike std::make_unsigned). template ::value> struct make_unsigned_or_unchanged { using type = T; }; template struct make_unsigned_or_unchanged { using type = typename std::make_unsigned::type; }; #if FMT_SAFE_DURATION_CAST // throwing version of safe_duration_cast template To fmt_safe_duration_cast(std::chrono::duration from) { int ec; To to = safe_duration_cast::safe_duration_cast(from, ec); if (ec) FMT_THROW(format_error("cannot format duration")); return to; } #endif template ::value)> inline std::chrono::duration get_milliseconds( std::chrono::duration d) { // this may overflow and/or the result may not fit in the // target type. 
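  // (illustrative: with Rep = int and Period = std::giga, even a handful of
  // units amounts to trillions of milliseconds, far more than an int holds)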
#if FMT_SAFE_DURATION_CAST using CommonSecondsType = typename std::common_type::type; const auto d_as_common = fmt_safe_duration_cast(d); const auto d_as_whole_seconds = fmt_safe_duration_cast(d_as_common); // this conversion should be nonproblematic const auto diff = d_as_common - d_as_whole_seconds; const auto ms = fmt_safe_duration_cast>(diff); return ms; #else auto s = std::chrono::duration_cast(d); return std::chrono::duration_cast(d - s); #endif } // Counts the number of fractional digits in the range [0, 18] according to the // C++20 spec. If more than 18 fractional digits are required then returns 6 for // microseconds precision. template () / 10)> struct count_fractional_digits { static constexpr int value = Num % Den == 0 ? N : count_fractional_digits::value; }; // Base case that doesn't instantiate any more templates // in order to avoid overflow. template struct count_fractional_digits { static constexpr int value = (Num % Den == 0) ? N : 6; }; constexpr long long pow10(std::uint32_t n) { return n == 0 ? 1 : 10 * pow10(n - 1); } template ::is_signed)> constexpr std::chrono::duration abs( std::chrono::duration d) { // We need to compare the duration using the count() method directly // due to a compiler bug in clang-11 regarding the spaceship operator, // when -Wzero-as-null-pointer-constant is enabled. // In clang-12 the bug has been fixed. See // https://bugs.llvm.org/show_bug.cgi?id=46235 and the reproducible example: // https://www.godbolt.org/z/Knbb5joYx. return d.count() >= d.zero().count() ? d : -d; } template ::is_signed)> constexpr std::chrono::duration abs( std::chrono::duration d) { return d; } template ::value)> OutputIt format_duration_value(OutputIt out, Rep val, int) { return write(out, val); } template ::value)> OutputIt format_duration_value(OutputIt out, Rep val, int precision) { auto specs = basic_format_specs(); specs.precision = precision; specs.type = precision >= 0 ? presentation_type::fixed_lower : presentation_type::general_lower; return write(out, val, specs); } template OutputIt copy_unit(string_view unit, OutputIt out, Char) { return std::copy(unit.begin(), unit.end(), out); } template OutputIt copy_unit(string_view unit, OutputIt out, wchar_t) { // This works when wchar_t is UTF-32 because units only contain characters // that have the same representation in UTF-16 and UTF-32. utf8_to_utf16 u(unit); return std::copy(u.c_str(), u.c_str() + u.size(), out); } template OutputIt format_duration_unit(OutputIt out) { if (const char* unit = get_units()) return copy_unit(string_view(unit), out, Char()); *out++ = '['; out = write(out, Period::num); if (const_check(Period::den != 1)) { *out++ = '/'; out = write(out, Period::den); } *out++ = ']'; *out++ = 's'; return out; } class get_locale { private: union { std::locale locale_; }; bool has_locale_ = false; public: get_locale(bool localized, locale_ref loc) : has_locale_(localized) { if (localized) ::new (&locale_) std::locale(loc.template get()); } ~get_locale() { if (has_locale_) locale_.~locale(); } operator const std::locale&() const { return has_locale_ ? locale_ : get_classic_locale(); } }; template struct chrono_formatter { FormatContext& context; OutputIt out; int precision; bool localized = false; // rep is unsigned to avoid overflow. 
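  // (illustrative: for Rep = int a count of INT_MIN cannot be negated within
  // the signed type, while "0 - val" on the unsigned rep below yields the
  // correct magnitude)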
using rep = conditional_t::value && sizeof(Rep) < sizeof(int), unsigned, typename make_unsigned_or_unchanged::type>; rep val; using seconds = std::chrono::duration; seconds s; using milliseconds = std::chrono::duration; bool negative; using char_type = typename FormatContext::char_type; using tm_writer_type = tm_writer; chrono_formatter(FormatContext& ctx, OutputIt o, std::chrono::duration d) : context(ctx), out(o), val(static_cast(d.count())), negative(false) { if (d.count() < 0) { val = 0 - val; negative = true; } // this may overflow and/or the result may not fit in the // target type. #if FMT_SAFE_DURATION_CAST // might need checked conversion (rep!=Rep) auto tmpval = std::chrono::duration(val); s = fmt_safe_duration_cast(tmpval); #else s = std::chrono::duration_cast( std::chrono::duration(val)); #endif } // returns true if nan or inf, writes to out. bool handle_nan_inf() { if (isfinite(val)) { return false; } if (isnan(val)) { write_nan(); return true; } // must be +-inf if (val > 0) { write_pinf(); } else { write_ninf(); } return true; } Rep hour() const { return static_cast(mod((s.count() / 3600), 24)); } Rep hour12() const { Rep hour = static_cast(mod((s.count() / 3600), 12)); return hour <= 0 ? 12 : hour; } Rep minute() const { return static_cast(mod((s.count() / 60), 60)); } Rep second() const { return static_cast(mod(s.count(), 60)); } std::tm time() const { auto time = std::tm(); time.tm_hour = to_nonnegative_int(hour(), 24); time.tm_min = to_nonnegative_int(minute(), 60); time.tm_sec = to_nonnegative_int(second(), 60); return time; } void write_sign() { if (negative) { *out++ = '-'; negative = false; } } void write(Rep value, int width) { write_sign(); if (isnan(value)) return write_nan(); uint32_or_64_or_128_t n = to_unsigned(to_nonnegative_int(value, max_value())); int num_digits = detail::count_digits(n); if (width > num_digits) out = std::fill_n(out, width - num_digits, '0'); out = format_decimal(out, n, num_digits).end; } template void write_fractional_seconds(Duration d) { FMT_ASSERT(!std::is_floating_point::value, ""); constexpr auto num_fractional_digits = count_fractional_digits::value; using subsecond_precision = std::chrono::duration< typename std::common_type::type, std::ratio<1, detail::pow10(num_fractional_digits)>>; if (std::ratio_less::value) { *out++ = '.'; auto fractional = detail::abs(d) - std::chrono::duration_cast(d); auto subseconds = std::chrono::treat_as_floating_point< typename subsecond_precision::rep>::value ? fractional.count() : std::chrono::duration_cast(fractional) .count(); uint32_or_64_or_128_t n = to_unsigned(to_nonnegative_int(subseconds, max_value())); int num_digits = detail::count_digits(n); if (num_fractional_digits > num_digits) out = std::fill_n(out, num_fractional_digits - num_digits, '0'); out = format_decimal(out, n, num_digits).end; } } void write_nan() { std::copy_n("nan", 3, out); } void write_pinf() { std::copy_n("inf", 3, out); } void write_ninf() { std::copy_n("-inf", 4, out); } template void format_tm(const tm& time, Callback cb, Args... args) { if (isnan(val)) return write_nan(); get_locale loc(localized, context.locale()); auto w = tm_writer_type(loc, out, time); (w.*cb)(args...); out = w.out(); } void on_text(const char_type* begin, const char_type* end) { std::copy(begin, end, out); } // These are not implemented because durations don't have date information. 
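  // (a date spec such as "%Y" applied to a duration never reaches these
  // handlers in practice: chrono_format_checker already rejects it at parse
  // time with "no date")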
void on_abbr_weekday() {} void on_full_weekday() {} void on_dec0_weekday(numeric_system) {} void on_dec1_weekday(numeric_system) {} void on_abbr_month() {} void on_full_month() {} void on_datetime(numeric_system) {} void on_loc_date(numeric_system) {} void on_loc_time(numeric_system) {} void on_us_date() {} void on_iso_date() {} void on_utc_offset() {} void on_tz_name() {} void on_year(numeric_system) {} void on_short_year(numeric_system) {} void on_offset_year() {} void on_century(numeric_system) {} void on_iso_week_based_year() {} void on_iso_week_based_short_year() {} void on_dec_month(numeric_system) {} void on_dec0_week_of_year(numeric_system) {} void on_dec1_week_of_year(numeric_system) {} void on_iso_week_of_year(numeric_system) {} void on_day_of_year() {} void on_day_of_month(numeric_system) {} void on_day_of_month_space(numeric_system) {} void on_24_hour(numeric_system ns) { if (handle_nan_inf()) return; if (ns == numeric_system::standard) return write(hour(), 2); auto time = tm(); time.tm_hour = to_nonnegative_int(hour(), 24); format_tm(time, &tm_writer_type::on_24_hour, ns); } void on_12_hour(numeric_system ns) { if (handle_nan_inf()) return; if (ns == numeric_system::standard) return write(hour12(), 2); auto time = tm(); time.tm_hour = to_nonnegative_int(hour12(), 12); format_tm(time, &tm_writer_type::on_12_hour, ns); } void on_minute(numeric_system ns) { if (handle_nan_inf()) return; if (ns == numeric_system::standard) return write(minute(), 2); auto time = tm(); time.tm_min = to_nonnegative_int(minute(), 60); format_tm(time, &tm_writer_type::on_minute, ns); } void on_second(numeric_system ns) { if (handle_nan_inf()) return; if (ns == numeric_system::standard) { if (std::is_floating_point::value) { constexpr auto num_fractional_digits = count_fractional_digits::value; auto buf = memory_buffer(); format_to(std::back_inserter(buf), runtime("{:.{}f}"), std::fmod(val * static_cast(Period::num) / static_cast(Period::den), static_cast(60)), num_fractional_digits); if (negative) *out++ = '-'; if (buf.size() < 2 || buf[1] == '.') *out++ = '0'; out = std::copy(buf.begin(), buf.end(), out); } else { write(second(), 2); write_fractional_seconds(std::chrono::duration(val)); } return; } auto time = tm(); time.tm_sec = to_nonnegative_int(second(), 60); format_tm(time, &tm_writer_type::on_second, ns); } void on_12_hour_time() { if (handle_nan_inf()) return; format_tm(time(), &tm_writer_type::on_12_hour_time); } void on_24_hour_time() { if (handle_nan_inf()) { *out++ = ':'; handle_nan_inf(); return; } write(hour(), 2); *out++ = ':'; write(minute(), 2); } void on_iso_time() { on_24_hour_time(); *out++ = ':'; if (handle_nan_inf()) return; on_second(numeric_system::standard); } void on_am_pm() { if (handle_nan_inf()) return; format_tm(time(), &tm_writer_type::on_am_pm); } void on_duration_value() { if (handle_nan_inf()) return; write_sign(); out = format_duration_value(out, val, precision); } void on_duration_unit() { out = format_duration_unit(out); } }; FMT_END_DETAIL_NAMESPACE #if defined(__cpp_lib_chrono) && __cpp_lib_chrono >= 201907 using weekday = std::chrono::weekday; #else // A fallback version of weekday. class weekday { private: unsigned char value; public: weekday() = default; explicit constexpr weekday(unsigned wd) noexcept : value(static_cast(wd != 7 ? wd : 0)) {} constexpr unsigned c_encoding() const noexcept { return value; } }; class year_month_day {}; #endif // A rudimentary weekday formatter. 
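// For example (illustrative): fmt::format("{}", fmt::weekday(1)) produces
// "Mon", while the "{:L}" spec selects the locale's abbreviated weekday name
// instead.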
template struct formatter { private: bool localized = false; public: FMT_CONSTEXPR auto parse(basic_format_parse_context& ctx) -> decltype(ctx.begin()) { auto begin = ctx.begin(), end = ctx.end(); if (begin != end && *begin == 'L') { ++begin; localized = true; } return begin; } template auto format(weekday wd, FormatContext& ctx) const -> decltype(ctx.out()) { auto time = std::tm(); time.tm_wday = static_cast(wd.c_encoding()); detail::get_locale loc(localized, ctx.locale()); auto w = detail::tm_writer(loc, ctx.out(), time); w.on_abbr_weekday(); return w.out(); } }; template struct formatter, Char> { private: basic_format_specs specs; int precision = -1; using arg_ref_type = detail::arg_ref; arg_ref_type width_ref; arg_ref_type precision_ref; bool localized = false; basic_string_view format_str; using duration = std::chrono::duration; struct spec_handler { formatter& f; basic_format_parse_context& context; basic_string_view format_str; template FMT_CONSTEXPR arg_ref_type make_arg_ref(Id arg_id) { context.check_arg_id(arg_id); return arg_ref_type(arg_id); } FMT_CONSTEXPR arg_ref_type make_arg_ref(basic_string_view arg_id) { context.check_arg_id(arg_id); return arg_ref_type(arg_id); } FMT_CONSTEXPR arg_ref_type make_arg_ref(detail::auto_id) { return arg_ref_type(context.next_arg_id()); } void on_error(const char* msg) { FMT_THROW(format_error(msg)); } FMT_CONSTEXPR void on_fill(basic_string_view fill) { f.specs.fill = fill; } FMT_CONSTEXPR void on_align(align_t align) { f.specs.align = align; } FMT_CONSTEXPR void on_width(int width) { f.specs.width = width; } FMT_CONSTEXPR void on_precision(int _precision) { f.precision = _precision; } FMT_CONSTEXPR void end_precision() {} template FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { f.width_ref = make_arg_ref(arg_id); } template FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { f.precision_ref = make_arg_ref(arg_id); } }; using iterator = typename basic_format_parse_context::iterator; struct parse_range { iterator begin; iterator end; }; FMT_CONSTEXPR parse_range do_parse(basic_format_parse_context& ctx) { auto begin = ctx.begin(), end = ctx.end(); if (begin == end || *begin == '}') return {begin, begin}; spec_handler handler{*this, ctx, format_str}; begin = detail::parse_align(begin, end, handler); if (begin == end) return {begin, begin}; begin = detail::parse_width(begin, end, handler); if (begin == end) return {begin, begin}; if (*begin == '.') { if (std::is_floating_point::value) begin = detail::parse_precision(begin, end, handler); else handler.on_error("precision not allowed for this argument type"); } if (begin != end && *begin == 'L') { ++begin; localized = true; } end = detail::parse_chrono_format(begin, end, detail::chrono_format_checker()); return {begin, end}; } public: FMT_CONSTEXPR auto parse(basic_format_parse_context& ctx) -> decltype(ctx.begin()) { auto range = do_parse(ctx); format_str = basic_string_view( &*range.begin, detail::to_unsigned(range.end - range.begin)); return range.end; } template auto format(const duration& d, FormatContext& ctx) const -> decltype(ctx.out()) { auto specs_copy = specs; auto precision_copy = precision; auto begin = format_str.begin(), end = format_str.end(); // As a possible future optimization, we could avoid extra copying if width // is not specified. 
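// (Added commentary, not upstream fmt.)  The chrono output is first written
// to a local buffer and only then passed through detail::write(), which
// applies fill/align/width from the parsed specs.  For example
//   fmt::format("{:>12%H:%M:%S}", std::chrono::seconds(3601));
// formats "01:00:01" into the buffer and then right-aligns it in a field of
// 12 characters.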
basic_memory_buffer buf; auto out = std::back_inserter(buf); detail::handle_dynamic_spec(specs_copy.width, width_ref, ctx); detail::handle_dynamic_spec(precision_copy, precision_ref, ctx); if (begin == end || *begin == '}') { out = detail::format_duration_value(out, d.count(), precision_copy); detail::format_duration_unit(out); } else { detail::chrono_formatter f( ctx, out, d); f.precision = precision_copy; f.localized = localized; detail::parse_chrono_format(begin, end, f); } return detail::write( ctx.out(), basic_string_view(buf.data(), buf.size()), specs_copy); } }; template struct formatter, Char> : formatter { FMT_CONSTEXPR formatter() { basic_string_view default_specs = detail::string_literal{}; this->do_parse(default_specs.begin(), default_specs.end()); } template auto format(std::chrono::time_point val, FormatContext& ctx) const -> decltype(ctx.out()) { return formatter::format(localtime(val), ctx); } }; template struct formatter { private: enum class spec { unknown, year_month_day, hh_mm_ss, }; spec spec_ = spec::unknown; basic_string_view specs; protected: template FMT_CONSTEXPR auto do_parse(It begin, It end) -> It { if (begin != end && *begin == ':') ++begin; end = detail::parse_chrono_format(begin, end, detail::tm_format_checker()); // Replace default spec only if the new spec is not empty. if (end != begin) specs = {begin, detail::to_unsigned(end - begin)}; return end; } public: FMT_CONSTEXPR auto parse(basic_format_parse_context& ctx) -> decltype(ctx.begin()) { auto end = this->do_parse(ctx.begin(), ctx.end()); // basic_string_view<>::compare isn't constexpr before C++17. if (specs.size() == 2 && specs[0] == Char('%')) { if (specs[1] == Char('F')) spec_ = spec::year_month_day; else if (specs[1] == Char('T')) spec_ = spec::hh_mm_ss; } return end; } template auto format(const std::tm& tm, FormatContext& ctx) const -> decltype(ctx.out()) { const auto loc_ref = ctx.locale(); detail::get_locale loc(static_cast(loc_ref), loc_ref); auto w = detail::tm_writer(loc, ctx.out(), tm); if (spec_ == spec::year_month_day) w.on_iso_date(); else if (spec_ == spec::hh_mm_ss) w.on_iso_time(); else detail::parse_chrono_format(specs.begin(), specs.end(), w); return w.out(); } }; FMT_MODULE_EXPORT_END FMT_END_NAMESPACE #endif // FMT_CHRONO_H_ mergerfs-2.40.2/libfuse/lib/fmt/color.h000066400000000000000000000605001457016576200177030ustar00rootroot00000000000000// Formatting library for C++ - color support // // Copyright (c) 2018 - present, Victor Zverovich and fmt contributors // All rights reserved. // // For the license information refer to format.h. 
#ifndef FMT_COLOR_H_ #define FMT_COLOR_H_ #include "format.h" FMT_BEGIN_NAMESPACE FMT_MODULE_EXPORT_BEGIN enum class color : uint32_t { alice_blue = 0xF0F8FF, // rgb(240,248,255) antique_white = 0xFAEBD7, // rgb(250,235,215) aqua = 0x00FFFF, // rgb(0,255,255) aquamarine = 0x7FFFD4, // rgb(127,255,212) azure = 0xF0FFFF, // rgb(240,255,255) beige = 0xF5F5DC, // rgb(245,245,220) bisque = 0xFFE4C4, // rgb(255,228,196) black = 0x000000, // rgb(0,0,0) blanched_almond = 0xFFEBCD, // rgb(255,235,205) blue = 0x0000FF, // rgb(0,0,255) blue_violet = 0x8A2BE2, // rgb(138,43,226) brown = 0xA52A2A, // rgb(165,42,42) burly_wood = 0xDEB887, // rgb(222,184,135) cadet_blue = 0x5F9EA0, // rgb(95,158,160) chartreuse = 0x7FFF00, // rgb(127,255,0) chocolate = 0xD2691E, // rgb(210,105,30) coral = 0xFF7F50, // rgb(255,127,80) cornflower_blue = 0x6495ED, // rgb(100,149,237) cornsilk = 0xFFF8DC, // rgb(255,248,220) crimson = 0xDC143C, // rgb(220,20,60) cyan = 0x00FFFF, // rgb(0,255,255) dark_blue = 0x00008B, // rgb(0,0,139) dark_cyan = 0x008B8B, // rgb(0,139,139) dark_golden_rod = 0xB8860B, // rgb(184,134,11) dark_gray = 0xA9A9A9, // rgb(169,169,169) dark_green = 0x006400, // rgb(0,100,0) dark_khaki = 0xBDB76B, // rgb(189,183,107) dark_magenta = 0x8B008B, // rgb(139,0,139) dark_olive_green = 0x556B2F, // rgb(85,107,47) dark_orange = 0xFF8C00, // rgb(255,140,0) dark_orchid = 0x9932CC, // rgb(153,50,204) dark_red = 0x8B0000, // rgb(139,0,0) dark_salmon = 0xE9967A, // rgb(233,150,122) dark_sea_green = 0x8FBC8F, // rgb(143,188,143) dark_slate_blue = 0x483D8B, // rgb(72,61,139) dark_slate_gray = 0x2F4F4F, // rgb(47,79,79) dark_turquoise = 0x00CED1, // rgb(0,206,209) dark_violet = 0x9400D3, // rgb(148,0,211) deep_pink = 0xFF1493, // rgb(255,20,147) deep_sky_blue = 0x00BFFF, // rgb(0,191,255) dim_gray = 0x696969, // rgb(105,105,105) dodger_blue = 0x1E90FF, // rgb(30,144,255) fire_brick = 0xB22222, // rgb(178,34,34) floral_white = 0xFFFAF0, // rgb(255,250,240) forest_green = 0x228B22, // rgb(34,139,34) fuchsia = 0xFF00FF, // rgb(255,0,255) gainsboro = 0xDCDCDC, // rgb(220,220,220) ghost_white = 0xF8F8FF, // rgb(248,248,255) gold = 0xFFD700, // rgb(255,215,0) golden_rod = 0xDAA520, // rgb(218,165,32) gray = 0x808080, // rgb(128,128,128) green = 0x008000, // rgb(0,128,0) green_yellow = 0xADFF2F, // rgb(173,255,47) honey_dew = 0xF0FFF0, // rgb(240,255,240) hot_pink = 0xFF69B4, // rgb(255,105,180) indian_red = 0xCD5C5C, // rgb(205,92,92) indigo = 0x4B0082, // rgb(75,0,130) ivory = 0xFFFFF0, // rgb(255,255,240) khaki = 0xF0E68C, // rgb(240,230,140) lavender = 0xE6E6FA, // rgb(230,230,250) lavender_blush = 0xFFF0F5, // rgb(255,240,245) lawn_green = 0x7CFC00, // rgb(124,252,0) lemon_chiffon = 0xFFFACD, // rgb(255,250,205) light_blue = 0xADD8E6, // rgb(173,216,230) light_coral = 0xF08080, // rgb(240,128,128) light_cyan = 0xE0FFFF, // rgb(224,255,255) light_golden_rod_yellow = 0xFAFAD2, // rgb(250,250,210) light_gray = 0xD3D3D3, // rgb(211,211,211) light_green = 0x90EE90, // rgb(144,238,144) light_pink = 0xFFB6C1, // rgb(255,182,193) light_salmon = 0xFFA07A, // rgb(255,160,122) light_sea_green = 0x20B2AA, // rgb(32,178,170) light_sky_blue = 0x87CEFA, // rgb(135,206,250) light_slate_gray = 0x778899, // rgb(119,136,153) light_steel_blue = 0xB0C4DE, // rgb(176,196,222) light_yellow = 0xFFFFE0, // rgb(255,255,224) lime = 0x00FF00, // rgb(0,255,0) lime_green = 0x32CD32, // rgb(50,205,50) linen = 0xFAF0E6, // rgb(250,240,230) magenta = 0xFF00FF, // rgb(255,0,255) maroon = 0x800000, // rgb(128,0,0) medium_aquamarine = 0x66CDAA, // 
rgb(102,205,170) medium_blue = 0x0000CD, // rgb(0,0,205) medium_orchid = 0xBA55D3, // rgb(186,85,211) medium_purple = 0x9370DB, // rgb(147,112,219) medium_sea_green = 0x3CB371, // rgb(60,179,113) medium_slate_blue = 0x7B68EE, // rgb(123,104,238) medium_spring_green = 0x00FA9A, // rgb(0,250,154) medium_turquoise = 0x48D1CC, // rgb(72,209,204) medium_violet_red = 0xC71585, // rgb(199,21,133) midnight_blue = 0x191970, // rgb(25,25,112) mint_cream = 0xF5FFFA, // rgb(245,255,250) misty_rose = 0xFFE4E1, // rgb(255,228,225) moccasin = 0xFFE4B5, // rgb(255,228,181) navajo_white = 0xFFDEAD, // rgb(255,222,173) navy = 0x000080, // rgb(0,0,128) old_lace = 0xFDF5E6, // rgb(253,245,230) olive = 0x808000, // rgb(128,128,0) olive_drab = 0x6B8E23, // rgb(107,142,35) orange = 0xFFA500, // rgb(255,165,0) orange_red = 0xFF4500, // rgb(255,69,0) orchid = 0xDA70D6, // rgb(218,112,214) pale_golden_rod = 0xEEE8AA, // rgb(238,232,170) pale_green = 0x98FB98, // rgb(152,251,152) pale_turquoise = 0xAFEEEE, // rgb(175,238,238) pale_violet_red = 0xDB7093, // rgb(219,112,147) papaya_whip = 0xFFEFD5, // rgb(255,239,213) peach_puff = 0xFFDAB9, // rgb(255,218,185) peru = 0xCD853F, // rgb(205,133,63) pink = 0xFFC0CB, // rgb(255,192,203) plum = 0xDDA0DD, // rgb(221,160,221) powder_blue = 0xB0E0E6, // rgb(176,224,230) purple = 0x800080, // rgb(128,0,128) rebecca_purple = 0x663399, // rgb(102,51,153) red = 0xFF0000, // rgb(255,0,0) rosy_brown = 0xBC8F8F, // rgb(188,143,143) royal_blue = 0x4169E1, // rgb(65,105,225) saddle_brown = 0x8B4513, // rgb(139,69,19) salmon = 0xFA8072, // rgb(250,128,114) sandy_brown = 0xF4A460, // rgb(244,164,96) sea_green = 0x2E8B57, // rgb(46,139,87) sea_shell = 0xFFF5EE, // rgb(255,245,238) sienna = 0xA0522D, // rgb(160,82,45) silver = 0xC0C0C0, // rgb(192,192,192) sky_blue = 0x87CEEB, // rgb(135,206,235) slate_blue = 0x6A5ACD, // rgb(106,90,205) slate_gray = 0x708090, // rgb(112,128,144) snow = 0xFFFAFA, // rgb(255,250,250) spring_green = 0x00FF7F, // rgb(0,255,127) steel_blue = 0x4682B4, // rgb(70,130,180) tan = 0xD2B48C, // rgb(210,180,140) teal = 0x008080, // rgb(0,128,128) thistle = 0xD8BFD8, // rgb(216,191,216) tomato = 0xFF6347, // rgb(255,99,71) turquoise = 0x40E0D0, // rgb(64,224,208) violet = 0xEE82EE, // rgb(238,130,238) wheat = 0xF5DEB3, // rgb(245,222,179) white = 0xFFFFFF, // rgb(255,255,255) white_smoke = 0xF5F5F5, // rgb(245,245,245) yellow = 0xFFFF00, // rgb(255,255,0) yellow_green = 0x9ACD32 // rgb(154,205,50) }; // enum class color enum class terminal_color : uint8_t { black = 30, red, green, yellow, blue, magenta, cyan, white, bright_black = 90, bright_red, bright_green, bright_yellow, bright_blue, bright_magenta, bright_cyan, bright_white }; enum class emphasis : uint8_t { bold = 1, faint = 1 << 1, italic = 1 << 2, underline = 1 << 3, blink = 1 << 4, reverse = 1 << 5, conceal = 1 << 6, strikethrough = 1 << 7, }; // rgb is a struct for red, green and blue colors. // Using the name "rgb" makes some editors show the color in a tooltip. struct rgb { FMT_CONSTEXPR rgb() : r(0), g(0), b(0) {} FMT_CONSTEXPR rgb(uint8_t r_, uint8_t g_, uint8_t b_) : r(r_), g(g_), b(b_) {} FMT_CONSTEXPR rgb(uint32_t hex) : r((hex >> 16) & 0xFF), g((hex >> 8) & 0xFF), b(hex & 0xFF) {} FMT_CONSTEXPR rgb(color hex) : r((uint32_t(hex) >> 16) & 0xFF), g((uint32_t(hex) >> 8) & 0xFF), b(uint32_t(hex) & 0xFF) {} uint8_t r; uint8_t g; uint8_t b; }; FMT_BEGIN_DETAIL_NAMESPACE // color is a struct of either a rgb color or a terminal color. 
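// (Added commentary, not upstream fmt.)  color_type is the storage behind the
// fg() and bg() helpers defined further below; it is built implicitly from
// either palette and records which union member is active via is_rgb, e.g.
//
//   fmt::print(fg(fmt::color::crimson), "24-bit RGB foreground\n");
//   fmt::print(fg(fmt::terminal_color::bright_red), "ANSI palette color\n");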
struct color_type { FMT_CONSTEXPR color_type() noexcept : is_rgb(), value{} {} FMT_CONSTEXPR color_type(color rgb_color) noexcept : is_rgb(true), value{} { value.rgb_color = static_cast(rgb_color); } FMT_CONSTEXPR color_type(rgb rgb_color) noexcept : is_rgb(true), value{} { value.rgb_color = (static_cast(rgb_color.r) << 16) | (static_cast(rgb_color.g) << 8) | rgb_color.b; } FMT_CONSTEXPR color_type(terminal_color term_color) noexcept : is_rgb(), value{} { value.term_color = static_cast(term_color); } bool is_rgb; union color_union { uint8_t term_color; uint32_t rgb_color; } value; }; FMT_END_DETAIL_NAMESPACE /** A text style consisting of foreground and background colors and emphasis. */ class text_style { public: FMT_CONSTEXPR text_style(emphasis em = emphasis()) noexcept : set_foreground_color(), set_background_color(), ems(em) {} FMT_CONSTEXPR text_style& operator|=(const text_style& rhs) { if (!set_foreground_color) { set_foreground_color = rhs.set_foreground_color; foreground_color = rhs.foreground_color; } else if (rhs.set_foreground_color) { if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb) FMT_THROW(format_error("can't OR a terminal color")); foreground_color.value.rgb_color |= rhs.foreground_color.value.rgb_color; } if (!set_background_color) { set_background_color = rhs.set_background_color; background_color = rhs.background_color; } else if (rhs.set_background_color) { if (!background_color.is_rgb || !rhs.background_color.is_rgb) FMT_THROW(format_error("can't OR a terminal color")); background_color.value.rgb_color |= rhs.background_color.value.rgb_color; } ems = static_cast(static_cast(ems) | static_cast(rhs.ems)); return *this; } friend FMT_CONSTEXPR text_style operator|(text_style lhs, const text_style& rhs) { return lhs |= rhs; } FMT_CONSTEXPR bool has_foreground() const noexcept { return set_foreground_color; } FMT_CONSTEXPR bool has_background() const noexcept { return set_background_color; } FMT_CONSTEXPR bool has_emphasis() const noexcept { return static_cast(ems) != 0; } FMT_CONSTEXPR detail::color_type get_foreground() const noexcept { FMT_ASSERT(has_foreground(), "no foreground specified for this style"); return foreground_color; } FMT_CONSTEXPR detail::color_type get_background() const noexcept { FMT_ASSERT(has_background(), "no background specified for this style"); return background_color; } FMT_CONSTEXPR emphasis get_emphasis() const noexcept { FMT_ASSERT(has_emphasis(), "no emphasis specified for this style"); return ems; } private: FMT_CONSTEXPR text_style(bool is_foreground, detail::color_type text_color) noexcept : set_foreground_color(), set_background_color(), ems() { if (is_foreground) { foreground_color = text_color; set_foreground_color = true; } else { background_color = text_color; set_background_color = true; } } friend FMT_CONSTEXPR text_style fg(detail::color_type foreground) noexcept; friend FMT_CONSTEXPR text_style bg(detail::color_type background) noexcept; detail::color_type foreground_color; detail::color_type background_color; bool set_foreground_color; bool set_background_color; emphasis ems; }; /** Creates a text style from the foreground (text) color. */ FMT_CONSTEXPR inline text_style fg(detail::color_type foreground) noexcept { return text_style(true, foreground); } /** Creates a text style from the background color. 
*/ FMT_CONSTEXPR inline text_style bg(detail::color_type background) noexcept { return text_style(false, background); } FMT_CONSTEXPR inline text_style operator|(emphasis lhs, emphasis rhs) noexcept { return text_style(lhs) | rhs; } FMT_BEGIN_DETAIL_NAMESPACE template struct ansi_color_escape { FMT_CONSTEXPR ansi_color_escape(detail::color_type text_color, const char* esc) noexcept { // If we have a terminal color, we need to output another escape code // sequence. if (!text_color.is_rgb) { bool is_background = esc == string_view("\x1b[48;2;"); uint32_t value = text_color.value.term_color; // Background ASCII codes are the same as the foreground ones but with // 10 more. if (is_background) value += 10u; size_t index = 0; buffer[index++] = static_cast('\x1b'); buffer[index++] = static_cast('['); if (value >= 100u) { buffer[index++] = static_cast('1'); value %= 100u; } buffer[index++] = static_cast('0' + value / 10u); buffer[index++] = static_cast('0' + value % 10u); buffer[index++] = static_cast('m'); buffer[index++] = static_cast('\0'); return; } for (int i = 0; i < 7; i++) { buffer[i] = static_cast(esc[i]); } rgb color(text_color.value.rgb_color); to_esc(color.r, buffer + 7, ';'); to_esc(color.g, buffer + 11, ';'); to_esc(color.b, buffer + 15, 'm'); buffer[19] = static_cast(0); } FMT_CONSTEXPR ansi_color_escape(emphasis em) noexcept { uint8_t em_codes[num_emphases] = {}; if (has_emphasis(em, emphasis::bold)) em_codes[0] = 1; if (has_emphasis(em, emphasis::faint)) em_codes[1] = 2; if (has_emphasis(em, emphasis::italic)) em_codes[2] = 3; if (has_emphasis(em, emphasis::underline)) em_codes[3] = 4; if (has_emphasis(em, emphasis::blink)) em_codes[4] = 5; if (has_emphasis(em, emphasis::reverse)) em_codes[5] = 7; if (has_emphasis(em, emphasis::conceal)) em_codes[6] = 8; if (has_emphasis(em, emphasis::strikethrough)) em_codes[7] = 9; size_t index = 0; for (size_t i = 0; i < num_emphases; ++i) { if (!em_codes[i]) continue; buffer[index++] = static_cast('\x1b'); buffer[index++] = static_cast('['); buffer[index++] = static_cast('0' + em_codes[i]); buffer[index++] = static_cast('m'); } buffer[index++] = static_cast(0); } FMT_CONSTEXPR operator const Char*() const noexcept { return buffer; } FMT_CONSTEXPR const Char* begin() const noexcept { return buffer; } FMT_CONSTEXPR_CHAR_TRAITS const Char* end() const noexcept { return buffer + std::char_traits::length(buffer); } private: static constexpr size_t num_emphases = 8; Char buffer[7u + 3u * num_emphases + 1u]; static FMT_CONSTEXPR void to_esc(uint8_t c, Char* out, char delimiter) noexcept { out[0] = static_cast('0' + c / 100); out[1] = static_cast('0' + c / 10 % 10); out[2] = static_cast('0' + c % 10); out[3] = static_cast(delimiter); } static FMT_CONSTEXPR bool has_emphasis(emphasis em, emphasis mask) noexcept { return static_cast(em) & static_cast(mask); } }; template FMT_CONSTEXPR ansi_color_escape make_foreground_color( detail::color_type foreground) noexcept { return ansi_color_escape(foreground, "\x1b[38;2;"); } template FMT_CONSTEXPR ansi_color_escape make_background_color( detail::color_type background) noexcept { return ansi_color_escape(background, "\x1b[48;2;"); } template FMT_CONSTEXPR ansi_color_escape make_emphasis(emphasis em) noexcept { return ansi_color_escape(em); } template inline void fputs(const Char* chars, FILE* stream) { int result = std::fputs(chars, stream); if (result < 0) FMT_THROW(system_error(errno, FMT_STRING("cannot write to file"))); } template <> inline void fputs(const wchar_t* chars, FILE* stream) { int result = 
std::fputws(chars, stream); if (result < 0) FMT_THROW(system_error(errno, FMT_STRING("cannot write to file"))); } template inline void reset_color(FILE* stream) { fputs("\x1b[0m", stream); } template <> inline void reset_color(FILE* stream) { fputs(L"\x1b[0m", stream); } template inline void reset_color(buffer& buffer) { auto reset_color = string_view("\x1b[0m"); buffer.append(reset_color.begin(), reset_color.end()); } template struct styled_arg { const T& value; text_style style; }; template void vformat_to(buffer& buf, const text_style& ts, basic_string_view format_str, basic_format_args>> args) { bool has_style = false; if (ts.has_emphasis()) { has_style = true; auto emphasis = detail::make_emphasis(ts.get_emphasis()); buf.append(emphasis.begin(), emphasis.end()); } if (ts.has_foreground()) { has_style = true; auto foreground = detail::make_foreground_color(ts.get_foreground()); buf.append(foreground.begin(), foreground.end()); } if (ts.has_background()) { has_style = true; auto background = detail::make_background_color(ts.get_background()); buf.append(background.begin(), background.end()); } detail::vformat_to(buf, format_str, args, {}); if (has_style) detail::reset_color(buf); } FMT_END_DETAIL_NAMESPACE template > void vprint(std::FILE* f, const text_style& ts, const S& format, basic_format_args>> args) { basic_memory_buffer buf; detail::vformat_to(buf, ts, detail::to_string_view(format), args); if (detail::is_utf8()) { detail::print(f, basic_string_view(buf.begin(), buf.size())); } else { buf.push_back(Char(0)); detail::fputs(buf.data(), f); } } /** \rst Formats a string and prints it to the specified file stream using ANSI escape sequences to specify text formatting. **Example**:: fmt::print(fmt::emphasis::bold | fg(fmt::color::red), "Elapsed time: {0:.2f} seconds", 1.23); \endrst */ template ::value)> void print(std::FILE* f, const text_style& ts, const S& format_str, const Args&... args) { vprint(f, ts, format_str, fmt::make_format_args>>(args...)); } /** \rst Formats a string and prints it to stdout using ANSI escape sequences to specify text formatting. **Example**:: fmt::print(fmt::emphasis::bold | fg(fmt::color::red), "Elapsed time: {0:.2f} seconds", 1.23); \endrst */ template ::value)> void print(const text_style& ts, const S& format_str, const Args&... args) { return print(stdout, ts, format_str, args...); } template > inline std::basic_string vformat( const text_style& ts, const S& format_str, basic_format_args>> args) { basic_memory_buffer buf; detail::vformat_to(buf, ts, detail::to_string_view(format_str), args); return fmt::to_string(buf); } /** \rst Formats arguments and returns the result as a string using ANSI escape sequences to specify text formatting. **Example**:: #include std::string message = fmt::format(fmt::emphasis::bold | fg(fmt::color::red), "The answer is {}", 42); \endrst */ template > inline std::basic_string format(const text_style& ts, const S& format_str, const Args&... args) { return fmt::vformat(ts, detail::to_string_view(format_str), fmt::make_format_args>(args...)); } /** Formats a string with the given text_style and writes the output to ``out``. 
*/ template ::value)> OutputIt vformat_to( OutputIt out, const text_style& ts, basic_string_view format_str, basic_format_args>> args) { auto&& buf = detail::get_buffer(out); detail::vformat_to(buf, ts, format_str, args); return detail::get_iterator(buf); } /** \rst Formats arguments with the given text_style, writes the result to the output iterator ``out`` and returns the iterator past the end of the output range. **Example**:: std::vector out; fmt::format_to(std::back_inserter(out), fmt::emphasis::bold | fg(fmt::color::red), "{}", 42); \endrst */ template >::value&& detail::is_string::value> inline auto format_to(OutputIt out, const text_style& ts, const S& format_str, Args&&... args) -> typename std::enable_if::type { return vformat_to(out, ts, detail::to_string_view(format_str), fmt::make_format_args>>(args...)); } template struct formatter, Char> : formatter { template auto format(const detail::styled_arg& arg, FormatContext& ctx) const -> decltype(ctx.out()) { const auto& ts = arg.style; const auto& value = arg.value; auto out = ctx.out(); bool has_style = false; if (ts.has_emphasis()) { has_style = true; auto emphasis = detail::make_emphasis(ts.get_emphasis()); out = std::copy(emphasis.begin(), emphasis.end(), out); } if (ts.has_foreground()) { has_style = true; auto foreground = detail::make_foreground_color(ts.get_foreground()); out = std::copy(foreground.begin(), foreground.end(), out); } if (ts.has_background()) { has_style = true; auto background = detail::make_background_color(ts.get_background()); out = std::copy(background.begin(), background.end(), out); } out = formatter::format(value, ctx); if (has_style) { auto reset_color = string_view("\x1b[0m"); out = std::copy(reset_color.begin(), reset_color.end(), out); } return out; } }; /** \rst Returns an argument that will be formatted using ANSI escape sequences, to be used in a formatting function. **Example**:: fmt::print("Elapsed time: {0:.2f} seconds", fmt::styled(1.23, fmt::fg(fmt::color::green) | fmt::bg(fmt::color::blue))); \endrst */ template FMT_CONSTEXPR auto styled(const T& value, text_style ts) -> detail::styled_arg> { return detail::styled_arg>{value, ts}; } FMT_MODULE_EXPORT_END FMT_END_NAMESPACE #endif // FMT_COLOR_H_ mergerfs-2.40.2/libfuse/lib/fmt/compile.h000066400000000000000000000513751457016576200202270ustar00rootroot00000000000000// Formatting library for C++ - experimental format string compilation // // Copyright (c) 2012 - present, Victor Zverovich and fmt contributors // All rights reserved. // // For the license information refer to format.h. #ifndef FMT_COMPILE_H_ #define FMT_COMPILE_H_ #include "format.h" FMT_BEGIN_NAMESPACE namespace detail { template FMT_CONSTEXPR inline counting_iterator copy_str(InputIt begin, InputIt end, counting_iterator it) { return it + (end - begin); } template class truncating_iterator_base { protected: OutputIt out_; size_t limit_; size_t count_ = 0; truncating_iterator_base() : out_(), limit_(0) {} truncating_iterator_base(OutputIt out, size_t limit) : out_(out), limit_(limit) {} public: using iterator_category = std::output_iterator_tag; using value_type = typename std::iterator_traits::value_type; using difference_type = std::ptrdiff_t; using pointer = void; using reference = void; FMT_UNCHECKED_ITERATOR(truncating_iterator_base); OutputIt base() const { return out_; } size_t count() const { return count_; } }; // An output iterator that truncates the output and counts the number of objects // written to it. 
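// (Added commentary, not upstream fmt.)  This iterator backs the compiled
// format_to_n() overload defined near the end of this header; writes past the
// limit are counted but discarded, e.g.
//
//   char buf[8];
//   auto r = fmt::format_to_n(buf, sizeof(buf), FMT_COMPILE("{}"), 123456789);
//   // r.size is the untruncated length (9); buf holds the first 8 characters
//   // and is not NUL-terminated.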
template ::value_type>::type> class truncating_iterator; template class truncating_iterator : public truncating_iterator_base { mutable typename truncating_iterator_base::value_type blackhole_; public: using value_type = typename truncating_iterator_base::value_type; truncating_iterator() = default; truncating_iterator(OutputIt out, size_t limit) : truncating_iterator_base(out, limit) {} truncating_iterator& operator++() { if (this->count_++ < this->limit_) ++this->out_; return *this; } truncating_iterator operator++(int) { auto it = *this; ++*this; return it; } value_type& operator*() const { return this->count_ < this->limit_ ? *this->out_ : blackhole_; } }; template class truncating_iterator : public truncating_iterator_base { public: truncating_iterator() = default; truncating_iterator(OutputIt out, size_t limit) : truncating_iterator_base(out, limit) {} template truncating_iterator& operator=(T val) { if (this->count_++ < this->limit_) *this->out_++ = val; return *this; } truncating_iterator& operator++() { return *this; } truncating_iterator& operator++(int) { return *this; } truncating_iterator& operator*() { return *this; } }; // A compile-time string which is compiled into fast formatting code. class compiled_string {}; template struct is_compiled_string : std::is_base_of {}; /** \rst Converts a string literal *s* into a format string that will be parsed at compile time and converted into efficient formatting code. Requires C++17 ``constexpr if`` compiler support. **Example**:: // Converts 42 into std::string using the most efficient method and no // runtime format string processing. std::string s = fmt::format(FMT_COMPILE("{}"), 42); \endrst */ #if defined(__cpp_if_constexpr) && defined(__cpp_return_type_deduction) # define FMT_COMPILE(s) \ FMT_STRING_IMPL(s, fmt::detail::compiled_string, explicit) #else # define FMT_COMPILE(s) FMT_STRING(s) #endif #if FMT_USE_NONTYPE_TEMPLATE_ARGS template Str> struct udl_compiled_string : compiled_string { using char_type = Char; explicit constexpr operator basic_string_view() const { return {Str.data, N - 1}; } }; #endif template const T& first(const T& value, const Tail&...) { return value; } #if defined(__cpp_if_constexpr) && defined(__cpp_return_type_deduction) template struct type_list {}; // Returns a reference to the argument at index N from [first, rest...]. template constexpr const auto& get([[maybe_unused]] const T& first, [[maybe_unused]] const Args&... rest) { static_assert(N < 1 + sizeof...(Args), "index is out of bounds"); if constexpr (N == 0) return first; else return detail::get(rest...); } template constexpr int get_arg_index_by_name(basic_string_view name, type_list) { return get_arg_index_by_name(name); } template struct get_type_impl; template struct get_type_impl> { using type = remove_cvref_t(std::declval()...))>; }; template using get_type = typename get_type_impl::type; template struct is_compiled_format : std::false_type {}; template struct text { basic_string_view data; using char_type = Char; template constexpr OutputIt format(OutputIt out, const Args&...) const { return write(out, data); } }; template struct is_compiled_format> : std::true_type {}; template constexpr text make_text(basic_string_view s, size_t pos, size_t size) { return {{&s[pos], size}}; } template struct code_unit { Char value; using char_type = Char; template constexpr OutputIt format(OutputIt out, const Args&...) const { return write(out, value); } }; // This ensures that the argument type is convertible to `const T&`. 
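// (Added commentary, not upstream fmt.)  Named arguments arrive here wrapped
// in detail::named_arg, so get_arg_checked() unwraps them to the underlying
// value; this is what lets a compiled format string refer to them by name,
// e.g.
//
//   auto s = fmt::format(FMT_COMPILE("{answer}"), fmt::arg("answer", 42));
//   // s == "42"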
template constexpr const T& get_arg_checked(const Args&... args) { const auto& arg = detail::get(args...); if constexpr (detail::is_named_arg>()) { return arg.value; } else { return arg; } } template struct is_compiled_format> : std::true_type {}; // A replacement field that refers to argument N. template struct field { using char_type = Char; template constexpr OutputIt format(OutputIt out, const Args&... args) const { return write(out, get_arg_checked(args...)); } }; template struct is_compiled_format> : std::true_type {}; // A replacement field that refers to argument with name. template struct runtime_named_field { using char_type = Char; basic_string_view name; template constexpr static bool try_format_argument( OutputIt& out, // [[maybe_unused]] due to unused-but-set-parameter warning in GCC 7,8,9 [[maybe_unused]] basic_string_view arg_name, const T& arg) { if constexpr (is_named_arg::type>::value) { if (arg_name == arg.name) { out = write(out, arg.value); return true; } } return false; } template constexpr OutputIt format(OutputIt out, const Args&... args) const { bool found = (try_format_argument(out, name, args) || ...); if (!found) { FMT_THROW(format_error("argument with specified name is not found")); } return out; } }; template struct is_compiled_format> : std::true_type {}; // A replacement field that refers to argument N and has format specifiers. template struct spec_field { using char_type = Char; formatter fmt; template constexpr FMT_INLINE OutputIt format(OutputIt out, const Args&... args) const { const auto& vargs = fmt::make_format_args>(args...); basic_format_context ctx(out, vargs); return fmt.format(get_arg_checked(args...), ctx); } }; template struct is_compiled_format> : std::true_type {}; template struct concat { L lhs; R rhs; using char_type = typename L::char_type; template constexpr OutputIt format(OutputIt out, const Args&... args) const { out = lhs.format(out, args...); return rhs.format(out, args...); } }; template struct is_compiled_format> : std::true_type {}; template constexpr concat make_concat(L lhs, R rhs) { return {lhs, rhs}; } struct unknown_format {}; template constexpr size_t parse_text(basic_string_view str, size_t pos) { for (size_t size = str.size(); pos != size; ++pos) { if (str[pos] == '{' || str[pos] == '}') break; } return pos; } template constexpr auto compile_format_string(S format_str); template constexpr auto parse_tail(T head, S format_str) { if constexpr (POS != basic_string_view(format_str).size()) { constexpr auto tail = compile_format_string(format_str); if constexpr (std::is_same, unknown_format>()) return tail; else return make_concat(head, tail); } else { return head; } } template struct parse_specs_result { formatter fmt; size_t end; int next_arg_id; }; constexpr int manual_indexing_id = -1; template constexpr parse_specs_result parse_specs(basic_string_view str, size_t pos, int next_arg_id) { str.remove_prefix(pos); auto ctx = compile_parse_context(str, max_value(), nullptr, {}, next_arg_id); auto f = formatter(); auto end = f.parse(ctx); return {f, pos + fmt::detail::to_unsigned(end - str.data()), next_arg_id == 0 ? 
manual_indexing_id : ctx.next_arg_id()}; } template struct arg_id_handler { arg_ref arg_id; constexpr int operator()() { FMT_ASSERT(false, "handler cannot be used with automatic indexing"); return 0; } constexpr int operator()(int id) { arg_id = arg_ref(id); return 0; } constexpr int operator()(basic_string_view id) { arg_id = arg_ref(id); return 0; } constexpr void on_error(const char* message) { FMT_THROW(format_error(message)); } }; template struct parse_arg_id_result { arg_ref arg_id; const Char* arg_id_end; }; template constexpr auto parse_arg_id(const Char* begin, const Char* end) { auto handler = arg_id_handler{arg_ref{}}; auto arg_id_end = parse_arg_id(begin, end, handler); return parse_arg_id_result{handler.arg_id, arg_id_end}; } template struct field_type { using type = remove_cvref_t; }; template struct field_type::value>> { using type = remove_cvref_t; }; template constexpr auto parse_replacement_field_then_tail(S format_str) { using char_type = typename S::char_type; constexpr auto str = basic_string_view(format_str); constexpr char_type c = END_POS != str.size() ? str[END_POS] : char_type(); if constexpr (c == '}') { return parse_tail( field::type, ARG_INDEX>(), format_str); } else if constexpr (c != ':') { FMT_THROW(format_error("expected ':'")); } else { constexpr auto result = parse_specs::type>( str, END_POS + 1, NEXT_ID == manual_indexing_id ? 0 : NEXT_ID); if constexpr (result.end >= str.size() || str[result.end] != '}') { FMT_THROW(format_error("expected '}'")); return 0; } else { return parse_tail( spec_field::type, ARG_INDEX>{ result.fmt}, format_str); } } } // Compiles a non-empty format string and returns the compiled representation // or unknown_format() on unrecognized input. template constexpr auto compile_format_string(S format_str) { using char_type = typename S::char_type; constexpr auto str = basic_string_view(format_str); if constexpr (str[POS] == '{') { if constexpr (POS + 1 == str.size()) FMT_THROW(format_error("unmatched '{' in format string")); if constexpr (str[POS + 1] == '{') { return parse_tail(make_text(str, POS, 1), format_str); } else if constexpr (str[POS + 1] == '}' || str[POS + 1] == ':') { static_assert(ID != manual_indexing_id, "cannot switch from manual to automatic argument indexing"); constexpr auto next_id = ID != manual_indexing_id ? ID + 1 : manual_indexing_id; return parse_replacement_field_then_tail, Args, POS + 1, ID, next_id>( format_str); } else { constexpr auto arg_id_result = parse_arg_id(str.data() + POS + 1, str.data() + str.size()); constexpr auto arg_id_end_pos = arg_id_result.arg_id_end - str.data(); constexpr char_type c = arg_id_end_pos != str.size() ? str[arg_id_end_pos] : char_type(); static_assert(c == '}' || c == ':', "missing '}' in format string"); if constexpr (arg_id_result.arg_id.kind == arg_id_kind::index) { static_assert( ID == manual_indexing_id || ID == 0, "cannot switch from automatic to manual argument indexing"); constexpr auto arg_index = arg_id_result.arg_id.val.index; return parse_replacement_field_then_tail, Args, arg_id_end_pos, arg_index, manual_indexing_id>( format_str); } else if constexpr (arg_id_result.arg_id.kind == arg_id_kind::name) { constexpr auto arg_index = get_arg_index_by_name(arg_id_result.arg_id.val.name, Args{}); if constexpr (arg_index != invalid_arg_index) { constexpr auto next_id = ID != manual_indexing_id ? 
ID + 1 : manual_indexing_id; return parse_replacement_field_then_tail< decltype(get_type::value), Args, arg_id_end_pos, arg_index, next_id>(format_str); } else { if constexpr (c == '}') { return parse_tail( runtime_named_field{arg_id_result.arg_id.val.name}, format_str); } else if constexpr (c == ':') { return unknown_format(); // no type info for specs parsing } } } } } else if constexpr (str[POS] == '}') { if constexpr (POS + 1 == str.size()) FMT_THROW(format_error("unmatched '}' in format string")); return parse_tail(make_text(str, POS, 1), format_str); } else { constexpr auto end = parse_text(str, POS + 1); if constexpr (end - POS > 1) { return parse_tail(make_text(str, POS, end - POS), format_str); } else { return parse_tail(code_unit{str[POS]}, format_str); } } } template ::value)> constexpr auto compile(S format_str) { constexpr auto str = basic_string_view(format_str); if constexpr (str.size() == 0) { return detail::make_text(str, 0, 0); } else { constexpr auto result = detail::compile_format_string, 0, 0>( format_str); return result; } } #endif // defined(__cpp_if_constexpr) && defined(__cpp_return_type_deduction) } // namespace detail FMT_MODULE_EXPORT_BEGIN #if defined(__cpp_if_constexpr) && defined(__cpp_return_type_deduction) template ::value)> FMT_INLINE std::basic_string format(const CompiledFormat& cf, const Args&... args) { auto s = std::basic_string(); cf.format(std::back_inserter(s), args...); return s; } template ::value)> constexpr FMT_INLINE OutputIt format_to(OutputIt out, const CompiledFormat& cf, const Args&... args) { return cf.format(out, args...); } template ::value)> FMT_INLINE std::basic_string format(const S&, Args&&... args) { if constexpr (std::is_same::value) { constexpr auto str = basic_string_view(S()); if constexpr (str.size() == 2 && str[0] == '{' && str[1] == '}') { const auto& first = detail::first(args...); if constexpr (detail::is_named_arg< remove_cvref_t>::value) { return fmt::to_string(first.value); } else { return fmt::to_string(first); } } } constexpr auto compiled = detail::compile(S()); if constexpr (std::is_same, detail::unknown_format>()) { return fmt::format( static_cast>(S()), std::forward(args)...); } else { return fmt::format(compiled, std::forward(args)...); } } template ::value)> FMT_CONSTEXPR OutputIt format_to(OutputIt out, const S&, Args&&... args) { constexpr auto compiled = detail::compile(S()); if constexpr (std::is_same, detail::unknown_format>()) { return fmt::format_to( out, static_cast>(S()), std::forward(args)...); } else { return fmt::format_to(out, compiled, std::forward(args)...); } } #endif template ::value)> format_to_n_result format_to_n(OutputIt out, size_t n, const S& format_str, Args&&... args) { auto it = fmt::format_to(detail::truncating_iterator(out, n), format_str, std::forward(args)...); return {it.base(), it.count()}; } template ::value)> FMT_CONSTEXPR20 size_t formatted_size(const S& format_str, const Args&... args) { return fmt::format_to(detail::counting_iterator(), format_str, args...) .count(); } template ::value)> void print(std::FILE* f, const S& format_str, const Args&... args) { memory_buffer buffer; fmt::format_to(std::back_inserter(buffer), format_str, args...); detail::print(f, {buffer.data(), buffer.size()}); } template ::value)> void print(const S& format_str, const Args&... 
args) { print(stdout, format_str, args...); } #if FMT_USE_NONTYPE_TEMPLATE_ARGS inline namespace literals { template constexpr auto operator""_cf() { using char_t = remove_cvref_t; return detail::udl_compiled_string(); } } // namespace literals #endif FMT_MODULE_EXPORT_END FMT_END_NAMESPACE #endif // FMT_COMPILE_H_ mergerfs-2.40.2/libfuse/lib/fmt/core.h000066400000000000000000003311571457016576200175260ustar00rootroot00000000000000// Formatting library for C++ - the core API for char/UTF-8 // // Copyright (c) 2012 - present, Victor Zverovich // All rights reserved. // // For the license information refer to format.h. #ifndef FMT_CORE_H_ #define FMT_CORE_H_ #include // std::byte #include // std::FILE #include // std::strlen #include #include #include #include // The fmt library version in the form major * 10000 + minor * 100 + patch. #define FMT_VERSION 90100 #if defined(__clang__) && !defined(__ibmxl__) # define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__) #else # define FMT_CLANG_VERSION 0 #endif #if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && \ !defined(__NVCOMPILER) # define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) #else # define FMT_GCC_VERSION 0 #endif #ifndef FMT_GCC_PRAGMA // Workaround _Pragma bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59884. # if FMT_GCC_VERSION >= 504 # define FMT_GCC_PRAGMA(arg) _Pragma(arg) # else # define FMT_GCC_PRAGMA(arg) # endif #endif #ifdef __ICL # define FMT_ICC_VERSION __ICL #elif defined(__INTEL_COMPILER) # define FMT_ICC_VERSION __INTEL_COMPILER #else # define FMT_ICC_VERSION 0 #endif #ifdef _MSC_VER # define FMT_MSC_VERSION _MSC_VER # define FMT_MSC_WARNING(...) __pragma(warning(__VA_ARGS__)) #else # define FMT_MSC_VERSION 0 # define FMT_MSC_WARNING(...) #endif #ifdef _MSVC_LANG # define FMT_CPLUSPLUS _MSVC_LANG #else # define FMT_CPLUSPLUS __cplusplus #endif #ifdef __has_feature # define FMT_HAS_FEATURE(x) __has_feature(x) #else # define FMT_HAS_FEATURE(x) 0 #endif #if (defined(__has_include) || FMT_ICC_VERSION >= 1600 || \ FMT_MSC_VERSION > 1900) && \ !defined(__INTELLISENSE__) # define FMT_HAS_INCLUDE(x) __has_include(x) #else # define FMT_HAS_INCLUDE(x) 0 #endif #ifdef __has_cpp_attribute # define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) #else # define FMT_HAS_CPP_ATTRIBUTE(x) 0 #endif #define FMT_HAS_CPP14_ATTRIBUTE(attribute) \ (FMT_CPLUSPLUS >= 201402L && FMT_HAS_CPP_ATTRIBUTE(attribute)) #define FMT_HAS_CPP17_ATTRIBUTE(attribute) \ (FMT_CPLUSPLUS >= 201703L && FMT_HAS_CPP_ATTRIBUTE(attribute)) // Check if relaxed C++14 constexpr is supported. // GCC doesn't allow throw in constexpr until version 6 (bug 67371). #ifndef FMT_USE_CONSTEXPR # if (FMT_HAS_FEATURE(cxx_relaxed_constexpr) || FMT_MSC_VERSION >= 1912 || \ (FMT_GCC_VERSION >= 600 && FMT_CPLUSPLUS >= 201402L)) && \ !FMT_ICC_VERSION && !defined(__NVCC__) # define FMT_USE_CONSTEXPR 1 # else # define FMT_USE_CONSTEXPR 0 # endif #endif #if FMT_USE_CONSTEXPR # define FMT_CONSTEXPR constexpr #else # define FMT_CONSTEXPR #endif #if ((FMT_CPLUSPLUS >= 202002L) && \ (!defined(_GLIBCXX_RELEASE) || _GLIBCXX_RELEASE > 9)) || \ (FMT_CPLUSPLUS >= 201709L && FMT_GCC_VERSION >= 1002) # define FMT_CONSTEXPR20 constexpr #else # define FMT_CONSTEXPR20 #endif // Check if constexpr std::char_traits<>::{compare,length} are supported. #if defined(__GLIBCXX__) # if FMT_CPLUSPLUS >= 201703L && defined(_GLIBCXX_RELEASE) && \ _GLIBCXX_RELEASE >= 7 // GCC 7+ libstdc++ has _GLIBCXX_RELEASE. 
# define FMT_CONSTEXPR_CHAR_TRAITS constexpr # endif #elif defined(_LIBCPP_VERSION) && FMT_CPLUSPLUS >= 201703L && \ _LIBCPP_VERSION >= 4000 # define FMT_CONSTEXPR_CHAR_TRAITS constexpr #elif FMT_MSC_VERSION >= 1914 && FMT_CPLUSPLUS >= 201703L # define FMT_CONSTEXPR_CHAR_TRAITS constexpr #endif #ifndef FMT_CONSTEXPR_CHAR_TRAITS # define FMT_CONSTEXPR_CHAR_TRAITS #endif // Check if exceptions are disabled. #ifndef FMT_EXCEPTIONS # if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || \ (FMT_MSC_VERSION && !_HAS_EXCEPTIONS) # define FMT_EXCEPTIONS 0 # else # define FMT_EXCEPTIONS 1 # endif #endif #ifndef FMT_DEPRECATED # if FMT_HAS_CPP14_ATTRIBUTE(deprecated) || FMT_MSC_VERSION >= 1900 # define FMT_DEPRECATED [[deprecated]] # else # if (defined(__GNUC__) && !defined(__LCC__)) || defined(__clang__) # define FMT_DEPRECATED __attribute__((deprecated)) # elif FMT_MSC_VERSION # define FMT_DEPRECATED __declspec(deprecated) # else # define FMT_DEPRECATED /* deprecated */ # endif # endif #endif // [[noreturn]] is disabled on MSVC and NVCC because of bogus unreachable code // warnings. #if FMT_EXCEPTIONS && FMT_HAS_CPP_ATTRIBUTE(noreturn) && !FMT_MSC_VERSION && \ !defined(__NVCC__) # define FMT_NORETURN [[noreturn]] #else # define FMT_NORETURN #endif #if FMT_HAS_CPP17_ATTRIBUTE(fallthrough) # define FMT_FALLTHROUGH [[fallthrough]] #elif defined(__clang__) # define FMT_FALLTHROUGH [[clang::fallthrough]] #elif FMT_GCC_VERSION >= 700 && \ (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 520) # define FMT_FALLTHROUGH [[gnu::fallthrough]] #else # define FMT_FALLTHROUGH #endif #ifndef FMT_NODISCARD # if FMT_HAS_CPP17_ATTRIBUTE(nodiscard) # define FMT_NODISCARD [[nodiscard]] # else # define FMT_NODISCARD # endif #endif #ifndef FMT_USE_FLOAT # define FMT_USE_FLOAT 1 #endif #ifndef FMT_USE_DOUBLE # define FMT_USE_DOUBLE 1 #endif #ifndef FMT_USE_LONG_DOUBLE # define FMT_USE_LONG_DOUBLE 1 #endif #ifndef FMT_INLINE # if FMT_GCC_VERSION || FMT_CLANG_VERSION # define FMT_INLINE inline __attribute__((always_inline)) # else # define FMT_INLINE inline # endif #endif // An inline std::forward replacement. #define FMT_FORWARD(...) static_cast(__VA_ARGS__) #ifdef _MSC_VER # define FMT_UNCHECKED_ITERATOR(It) \ using _Unchecked_type = It // Mark iterator as checked. #else # define FMT_UNCHECKED_ITERATOR(It) using unchecked_type = It #endif #ifndef FMT_BEGIN_NAMESPACE # define FMT_BEGIN_NAMESPACE \ namespace fmt { \ inline namespace v9 { # define FMT_END_NAMESPACE \ } \ } #endif #ifndef FMT_MODULE_EXPORT # define FMT_MODULE_EXPORT # define FMT_MODULE_EXPORT_BEGIN # define FMT_MODULE_EXPORT_END # define FMT_BEGIN_DETAIL_NAMESPACE namespace detail { # define FMT_END_DETAIL_NAMESPACE } #endif #if !defined(FMT_HEADER_ONLY) && defined(_WIN32) # define FMT_CLASS_API FMT_MSC_WARNING(suppress : 4275) # ifdef FMT_EXPORT # define FMT_API __declspec(dllexport) # elif defined(FMT_SHARED) # define FMT_API __declspec(dllimport) # endif #else # define FMT_CLASS_API # if defined(FMT_EXPORT) || defined(FMT_SHARED) # if defined(__GNUC__) || defined(__clang__) # define FMT_API __attribute__((visibility("default"))) # endif # endif #endif #ifndef FMT_API # define FMT_API #endif // libc++ supports string_view in pre-c++17. 
#if FMT_HAS_INCLUDE() && \ (FMT_CPLUSPLUS >= 201703L || defined(_LIBCPP_VERSION)) # include # define FMT_USE_STRING_VIEW #elif FMT_HAS_INCLUDE("experimental/string_view") && FMT_CPLUSPLUS >= 201402L # include # define FMT_USE_EXPERIMENTAL_STRING_VIEW #endif #ifndef FMT_UNICODE # define FMT_UNICODE !FMT_MSC_VERSION #endif #ifndef FMT_CONSTEVAL # if ((FMT_GCC_VERSION >= 1000 || FMT_CLANG_VERSION >= 1101) && \ FMT_CPLUSPLUS >= 202002L && !defined(__apple_build_version__)) || \ (defined(__cpp_consteval) && \ (!FMT_MSC_VERSION || _MSC_FULL_VER >= 193030704)) // consteval is broken in MSVC before VS2022 and Apple clang 13. # define FMT_CONSTEVAL consteval # define FMT_HAS_CONSTEVAL # else # define FMT_CONSTEVAL # endif #endif #ifndef FMT_USE_NONTYPE_TEMPLATE_ARGS # if defined(__cpp_nontype_template_args) && \ ((FMT_GCC_VERSION >= 903 && FMT_CPLUSPLUS >= 201709L) || \ __cpp_nontype_template_args >= 201911L) && \ !defined(__NVCOMPILER) # define FMT_USE_NONTYPE_TEMPLATE_ARGS 1 # else # define FMT_USE_NONTYPE_TEMPLATE_ARGS 0 # endif #endif // Enable minimal optimizations for more compact code in debug mode. FMT_GCC_PRAGMA("GCC push_options") #if !defined(__OPTIMIZE__) && !defined(__NVCOMPILER) FMT_GCC_PRAGMA("GCC optimize(\"Og\")") #endif FMT_BEGIN_NAMESPACE FMT_MODULE_EXPORT_BEGIN // Implementations of enable_if_t and other metafunctions for older systems. template using enable_if_t = typename std::enable_if::type; template using conditional_t = typename std::conditional::type; template using bool_constant = std::integral_constant; template using remove_reference_t = typename std::remove_reference::type; template using remove_const_t = typename std::remove_const::type; template using remove_cvref_t = typename std::remove_cv>::type; template struct type_identity { using type = T; }; template using type_identity_t = typename type_identity::type; template using underlying_t = typename std::underlying_type::type; template struct disjunction : std::false_type {}; template struct disjunction
: P {}; template struct disjunction : conditional_t> {}; template struct conjunction : std::true_type {}; template struct conjunction
: P {}; template struct conjunction : conditional_t, P1> {}; struct monostate { constexpr monostate() {} }; // An enable_if helper to be used in template parameters which results in much // shorter symbols: https://godbolt.org/z/sWw4vP. Extra parentheses are needed // to workaround a bug in MSVC 2019 (see #1140 and #1186). #ifdef FMT_DOC # define FMT_ENABLE_IF(...) #else # define FMT_ENABLE_IF(...) enable_if_t<(__VA_ARGS__), int> = 0 #endif FMT_BEGIN_DETAIL_NAMESPACE // Suppresses "unused variable" warnings with the method described in // https://herbsutter.com/2009/10/18/mailbag-shutting-up-compiler-warnings/. // (void)var does not work on many Intel compilers. template FMT_CONSTEXPR void ignore_unused(const T&...) {} constexpr FMT_INLINE auto is_constant_evaluated( bool default_value = false) noexcept -> bool { #ifdef __cpp_lib_is_constant_evaluated ignore_unused(default_value); return std::is_constant_evaluated(); #else return default_value; #endif } // Suppresses "conditional expression is constant" warnings. template constexpr FMT_INLINE auto const_check(T value) -> T { return value; } FMT_NORETURN FMT_API void assert_fail(const char* file, int line, const char* message); #ifndef FMT_ASSERT # ifdef NDEBUG // FMT_ASSERT is not empty to avoid -Wempty-body. # define FMT_ASSERT(condition, message) \ ::fmt::detail::ignore_unused((condition), (message)) # else # define FMT_ASSERT(condition, message) \ ((condition) /* void() fails with -Winvalid-constexpr on clang 4.0.1 */ \ ? (void)0 \ : ::fmt::detail::assert_fail(__FILE__, __LINE__, (message))) # endif #endif #if defined(FMT_USE_STRING_VIEW) template using std_string_view = std::basic_string_view; #elif defined(FMT_USE_EXPERIMENTAL_STRING_VIEW) template using std_string_view = std::experimental::basic_string_view; #else template struct std_string_view {}; #endif #ifdef FMT_USE_INT128 // Do nothing. #elif defined(__SIZEOF_INT128__) && !defined(__NVCC__) && \ !(FMT_CLANG_VERSION && FMT_MSC_VERSION) # define FMT_USE_INT128 1 using int128_opt = __int128_t; // An optional native 128-bit integer. using uint128_opt = __uint128_t; template inline auto convert_for_visit(T value) -> T { return value; } #else # define FMT_USE_INT128 0 #endif #if !FMT_USE_INT128 enum class int128_opt {}; enum class uint128_opt {}; // Reduce template instantiations. template auto convert_for_visit(T) -> monostate { return {}; } #endif // Casts a nonnegative integer to unsigned. template FMT_CONSTEXPR auto to_unsigned(Int value) -> typename std::make_unsigned::type { FMT_ASSERT(std::is_unsigned::value || value >= 0, "negative value"); return static_cast::type>(value); } FMT_MSC_WARNING(suppress : 4566) constexpr unsigned char micro[] = "\u00B5"; constexpr auto is_utf8() -> bool { // Avoid buggy sign extensions in MSVC's constant evaluation mode (#2297). using uchar = unsigned char; return FMT_UNICODE || (sizeof(micro) == 3 && uchar(micro[0]) == 0xC2 && uchar(micro[1]) == 0xB5); } FMT_END_DETAIL_NAMESPACE /** An implementation of ``std::basic_string_view`` for pre-C++17. It provides a subset of the API. ``fmt::basic_string_view`` is used for format strings even if ``std::string_view`` is available to prevent issues when a library is compiled with a different ``-std`` option than the client code (which is not recommended). 
*/ template class basic_string_view { private: const Char* data_; size_t size_; public: using value_type = Char; using iterator = const Char*; constexpr basic_string_view() noexcept : data_(nullptr), size_(0) {} /** Constructs a string reference object from a C string and a size. */ constexpr basic_string_view(const Char* s, size_t count) noexcept : data_(s), size_(count) {} /** \rst Constructs a string reference object from a C string computing the size with ``std::char_traits::length``. \endrst */ FMT_CONSTEXPR_CHAR_TRAITS FMT_INLINE basic_string_view(const Char* s) : data_(s), size_(detail::const_check(std::is_same::value && !detail::is_constant_evaluated(true)) ? std::strlen(reinterpret_cast(s)) : std::char_traits::length(s)) {} /** Constructs a string reference from a ``std::basic_string`` object. */ template FMT_CONSTEXPR basic_string_view( const std::basic_string& s) noexcept : data_(s.data()), size_(s.size()) {} template >::value)> FMT_CONSTEXPR basic_string_view(S s) noexcept : data_(s.data()), size_(s.size()) {} /** Returns a pointer to the string data. */ constexpr auto data() const noexcept -> const Char* { return data_; } /** Returns the string size. */ constexpr auto size() const noexcept -> size_t { return size_; } constexpr auto begin() const noexcept -> iterator { return data_; } constexpr auto end() const noexcept -> iterator { return data_ + size_; } constexpr auto operator[](size_t pos) const noexcept -> const Char& { return data_[pos]; } FMT_CONSTEXPR void remove_prefix(size_t n) noexcept { data_ += n; size_ -= n; } // Lexicographically compare this string reference to other. FMT_CONSTEXPR_CHAR_TRAITS auto compare(basic_string_view other) const -> int { size_t str_size = size_ < other.size_ ? size_ : other.size_; int result = std::char_traits::compare(data_, other.data_, str_size); if (result == 0) result = size_ == other.size_ ? 0 : (size_ < other.size_ ? -1 : 1); return result; } FMT_CONSTEXPR_CHAR_TRAITS friend auto operator==(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) == 0; } friend auto operator!=(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) != 0; } friend auto operator<(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) < 0; } friend auto operator<=(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) <= 0; } friend auto operator>(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) > 0; } friend auto operator>=(basic_string_view lhs, basic_string_view rhs) -> bool { return lhs.compare(rhs) >= 0; } }; using string_view = basic_string_view; /** Specifies if ``T`` is a character type. Can be specialized by users. */ template struct is_char : std::false_type {}; template <> struct is_char : std::true_type {}; FMT_BEGIN_DETAIL_NAMESPACE // A base class for compile-time strings. struct compile_string {}; template struct is_compile_string : std::is_base_of {}; // Returns a string view of `s`. 
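// (Added commentary, not upstream fmt.)  These overloads are the single
// funnel through which every supported string-like argument type (C strings,
// std::basic_string, fmt::basic_string_view, std::string_view and types
// matched by is_string below) is reduced to a basic_string_view.  Their
// return type also drives char_t, e.g.
//
//   static_assert(std::is_same<fmt::char_t<std::wstring>, wchar_t>::value, "");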
template ::value)> FMT_INLINE auto to_string_view(const Char* s) -> basic_string_view { return s; } template inline auto to_string_view(const std::basic_string& s) -> basic_string_view { return s; } template constexpr auto to_string_view(basic_string_view s) -> basic_string_view { return s; } template >::value)> inline auto to_string_view(std_string_view s) -> basic_string_view { return s; } template ::value)> constexpr auto to_string_view(const S& s) -> basic_string_view { return basic_string_view(s); } void to_string_view(...); // Specifies whether S is a string type convertible to fmt::basic_string_view. // It should be a constexpr function but MSVC 2017 fails to compile it in // enable_if and MSVC 2015 fails to compile it as an alias template. // ADL invocation of to_string_view is DEPRECATED! template struct is_string : std::is_class()))> { }; template struct char_t_impl {}; template struct char_t_impl::value>> { using result = decltype(to_string_view(std::declval())); using type = typename result::value_type; }; enum class type { none_type, // Integer types should go first, int_type, uint_type, long_long_type, ulong_long_type, int128_type, uint128_type, bool_type, char_type, last_integer_type = char_type, // followed by floating-point types. float_type, double_type, long_double_type, last_numeric_type = long_double_type, cstring_type, string_type, pointer_type, custom_type }; // Maps core type T to the corresponding type enum constant. template struct type_constant : std::integral_constant {}; #define FMT_TYPE_CONSTANT(Type, constant) \ template \ struct type_constant \ : std::integral_constant {} FMT_TYPE_CONSTANT(int, int_type); FMT_TYPE_CONSTANT(unsigned, uint_type); FMT_TYPE_CONSTANT(long long, long_long_type); FMT_TYPE_CONSTANT(unsigned long long, ulong_long_type); FMT_TYPE_CONSTANT(int128_opt, int128_type); FMT_TYPE_CONSTANT(uint128_opt, uint128_type); FMT_TYPE_CONSTANT(bool, bool_type); FMT_TYPE_CONSTANT(Char, char_type); FMT_TYPE_CONSTANT(float, float_type); FMT_TYPE_CONSTANT(double, double_type); FMT_TYPE_CONSTANT(long double, long_double_type); FMT_TYPE_CONSTANT(const Char*, cstring_type); FMT_TYPE_CONSTANT(basic_string_view, string_type); FMT_TYPE_CONSTANT(const void*, pointer_type); constexpr bool is_integral_type(type t) { return t > type::none_type && t <= type::last_integer_type; } constexpr bool is_arithmetic_type(type t) { return t > type::none_type && t <= type::last_numeric_type; } FMT_NORETURN FMT_API void throw_format_error(const char* message); struct error_handler { constexpr error_handler() = default; constexpr error_handler(const error_handler&) = default; // This function is intentionally not constexpr to give a compile-time error. FMT_NORETURN void on_error(const char* message) { throw_format_error(message); } }; FMT_END_DETAIL_NAMESPACE /** String's character type. */ template using char_t = typename detail::char_t_impl::type; /** \rst Parsing context consisting of a format string range being parsed and an argument counter for automatic indexing. You can use the ``format_parse_context`` type alias for ``char`` instead. 
\endrst */ template class basic_format_parse_context : private ErrorHandler { private: basic_string_view format_str_; int next_arg_id_; FMT_CONSTEXPR void do_check_arg_id(int id); public: using char_type = Char; using iterator = typename basic_string_view::iterator; explicit constexpr basic_format_parse_context( basic_string_view format_str, ErrorHandler eh = {}, int next_arg_id = 0) : ErrorHandler(eh), format_str_(format_str), next_arg_id_(next_arg_id) {} /** Returns an iterator to the beginning of the format string range being parsed. */ constexpr auto begin() const noexcept -> iterator { return format_str_.begin(); } /** Returns an iterator past the end of the format string range being parsed. */ constexpr auto end() const noexcept -> iterator { return format_str_.end(); } /** Advances the begin iterator to ``it``. */ FMT_CONSTEXPR void advance_to(iterator it) { format_str_.remove_prefix(detail::to_unsigned(it - begin())); } /** Reports an error if using the manual argument indexing; otherwise returns the next argument index and switches to the automatic indexing. */ FMT_CONSTEXPR auto next_arg_id() -> int { if (next_arg_id_ < 0) { on_error("cannot switch from manual to automatic argument indexing"); return 0; } int id = next_arg_id_++; do_check_arg_id(id); return id; } /** Reports an error if using the automatic argument indexing; otherwise switches to the manual indexing. */ FMT_CONSTEXPR void check_arg_id(int id) { if (next_arg_id_ > 0) { on_error("cannot switch from automatic to manual argument indexing"); return; } next_arg_id_ = -1; do_check_arg_id(id); } FMT_CONSTEXPR void check_arg_id(basic_string_view) {} FMT_CONSTEXPR void check_dynamic_spec(int arg_id); FMT_CONSTEXPR void on_error(const char* message) { ErrorHandler::on_error(message); } constexpr auto error_handler() const -> ErrorHandler { return *this; } }; using format_parse_context = basic_format_parse_context; FMT_BEGIN_DETAIL_NAMESPACE // A parse context with extra data used only in compile-time checks. template class compile_parse_context : public basic_format_parse_context { private: int num_args_; const type* types_; using base = basic_format_parse_context; public: explicit FMT_CONSTEXPR compile_parse_context( basic_string_view format_str, int num_args, const type* types, ErrorHandler eh = {}, int next_arg_id = 0) : base(format_str, eh, next_arg_id), num_args_(num_args), types_(types) {} constexpr auto num_args() const -> int { return num_args_; } constexpr auto arg_type(int id) const -> type { return types_[id]; } FMT_CONSTEXPR auto next_arg_id() -> int { int id = base::next_arg_id(); if (id >= num_args_) this->on_error("argument not found"); return id; } FMT_CONSTEXPR void check_arg_id(int id) { base::check_arg_id(id); if (id >= num_args_) this->on_error("argument not found"); } using base::check_arg_id; FMT_CONSTEXPR void check_dynamic_spec(int arg_id) { if (arg_id < num_args_ && types_ && !is_integral_type(types_[arg_id])) this->on_error("width/precision is not integer"); } }; FMT_END_DETAIL_NAMESPACE template FMT_CONSTEXPR void basic_format_parse_context::do_check_arg_id(int id) { // Argument id is only checked at compile-time during parsing because // formatting has its own validation. 
if (detail::is_constant_evaluated() && FMT_GCC_VERSION >= 1200) { using context = detail::compile_parse_context; if (id >= static_cast(this)->num_args()) on_error("argument not found"); } } template FMT_CONSTEXPR void basic_format_parse_context::check_dynamic_spec(int arg_id) { if (detail::is_constant_evaluated()) { using context = detail::compile_parse_context; static_cast(this)->check_dynamic_spec(arg_id); } } template class basic_format_arg; template class basic_format_args; template class dynamic_format_arg_store; // A formatter for objects of type T. template struct formatter { // A deleted default constructor indicates a disabled formatter. formatter() = delete; }; // Specifies if T has an enabled formatter specialization. A type can be // formattable even if it doesn't have a formatter e.g. via a conversion. template using has_formatter = std::is_constructible>; // Checks whether T is a container with contiguous storage. template struct is_contiguous : std::false_type {}; template struct is_contiguous> : std::true_type {}; class appender; FMT_BEGIN_DETAIL_NAMESPACE template constexpr auto has_const_formatter_impl(T*) -> decltype(typename Context::template formatter_type().format( std::declval(), std::declval()), true) { return true; } template constexpr auto has_const_formatter_impl(...) -> bool { return false; } template constexpr auto has_const_formatter() -> bool { return has_const_formatter_impl(static_cast(nullptr)); } // Extracts a reference to the container from back_insert_iterator. template inline auto get_container(std::back_insert_iterator it) -> Container& { using base = std::back_insert_iterator; struct accessor : base { accessor(base b) : base(b) {} using base::container; }; return *accessor(it).container; } template FMT_CONSTEXPR auto copy_str(InputIt begin, InputIt end, OutputIt out) -> OutputIt { while (begin != end) *out++ = static_cast(*begin++); return out; } template , U>::value&& is_char::value)> FMT_CONSTEXPR auto copy_str(T* begin, T* end, U* out) -> U* { if (is_constant_evaluated()) return copy_str(begin, end, out); auto size = to_unsigned(end - begin); memcpy(out, begin, size * sizeof(U)); return out + size; } /** \rst A contiguous memory buffer with an optional growing ability. It is an internal class and shouldn't be used directly, only via `~fmt::basic_memory_buffer`. \endrst */ template class buffer { private: T* ptr_; size_t size_; size_t capacity_; protected: // Don't initialize ptr_ since it is not accessed to save a few cycles. FMT_MSC_WARNING(suppress : 26495) buffer(size_t sz) noexcept : size_(sz), capacity_(sz) {} FMT_CONSTEXPR20 buffer(T* p = nullptr, size_t sz = 0, size_t cap = 0) noexcept : ptr_(p), size_(sz), capacity_(cap) {} FMT_CONSTEXPR20 ~buffer() = default; buffer(buffer&&) = default; /** Sets the buffer data and capacity. */ FMT_CONSTEXPR void set(T* buf_data, size_t buf_capacity) noexcept { ptr_ = buf_data; capacity_ = buf_capacity; } /** Increases the buffer capacity to hold at least *capacity* elements. */ virtual FMT_CONSTEXPR20 void grow(size_t capacity) = 0; public: using value_type = T; using const_reference = const T&; buffer(const buffer&) = delete; void operator=(const buffer&) = delete; auto begin() noexcept -> T* { return ptr_; } auto end() noexcept -> T* { return ptr_ + size_; } auto begin() const noexcept -> const T* { return ptr_; } auto end() const noexcept -> const T* { return ptr_ + size_; } /** Returns the size of this buffer. 
*/ constexpr auto size() const noexcept -> size_t { return size_; } /** Returns the capacity of this buffer. */ constexpr auto capacity() const noexcept -> size_t { return capacity_; } /** Returns a pointer to the buffer data. */ FMT_CONSTEXPR auto data() noexcept -> T* { return ptr_; } /** Returns a pointer to the buffer data. */ FMT_CONSTEXPR auto data() const noexcept -> const T* { return ptr_; } /** Clears this buffer. */ void clear() { size_ = 0; } // Tries resizing the buffer to contain *count* elements. If T is a POD type // the new elements may not be initialized. FMT_CONSTEXPR20 void try_resize(size_t count) { try_reserve(count); size_ = count <= capacity_ ? count : capacity_; } // Tries increasing the buffer capacity to *new_capacity*. It can increase the // capacity by a smaller amount than requested but guarantees there is space // for at least one additional element either by increasing the capacity or by // flushing the buffer if it is full. FMT_CONSTEXPR20 void try_reserve(size_t new_capacity) { if (new_capacity > capacity_) grow(new_capacity); } FMT_CONSTEXPR20 void push_back(const T& value) { try_reserve(size_ + 1); ptr_[size_++] = value; } /** Appends data to the end of the buffer. */ template void append(const U* begin, const U* end); template FMT_CONSTEXPR auto operator[](Idx index) -> T& { return ptr_[index]; } template FMT_CONSTEXPR auto operator[](Idx index) const -> const T& { return ptr_[index]; } }; struct buffer_traits { explicit buffer_traits(size_t) {} auto count() const -> size_t { return 0; } auto limit(size_t size) -> size_t { return size; } }; class fixed_buffer_traits { private: size_t count_ = 0; size_t limit_; public: explicit fixed_buffer_traits(size_t limit) : limit_(limit) {} auto count() const -> size_t { return count_; } auto limit(size_t size) -> size_t { size_t n = limit_ > count_ ? limit_ - count_ : 0; count_ += size; return size < n ? size : n; } }; // A buffer that writes to an output iterator when flushed. 
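// [Editor's note: illustrative sketch, not part of the upstream header.]
// detail::buffer above is the abstract growable buffer that formatting output
// funnels through; the iterator_buffer specializations below batch writes into
// a small on-stack array (256 code units) and copy them to the wrapped output
// iterator when grow()/flush() runs. Typical user-level code that exercises
// this path (fmt::memory_buffer and fmt::to_string live in fmt/format.h, not
// in this header):
//
//   std::vector<char> out;
//   fmt::format_to(std::back_inserter(out), "{}-{}", 1, 2);   // "1-2"
//   fmt::memory_buffer buf;
//   fmt::format_to(std::back_inserter(buf), "pi = {:.2f}", 3.14159);
//   std::string s = fmt::to_string(buf);                      // "pi = 3.14"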
template class iterator_buffer final : public Traits, public buffer { private: OutputIt out_; enum { buffer_size = 256 }; T data_[buffer_size]; protected: FMT_CONSTEXPR20 void grow(size_t) override { if (this->size() == buffer_size) flush(); } void flush() { auto size = this->size(); this->clear(); out_ = copy_str(data_, data_ + this->limit(size), out_); } public: explicit iterator_buffer(OutputIt out, size_t n = buffer_size) : Traits(n), buffer(data_, 0, buffer_size), out_(out) {} iterator_buffer(iterator_buffer&& other) : Traits(other), buffer(data_, 0, buffer_size), out_(other.out_) {} ~iterator_buffer() { flush(); } auto out() -> OutputIt { flush(); return out_; } auto count() const -> size_t { return Traits::count() + this->size(); } }; template class iterator_buffer final : public fixed_buffer_traits, public buffer { private: T* out_; enum { buffer_size = 256 }; T data_[buffer_size]; protected: FMT_CONSTEXPR20 void grow(size_t) override { if (this->size() == this->capacity()) flush(); } void flush() { size_t n = this->limit(this->size()); if (this->data() == out_) { out_ += n; this->set(data_, buffer_size); } this->clear(); } public: explicit iterator_buffer(T* out, size_t n = buffer_size) : fixed_buffer_traits(n), buffer(out, 0, n), out_(out) {} iterator_buffer(iterator_buffer&& other) : fixed_buffer_traits(other), buffer(std::move(other)), out_(other.out_) { if (this->data() != out_) { this->set(data_, buffer_size); this->clear(); } } ~iterator_buffer() { flush(); } auto out() -> T* { flush(); return out_; } auto count() const -> size_t { return fixed_buffer_traits::count() + this->size(); } }; template class iterator_buffer final : public buffer { protected: FMT_CONSTEXPR20 void grow(size_t) override {} public: explicit iterator_buffer(T* out, size_t = 0) : buffer(out, 0, ~size_t()) {} auto out() -> T* { return &*this->end(); } }; // A buffer that writes to a container with the contiguous storage. template class iterator_buffer, enable_if_t::value, typename Container::value_type>> final : public buffer { private: Container& container_; protected: FMT_CONSTEXPR20 void grow(size_t capacity) override { container_.resize(capacity); this->set(&container_[0], capacity); } public: explicit iterator_buffer(Container& c) : buffer(c.size()), container_(c) {} explicit iterator_buffer(std::back_insert_iterator out, size_t = 0) : iterator_buffer(get_container(out)) {} auto out() -> std::back_insert_iterator { return std::back_inserter(container_); } }; // A buffer that counts the number of code units written discarding the output. template class counting_buffer final : public buffer { private: enum { buffer_size = 256 }; T data_[buffer_size]; size_t count_ = 0; protected: FMT_CONSTEXPR20 void grow(size_t) override { if (this->size() != buffer_size) return; count_ += this->size(); this->clear(); } public: counting_buffer() : buffer(data_, 0, buffer_size) {} auto count() -> size_t { return count_ + this->size(); } }; template using buffer_appender = conditional_t::value, appender, std::back_insert_iterator>>; // Maps an output iterator to a buffer. template auto get_buffer(OutputIt out) -> iterator_buffer { return iterator_buffer(out); } template auto get_iterator(Buffer& buf) -> decltype(buf.out()) { return buf.out(); } template auto get_iterator(buffer& buf) -> buffer_appender { return buffer_appender(buf); } template struct fallback_formatter { fallback_formatter() = delete; }; // Specifies if T has an enabled fallback_formatter specialization. 
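// [Editor's note: illustrative sketch, not part of the upstream header.]
// counting_buffer above never materializes any output; it only accumulates the
// number of code units written, which is what the public fmt::formatted_size
// helper (declared elsewhere in the library) builds on:
//
//   size_t n = fmt::formatted_size("{:>8}", 42);   // n == 8, nothing is written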
template using has_fallback_formatter = #ifdef FMT_DEPRECATED_OSTREAM std::is_constructible>; #else std::false_type; #endif struct view {}; template struct named_arg : view { const Char* name; const T& value; named_arg(const Char* n, const T& v) : name(n), value(v) {} }; template struct named_arg_info { const Char* name; int id; }; template struct arg_data { // args_[0].named_args points to named_args_ to avoid bloating format_args. // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. T args_[1 + (NUM_ARGS != 0 ? NUM_ARGS : +1)]; named_arg_info named_args_[NUM_NAMED_ARGS]; template arg_data(const U&... init) : args_{T(named_args_, NUM_NAMED_ARGS), init...} {} arg_data(const arg_data& other) = delete; auto args() const -> const T* { return args_ + 1; } auto named_args() -> named_arg_info* { return named_args_; } }; template struct arg_data { // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. T args_[NUM_ARGS != 0 ? NUM_ARGS : +1]; template FMT_CONSTEXPR FMT_INLINE arg_data(const U&... init) : args_{init...} {} FMT_CONSTEXPR FMT_INLINE auto args() const -> const T* { return args_; } FMT_CONSTEXPR FMT_INLINE auto named_args() -> std::nullptr_t { return nullptr; } }; template inline void init_named_args(named_arg_info*, int, int) {} template struct is_named_arg : std::false_type {}; template struct is_statically_named_arg : std::false_type {}; template struct is_named_arg> : std::true_type {}; template ::value)> void init_named_args(named_arg_info* named_args, int arg_count, int named_arg_count, const T&, const Tail&... args) { init_named_args(named_args, arg_count + 1, named_arg_count, args...); } template ::value)> void init_named_args(named_arg_info* named_args, int arg_count, int named_arg_count, const T& arg, const Tail&... args) { named_args[named_arg_count++] = {arg.name, arg_count}; init_named_args(named_args, arg_count + 1, named_arg_count, args...); } template FMT_CONSTEXPR FMT_INLINE void init_named_args(std::nullptr_t, int, int, const Args&...) {} template constexpr auto count() -> size_t { return B ? 1 : 0; } template constexpr auto count() -> size_t { return (B1 ? 1 : 0) + count(); } template constexpr auto count_named_args() -> size_t { return count::value...>(); } template constexpr auto count_statically_named_args() -> size_t { return count::value...>(); } struct unformattable {}; struct unformattable_char : unformattable {}; struct unformattable_const : unformattable {}; struct unformattable_pointer : unformattable {}; template struct string_value { const Char* data; size_t size; }; template struct named_arg_value { const named_arg_info* data; size_t size; }; template struct custom_value { using parse_context = typename Context::parse_context_type; void* value; void (*format)(void* arg, parse_context& parse_ctx, Context& ctx); }; // A formatting argument value. 
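// [Editor's note: illustrative sketch, not part of the upstream header.]
// How named arguments reach the machinery above: fmt::arg (defined further
// below in this header) wraps a name plus a reference into named_arg, and
// init_named_args records {name, argument index} pairs into named_arg_info so
// basic_format_args::get_id() can resolve "{name}" at format time:
//
//   std::string who = "Ada";
//   auto s = fmt::format("{name} was born in {year}",
//                        fmt::arg("name", who), fmt::arg("year", 1815));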
template class value { public: using char_type = typename Context::char_type; union { monostate no_value; int int_value; unsigned uint_value; long long long_long_value; unsigned long long ulong_long_value; int128_opt int128_value; uint128_opt uint128_value; bool bool_value; char_type char_value; float float_value; double double_value; long double long_double_value; const void* pointer; string_value string; custom_value custom; named_arg_value named_args; }; constexpr FMT_INLINE value() : no_value() {} constexpr FMT_INLINE value(int val) : int_value(val) {} constexpr FMT_INLINE value(unsigned val) : uint_value(val) {} constexpr FMT_INLINE value(long long val) : long_long_value(val) {} constexpr FMT_INLINE value(unsigned long long val) : ulong_long_value(val) {} FMT_INLINE value(int128_opt val) : int128_value(val) {} FMT_INLINE value(uint128_opt val) : uint128_value(val) {} constexpr FMT_INLINE value(float val) : float_value(val) {} constexpr FMT_INLINE value(double val) : double_value(val) {} FMT_INLINE value(long double val) : long_double_value(val) {} constexpr FMT_INLINE value(bool val) : bool_value(val) {} constexpr FMT_INLINE value(char_type val) : char_value(val) {} FMT_CONSTEXPR FMT_INLINE value(const char_type* val) { string.data = val; if (is_constant_evaluated()) string.size = {}; } FMT_CONSTEXPR FMT_INLINE value(basic_string_view val) { string.data = val.data(); string.size = val.size(); } FMT_INLINE value(const void* val) : pointer(val) {} FMT_INLINE value(const named_arg_info* args, size_t size) : named_args{args, size} {} template FMT_CONSTEXPR FMT_INLINE value(T& val) { using value_type = remove_cvref_t; custom.value = const_cast(&val); // Get the formatter type through the context to allow different contexts // have different extension points, e.g. `formatter` for `format` and // `printf_formatter` for `printf`. custom.format = format_custom_arg< value_type, conditional_t::value, typename Context::template formatter_type, fallback_formatter>>; } value(unformattable); value(unformattable_char); value(unformattable_const); value(unformattable_pointer); private: // Formats an argument of a custom type, such as a user-defined class. template static void format_custom_arg(void* arg, typename Context::parse_context_type& parse_ctx, Context& ctx) { auto f = Formatter(); parse_ctx.advance_to(f.parse(parse_ctx)); using qualified_type = conditional_t(), const T, T>; ctx.advance_to(f.format(*static_cast(arg), ctx)); } }; template FMT_CONSTEXPR auto make_arg(T&& value) -> basic_format_arg; // To minimize the number of types we need to deal with, long is translated // either to int or to long long depending on its size. enum { long_short = sizeof(long) == sizeof(int) }; using long_type = conditional_t; using ulong_type = conditional_t; #ifdef __cpp_lib_byte inline auto format_as(std::byte b) -> unsigned char { return static_cast(b); } #endif template struct has_format_as { template ::value&& std::is_integral::value)> static auto check(U*) -> std::true_type; static auto check(...) -> std::false_type; enum { value = decltype(check(static_cast(nullptr)))::value }; }; // Maps formatting arguments to core types. // arg_mapper reports errors by returning unformattable instead of using // static_assert because it's used in the is_formattable trait. 
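// [Editor's note: illustrative sketch, not part of the upstream header.]
// arg_mapper below collapses the full range of argument types onto the small
// set of enum `type` constants stored in a format_args descriptor: short and
// signed char map to int, unsigned short to unsigned, std::string and anything
// convertible to a string view map to basic_string_view, and so on. For
// example, the following holds for the char-based context:
//
//   static_assert(
//       std::is_same<decltype(fmt::detail::arg_mapper<fmt::format_context>()
//                                 .map(short(1))),
//                    int>::value,
//       "short is formatted as int");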
template struct arg_mapper { using char_type = typename Context::char_type; FMT_CONSTEXPR FMT_INLINE auto map(signed char val) -> int { return val; } FMT_CONSTEXPR FMT_INLINE auto map(unsigned char val) -> unsigned { return val; } FMT_CONSTEXPR FMT_INLINE auto map(short val) -> int { return val; } FMT_CONSTEXPR FMT_INLINE auto map(unsigned short val) -> unsigned { return val; } FMT_CONSTEXPR FMT_INLINE auto map(int val) -> int { return val; } FMT_CONSTEXPR FMT_INLINE auto map(unsigned val) -> unsigned { return val; } FMT_CONSTEXPR FMT_INLINE auto map(long val) -> long_type { return val; } FMT_CONSTEXPR FMT_INLINE auto map(unsigned long val) -> ulong_type { return val; } FMT_CONSTEXPR FMT_INLINE auto map(long long val) -> long long { return val; } FMT_CONSTEXPR FMT_INLINE auto map(unsigned long long val) -> unsigned long long { return val; } FMT_CONSTEXPR FMT_INLINE auto map(int128_opt val) -> int128_opt { return val; } FMT_CONSTEXPR FMT_INLINE auto map(uint128_opt val) -> uint128_opt { return val; } FMT_CONSTEXPR FMT_INLINE auto map(bool val) -> bool { return val; } template ::value || std::is_same::value)> FMT_CONSTEXPR FMT_INLINE auto map(T val) -> char_type { return val; } template ::value || #ifdef __cpp_char8_t std::is_same::value || #endif std::is_same::value || std::is_same::value) && !std::is_same::value, int> = 0> FMT_CONSTEXPR FMT_INLINE auto map(T) -> unformattable_char { return {}; } FMT_CONSTEXPR FMT_INLINE auto map(float val) -> float { return val; } FMT_CONSTEXPR FMT_INLINE auto map(double val) -> double { return val; } FMT_CONSTEXPR FMT_INLINE auto map(long double val) -> long double { return val; } FMT_CONSTEXPR FMT_INLINE auto map(char_type* val) -> const char_type* { return val; } FMT_CONSTEXPR FMT_INLINE auto map(const char_type* val) -> const char_type* { return val; } template ::value && !std::is_pointer::value && std::is_same>::value)> FMT_CONSTEXPR FMT_INLINE auto map(const T& val) -> basic_string_view { return to_string_view(val); } template ::value && !std::is_pointer::value && !std::is_same>::value)> FMT_CONSTEXPR FMT_INLINE auto map(const T&) -> unformattable_char { return {}; } template >::value && !is_string::value && !has_formatter::value && !has_fallback_formatter::value)> FMT_CONSTEXPR FMT_INLINE auto map(const T& val) -> basic_string_view { return basic_string_view(val); } template >::value && !std::is_convertible>::value && !is_string::value && !has_formatter::value && !has_fallback_formatter::value)> FMT_CONSTEXPR FMT_INLINE auto map(const T& val) -> basic_string_view { return std_string_view(val); } FMT_CONSTEXPR FMT_INLINE auto map(void* val) -> const void* { return val; } FMT_CONSTEXPR FMT_INLINE auto map(const void* val) -> const void* { return val; } FMT_CONSTEXPR FMT_INLINE auto map(std::nullptr_t val) -> const void* { return val; } // We use SFINAE instead of a const T* parameter to avoid conflicting with // the C array overload. 
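// [Editor's note: illustrative sketch, not part of the upstream header.]
// The overload below is what makes arbitrary object pointers non-formattable:
// they map to unformattable_pointer and trigger the "Formatting of non-void
// pointers is disallowed" diagnostic in make_value. To print an address, cast
// to void* or use fmt::ptr (provided by fmt/format.h):
//
//   int x = 0;
//   // fmt::format("{}", &x);                                    // rejected
//   auto s1 = fmt::format("{}", static_cast<const void*>(&x));   // OK
//   auto s2 = fmt::format("{}", fmt::ptr(&x));                   // OK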
template < typename T, FMT_ENABLE_IF( std::is_pointer::value || std::is_member_pointer::value || std::is_function::type>::value || (std::is_convertible::value && !std::is_convertible::value && !has_formatter::value))> FMT_CONSTEXPR auto map(const T&) -> unformattable_pointer { return {}; } template ::value)> FMT_CONSTEXPR FMT_INLINE auto map(const T (&values)[N]) -> const T (&)[N] { return values; } template ::value&& std::is_convertible::value && !has_format_as::value && !has_formatter::value && !has_fallback_formatter::value)> FMT_DEPRECATED FMT_CONSTEXPR FMT_INLINE auto map(const T& val) -> decltype(std::declval().map( static_cast>(val))) { return map(static_cast>(val)); } template ::value && !has_formatter::value)> FMT_CONSTEXPR FMT_INLINE auto map(const T& val) -> decltype(std::declval().map(format_as(T()))) { return map(format_as(val)); } template > struct formattable : bool_constant() || !std::is_const>::value || has_fallback_formatter::value> {}; #if (FMT_MSC_VERSION != 0 && FMT_MSC_VERSION < 1910) || \ FMT_ICC_VERSION != 0 || defined(__NVCC__) // Workaround a bug in MSVC and Intel (Issue 2746). template FMT_CONSTEXPR FMT_INLINE auto do_map(T&& val) -> T& { return val; } #else template ::value)> FMT_CONSTEXPR FMT_INLINE auto do_map(T&& val) -> T& { return val; } template ::value)> FMT_CONSTEXPR FMT_INLINE auto do_map(T&&) -> unformattable_const { return {}; } #endif template , FMT_ENABLE_IF(!is_string::value && !is_char::value && !std::is_array::value && !std::is_pointer::value && !has_format_as::value && (has_formatter::value || has_fallback_formatter::value))> FMT_CONSTEXPR FMT_INLINE auto map(T&& val) -> decltype(this->do_map(std::forward(val))) { return do_map(std::forward(val)); } template ::value)> FMT_CONSTEXPR FMT_INLINE auto map(const T& named_arg) -> decltype(std::declval().map(named_arg.value)) { return map(named_arg.value); } auto map(...) -> unformattable { return {}; } }; // A type constant after applying arg_mapper. template using mapped_type_constant = type_constant().map(std::declval())), typename Context::char_type>; enum { packed_arg_bits = 4 }; // Maximum number of arguments with packed types. enum { max_packed_args = 62 / packed_arg_bits }; enum : unsigned long long { is_unpacked_bit = 1ULL << 63 }; enum : unsigned long long { has_named_args_bit = 1ULL << 62 }; FMT_END_DETAIL_NAMESPACE // An output iterator that appends to a buffer. // It is used to reduce symbol sizes for the common case. class appender : public std::back_insert_iterator> { using base = std::back_insert_iterator>; template friend auto get_buffer(appender out) -> detail::buffer& { return detail::get_container(out); } public: using std::back_insert_iterator>::back_insert_iterator; appender(base it) noexcept : base(it) {} FMT_UNCHECKED_ITERATOR(appender); auto operator++() noexcept -> appender& { return *this; } auto operator++(int) noexcept -> appender { return *this; } }; // A formatting argument. It is a trivially copyable/constructible type to // allow storage in basic_memory_buffer. 
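// [Editor's note: illustrative sketch, not part of the upstream header.]
// basic_format_arg below is the type-erased view of a single argument; user
// code normally consumes it through visit_format_arg with a generic visitor:
//
//   int answer = 42;
//   auto store = fmt::make_format_args(answer);   // stores a reference only
//   fmt::format_args args(store);
//   fmt::visit_format_arg(
//       [](auto value) { /* called with int == 42 for argument 0 */ },
//       args.get(0));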
template class basic_format_arg { private: detail::value value_; detail::type type_; template friend FMT_CONSTEXPR auto detail::make_arg(T&& value) -> basic_format_arg; template friend FMT_CONSTEXPR auto visit_format_arg(Visitor&& vis, const basic_format_arg& arg) -> decltype(vis(0)); friend class basic_format_args; friend class dynamic_format_arg_store; using char_type = typename Context::char_type; template friend struct detail::arg_data; basic_format_arg(const detail::named_arg_info* args, size_t size) : value_(args, size) {} public: class handle { public: explicit handle(detail::custom_value custom) : custom_(custom) {} void format(typename Context::parse_context_type& parse_ctx, Context& ctx) const { custom_.format(custom_.value, parse_ctx, ctx); } private: detail::custom_value custom_; }; constexpr basic_format_arg() : type_(detail::type::none_type) {} constexpr explicit operator bool() const noexcept { return type_ != detail::type::none_type; } auto type() const -> detail::type { return type_; } auto is_integral() const -> bool { return detail::is_integral_type(type_); } auto is_arithmetic() const -> bool { return detail::is_arithmetic_type(type_); } }; /** \rst Visits an argument dispatching to the appropriate visit method based on the argument type. For example, if the argument type is ``double`` then ``vis(value)`` will be called with the value of type ``double``. \endrst */ template FMT_CONSTEXPR FMT_INLINE auto visit_format_arg( Visitor&& vis, const basic_format_arg& arg) -> decltype(vis(0)) { switch (arg.type_) { case detail::type::none_type: break; case detail::type::int_type: return vis(arg.value_.int_value); case detail::type::uint_type: return vis(arg.value_.uint_value); case detail::type::long_long_type: return vis(arg.value_.long_long_value); case detail::type::ulong_long_type: return vis(arg.value_.ulong_long_value); case detail::type::int128_type: return vis(detail::convert_for_visit(arg.value_.int128_value)); case detail::type::uint128_type: return vis(detail::convert_for_visit(arg.value_.uint128_value)); case detail::type::bool_type: return vis(arg.value_.bool_value); case detail::type::char_type: return vis(arg.value_.char_value); case detail::type::float_type: return vis(arg.value_.float_value); case detail::type::double_type: return vis(arg.value_.double_value); case detail::type::long_double_type: return vis(arg.value_.long_double_value); case detail::type::cstring_type: return vis(arg.value_.string.data); case detail::type::string_type: using sv = basic_string_view; return vis(sv(arg.value_.string.data, arg.value_.string.size)); case detail::type::pointer_type: return vis(arg.value_.pointer); case detail::type::custom_type: return vis(typename basic_format_arg::handle(arg.value_.custom)); } return vis(monostate()); } FMT_BEGIN_DETAIL_NAMESPACE template auto copy_str(InputIt begin, InputIt end, appender out) -> appender { get_container(out).append(begin, end); return out; } template FMT_CONSTEXPR auto copy_str(R&& rng, OutputIt out) -> OutputIt { return detail::copy_str(rng.begin(), rng.end(), out); } #if FMT_GCC_VERSION && FMT_GCC_VERSION < 500 // A workaround for gcc 4.8 to make void_t work in a SFINAE context. 
template struct void_t_impl { using type = void; }; template using void_t = typename detail::void_t_impl::type; #else template using void_t = void; #endif template struct is_output_iterator : std::false_type {}; template struct is_output_iterator< It, T, void_t::iterator_category, decltype(*std::declval() = std::declval())>> : std::true_type {}; template struct is_back_insert_iterator : std::false_type {}; template struct is_back_insert_iterator> : std::true_type {}; template struct is_contiguous_back_insert_iterator : std::false_type {}; template struct is_contiguous_back_insert_iterator> : is_contiguous {}; template <> struct is_contiguous_back_insert_iterator : std::true_type {}; // A type-erased reference to an std::locale to avoid a heavy include. class locale_ref { private: const void* locale_; // A type-erased pointer to std::locale. public: constexpr locale_ref() : locale_(nullptr) {} template explicit locale_ref(const Locale& loc); explicit operator bool() const noexcept { return locale_ != nullptr; } template auto get() const -> Locale; }; template constexpr auto encode_types() -> unsigned long long { return 0; } template constexpr auto encode_types() -> unsigned long long { return static_cast(mapped_type_constant::value) | (encode_types() << packed_arg_bits); } template FMT_CONSTEXPR FMT_INLINE auto make_value(T&& val) -> value { const auto& arg = arg_mapper().map(FMT_FORWARD(val)); constexpr bool formattable_char = !std::is_same::value; static_assert(formattable_char, "Mixing character types is disallowed."); constexpr bool formattable_const = !std::is_same::value; static_assert(formattable_const, "Cannot format a const argument."); // Formatting of arbitrary pointers is disallowed. If you want to output // a pointer cast it to "void *" or "const void *". In particular, this // forbids formatting of "[const] volatile char *" which is printed as bool // by iostreams. constexpr bool formattable_pointer = !std::is_same::value; static_assert(formattable_pointer, "Formatting of non-void pointers is disallowed."); constexpr bool formattable = !std::is_same::value; static_assert( formattable, "Cannot format an argument. To make type T formattable provide a " "formatter specialization: https://fmt.dev/latest/api.html#udt"); return {arg}; } template FMT_CONSTEXPR auto make_arg(T&& value) -> basic_format_arg { basic_format_arg arg; arg.type_ = mapped_type_constant::value; arg.value_ = make_value(value); return arg; } // The type template parameter is there to avoid an ODR violation when using // a fallback formatter in one translation unit and an implicit conversion in // another (not recommended). template FMT_CONSTEXPR FMT_INLINE auto make_arg(T&& val) -> value { return make_value(val); } template FMT_CONSTEXPR inline auto make_arg(T&& value) -> basic_format_arg { return make_arg(value); } FMT_END_DETAIL_NAMESPACE // Formatting context. template class basic_format_context { public: /** The character type for the output. */ using char_type = Char; private: OutputIt out_; basic_format_args args_; detail::locale_ref loc_; public: using iterator = OutputIt; using format_arg = basic_format_arg; using parse_context_type = basic_format_parse_context; template using formatter_type = formatter; basic_format_context(basic_format_context&&) = default; basic_format_context(const basic_format_context&) = delete; void operator=(const basic_format_context&) = delete; /** Constructs a ``basic_format_context`` object. 
References to the arguments are stored in the object so make sure they have appropriate lifetimes. */ constexpr basic_format_context( OutputIt out, basic_format_args ctx_args, detail::locale_ref loc = detail::locale_ref()) : out_(out), args_(ctx_args), loc_(loc) {} constexpr auto arg(int id) const -> format_arg { return args_.get(id); } FMT_CONSTEXPR auto arg(basic_string_view name) -> format_arg { return args_.get(name); } FMT_CONSTEXPR auto arg_id(basic_string_view name) -> int { return args_.get_id(name); } auto args() const -> const basic_format_args& { return args_; } FMT_CONSTEXPR auto error_handler() -> detail::error_handler { return {}; } void on_error(const char* message) { error_handler().on_error(message); } // Returns an iterator to the beginning of the output range. FMT_CONSTEXPR auto out() -> iterator { return out_; } // Advances the begin iterator to ``it``. void advance_to(iterator it) { if (!detail::is_back_insert_iterator()) out_ = it; } FMT_CONSTEXPR auto locale() -> detail::locale_ref { return loc_; } }; template using buffer_context = basic_format_context, Char>; using format_context = buffer_context; // Workaround an alias issue: https://stackoverflow.com/q/62767544/471164. #define FMT_BUFFER_CONTEXT(Char) \ basic_format_context, Char> template using is_formattable = bool_constant< !std::is_base_of>().map( std::declval()))>::value && !detail::has_fallback_formatter::value>; /** \rst An array of references to arguments. It can be implicitly converted into `~fmt::basic_format_args` for passing into type-erased formatting functions such as `~fmt::vformat`. \endrst */ template class format_arg_store #if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 // Workaround a GCC template argument substitution bug. : public basic_format_args #endif { private: static const size_t num_args = sizeof...(Args); static const size_t num_named_args = detail::count_named_args(); static const bool is_packed = num_args <= detail::max_packed_args; using value_type = conditional_t, basic_format_arg>; detail::arg_data data_; friend class basic_format_args; static constexpr unsigned long long desc = (is_packed ? detail::encode_types() : detail::is_unpacked_bit | num_args) | (num_named_args != 0 ? static_cast(detail::has_named_args_bit) : 0); public: template FMT_CONSTEXPR FMT_INLINE format_arg_store(T&&... args) : #if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 basic_format_args(*this), #endif data_{detail::make_arg< is_packed, Context, detail::mapped_type_constant, Context>::value>( FMT_FORWARD(args))...} { detail::init_named_args(data_.named_args(), 0, 0, args...); } }; /** \rst Constructs a `~fmt::format_arg_store` object that contains references to arguments and can be implicitly converted to `~fmt::format_args`. `Context` can be omitted in which case it defaults to `~fmt::context`. See `~fmt::arg` for lifetime considerations. \endrst */ template constexpr auto make_format_args(Args&&... args) -> format_arg_store...> { return {FMT_FORWARD(args)...}; } /** \rst Returns a named argument to be used in a formatting function. It should only be used in a call to a formatting function or `dynamic_format_arg_store::push_back`. **Example**:: fmt::print("Elapsed time: {s:.2f} seconds", fmt::arg("s", 1.23)); \endrst */ template inline auto arg(const Char* name, const T& arg) -> detail::named_arg { static_assert(!detail::is_named_arg(), "nested named arguments"); return {name, arg}; } /** \rst A view of a collection of formatting arguments. 
To avoid lifetime issues it should only be used as a parameter type in type-erased functions such as ``vformat``:: void vlog(string_view format_str, format_args args); // OK format_args args = make_format_args(42); // Error: dangling reference \endrst */ template class basic_format_args { public: using size_type = int; using format_arg = basic_format_arg; private: // A descriptor that contains information about formatting arguments. // If the number of arguments is less or equal to max_packed_args then // argument types are passed in the descriptor. This reduces binary code size // per formatting function call. unsigned long long desc_; union { // If is_packed() returns true then argument values are stored in values_; // otherwise they are stored in args_. This is done to improve cache // locality and reduce compiled code size since storing larger objects // may require more code (at least on x86-64) even if the same amount of // data is actually copied to stack. It saves ~10% on the bloat test. const detail::value* values_; const format_arg* args_; }; constexpr auto is_packed() const -> bool { return (desc_ & detail::is_unpacked_bit) == 0; } auto has_named_args() const -> bool { return (desc_ & detail::has_named_args_bit) != 0; } FMT_CONSTEXPR auto type(int index) const -> detail::type { int shift = index * detail::packed_arg_bits; unsigned int mask = (1 << detail::packed_arg_bits) - 1; return static_cast((desc_ >> shift) & mask); } constexpr FMT_INLINE basic_format_args(unsigned long long desc, const detail::value* values) : desc_(desc), values_(values) {} constexpr basic_format_args(unsigned long long desc, const format_arg* args) : desc_(desc), args_(args) {} public: constexpr basic_format_args() : desc_(0), args_(nullptr) {} /** \rst Constructs a `basic_format_args` object from `~fmt::format_arg_store`. \endrst */ template constexpr FMT_INLINE basic_format_args( const format_arg_store& store) : basic_format_args(format_arg_store::desc, store.data_.args()) {} /** \rst Constructs a `basic_format_args` object from `~fmt::dynamic_format_arg_store`. \endrst */ constexpr FMT_INLINE basic_format_args( const dynamic_format_arg_store& store) : basic_format_args(store.get_types(), store.data()) {} /** \rst Constructs a `basic_format_args` object from a dynamic set of arguments. \endrst */ constexpr basic_format_args(const format_arg* args, int count) : basic_format_args(detail::is_unpacked_bit | detail::to_unsigned(count), args) {} /** Returns the argument with the specified id. */ FMT_CONSTEXPR auto get(int id) const -> format_arg { format_arg arg; if (!is_packed()) { if (id < max_size()) arg = args_[id]; return arg; } if (id >= detail::max_packed_args) return arg; arg.type_ = type(id); if (arg.type_ == detail::type::none_type) return arg; arg.value_ = values_[id]; return arg; } template auto get(basic_string_view name) const -> format_arg { int id = get_id(name); return id >= 0 ? get(id) : format_arg(); } template auto get_id(basic_string_view name) const -> int { if (!has_named_args()) return -1; const auto& named_args = (is_packed() ? values_[-1] : args_[-1].value_).named_args; for (size_t i = 0; i < named_args.size; ++i) { if (named_args.data[i].name == name) return named_args.data[i].id; } return -1; } auto max_size() const -> int { unsigned long long max_packed = detail::max_packed_args; return static_cast(is_packed() ? max_packed : desc_ & ~detail::is_unpacked_bit); } }; /** An alias to ``basic_format_args``. 
*/ // A separate type would result in shorter symbols but break ABI compatibility // between clang and gcc on ARM (#1919). using format_args = basic_format_args; // We cannot use enum classes as bit fields because of a gcc bug, so we put them // in namespaces instead (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414). // Additionally, if an underlying type is specified, older gcc incorrectly warns // that the type is too small. Both bugs are fixed in gcc 9.3. #if FMT_GCC_VERSION && FMT_GCC_VERSION < 903 # define FMT_ENUM_UNDERLYING_TYPE(type) #else # define FMT_ENUM_UNDERLYING_TYPE(type) : type #endif namespace align { enum type FMT_ENUM_UNDERLYING_TYPE(unsigned char){none, left, right, center, numeric}; } using align_t = align::type; namespace sign { enum type FMT_ENUM_UNDERLYING_TYPE(unsigned char){none, minus, plus, space}; } using sign_t = sign::type; FMT_BEGIN_DETAIL_NAMESPACE // Workaround an array initialization issue in gcc 4.8. template struct fill_t { private: enum { max_size = 4 }; Char data_[max_size] = {Char(' '), Char(0), Char(0), Char(0)}; unsigned char size_ = 1; public: FMT_CONSTEXPR void operator=(basic_string_view s) { auto size = s.size(); if (size > max_size) return throw_format_error("invalid fill"); for (size_t i = 0; i < size; ++i) data_[i] = s[i]; size_ = static_cast(size); } constexpr auto size() const -> size_t { return size_; } constexpr auto data() const -> const Char* { return data_; } FMT_CONSTEXPR auto operator[](size_t index) -> Char& { return data_[index]; } FMT_CONSTEXPR auto operator[](size_t index) const -> const Char& { return data_[index]; } }; FMT_END_DETAIL_NAMESPACE enum class presentation_type : unsigned char { none, // Integer types should go first, dec, // 'd' oct, // 'o' hex_lower, // 'x' hex_upper, // 'X' bin_lower, // 'b' bin_upper, // 'B' hexfloat_lower, // 'a' hexfloat_upper, // 'A' exp_lower, // 'e' exp_upper, // 'E' fixed_lower, // 'f' fixed_upper, // 'F' general_lower, // 'g' general_upper, // 'G' chr, // 'c' string, // 's' pointer, // 'p' debug // '?' }; // Format specifiers for built-in and string types. template struct basic_format_specs { int width; int precision; presentation_type type; align_t align : 4; sign_t sign : 3; bool alt : 1; // Alternate form ('#'). bool localized : 1; detail::fill_t fill; constexpr basic_format_specs() : width(0), precision(-1), type(presentation_type::none), align(align::none), sign(sign::none), alt(false), localized(false) {} }; using format_specs = basic_format_specs; FMT_BEGIN_DETAIL_NAMESPACE enum class arg_id_kind { none, index, name }; // An argument reference. template struct arg_ref { FMT_CONSTEXPR arg_ref() : kind(arg_id_kind::none), val() {} FMT_CONSTEXPR explicit arg_ref(int index) : kind(arg_id_kind::index), val(index) {} FMT_CONSTEXPR explicit arg_ref(basic_string_view name) : kind(arg_id_kind::name), val(name) {} FMT_CONSTEXPR auto operator=(int idx) -> arg_ref& { kind = arg_id_kind::index; val.index = idx; return *this; } arg_id_kind kind; union value { FMT_CONSTEXPR value(int id = 0) : index{id} {} FMT_CONSTEXPR value(basic_string_view n) : name(n) {} int index; basic_string_view name; } val; }; // Format specifiers with width and precision resolved at formatting rather // than parsing time to allow re-using the same parsed specifiers with // different sets of arguments (precompilation of format strings). 
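// [Editor's note: illustrative sketch, not part of the upstream header.]
// How a replacement field maps onto basic_format_specs above: in "{:<+10.3f}"
// the parser records align::left, sign::plus, width = 10, precision = 3 and
// type = presentation_type::fixed_lower. When width or precision is given via
// a nested replacement field, e.g. "{:{}.{}f}", the values are not known at
// parse time; dynamic_format_specs below keeps arg_refs to the width/precision
// arguments so they can be resolved during formatting:
//
//   auto s = fmt::format("{:{}.{}f}", 3.14159, 10, 3);   // "     3.142"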
template struct dynamic_format_specs : basic_format_specs { arg_ref width_ref; arg_ref precision_ref; }; struct auto_id {}; // A format specifier handler that sets fields in basic_format_specs. template class specs_setter { protected: basic_format_specs& specs_; public: explicit FMT_CONSTEXPR specs_setter(basic_format_specs& specs) : specs_(specs) {} FMT_CONSTEXPR specs_setter(const specs_setter& other) : specs_(other.specs_) {} FMT_CONSTEXPR void on_align(align_t align) { specs_.align = align; } FMT_CONSTEXPR void on_fill(basic_string_view fill) { specs_.fill = fill; } FMT_CONSTEXPR void on_sign(sign_t s) { specs_.sign = s; } FMT_CONSTEXPR void on_hash() { specs_.alt = true; } FMT_CONSTEXPR void on_localized() { specs_.localized = true; } FMT_CONSTEXPR void on_zero() { if (specs_.align == align::none) specs_.align = align::numeric; specs_.fill[0] = Char('0'); } FMT_CONSTEXPR void on_width(int width) { specs_.width = width; } FMT_CONSTEXPR void on_precision(int precision) { specs_.precision = precision; } FMT_CONSTEXPR void end_precision() {} FMT_CONSTEXPR void on_type(presentation_type type) { specs_.type = type; } }; // Format spec handler that saves references to arguments representing dynamic // width and precision to be resolved at formatting time. template class dynamic_specs_handler : public specs_setter { public: using char_type = typename ParseContext::char_type; FMT_CONSTEXPR dynamic_specs_handler(dynamic_format_specs& specs, ParseContext& ctx) : specs_setter(specs), specs_(specs), context_(ctx) {} FMT_CONSTEXPR dynamic_specs_handler(const dynamic_specs_handler& other) : specs_setter(other), specs_(other.specs_), context_(other.context_) {} template FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { specs_.width_ref = make_arg_ref(arg_id); } template FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { specs_.precision_ref = make_arg_ref(arg_id); } FMT_CONSTEXPR void on_error(const char* message) { context_.on_error(message); } private: dynamic_format_specs& specs_; ParseContext& context_; using arg_ref_type = arg_ref; FMT_CONSTEXPR auto make_arg_ref(int arg_id) -> arg_ref_type { context_.check_arg_id(arg_id); context_.check_dynamic_spec(arg_id); return arg_ref_type(arg_id); } FMT_CONSTEXPR auto make_arg_ref(auto_id) -> arg_ref_type { int arg_id = context_.next_arg_id(); context_.check_dynamic_spec(arg_id); return arg_ref_type(arg_id); } FMT_CONSTEXPR auto make_arg_ref(basic_string_view arg_id) -> arg_ref_type { context_.check_arg_id(arg_id); basic_string_view format_str( context_.begin(), to_unsigned(context_.end() - context_.begin())); return arg_ref_type(arg_id); } }; template constexpr bool is_ascii_letter(Char c) { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); } // Converts a character to ASCII. Returns a number > 127 on conversion failure. template ::value)> constexpr auto to_ascii(Char c) -> Char { return c; } template ::value)> constexpr auto to_ascii(Char c) -> underlying_t { return c; } FMT_CONSTEXPR inline auto code_point_length_impl(char c) -> int { return "\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\0\0\0\0\0\0\0\2\2\2\2\3\3\4" [static_cast(c) >> 3]; } template FMT_CONSTEXPR auto code_point_length(const Char* begin) -> int { if (const_check(sizeof(Char) != 1)) return 1; int len = code_point_length_impl(static_cast(*begin)); // Compute the pointer to the next character early so that the next // iteration can start working on the next character. Neither Clang // nor GCC figure out this reordering on their own. 
return len + !len; } // Return the result via the out param to workaround gcc bug 77539. template FMT_CONSTEXPR auto find(Ptr first, Ptr last, T value, Ptr& out) -> bool { for (out = first; out != last; ++out) { if (*out == value) return true; } return false; } template <> inline auto find(const char* first, const char* last, char value, const char*& out) -> bool { out = static_cast( std::memchr(first, value, to_unsigned(last - first))); return out != nullptr; } // Parses the range [begin, end) as an unsigned integer. This function assumes // that the range is non-empty and the first character is a digit. template FMT_CONSTEXPR auto parse_nonnegative_int(const Char*& begin, const Char* end, int error_value) noexcept -> int { FMT_ASSERT(begin != end && '0' <= *begin && *begin <= '9', ""); unsigned value = 0, prev = 0; auto p = begin; do { prev = value; value = value * 10 + unsigned(*p - '0'); ++p; } while (p != end && '0' <= *p && *p <= '9'); auto num_digits = p - begin; begin = p; if (num_digits <= std::numeric_limits::digits10) return static_cast(value); // Check for overflow. const unsigned max = to_unsigned((std::numeric_limits::max)()); return num_digits == std::numeric_limits::digits10 + 1 && prev * 10ull + unsigned(p[-1] - '0') <= max ? static_cast(value) : error_value; } // Parses fill and alignment. template FMT_CONSTEXPR auto parse_align(const Char* begin, const Char* end, Handler&& handler) -> const Char* { FMT_ASSERT(begin != end, ""); auto align = align::none; auto p = begin + code_point_length(begin); if (end - p <= 0) p = begin; for (;;) { switch (to_ascii(*p)) { case '<': align = align::left; break; case '>': align = align::right; break; case '^': align = align::center; break; default: break; } if (align != align::none) { if (p != begin) { auto c = *begin; if (c == '{') return handler.on_error("invalid fill character '{'"), begin; handler.on_fill(basic_string_view(begin, to_unsigned(p - begin))); begin = p + 1; } else ++begin; handler.on_align(align); break; } else if (p == begin) { break; } p = begin; } return begin; } template FMT_CONSTEXPR bool is_name_start(Char c) { return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || '_' == c; } template FMT_CONSTEXPR auto do_parse_arg_id(const Char* begin, const Char* end, IDHandler&& handler) -> const Char* { FMT_ASSERT(begin != end, ""); Char c = *begin; if (c >= '0' && c <= '9') { int index = 0; if (c != '0') index = parse_nonnegative_int(begin, end, (std::numeric_limits::max)()); else ++begin; if (begin == end || (*begin != '}' && *begin != ':')) handler.on_error("invalid format string"); else handler(index); return begin; } if (!is_name_start(c)) { handler.on_error("invalid format string"); return begin; } auto it = begin; do { ++it; } while (it != end && (is_name_start(c = *it) || ('0' <= c && c <= '9'))); handler(basic_string_view(begin, to_unsigned(it - begin))); return it; } template FMT_CONSTEXPR FMT_INLINE auto parse_arg_id(const Char* begin, const Char* end, IDHandler&& handler) -> const Char* { Char c = *begin; if (c != '}' && c != ':') return do_parse_arg_id(begin, end, handler); handler(); return begin; } template FMT_CONSTEXPR auto parse_width(const Char* begin, const Char* end, Handler&& handler) -> const Char* { using detail::auto_id; struct width_adapter { Handler& handler; FMT_CONSTEXPR void operator()() { handler.on_dynamic_width(auto_id()); } FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_width(id); } FMT_CONSTEXPR void operator()(basic_string_view id) { handler.on_dynamic_width(id); } 
FMT_CONSTEXPR void on_error(const char* message) { if (message) handler.on_error(message); } }; FMT_ASSERT(begin != end, ""); if ('0' <= *begin && *begin <= '9') { int width = parse_nonnegative_int(begin, end, -1); if (width != -1) handler.on_width(width); else handler.on_error("number is too big"); } else if (*begin == '{') { ++begin; if (begin != end) begin = parse_arg_id(begin, end, width_adapter{handler}); if (begin == end || *begin != '}') return handler.on_error("invalid format string"), begin; ++begin; } return begin; } template FMT_CONSTEXPR auto parse_precision(const Char* begin, const Char* end, Handler&& handler) -> const Char* { using detail::auto_id; struct precision_adapter { Handler& handler; FMT_CONSTEXPR void operator()() { handler.on_dynamic_precision(auto_id()); } FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_precision(id); } FMT_CONSTEXPR void operator()(basic_string_view id) { handler.on_dynamic_precision(id); } FMT_CONSTEXPR void on_error(const char* message) { if (message) handler.on_error(message); } }; ++begin; auto c = begin != end ? *begin : Char(); if ('0' <= c && c <= '9') { auto precision = parse_nonnegative_int(begin, end, -1); if (precision != -1) handler.on_precision(precision); else handler.on_error("number is too big"); } else if (c == '{') { ++begin; if (begin != end) begin = parse_arg_id(begin, end, precision_adapter{handler}); if (begin == end || *begin++ != '}') return handler.on_error("invalid format string"), begin; } else { return handler.on_error("missing precision specifier"), begin; } handler.end_precision(); return begin; } template FMT_CONSTEXPR auto parse_presentation_type(Char type) -> presentation_type { switch (to_ascii(type)) { case 'd': return presentation_type::dec; case 'o': return presentation_type::oct; case 'x': return presentation_type::hex_lower; case 'X': return presentation_type::hex_upper; case 'b': return presentation_type::bin_lower; case 'B': return presentation_type::bin_upper; case 'a': return presentation_type::hexfloat_lower; case 'A': return presentation_type::hexfloat_upper; case 'e': return presentation_type::exp_lower; case 'E': return presentation_type::exp_upper; case 'f': return presentation_type::fixed_lower; case 'F': return presentation_type::fixed_upper; case 'g': return presentation_type::general_lower; case 'G': return presentation_type::general_upper; case 'c': return presentation_type::chr; case 's': return presentation_type::string; case 'p': return presentation_type::pointer; case '?': return presentation_type::debug; default: return presentation_type::none; } } // Parses standard format specifiers and sends notifications about parsed // components to handler. template FMT_CONSTEXPR FMT_INLINE auto parse_format_specs(const Char* begin, const Char* end, SpecHandler&& handler) -> const Char* { if (1 < end - begin && begin[1] == '}' && is_ascii_letter(*begin) && *begin != 'L') { presentation_type type = parse_presentation_type(*begin++); if (type == presentation_type::none) handler.on_error("invalid type specifier"); handler.on_type(type); return begin; } if (begin == end) return begin; begin = parse_align(begin, end, handler); if (begin == end) return begin; // Parse sign. 
switch (to_ascii(*begin)) { case '+': handler.on_sign(sign::plus); ++begin; break; case '-': handler.on_sign(sign::minus); ++begin; break; case ' ': handler.on_sign(sign::space); ++begin; break; default: break; } if (begin == end) return begin; if (*begin == '#') { handler.on_hash(); if (++begin == end) return begin; } // Parse zero flag. if (*begin == '0') { handler.on_zero(); if (++begin == end) return begin; } begin = parse_width(begin, end, handler); if (begin == end) return begin; // Parse precision. if (*begin == '.') { begin = parse_precision(begin, end, handler); if (begin == end) return begin; } if (*begin == 'L') { handler.on_localized(); ++begin; } // Parse type. if (begin != end && *begin != '}') { presentation_type type = parse_presentation_type(*begin++); if (type == presentation_type::none) handler.on_error("invalid type specifier"); handler.on_type(type); } return begin; } template FMT_CONSTEXPR auto parse_replacement_field(const Char* begin, const Char* end, Handler&& handler) -> const Char* { struct id_adapter { Handler& handler; int arg_id; FMT_CONSTEXPR void operator()() { arg_id = handler.on_arg_id(); } FMT_CONSTEXPR void operator()(int id) { arg_id = handler.on_arg_id(id); } FMT_CONSTEXPR void operator()(basic_string_view id) { arg_id = handler.on_arg_id(id); } FMT_CONSTEXPR void on_error(const char* message) { if (message) handler.on_error(message); } }; ++begin; if (begin == end) return handler.on_error("invalid format string"), end; if (*begin == '}') { handler.on_replacement_field(handler.on_arg_id(), begin); } else if (*begin == '{') { handler.on_text(begin, begin + 1); } else { auto adapter = id_adapter{handler, 0}; begin = parse_arg_id(begin, end, adapter); Char c = begin != end ? *begin : Char(); if (c == '}') { handler.on_replacement_field(adapter.arg_id, begin); } else if (c == ':') { begin = handler.on_format_specs(adapter.arg_id, begin + 1, end); if (begin == end || *begin != '}') return handler.on_error("unknown format specifier"), end; } else { return handler.on_error("missing '}' in format string"), end; } } return begin + 1; } template FMT_CONSTEXPR FMT_INLINE void parse_format_string( basic_string_view format_str, Handler&& handler) { // Workaround a name-lookup bug in MSVC's modules implementation. using detail::find; auto begin = format_str.data(); auto end = begin + format_str.size(); if (end - begin < 32) { // Use a simple loop instead of memchr for small strings. const Char* p = begin; while (p != end) { auto c = *p++; if (c == '{') { handler.on_text(begin, p - 1); begin = p = parse_replacement_field(p - 1, end, handler); } else if (c == '}') { if (p == end || *p != '}') return handler.on_error("unmatched '}' in format string"); handler.on_text(begin, p); begin = ++p; } } handler.on_text(begin, end); return; } struct writer { FMT_CONSTEXPR void operator()(const Char* from, const Char* to) { if (from == to) return; for (;;) { const Char* p = nullptr; if (!find(from, to, Char('}'), p)) return handler_.on_text(from, to); ++p; if (p == to || *p != '}') return handler_.on_error("unmatched '}' in format string"); handler_.on_text(from, p); from = p + 1; } } Handler& handler_; } write = {handler}; while (begin != end) { // Doing two passes with memchr (one for '{' and another for '}') is up to // 2.5x faster than the naive one-pass implementation on big format strings. 
const Char* p = begin; if (*begin != '{' && !find(begin + 1, end, Char('{'), p)) return write(begin, end); write(begin, p); begin = parse_replacement_field(p, end, handler); } } template ::value> struct strip_named_arg { using type = T; }; template struct strip_named_arg { using type = remove_cvref_t; }; template FMT_CONSTEXPR auto parse_format_specs(ParseContext& ctx) -> decltype(ctx.begin()) { using char_type = typename ParseContext::char_type; using context = buffer_context; using stripped_type = typename strip_named_arg::type; using mapped_type = conditional_t< mapped_type_constant::value != type::custom_type, decltype(arg_mapper().map(std::declval())), stripped_type>; auto f = conditional_t::value, formatter, fallback_formatter>(); return f.parse(ctx); } template FMT_CONSTEXPR void check_int_type_spec(presentation_type type, ErrorHandler&& eh) { if (type > presentation_type::bin_upper && type != presentation_type::chr) eh.on_error("invalid type specifier"); } // Checks char specs and returns true if the type spec is char (and not int). template FMT_CONSTEXPR auto check_char_specs(const basic_format_specs& specs, ErrorHandler&& eh = {}) -> bool { if (specs.type != presentation_type::none && specs.type != presentation_type::chr && specs.type != presentation_type::debug) { check_int_type_spec(specs.type, eh); return false; } if (specs.align == align::numeric || specs.sign != sign::none || specs.alt) eh.on_error("invalid format specifier for char"); return true; } // A floating-point presentation format. enum class float_format : unsigned char { general, // General: exponent notation or fixed point based on magnitude. exp, // Exponent notation with the default precision of 6, e.g. 1.2e-3. fixed, // Fixed point with the default precision of 6, e.g. 0.0012. 
hex }; struct float_specs { int precision; float_format format : 8; sign_t sign : 8; bool upper : 1; bool locale : 1; bool binary32 : 1; bool showpoint : 1; }; template FMT_CONSTEXPR auto parse_float_type_spec(const basic_format_specs& specs, ErrorHandler&& eh = {}) -> float_specs { auto result = float_specs(); result.showpoint = specs.alt; result.locale = specs.localized; switch (specs.type) { case presentation_type::none: result.format = float_format::general; break; case presentation_type::general_upper: result.upper = true; FMT_FALLTHROUGH; case presentation_type::general_lower: result.format = float_format::general; break; case presentation_type::exp_upper: result.upper = true; FMT_FALLTHROUGH; case presentation_type::exp_lower: result.format = float_format::exp; result.showpoint |= specs.precision != 0; break; case presentation_type::fixed_upper: result.upper = true; FMT_FALLTHROUGH; case presentation_type::fixed_lower: result.format = float_format::fixed; result.showpoint |= specs.precision != 0; break; case presentation_type::hexfloat_upper: result.upper = true; FMT_FALLTHROUGH; case presentation_type::hexfloat_lower: result.format = float_format::hex; break; default: eh.on_error("invalid type specifier"); break; } return result; } template FMT_CONSTEXPR auto check_cstring_type_spec(presentation_type type, ErrorHandler&& eh = {}) -> bool { if (type == presentation_type::none || type == presentation_type::string || type == presentation_type::debug) return true; if (type != presentation_type::pointer) eh.on_error("invalid type specifier"); return false; } template FMT_CONSTEXPR void check_string_type_spec(presentation_type type, ErrorHandler&& eh = {}) { if (type != presentation_type::none && type != presentation_type::string && type != presentation_type::debug) eh.on_error("invalid type specifier"); } template FMT_CONSTEXPR void check_pointer_type_spec(presentation_type type, ErrorHandler&& eh) { if (type != presentation_type::none && type != presentation_type::pointer) eh.on_error("invalid type specifier"); } // A parse_format_specs handler that checks if specifiers are consistent with // the argument type. 
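// [Editor's note: illustrative sketch, not part of the upstream header.]
// specs_checker below cross-checks parsed specifiers against the mapped
// argument type. A couple of combinations it rejects (at compile time when the
// format string is checked, otherwise by throwing fmt::format_error):
//
//   // fmt::format("{:+}", "str");  // "format specifier requires numeric argument"
//   // fmt::format("{:+}", 42u);    // "format specifier requires signed argument"
//   auto ok = fmt::format("{:+}", 42);   // "+42"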
template class specs_checker : public Handler { private: detail::type arg_type_; FMT_CONSTEXPR void require_numeric_argument() { if (!is_arithmetic_type(arg_type_)) this->on_error("format specifier requires numeric argument"); } public: FMT_CONSTEXPR specs_checker(const Handler& handler, detail::type arg_type) : Handler(handler), arg_type_(arg_type) {} FMT_CONSTEXPR void on_align(align_t align) { if (align == align::numeric) require_numeric_argument(); Handler::on_align(align); } FMT_CONSTEXPR void on_sign(sign_t s) { require_numeric_argument(); if (is_integral_type(arg_type_) && arg_type_ != type::int_type && arg_type_ != type::long_long_type && arg_type_ != type::int128_type && arg_type_ != type::char_type) { this->on_error("format specifier requires signed argument"); } Handler::on_sign(s); } FMT_CONSTEXPR void on_hash() { require_numeric_argument(); Handler::on_hash(); } FMT_CONSTEXPR void on_localized() { require_numeric_argument(); Handler::on_localized(); } FMT_CONSTEXPR void on_zero() { require_numeric_argument(); Handler::on_zero(); } FMT_CONSTEXPR void end_precision() { if (is_integral_type(arg_type_) || arg_type_ == type::pointer_type) this->on_error("precision not allowed for this argument type"); } }; constexpr int invalid_arg_index = -1; #if FMT_USE_NONTYPE_TEMPLATE_ARGS template constexpr auto get_arg_index_by_name(basic_string_view name) -> int { if constexpr (detail::is_statically_named_arg()) { if (name == T::name) return N; } if constexpr (sizeof...(Args) > 0) return get_arg_index_by_name(name); (void)name; // Workaround an MSVC bug about "unused" parameter. return invalid_arg_index; } #endif template FMT_CONSTEXPR auto get_arg_index_by_name(basic_string_view name) -> int { #if FMT_USE_NONTYPE_TEMPLATE_ARGS if constexpr (sizeof...(Args) > 0) return get_arg_index_by_name<0, Args...>(name); #endif (void)name; return invalid_arg_index; } template class format_string_checker { private: // In the future basic_format_parse_context will replace compile_parse_context // here and will use is_constant_evaluated and downcasting to access the data // needed for compile-time checks: https://godbolt.org/z/GvWzcTjh1. using parse_context_type = compile_parse_context; static constexpr int num_args = sizeof...(Args); // Format specifier parsing function. using parse_func = const Char* (*)(parse_context_type&); parse_context_type context_; parse_func parse_funcs_[num_args > 0 ? static_cast(num_args) : 1]; type types_[num_args > 0 ? 
static_cast(num_args) : 1]; public: explicit FMT_CONSTEXPR format_string_checker( basic_string_view format_str, ErrorHandler eh) : context_(format_str, num_args, types_, eh), parse_funcs_{&parse_format_specs...}, types_{ mapped_type_constant>::value...} { } FMT_CONSTEXPR void on_text(const Char*, const Char*) {} FMT_CONSTEXPR auto on_arg_id() -> int { return context_.next_arg_id(); } FMT_CONSTEXPR auto on_arg_id(int id) -> int { return context_.check_arg_id(id), id; } FMT_CONSTEXPR auto on_arg_id(basic_string_view id) -> int { #if FMT_USE_NONTYPE_TEMPLATE_ARGS auto index = get_arg_index_by_name(id); if (index == invalid_arg_index) on_error("named argument is not found"); return context_.check_arg_id(index), index; #else (void)id; on_error("compile-time checks for named arguments require C++20 support"); return 0; #endif } FMT_CONSTEXPR void on_replacement_field(int, const Char*) {} FMT_CONSTEXPR auto on_format_specs(int id, const Char* begin, const Char*) -> const Char* { context_.advance_to(context_.begin() + (begin - &*context_.begin())); // id >= 0 check is a workaround for gcc 10 bug (#2065). return id >= 0 && id < num_args ? parse_funcs_[id](context_) : begin; } FMT_CONSTEXPR void on_error(const char* message) { context_.on_error(message); } }; // Reports a compile-time error if S is not a valid format string. template ::value)> FMT_INLINE void check_format_string(const S&) { #ifdef FMT_ENFORCE_COMPILE_STRING static_assert(is_compile_string::value, "FMT_ENFORCE_COMPILE_STRING requires all format strings to use " "FMT_STRING."); #endif } template ::value)> void check_format_string(S format_str) { FMT_CONSTEXPR auto s = basic_string_view(format_str); using checker = format_string_checker...>; FMT_CONSTEXPR bool invalid_format = (parse_format_string(s, checker(s, {})), true); ignore_unused(invalid_format); } template void vformat_to( buffer& buf, basic_string_view fmt, basic_format_args)> args, locale_ref loc = {}); FMT_API void vprint_mojibake(std::FILE*, string_view, format_args); #ifndef _WIN32 inline void vprint_mojibake(std::FILE*, string_view, format_args) {} #endif FMT_END_DETAIL_NAMESPACE // A formatter specialization for the core types corresponding to detail::type // constants. template struct formatter::value != detail::type::custom_type>> { private: detail::dynamic_format_specs specs_; public: // Parses format specifiers stopping either at the end of the range or at the // terminating '}'. 
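// Illustrative example: for a double argument formatted with "{:>8.2f}",
// the range handed to parse() is ">8.2f"; it is parsed into specs_
// (right alignment, width 8, precision 2, fixed presentation) and then
// validated against the argument type by the switch below
// (parse_float_type_spec for the floating-point case).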
template FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { auto begin = ctx.begin(), end = ctx.end(); if (begin == end) return begin; using handler_type = detail::dynamic_specs_handler; auto type = detail::type_constant::value; auto checker = detail::specs_checker(handler_type(specs_, ctx), type); auto it = detail::parse_format_specs(begin, end, checker); auto eh = ctx.error_handler(); switch (type) { case detail::type::none_type: FMT_ASSERT(false, "invalid argument type"); break; case detail::type::bool_type: if (specs_.type == presentation_type::none || specs_.type == presentation_type::string) { break; } FMT_FALLTHROUGH; case detail::type::int_type: case detail::type::uint_type: case detail::type::long_long_type: case detail::type::ulong_long_type: case detail::type::int128_type: case detail::type::uint128_type: detail::check_int_type_spec(specs_.type, eh); break; case detail::type::char_type: detail::check_char_specs(specs_, eh); break; case detail::type::float_type: if (detail::const_check(FMT_USE_FLOAT)) detail::parse_float_type_spec(specs_, eh); else FMT_ASSERT(false, "float support disabled"); break; case detail::type::double_type: if (detail::const_check(FMT_USE_DOUBLE)) detail::parse_float_type_spec(specs_, eh); else FMT_ASSERT(false, "double support disabled"); break; case detail::type::long_double_type: if (detail::const_check(FMT_USE_LONG_DOUBLE)) detail::parse_float_type_spec(specs_, eh); else FMT_ASSERT(false, "long double support disabled"); break; case detail::type::cstring_type: detail::check_cstring_type_spec(specs_.type, eh); break; case detail::type::string_type: detail::check_string_type_spec(specs_.type, eh); break; case detail::type::pointer_type: detail::check_pointer_type_spec(specs_.type, eh); break; case detail::type::custom_type: // Custom format specifiers are checked in parse functions of // formatter specializations. break; } return it; } template ::value, enable_if_t<(U == detail::type::string_type || U == detail::type::cstring_type || U == detail::type::char_type), int> = 0> FMT_CONSTEXPR void set_debug_format() { specs_.type = presentation_type::debug; } template FMT_CONSTEXPR auto format(const T& val, FormatContext& ctx) const -> decltype(ctx.out()); }; #define FMT_FORMAT_AS(Type, Base) \ template \ struct formatter : formatter { \ template \ auto format(Type const& val, FormatContext& ctx) const \ -> decltype(ctx.out()) { \ return formatter::format(static_cast(val), ctx); \ } \ } FMT_FORMAT_AS(signed char, int); FMT_FORMAT_AS(unsigned char, unsigned); FMT_FORMAT_AS(short, int); FMT_FORMAT_AS(unsigned short, unsigned); FMT_FORMAT_AS(long, long long); FMT_FORMAT_AS(unsigned long, unsigned long long); FMT_FORMAT_AS(Char*, const Char*); FMT_FORMAT_AS(std::basic_string, basic_string_view); FMT_FORMAT_AS(std::nullptr_t, const void*); FMT_FORMAT_AS(detail::std_string_view, basic_string_view); template struct basic_runtime { basic_string_view str; }; /** A compile-time format string. 
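    When consteval is available (FMT_HAS_CONSTEVAL) the constructor checks
    the format string against the argument types at compile time; otherwise
    it falls back to detail::check_format_string (see the constructor below).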
*/ template class basic_format_string { private: basic_string_view str_; public: template >::value)> FMT_CONSTEVAL FMT_INLINE basic_format_string(const S& s) : str_(s) { static_assert( detail::count< (std::is_base_of>::value && std::is_reference::value)...>() == 0, "passing views as lvalues is disallowed"); #ifdef FMT_HAS_CONSTEVAL if constexpr (detail::count_named_args() == detail::count_statically_named_args()) { using checker = detail::format_string_checker...>; detail::parse_format_string(str_, checker(s, {})); } #else detail::check_format_string(s); #endif } basic_format_string(basic_runtime r) : str_(r.str) {} FMT_INLINE operator basic_string_view() const { return str_; } }; #if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 // Workaround broken conversion on older gcc. template using format_string = string_view; inline auto runtime(string_view s) -> string_view { return s; } #else template using format_string = basic_format_string...>; /** \rst Creates a runtime format string. **Example**:: // Check format string at runtime instead of compile-time. fmt::print(fmt::runtime("{:d}"), "I am not a number"); \endrst */ inline auto runtime(string_view s) -> basic_runtime { return {{s}}; } #endif FMT_API auto vformat(string_view fmt, format_args args) -> std::string; /** \rst Formats ``args`` according to specifications in ``fmt`` and returns the result as a string. **Example**:: #include std::string message = fmt::format("The answer is {}.", 42); \endrst */ template FMT_NODISCARD FMT_INLINE auto format(format_string fmt, T&&... args) -> std::string { return vformat(fmt, fmt::make_format_args(args...)); } /** Formats a string and writes the output to ``out``. */ template ::value)> auto vformat_to(OutputIt out, string_view fmt, format_args args) -> OutputIt { using detail::get_buffer; auto&& buf = get_buffer(out); detail::vformat_to(buf, fmt, args, {}); return detail::get_iterator(buf); } /** \rst Formats ``args`` according to specifications in ``fmt``, writes the result to the output iterator ``out`` and returns the iterator past the end of the output range. `format_to` does not append a terminating null character. **Example**:: auto out = std::vector(); fmt::format_to(std::back_inserter(out), "{}", 42); \endrst */ template ::value)> FMT_INLINE auto format_to(OutputIt out, format_string fmt, T&&... args) -> OutputIt { return vformat_to(out, fmt, fmt::make_format_args(args...)); } template struct format_to_n_result { /** Iterator past the end of the output range. */ OutputIt out; /** Total (not truncated) output size. */ size_t size; }; template ::value)> auto vformat_to_n(OutputIt out, size_t n, string_view fmt, format_args args) -> format_to_n_result { using traits = detail::fixed_buffer_traits; auto buf = detail::iterator_buffer(out, n); detail::vformat_to(buf, fmt, args, {}); return {buf.out(), buf.count()}; } /** \rst Formats ``args`` according to specifications in ``fmt``, writes up to ``n`` characters of the result to the output iterator ``out`` and returns the total (not truncated) output size and the iterator past the end of the output range. `format_to_n` does not append a terminating null character. \endrst */ template ::value)> FMT_INLINE auto format_to_n(OutputIt out, size_t n, format_string fmt, T&&... args) -> format_to_n_result { return vformat_to_n(out, n, fmt, fmt::make_format_args(args...)); } /** Returns the number of chars in the output of ``format(fmt, args...)``. */ template FMT_NODISCARD FMT_INLINE auto formatted_size(format_string fmt, T&&... 
args) -> size_t { auto buf = detail::counting_buffer<>(); detail::vformat_to(buf, string_view(fmt), fmt::make_format_args(args...), {}); return buf.count(); } FMT_API void vprint(string_view fmt, format_args args); FMT_API void vprint(std::FILE* f, string_view fmt, format_args args); /** \rst Formats ``args`` according to specifications in ``fmt`` and writes the output to ``stdout``. **Example**:: fmt::print("Elapsed time: {0:.2f} seconds", 1.23); \endrst */ template FMT_INLINE void print(format_string fmt, T&&... args) { const auto& vargs = fmt::make_format_args(args...); return detail::is_utf8() ? vprint(fmt, vargs) : detail::vprint_mojibake(stdout, fmt, vargs); } /** \rst Formats ``args`` according to specifications in ``fmt`` and writes the output to the file ``f``. **Example**:: fmt::print(stderr, "Don't {}!", "panic"); \endrst */ template FMT_INLINE void print(std::FILE* f, format_string fmt, T&&... args) { const auto& vargs = fmt::make_format_args(args...); return detail::is_utf8() ? vprint(f, fmt, vargs) : detail::vprint_mojibake(f, fmt, vargs); } FMT_MODULE_EXPORT_END FMT_GCC_PRAGMA("GCC pop_options") FMT_END_NAMESPACE #ifdef FMT_HEADER_ONLY # include "format.h" #endif #endif // FMT_CORE_H_ mergerfs-2.40.2/libfuse/lib/fmt/format-inl.h000066400000000000000000002210401457016576200206330ustar00rootroot00000000000000// Formatting library for C++ - implementation // // Copyright (c) 2012 - 2016, Victor Zverovich // All rights reserved. // // For the license information refer to format.h. #ifndef FMT_FORMAT_INL_H_ #define FMT_FORMAT_INL_H_ #include #include #include // errno #include #include #include #include // std::memmove #include #include #ifndef FMT_STATIC_THOUSANDS_SEPARATOR # include #endif #ifdef _WIN32 # include // _isatty #endif #include "format.h" FMT_BEGIN_NAMESPACE namespace detail { FMT_FUNC void assert_fail(const char* file, int line, const char* message) { // Use unchecked std::fprintf to avoid triggering another assertion when // writing to stderr fails std::fprintf(stderr, "%s:%d: assertion failed: %s", file, line, message); // Chosen instead of std::abort to satisfy Clang in CUDA mode during device // code pass. std::terminate(); } FMT_FUNC void throw_format_error(const char* message) { FMT_THROW(format_error(message)); } FMT_FUNC void format_error_code(detail::buffer& out, int error_code, string_view message) noexcept { // Report error code making sure that the output fits into // inline_buffer_size to avoid dynamic memory allocation and potential // bad_alloc. out.try_resize(0); static const char SEP[] = ": "; static const char ERROR_STR[] = "error "; // Subtract 2 to account for terminating null characters in SEP and ERROR_STR. size_t error_code_size = sizeof(SEP) + sizeof(ERROR_STR) - 2; auto abs_value = static_cast>(error_code); if (detail::is_negative(error_code)) { abs_value = 0 - abs_value; ++error_code_size; } error_code_size += detail::to_unsigned(detail::count_digits(abs_value)); auto it = buffer_appender(out); if (message.size() <= inline_buffer_size - error_code_size) format_to(it, FMT_STRING("{}{}"), message, SEP); format_to(it, FMT_STRING("{}{}"), ERROR_STR, error_code); FMT_ASSERT(out.size() <= inline_buffer_size, ""); } FMT_FUNC void report_error(format_func func, int error_code, const char* message) noexcept { memory_buffer full_message; func(full_message, error_code, message); // Don't use fwrite_fully because the latter may throw. 
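// A short write (fwrite returning 0) is ignored here; the trailing newline
// is only emitted when the message itself was written.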
if (std::fwrite(full_message.data(), full_message.size(), 1, stderr) > 0) std::fputc('\n', stderr); } // A wrapper around fwrite that throws on error. inline void fwrite_fully(const void* ptr, size_t size, size_t count, FILE* stream) { size_t written = std::fwrite(ptr, size, count, stream); if (written < count) FMT_THROW(system_error(errno, FMT_STRING("cannot write to file"))); } #ifndef FMT_STATIC_THOUSANDS_SEPARATOR template locale_ref::locale_ref(const Locale& loc) : locale_(&loc) { static_assert(std::is_same::value, ""); } template Locale locale_ref::get() const { static_assert(std::is_same::value, ""); return locale_ ? *static_cast(locale_) : std::locale(); } template FMT_FUNC auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result { auto& facet = std::use_facet>(loc.get()); auto grouping = facet.grouping(); auto thousands_sep = grouping.empty() ? Char() : facet.thousands_sep(); return {std::move(grouping), thousands_sep}; } template FMT_FUNC Char decimal_point_impl(locale_ref loc) { return std::use_facet>(loc.get()) .decimal_point(); } #else template FMT_FUNC auto thousands_sep_impl(locale_ref) -> thousands_sep_result { return {"\03", FMT_STATIC_THOUSANDS_SEPARATOR}; } template FMT_FUNC Char decimal_point_impl(locale_ref) { return '.'; } #endif } // namespace detail #if !FMT_MSC_VERSION FMT_API FMT_FUNC format_error::~format_error() noexcept = default; #endif FMT_FUNC std::system_error vsystem_error(int error_code, string_view format_str, format_args args) { auto ec = std::error_code(error_code, std::generic_category()); return std::system_error(ec, vformat(format_str, args)); } namespace detail { template inline bool operator==(basic_fp x, basic_fp y) { return x.f == y.f && x.e == y.e; } // Compilers should be able to optimize this into the ror instruction. FMT_CONSTEXPR inline uint32_t rotr(uint32_t n, uint32_t r) noexcept { r &= 31; return (n >> r) | (n << (32 - r)); } FMT_CONSTEXPR inline uint64_t rotr(uint64_t n, uint32_t r) noexcept { r &= 63; return (n >> r) | (n << (64 - r)); } // Computes 128-bit result of multiplication of two 64-bit unsigned integers. inline uint128_fallback umul128(uint64_t x, uint64_t y) noexcept { #if FMT_USE_INT128 auto p = static_cast(x) * static_cast(y); return {static_cast(p >> 64), static_cast(p)}; #elif defined(_MSC_VER) && defined(_M_X64) auto result = uint128_fallback(); result.lo_ = _umul128(x, y, &result.hi_); return result; #else const uint64_t mask = static_cast(max_value()); uint64_t a = x >> 32; uint64_t b = x & mask; uint64_t c = y >> 32; uint64_t d = y & mask; uint64_t ac = a * c; uint64_t bc = b * c; uint64_t ad = a * d; uint64_t bd = b * d; uint64_t intermediate = (bd >> 32) + (ad & mask) + (bc & mask); return {ac + (intermediate >> 32) + (ad >> 32) + (bc >> 32), (intermediate << 32) + (bd & mask)}; #endif } // Implementation of Dragonbox algorithm: https://github.com/jk-jeon/dragonbox. namespace dragonbox { // Computes upper 64 bits of multiplication of two 64-bit unsigned integers. inline uint64_t umul128_upper64(uint64_t x, uint64_t y) noexcept { #if FMT_USE_INT128 auto p = static_cast(x) * static_cast(y); return static_cast(p >> 64); #elif defined(_MSC_VER) && defined(_M_X64) return __umulh(x, y); #else return umul128(x, y).high(); #endif } // Computes upper 128 bits of multiplication of a 64-bit unsigned integer and a // 128-bit unsigned integer. 
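// Illustrative check: with x = 2^32 and y = {high = 0, low = 2^32} the full
// 192-bit product is 2^64, so the upper 128 bits equal 1; the code below
// reaches the same value as umul128(x, 0) + umul128_upper64(x, 2^32) = 0 + 1.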
inline uint128_fallback umul192_upper128(uint64_t x, uint128_fallback y) noexcept { uint128_fallback r = umul128(x, y.high()); r += umul128_upper64(x, y.low()); return r; } // Computes upper 64 bits of multiplication of a 32-bit unsigned integer and a // 64-bit unsigned integer. inline uint64_t umul96_upper64(uint32_t x, uint64_t y) noexcept { return umul128_upper64(static_cast(x) << 32, y); } // Computes lower 128 bits of multiplication of a 64-bit unsigned integer and a // 128-bit unsigned integer. inline uint128_fallback umul192_lower128(uint64_t x, uint128_fallback y) noexcept { uint64_t high = x * y.high(); uint128_fallback high_low = umul128(x, y.low()); return {high + high_low.high(), high_low.low()}; } // Computes lower 64 bits of multiplication of a 32-bit unsigned integer and a // 64-bit unsigned integer. inline uint64_t umul96_lower64(uint32_t x, uint64_t y) noexcept { return x * y; } // Computes floor(log10(pow(2, e))) for e in [-2620, 2620] using the method from // https://fmt.dev/papers/Dragonbox.pdf#page=28, section 6.1. inline int floor_log10_pow2(int e) noexcept { FMT_ASSERT(e <= 2620 && e >= -2620, "too large exponent"); static_assert((-1 >> 1) == -1, "right shift is not arithmetic"); return (e * 315653) >> 20; } // Various fast log computations. inline int floor_log2_pow10(int e) noexcept { FMT_ASSERT(e <= 1233 && e >= -1233, "too large exponent"); return (e * 1741647) >> 19; } inline int floor_log10_pow2_minus_log10_4_over_3(int e) noexcept { FMT_ASSERT(e <= 2936 && e >= -2985, "too large exponent"); return (e * 631305 - 261663) >> 21; } static constexpr struct { uint32_t divisor; int shift_amount; } div_small_pow10_infos[] = {{10, 16}, {100, 16}}; // Replaces n by floor(n / pow(10, N)) returning true if and only if n is // divisible by pow(10, N). // Precondition: n <= pow(10, N + 1). template bool check_divisibility_and_divide_by_pow10(uint32_t& n) noexcept { // The numbers below are chosen such that: // 1. floor(n/d) = floor(nm / 2^k) where d=10 or d=100, // 2. nm mod 2^k < m if and only if n is divisible by d, // where m is magic_number, k is shift_amount // and d is divisor. // // Item 1 is a common technique of replacing division by a constant with // multiplication, see e.g. "Division by Invariant Integers Using // Multiplication" by Granlund and Montgomery (1994). magic_number (m) is set // to ceil(2^k/d) for large enough k. // The idea for item 2 originates from Schubfach. constexpr auto info = div_small_pow10_infos[N - 1]; FMT_ASSERT(n <= info.divisor * 10, "n is too large"); constexpr uint32_t magic_number = (1u << info.shift_amount) / info.divisor + 1; n *= magic_number; const uint32_t comparison_mask = (1u << info.shift_amount) - 1; bool result = (n & comparison_mask) < magic_number; n >>= info.shift_amount; return result; } // Computes floor(n / pow(10, N)) for small n and N. // Precondition: n <= pow(10, N + 1). 
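// Worked example (N = 1): divisor = 10, shift_amount = 16, so magic_number =
// (1 << 16) / 10 + 1 = 6554 and, e.g., (99 * 6554) >> 16 = 648846 >> 16 = 9 =
// floor(99 / 10); the identity holds for all n within the precondition
// n <= 100.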
template uint32_t small_division_by_pow10(uint32_t n) noexcept { constexpr auto info = div_small_pow10_infos[N - 1]; FMT_ASSERT(n <= info.divisor * 10, "n is too large"); constexpr uint32_t magic_number = (1u << info.shift_amount) / info.divisor + 1; return (n * magic_number) >> info.shift_amount; } // Computes floor(n / 10^(kappa + 1)) (float) inline uint32_t divide_by_10_to_kappa_plus_1(uint32_t n) noexcept { // 1374389535 = ceil(2^37/100) return static_cast((static_cast(n) * 1374389535) >> 37); } // Computes floor(n / 10^(kappa + 1)) (double) inline uint64_t divide_by_10_to_kappa_plus_1(uint64_t n) noexcept { // 2361183241434822607 = ceil(2^(64+7)/1000) return umul128_upper64(n, 2361183241434822607ull) >> 7; } // Various subroutines using pow10 cache template struct cache_accessor; template <> struct cache_accessor { using carrier_uint = float_info::carrier_uint; using cache_entry_type = uint64_t; static uint64_t get_cached_power(int k) noexcept { FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k, "k is out of range"); static constexpr const uint64_t pow10_significands[] = { 0x81ceb32c4b43fcf5, 0xa2425ff75e14fc32, 0xcad2f7f5359a3b3f, 0xfd87b5f28300ca0e, 0x9e74d1b791e07e49, 0xc612062576589ddb, 0xf79687aed3eec552, 0x9abe14cd44753b53, 0xc16d9a0095928a28, 0xf1c90080baf72cb2, 0x971da05074da7bef, 0xbce5086492111aeb, 0xec1e4a7db69561a6, 0x9392ee8e921d5d08, 0xb877aa3236a4b44a, 0xe69594bec44de15c, 0x901d7cf73ab0acda, 0xb424dc35095cd810, 0xe12e13424bb40e14, 0x8cbccc096f5088cc, 0xafebff0bcb24aaff, 0xdbe6fecebdedd5bf, 0x89705f4136b4a598, 0xabcc77118461cefd, 0xd6bf94d5e57a42bd, 0x8637bd05af6c69b6, 0xa7c5ac471b478424, 0xd1b71758e219652c, 0x83126e978d4fdf3c, 0xa3d70a3d70a3d70b, 0xcccccccccccccccd, 0x8000000000000000, 0xa000000000000000, 0xc800000000000000, 0xfa00000000000000, 0x9c40000000000000, 0xc350000000000000, 0xf424000000000000, 0x9896800000000000, 0xbebc200000000000, 0xee6b280000000000, 0x9502f90000000000, 0xba43b74000000000, 0xe8d4a51000000000, 0x9184e72a00000000, 0xb5e620f480000000, 0xe35fa931a0000000, 0x8e1bc9bf04000000, 0xb1a2bc2ec5000000, 0xde0b6b3a76400000, 0x8ac7230489e80000, 0xad78ebc5ac620000, 0xd8d726b7177a8000, 0x878678326eac9000, 0xa968163f0a57b400, 0xd3c21bcecceda100, 0x84595161401484a0, 0xa56fa5b99019a5c8, 0xcecb8f27f4200f3a, 0x813f3978f8940985, 0xa18f07d736b90be6, 0xc9f2c9cd04674edf, 0xfc6f7c4045812297, 0x9dc5ada82b70b59e, 0xc5371912364ce306, 0xf684df56c3e01bc7, 0x9a130b963a6c115d, 0xc097ce7bc90715b4, 0xf0bdc21abb48db21, 0x96769950b50d88f5, 0xbc143fa4e250eb32, 0xeb194f8e1ae525fe, 0x92efd1b8d0cf37bf, 0xb7abc627050305ae, 0xe596b7b0c643c71a, 0x8f7e32ce7bea5c70, 0xb35dbf821ae4f38c, 0xe0352f62a19e306f}; return pow10_significands[k - float_info::min_k]; } struct compute_mul_result { carrier_uint result; bool is_integer; }; struct compute_mul_parity_result { bool parity; bool is_integer; }; static compute_mul_result compute_mul( carrier_uint u, const cache_entry_type& cache) noexcept { auto r = umul96_upper64(u, cache); return {static_cast(r >> 32), static_cast(r) == 0}; } static uint32_t compute_delta(const cache_entry_type& cache, int beta) noexcept { return static_cast(cache >> (64 - 1 - beta)); } static compute_mul_parity_result compute_mul_parity( carrier_uint two_f, const cache_entry_type& cache, int beta) noexcept { FMT_ASSERT(beta >= 1, ""); FMT_ASSERT(beta < 64, ""); auto r = umul96_lower64(two_f, cache); return {((r >> (64 - beta)) & 1) != 0, static_cast(r >> (32 - beta)) == 0}; } static carrier_uint compute_left_endpoint_for_shorter_interval_case( const 
cache_entry_type& cache, int beta) noexcept { return static_cast( (cache - (cache >> (num_significand_bits() + 2))) >> (64 - num_significand_bits() - 1 - beta)); } static carrier_uint compute_right_endpoint_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept { return static_cast( (cache + (cache >> (num_significand_bits() + 1))) >> (64 - num_significand_bits() - 1 - beta)); } static carrier_uint compute_round_up_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept { return (static_cast( cache >> (64 - num_significand_bits() - 2 - beta)) + 1) / 2; } }; template <> struct cache_accessor { using carrier_uint = float_info::carrier_uint; using cache_entry_type = uint128_fallback; static uint128_fallback get_cached_power(int k) noexcept { FMT_ASSERT(k >= float_info::min_k && k <= float_info::max_k, "k is out of range"); static constexpr const uint128_fallback pow10_significands[] = { #if FMT_USE_FULL_CACHE_DRAGONBOX {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, {0x9faacf3df73609b1, 0x77b191618c54e9ad}, {0xc795830d75038c1d, 0xd59df5b9ef6a2418}, {0xf97ae3d0d2446f25, 0x4b0573286b44ad1e}, {0x9becce62836ac577, 0x4ee367f9430aec33}, {0xc2e801fb244576d5, 0x229c41f793cda740}, {0xf3a20279ed56d48a, 0x6b43527578c11110}, {0x9845418c345644d6, 0x830a13896b78aaaa}, {0xbe5691ef416bd60c, 0x23cc986bc656d554}, {0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa9}, {0x94b3a202eb1c3f39, 0x7bf7d71432f3d6aa}, {0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc54}, {0xe858ad248f5c22c9, 0xd1b3400f8f9cff69}, {0x91376c36d99995be, 0x23100809b9c21fa2}, {0xb58547448ffffb2d, 0xabd40a0c2832a78b}, {0xe2e69915b3fff9f9, 0x16c90c8f323f516d}, {0x8dd01fad907ffc3b, 0xae3da7d97f6792e4}, {0xb1442798f49ffb4a, 0x99cd11cfdf41779d}, {0xdd95317f31c7fa1d, 0x40405643d711d584}, {0x8a7d3eef7f1cfc52, 0x482835ea666b2573}, {0xad1c8eab5ee43b66, 0xda3243650005eed0}, {0xd863b256369d4a40, 0x90bed43e40076a83}, {0x873e4f75e2224e68, 0x5a7744a6e804a292}, {0xa90de3535aaae202, 0x711515d0a205cb37}, {0xd3515c2831559a83, 0x0d5a5b44ca873e04}, {0x8412d9991ed58091, 0xe858790afe9486c3}, {0xa5178fff668ae0b6, 0x626e974dbe39a873}, {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, {0x80fa687f881c7f8e, 0x7ce66634bc9d0b9a}, {0xa139029f6a239f72, 0x1c1fffc1ebc44e81}, {0xc987434744ac874e, 0xa327ffb266b56221}, {0xfbe9141915d7a922, 0x4bf1ff9f0062baa9}, {0x9d71ac8fada6c9b5, 0x6f773fc3603db4aa}, {0xc4ce17b399107c22, 0xcb550fb4384d21d4}, {0xf6019da07f549b2b, 0x7e2a53a146606a49}, {0x99c102844f94e0fb, 0x2eda7444cbfc426e}, {0xc0314325637a1939, 0xfa911155fefb5309}, {0xf03d93eebc589f88, 0x793555ab7eba27cb}, {0x96267c7535b763b5, 0x4bc1558b2f3458df}, {0xbbb01b9283253ca2, 0x9eb1aaedfb016f17}, {0xea9c227723ee8bcb, 0x465e15a979c1cadd}, {0x92a1958a7675175f, 0x0bfacd89ec191eca}, {0xb749faed14125d36, 0xcef980ec671f667c}, {0xe51c79a85916f484, 0x82b7e12780e7401b}, {0x8f31cc0937ae58d2, 0xd1b2ecb8b0908811}, {0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa16}, {0xdfbdcece67006ac9, 0x67a791e093e1d49b}, {0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e1}, {0xaecc49914078536d, 0x58fae9f773886e19}, {0xda7f5bf590966848, 0xaf39a475506a899f}, {0x888f99797a5e012d, 0x6d8406c952429604}, {0xaab37fd7d8f58178, 0xc8e5087ba6d33b84}, {0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a65}, {0x855c3be0a17fcd26, 0x5cf2eea09a550680}, {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, {0xd0601d8efc57b08b, 0xf13b94daf124da27}, {0x823c12795db6ce57, 0x76c53d08d6b70859}, {0xa2cb1717b52481ed, 0x54768c4b0c64ca6f}, {0xcb7ddcdda26da268, 0xa9942f5dcf7dfd0a}, {0xfe5d54150b090b02, 0xd3f93b35435d7c4d}, {0x9efa548d26e5a6e1, 0xc47bc5014a1a6db0}, 
{0xc6b8e9b0709f109a, 0x359ab6419ca1091c}, {0xf867241c8cc6d4c0, 0xc30163d203c94b63}, {0x9b407691d7fc44f8, 0x79e0de63425dcf1e}, {0xc21094364dfb5636, 0x985915fc12f542e5}, {0xf294b943e17a2bc4, 0x3e6f5b7b17b2939e}, {0x979cf3ca6cec5b5a, 0xa705992ceecf9c43}, {0xbd8430bd08277231, 0x50c6ff782a838354}, {0xece53cec4a314ebd, 0xa4f8bf5635246429}, {0x940f4613ae5ed136, 0x871b7795e136be9a}, {0xb913179899f68584, 0x28e2557b59846e40}, {0xe757dd7ec07426e5, 0x331aeada2fe589d0}, {0x9096ea6f3848984f, 0x3ff0d2c85def7622}, {0xb4bca50b065abe63, 0x0fed077a756b53aa}, {0xe1ebce4dc7f16dfb, 0xd3e8495912c62895}, {0x8d3360f09cf6e4bd, 0x64712dd7abbbd95d}, {0xb080392cc4349dec, 0xbd8d794d96aacfb4}, {0xdca04777f541c567, 0xecf0d7a0fc5583a1}, {0x89e42caaf9491b60, 0xf41686c49db57245}, {0xac5d37d5b79b6239, 0x311c2875c522ced6}, {0xd77485cb25823ac7, 0x7d633293366b828c}, {0x86a8d39ef77164bc, 0xae5dff9c02033198}, {0xa8530886b54dbdeb, 0xd9f57f830283fdfd}, {0xd267caa862a12d66, 0xd072df63c324fd7c}, {0x8380dea93da4bc60, 0x4247cb9e59f71e6e}, {0xa46116538d0deb78, 0x52d9be85f074e609}, {0xcd795be870516656, 0x67902e276c921f8c}, {0x806bd9714632dff6, 0x00ba1cd8a3db53b7}, {0xa086cfcd97bf97f3, 0x80e8a40eccd228a5}, {0xc8a883c0fdaf7df0, 0x6122cd128006b2ce}, {0xfad2a4b13d1b5d6c, 0x796b805720085f82}, {0x9cc3a6eec6311a63, 0xcbe3303674053bb1}, {0xc3f490aa77bd60fc, 0xbedbfc4411068a9d}, {0xf4f1b4d515acb93b, 0xee92fb5515482d45}, {0x991711052d8bf3c5, 0x751bdd152d4d1c4b}, {0xbf5cd54678eef0b6, 0xd262d45a78a0635e}, {0xef340a98172aace4, 0x86fb897116c87c35}, {0x9580869f0e7aac0e, 0xd45d35e6ae3d4da1}, {0xbae0a846d2195712, 0x8974836059cca10a}, {0xe998d258869facd7, 0x2bd1a438703fc94c}, {0x91ff83775423cc06, 0x7b6306a34627ddd0}, {0xb67f6455292cbf08, 0x1a3bc84c17b1d543}, {0xe41f3d6a7377eeca, 0x20caba5f1d9e4a94}, {0x8e938662882af53e, 0x547eb47b7282ee9d}, {0xb23867fb2a35b28d, 0xe99e619a4f23aa44}, {0xdec681f9f4c31f31, 0x6405fa00e2ec94d5}, {0x8b3c113c38f9f37e, 0xde83bc408dd3dd05}, {0xae0b158b4738705e, 0x9624ab50b148d446}, {0xd98ddaee19068c76, 0x3badd624dd9b0958}, {0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d7}, {0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4d}, {0xd47487cc8470652b, 0x7647c32000696720}, {0x84c8d4dfd2c63f3b, 0x29ecd9f40041e074}, {0xa5fb0a17c777cf09, 0xf468107100525891}, {0xcf79cc9db955c2cc, 0x7182148d4066eeb5}, {0x81ac1fe293d599bf, 0xc6f14cd848405531}, {0xa21727db38cb002f, 0xb8ada00e5a506a7d}, {0xca9cf1d206fdc03b, 0xa6d90811f0e4851d}, {0xfd442e4688bd304a, 0x908f4a166d1da664}, {0x9e4a9cec15763e2e, 0x9a598e4e043287ff}, {0xc5dd44271ad3cdba, 0x40eff1e1853f29fe}, {0xf7549530e188c128, 0xd12bee59e68ef47d}, {0x9a94dd3e8cf578b9, 0x82bb74f8301958cf}, {0xc13a148e3032d6e7, 0xe36a52363c1faf02}, {0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac2}, {0x96f5600f15a7b7e5, 0x29ab103a5ef8c0ba}, {0xbcb2b812db11a5de, 0x7415d448f6b6f0e8}, {0xebdf661791d60f56, 0x111b495b3464ad22}, {0x936b9fcebb25c995, 0xcab10dd900beec35}, {0xb84687c269ef3bfb, 0x3d5d514f40eea743}, {0xe65829b3046b0afa, 0x0cb4a5a3112a5113}, {0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ac}, {0xb3f4e093db73a093, 0x59ed216765690f57}, {0xe0f218b8d25088b8, 0x306869c13ec3532d}, {0x8c974f7383725573, 0x1e414218c73a13fc}, {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, {0xdbac6c247d62a583, 0xdf45f746b74abf3a}, {0x894bc396ce5da772, 0x6b8bba8c328eb784}, {0xab9eb47c81f5114f, 0x066ea92f3f326565}, {0xd686619ba27255a2, 0xc80a537b0efefebe}, {0x8613fd0145877585, 0xbd06742ce95f5f37}, {0xa798fc4196e952e7, 0x2c48113823b73705}, {0xd17f3b51fca3a7a0, 0xf75a15862ca504c6}, {0x82ef85133de648c4, 0x9a984d73dbe722fc}, {0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebbb}, {0xcc963fee10b7d1b3, 
0x318df905079926a9}, {0xffbbcfe994e5c61f, 0xfdf17746497f7053}, {0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa634}, {0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc1}, {0xf9bd690a1b68637b, 0x3dfdce7aa3c673b1}, {0x9c1661a651213e2d, 0x06bea10ca65c084f}, {0xc31bfa0fe5698db8, 0x486e494fcff30a63}, {0xf3e2f893dec3f126, 0x5a89dba3c3efccfb}, {0x986ddb5c6b3a76b7, 0xf89629465a75e01d}, {0xbe89523386091465, 0xf6bbb397f1135824}, {0xee2ba6c0678b597f, 0x746aa07ded582e2d}, {0x94db483840b717ef, 0xa8c2a44eb4571cdd}, {0xba121a4650e4ddeb, 0x92f34d62616ce414}, {0xe896a0d7e51e1566, 0x77b020baf9c81d18}, {0x915e2486ef32cd60, 0x0ace1474dc1d122f}, {0xb5b5ada8aaff80b8, 0x0d819992132456bb}, {0xe3231912d5bf60e6, 0x10e1fff697ed6c6a}, {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, {0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb3}, {0xddd0467c64bce4a0, 0xac7cb3f6d05ddbdf}, {0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96c}, {0xad4ab7112eb3929d, 0x86c16c98d2c953c7}, {0xd89d64d57a607744, 0xe871c7bf077ba8b8}, {0x87625f056c7c4a8b, 0x11471cd764ad4973}, {0xa93af6c6c79b5d2d, 0xd598e40d3dd89bd0}, {0xd389b47879823479, 0x4aff1d108d4ec2c4}, {0x843610cb4bf160cb, 0xcedf722a585139bb}, {0xa54394fe1eedb8fe, 0xc2974eb4ee658829}, {0xce947a3da6a9273e, 0x733d226229feea33}, {0x811ccc668829b887, 0x0806357d5a3f5260}, {0xa163ff802a3426a8, 0xca07c2dcb0cf26f8}, {0xc9bcff6034c13052, 0xfc89b393dd02f0b6}, {0xfc2c3f3841f17c67, 0xbbac2078d443ace3}, {0x9d9ba7832936edc0, 0xd54b944b84aa4c0e}, {0xc5029163f384a931, 0x0a9e795e65d4df12}, {0xf64335bcf065d37d, 0x4d4617b5ff4a16d6}, {0x99ea0196163fa42e, 0x504bced1bf8e4e46}, {0xc06481fb9bcf8d39, 0xe45ec2862f71e1d7}, {0xf07da27a82c37088, 0x5d767327bb4e5a4d}, {0x964e858c91ba2655, 0x3a6a07f8d510f870}, {0xbbe226efb628afea, 0x890489f70a55368c}, {0xeadab0aba3b2dbe5, 0x2b45ac74ccea842f}, {0x92c8ae6b464fc96f, 0x3b0b8bc90012929e}, {0xb77ada0617e3bbcb, 0x09ce6ebb40173745}, {0xe55990879ddcaabd, 0xcc420a6a101d0516}, {0x8f57fa54c2a9eab6, 0x9fa946824a12232e}, {0xb32df8e9f3546564, 0x47939822dc96abfa}, {0xdff9772470297ebd, 0x59787e2b93bc56f8}, {0x8bfbea76c619ef36, 0x57eb4edb3c55b65b}, {0xaefae51477a06b03, 0xede622920b6b23f2}, {0xdab99e59958885c4, 0xe95fab368e45ecee}, {0x88b402f7fd75539b, 0x11dbcb0218ebb415}, {0xaae103b5fcd2a881, 0xd652bdc29f26a11a}, {0xd59944a37c0752a2, 0x4be76d3346f04960}, {0x857fcae62d8493a5, 0x6f70a4400c562ddc}, {0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb953}, {0xd097ad07a71f26b2, 0x7e2000a41346a7a8}, {0x825ecc24c873782f, 0x8ed400668c0c28c9}, {0xa2f67f2dfa90563b, 0x728900802f0f32fb}, {0xcbb41ef979346bca, 0x4f2b40a03ad2ffba}, {0xfea126b7d78186bc, 0xe2f610c84987bfa9}, {0x9f24b832e6b0f436, 0x0dd9ca7d2df4d7ca}, {0xc6ede63fa05d3143, 0x91503d1c79720dbc}, {0xf8a95fcf88747d94, 0x75a44c6397ce912b}, {0x9b69dbe1b548ce7c, 0xc986afbe3ee11abb}, {0xc24452da229b021b, 0xfbe85badce996169}, {0xf2d56790ab41c2a2, 0xfae27299423fb9c4}, {0x97c560ba6b0919a5, 0xdccd879fc967d41b}, {0xbdb6b8e905cb600f, 0x5400e987bbc1c921}, {0xed246723473e3813, 0x290123e9aab23b69}, {0x9436c0760c86e30b, 0xf9a0b6720aaf6522}, {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, {0xe7958cb87392c2c2, 0xb60b1d1230b20e05}, {0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c3}, {0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af4}, {0xe2280b6c20dd5232, 0x25c6da63c38de1b1}, {0x8d590723948a535f, 0x579c487e5a38ad0f}, {0xb0af48ec79ace837, 0x2d835a9df0c6d852}, {0xdcdb1b2798182244, 0xf8e431456cf88e66}, {0x8a08f0f8bf0f156b, 0x1b8e9ecb641b5900}, {0xac8b2d36eed2dac5, 0xe272467e3d222f40}, {0xd7adf884aa879177, 0x5b0ed81dcc6abb10}, {0x86ccbb52ea94baea, 0x98e947129fc2b4ea}, {0xa87fea27a539e9a5, 0x3f2398d747b36225}, {0xd29fe4b18e88640e, 0x8eec7f0d19a03aae}, 
{0x83a3eeeef9153e89, 0x1953cf68300424ad}, {0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd8}, {0xcdb02555653131b6, 0x3792f412cb06794e}, {0x808e17555f3ebf11, 0xe2bbd88bbee40bd1}, {0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec5}, {0xc8de047564d20a8b, 0xf245825a5a445276}, {0xfb158592be068d2e, 0xeed6e2f0f0d56713}, {0x9ced737bb6c4183d, 0x55464dd69685606c}, {0xc428d05aa4751e4c, 0xaa97e14c3c26b887}, {0xf53304714d9265df, 0xd53dd99f4b3066a9}, {0x993fe2c6d07b7fab, 0xe546a8038efe402a}, {0xbf8fdb78849a5f96, 0xde98520472bdd034}, {0xef73d256a5c0f77c, 0x963e66858f6d4441}, {0x95a8637627989aad, 0xdde7001379a44aa9}, {0xbb127c53b17ec159, 0x5560c018580d5d53}, {0xe9d71b689dde71af, 0xaab8f01e6e10b4a7}, {0x9226712162ab070d, 0xcab3961304ca70e9}, {0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d23}, {0xe45c10c42a2b3b05, 0x8cb89a7db77c506b}, {0x8eb98a7a9a5b04e3, 0x77f3608e92adb243}, {0xb267ed1940f1c61c, 0x55f038b237591ed4}, {0xdf01e85f912e37a3, 0x6b6c46dec52f6689}, {0x8b61313bbabce2c6, 0x2323ac4b3b3da016}, {0xae397d8aa96c1b77, 0xabec975e0a0d081b}, {0xd9c7dced53c72255, 0x96e7bd358c904a22}, {0x881cea14545c7575, 0x7e50d64177da2e55}, {0xaa242499697392d2, 0xdde50bd1d5d0b9ea}, {0xd4ad2dbfc3d07787, 0x955e4ec64b44e865}, {0x84ec3c97da624ab4, 0xbd5af13bef0b113f}, {0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58f}, {0xcfb11ead453994ba, 0x67de18eda5814af3}, {0x81ceb32c4b43fcf4, 0x80eacf948770ced8}, {0xa2425ff75e14fc31, 0xa1258379a94d028e}, {0xcad2f7f5359a3b3e, 0x096ee45813a04331}, {0xfd87b5f28300ca0d, 0x8bca9d6e188853fd}, {0x9e74d1b791e07e48, 0x775ea264cf55347e}, {0xc612062576589dda, 0x95364afe032a819e}, {0xf79687aed3eec551, 0x3a83ddbd83f52205}, {0x9abe14cd44753b52, 0xc4926a9672793543}, {0xc16d9a0095928a27, 0x75b7053c0f178294}, {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, {0x971da05074da7bee, 0xd3f6fc16ebca5e04}, {0xbce5086492111aea, 0x88f4bb1ca6bcf585}, {0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6}, {0x9392ee8e921d5d07, 0x3aff322e62439fd0}, {0xb877aa3236a4b449, 0x09befeb9fad487c3}, {0xe69594bec44de15b, 0x4c2ebe687989a9b4}, {0x901d7cf73ab0acd9, 0x0f9d37014bf60a11}, {0xb424dc35095cd80f, 0x538484c19ef38c95}, {0xe12e13424bb40e13, 0x2865a5f206b06fba}, {0x8cbccc096f5088cb, 0xf93f87b7442e45d4}, {0xafebff0bcb24aafe, 0xf78f69a51539d749}, {0xdbe6fecebdedd5be, 0xb573440e5a884d1c}, {0x89705f4136b4a597, 0x31680a88f8953031}, {0xabcc77118461cefc, 0xfdc20d2b36ba7c3e}, {0xd6bf94d5e57a42bc, 0x3d32907604691b4d}, {0x8637bd05af6c69b5, 0xa63f9a49c2c1b110}, {0xa7c5ac471b478423, 0x0fcf80dc33721d54}, {0xd1b71758e219652b, 0xd3c36113404ea4a9}, {0x83126e978d4fdf3b, 0x645a1cac083126ea}, {0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4}, {0xcccccccccccccccc, 0xcccccccccccccccd}, {0x8000000000000000, 0x0000000000000000}, {0xa000000000000000, 0x0000000000000000}, {0xc800000000000000, 0x0000000000000000}, {0xfa00000000000000, 0x0000000000000000}, {0x9c40000000000000, 0x0000000000000000}, {0xc350000000000000, 0x0000000000000000}, {0xf424000000000000, 0x0000000000000000}, {0x9896800000000000, 0x0000000000000000}, {0xbebc200000000000, 0x0000000000000000}, {0xee6b280000000000, 0x0000000000000000}, {0x9502f90000000000, 0x0000000000000000}, {0xba43b74000000000, 0x0000000000000000}, {0xe8d4a51000000000, 0x0000000000000000}, {0x9184e72a00000000, 0x0000000000000000}, {0xb5e620f480000000, 0x0000000000000000}, {0xe35fa931a0000000, 0x0000000000000000}, {0x8e1bc9bf04000000, 0x0000000000000000}, {0xb1a2bc2ec5000000, 0x0000000000000000}, {0xde0b6b3a76400000, 0x0000000000000000}, {0x8ac7230489e80000, 0x0000000000000000}, {0xad78ebc5ac620000, 0x0000000000000000}, {0xd8d726b7177a8000, 0x0000000000000000}, {0x878678326eac9000, 
0x0000000000000000}, {0xa968163f0a57b400, 0x0000000000000000}, {0xd3c21bcecceda100, 0x0000000000000000}, {0x84595161401484a0, 0x0000000000000000}, {0xa56fa5b99019a5c8, 0x0000000000000000}, {0xcecb8f27f4200f3a, 0x0000000000000000}, {0x813f3978f8940984, 0x4000000000000000}, {0xa18f07d736b90be5, 0x5000000000000000}, {0xc9f2c9cd04674ede, 0xa400000000000000}, {0xfc6f7c4045812296, 0x4d00000000000000}, {0x9dc5ada82b70b59d, 0xf020000000000000}, {0xc5371912364ce305, 0x6c28000000000000}, {0xf684df56c3e01bc6, 0xc732000000000000}, {0x9a130b963a6c115c, 0x3c7f400000000000}, {0xc097ce7bc90715b3, 0x4b9f100000000000}, {0xf0bdc21abb48db20, 0x1e86d40000000000}, {0x96769950b50d88f4, 0x1314448000000000}, {0xbc143fa4e250eb31, 0x17d955a000000000}, {0xeb194f8e1ae525fd, 0x5dcfab0800000000}, {0x92efd1b8d0cf37be, 0x5aa1cae500000000}, {0xb7abc627050305ad, 0xf14a3d9e40000000}, {0xe596b7b0c643c719, 0x6d9ccd05d0000000}, {0x8f7e32ce7bea5c6f, 0xe4820023a2000000}, {0xb35dbf821ae4f38b, 0xdda2802c8a800000}, {0xe0352f62a19e306e, 0xd50b2037ad200000}, {0x8c213d9da502de45, 0x4526f422cc340000}, {0xaf298d050e4395d6, 0x9670b12b7f410000}, {0xdaf3f04651d47b4c, 0x3c0cdd765f114000}, {0x88d8762bf324cd0f, 0xa5880a69fb6ac800}, {0xab0e93b6efee0053, 0x8eea0d047a457a00}, {0xd5d238a4abe98068, 0x72a4904598d6d880}, {0x85a36366eb71f041, 0x47a6da2b7f864750}, {0xa70c3c40a64e6c51, 0x999090b65f67d924}, {0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d}, {0x82818f1281ed449f, 0xbff8f10e7a8921a5}, {0xa321f2d7226895c7, 0xaff72d52192b6a0e}, {0xcbea6f8ceb02bb39, 0x9bf4f8a69f764491}, {0xfee50b7025c36a08, 0x02f236d04753d5b5}, {0x9f4f2726179a2245, 0x01d762422c946591}, {0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef6}, {0xf8ebad2b84e0d58b, 0xd2e0898765a7deb3}, {0x9b934c3b330c8577, 0x63cc55f49f88eb30}, {0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fc}, {0xf316271c7fc3908a, 0x8bef464e3945ef7b}, {0x97edd871cfda3a56, 0x97758bf0e3cbb5ad}, {0xbde94e8e43d0c8ec, 0x3d52eeed1cbea318}, {0xed63a231d4c4fb27, 0x4ca7aaa863ee4bde}, {0x945e455f24fb1cf8, 0x8fe8caa93e74ef6b}, {0xb975d6b6ee39e436, 0xb3e2fd538e122b45}, {0xe7d34c64a9c85d44, 0x60dbbca87196b617}, {0x90e40fbeea1d3a4a, 0xbc8955e946fe31ce}, {0xb51d13aea4a488dd, 0x6babab6398bdbe42}, {0xe264589a4dcdab14, 0xc696963c7eed2dd2}, {0x8d7eb76070a08aec, 0xfc1e1de5cf543ca3}, {0xb0de65388cc8ada8, 0x3b25a55f43294bcc}, {0xdd15fe86affad912, 0x49ef0eb713f39ebf}, {0x8a2dbf142dfcc7ab, 0x6e3569326c784338}, {0xacb92ed9397bf996, 0x49c2c37f07965405}, {0xd7e77a8f87daf7fb, 0xdc33745ec97be907}, {0x86f0ac99b4e8dafd, 0x69a028bb3ded71a4}, {0xa8acd7c0222311bc, 0xc40832ea0d68ce0d}, {0xd2d80db02aabd62b, 0xf50a3fa490c30191}, {0x83c7088e1aab65db, 0x792667c6da79e0fb}, {0xa4b8cab1a1563f52, 0x577001b891185939}, {0xcde6fd5e09abcf26, 0xed4c0226b55e6f87}, {0x80b05e5ac60b6178, 0x544f8158315b05b5}, {0xa0dc75f1778e39d6, 0x696361ae3db1c722}, {0xc913936dd571c84c, 0x03bc3a19cd1e38ea}, {0xfb5878494ace3a5f, 0x04ab48a04065c724}, {0x9d174b2dcec0e47b, 0x62eb0d64283f9c77}, {0xc45d1df942711d9a, 0x3ba5d0bd324f8395}, {0xf5746577930d6500, 0xca8f44ec7ee3647a}, {0x9968bf6abbe85f20, 0x7e998b13cf4e1ecc}, {0xbfc2ef456ae276e8, 0x9e3fedd8c321a67f}, {0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101f}, {0x95d04aee3b80ece5, 0xbba1f1d158724a13}, {0xbb445da9ca61281f, 0x2a8a6e45ae8edc98}, {0xea1575143cf97226, 0xf52d09d71a3293be}, {0x924d692ca61be758, 0x593c2626705f9c57}, {0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836d}, {0xe498f455c38b997a, 0x0b6dfb9c0f956448}, {0x8edf98b59a373fec, 0x4724bd4189bd5ead}, {0xb2977ee300c50fe7, 0x58edec91ec2cb658}, {0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ee}, {0x8b865b215899f46c, 0xbd79e0d20082ee75}, 
{0xae67f1e9aec07187, 0xecd8590680a3aa12}, {0xda01ee641a708de9, 0xe80e6f4820cc9496}, {0x884134fe908658b2, 0x3109058d147fdcde}, {0xaa51823e34a7eede, 0xbd4b46f0599fd416}, {0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91b}, {0x850fadc09923329e, 0x03e2cf6bc604ddb1}, {0xa6539930bf6bff45, 0x84db8346b786151d}, {0xcfe87f7cef46ff16, 0xe612641865679a64}, {0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07f}, {0xa26da3999aef7749, 0xe3be5e330f38f09e}, {0xcb090c8001ab551c, 0x5cadf5bfd3072cc6}, {0xfdcb4fa002162a63, 0x73d9732fc7c8f7f7}, {0x9e9f11c4014dda7e, 0x2867e7fddcdd9afb}, {0xc646d63501a1511d, 0xb281e1fd541501b9}, {0xf7d88bc24209a565, 0x1f225a7ca91a4227}, {0x9ae757596946075f, 0x3375788de9b06959}, {0xc1a12d2fc3978937, 0x0052d6b1641c83af}, {0xf209787bb47d6b84, 0xc0678c5dbd23a49b}, {0x9745eb4d50ce6332, 0xf840b7ba963646e1}, {0xbd176620a501fbff, 0xb650e5a93bc3d899}, {0xec5d3fa8ce427aff, 0xa3e51f138ab4cebf}, {0x93ba47c980e98cdf, 0xc66f336c36b10138}, {0xb8a8d9bbe123f017, 0xb80b0047445d4185}, {0xe6d3102ad96cec1d, 0xa60dc059157491e6}, {0x9043ea1ac7e41392, 0x87c89837ad68db30}, {0xb454e4a179dd1877, 0x29babe4598c311fc}, {0xe16a1dc9d8545e94, 0xf4296dd6fef3d67b}, {0x8ce2529e2734bb1d, 0x1899e4a65f58660d}, {0xb01ae745b101e9e4, 0x5ec05dcff72e7f90}, {0xdc21a1171d42645d, 0x76707543f4fa1f74}, {0x899504ae72497eba, 0x6a06494a791c53a9}, {0xabfa45da0edbde69, 0x0487db9d17636893}, {0xd6f8d7509292d603, 0x45a9d2845d3c42b7}, {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b3}, {0xa7f26836f282b732, 0x8e6cac7768d7141f}, {0xd1ef0244af2364ff, 0x3207d795430cd927}, {0x8335616aed761f1f, 0x7f44e6bd49e807b9}, {0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a7}, {0xcd036837130890a1, 0x36dba887c37a8c10}, {0x802221226be55a64, 0xc2494954da2c978a}, {0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6d}, {0xc83553c5c8965d3d, 0x6f92829494e5acc8}, {0xfa42a8b73abbf48c, 0xcb772339ba1f17fa}, {0x9c69a97284b578d7, 0xff2a760414536efc}, {0xc38413cf25e2d70d, 0xfef5138519684abb}, {0xf46518c2ef5b8cd1, 0x7eb258665fc25d6a}, {0x98bf2f79d5993802, 0xef2f773ffbd97a62}, {0xbeeefb584aff8603, 0xaafb550ffacfd8fb}, {0xeeaaba2e5dbf6784, 0x95ba2a53f983cf39}, {0x952ab45cfa97a0b2, 0xdd945a747bf26184}, {0xba756174393d88df, 0x94f971119aeef9e5}, {0xe912b9d1478ceb17, 0x7a37cd5601aab85e}, {0x91abb422ccb812ee, 0xac62e055c10ab33b}, {0xb616a12b7fe617aa, 0x577b986b314d600a}, {0xe39c49765fdf9d94, 0xed5a7e85fda0b80c}, {0x8e41ade9fbebc27d, 0x14588f13be847308}, {0xb1d219647ae6b31c, 0x596eb2d8ae258fc9}, {0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bc}, {0x8aec23d680043bee, 0x25de7bb9480d5855}, {0xada72ccc20054ae9, 0xaf561aa79a10ae6b}, {0xd910f7ff28069da4, 0x1b2ba1518094da05}, {0x87aa9aff79042286, 0x90fb44d2f05d0843}, {0xa99541bf57452b28, 0x353a1607ac744a54}, {0xd3fa922f2d1675f2, 0x42889b8997915ce9}, {0x847c9b5d7c2e09b7, 0x69956135febada12}, {0xa59bc234db398c25, 0x43fab9837e699096}, {0xcf02b2c21207ef2e, 0x94f967e45e03f4bc}, {0x8161afb94b44f57d, 0x1d1be0eebac278f6}, {0xa1ba1ba79e1632dc, 0x6462d92a69731733}, {0xca28a291859bbf93, 0x7d7b8f7503cfdcff}, {0xfcb2cb35e702af78, 0x5cda735244c3d43f}, {0x9defbf01b061adab, 0x3a0888136afa64a8}, {0xc56baec21c7a1916, 0x088aaa1845b8fdd1}, {0xf6c69a72a3989f5b, 0x8aad549e57273d46}, {0x9a3c2087a63f6399, 0x36ac54e2f678864c}, {0xc0cb28a98fcf3c7f, 0x84576a1bb416a7de}, {0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d6}, {0x969eb7c47859e743, 0x9f644ae5a4b1b326}, {0xbc4665b596706114, 0x873d5d9f0dde1fef}, {0xeb57ff22fc0c7959, 0xa90cb506d155a7eb}, {0x9316ff75dd87cbd8, 0x09a7f12442d588f3}, {0xb7dcbf5354e9bece, 0x0c11ed6d538aeb30}, {0xe5d3ef282a242e81, 0x8f1668c8a86da5fb}, {0x8fa475791a569d10, 0xf96e017d694487bd}, {0xb38d92d760ec4455, 
0x37c981dcc395a9ad}, {0xe070f78d3927556a, 0x85bbe253f47b1418}, {0x8c469ab843b89562, 0x93956d7478ccec8f}, {0xaf58416654a6babb, 0x387ac8d1970027b3}, {0xdb2e51bfe9d0696a, 0x06997b05fcc0319f}, {0x88fcf317f22241e2, 0x441fece3bdf81f04}, {0xab3c2fddeeaad25a, 0xd527e81cad7626c4}, {0xd60b3bd56a5586f1, 0x8a71e223d8d3b075}, {0x85c7056562757456, 0xf6872d5667844e4a}, {0xa738c6bebb12d16c, 0xb428f8ac016561dc}, {0xd106f86e69d785c7, 0xe13336d701beba53}, {0x82a45b450226b39c, 0xecc0024661173474}, {0xa34d721642b06084, 0x27f002d7f95d0191}, {0xcc20ce9bd35c78a5, 0x31ec038df7b441f5}, {0xff290242c83396ce, 0x7e67047175a15272}, {0x9f79a169bd203e41, 0x0f0062c6e984d387}, {0xc75809c42c684dd1, 0x52c07b78a3e60869}, {0xf92e0c3537826145, 0xa7709a56ccdf8a83}, {0x9bbcc7a142b17ccb, 0x88a66076400bb692}, {0xc2abf989935ddbfe, 0x6acff893d00ea436}, {0xf356f7ebf83552fe, 0x0583f6b8c4124d44}, {0x98165af37b2153de, 0xc3727a337a8b704b}, {0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5d}, {0xeda2ee1c7064130c, 0x1162def06f79df74}, {0x9485d4d1c63e8be7, 0x8addcb5645ac2ba9}, {0xb9a74a0637ce2ee1, 0x6d953e2bd7173693}, {0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0438}, {0x910ab1d4db9914a0, 0x1d9c9892400a22a3}, {0xb54d5e4a127f59c8, 0x2503beb6d00cab4c}, {0xe2a0b5dc971f303a, 0x2e44ae64840fd61e}, {0x8da471a9de737e24, 0x5ceaecfed289e5d3}, {0xb10d8e1456105dad, 0x7425a83e872c5f48}, {0xdd50f1996b947518, 0xd12f124e28f7771a}, {0x8a5296ffe33cc92f, 0x82bd6b70d99aaa70}, {0xace73cbfdc0bfb7b, 0x636cc64d1001550c}, {0xd8210befd30efa5a, 0x3c47f7e05401aa4f}, {0x8714a775e3e95c78, 0x65acfaec34810a72}, {0xa8d9d1535ce3b396, 0x7f1839a741a14d0e}, {0xd31045a8341ca07c, 0x1ede48111209a051}, {0x83ea2b892091e44d, 0x934aed0aab460433}, {0xa4e4b66b68b65d60, 0xf81da84d56178540}, {0xce1de40642e3f4b9, 0x36251260ab9d668f}, {0x80d2ae83e9ce78f3, 0xc1d72b7c6b42601a}, {0xa1075a24e4421730, 0xb24cf65b8612f820}, {0xc94930ae1d529cfc, 0xdee033f26797b628}, {0xfb9b7cd9a4a7443c, 0x169840ef017da3b2}, {0x9d412e0806e88aa5, 0x8e1f289560ee864f}, {0xc491798a08a2ad4e, 0xf1a6f2bab92a27e3}, {0xf5b5d7ec8acb58a2, 0xae10af696774b1dc}, {0x9991a6f3d6bf1765, 0xacca6da1e0a8ef2a}, {0xbff610b0cc6edd3f, 0x17fd090a58d32af4}, {0xeff394dcff8a948e, 0xddfc4b4cef07f5b1}, {0x95f83d0a1fb69cd9, 0x4abdaf101564f98f}, {0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f2}, {0xea53df5fd18d5513, 0x84c86189216dc5ee}, {0x92746b9be2f8552c, 0x32fd3cf5b4e49bb5}, {0xb7118682dbb66a77, 0x3fbc8c33221dc2a2}, {0xe4d5e82392a40515, 0x0fabaf3feaa5334b}, {0x8f05b1163ba6832d, 0x29cb4d87f2a7400f}, {0xb2c71d5bca9023f8, 0x743e20e9ef511013}, {0xdf78e4b2bd342cf6, 0x914da9246b255417}, {0x8bab8eefb6409c1a, 0x1ad089b6c2f7548f}, {0xae9672aba3d0c320, 0xa184ac2473b529b2}, {0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741f}, {0x8865899617fb1871, 0x7e2fa67c7a658893}, {0xaa7eebfb9df9de8d, 0xddbb901b98feeab8}, {0xd51ea6fa85785631, 0x552a74227f3ea566}, {0x8533285c936b35de, 0xd53a88958f872760}, {0xa67ff273b8460356, 0x8a892abaf368f138}, {0xd01fef10a657842c, 0x2d2b7569b0432d86}, {0x8213f56a67f6b29b, 0x9c3b29620e29fc74}, {0xa298f2c501f45f42, 0x8349f3ba91b47b90}, {0xcb3f2f7642717713, 0x241c70a936219a74}, {0xfe0efb53d30dd4d7, 0xed238cd383aa0111}, {0x9ec95d1463e8a506, 0xf4363804324a40ab}, {0xc67bb4597ce2ce48, 0xb143c6053edcd0d6}, {0xf81aa16fdc1b81da, 0xdd94b7868e94050b}, {0x9b10a4e5e9913128, 0xca7cf2b4191c8327}, {0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f1}, {0xf24a01a73cf2dccf, 0xbc633b39673c8ced}, {0x976e41088617ca01, 0xd5be0503e085d814}, {0xbd49d14aa79dbc82, 0x4b2d8644d8a74e19}, {0xec9c459d51852ba2, 0xddf8e7d60ed1219f}, {0x93e1ab8252f33b45, 0xcabb90e5c942b504}, {0xb8da1662e7b00a17, 0x3d6a751f3b936244}, 
{0xe7109bfba19c0c9d, 0x0cc512670a783ad5}, {0x906a617d450187e2, 0x27fb2b80668b24c6}, {0xb484f9dc9641e9da, 0xb1f9f660802dedf7}, {0xe1a63853bbd26451, 0x5e7873f8a0396974}, {0x8d07e33455637eb2, 0xdb0b487b6423e1e9}, {0xb049dc016abc5e5f, 0x91ce1a9a3d2cda63}, {0xdc5c5301c56b75f7, 0x7641a140cc7810fc}, {0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9e}, {0xac2820d9623bf429, 0x546345fa9fbdcd45}, {0xd732290fbacaf133, 0xa97c177947ad4096}, {0x867f59a9d4bed6c0, 0x49ed8eabcccc485e}, {0xa81f301449ee8c70, 0x5c68f256bfff5a75}, {0xd226fc195c6a2f8c, 0x73832eec6fff3112}, {0x83585d8fd9c25db7, 0xc831fd53c5ff7eac}, {0xa42e74f3d032f525, 0xba3e7ca8b77f5e56}, {0xcd3a1230c43fb26f, 0x28ce1bd2e55f35ec}, {0x80444b5e7aa7cf85, 0x7980d163cf5b81b4}, {0xa0555e361951c366, 0xd7e105bcc3326220}, {0xc86ab5c39fa63440, 0x8dd9472bf3fefaa8}, {0xfa856334878fc150, 0xb14f98f6f0feb952}, {0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d4}, {0xc3b8358109e84f07, 0x0a862f80ec4700c9}, {0xf4a642e14c6262c8, 0xcd27bb612758c0fb}, {0x98e7e9cccfbd7dbd, 0x8038d51cb897789d}, {0xbf21e44003acdd2c, 0xe0470a63e6bd56c4}, {0xeeea5d5004981478, 0x1858ccfce06cac75}, {0x95527a5202df0ccb, 0x0f37801e0c43ebc9}, {0xbaa718e68396cffd, 0xd30560258f54e6bb}, {0xe950df20247c83fd, 0x47c6b82ef32a206a}, {0x91d28b7416cdd27e, 0x4cdc331d57fa5442}, {0xb6472e511c81471d, 0xe0133fe4adf8e953}, {0xe3d8f9e563a198e5, 0x58180fddd97723a7}, {0x8e679c2f5e44ff8f, 0x570f09eaa7ea7649}, {0xb201833b35d63f73, 0x2cd2cc6551e513db}, {0xde81e40a034bcf4f, 0xf8077f7ea65e58d2}, {0x8b112e86420f6191, 0xfb04afaf27faf783}, {0xadd57a27d29339f6, 0x79c5db9af1f9b564}, {0xd94ad8b1c7380874, 0x18375281ae7822bd}, {0x87cec76f1c830548, 0x8f2293910d0b15b6}, {0xa9c2794ae3a3c69a, 0xb2eb3875504ddb23}, {0xd433179d9c8cb841, 0x5fa60692a46151ec}, {0x849feec281d7f328, 0xdbc7c41ba6bcd334}, {0xa5c7ea73224deff3, 0x12b9b522906c0801}, {0xcf39e50feae16bef, 0xd768226b34870a01}, {0x81842f29f2cce375, 0xe6a1158300d46641}, {0xa1e53af46f801c53, 0x60495ae3c1097fd1}, {0xca5e89b18b602368, 0x385bb19cb14bdfc5}, {0xfcf62c1dee382c42, 0x46729e03dd9ed7b6}, {0x9e19db92b4e31ba9, 0x6c07a2c26a8346d2}, {0xc5a05277621be293, 0xc7098b7305241886}, { 0xf70867153aa2db38, 0xb8cbee4fc66d1ea8 } #else {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b}, {0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, {0x86a8d39ef77164bc, 0xae5dff9c02033198}, {0xd98ddaee19068c76, 0x3badd624dd9b0958}, {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, {0xe55990879ddcaabd, 0xcc420a6a101d0516}, {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, {0x95a8637627989aad, 0xdde7001379a44aa9}, {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, {0xc350000000000000, 0x0000000000000000}, {0x9dc5ada82b70b59d, 0xf020000000000000}, {0xfee50b7025c36a08, 0x02f236d04753d5b5}, {0xcde6fd5e09abcf26, 0xed4c0226b55e6f87}, {0xa6539930bf6bff45, 0x84db8346b786151d}, {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b3}, {0xd910f7ff28069da4, 0x1b2ba1518094da05}, {0xaf58416654a6babb, 0x387ac8d1970027b3}, {0x8da471a9de737e24, 0x5ceaecfed289e5d3}, {0xe4d5e82392a40515, 0x0fabaf3feaa5334b}, {0xb8da1662e7b00a17, 0x3d6a751f3b936244}, { 0x95527a5202df0ccb, 0x0f37801e0c43ebc9 } #endif }; #if FMT_USE_FULL_CACHE_DRAGONBOX return pow10_significands[k - float_info::min_k]; #else static constexpr const uint64_t powers_of_5_64[] = { 0x0000000000000001, 0x0000000000000005, 0x0000000000000019, 0x000000000000007d, 0x0000000000000271, 0x0000000000000c35, 0x0000000000003d09, 0x000000000001312d, 0x000000000005f5e1, 0x00000000001dcd65, 0x00000000009502f9, 0x0000000002e90edd, 0x000000000e8d4a51, 0x0000000048c27395, 
0x000000016bcc41e9, 0x000000071afd498d, 0x0000002386f26fc1, 0x000000b1a2bc2ec5, 0x000003782dace9d9, 0x00001158e460913d, 0x000056bc75e2d631, 0x0001b1ae4d6e2ef5, 0x000878678326eac9, 0x002a5a058fc295ed, 0x00d3c21bcecceda1, 0x0422ca8b0a00a425, 0x14adf4b7320334b9}; static const int compression_ratio = 27; // Compute base index. int cache_index = (k - float_info::min_k) / compression_ratio; int kb = cache_index * compression_ratio + float_info::min_k; int offset = k - kb; // Get base cache. uint128_fallback base_cache = pow10_significands[cache_index]; if (offset == 0) return base_cache; // Compute the required amount of bit-shift. int alpha = floor_log2_pow10(kb + offset) - floor_log2_pow10(kb) - offset; FMT_ASSERT(alpha > 0 && alpha < 64, "shifting error detected"); // Try to recover the real cache. uint64_t pow5 = powers_of_5_64[offset]; uint128_fallback recovered_cache = umul128(base_cache.high(), pow5); uint128_fallback middle_low = umul128(base_cache.low(), pow5); recovered_cache += middle_low.high(); uint64_t high_to_middle = recovered_cache.high() << (64 - alpha); uint64_t middle_to_low = recovered_cache.low() << (64 - alpha); recovered_cache = uint128_fallback{(recovered_cache.low() >> alpha) | high_to_middle, ((middle_low.low() >> alpha) | middle_to_low)}; FMT_ASSERT(recovered_cache.low() + 1 != 0, ""); return {recovered_cache.high(), recovered_cache.low() + 1}; #endif } struct compute_mul_result { carrier_uint result; bool is_integer; }; struct compute_mul_parity_result { bool parity; bool is_integer; }; static compute_mul_result compute_mul( carrier_uint u, const cache_entry_type& cache) noexcept { auto r = umul192_upper128(u, cache); return {r.high(), r.low() == 0}; } static uint32_t compute_delta(cache_entry_type const& cache, int beta) noexcept { return static_cast(cache.high() >> (64 - 1 - beta)); } static compute_mul_parity_result compute_mul_parity( carrier_uint two_f, const cache_entry_type& cache, int beta) noexcept { FMT_ASSERT(beta >= 1, ""); FMT_ASSERT(beta < 64, ""); auto r = umul192_lower128(two_f, cache); return {((r.high() >> (64 - beta)) & 1) != 0, ((r.high() << beta) | (r.low() >> (64 - beta))) == 0}; } static carrier_uint compute_left_endpoint_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept { return (cache.high() - (cache.high() >> (num_significand_bits() + 2))) >> (64 - num_significand_bits() - 1 - beta); } static carrier_uint compute_right_endpoint_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept { return (cache.high() + (cache.high() >> (num_significand_bits() + 1))) >> (64 - num_significand_bits() - 1 - beta); } static carrier_uint compute_round_up_for_shorter_interval_case( const cache_entry_type& cache, int beta) noexcept { return ((cache.high() >> (64 - num_significand_bits() - 2 - beta)) + 1) / 2; } }; // Various integer checks template bool is_left_endpoint_integer_shorter_interval(int exponent) noexcept { const int case_shorter_interval_left_endpoint_lower_threshold = 2; const int case_shorter_interval_left_endpoint_upper_threshold = 3; return exponent >= case_shorter_interval_left_endpoint_lower_threshold && exponent <= case_shorter_interval_left_endpoint_upper_threshold; } // Remove trailing zeros from n and return the number of zeros removed (float) FMT_INLINE int remove_trailing_zeros(uint32_t& n) noexcept { FMT_ASSERT(n != 0, ""); const uint32_t mod_inv_5 = 0xcccccccd; const uint32_t mod_inv_25 = mod_inv_5 * mod_inv_5; int s = 0; while (true) { auto q = rotr(n * mod_inv_25, 2); if (q > 
max_value() / 100) break; n = q; s += 2; } auto q = rotr(n * mod_inv_5, 1); if (q <= max_value() / 10) { n = q; s |= 1; } return s; } // Removes trailing zeros and returns the number of zeros removed (double) FMT_INLINE int remove_trailing_zeros(uint64_t& n) noexcept { FMT_ASSERT(n != 0, ""); // This magic number is ceil(2^90 / 10^8). constexpr uint64_t magic_number = 12379400392853802749ull; auto nm = umul128(n, magic_number); // Is n is divisible by 10^8? if ((nm.high() & ((1ull << (90 - 64)) - 1)) == 0 && nm.low() < magic_number) { // If yes, work with the quotient. auto n32 = static_cast(nm.high() >> (90 - 64)); const uint32_t mod_inv_5 = 0xcccccccd; const uint32_t mod_inv_25 = mod_inv_5 * mod_inv_5; int s = 8; while (true) { auto q = rotr(n32 * mod_inv_25, 2); if (q > max_value() / 100) break; n32 = q; s += 2; } auto q = rotr(n32 * mod_inv_5, 1); if (q <= max_value() / 10) { n32 = q; s |= 1; } n = n32; return s; } // If n is not divisible by 10^8, work with n itself. const uint64_t mod_inv_5 = 0xcccccccccccccccd; const uint64_t mod_inv_25 = mod_inv_5 * mod_inv_5; int s = 0; while (true) { auto q = rotr(n * mod_inv_25, 2); if (q > max_value() / 100) break; n = q; s += 2; } auto q = rotr(n * mod_inv_5, 1); if (q <= max_value() / 10) { n = q; s |= 1; } return s; } // The main algorithm for shorter interval case template FMT_INLINE decimal_fp shorter_interval_case(int exponent) noexcept { decimal_fp ret_value; // Compute k and beta const int minus_k = floor_log10_pow2_minus_log10_4_over_3(exponent); const int beta = exponent + floor_log2_pow10(-minus_k); // Compute xi and zi using cache_entry_type = typename cache_accessor::cache_entry_type; const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); auto xi = cache_accessor::compute_left_endpoint_for_shorter_interval_case( cache, beta); auto zi = cache_accessor::compute_right_endpoint_for_shorter_interval_case( cache, beta); // If the left endpoint is not an integer, increase it if (!is_left_endpoint_integer_shorter_interval(exponent)) ++xi; // Try bigger divisor ret_value.significand = zi / 10; // If succeed, remove trailing zeros if necessary and return if (ret_value.significand * 10 >= xi) { ret_value.exponent = minus_k + 1; ret_value.exponent += remove_trailing_zeros(ret_value.significand); return ret_value; } // Otherwise, compute the round-up of y ret_value.significand = cache_accessor::compute_round_up_for_shorter_interval_case(cache, beta); ret_value.exponent = minus_k; // When tie occurs, choose one of them according to the rule if (exponent >= float_info::shorter_interval_tie_lower_threshold && exponent <= float_info::shorter_interval_tie_upper_threshold) { ret_value.significand = ret_value.significand % 2 == 0 ? ret_value.significand : ret_value.significand - 1; } else if (ret_value.significand < xi) { ++ret_value.significand; } return ret_value; } template decimal_fp to_decimal(T x) noexcept { // Step 1: integer promotion & Schubfach multiplier calculation. using carrier_uint = typename float_info::carrier_uint; using cache_entry_type = typename cache_accessor::cache_entry_type; auto br = bit_cast(x); // Extract significand bits and exponent bits. const carrier_uint significand_mask = (static_cast(1) << num_significand_bits()) - 1; carrier_uint significand = (br & significand_mask); int exponent = static_cast((br & exponent_mask()) >> num_significand_bits()); if (exponent != 0) { // Check if normal. exponent -= exponent_bias() + num_significand_bits(); // Shorter interval case; proceed like Schubfach. 
// In fact, when exponent == 1 and significand == 0, the interval is // regular. However, it can be shown that the end-results are anyway same. if (significand == 0) return shorter_interval_case(exponent); significand |= (static_cast(1) << num_significand_bits()); } else { // Subnormal case; the interval is always regular. if (significand == 0) return {0, 0}; exponent = std::numeric_limits::min_exponent - num_significand_bits() - 1; } const bool include_left_endpoint = (significand % 2 == 0); const bool include_right_endpoint = include_left_endpoint; // Compute k and beta. const int minus_k = floor_log10_pow2(exponent) - float_info::kappa; const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); const int beta = exponent + floor_log2_pow10(-minus_k); // Compute zi and deltai. // 10^kappa <= deltai < 10^(kappa + 1) const uint32_t deltai = cache_accessor::compute_delta(cache, beta); const carrier_uint two_fc = significand << 1; // For the case of binary32, the result of integer check is not correct for // 29711844 * 2^-82 // = 6.1442653300000000008655037797566933477355632930994033813476... * 10^-18 // and 29711844 * 2^-81 // = 1.2288530660000000001731007559513386695471126586198806762695... * 10^-17, // and they are the unique counterexamples. However, since 29711844 is even, // this does not cause any problem for the endpoints calculations; it can only // cause a problem when we need to perform integer check for the center. // Fortunately, with these inputs, that branch is never executed, so we are // fine. const typename cache_accessor::compute_mul_result z_mul = cache_accessor::compute_mul((two_fc | 1) << beta, cache); // Step 2: Try larger divisor; remove trailing zeros if necessary. // Using an upper bound on zi, we might be able to optimize the division // better than the compiler; we are computing zi / big_divisor here. decimal_fp ret_value; ret_value.significand = divide_by_10_to_kappa_plus_1(z_mul.result); uint32_t r = static_cast(z_mul.result - float_info::big_divisor * ret_value.significand); if (r < deltai) { // Exclude the right endpoint if necessary. if (r == 0 && (z_mul.is_integer & !include_right_endpoint)) { --ret_value.significand; r = float_info::big_divisor; goto small_divisor_case_label; } } else if (r > deltai) { goto small_divisor_case_label; } else { // r == deltai; compare fractional parts. const typename cache_accessor::compute_mul_parity_result x_mul = cache_accessor::compute_mul_parity(two_fc - 1, cache, beta); if (!(x_mul.parity | (x_mul.is_integer & include_left_endpoint))) goto small_divisor_case_label; } ret_value.exponent = minus_k + float_info::kappa + 1; // We may need to remove trailing zeros. ret_value.exponent += remove_trailing_zeros(ret_value.significand); return ret_value; // Step 3: Find the significand with the smaller divisor. small_divisor_case_label: ret_value.significand *= 10; ret_value.exponent = minus_k + float_info::kappa; uint32_t dist = r - (deltai / 2) + (float_info::small_divisor / 2); const bool approx_y_parity = ((dist ^ (float_info::small_divisor / 2)) & 1) != 0; // Is dist divisible by 10^kappa? const bool divisible_by_small_divisor = check_divisibility_and_divide_by_pow10::kappa>(dist); // Add dist / 10^kappa to the significand. ret_value.significand += dist; if (!divisible_by_small_divisor) return ret_value; // Check z^(f) >= epsilon^(f). // We have either yi == zi - epsiloni or yi == (zi - epsiloni) - 1, // where yi == zi - epsiloni if and only if z^(f) >= epsilon^(f). 
// Since there are only 2 possibilities, we only need to care about the // parity. Also, zi and r should have the same parity since the divisor // is an even number. const auto y_mul = cache_accessor::compute_mul_parity(two_fc, cache, beta); // If z^(f) >= epsilon^(f), we might have a tie when z^(f) == epsilon^(f), // or equivalently, when y is an integer. if (y_mul.parity != approx_y_parity) --ret_value.significand; else if (y_mul.is_integer & (ret_value.significand % 2 != 0)) --ret_value.significand; return ret_value; } } // namespace dragonbox #ifdef _MSC_VER FMT_FUNC auto fmt_snprintf(char* buf, size_t size, const char* fmt, ...) -> int { auto args = va_list(); va_start(args, fmt); int result = vsnprintf_s(buf, size, _TRUNCATE, fmt, args); va_end(args); return result; } #endif } // namespace detail template <> struct formatter { FMT_CONSTEXPR auto parse(format_parse_context& ctx) -> format_parse_context::iterator { return ctx.begin(); } template auto format(const detail::bigint& n, FormatContext& ctx) const -> typename FormatContext::iterator { auto out = ctx.out(); bool first = true; for (auto i = n.bigits_.size(); i > 0; --i) { auto value = n.bigits_[i - 1u]; if (first) { out = format_to(out, FMT_STRING("{:x}"), value); first = false; continue; } out = format_to(out, FMT_STRING("{:08x}"), value); } if (n.exp_ > 0) out = format_to(out, FMT_STRING("p{}"), n.exp_ * detail::bigint::bigit_bits); return out; } }; FMT_FUNC detail::utf8_to_utf16::utf8_to_utf16(string_view s) { for_each_codepoint(s, [this](uint32_t cp, string_view) { if (cp == invalid_code_point) FMT_THROW(std::runtime_error("invalid utf8")); if (cp <= 0xFFFF) { buffer_.push_back(static_cast(cp)); } else { cp -= 0x10000; buffer_.push_back(static_cast(0xD800 + (cp >> 10))); buffer_.push_back(static_cast(0xDC00 + (cp & 0x3FF))); } return true; }); buffer_.push_back(0); } FMT_FUNC void format_system_error(detail::buffer& out, int error_code, const char* message) noexcept { FMT_TRY { auto ec = std::error_code(error_code, std::generic_category()); write(std::back_inserter(out), std::system_error(ec, message).what()); return; } FMT_CATCH(...) {} format_error_code(out, error_code, message); } FMT_FUNC void report_system_error(int error_code, const char* message) noexcept { report_error(format_system_error, error_code, message); } FMT_FUNC std::string vformat(string_view fmt, format_args args) { // Don't optimize the "{}" case to keep the binary size small and because it // can be better optimized in fmt::format anyway. auto buffer = memory_buffer(); detail::vformat_to(buffer, fmt, args); return to_string(buffer); } namespace detail { #ifdef _WIN32 using dword = conditional_t; extern "C" __declspec(dllimport) int __stdcall WriteConsoleW( // void*, const void*, dword, dword*, void*); FMT_FUNC bool write_console(std::FILE* f, string_view text) { auto fd = _fileno(f); if (_isatty(fd)) { detail::utf8_to_utf16 u16(string_view(text.data(), text.size())); auto written = detail::dword(); if (detail::WriteConsoleW(reinterpret_cast(_get_osfhandle(fd)), u16.c_str(), static_cast(u16.size()), &written, nullptr)) { return true; } } // We return false if the file descriptor was not TTY, or it was but // SetConsoleW failed which can happen if the output has been redirected to // NUL. In both cases when we return false, we should attempt to do regular // write via fwrite or std::ostream::write. 
return false; } #endif FMT_FUNC void print(std::FILE* f, string_view text) { #ifdef _WIN32 if (write_console(f, text)) return; #endif detail::fwrite_fully(text.data(), 1, text.size(), f); } } // namespace detail FMT_FUNC void vprint(std::FILE* f, string_view format_str, format_args args) { memory_buffer buffer; detail::vformat_to(buffer, format_str, args); detail::print(f, {buffer.data(), buffer.size()}); } #ifdef _WIN32 // Print assuming legacy (non-Unicode) encoding. FMT_FUNC void detail::vprint_mojibake(std::FILE* f, string_view format_str, format_args args) { memory_buffer buffer; detail::vformat_to(buffer, format_str, basic_format_args>(args)); fwrite_fully(buffer.data(), 1, buffer.size(), f); } #endif FMT_FUNC void vprint(string_view format_str, format_args args) { vprint(stdout, format_str, args); } namespace detail { struct singleton { unsigned char upper; unsigned char lower_count; }; inline auto is_printable(uint16_t x, const singleton* singletons, size_t singletons_size, const unsigned char* singleton_lowers, const unsigned char* normal, size_t normal_size) -> bool { auto upper = x >> 8; auto lower_start = 0; for (size_t i = 0; i < singletons_size; ++i) { auto s = singletons[i]; auto lower_end = lower_start + s.lower_count; if (upper < s.upper) break; if (upper == s.upper) { for (auto j = lower_start; j < lower_end; ++j) { if (singleton_lowers[j] == (x & 0xff)) return false; } } lower_start = lower_end; } auto xsigned = static_cast(x); auto current = true; for (size_t i = 0; i < normal_size; ++i) { auto v = static_cast(normal[i]); auto len = (v & 0x80) != 0 ? (v & 0x7f) << 8 | normal[++i] : v; xsigned -= len; if (xsigned < 0) break; current = !current; } return current; } // This code is generated by support/printable.py. FMT_FUNC auto is_printable(uint32_t cp) -> bool { static constexpr singleton singletons0[] = { {0x00, 1}, {0x03, 5}, {0x05, 6}, {0x06, 3}, {0x07, 6}, {0x08, 8}, {0x09, 17}, {0x0a, 28}, {0x0b, 25}, {0x0c, 20}, {0x0d, 16}, {0x0e, 13}, {0x0f, 4}, {0x10, 3}, {0x12, 18}, {0x13, 9}, {0x16, 1}, {0x17, 5}, {0x18, 2}, {0x19, 3}, {0x1a, 7}, {0x1c, 2}, {0x1d, 1}, {0x1f, 22}, {0x20, 3}, {0x2b, 3}, {0x2c, 2}, {0x2d, 11}, {0x2e, 1}, {0x30, 3}, {0x31, 2}, {0x32, 1}, {0xa7, 2}, {0xa9, 2}, {0xaa, 4}, {0xab, 8}, {0xfa, 2}, {0xfb, 5}, {0xfd, 4}, {0xfe, 3}, {0xff, 9}, }; static constexpr unsigned char singletons0_lower[] = { 0xad, 0x78, 0x79, 0x8b, 0x8d, 0xa2, 0x30, 0x57, 0x58, 0x8b, 0x8c, 0x90, 0x1c, 0x1d, 0xdd, 0x0e, 0x0f, 0x4b, 0x4c, 0xfb, 0xfc, 0x2e, 0x2f, 0x3f, 0x5c, 0x5d, 0x5f, 0xb5, 0xe2, 0x84, 0x8d, 0x8e, 0x91, 0x92, 0xa9, 0xb1, 0xba, 0xbb, 0xc5, 0xc6, 0xc9, 0xca, 0xde, 0xe4, 0xe5, 0xff, 0x00, 0x04, 0x11, 0x12, 0x29, 0x31, 0x34, 0x37, 0x3a, 0x3b, 0x3d, 0x49, 0x4a, 0x5d, 0x84, 0x8e, 0x92, 0xa9, 0xb1, 0xb4, 0xba, 0xbb, 0xc6, 0xca, 0xce, 0xcf, 0xe4, 0xe5, 0x00, 0x04, 0x0d, 0x0e, 0x11, 0x12, 0x29, 0x31, 0x34, 0x3a, 0x3b, 0x45, 0x46, 0x49, 0x4a, 0x5e, 0x64, 0x65, 0x84, 0x91, 0x9b, 0x9d, 0xc9, 0xce, 0xcf, 0x0d, 0x11, 0x29, 0x45, 0x49, 0x57, 0x64, 0x65, 0x8d, 0x91, 0xa9, 0xb4, 0xba, 0xbb, 0xc5, 0xc9, 0xdf, 0xe4, 0xe5, 0xf0, 0x0d, 0x11, 0x45, 0x49, 0x64, 0x65, 0x80, 0x84, 0xb2, 0xbc, 0xbe, 0xbf, 0xd5, 0xd7, 0xf0, 0xf1, 0x83, 0x85, 0x8b, 0xa4, 0xa6, 0xbe, 0xbf, 0xc5, 0xc7, 0xce, 0xcf, 0xda, 0xdb, 0x48, 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49, 0x4e, 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e, 0x8f, 0xb1, 0xb6, 0xb7, 0xbf, 0xc1, 0xc6, 0xc7, 0xd7, 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7, 0xfe, 0xff, 0x80, 0x0d, 0x6d, 0x71, 0xde, 0xdf, 0x0e, 0x0f, 0x1f, 0x6e, 0x6f, 0x1c, 0x1d, 0x5f, 
0x7d, 0x7e, 0xae, 0xaf, 0xbb, 0xbc, 0xfa, 0x16, 0x17, 0x1e, 0x1f, 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c, 0x5e, 0x7e, 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc, 0xf0, 0xf1, 0xf5, 0x72, 0x73, 0x8f, 0x74, 0x75, 0x96, 0x2f, 0x5f, 0x26, 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf, 0xc7, 0xcf, 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98, 0x30, 0x8f, 0x1f, 0xc0, 0xc1, 0xce, 0xff, 0x4e, 0x4f, 0x5a, 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27, 0x2f, 0xee, 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f, 0x42, 0x45, 0x90, 0x91, 0xfe, 0xff, 0x53, 0x67, 0x75, 0xc8, 0xc9, 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, 0xfe, 0xff, }; static constexpr singleton singletons1[] = { {0x00, 6}, {0x01, 1}, {0x03, 1}, {0x04, 2}, {0x08, 8}, {0x09, 2}, {0x0a, 5}, {0x0b, 2}, {0x0e, 4}, {0x10, 1}, {0x11, 2}, {0x12, 5}, {0x13, 17}, {0x14, 1}, {0x15, 2}, {0x17, 2}, {0x19, 13}, {0x1c, 5}, {0x1d, 8}, {0x24, 1}, {0x6a, 3}, {0x6b, 2}, {0xbc, 2}, {0xd1, 2}, {0xd4, 12}, {0xd5, 9}, {0xd6, 2}, {0xd7, 2}, {0xda, 1}, {0xe0, 5}, {0xe1, 2}, {0xe8, 2}, {0xee, 32}, {0xf0, 4}, {0xf8, 2}, {0xf9, 2}, {0xfa, 2}, {0xfb, 1}, }; static constexpr unsigned char singletons1_lower[] = { 0x0c, 0x27, 0x3b, 0x3e, 0x4e, 0x4f, 0x8f, 0x9e, 0x9e, 0x9f, 0x06, 0x07, 0x09, 0x36, 0x3d, 0x3e, 0x56, 0xf3, 0xd0, 0xd1, 0x04, 0x14, 0x18, 0x36, 0x37, 0x56, 0x57, 0x7f, 0xaa, 0xae, 0xaf, 0xbd, 0x35, 0xe0, 0x12, 0x87, 0x89, 0x8e, 0x9e, 0x04, 0x0d, 0x0e, 0x11, 0x12, 0x29, 0x31, 0x34, 0x3a, 0x45, 0x46, 0x49, 0x4a, 0x4e, 0x4f, 0x64, 0x65, 0x5c, 0xb6, 0xb7, 0x1b, 0x1c, 0x07, 0x08, 0x0a, 0x0b, 0x14, 0x17, 0x36, 0x39, 0x3a, 0xa8, 0xa9, 0xd8, 0xd9, 0x09, 0x37, 0x90, 0x91, 0xa8, 0x07, 0x0a, 0x3b, 0x3e, 0x66, 0x69, 0x8f, 0x92, 0x6f, 0x5f, 0xee, 0xef, 0x5a, 0x62, 0x9a, 0x9b, 0x27, 0x28, 0x55, 0x9d, 0xa0, 0xa1, 0xa3, 0xa4, 0xa7, 0xa8, 0xad, 0xba, 0xbc, 0xc4, 0x06, 0x0b, 0x0c, 0x15, 0x1d, 0x3a, 0x3f, 0x45, 0x51, 0xa6, 0xa7, 0xcc, 0xcd, 0xa0, 0x07, 0x19, 0x1a, 0x22, 0x25, 0x3e, 0x3f, 0xc5, 0xc6, 0x04, 0x20, 0x23, 0x25, 0x26, 0x28, 0x33, 0x38, 0x3a, 0x48, 0x4a, 0x4c, 0x50, 0x53, 0x55, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x63, 0x65, 0x66, 0x6b, 0x73, 0x78, 0x7d, 0x7f, 0x8a, 0xa4, 0xaa, 0xaf, 0xb0, 0xc0, 0xd0, 0xae, 0xaf, 0x79, 0xcc, 0x6e, 0x6f, 0x93, }; static constexpr unsigned char normal0[] = { 0x00, 0x20, 0x5f, 0x22, 0x82, 0xdf, 0x04, 0x82, 0x44, 0x08, 0x1b, 0x04, 0x06, 0x11, 0x81, 0xac, 0x0e, 0x80, 0xab, 0x35, 0x28, 0x0b, 0x80, 0xe0, 0x03, 0x19, 0x08, 0x01, 0x04, 0x2f, 0x04, 0x34, 0x04, 0x07, 0x03, 0x01, 0x07, 0x06, 0x07, 0x11, 0x0a, 0x50, 0x0f, 0x12, 0x07, 0x55, 0x07, 0x03, 0x04, 0x1c, 0x0a, 0x09, 0x03, 0x08, 0x03, 0x07, 0x03, 0x02, 0x03, 0x03, 0x03, 0x0c, 0x04, 0x05, 0x03, 0x0b, 0x06, 0x01, 0x0e, 0x15, 0x05, 0x3a, 0x03, 0x11, 0x07, 0x06, 0x05, 0x10, 0x07, 0x57, 0x07, 0x02, 0x07, 0x15, 0x0d, 0x50, 0x04, 0x43, 0x03, 0x2d, 0x03, 0x01, 0x04, 0x11, 0x06, 0x0f, 0x0c, 0x3a, 0x04, 0x1d, 0x25, 0x5f, 0x20, 0x6d, 0x04, 0x6a, 0x25, 0x80, 0xc8, 0x05, 0x82, 0xb0, 0x03, 0x1a, 0x06, 0x82, 0xfd, 0x03, 0x59, 0x07, 0x15, 0x0b, 0x17, 0x09, 0x14, 0x0c, 0x14, 0x0c, 0x6a, 0x06, 0x0a, 0x06, 0x1a, 0x06, 0x59, 0x07, 0x2b, 0x05, 0x46, 0x0a, 0x2c, 0x04, 0x0c, 0x04, 0x01, 0x03, 0x31, 0x0b, 0x2c, 0x04, 0x1a, 0x06, 0x0b, 0x03, 0x80, 0xac, 0x06, 0x0a, 0x06, 0x21, 0x3f, 0x4c, 0x04, 0x2d, 0x03, 0x74, 0x08, 0x3c, 0x03, 0x0f, 0x03, 0x3c, 0x07, 0x38, 0x08, 0x2b, 0x05, 0x82, 0xff, 0x11, 0x18, 0x08, 0x2f, 0x11, 0x2d, 0x03, 0x20, 0x10, 0x21, 0x0f, 0x80, 0x8c, 0x04, 0x82, 0x97, 0x19, 0x0b, 0x15, 0x88, 0x94, 0x05, 0x2f, 0x05, 0x3b, 0x07, 0x02, 0x0e, 0x18, 0x09, 0x80, 0xb3, 0x2d, 0x74, 0x0c, 0x80, 0xd6, 0x1a, 0x0c, 0x05, 0x80, 0xff, 0x05, 0x80, 0xdf, 0x0c, 0xee, 0x0d, 
0x03, 0x84, 0x8d, 0x03, 0x37, 0x09, 0x81, 0x5c, 0x14, 0x80, 0xb8, 0x08, 0x80, 0xcb, 0x2a, 0x38, 0x03, 0x0a, 0x06, 0x38, 0x08, 0x46, 0x08, 0x0c, 0x06, 0x74, 0x0b, 0x1e, 0x03, 0x5a, 0x04, 0x59, 0x09, 0x80, 0x83, 0x18, 0x1c, 0x0a, 0x16, 0x09, 0x4c, 0x04, 0x80, 0x8a, 0x06, 0xab, 0xa4, 0x0c, 0x17, 0x04, 0x31, 0xa1, 0x04, 0x81, 0xda, 0x26, 0x07, 0x0c, 0x05, 0x05, 0x80, 0xa5, 0x11, 0x81, 0x6d, 0x10, 0x78, 0x28, 0x2a, 0x06, 0x4c, 0x04, 0x80, 0x8d, 0x04, 0x80, 0xbe, 0x03, 0x1b, 0x03, 0x0f, 0x0d, }; static constexpr unsigned char normal1[] = { 0x5e, 0x22, 0x7b, 0x05, 0x03, 0x04, 0x2d, 0x03, 0x66, 0x03, 0x01, 0x2f, 0x2e, 0x80, 0x82, 0x1d, 0x03, 0x31, 0x0f, 0x1c, 0x04, 0x24, 0x09, 0x1e, 0x05, 0x2b, 0x05, 0x44, 0x04, 0x0e, 0x2a, 0x80, 0xaa, 0x06, 0x24, 0x04, 0x24, 0x04, 0x28, 0x08, 0x34, 0x0b, 0x01, 0x80, 0x90, 0x81, 0x37, 0x09, 0x16, 0x0a, 0x08, 0x80, 0x98, 0x39, 0x03, 0x63, 0x08, 0x09, 0x30, 0x16, 0x05, 0x21, 0x03, 0x1b, 0x05, 0x01, 0x40, 0x38, 0x04, 0x4b, 0x05, 0x2f, 0x04, 0x0a, 0x07, 0x09, 0x07, 0x40, 0x20, 0x27, 0x04, 0x0c, 0x09, 0x36, 0x03, 0x3a, 0x05, 0x1a, 0x07, 0x04, 0x0c, 0x07, 0x50, 0x49, 0x37, 0x33, 0x0d, 0x33, 0x07, 0x2e, 0x08, 0x0a, 0x81, 0x26, 0x52, 0x4e, 0x28, 0x08, 0x2a, 0x56, 0x1c, 0x14, 0x17, 0x09, 0x4e, 0x04, 0x1e, 0x0f, 0x43, 0x0e, 0x19, 0x07, 0x0a, 0x06, 0x48, 0x08, 0x27, 0x09, 0x75, 0x0b, 0x3f, 0x41, 0x2a, 0x06, 0x3b, 0x05, 0x0a, 0x06, 0x51, 0x06, 0x01, 0x05, 0x10, 0x03, 0x05, 0x80, 0x8b, 0x62, 0x1e, 0x48, 0x08, 0x0a, 0x80, 0xa6, 0x5e, 0x22, 0x45, 0x0b, 0x0a, 0x06, 0x0d, 0x13, 0x39, 0x07, 0x0a, 0x36, 0x2c, 0x04, 0x10, 0x80, 0xc0, 0x3c, 0x64, 0x53, 0x0c, 0x48, 0x09, 0x0a, 0x46, 0x45, 0x1b, 0x48, 0x08, 0x53, 0x1d, 0x39, 0x81, 0x07, 0x46, 0x0a, 0x1d, 0x03, 0x47, 0x49, 0x37, 0x03, 0x0e, 0x08, 0x0a, 0x06, 0x39, 0x07, 0x0a, 0x81, 0x36, 0x19, 0x80, 0xb7, 0x01, 0x0f, 0x32, 0x0d, 0x83, 0x9b, 0x66, 0x75, 0x0b, 0x80, 0xc4, 0x8a, 0xbc, 0x84, 0x2f, 0x8f, 0xd1, 0x82, 0x47, 0xa1, 0xb9, 0x82, 0x39, 0x07, 0x2a, 0x04, 0x02, 0x60, 0x26, 0x0a, 0x46, 0x0a, 0x28, 0x05, 0x13, 0x82, 0xb0, 0x5b, 0x65, 0x4b, 0x04, 0x39, 0x07, 0x11, 0x40, 0x05, 0x0b, 0x02, 0x0e, 0x97, 0xf8, 0x08, 0x84, 0xd6, 0x2a, 0x09, 0xa2, 0xf7, 0x81, 0x1f, 0x31, 0x03, 0x11, 0x04, 0x08, 0x81, 0x8c, 0x89, 0x04, 0x6b, 0x05, 0x0d, 0x03, 0x09, 0x07, 0x10, 0x93, 0x60, 0x80, 0xf6, 0x0a, 0x73, 0x08, 0x6e, 0x17, 0x46, 0x80, 0x9a, 0x14, 0x0c, 0x57, 0x09, 0x19, 0x80, 0x87, 0x81, 0x47, 0x03, 0x85, 0x42, 0x0f, 0x15, 0x85, 0x50, 0x2b, 0x80, 0xd5, 0x2d, 0x03, 0x1a, 0x04, 0x02, 0x81, 0x70, 0x3a, 0x05, 0x01, 0x85, 0x00, 0x80, 0xd7, 0x29, 0x4c, 0x04, 0x0a, 0x04, 0x02, 0x83, 0x11, 0x44, 0x4c, 0x3d, 0x80, 0xc2, 0x3c, 0x06, 0x01, 0x04, 0x55, 0x05, 0x1b, 0x34, 0x02, 0x81, 0x0e, 0x2c, 0x04, 0x64, 0x0c, 0x56, 0x0a, 0x80, 0xae, 0x38, 0x1d, 0x0d, 0x2c, 0x04, 0x09, 0x07, 0x02, 0x0e, 0x06, 0x80, 0x9a, 0x83, 0xd8, 0x08, 0x0d, 0x03, 0x0d, 0x03, 0x74, 0x0c, 0x59, 0x07, 0x0c, 0x14, 0x0c, 0x04, 0x38, 0x08, 0x0a, 0x06, 0x28, 0x08, 0x22, 0x4e, 0x81, 0x54, 0x0c, 0x15, 0x03, 0x03, 0x05, 0x07, 0x09, 0x19, 0x07, 0x07, 0x09, 0x03, 0x0d, 0x07, 0x29, 0x80, 0xcb, 0x25, 0x0a, 0x84, 0x06, }; auto lower = static_cast(cp); if (cp < 0x10000) { return is_printable(lower, singletons0, sizeof(singletons0) / sizeof(*singletons0), singletons0_lower, normal0, sizeof(normal0)); } if (cp < 0x20000) { return is_printable(lower, singletons1, sizeof(singletons1) / sizeof(*singletons1), singletons1_lower, normal1, sizeof(normal1)); } if (0x2a6de <= cp && cp < 0x2a700) return false; if (0x2b735 <= cp && cp < 0x2b740) return false; if (0x2b81e <= cp && cp < 0x2b820) return false; if (0x2cea2 <= 
cp && cp < 0x2ceb0) return false; if (0x2ebe1 <= cp && cp < 0x2f800) return false; if (0x2fa1e <= cp && cp < 0x30000) return false; if (0x3134b <= cp && cp < 0xe0100) return false; if (0xe01f0 <= cp && cp < 0x110000) return false; return cp < 0x110000; } } // namespace detail FMT_END_NAMESPACE #endif // FMT_FORMAT_INL_H_ mergerfs-2.40.2/libfuse/lib/fmt/format.h000066400000000000000000004551051457016576200200660ustar00rootroot00000000000000/* Formatting library for C++ Copyright (c) 2012 - present, Victor Zverovich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --- Optional exception to the license --- As an exception, if, as a result of your compiling your source code, portions of this Software are embedded into a machine-executable object form of such source code, you may redistribute such embedded portions in such object form without including the above copyright and permission notices. */ #ifndef FMT_FORMAT_H_ #define FMT_FORMAT_H_ #include // std::signbit #include // uint32_t #include // std::memcpy #include // std::numeric_limits #include // std::uninitialized_copy #include // std::runtime_error #include // std::system_error #ifdef __cpp_lib_bit_cast # include // std::bitcast #endif #include "core.h" #if FMT_GCC_VERSION # define FMT_GCC_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) #else # define FMT_GCC_VISIBILITY_HIDDEN #endif #ifdef __NVCC__ # define FMT_CUDA_VERSION (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__) #else # define FMT_CUDA_VERSION 0 #endif #ifdef __has_builtin # define FMT_HAS_BUILTIN(x) __has_builtin(x) #else # define FMT_HAS_BUILTIN(x) 0 #endif #if FMT_GCC_VERSION || FMT_CLANG_VERSION # define FMT_NOINLINE __attribute__((noinline)) #else # define FMT_NOINLINE #endif #if FMT_MSC_VERSION # define FMT_MSC_DEFAULT = default #else # define FMT_MSC_DEFAULT #endif #ifndef FMT_THROW # if FMT_EXCEPTIONS # if FMT_MSC_VERSION || defined(__NVCC__) FMT_BEGIN_NAMESPACE namespace detail { template inline void do_throw(const Exception& x) { // Silence unreachable code warnings in MSVC and NVCC because these // are nearly impossible to fix in a generic code. 
volatile bool b = true; if (b) throw x; } } // namespace detail FMT_END_NAMESPACE # define FMT_THROW(x) detail::do_throw(x) # else # define FMT_THROW(x) throw x # endif # else # define FMT_THROW(x) \ do { \ FMT_ASSERT(false, (x).what()); \ } while (false) # endif #endif #if FMT_EXCEPTIONS # define FMT_TRY try # define FMT_CATCH(x) catch (x) #else # define FMT_TRY if (true) # define FMT_CATCH(x) if (false) #endif #ifndef FMT_MAYBE_UNUSED # if FMT_HAS_CPP17_ATTRIBUTE(maybe_unused) # define FMT_MAYBE_UNUSED [[maybe_unused]] # else # define FMT_MAYBE_UNUSED # endif #endif #ifndef FMT_USE_USER_DEFINED_LITERALS // EDG based compilers (Intel, NVIDIA, Elbrus, etc), GCC and MSVC support UDLs. # if (FMT_HAS_FEATURE(cxx_user_literals) || FMT_GCC_VERSION >= 407 || \ FMT_MSC_VERSION >= 1900) && \ (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= /* UDL feature */ 480) # define FMT_USE_USER_DEFINED_LITERALS 1 # else # define FMT_USE_USER_DEFINED_LITERALS 0 # endif #endif // Defining FMT_REDUCE_INT_INSTANTIATIONS to 1, will reduce the number of // integer formatter template instantiations to just one by only using the // largest integer type. This results in a reduction in binary size but will // cause a decrease in integer formatting performance. #if !defined(FMT_REDUCE_INT_INSTANTIATIONS) # define FMT_REDUCE_INT_INSTANTIATIONS 0 #endif // __builtin_clz is broken in clang with Microsoft CodeGen: // https://github.com/fmtlib/fmt/issues/519. #if !FMT_MSC_VERSION # if FMT_HAS_BUILTIN(__builtin_clz) || FMT_GCC_VERSION || FMT_ICC_VERSION # define FMT_BUILTIN_CLZ(n) __builtin_clz(n) # endif # if FMT_HAS_BUILTIN(__builtin_clzll) || FMT_GCC_VERSION || FMT_ICC_VERSION # define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n) # endif #endif // __builtin_ctz is broken in Intel Compiler Classic on Windows: // https://github.com/fmtlib/fmt/issues/2510. #ifndef __ICL # if FMT_HAS_BUILTIN(__builtin_ctz) || FMT_GCC_VERSION || FMT_ICC_VERSION || \ defined(__NVCOMPILER) # define FMT_BUILTIN_CTZ(n) __builtin_ctz(n) # endif # if FMT_HAS_BUILTIN(__builtin_ctzll) || FMT_GCC_VERSION || \ FMT_ICC_VERSION || defined(__NVCOMPILER) # define FMT_BUILTIN_CTZLL(n) __builtin_ctzll(n) # endif #endif #if FMT_MSC_VERSION # include // _BitScanReverse[64], _BitScanForward[64], _umul128 #endif // Some compilers masquerade as both MSVC and GCC-likes or otherwise support // __builtin_clz and __builtin_clzll, so only define FMT_BUILTIN_CLZ using the // MSVC intrinsics if the clz and clzll builtins are not available. #if FMT_MSC_VERSION && !defined(FMT_BUILTIN_CLZLL) && \ !defined(FMT_BUILTIN_CTZLL) FMT_BEGIN_NAMESPACE namespace detail { // Avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning. # if !defined(__clang__) # pragma intrinsic(_BitScanForward) # pragma intrinsic(_BitScanReverse) # if defined(_WIN64) # pragma intrinsic(_BitScanForward64) # pragma intrinsic(_BitScanReverse64) # endif # endif inline auto clz(uint32_t x) -> int { unsigned long r = 0; _BitScanReverse(&r, x); FMT_ASSERT(x != 0, ""); // Static analysis complains about using uninitialized data // "r", but the only way that can happen is if "x" is 0, // which the callers guarantee to not happen. FMT_MSC_WARNING(suppress : 6102) return 31 ^ static_cast(r); } # define FMT_BUILTIN_CLZ(n) detail::clz(n) inline auto clzll(uint64_t x) -> int { unsigned long r = 0; # ifdef _WIN64 _BitScanReverse64(&r, x); # else // Scan the high 32 bits. if (_BitScanReverse(&r, static_cast(x >> 32))) return 63 ^ (r + 32); // Scan the low 32 bits. 
_BitScanReverse(&r, static_cast(x)); # endif FMT_ASSERT(x != 0, ""); FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. return 63 ^ static_cast(r); } # define FMT_BUILTIN_CLZLL(n) detail::clzll(n) inline auto ctz(uint32_t x) -> int { unsigned long r = 0; _BitScanForward(&r, x); FMT_ASSERT(x != 0, ""); FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. return static_cast(r); } # define FMT_BUILTIN_CTZ(n) detail::ctz(n) inline auto ctzll(uint64_t x) -> int { unsigned long r = 0; FMT_ASSERT(x != 0, ""); FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. # ifdef _WIN64 _BitScanForward64(&r, x); # else // Scan the low 32 bits. if (_BitScanForward(&r, static_cast(x))) return static_cast(r); // Scan the high 32 bits. _BitScanForward(&r, static_cast(x >> 32)); r += 32; # endif return static_cast(r); } # define FMT_BUILTIN_CTZLL(n) detail::ctzll(n) } // namespace detail FMT_END_NAMESPACE #endif FMT_BEGIN_NAMESPACE namespace detail { FMT_CONSTEXPR inline void abort_fuzzing_if(bool condition) { ignore_unused(condition); #ifdef FMT_FUZZ if (condition) throw std::runtime_error("fuzzing limit reached"); #endif } template struct string_literal { static constexpr CharT value[sizeof...(C)] = {C...}; constexpr operator basic_string_view() const { return {value, sizeof...(C)}; } }; #if FMT_CPLUSPLUS < 201703L template constexpr CharT string_literal::value[sizeof...(C)]; #endif template class formatbuf : public Streambuf { private: using char_type = typename Streambuf::char_type; using streamsize = decltype(std::declval().sputn(nullptr, 0)); using int_type = typename Streambuf::int_type; using traits_type = typename Streambuf::traits_type; buffer& buffer_; public: explicit formatbuf(buffer& buf) : buffer_(buf) {} protected: // The put area is always empty. This makes the implementation simpler and has // the advantage that the streambuf and the buffer are always in sync and // sputc never writes into uninitialized memory. A disadvantage is that each // call to sputc always results in a (virtual) call to overflow. There is no // disadvantage here for sputn since this always results in a call to xsputn. auto overflow(int_type ch) -> int_type override { if (!traits_type::eq_int_type(ch, traits_type::eof())) buffer_.push_back(static_cast(ch)); return ch; } auto xsputn(const char_type* s, streamsize count) -> streamsize override { buffer_.append(s, s + count); return count; } }; // Implementation of std::bit_cast for pre-C++20. template FMT_CONSTEXPR20 auto bit_cast(const From& from) -> To { #ifdef __cpp_lib_bit_cast if (is_constant_evaluated()) return std::bit_cast(from); #endif auto to = To(); // The cast suppresses a bogus -Wclass-memaccess on GCC. 
std::memcpy(static_cast(&to), &from, sizeof(to)); return to; } inline auto is_big_endian() -> bool { #ifdef _WIN32 return false; #elif defined(__BIG_ENDIAN__) return true; #elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) return __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; #else struct bytes { char data[sizeof(int)]; }; return bit_cast(1).data[0] == 0; #endif } class uint128_fallback { private: uint64_t lo_, hi_; friend uint128_fallback umul128(uint64_t x, uint64_t y) noexcept; public: constexpr uint128_fallback(uint64_t hi, uint64_t lo) : lo_(lo), hi_(hi) {} constexpr uint128_fallback(uint64_t value = 0) : lo_(value), hi_(0) {} constexpr uint64_t high() const noexcept { return hi_; } constexpr uint64_t low() const noexcept { return lo_; } template ::value)> constexpr explicit operator T() const { return static_cast(lo_); } friend constexpr auto operator==(const uint128_fallback& lhs, const uint128_fallback& rhs) -> bool { return lhs.hi_ == rhs.hi_ && lhs.lo_ == rhs.lo_; } friend constexpr auto operator!=(const uint128_fallback& lhs, const uint128_fallback& rhs) -> bool { return !(lhs == rhs); } friend constexpr auto operator>(const uint128_fallback& lhs, const uint128_fallback& rhs) -> bool { return lhs.hi_ != rhs.hi_ ? lhs.hi_ > rhs.hi_ : lhs.lo_ > rhs.lo_; } friend constexpr auto operator|(const uint128_fallback& lhs, const uint128_fallback& rhs) -> uint128_fallback { return {lhs.hi_ | rhs.hi_, lhs.lo_ | rhs.lo_}; } friend constexpr auto operator&(const uint128_fallback& lhs, const uint128_fallback& rhs) -> uint128_fallback { return {lhs.hi_ & rhs.hi_, lhs.lo_ & rhs.lo_}; } friend auto operator+(const uint128_fallback& lhs, const uint128_fallback& rhs) -> uint128_fallback { auto result = uint128_fallback(lhs); result += rhs; return result; } friend auto operator*(const uint128_fallback& lhs, uint32_t rhs) -> uint128_fallback { FMT_ASSERT(lhs.hi_ == 0, ""); uint64_t hi = (lhs.lo_ >> 32) * rhs; uint64_t lo = (lhs.lo_ & ~uint32_t()) * rhs; uint64_t new_lo = (hi << 32) + lo; return {(hi >> 32) + (new_lo < lo ? 1 : 0), new_lo}; } friend auto operator-(const uint128_fallback& lhs, uint64_t rhs) -> uint128_fallback { return {lhs.hi_ - (lhs.lo_ < rhs ? 1 : 0), lhs.lo_ - rhs}; } FMT_CONSTEXPR auto operator>>(int shift) const -> uint128_fallback { if (shift == 64) return {0, hi_}; if (shift > 64) return uint128_fallback(0, hi_) >> (shift - 64); return {hi_ >> shift, (hi_ << (64 - shift)) | (lo_ >> shift)}; } FMT_CONSTEXPR auto operator<<(int shift) const -> uint128_fallback { if (shift == 64) return {lo_, 0}; if (shift > 64) return uint128_fallback(lo_, 0) << (shift - 64); return {hi_ << shift | (lo_ >> (64 - shift)), (lo_ << shift)}; } FMT_CONSTEXPR auto operator>>=(int shift) -> uint128_fallback& { return *this = *this >> shift; } FMT_CONSTEXPR void operator+=(uint128_fallback n) { uint64_t new_lo = lo_ + n.lo_; uint64_t new_hi = hi_ + n.hi_ + (new_lo < lo_ ? 1 : 0); FMT_ASSERT(new_hi >= hi_, ""); lo_ = new_lo; hi_ = new_hi; } FMT_CONSTEXPR20 uint128_fallback& operator+=(uint64_t n) noexcept { if (is_constant_evaluated()) { lo_ += n; hi_ += (lo_ < n ? 
1 : 0); return *this; } #if FMT_HAS_BUILTIN(__builtin_addcll) && !defined(__ibmxl__) unsigned long long carry; lo_ = __builtin_addcll(lo_, n, 0, &carry); hi_ += carry; #elif FMT_HAS_BUILTIN(__builtin_ia32_addcarryx_u64) && !defined(__ibmxl__) unsigned long long result; auto carry = __builtin_ia32_addcarryx_u64(0, lo_, n, &result); lo_ = result; hi_ += carry; #elif defined(_MSC_VER) && defined(_M_X64) auto carry = _addcarry_u64(0, lo_, n, &lo_); _addcarry_u64(carry, hi_, 0, &hi_); #else lo_ += n; hi_ += (lo_ < n ? 1 : 0); #endif return *this; } }; using uint128_t = conditional_t; #ifdef UINTPTR_MAX using uintptr_t = ::uintptr_t; #else using uintptr_t = uint128_t; #endif // Returns the largest possible value for type T. Same as // std::numeric_limits::max() but shorter and not affected by the max macro. template constexpr auto max_value() -> T { return (std::numeric_limits::max)(); } template constexpr auto num_bits() -> int { return std::numeric_limits::digits; } // std::numeric_limits::digits may return 0 for 128-bit ints. template <> constexpr auto num_bits() -> int { return 128; } template <> constexpr auto num_bits() -> int { return 128; } // A heterogeneous bit_cast used for converting 96-bit long double to uint128_t // and 128-bit pointers to uint128_fallback. template sizeof(From))> inline auto bit_cast(const From& from) -> To { constexpr auto size = static_cast(sizeof(From) / sizeof(unsigned)); struct data_t { unsigned value[static_cast(size)]; } data = bit_cast(from); auto result = To(); if (const_check(is_big_endian())) { for (int i = 0; i < size; ++i) result = (result << num_bits()) | data.value[i]; } else { for (int i = size - 1; i >= 0; --i) result = (result << num_bits()) | data.value[i]; } return result; } FMT_INLINE void assume(bool condition) { (void)condition; #if FMT_HAS_BUILTIN(__builtin_assume) && !FMT_ICC_VERSION __builtin_assume(condition); #endif } // An approximation of iterator_t for pre-C++20 systems. template using iterator_t = decltype(std::begin(std::declval())); template using sentinel_t = decltype(std::end(std::declval())); // A workaround for std::string not having mutable data() until C++17. template inline auto get_data(std::basic_string& s) -> Char* { return &s[0]; } template inline auto get_data(Container& c) -> typename Container::value_type* { return c.data(); } #if defined(_SECURE_SCL) && _SECURE_SCL // Make a checked iterator to avoid MSVC warnings. template using checked_ptr = stdext::checked_array_iterator; template constexpr auto make_checked(T* p, size_t size) -> checked_ptr { return {p, size}; } #else template using checked_ptr = T*; template constexpr auto make_checked(T* p, size_t) -> T* { return p; } #endif // Attempts to reserve space for n extra characters in the output range. // Returns a pointer to the reserved range or a reference to it. 
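// [Illustrative sketch, not part of upstream {fmt}.] uint128_fallback above
// emulates unsigned 128-bit arithmetic where __int128 is unavailable. Its
// operator+= relies on the standard carry trick for uint64_t: addition wraps
// modulo 2^64, and a wrap happened exactly when the truncated sum is smaller
// than an operand. The helper below is hypothetical, added only to show that
// rule together with a minimal use of uint128_fallback.
inline auto example_uint128_carry_sketch() -> uint128_fallback {
  uint64_t a = max_value<uint64_t>();  // 2^64 - 1
  uint64_t b = 2;
  uint64_t lo = a + b;                 // wraps to 1
  uint64_t carry = lo < a ? 1 : 0;     // wrap detected, so the carry out is 1
  // The same sum computed in 128 bits: (2^64 - 1) + 2 == 2^64 + 1.
  auto sum = uint128_fallback(a);
  sum += b;
  FMT_ASSERT(sum.high() == carry && sum.low() == lo, "");
  return sum;
}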
template ::value)> #if FMT_CLANG_VERSION >= 307 && !FMT_ICC_VERSION __attribute__((no_sanitize("undefined"))) #endif inline auto reserve(std::back_insert_iterator it, size_t n) -> checked_ptr { Container& c = get_container(it); size_t size = c.size(); c.resize(size + n); return make_checked(get_data(c) + size, n); } template inline auto reserve(buffer_appender it, size_t n) -> buffer_appender { buffer& buf = get_container(it); buf.try_reserve(buf.size() + n); return it; } template constexpr auto reserve(Iterator& it, size_t) -> Iterator& { return it; } template using reserve_iterator = remove_reference_t(), 0))>; template constexpr auto to_pointer(OutputIt, size_t) -> T* { return nullptr; } template auto to_pointer(buffer_appender it, size_t n) -> T* { buffer& buf = get_container(it); auto size = buf.size(); if (buf.capacity() < size + n) return nullptr; buf.try_resize(size + n); return buf.data() + size; } template ::value)> inline auto base_iterator(std::back_insert_iterator& it, checked_ptr) -> std::back_insert_iterator { return it; } template constexpr auto base_iterator(Iterator, Iterator it) -> Iterator { return it; } // is spectacularly slow to compile in C++20 so use a simple fill_n // instead (#1998). template FMT_CONSTEXPR auto fill_n(OutputIt out, Size count, const T& value) -> OutputIt { for (Size i = 0; i < count; ++i) *out++ = value; return out; } template FMT_CONSTEXPR20 auto fill_n(T* out, Size count, char value) -> T* { if (is_constant_evaluated()) { return fill_n(out, count, value); } std::memset(out, value, to_unsigned(count)); return out + count; } #ifdef __cpp_char8_t using char8_type = char8_t; #else enum char8_type : unsigned char {}; #endif template FMT_CONSTEXPR FMT_NOINLINE auto copy_str_noinline(InputIt begin, InputIt end, OutputIt out) -> OutputIt { return copy_str(begin, end, out); } // A public domain branchless UTF-8 decoder by Christopher Wellons: // https://github.com/skeeto/branchless-utf8 /* Decode the next character, c, from s, reporting errors in e. * * Since this is a branchless decoder, four bytes will be read from the * buffer regardless of the actual length of the next character. This * means the buffer _must_ have at least three bytes of zero padding * following the end of the data stream. * * Errors are reported in e, which will be non-zero if the parsed * character was somehow invalid: invalid byte sequence, non-canonical * encoding, or a surrogate half. * * The function returns a pointer to the next character. When an error * occurs, this pointer will be a guess that depends on the particular * error, but it will always advance at least one byte. */ FMT_CONSTEXPR inline auto utf8_decode(const char* s, uint32_t* c, int* e) -> const char* { constexpr const int masks[] = {0x00, 0x7f, 0x1f, 0x0f, 0x07}; constexpr const uint32_t mins[] = {4194304, 0, 128, 2048, 65536}; constexpr const int shiftc[] = {0, 18, 12, 6, 0}; constexpr const int shifte[] = {0, 6, 4, 2, 0}; int len = code_point_length_impl(*s); // Compute the pointer to the next character early so that the next // iteration can start working on the next character. Neither Clang // nor GCC figure out this reordering on their own. const char* next = s + len + !len; using uchar = unsigned char; // Assume a four-byte character and load four bytes. Unused bits are // shifted out. 
*c = uint32_t(uchar(s[0]) & masks[len]) << 18; *c |= uint32_t(uchar(s[1]) & 0x3f) << 12; *c |= uint32_t(uchar(s[2]) & 0x3f) << 6; *c |= uint32_t(uchar(s[3]) & 0x3f) << 0; *c >>= shiftc[len]; // Accumulate the various error conditions. *e = (*c < mins[len]) << 6; // non-canonical encoding *e |= ((*c >> 11) == 0x1b) << 7; // surrogate half? *e |= (*c > 0x10FFFF) << 8; // out of range? *e |= (uchar(s[1]) & 0xc0) >> 2; *e |= (uchar(s[2]) & 0xc0) >> 4; *e |= uchar(s[3]) >> 6; *e ^= 0x2a; // top two bits of each tail byte correct? *e >>= shifte[len]; return next; } constexpr uint32_t invalid_code_point = ~uint32_t(); // Invokes f(cp, sv) for every code point cp in s with sv being the string view // corresponding to the code point. cp is invalid_code_point on error. template FMT_CONSTEXPR void for_each_codepoint(string_view s, F f) { auto decode = [f](const char* buf_ptr, const char* ptr) { auto cp = uint32_t(); auto error = 0; auto end = utf8_decode(buf_ptr, &cp, &error); bool result = f(error ? invalid_code_point : cp, string_view(ptr, error ? 1 : to_unsigned(end - buf_ptr))); return result ? (error ? buf_ptr + 1 : end) : nullptr; }; auto p = s.data(); const size_t block_size = 4; // utf8_decode always reads blocks of 4 chars. if (s.size() >= block_size) { for (auto end = p + s.size() - block_size + 1; p < end;) { p = decode(p, p); if (!p) return; } } if (auto num_chars_left = s.data() + s.size() - p) { char buf[2 * block_size - 1] = {}; copy_str(p, p + num_chars_left, buf); const char* buf_ptr = buf; do { auto end = decode(buf_ptr, p); if (!end) return; p += end - buf_ptr; buf_ptr = end; } while (buf_ptr - buf < num_chars_left); } } template inline auto compute_width(basic_string_view s) -> size_t { return s.size(); } // Computes approximate display width of a UTF-8 string. FMT_CONSTEXPR inline size_t compute_width(string_view s) { size_t num_code_points = 0; // It is not a lambda for compatibility with C++14. struct count_code_points { size_t* count; FMT_CONSTEXPR auto operator()(uint32_t cp, string_view) const -> bool { *count += detail::to_unsigned( 1 + (cp >= 0x1100 && (cp <= 0x115f || // Hangul Jamo init. consonants cp == 0x2329 || // LEFT-POINTING ANGLE BRACKET cp == 0x232a || // RIGHT-POINTING ANGLE BRACKET // CJK ... Yi except IDEOGRAPHIC HALF FILL SPACE: (cp >= 0x2e80 && cp <= 0xa4cf && cp != 0x303f) || (cp >= 0xac00 && cp <= 0xd7a3) || // Hangul Syllables (cp >= 0xf900 && cp <= 0xfaff) || // CJK Compatibility Ideographs (cp >= 0xfe10 && cp <= 0xfe19) || // Vertical Forms (cp >= 0xfe30 && cp <= 0xfe6f) || // CJK Compatibility Forms (cp >= 0xff00 && cp <= 0xff60) || // Fullwidth Forms (cp >= 0xffe0 && cp <= 0xffe6) || // Fullwidth Forms (cp >= 0x20000 && cp <= 0x2fffd) || // CJK (cp >= 0x30000 && cp <= 0x3fffd) || // Miscellaneous Symbols and Pictographs + Emoticons: (cp >= 0x1f300 && cp <= 0x1f64f) || // Supplemental Symbols and Pictographs: (cp >= 0x1f900 && cp <= 0x1f9ff)))); return true; } }; for_each_codepoint(s, count_code_points{&num_code_points}); return num_code_points; } inline auto compute_width(basic_string_view s) -> size_t { return compute_width( string_view(reinterpret_cast(s.data()), s.size())); } template inline auto code_point_index(basic_string_view s, size_t n) -> size_t { size_t size = s.size(); return n < size ? n : size; } // Calculates the index of the nth code point in a UTF-8 string. 
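// [Illustrative sketch, not part of upstream {fmt}.] code_point_index below
// uses the fact that UTF-8 continuation bytes have the form 10xxxxxx, so a
// new code point starts exactly at bytes where (byte & 0xc0) != 0x80. The
// hypothetical helper below counts code points with that rule and, for valid
// UTF-8 input, cross-checks the result against for_each_codepoint, which
// performs a full decode.
inline auto example_count_codepoints_sketch(string_view s) -> size_t {
  const char* data = s.data();
  size_t by_lead_bytes = 0;
  for (size_t i = 0, size = s.size(); i != size; ++i) {
    if ((static_cast<unsigned char>(data[i]) & 0xc0) != 0x80) ++by_lead_bytes;
  }
  size_t by_decoding = 0;
  for_each_codepoint(s, [&by_decoding](uint32_t, string_view) {
    ++by_decoding;
    return true;  // continue with the remaining code points
  });
  FMT_ASSERT(by_lead_bytes == by_decoding, "expected to agree for valid UTF-8");
  return by_lead_bytes;
}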
inline auto code_point_index(string_view s, size_t n) -> size_t { const char* data = s.data(); size_t num_code_points = 0; for (size_t i = 0, size = s.size(); i != size; ++i) { if ((data[i] & 0xc0) != 0x80 && ++num_code_points > n) return i; } return s.size(); } inline auto code_point_index(basic_string_view s, size_t n) -> size_t { return code_point_index( string_view(reinterpret_cast(s.data()), s.size()), n); } #ifndef FMT_USE_FLOAT128 # ifdef __SIZEOF_FLOAT128__ # define FMT_USE_FLOAT128 1 # else # define FMT_USE_FLOAT128 0 # endif #endif #if FMT_USE_FLOAT128 using float128 = __float128; #else using float128 = void; #endif template using is_float128 = std::is_same; template using is_floating_point = bool_constant::value || is_float128::value>; template ::value> struct is_fast_float : bool_constant::is_iec559 && sizeof(T) <= sizeof(double)> {}; template struct is_fast_float : std::false_type {}; template using is_double_double = bool_constant::digits == 106>; #ifndef FMT_USE_FULL_CACHE_DRAGONBOX # define FMT_USE_FULL_CACHE_DRAGONBOX 0 #endif template template void buffer::append(const U* begin, const U* end) { while (begin != end) { auto count = to_unsigned(end - begin); try_reserve(size_ + count); auto free_cap = capacity_ - size_; if (free_cap < count) count = free_cap; std::uninitialized_copy_n(begin, count, make_checked(ptr_ + size_, count)); size_ += count; begin += count; } } template struct is_locale : std::false_type {}; template struct is_locale> : std::true_type {}; } // namespace detail FMT_MODULE_EXPORT_BEGIN // The number of characters to store in the basic_memory_buffer object itself // to avoid dynamic memory allocation. enum { inline_buffer_size = 500 }; /** \rst A dynamically growing memory buffer for trivially copyable/constructible types with the first ``SIZE`` elements stored in the object itself. You can use the ``memory_buffer`` type alias for ``char`` instead. **Example**:: auto out = fmt::memory_buffer(); format_to(std::back_inserter(out), "The answer is {}.", 42); This will append the following output to the ``out`` object: .. code-block:: none The answer is 42. The output can be converted to an ``std::string`` with ``to_string(out)``. \endrst */ template > class basic_memory_buffer final : public detail::buffer { private: T store_[SIZE]; // Don't inherit from Allocator avoid generating type_info for it. Allocator alloc_; // Deallocate memory allocated by the buffer. FMT_CONSTEXPR20 void deallocate() { T* data = this->data(); if (data != store_) alloc_.deallocate(data, this->capacity()); } protected: FMT_CONSTEXPR20 void grow(size_t size) override; public: using value_type = T; using const_reference = const T&; FMT_CONSTEXPR20 explicit basic_memory_buffer( const Allocator& alloc = Allocator()) : alloc_(alloc) { this->set(store_, SIZE); if (detail::is_constant_evaluated()) detail::fill_n(store_, SIZE, T()); } FMT_CONSTEXPR20 ~basic_memory_buffer() { deallocate(); } private: // Move data from other to this buffer. FMT_CONSTEXPR20 void move(basic_memory_buffer& other) { alloc_ = std::move(other.alloc_); T* data = other.data(); size_t size = other.size(), capacity = other.capacity(); if (data == other.store_) { this->set(store_, capacity); detail::copy_str(other.store_, other.store_ + size, detail::make_checked(store_, capacity)); } else { this->set(data, capacity); // Set pointer to the inline array so that delete is not called // when deallocating. 
other.set(other.store_, 0); other.clear(); } this->resize(size); } public: /** \rst Constructs a :class:`fmt::basic_memory_buffer` object moving the content of the other object to it. \endrst */ FMT_CONSTEXPR20 basic_memory_buffer(basic_memory_buffer&& other) noexcept { move(other); } /** \rst Moves the content of the other ``basic_memory_buffer`` object to this one. \endrst */ auto operator=(basic_memory_buffer&& other) noexcept -> basic_memory_buffer& { FMT_ASSERT(this != &other, ""); deallocate(); move(other); return *this; } // Returns a copy of the allocator associated with this buffer. auto get_allocator() const -> Allocator { return alloc_; } /** Resizes the buffer to contain *count* elements. If T is a POD type new elements may not be initialized. */ FMT_CONSTEXPR20 void resize(size_t count) { this->try_resize(count); } /** Increases the buffer capacity to *new_capacity*. */ void reserve(size_t new_capacity) { this->try_reserve(new_capacity); } // Directly append data into the buffer using detail::buffer::append; template void append(const ContiguousRange& range) { append(range.data(), range.data() + range.size()); } }; template FMT_CONSTEXPR20 void basic_memory_buffer::grow( size_t size) { detail::abort_fuzzing_if(size > 5000); const size_t max_size = std::allocator_traits::max_size(alloc_); size_t old_capacity = this->capacity(); size_t new_capacity = old_capacity + old_capacity / 2; if (size > new_capacity) new_capacity = size; else if (new_capacity > max_size) new_capacity = size > max_size ? size : max_size; T* old_data = this->data(); T* new_data = std::allocator_traits::allocate(alloc_, new_capacity); // The following code doesn't throw, so the raw pointer above doesn't leak. std::uninitialized_copy(old_data, old_data + this->size(), detail::make_checked(new_data, new_capacity)); this->set(new_data, new_capacity); // deallocate must not throw according to the standard, but even if it does, // the buffer already uses the new storage and will deallocate it in // destructor. if (old_data != store_) alloc_.deallocate(old_data, old_capacity); } using memory_buffer = basic_memory_buffer; template struct is_contiguous> : std::true_type { }; namespace detail { #ifdef _WIN32 FMT_API bool write_console(std::FILE* f, string_view text); #endif FMT_API void print(std::FILE*, string_view); } // namespace detail /** A formatting error such as invalid format string. */ FMT_CLASS_API class FMT_API format_error : public std::runtime_error { public: explicit format_error(const char* message) : std::runtime_error(message) {} explicit format_error(const std::string& message) : std::runtime_error(message) {} format_error(const format_error&) = default; format_error& operator=(const format_error&) = default; format_error(format_error&&) = default; format_error& operator=(format_error&&) = default; ~format_error() noexcept override FMT_MSC_DEFAULT; }; namespace detail_exported { #if FMT_USE_NONTYPE_TEMPLATE_ARGS template struct fixed_string { constexpr fixed_string(const Char (&str)[N]) { detail::copy_str(static_cast(str), str + N, data); } Char data[N] = {}; }; #endif // Converts a compile-time string to basic_string_view. template constexpr auto compile_string_to_view(const Char (&s)[N]) -> basic_string_view { // Remove trailing NUL character if needed. Won't be present if this is used // with a raw character array (i.e. not defined as a string). return {s, N - (std::char_traits::to_int_type(s[N - 1]) == 0 ? 
1 : 0)}; } template constexpr auto compile_string_to_view(detail::std_string_view s) -> basic_string_view { return {s.data(), s.size()}; } } // namespace detail_exported FMT_BEGIN_DETAIL_NAMESPACE template struct is_integral : std::is_integral {}; template <> struct is_integral : std::true_type {}; template <> struct is_integral : std::true_type {}; template using is_signed = std::integral_constant::is_signed || std::is_same::value>; // Returns true if value is negative, false otherwise. // Same as `value < 0` but doesn't produce warnings if T is an unsigned type. template ::value)> constexpr auto is_negative(T value) -> bool { return value < 0; } template ::value)> constexpr auto is_negative(T) -> bool { return false; } template FMT_CONSTEXPR auto is_supported_floating_point(T) -> bool { if (std::is_same()) return FMT_USE_FLOAT; if (std::is_same()) return FMT_USE_DOUBLE; if (std::is_same()) return FMT_USE_LONG_DOUBLE; return true; } // Smallest of uint32_t, uint64_t, uint128_t that is large enough to // represent all values of an integral type T. template using uint32_or_64_or_128_t = conditional_t() <= 32 && !FMT_REDUCE_INT_INSTANTIATIONS, uint32_t, conditional_t() <= 64, uint64_t, uint128_t>>; template using uint64_or_128_t = conditional_t() <= 64, uint64_t, uint128_t>; #define FMT_POWERS_OF_10(factor) \ factor * 10, (factor)*100, (factor)*1000, (factor)*10000, (factor)*100000, \ (factor)*1000000, (factor)*10000000, (factor)*100000000, \ (factor)*1000000000 // Converts value in the range [0, 100) to a string. constexpr const char* digits2(size_t value) { // GCC generates slightly better code when value is pointer-size. return &"0001020304050607080910111213141516171819" "2021222324252627282930313233343536373839" "4041424344454647484950515253545556575859" "6061626364656667686970717273747576777879" "8081828384858687888990919293949596979899"[value * 2]; } // Sign is a template parameter to workaround a bug in gcc 4.8. template constexpr Char sign(Sign s) { #if !FMT_GCC_VERSION || FMT_GCC_VERSION >= 604 static_assert(std::is_same::value, ""); #endif return static_cast("\0-+ "[s]); } template FMT_CONSTEXPR auto count_digits_fallback(T n) -> int { int count = 1; for (;;) { // Integer division is slow so do it for a group of four digits instead // of for every digit. The idea comes from the talk by Alexandrescu // "Three Optimization Tips for C++". See speed-test for a comparison. if (n < 10) return count; if (n < 100) return count + 1; if (n < 1000) return count + 2; if (n < 10000) return count + 3; n /= 10000u; count += 4; } } #if FMT_USE_INT128 FMT_CONSTEXPR inline auto count_digits(uint128_opt n) -> int { return count_digits_fallback(n); } #endif #ifdef FMT_BUILTIN_CLZLL // It is a separate function rather than a part of count_digits to workaround // the lack of static constexpr in constexpr functions. inline auto do_count_digits(uint64_t n) -> int { // This has comparable performance to the version by Kendall Willets // (https://github.com/fmtlib/format-benchmark/blob/master/digits10) // but uses smaller tables. // Maps bsr(n) to ceil(log10(pow(2, bsr(n) + 1) - 1)). 
static constexpr uint8_t bsr2log10[] = { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 20}; auto t = bsr2log10[FMT_BUILTIN_CLZLL(n | 1) ^ 63]; static constexpr const uint64_t zero_or_powers_of_10[] = { 0, 0, FMT_POWERS_OF_10(1U), FMT_POWERS_OF_10(1000000000ULL), 10000000000000000000ULL}; return t - (n < zero_or_powers_of_10[t]); } #endif // Returns the number of decimal digits in n. Leading zeros are not counted // except for n == 0 in which case count_digits returns 1. FMT_CONSTEXPR20 inline auto count_digits(uint64_t n) -> int { #ifdef FMT_BUILTIN_CLZLL if (!is_constant_evaluated()) { return do_count_digits(n); } #endif return count_digits_fallback(n); } // Counts the number of digits in n. BITS = log2(radix). template FMT_CONSTEXPR auto count_digits(UInt n) -> int { #ifdef FMT_BUILTIN_CLZ if (!is_constant_evaluated() && num_bits() == 32) return (FMT_BUILTIN_CLZ(static_cast(n) | 1) ^ 31) / BITS + 1; #endif // Lambda avoids unreachable code warnings from NVHPC. return [](UInt m) { int num_digits = 0; do { ++num_digits; } while ((m >>= BITS) != 0); return num_digits; }(n); } #ifdef FMT_BUILTIN_CLZ // It is a separate function rather than a part of count_digits to workaround // the lack of static constexpr in constexpr functions. FMT_INLINE auto do_count_digits(uint32_t n) -> int { // An optimization by Kendall Willets from https://bit.ly/3uOIQrB. // This increments the upper 32 bits (log10(T) - 1) when >= T is added. # define FMT_INC(T) (((sizeof(# T) - 1ull) << 32) - T) static constexpr uint64_t table[] = { FMT_INC(0), FMT_INC(0), FMT_INC(0), // 8 FMT_INC(10), FMT_INC(10), FMT_INC(10), // 64 FMT_INC(100), FMT_INC(100), FMT_INC(100), // 512 FMT_INC(1000), FMT_INC(1000), FMT_INC(1000), // 4096 FMT_INC(10000), FMT_INC(10000), FMT_INC(10000), // 32k FMT_INC(100000), FMT_INC(100000), FMT_INC(100000), // 256k FMT_INC(1000000), FMT_INC(1000000), FMT_INC(1000000), // 2048k FMT_INC(10000000), FMT_INC(10000000), FMT_INC(10000000), // 16M FMT_INC(100000000), FMT_INC(100000000), FMT_INC(100000000), // 128M FMT_INC(1000000000), FMT_INC(1000000000), FMT_INC(1000000000), // 1024M FMT_INC(1000000000), FMT_INC(1000000000) // 4B }; auto inc = table[FMT_BUILTIN_CLZ(n | 1) ^ 31]; return static_cast((n + inc) >> 32); } #endif // Optional version of count_digits for better performance on 32-bit platforms. 
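// [Illustrative sketch, not part of upstream {fmt}.] The table-driven
// do_count_digits above computes the same answer as this straightforward
// reference: compare n against increasing powers of 10, with the convention
// that 0 has one digit. The helper name is hypothetical and exists only to
// make the table's contract concrete.
inline auto example_count_digits_reference(uint32_t n) -> int {
  // Thresholds 10^1 .. 10^9; a 32-bit value has at most 10 decimal digits.
  static constexpr uint32_t thresholds[] = {
      10u,      100u,      1000u,      10000u,     100000u,
      1000000u, 10000000u, 100000000u, 1000000000u};
  int count = 1;
  for (uint32_t t : thresholds) {
    if (n < t) break;
    ++count;
  }
  // Matches the 64-bit overload defined above.
  FMT_ASSERT(count == count_digits(static_cast<uint64_t>(n)), "");
  return count;
}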
FMT_CONSTEXPR20 inline auto count_digits(uint32_t n) -> int { #ifdef FMT_BUILTIN_CLZ if (!is_constant_evaluated()) { return do_count_digits(n); } #endif return count_digits_fallback(n); } template constexpr auto digits10() noexcept -> int { return std::numeric_limits::digits10; } template <> constexpr auto digits10() noexcept -> int { return 38; } template <> constexpr auto digits10() noexcept -> int { return 38; } template struct thousands_sep_result { std::string grouping; Char thousands_sep; }; template FMT_API auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result; template inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { auto result = thousands_sep_impl(loc); return {result.grouping, Char(result.thousands_sep)}; } template <> inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { return thousands_sep_impl(loc); } template FMT_API auto decimal_point_impl(locale_ref loc) -> Char; template inline auto decimal_point(locale_ref loc) -> Char { return Char(decimal_point_impl(loc)); } template <> inline auto decimal_point(locale_ref loc) -> wchar_t { return decimal_point_impl(loc); } // Compares two characters for equality. template auto equal2(const Char* lhs, const char* rhs) -> bool { return lhs[0] == Char(rhs[0]) && lhs[1] == Char(rhs[1]); } inline auto equal2(const char* lhs, const char* rhs) -> bool { return memcmp(lhs, rhs, 2) == 0; } // Copies two characters from src to dst. template FMT_CONSTEXPR20 FMT_INLINE void copy2(Char* dst, const char* src) { if (!is_constant_evaluated() && sizeof(Char) == sizeof(char)) { memcpy(dst, src, 2); return; } *dst++ = static_cast(*src++); *dst = static_cast(*src); } template struct format_decimal_result { Iterator begin; Iterator end; }; // Formats a decimal unsigned integer value writing into out pointing to a // buffer of specified size. The caller must ensure that the buffer is large // enough. template FMT_CONSTEXPR20 auto format_decimal(Char* out, UInt value, int size) -> format_decimal_result { FMT_ASSERT(size >= count_digits(value), "invalid digit count"); out += size; Char* end = out; while (value >= 100) { // Integer division is slow so do it for a group of two digits instead // of for every digit. The idea comes from the talk by Alexandrescu // "Three Optimization Tips for C++". See speed-test for a comparison. out -= 2; copy2(out, digits2(static_cast(value % 100))); value /= 100; } if (value < 10) { *--out = static_cast('0' + value); return {out, end}; } out -= 2; copy2(out, digits2(static_cast(value))); return {out, end}; } template >::value)> FMT_CONSTEXPR inline auto format_decimal(Iterator out, UInt value, int size) -> format_decimal_result { // Buffer is large enough to hold all digits (digits10 + 1). Char buffer[digits10() + 1]; auto end = format_decimal(buffer, value, size).end; return {out, detail::copy_str_noinline(buffer, end, out)}; } template FMT_CONSTEXPR auto format_uint(Char* buffer, UInt value, int num_digits, bool upper = false) -> Char* { buffer += num_digits; Char* end = buffer; do { const char* digits = upper ? "0123456789ABCDEF" : "0123456789abcdef"; unsigned digit = static_cast(value & ((1 << BASE_BITS) - 1)); *--buffer = static_cast(BASE_BITS < 4 ? 
static_cast('0' + digit) : digits[digit]); } while ((value >>= BASE_BITS) != 0); return end; } template inline auto format_uint(It out, UInt value, int num_digits, bool upper = false) -> It { if (auto ptr = to_pointer(out, to_unsigned(num_digits))) { format_uint(ptr, value, num_digits, upper); return out; } // Buffer should be large enough to hold all digits (digits / BASE_BITS + 1). char buffer[num_bits() / BASE_BITS + 1]; format_uint(buffer, value, num_digits, upper); return detail::copy_str_noinline(buffer, buffer + num_digits, out); } // A converter from UTF-8 to UTF-16. class utf8_to_utf16 { private: basic_memory_buffer buffer_; public: FMT_API explicit utf8_to_utf16(string_view s); operator basic_string_view() const { return {&buffer_[0], size()}; } auto size() const -> size_t { return buffer_.size() - 1; } auto c_str() const -> const wchar_t* { return &buffer_[0]; } auto str() const -> std::wstring { return {&buffer_[0], size()}; } }; namespace dragonbox { // Type-specific information that Dragonbox uses. template struct float_info; template <> struct float_info { using carrier_uint = uint32_t; static const int exponent_bits = 8; static const int kappa = 1; static const int big_divisor = 100; static const int small_divisor = 10; static const int min_k = -31; static const int max_k = 46; static const int shorter_interval_tie_lower_threshold = -35; static const int shorter_interval_tie_upper_threshold = -35; }; template <> struct float_info { using carrier_uint = uint64_t; static const int exponent_bits = 11; static const int kappa = 2; static const int big_divisor = 1000; static const int small_divisor = 100; static const int min_k = -292; static const int max_k = 326; static const int shorter_interval_tie_lower_threshold = -77; static const int shorter_interval_tie_upper_threshold = -77; }; // An 80- or 128-bit floating point number. template struct float_info::digits == 64 || std::numeric_limits::digits == 113 || is_float128::value>> { using carrier_uint = detail::uint128_t; static const int exponent_bits = 15; }; // A double-double floating point number. template struct float_info::value>> { using carrier_uint = detail::uint128_t; }; template struct decimal_fp { using significand_type = typename float_info::carrier_uint; significand_type significand; int exponent; }; template FMT_API auto to_decimal(T x) noexcept -> decimal_fp; } // namespace dragonbox // Returns true iff Float has the implicit bit which is not stored. template constexpr bool has_implicit_bit() { // An 80-bit FP number has a 64-bit significand an no implicit bit. return std::numeric_limits::digits != 64; } // Returns the number of significand bits stored in Float. The implicit bit is // not counted since it is not stored. template constexpr int num_significand_bits() { // std::numeric_limits may not support __float128. return is_float128() ? 112 : (std::numeric_limits::digits - (has_implicit_bit() ? 1 : 0)); } template constexpr auto exponent_mask() -> typename dragonbox::float_info::carrier_uint { using uint = typename dragonbox::float_info::carrier_uint; return ((uint(1) << dragonbox::float_info::exponent_bits) - 1) << num_significand_bits(); } template constexpr auto exponent_bias() -> int { // std::numeric_limits may not support __float128. return is_float128() ? 16383 : std::numeric_limits::max_exponent - 1; } // Writes the exponent exp in the form "[+-]d{2,3}" to buffer. 
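// [Illustrative sketch, not part of upstream {fmt}.] basic_fp::assign below
// splits a floating-point value into its IEEE-754 fields using
// num_significand_bits() and exponent_mask(). The hypothetical helper here
// shows the same decomposition for double: 1 sign bit, 11 biased exponent
// bits and 52 stored significand bits, with exponent_bias<double>() == 1023.
inline void example_decompose_double_sketch(double d) {
  const auto bits = bit_cast<uint64_t>(d);
  const int sig_bits = num_significand_bits<double>();  // 52 stored bits
  const uint64_t sig_mask = (uint64_t(1) << sig_bits) - 1;
  const uint64_t significand = bits & sig_mask;
  const int biased_exp =
      static_cast<int>((bits & exponent_mask<double>()) >> sig_bits);
  const bool negative = (bits >> 63) != 0;
  // For a normal value: (-1)^sign * (1 + significand / 2^52) *
  // 2^(biased_exp - exponent_bias<double>()).
  ignore_unused(significand, biased_exp, negative);
}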
template <typename Char, typename It>
FMT_CONSTEXPR auto write_exponent(int exp, It it) -> It {
  FMT_ASSERT(-10000 < exp && exp < 10000, "exponent out of range");
  if (exp < 0) {
    *it++ = static_cast<Char>('-');
    exp = -exp;
  } else {
    *it++ = static_cast<Char>('+');
  }
  if (exp >= 100) {
    const char* top = digits2(to_unsigned(exp / 100));
    if (exp >= 1000) *it++ = static_cast<Char>(top[0]);
    *it++ = static_cast<Char>(top[1]);
    exp %= 100;
  }
  const char* d = digits2(to_unsigned(exp));
  *it++ = static_cast<Char>(d[0]);
  *it++ = static_cast<Char>(d[1]);
  return it;
}

// A floating-point number f * pow(2, e) where F is an unsigned type.
template <typename F> struct basic_fp {
  F f;
  int e;

  static constexpr const int num_significand_bits =
      static_cast<int>(sizeof(F) * num_bits<unsigned char>());

  constexpr basic_fp() : f(0), e(0) {}
  constexpr basic_fp(uint64_t f_val, int e_val) : f(f_val), e(e_val) {}

  // Constructs fp from an IEEE754 floating-point number.
  template <typename Float> FMT_CONSTEXPR basic_fp(Float n) { assign(n); }

  // Assigns n to this and return true iff predecessor is closer than successor.
  template <typename Float, FMT_ENABLE_IF(!is_double_double<Float>::value)>
  FMT_CONSTEXPR auto assign(Float n) -> bool {
    static_assert(std::numeric_limits<Float>::digits <= 113, "unsupported FP");
    // Assume Float is in the format [sign][exponent][significand].
    using carrier_uint = typename dragonbox::float_info<Float>::carrier_uint;
    const auto num_float_significand_bits =
        detail::num_significand_bits<Float>();
    const auto implicit_bit = carrier_uint(1) << num_float_significand_bits;
    const auto significand_mask = implicit_bit - 1;
    auto u = bit_cast<carrier_uint>(n);
    f = static_cast<F>(u & significand_mask);
    auto biased_e = static_cast<int>((u & exponent_mask<Float>()) >>
                                     num_float_significand_bits);
    // The predecessor is closer if n is a normalized power of 2 (f == 0)
    // other than the smallest normalized number (biased_e > 1).
    auto is_predecessor_closer = f == 0 && biased_e > 1;
    if (biased_e == 0)
      biased_e = 1;  // Subnormals use biased exponent 1 (min exponent).
    else if (has_implicit_bit<Float>())
      f += static_cast<F>(implicit_bit);
    e = biased_e - exponent_bias<Float>() - num_float_significand_bits;
    if (!has_implicit_bit<Float>()) ++e;
    return is_predecessor_closer;
  }

  template <typename Float, FMT_ENABLE_IF(is_double_double<Float>::value)>
  FMT_CONSTEXPR auto assign(Float n) -> bool {
    static_assert(std::numeric_limits<double>::is_iec559, "unsupported FP");
    return assign(static_cast<double>(n));
  }
};

using fp = basic_fp<uint64_t>;

// Normalizes the value converted from double and multiplied by (1 << SHIFT).
template <int SHIFT = 0, typename F>
FMT_CONSTEXPR basic_fp<F> normalize(basic_fp<F> value) {
  // Handle subnormals.
  const auto implicit_bit = F(1) << num_significand_bits<double>();
  const auto shifted_implicit_bit = implicit_bit << SHIFT;
  while ((value.f & shifted_implicit_bit) == 0) {
    value.f <<= 1;
    --value.e;
  }
  // Subtract 1 to account for hidden bit.
  const auto offset = basic_fp<F>::num_significand_bits -
                      num_significand_bits<double>() - SHIFT - 1;
  value.f <<= offset;
  value.e -= offset;
  return value;
}

// Computes lhs * rhs / pow(2, 64) rounded to nearest with half-up tie breaking.
FMT_CONSTEXPR inline uint64_t multiply(uint64_t lhs, uint64_t rhs) {
#if FMT_USE_INT128
  auto product = static_cast<__uint128_t>(lhs) * rhs;
  auto f = static_cast<uint64_t>(product >> 64);
  return (static_cast<uint64_t>(product) & (1ULL << 63)) != 0 ? f + 1 : f;
#else
  // Multiply 32-bit parts of significands.
  uint64_t mask = (1ULL << 32) - 1;
  uint64_t a = lhs >> 32, b = lhs & mask;
  uint64_t c = rhs >> 32, d = rhs & mask;
  uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d;
  // Compute mid 64-bit of result and round.
uint64_t mid = (bd >> 32) + (ad & mask) + (bc & mask) + (1U << 31); return ac + (ad >> 32) + (bc >> 32) + (mid >> 32); #endif } FMT_CONSTEXPR inline fp operator*(fp x, fp y) { return {multiply(x.f, y.f), x.e + y.e + 64}; } template struct basic_data { // Normalized 64-bit significands of pow(10, k), for k = -348, -340, ..., 340. // These are generated by support/compute-powers.py. static constexpr uint64_t pow10_significands[87] = { 0xfa8fd5a0081c0288, 0xbaaee17fa23ebf76, 0x8b16fb203055ac76, 0xcf42894a5dce35ea, 0x9a6bb0aa55653b2d, 0xe61acf033d1a45df, 0xab70fe17c79ac6ca, 0xff77b1fcbebcdc4f, 0xbe5691ef416bd60c, 0x8dd01fad907ffc3c, 0xd3515c2831559a83, 0x9d71ac8fada6c9b5, 0xea9c227723ee8bcb, 0xaecc49914078536d, 0x823c12795db6ce57, 0xc21094364dfb5637, 0x9096ea6f3848984f, 0xd77485cb25823ac7, 0xa086cfcd97bf97f4, 0xef340a98172aace5, 0xb23867fb2a35b28e, 0x84c8d4dfd2c63f3b, 0xc5dd44271ad3cdba, 0x936b9fcebb25c996, 0xdbac6c247d62a584, 0xa3ab66580d5fdaf6, 0xf3e2f893dec3f126, 0xb5b5ada8aaff80b8, 0x87625f056c7c4a8b, 0xc9bcff6034c13053, 0x964e858c91ba2655, 0xdff9772470297ebd, 0xa6dfbd9fb8e5b88f, 0xf8a95fcf88747d94, 0xb94470938fa89bcf, 0x8a08f0f8bf0f156b, 0xcdb02555653131b6, 0x993fe2c6d07b7fac, 0xe45c10c42a2b3b06, 0xaa242499697392d3, 0xfd87b5f28300ca0e, 0xbce5086492111aeb, 0x8cbccc096f5088cc, 0xd1b71758e219652c, 0x9c40000000000000, 0xe8d4a51000000000, 0xad78ebc5ac620000, 0x813f3978f8940984, 0xc097ce7bc90715b3, 0x8f7e32ce7bea5c70, 0xd5d238a4abe98068, 0x9f4f2726179a2245, 0xed63a231d4c4fb27, 0xb0de65388cc8ada8, 0x83c7088e1aab65db, 0xc45d1df942711d9a, 0x924d692ca61be758, 0xda01ee641a708dea, 0xa26da3999aef774a, 0xf209787bb47d6b85, 0xb454e4a179dd1877, 0x865b86925b9bc5c2, 0xc83553c5c8965d3d, 0x952ab45cfa97a0b3, 0xde469fbd99a05fe3, 0xa59bc234db398c25, 0xf6c69a72a3989f5c, 0xb7dcbf5354e9bece, 0x88fcf317f22241e2, 0xcc20ce9bd35c78a5, 0x98165af37b2153df, 0xe2a0b5dc971f303a, 0xa8d9d1535ce3b396, 0xfb9b7cd9a4a7443c, 0xbb764c4ca7a44410, 0x8bab8eefb6409c1a, 0xd01fef10a657842c, 0x9b10a4e5e9913129, 0xe7109bfba19c0c9d, 0xac2820d9623bf429, 0x80444b5e7aa7cf85, 0xbf21e44003acdd2d, 0x8e679c2f5e44ff8f, 0xd433179d9c8cb841, 0x9e19db92b4e31ba9, 0xeb96bf6ebadf77d9, 0xaf87023b9bf0ee6b, }; #if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wnarrowing" #endif // Binary exponents of pow(10, k), for k = -348, -340, ..., 340, corresponding // to significands above. static constexpr int16_t pow10_exponents[87] = { -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, -954, -927, -901, -874, -847, -821, -794, -768, -741, -715, -688, -661, -635, -608, -582, -555, -529, -502, -475, -449, -422, -396, -369, -343, -316, -289, -263, -236, -210, -183, -157, -130, -103, -77, -50, -24, 3, 30, 56, 83, 109, 136, 162, 189, 216, 242, 269, 295, 322, 348, 375, 402, 428, 455, 481, 508, 534, 561, 588, 614, 641, 667, 694, 720, 747, 774, 800, 827, 853, 880, 907, 933, 960, 986, 1013, 1039, 1066}; #if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 # pragma GCC diagnostic pop #endif static constexpr uint64_t power_of_10_64[20] = { 1, FMT_POWERS_OF_10(1ULL), FMT_POWERS_OF_10(1000000000ULL), 10000000000000000000ULL}; }; #if FMT_CPLUSPLUS < 201703L template constexpr uint64_t basic_data::pow10_significands[]; template constexpr int16_t basic_data::pow10_exponents[]; template constexpr uint64_t basic_data::power_of_10_64[]; #endif // This is a struct rather than an alias to avoid shadowing warnings in gcc. 
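// An editor's illustrative sketch, not part of fmt: format_decimal above
// emits two digits per integer division using a precomputed table of digit
// pairs ("Three Optimization Tips for C++", Alexandrescu). The standalone
// version below shows the same technique in isolation; the fmt_doc_sketch
// namespace and the function names in it are made up for illustration only.
#include <cstdint>
#include <cstring>

namespace fmt_doc_sketch {

// Returns a pointer to the two-character pair for value in [0, 99].
inline const char* two_digits(unsigned value) {
  static const char data[] =
      "0001020304050607080910111213141516171819"
      "2021222324252627282930313233343536373839"
      "4041424344454647484950515253545556575859"
      "6061626364656667686970717273747576777879"
      "8081828384858687888990919293949596979899";
  return &data[value * 2];
}

// Writes the decimal representation of value into out and returns the number
// of characters written. Only one integer division per two digits.
// Usage: char buf[20]; int n = write_decimal(1234567u, buf);  // "1234567"
inline int write_decimal(uint64_t value, char* out) {
  char tmp[20];  // enough for 2^64 - 1 (20 digits)
  char* p = tmp + sizeof(tmp);
  while (value >= 100) {
    p -= 2;
    std::memcpy(p, two_digits(static_cast<unsigned>(value % 100)), 2);
    value /= 100;
  }
  if (value >= 10) {
    p -= 2;
    std::memcpy(p, two_digits(static_cast<unsigned>(value)), 2);
  } else {
    *--p = static_cast<char>('0' + value);
  }
  int size = static_cast<int>(tmp + sizeof(tmp) - p);
  std::memcpy(out, p, static_cast<std::size_t>(size));
  return size;
}

}  // namespace fmt_doc_sketch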
struct data : basic_data<> {}; // Returns a cached power of 10 `c_k = c_k.f * pow(2, c_k.e)` such that its // (binary) exponent satisfies `min_exponent <= c_k.e <= min_exponent + 28`. FMT_CONSTEXPR inline fp get_cached_power(int min_exponent, int& pow10_exponent) { const int shift = 32; // log10(2) = 0x0.4d104d427de7fbcc... const int64_t significand = 0x4d104d427de7fbcc; int index = static_cast( ((min_exponent + fp::num_significand_bits - 1) * (significand >> shift) + ((int64_t(1) << shift) - 1)) // ceil >> 32 // arithmetic shift ); // Decimal exponent of the first (smallest) cached power of 10. const int first_dec_exp = -348; // Difference between 2 consecutive decimal exponents in cached powers of 10. const int dec_exp_step = 8; index = (index - first_dec_exp - 1) / dec_exp_step + 1; pow10_exponent = first_dec_exp + index * dec_exp_step; // Using *(x + index) instead of x[index] avoids an issue with some compilers // using the EDG frontend (e.g. nvhpc/22.3 in C++17 mode). return {*(data::pow10_significands + index), *(data::pow10_exponents + index)}; } #ifndef _MSC_VER # define FMT_SNPRINTF snprintf #else FMT_API auto fmt_snprintf(char* buf, size_t size, const char* fmt, ...) -> int; # define FMT_SNPRINTF fmt_snprintf #endif // _MSC_VER // Formats a floating-point number with snprintf using the hexfloat format. template auto snprintf_float(T value, int precision, float_specs specs, buffer& buf) -> int { // Buffer capacity must be non-zero, otherwise MSVC's vsnprintf_s will fail. FMT_ASSERT(buf.capacity() > buf.size(), "empty buffer"); FMT_ASSERT(specs.format == float_format::hex, ""); static_assert(!std::is_same::value, ""); // Build the format string. char format[7]; // The longest format is "%#.*Le". char* format_ptr = format; *format_ptr++ = '%'; if (specs.showpoint) *format_ptr++ = '#'; if (precision >= 0) { *format_ptr++ = '.'; *format_ptr++ = '*'; } if (std::is_same()) *format_ptr++ = 'L'; *format_ptr++ = specs.upper ? 'A' : 'a'; *format_ptr = '\0'; // Format using snprintf. auto offset = buf.size(); for (;;) { auto begin = buf.data() + offset; auto capacity = buf.capacity() - offset; abort_fuzzing_if(precision > 100000); // Suppress the warning about a nonliteral format string. // Cannot use auto because of a bug in MinGW (#1532). int (*snprintf_ptr)(char*, size_t, const char*, ...) = FMT_SNPRINTF; int result = precision >= 0 ? snprintf_ptr(begin, capacity, format, precision, value) : snprintf_ptr(begin, capacity, format, value); if (result < 0) { // The buffer will grow exponentially. buf.try_reserve(buf.capacity() + 1); continue; } auto size = to_unsigned(result); // Size equal to capacity means that the last character was truncated. if (size < capacity) { buf.try_resize(size + offset); return 0; } buf.try_reserve(size + offset + 1); // Add 1 for the terminating '\0'. } } template using convert_float_result = conditional_t::value || sizeof(T) == sizeof(double), double, T>; template constexpr auto convert_float(T value) -> convert_float_result { return static_cast>(value); } template FMT_NOINLINE FMT_CONSTEXPR auto fill(OutputIt it, size_t n, const fill_t& fill) -> OutputIt { auto fill_size = fill.size(); if (fill_size == 1) return detail::fill_n(it, n, fill[0]); auto data = fill.data(); for (size_t i = 0; i < n; ++i) it = copy_str(data, data + fill_size, it); return it; } // Writes the output of f, padded according to format specifications in specs. // size: output size in code units. // width: output display width in (terminal) column positions. 
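// An editor's illustrative sketch, not part of fmt: get_cached_power above
// maps a binary exponent to an entry of the pow10 tables by multiplying with
// log10(2) in 32.32 fixed point, taking the ceiling, and rounding up to the
// table's spacing of 8 decimal exponents starting at 10^-348. The simplified
// version below uses plain floating point instead of the fixed-point trick;
// the fmt_doc_sketch namespace and function name are illustrative only.
#include <cmath>

namespace fmt_doc_sketch {

// Returns the decimal exponent of the cached power whose binary exponent is
// at least min_exponent, and stores the table index in table_index (the index
// arithmetic mirrors get_cached_power above).
inline int cached_pow10_exponent(int min_exponent, int& table_index) {
  const double log10_2 = 0.30102999566398120;
  // 64 is fp::num_significand_bits; k = ceil((min_exponent + 63) * log10(2)).
  int k = static_cast<int>(std::ceil((min_exponent + 64 - 1) * log10_2));
  const int first_dec_exp = -348;  // decimal exponent of the first table entry
  const int dec_exp_step = 8;      // spacing between consecutive entries
  table_index = (k - first_dec_exp - 1) / dec_exp_step + 1;
  return first_dec_exp + table_index * dec_exp_step;
}

}  // namespace fmt_doc_sketch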
template FMT_CONSTEXPR auto write_padded(OutputIt out, const basic_format_specs& specs, size_t size, size_t width, F&& f) -> OutputIt { static_assert(align == align::left || align == align::right, ""); unsigned spec_width = to_unsigned(specs.width); size_t padding = spec_width > width ? spec_width - width : 0; // Shifts are encoded as string literals because static constexpr is not // supported in constexpr functions. auto* shifts = align == align::left ? "\x1f\x1f\x00\x01" : "\x00\x1f\x00\x01"; size_t left_padding = padding >> shifts[specs.align]; size_t right_padding = padding - left_padding; auto it = reserve(out, size + padding * specs.fill.size()); if (left_padding != 0) it = fill(it, left_padding, specs.fill); it = f(it); if (right_padding != 0) it = fill(it, right_padding, specs.fill); return base_iterator(out, it); } template constexpr auto write_padded(OutputIt out, const basic_format_specs& specs, size_t size, F&& f) -> OutputIt { return write_padded(out, specs, size, size, f); } template FMT_CONSTEXPR auto write_bytes(OutputIt out, string_view bytes, const basic_format_specs& specs) -> OutputIt { return write_padded( out, specs, bytes.size(), [bytes](reserve_iterator it) { const char* data = bytes.data(); return copy_str(data, data + bytes.size(), it); }); } template auto write_ptr(OutputIt out, UIntPtr value, const basic_format_specs* specs) -> OutputIt { int num_digits = count_digits<4>(value); auto size = to_unsigned(num_digits) + size_t(2); auto write = [=](reserve_iterator it) { *it++ = static_cast('0'); *it++ = static_cast('x'); return format_uint<4, Char>(it, value, num_digits); }; return specs ? write_padded(out, *specs, size, write) : base_iterator(out, write(reserve(out, size))); } // Returns true iff the code point cp is printable. FMT_API auto is_printable(uint32_t cp) -> bool; inline auto needs_escape(uint32_t cp) -> bool { return cp < 0x20 || cp == 0x7f || cp == '"' || cp == '\\' || !is_printable(cp); } template struct find_escape_result { const Char* begin; const Char* end; uint32_t cp; }; template using make_unsigned_char = typename conditional_t::value, std::make_unsigned, type_identity>::type; template auto find_escape(const Char* begin, const Char* end) -> find_escape_result { for (; begin != end; ++begin) { uint32_t cp = static_cast>(*begin); if (const_check(sizeof(Char) == 1) && cp >= 0x80) continue; if (needs_escape(cp)) return {begin, begin + 1, cp}; } return {begin, nullptr, 0}; } inline auto find_escape(const char* begin, const char* end) -> find_escape_result { if (!is_utf8()) return find_escape(begin, end); auto result = find_escape_result{end, nullptr, 0}; for_each_codepoint(string_view(begin, to_unsigned(end - begin)), [&](uint32_t cp, string_view sv) { if (needs_escape(cp)) { result = {sv.begin(), sv.end(), cp}; return false; } return true; }); return result; } #define FMT_STRING_IMPL(s, base, explicit) \ [] { \ /* Use the hidden visibility as a workaround for a GCC bug (#1973). */ \ /* Use a macro-like name to avoid shadowing warnings. */ \ struct FMT_GCC_VISIBILITY_HIDDEN FMT_COMPILE_STRING : base { \ using char_type FMT_MAYBE_UNUSED = fmt::remove_cvref_t; \ FMT_MAYBE_UNUSED FMT_CONSTEXPR explicit \ operator fmt::basic_string_view() const { \ return fmt::detail_exported::compile_string_to_view(s); \ } \ }; \ return FMT_COMPILE_STRING(); \ }() /** \rst Constructs a compile-time format string from a string literal *s*. **Example**:: // A compile-time error because 'd' is an invalid specifier for strings. 
std::string s = fmt::format(FMT_STRING("{:d}"), "foo"); \endrst */ #define FMT_STRING(s) FMT_STRING_IMPL(s, fmt::detail::compile_string, ) template auto write_codepoint(OutputIt out, char prefix, uint32_t cp) -> OutputIt { *out++ = static_cast('\\'); *out++ = static_cast(prefix); Char buf[width]; fill_n(buf, width, static_cast('0')); format_uint<4>(buf, cp, width); return copy_str(buf, buf + width, out); } template auto write_escaped_cp(OutputIt out, const find_escape_result& escape) -> OutputIt { auto c = static_cast(escape.cp); switch (escape.cp) { case '\n': *out++ = static_cast('\\'); c = static_cast('n'); break; case '\r': *out++ = static_cast('\\'); c = static_cast('r'); break; case '\t': *out++ = static_cast('\\'); c = static_cast('t'); break; case '"': FMT_FALLTHROUGH; case '\'': FMT_FALLTHROUGH; case '\\': *out++ = static_cast('\\'); break; default: if (is_utf8()) { if (escape.cp < 0x100) { return write_codepoint<2, Char>(out, 'x', escape.cp); } if (escape.cp < 0x10000) { return write_codepoint<4, Char>(out, 'u', escape.cp); } if (escape.cp < 0x110000) { return write_codepoint<8, Char>(out, 'U', escape.cp); } } for (Char escape_char : basic_string_view( escape.begin, to_unsigned(escape.end - escape.begin))) { out = write_codepoint<2, Char>(out, 'x', static_cast(escape_char) & 0xFF); } return out; } *out++ = c; return out; } template auto write_escaped_string(OutputIt out, basic_string_view str) -> OutputIt { *out++ = static_cast('"'); auto begin = str.begin(), end = str.end(); do { auto escape = find_escape(begin, end); out = copy_str(begin, escape.begin, out); begin = escape.end; if (!begin) break; out = write_escaped_cp(out, escape); } while (begin != end); *out++ = static_cast('"'); return out; } template auto write_escaped_char(OutputIt out, Char v) -> OutputIt { *out++ = static_cast('\''); if ((needs_escape(static_cast(v)) && v != static_cast('"')) || v == static_cast('\'')) { out = write_escaped_cp( out, find_escape_result{&v, &v + 1, static_cast(v)}); } else { *out++ = v; } *out++ = static_cast('\''); return out; } template FMT_CONSTEXPR auto write_char(OutputIt out, Char value, const basic_format_specs& specs) -> OutputIt { bool is_debug = specs.type == presentation_type::debug; return write_padded(out, specs, 1, [=](reserve_iterator it) { if (is_debug) return write_escaped_char(it, value); *it++ = value; return it; }); } template FMT_CONSTEXPR auto write(OutputIt out, Char value, const basic_format_specs& specs, locale_ref loc = {}) -> OutputIt { return check_char_specs(specs) ? write_char(out, value, specs) : write(out, static_cast(value), specs, loc); } // Data for write_int that doesn't depend on output iterator type. It is used to // avoid template code bloat. template struct write_int_data { size_t size; size_t padding; FMT_CONSTEXPR write_int_data(int num_digits, unsigned prefix, const basic_format_specs& specs) : size((prefix >> 24) + to_unsigned(num_digits)), padding(0) { if (specs.align == align::numeric) { auto width = to_unsigned(specs.width); if (width > size) { padding = width - size; size = width; } } else if (specs.precision > num_digits) { size = (prefix >> 24) + to_unsigned(specs.precision); padding = to_unsigned(specs.precision - num_digits); } } }; // Writes an integer in the format // // where are written by write_digits(it). // prefix contains chars in three lower bytes and the size in the fourth byte. 
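// An editor's illustrative sketch, not part of fmt: write_escaped_cp above
// picks an escape form by code point range -- named escapes for control and
// quote characters, then \xNN, \uNNNN or \UNNNNNNNN. The standalone helper
// below applies the same selection to a std::string; the fmt_doc_sketch
// namespace and function name are illustrative only.
#include <cstdint>
#include <cstdio>
#include <string>

namespace fmt_doc_sketch {

// Appends an escaped representation of the Unicode code point cp to out.
inline void append_escaped(std::string& out, uint32_t cp) {
  switch (cp) {
  case '\n': out += "\\n"; return;
  case '\r': out += "\\r"; return;
  case '\t': out += "\\t"; return;
  case '"': out += "\\\""; return;
  case '\'': out += "\\'"; return;
  case '\\': out += "\\\\"; return;
  default: break;
  }
  char buf[16];
  if (cp < 0x100)
    std::snprintf(buf, sizeof(buf), "\\x%02x", static_cast<unsigned>(cp));
  else if (cp < 0x10000)
    std::snprintf(buf, sizeof(buf), "\\u%04x", static_cast<unsigned>(cp));
  else
    std::snprintf(buf, sizeof(buf), "\\U%08x", static_cast<unsigned>(cp));
  out += buf;
}

}  // namespace fmt_doc_sketch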
template FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, int num_digits, unsigned prefix, const basic_format_specs& specs, W write_digits) -> OutputIt { // Slightly faster check for specs.width == 0 && specs.precision == -1. if ((specs.width | (specs.precision + 1)) == 0) { auto it = reserve(out, to_unsigned(num_digits) + (prefix >> 24)); if (prefix != 0) { for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) *it++ = static_cast(p & 0xff); } return base_iterator(out, write_digits(it)); } auto data = write_int_data(num_digits, prefix, specs); return write_padded( out, specs, data.size, [=](reserve_iterator it) { for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8) *it++ = static_cast(p & 0xff); it = detail::fill_n(it, data.padding, static_cast('0')); return write_digits(it); }); } template class digit_grouping { private: thousands_sep_result sep_; struct next_state { std::string::const_iterator group; int pos; }; next_state initial_state() const { return {sep_.grouping.begin(), 0}; } // Returns the next digit group separator position. int next(next_state& state) const { if (!sep_.thousands_sep) return max_value(); if (state.group == sep_.grouping.end()) return state.pos += sep_.grouping.back(); if (*state.group <= 0 || *state.group == max_value()) return max_value(); state.pos += *state.group++; return state.pos; } public: explicit digit_grouping(locale_ref loc, bool localized = true) { if (localized) sep_ = thousands_sep(loc); else sep_.thousands_sep = Char(); } explicit digit_grouping(thousands_sep_result sep) : sep_(sep) {} Char separator() const { return sep_.thousands_sep; } int count_separators(int num_digits) const { int count = 0; auto state = initial_state(); while (num_digits > next(state)) ++count; return count; } // Applies grouping to digits and write the output to out. template Out apply(Out out, basic_string_view digits) const { auto num_digits = static_cast(digits.size()); auto separators = basic_memory_buffer(); separators.push_back(0); auto state = initial_state(); while (int i = next(state)) { if (i >= num_digits) break; separators.push_back(i); } for (int i = 0, sep_index = static_cast(separators.size() - 1); i < num_digits; ++i) { if (num_digits - i == separators[sep_index]) { *out++ = separator(); --sep_index; } *out++ = static_cast(digits[to_unsigned(i)]); } return out; } }; template auto write_int_localized(OutputIt out, UInt value, unsigned prefix, const basic_format_specs& specs, const digit_grouping& grouping) -> OutputIt { static_assert(std::is_same, UInt>::value, ""); int num_digits = count_digits(value); char digits[40]; format_decimal(digits, value, num_digits); unsigned size = to_unsigned((prefix != 0 ? 1 : 0) + num_digits + grouping.count_separators(num_digits)); return write_padded( out, specs, size, size, [&](reserve_iterator it) { if (prefix != 0) { char sign = static_cast(prefix); *it++ = static_cast(sign); } return grouping.apply(it, string_view(digits, to_unsigned(num_digits))); }); } template auto write_int_localized(OutputIt& out, UInt value, unsigned prefix, const basic_format_specs& specs, locale_ref loc) -> bool { auto grouping = digit_grouping(loc); out = write_int_localized(out, value, prefix, specs, grouping); return true; } FMT_CONSTEXPR inline void prefix_append(unsigned& prefix, unsigned value) { prefix |= prefix != 0 ? value << 8 : value; prefix += (1u + (value > 0xff ? 
1 : 0)) << 24; } template struct write_int_arg { UInt abs_value; unsigned prefix; }; template FMT_CONSTEXPR auto make_write_int_arg(T value, sign_t sign) -> write_int_arg> { auto prefix = 0u; auto abs_value = static_cast>(value); if (is_negative(value)) { prefix = 0x01000000 | '-'; abs_value = 0 - abs_value; } else { constexpr const unsigned prefixes[4] = {0, 0, 0x1000000u | '+', 0x1000000u | ' '}; prefix = prefixes[sign]; } return {abs_value, prefix}; } template FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, write_int_arg arg, const basic_format_specs& specs, locale_ref loc) -> OutputIt { static_assert(std::is_same>::value, ""); auto abs_value = arg.abs_value; auto prefix = arg.prefix; switch (specs.type) { case presentation_type::none: case presentation_type::dec: { if (specs.localized && write_int_localized(out, static_cast>(abs_value), prefix, specs, loc)) { return out; } auto num_digits = count_digits(abs_value); return write_int( out, num_digits, prefix, specs, [=](reserve_iterator it) { return format_decimal(it, abs_value, num_digits).end; }); } case presentation_type::hex_lower: case presentation_type::hex_upper: { bool upper = specs.type == presentation_type::hex_upper; if (specs.alt) prefix_append(prefix, unsigned(upper ? 'X' : 'x') << 8 | '0'); int num_digits = count_digits<4>(abs_value); return write_int( out, num_digits, prefix, specs, [=](reserve_iterator it) { return format_uint<4, Char>(it, abs_value, num_digits, upper); }); } case presentation_type::bin_lower: case presentation_type::bin_upper: { bool upper = specs.type == presentation_type::bin_upper; if (specs.alt) prefix_append(prefix, unsigned(upper ? 'B' : 'b') << 8 | '0'); int num_digits = count_digits<1>(abs_value); return write_int(out, num_digits, prefix, specs, [=](reserve_iterator it) { return format_uint<1, Char>(it, abs_value, num_digits); }); } case presentation_type::oct: { int num_digits = count_digits<3>(abs_value); // Octal prefix '0' is counted as a digit, so only add it if precision // is not greater than the number of digits. if (specs.alt && specs.precision <= num_digits && abs_value != 0) prefix_append(prefix, '0'); return write_int(out, num_digits, prefix, specs, [=](reserve_iterator it) { return format_uint<3, Char>(it, abs_value, num_digits); }); } case presentation_type::chr: return write_char(out, static_cast(abs_value), specs); default: throw_format_error("invalid type specifier"); } return out; } template FMT_CONSTEXPR FMT_NOINLINE auto write_int_noinline( OutputIt out, write_int_arg arg, const basic_format_specs& specs, locale_ref loc) -> OutputIt { return write_int(out, arg, specs, loc); } template ::value && !std::is_same::value && std::is_same>::value)> FMT_CONSTEXPR FMT_INLINE auto write(OutputIt out, T value, const basic_format_specs& specs, locale_ref loc) -> OutputIt { return write_int_noinline(out, make_write_int_arg(value, specs.sign), specs, loc); } // An inlined version of write used in format string compilation. template ::value && !std::is_same::value && !std::is_same>::value)> FMT_CONSTEXPR FMT_INLINE auto write(OutputIt out, T value, const basic_format_specs& specs, locale_ref loc) -> OutputIt { return write_int(out, make_write_int_arg(value, specs.sign), specs, loc); } // An output iterator that counts the number of objects written to it and // discards them. 
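// An editor's illustrative sketch, not part of fmt: make_write_int_arg and
// prefix_append above pack a sign/base prefix into one unsigned word -- up to
// three prefix characters in the low three bytes (first character in the
// lowest byte) and the character count in the top byte. The helpers below
// build and expand such a word, assuming at most three non-zero characters;
// the fmt_doc_sketch namespace and function names are illustrative only.
#include <cstdint>
#include <string>

namespace fmt_doc_sketch {

// Packs count (<= 3) prefix characters into the encoding described above.
// encode_prefix("-0x", 3) should match the word write_int receives for a
// negative hexadecimal value formatted with the '#' flag.
inline uint32_t encode_prefix(const char* chars, uint32_t count) {
  uint32_t prefix = count << 24;
  for (uint32_t i = 0; i < count; ++i) {
    uint32_t c = static_cast<unsigned char>(chars[i]);
    prefix |= c << (8 * i);
  }
  return prefix;
}

// Expands the packed characters in emission order, like the loop in write_int.
inline std::string decode_prefix(uint32_t prefix) {
  std::string out;
  for (uint32_t p = prefix & 0xffffff; p != 0; p >>= 8)
    out += static_cast<char>(p & 0xff);
  return out;
}

}  // namespace fmt_doc_sketch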
class counting_iterator { private: size_t count_; public: using iterator_category = std::output_iterator_tag; using difference_type = std::ptrdiff_t; using pointer = void; using reference = void; FMT_UNCHECKED_ITERATOR(counting_iterator); struct value_type { template FMT_CONSTEXPR void operator=(const T&) {} }; FMT_CONSTEXPR counting_iterator() : count_(0) {} FMT_CONSTEXPR size_t count() const { return count_; } FMT_CONSTEXPR counting_iterator& operator++() { ++count_; return *this; } FMT_CONSTEXPR counting_iterator operator++(int) { auto it = *this; ++*this; return it; } FMT_CONSTEXPR friend counting_iterator operator+(counting_iterator it, difference_type n) { it.count_ += static_cast(n); return it; } FMT_CONSTEXPR value_type operator*() const { return {}; } }; template FMT_CONSTEXPR auto write(OutputIt out, basic_string_view s, const basic_format_specs& specs) -> OutputIt { auto data = s.data(); auto size = s.size(); if (specs.precision >= 0 && to_unsigned(specs.precision) < size) size = code_point_index(s, to_unsigned(specs.precision)); bool is_debug = specs.type == presentation_type::debug; size_t width = 0; if (specs.width != 0) { if (is_debug) width = write_escaped_string(counting_iterator{}, s).count(); else width = compute_width(basic_string_view(data, size)); } return write_padded(out, specs, size, width, [=](reserve_iterator it) { if (is_debug) return write_escaped_string(it, s); return copy_str(data, data + size, it); }); } template FMT_CONSTEXPR auto write(OutputIt out, basic_string_view> s, const basic_format_specs& specs, locale_ref) -> OutputIt { check_string_type_spec(specs.type); return write(out, s, specs); } template FMT_CONSTEXPR auto write(OutputIt out, const Char* s, const basic_format_specs& specs, locale_ref) -> OutputIt { return check_cstring_type_spec(specs.type) ? write(out, basic_string_view(s), specs, {}) : write_ptr(out, bit_cast(s), &specs); } template ::value && !std::is_same::value && !std::is_same::value)> FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { auto abs_value = static_cast>(value); bool negative = is_negative(value); // Don't do -abs_value since it trips unsigned-integer-overflow sanitizer. if (negative) abs_value = ~abs_value + 1; int num_digits = count_digits(abs_value); auto size = (negative ? 1 : 0) + static_cast(num_digits); auto it = reserve(out, size); if (auto ptr = to_pointer(it, size)) { if (negative) *ptr++ = static_cast('-'); format_decimal(ptr, abs_value, num_digits); return out; } if (negative) *it++ = static_cast('-'); it = format_decimal(it, abs_value, num_digits).end; return base_iterator(out, it); } template FMT_CONSTEXPR20 auto write_nonfinite(OutputIt out, bool isnan, basic_format_specs specs, const float_specs& fspecs) -> OutputIt { auto str = isnan ? (fspecs.upper ? "NAN" : "nan") : (fspecs.upper ? "INF" : "inf"); constexpr size_t str_size = 3; auto sign = fspecs.sign; auto size = str_size + (sign ? 1 : 0); // Replace '0'-padding with space for non-finite values. const bool is_zero_fill = specs.fill.size() == 1 && *specs.fill.data() == static_cast('0'); if (is_zero_fill) specs.fill[0] = static_cast(' '); return write_padded(out, specs, size, [=](reserve_iterator it) { if (sign) *it++ = detail::sign(sign); return copy_str(str, str + str_size, it); }); } // A decimal floating-point number significand * pow(10, exp). 
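// An editor's illustrative sketch, not part of fmt: counting_iterator above
// lets the string writer measure the width of escaped output without
// producing it. The minimal counting output iterator below works with any
// standard algorithm that writes through an output iterator; the
// fmt_doc_sketch namespace and class name are illustrative only.
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <string>

namespace fmt_doc_sketch {

// Counts assignments made through the iterator and discards the values.
class count_out_iter {
 public:
  using iterator_category = std::output_iterator_tag;
  using value_type = void;
  using difference_type = std::ptrdiff_t;
  using pointer = void;
  using reference = void;

  struct proxy {
    template <typename T> void operator=(const T&) {}  // swallow the value
  };

  count_out_iter& operator++() { ++count_; return *this; }
  count_out_iter operator++(int) { auto copy = *this; ++count_; return copy; }
  proxy operator*() { return proxy(); }
  std::size_t count() const { return count_; }

 private:
  std::size_t count_ = 0;
};

// Usage: how many characters would copying s produce?
inline std::size_t measure(const std::string& s) {
  return std::copy(s.begin(), s.end(), count_out_iter()).count();
}

}  // namespace fmt_doc_sketch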
struct big_decimal_fp { const char* significand; int significand_size; int exponent; }; constexpr auto get_significand_size(const big_decimal_fp& f) -> int { return f.significand_size; } template inline auto get_significand_size(const dragonbox::decimal_fp& f) -> int { return count_digits(f.significand); } template constexpr auto write_significand(OutputIt out, const char* significand, int significand_size) -> OutputIt { return copy_str(significand, significand + significand_size, out); } template inline auto write_significand(OutputIt out, UInt significand, int significand_size) -> OutputIt { return format_decimal(out, significand, significand_size).end; } template FMT_CONSTEXPR20 auto write_significand(OutputIt out, T significand, int significand_size, int exponent, const Grouping& grouping) -> OutputIt { if (!grouping.separator()) { out = write_significand(out, significand, significand_size); return detail::fill_n(out, exponent, static_cast('0')); } auto buffer = memory_buffer(); write_significand(appender(buffer), significand, significand_size); detail::fill_n(appender(buffer), exponent, '0'); return grouping.apply(out, string_view(buffer.data(), buffer.size())); } template ::value)> inline auto write_significand(Char* out, UInt significand, int significand_size, int integral_size, Char decimal_point) -> Char* { if (!decimal_point) return format_decimal(out, significand, significand_size).end; out += significand_size + 1; Char* end = out; int floating_size = significand_size - integral_size; for (int i = floating_size / 2; i > 0; --i) { out -= 2; copy2(out, digits2(static_cast(significand % 100))); significand /= 100; } if (floating_size % 2 != 0) { *--out = static_cast('0' + significand % 10); significand /= 10; } *--out = decimal_point; format_decimal(out - integral_size, significand, integral_size); return end; } template >::value)> inline auto write_significand(OutputIt out, UInt significand, int significand_size, int integral_size, Char decimal_point) -> OutputIt { // Buffer is large enough to hold digits (digits10 + 1) and a decimal point. 
Char buffer[digits10() + 2]; auto end = write_significand(buffer, significand, significand_size, integral_size, decimal_point); return detail::copy_str_noinline(buffer, end, out); } template FMT_CONSTEXPR auto write_significand(OutputIt out, const char* significand, int significand_size, int integral_size, Char decimal_point) -> OutputIt { out = detail::copy_str_noinline(significand, significand + integral_size, out); if (!decimal_point) return out; *out++ = decimal_point; return detail::copy_str_noinline(significand + integral_size, significand + significand_size, out); } template FMT_CONSTEXPR20 auto write_significand(OutputIt out, T significand, int significand_size, int integral_size, Char decimal_point, const Grouping& grouping) -> OutputIt { if (!grouping.separator()) { return write_significand(out, significand, significand_size, integral_size, decimal_point); } auto buffer = basic_memory_buffer(); write_significand(buffer_appender(buffer), significand, significand_size, integral_size, decimal_point); grouping.apply( out, basic_string_view(buffer.data(), to_unsigned(integral_size))); return detail::copy_str_noinline(buffer.data() + integral_size, buffer.end(), out); } template > FMT_CONSTEXPR20 auto do_write_float(OutputIt out, const DecimalFP& f, const basic_format_specs& specs, float_specs fspecs, locale_ref loc) -> OutputIt { auto significand = f.significand; int significand_size = get_significand_size(f); const Char zero = static_cast('0'); auto sign = fspecs.sign; size_t size = to_unsigned(significand_size) + (sign ? 1 : 0); using iterator = reserve_iterator; Char decimal_point = fspecs.locale ? detail::decimal_point(loc) : static_cast('.'); int output_exp = f.exponent + significand_size - 1; auto use_exp_format = [=]() { if (fspecs.format == float_format::exp) return true; if (fspecs.format != float_format::general) return false; // Use the fixed notation if the exponent is in [exp_lower, exp_upper), // e.g. 0.0001 instead of 1e-04. Otherwise use the exponent notation. const int exp_lower = -4, exp_upper = 16; return output_exp < exp_lower || output_exp >= (fspecs.precision > 0 ? fspecs.precision : exp_upper); }; if (use_exp_format()) { int num_zeros = 0; if (fspecs.showpoint) { num_zeros = fspecs.precision - significand_size; if (num_zeros < 0) num_zeros = 0; size += to_unsigned(num_zeros); } else if (significand_size == 1) { decimal_point = Char(); } auto abs_output_exp = output_exp >= 0 ? output_exp : -output_exp; int exp_digits = 2; if (abs_output_exp >= 100) exp_digits = abs_output_exp >= 1000 ? 4 : 3; size += to_unsigned((decimal_point ? 1 : 0) + 2 + exp_digits); char exp_char = fspecs.upper ? 'E' : 'e'; auto write = [=](iterator it) { if (sign) *it++ = detail::sign(sign); // Insert a decimal point after the first digit and add an exponent. it = write_significand(it, significand, significand_size, 1, decimal_point); if (num_zeros > 0) it = detail::fill_n(it, num_zeros, zero); *it++ = static_cast(exp_char); return write_exponent(output_exp, it); }; return specs.width > 0 ? 
write_padded(out, specs, size, write) : base_iterator(out, write(reserve(out, size))); } int exp = f.exponent + significand_size; if (f.exponent >= 0) { // 1234e5 -> 123400000[.0+] size += to_unsigned(f.exponent); int num_zeros = fspecs.precision - exp; abort_fuzzing_if(num_zeros > 5000); if (fspecs.showpoint) { ++size; if (num_zeros <= 0 && fspecs.format != float_format::fixed) num_zeros = 1; if (num_zeros > 0) size += to_unsigned(num_zeros); } auto grouping = Grouping(loc, fspecs.locale); size += to_unsigned(grouping.count_separators(exp)); return write_padded(out, specs, size, [&](iterator it) { if (sign) *it++ = detail::sign(sign); it = write_significand(it, significand, significand_size, f.exponent, grouping); if (!fspecs.showpoint) return it; *it++ = decimal_point; return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; }); } else if (exp > 0) { // 1234e-2 -> 12.34[0+] int num_zeros = fspecs.showpoint ? fspecs.precision - significand_size : 0; size += 1 + to_unsigned(num_zeros > 0 ? num_zeros : 0); auto grouping = Grouping(loc, fspecs.locale); size += to_unsigned(grouping.count_separators(significand_size)); return write_padded(out, specs, size, [&](iterator it) { if (sign) *it++ = detail::sign(sign); it = write_significand(it, significand, significand_size, exp, decimal_point, grouping); return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; }); } // 1234e-6 -> 0.001234 int num_zeros = -exp; if (significand_size == 0 && fspecs.precision >= 0 && fspecs.precision < num_zeros) { num_zeros = fspecs.precision; } bool pointy = num_zeros != 0 || significand_size != 0 || fspecs.showpoint; size += 1 + (pointy ? 1 : 0) + to_unsigned(num_zeros); return write_padded(out, specs, size, [&](iterator it) { if (sign) *it++ = detail::sign(sign); *it++ = zero; if (!pointy) return it; *it++ = decimal_point; it = detail::fill_n(it, num_zeros, zero); return write_significand(it, significand, significand_size); }); } template class fallback_digit_grouping { public: constexpr fallback_digit_grouping(locale_ref, bool) {} constexpr Char separator() const { return Char(); } constexpr int count_separators(int) const { return 0; } template constexpr Out apply(Out out, basic_string_view) const { return out; } }; template FMT_CONSTEXPR20 auto write_float(OutputIt out, const DecimalFP& f, const basic_format_specs& specs, float_specs fspecs, locale_ref loc) -> OutputIt { if (is_constant_evaluated()) { return do_write_float>(out, f, specs, fspecs, loc); } else { return do_write_float(out, f, specs, fspecs, loc); } } template constexpr bool isnan(T value) { return !(value >= value); // std::isnan doesn't support __float128. } template struct has_isfinite : std::false_type {}; template struct has_isfinite> : std::true_type {}; template ::value&& has_isfinite::value)> FMT_CONSTEXPR20 bool isfinite(T value) { constexpr T inf = T(std::numeric_limits::infinity()); if (is_constant_evaluated()) return !detail::isnan(value) && value != inf && value != -inf; return std::isfinite(value); } template ::value)> FMT_CONSTEXPR bool isfinite(T value) { T inf = T(std::numeric_limits::infinity()); // std::isfinite doesn't support __float128. 
return !detail::isnan(value) && value != inf && value != -inf; } template ::value)> FMT_INLINE FMT_CONSTEXPR bool signbit(T value) { if (is_constant_evaluated()) { #ifdef __cpp_if_constexpr if constexpr (std::numeric_limits::is_iec559) { auto bits = detail::bit_cast(static_cast(value)); return (bits >> (num_bits() - 1)) != 0; } #endif } return std::signbit(static_cast(value)); } enum class round_direction { unknown, up, down }; // Given the divisor (normally a power of 10), the remainder = v % divisor for // some number v and the error, returns whether v should be rounded up, down, or // whether the rounding direction can't be determined due to error. // error should be less than divisor / 2. FMT_CONSTEXPR inline round_direction get_round_direction(uint64_t divisor, uint64_t remainder, uint64_t error) { FMT_ASSERT(remainder < divisor, ""); // divisor - remainder won't overflow. FMT_ASSERT(error < divisor, ""); // divisor - error won't overflow. FMT_ASSERT(error < divisor - error, ""); // error * 2 won't overflow. // Round down if (remainder + error) * 2 <= divisor. if (remainder <= divisor - remainder && error * 2 <= divisor - remainder * 2) return round_direction::down; // Round up if (remainder - error) * 2 >= divisor. if (remainder >= error && remainder - error >= divisor - (remainder - error)) { return round_direction::up; } return round_direction::unknown; } namespace digits { enum result { more, // Generate more digits. done, // Done generating digits. error // Digit generation cancelled due to an error. }; } struct gen_digits_handler { char* buf; int size; int precision; int exp10; bool fixed; FMT_CONSTEXPR digits::result on_digit(char digit, uint64_t divisor, uint64_t remainder, uint64_t error, bool integral) { FMT_ASSERT(remainder < divisor, ""); buf[size++] = digit; if (!integral && error >= remainder) return digits::error; if (size < precision) return digits::more; if (!integral) { // Check if error * 2 < divisor with overflow prevention. // The check is not needed for the integral part because error = 1 // and divisor > (1 << 32) there. if (error >= divisor || error >= divisor - error) return digits::error; } else { FMT_ASSERT(error == 1 && divisor > 2, ""); } auto dir = get_round_direction(divisor, remainder, error); if (dir != round_direction::up) return dir == round_direction::down ? digits::done : digits::error; ++buf[size - 1]; for (int i = size - 1; i > 0 && buf[i] > '9'; --i) { buf[i] = '0'; ++buf[i - 1]; } if (buf[0] > '9') { buf[0] = '1'; if (fixed) buf[size++] = '0'; else ++exp10; } return digits::done; } }; inline FMT_CONSTEXPR20 void adjust_precision(int& precision, int exp10) { // Adjust fixed precision by exponent because it is relative to decimal // point. if (exp10 > 0 && precision > max_value() - exp10) FMT_THROW(format_error("number is too big")); precision += exp10; } // Generates output using the Grisu digit-gen algorithm. // error: the size of the region (lower, upper) outside of which numbers // definitely do not round to value (Delta in Grisu3). FMT_INLINE FMT_CONSTEXPR20 auto grisu_gen_digits(fp value, uint64_t error, int& exp, gen_digits_handler& handler) -> digits::result { const fp one(1ULL << -value.e, value.e); // The integral part of scaled value (p1 in Grisu) = value / one. It cannot be // zero because it contains a product of two 64-bit numbers with MSB set (due // to normalization) - 1, shifted right by at most 60 bits. 
auto integral = static_cast(value.f >> -one.e); FMT_ASSERT(integral != 0, ""); FMT_ASSERT(integral == value.f >> -one.e, ""); // The fractional part of scaled value (p2 in Grisu) c = value % one. uint64_t fractional = value.f & (one.f - 1); exp = count_digits(integral); // kappa in Grisu. // Non-fixed formats require at least one digit and no precision adjustment. if (handler.fixed) { adjust_precision(handler.precision, exp + handler.exp10); // Check if precision is satisfied just by leading zeros, e.g. // format("{:.2f}", 0.001) gives "0.00" without generating any digits. if (handler.precision <= 0) { if (handler.precision < 0) return digits::done; // Divide by 10 to prevent overflow. uint64_t divisor = data::power_of_10_64[exp - 1] << -one.e; auto dir = get_round_direction(divisor, value.f / 10, error * 10); if (dir == round_direction::unknown) return digits::error; handler.buf[handler.size++] = dir == round_direction::up ? '1' : '0'; return digits::done; } } // Generate digits for the integral part. This can produce up to 10 digits. do { uint32_t digit = 0; auto divmod_integral = [&](uint32_t divisor) { digit = integral / divisor; integral %= divisor; }; // This optimization by Milo Yip reduces the number of integer divisions by // one per iteration. switch (exp) { case 10: divmod_integral(1000000000); break; case 9: divmod_integral(100000000); break; case 8: divmod_integral(10000000); break; case 7: divmod_integral(1000000); break; case 6: divmod_integral(100000); break; case 5: divmod_integral(10000); break; case 4: divmod_integral(1000); break; case 3: divmod_integral(100); break; case 2: divmod_integral(10); break; case 1: digit = integral; integral = 0; break; default: FMT_ASSERT(false, "invalid number of digits"); } --exp; auto remainder = (static_cast(integral) << -one.e) + fractional; auto result = handler.on_digit(static_cast('0' + digit), data::power_of_10_64[exp] << -one.e, remainder, error, true); if (result != digits::more) return result; } while (exp > 0); // Generate digits for the fractional part. for (;;) { fractional *= 10; error *= 10; char digit = static_cast('0' + (fractional >> -one.e)); fractional &= one.f - 1; --exp; auto result = handler.on_digit(digit, one.f, fractional, error, false); if (result != digits::more) return result; } } class bigint { private: // A bigint is stored as an array of bigits (big digits), with bigit at index // 0 being the least significant one. using bigit = uint32_t; using double_bigit = uint64_t; enum { bigits_capacity = 32 }; basic_memory_buffer bigits_; int exp_; FMT_CONSTEXPR20 bigit operator[](int index) const { return bigits_[to_unsigned(index)]; } FMT_CONSTEXPR20 bigit& operator[](int index) { return bigits_[to_unsigned(index)]; } static constexpr const int bigit_bits = num_bits(); friend struct formatter; FMT_CONSTEXPR20 void subtract_bigits(int index, bigit other, bigit& borrow) { auto result = static_cast((*this)[index]) - other - borrow; (*this)[index] = static_cast(result); borrow = static_cast(result >> (bigit_bits * 2 - 1)); } FMT_CONSTEXPR20 void remove_leading_zeros() { int num_bigits = static_cast(bigits_.size()) - 1; while (num_bigits > 0 && (*this)[num_bigits] == 0) --num_bigits; bigits_.resize(to_unsigned(num_bigits + 1)); } // Computes *this -= other assuming aligned bigints and *this >= other. 
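// An editor's illustrative sketch, not part of fmt: the integral-digit loop
// in grisu_gen_digits above switches on the number of remaining digits so
// each iteration divides by a compile-time constant (an optimization credited
// to Milo Yip in the comments). The standalone version below extracts the
// digits of a uint32_t the same way; the fmt_doc_sketch namespace and
// function names are illustrative only.
#include <cstdint>
#include <string>

namespace fmt_doc_sketch {

// Returns the number of decimal digits in n (1 for n == 0).
inline int decimal_digit_count(uint32_t n) {
  int count = 1;
  for (;;) {
    if (n < 10) return count;
    if (n < 100) return count + 1;
    if (n < 1000) return count + 2;
    if (n < 10000) return count + 3;
    n /= 10000;
    count += 4;
  }
}

// Emits the digits of n most-significant first. Each case divides by a
// constant so the compiler can replace the division with a multiplication.
inline std::string integral_digits(uint32_t n) {
  std::string out;
  int exp = decimal_digit_count(n);  // digits still to emit
  while (exp > 0) {
    uint32_t digit = 0;
    switch (exp) {
    case 10: digit = n / 1000000000; n %= 1000000000; break;
    case 9:  digit = n / 100000000;  n %= 100000000;  break;
    case 8:  digit = n / 10000000;   n %= 10000000;   break;
    case 7:  digit = n / 1000000;    n %= 1000000;    break;
    case 6:  digit = n / 100000;     n %= 100000;     break;
    case 5:  digit = n / 10000;      n %= 10000;      break;
    case 4:  digit = n / 1000;       n %= 1000;       break;
    case 3:  digit = n / 100;        n %= 100;        break;
    case 2:  digit = n / 10;         n %= 10;         break;
    default: digit = n;              n = 0;           break;
    }
    out += static_cast<char>('0' + digit);
    --exp;
  }
  return out;
}

}  // namespace fmt_doc_sketch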
FMT_CONSTEXPR20 void subtract_aligned(const bigint& other) { FMT_ASSERT(other.exp_ >= exp_, "unaligned bigints"); FMT_ASSERT(compare(*this, other) >= 0, ""); bigit borrow = 0; int i = other.exp_ - exp_; for (size_t j = 0, n = other.bigits_.size(); j != n; ++i, ++j) subtract_bigits(i, other.bigits_[j], borrow); while (borrow > 0) subtract_bigits(i, 0, borrow); remove_leading_zeros(); } FMT_CONSTEXPR20 void multiply(uint32_t value) { const double_bigit wide_value = value; bigit carry = 0; for (size_t i = 0, n = bigits_.size(); i < n; ++i) { double_bigit result = bigits_[i] * wide_value + carry; bigits_[i] = static_cast(result); carry = static_cast(result >> bigit_bits); } if (carry != 0) bigits_.push_back(carry); } template ::value || std::is_same::value)> FMT_CONSTEXPR20 void multiply(UInt value) { using half_uint = conditional_t::value, uint64_t, uint32_t>; const int shift = num_bits() - bigit_bits; const UInt lower = static_cast(value); const UInt upper = value >> num_bits(); UInt carry = 0; for (size_t i = 0, n = bigits_.size(); i < n; ++i) { UInt result = lower * bigits_[i] + static_cast(carry); carry = (upper * bigits_[i] << shift) + (result >> bigit_bits) + (carry >> bigit_bits); bigits_[i] = static_cast(result); } while (carry != 0) { bigits_.push_back(static_cast(carry)); carry >>= bigit_bits; } } template ::value || std::is_same::value)> FMT_CONSTEXPR20 void assign(UInt n) { size_t num_bigits = 0; do { bigits_[num_bigits++] = static_cast(n); n >>= bigit_bits; } while (n != 0); bigits_.resize(num_bigits); exp_ = 0; } public: FMT_CONSTEXPR20 bigint() : exp_(0) {} explicit bigint(uint64_t n) { assign(n); } bigint(const bigint&) = delete; void operator=(const bigint&) = delete; FMT_CONSTEXPR20 void assign(const bigint& other) { auto size = other.bigits_.size(); bigits_.resize(size); auto data = other.bigits_.data(); std::copy(data, data + size, make_checked(bigits_.data(), size)); exp_ = other.exp_; } template FMT_CONSTEXPR20 void operator=(Int n) { FMT_ASSERT(n > 0, ""); assign(uint64_or_128_t(n)); } FMT_CONSTEXPR20 int num_bigits() const { return static_cast(bigits_.size()) + exp_; } FMT_NOINLINE FMT_CONSTEXPR20 bigint& operator<<=(int shift) { FMT_ASSERT(shift >= 0, ""); exp_ += shift / bigit_bits; shift %= bigit_bits; if (shift == 0) return *this; bigit carry = 0; for (size_t i = 0, n = bigits_.size(); i < n; ++i) { bigit c = bigits_[i] >> (bigit_bits - shift); bigits_[i] = (bigits_[i] << shift) + carry; carry = c; } if (carry != 0) bigits_.push_back(carry); return *this; } template FMT_CONSTEXPR20 bigint& operator*=(Int value) { FMT_ASSERT(value > 0, ""); multiply(uint32_or_64_or_128_t(value)); return *this; } friend FMT_CONSTEXPR20 int compare(const bigint& lhs, const bigint& rhs) { int num_lhs_bigits = lhs.num_bigits(), num_rhs_bigits = rhs.num_bigits(); if (num_lhs_bigits != num_rhs_bigits) return num_lhs_bigits > num_rhs_bigits ? 1 : -1; int i = static_cast(lhs.bigits_.size()) - 1; int j = static_cast(rhs.bigits_.size()) - 1; int end = i - j; if (end < 0) end = 0; for (; i >= end; --i, --j) { bigit lhs_bigit = lhs[i], rhs_bigit = rhs[j]; if (lhs_bigit != rhs_bigit) return lhs_bigit > rhs_bigit ? 1 : -1; } if (i != j) return i > j ? 1 : -1; return 0; } // Returns compare(lhs1 + lhs2, rhs). friend FMT_CONSTEXPR20 int add_compare(const bigint& lhs1, const bigint& lhs2, const bigint& rhs) { auto minimum = [](int a, int b) { return a < b ? a : b; }; auto maximum = [](int a, int b) { return a > b ? 
a : b; }; int max_lhs_bigits = maximum(lhs1.num_bigits(), lhs2.num_bigits()); int num_rhs_bigits = rhs.num_bigits(); if (max_lhs_bigits + 1 < num_rhs_bigits) return -1; if (max_lhs_bigits > num_rhs_bigits) return 1; auto get_bigit = [](const bigint& n, int i) -> bigit { return i >= n.exp_ && i < n.num_bigits() ? n[i - n.exp_] : 0; }; double_bigit borrow = 0; int min_exp = minimum(minimum(lhs1.exp_, lhs2.exp_), rhs.exp_); for (int i = num_rhs_bigits - 1; i >= min_exp; --i) { double_bigit sum = static_cast(get_bigit(lhs1, i)) + get_bigit(lhs2, i); bigit rhs_bigit = get_bigit(rhs, i); if (sum > rhs_bigit + borrow) return 1; borrow = rhs_bigit + borrow - sum; if (borrow > 1) return -1; borrow <<= bigit_bits; } return borrow != 0 ? -1 : 0; } // Assigns pow(10, exp) to this bigint. FMT_CONSTEXPR20 void assign_pow10(int exp) { FMT_ASSERT(exp >= 0, ""); if (exp == 0) return *this = 1; // Find the top bit. int bitmask = 1; while (exp >= bitmask) bitmask <<= 1; bitmask >>= 1; // pow(10, exp) = pow(5, exp) * pow(2, exp). First compute pow(5, exp) by // repeated squaring and multiplication. *this = 5; bitmask >>= 1; while (bitmask != 0) { square(); if ((exp & bitmask) != 0) *this *= 5; bitmask >>= 1; } *this <<= exp; // Multiply by pow(2, exp) by shifting. } FMT_CONSTEXPR20 void square() { int num_bigits = static_cast(bigits_.size()); int num_result_bigits = 2 * num_bigits; basic_memory_buffer n(std::move(bigits_)); bigits_.resize(to_unsigned(num_result_bigits)); auto sum = uint128_t(); for (int bigit_index = 0; bigit_index < num_bigits; ++bigit_index) { // Compute bigit at position bigit_index of the result by adding // cross-product terms n[i] * n[j] such that i + j == bigit_index. for (int i = 0, j = bigit_index; j >= 0; ++i, --j) { // Most terms are multiplied twice which can be optimized in the future. sum += static_cast(n[i]) * n[j]; } (*this)[bigit_index] = static_cast(sum); sum >>= num_bits(); // Compute the carry. } // Do the same for the top half. for (int bigit_index = num_bigits; bigit_index < num_result_bigits; ++bigit_index) { for (int j = num_bigits - 1, i = bigit_index - j; i < num_bigits;) sum += static_cast(n[i++]) * n[j--]; (*this)[bigit_index] = static_cast(sum); sum >>= num_bits(); } remove_leading_zeros(); exp_ *= 2; } // If this bigint has a bigger exponent than other, adds trailing zero to make // exponents equal. This simplifies some operations such as subtraction. FMT_CONSTEXPR20 void align(const bigint& other) { int exp_difference = exp_ - other.exp_; if (exp_difference <= 0) return; int num_bigits = static_cast(bigits_.size()); bigits_.resize(to_unsigned(num_bigits + exp_difference)); for (int i = num_bigits - 1, j = i + exp_difference; i >= 0; --i, --j) bigits_[j] = bigits_[i]; std::uninitialized_fill_n(bigits_.data(), exp_difference, 0); exp_ -= exp_difference; } // Divides this bignum by divisor, assigning the remainder to this and // returning the quotient. FMT_CONSTEXPR20 int divmod_assign(const bigint& divisor) { FMT_ASSERT(this != &divisor, ""); if (compare(*this, divisor) < 0) return 0; FMT_ASSERT(divisor.bigits_[divisor.bigits_.size() - 1u] != 0, ""); align(divisor); int quotient = 0; do { subtract_aligned(divisor); ++quotient; } while (compare(*this, divisor) >= 0); return quotient; } }; // format_dragon flags. enum dragon { predecessor_closer = 1, fixup = 2, // Run fixup to correct exp10 which can be off by one. 
fixed = 4, }; // Formats a floating-point number using a variation of the Fixed-Precision // Positive Floating-Point Printout ((FPP)^2) algorithm by Steele & White: // https://fmt.dev/papers/p372-steele.pdf. FMT_CONSTEXPR20 inline void format_dragon(basic_fp value, unsigned flags, int num_digits, buffer& buf, int& exp10) { bigint numerator; // 2 * R in (FPP)^2. bigint denominator; // 2 * S in (FPP)^2. // lower and upper are differences between value and corresponding boundaries. bigint lower; // (M^- in (FPP)^2). bigint upper_store; // upper's value if different from lower. bigint* upper = nullptr; // (M^+ in (FPP)^2). // Shift numerator and denominator by an extra bit or two (if lower boundary // is closer) to make lower and upper integers. This eliminates multiplication // by 2 during later computations. bool is_predecessor_closer = (flags & dragon::predecessor_closer) != 0; int shift = is_predecessor_closer ? 2 : 1; if (value.e >= 0) { numerator = value.f; numerator <<= value.e + shift; lower = 1; lower <<= value.e; if (is_predecessor_closer) { upper_store = 1; upper_store <<= value.e + 1; upper = &upper_store; } denominator.assign_pow10(exp10); denominator <<= shift; } else if (exp10 < 0) { numerator.assign_pow10(-exp10); lower.assign(numerator); if (is_predecessor_closer) { upper_store.assign(numerator); upper_store <<= 1; upper = &upper_store; } numerator *= value.f; numerator <<= shift; denominator = 1; denominator <<= shift - value.e; } else { numerator = value.f; numerator <<= shift; denominator.assign_pow10(exp10); denominator <<= shift - value.e; lower = 1; if (is_predecessor_closer) { upper_store = 1ULL << 1; upper = &upper_store; } } int even = static_cast((value.f & 1) == 0); if (!upper) upper = &lower; if ((flags & dragon::fixup) != 0) { if (add_compare(numerator, *upper, denominator) + even <= 0) { --exp10; numerator *= 10; if (num_digits < 0) { lower *= 10; if (upper != &lower) *upper *= 10; } } if ((flags & dragon::fixed) != 0) adjust_precision(num_digits, exp10 + 1); } // Invariant: value == (numerator / denominator) * pow(10, exp10). if (num_digits < 0) { // Generate the shortest representation. num_digits = 0; char* data = buf.data(); for (;;) { int digit = numerator.divmod_assign(denominator); bool low = compare(numerator, lower) - even < 0; // numerator <[=] lower. // numerator + upper >[=] pow10: bool high = add_compare(numerator, *upper, denominator) + even > 0; data[num_digits++] = static_cast('0' + digit); if (low || high) { if (!low) { ++data[num_digits - 1]; } else if (high) { int result = add_compare(numerator, numerator, denominator); // Round half to even. if (result > 0 || (result == 0 && (digit % 2) != 0)) ++data[num_digits - 1]; } buf.try_resize(to_unsigned(num_digits)); exp10 -= num_digits - 1; return; } numerator *= 10; lower *= 10; if (upper != &lower) *upper *= 10; } } // Generate the given number of digits. exp10 -= num_digits - 1; if (num_digits == 0) { denominator *= 10; auto digit = add_compare(numerator, numerator, denominator) > 0 ? '1' : '0'; buf.push_back(digit); return; } buf.try_resize(to_unsigned(num_digits)); for (int i = 0; i < num_digits - 1; ++i) { int digit = numerator.divmod_assign(denominator); buf[i] = static_cast('0' + digit); numerator *= 10; } int digit = numerator.divmod_assign(denominator); auto result = add_compare(numerator, numerator, denominator); if (result > 0 || (result == 0 && (digit % 2) != 0)) { if (digit == 9) { const auto overflow = '0' + 10; buf[num_digits - 1] = overflow; // Propagate the carry. 
for (int i = num_digits - 1; i > 0 && buf[i] == overflow; --i) { buf[i] = '0'; ++buf[i - 1]; } if (buf[0] == overflow) { buf[0] = '1'; ++exp10; } return; } ++digit; } buf[num_digits - 1] = static_cast('0' + digit); } template FMT_CONSTEXPR20 auto format_float(Float value, int precision, float_specs specs, buffer& buf) -> int { // float is passed as double to reduce the number of instantiations. static_assert(!std::is_same::value, ""); FMT_ASSERT(value >= 0, "value is negative"); auto converted_value = convert_float(value); const bool fixed = specs.format == float_format::fixed; if (value <= 0) { // <= instead of == to silence a warning. if (precision <= 0 || !fixed) { buf.push_back('0'); return 0; } buf.try_resize(to_unsigned(precision)); fill_n(buf.data(), precision, '0'); return -precision; } int exp = 0; bool use_dragon = true; unsigned dragon_flags = 0; if (!is_fast_float()) { const auto inv_log2_10 = 0.3010299956639812; // 1 / log2(10) using info = dragonbox::float_info; const auto f = basic_fp(converted_value); // Compute exp, an approximate power of 10, such that // 10^(exp - 1) <= value < 10^exp or 10^exp <= value < 10^(exp + 1). // This is based on log10(value) == log2(value) / log2(10) and approximation // of log2(value) by e + num_fraction_bits idea from double-conversion. exp = static_cast( std::ceil((f.e + count_digits<1>(f.f) - 1) * inv_log2_10 - 1e-10)); dragon_flags = dragon::fixup; } else if (!is_constant_evaluated() && precision < 0) { // Use Dragonbox for the shortest format. if (specs.binary32) { auto dec = dragonbox::to_decimal(static_cast(value)); write(buffer_appender(buf), dec.significand); return dec.exponent; } auto dec = dragonbox::to_decimal(static_cast(value)); write(buffer_appender(buf), dec.significand); return dec.exponent; } else { // Use Grisu + Dragon4 for the given precision: // https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf. const int min_exp = -60; // alpha in Grisu. int cached_exp10 = 0; // K in Grisu. fp normalized = normalize(fp(converted_value)); const auto cached_pow = get_cached_power( min_exp - (normalized.e + fp::num_significand_bits), cached_exp10); normalized = normalized * cached_pow; gen_digits_handler handler{buf.data(), 0, precision, -cached_exp10, fixed}; if (grisu_gen_digits(normalized, 1, exp, handler) != digits::error && !is_constant_evaluated()) { exp += handler.exp10; buf.try_resize(to_unsigned(handler.size)); use_dragon = false; } else { exp += handler.size - cached_exp10 - 1; precision = handler.precision; } } if (use_dragon) { auto f = basic_fp(); bool is_predecessor_closer = specs.binary32 ? f.assign(static_cast(value)) : f.assign(converted_value); if (is_predecessor_closer) dragon_flags |= dragon::predecessor_closer; if (fixed) dragon_flags |= dragon::fixed; // Limit precision to the maximum possible number of significant digits in // an IEEE754 double because we don't need to generate zeros. const int max_double_digits = 767; if (precision > max_double_digits) precision = max_double_digits; format_dragon(f, dragon_flags, precision, buf, exp); } if (!fixed && !specs.showpoint) { // Remove trailing zeros. 
auto num_digits = buf.size(); while (num_digits > 0 && buf[num_digits - 1] == '0') { --num_digits; ++exp; } buf.try_resize(num_digits); } return exp; } template ::value)> FMT_CONSTEXPR20 auto write(OutputIt out, T value, basic_format_specs specs, locale_ref loc = {}) -> OutputIt { if (const_check(!is_supported_floating_point(value))) return out; float_specs fspecs = parse_float_type_spec(specs); fspecs.sign = specs.sign; if (detail::signbit(value)) { // value < 0 is false for NaN so use signbit. fspecs.sign = sign::minus; value = -value; } else if (fspecs.sign == sign::minus) { fspecs.sign = sign::none; } if (!detail::isfinite(value)) return write_nonfinite(out, detail::isnan(value), specs, fspecs); if (specs.align == align::numeric && fspecs.sign) { auto it = reserve(out, 1); *it++ = detail::sign(fspecs.sign); out = base_iterator(out, it); fspecs.sign = sign::none; if (specs.width != 0) --specs.width; } memory_buffer buffer; if (fspecs.format == float_format::hex) { if (fspecs.sign) buffer.push_back(detail::sign(fspecs.sign)); snprintf_float(convert_float(value), specs.precision, fspecs, buffer); return write_bytes(out, {buffer.data(), buffer.size()}, specs); } int precision = specs.precision >= 0 || specs.type == presentation_type::none ? specs.precision : 6; if (fspecs.format == float_format::exp) { if (precision == max_value()) throw_format_error("number is too big"); else ++precision; } else if (fspecs.format != float_format::fixed && precision == 0) { precision = 1; } if (const_check(std::is_same())) fspecs.binary32 = true; int exp = format_float(convert_float(value), precision, fspecs, buffer); fspecs.precision = precision; auto f = big_decimal_fp{buffer.data(), static_cast(buffer.size()), exp}; return write_float(out, f, specs, fspecs, loc); } template ::value)> FMT_CONSTEXPR20 auto write(OutputIt out, T value) -> OutputIt { if (is_constant_evaluated()) return write(out, value, basic_format_specs()); if (const_check(!is_supported_floating_point(value))) return out; auto fspecs = float_specs(); if (detail::signbit(value)) { fspecs.sign = sign::minus; value = -value; } constexpr auto specs = basic_format_specs(); using floaty = conditional_t::value, double, T>; using uint = typename dragonbox::float_info::carrier_uint; uint mask = exponent_mask(); if ((bit_cast(value) & mask) == mask) return write_nonfinite(out, std::isnan(value), specs, fspecs); auto dec = dragonbox::to_decimal(static_cast(value)); return write_float(out, dec, specs, fspecs, {}); } template ::value && !is_fast_float::value)> inline auto write(OutputIt out, T value) -> OutputIt { return write(out, value, basic_format_specs()); } template auto write(OutputIt out, monostate, basic_format_specs = {}, locale_ref = {}) -> OutputIt { FMT_ASSERT(false, ""); return out; } template FMT_CONSTEXPR auto write(OutputIt out, basic_string_view value) -> OutputIt { auto it = reserve(out, value.size()); it = copy_str_noinline(value.begin(), value.end(), it); return base_iterator(out, it); } template ::value)> constexpr auto write(OutputIt out, const T& value) -> OutputIt { return write(out, to_string_view(value)); } // FMT_ENABLE_IF() condition separated to workaround an MSVC bug. 
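// An editor's illustrative sketch, not part of fmt: the unformatted
// floating-point write above detects NaN and infinity in one step by testing
// whether every exponent bit is set, before handing the value to Dragonbox.
// The standalone check below does the same for an IEEE-754 double; the
// fmt_doc_sketch namespace and function name are illustrative only.
#include <cstdint>
#include <cstring>

namespace fmt_doc_sketch {

// True if value is NaN or +/-infinity: all 11 exponent bits of the IEEE-754
// double representation are set exactly for those values.
inline bool is_nan_or_inf(double value) {
  uint64_t bits;
  static_assert(sizeof(bits) == sizeof(double), "IEEE-754 double expected");
  std::memcpy(&bits, &value, sizeof(bits));       // portable bit_cast
  const uint64_t exponent_mask = 0x7ffULL << 52;  // bits 52..62
  return (bits & exponent_mask) == exponent_mask;
}

}  // namespace fmt_doc_sketch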
template < typename Char, typename OutputIt, typename T, bool check = std::is_enum::value && !std::is_same::value && mapped_type_constant>::value != type::custom_type, FMT_ENABLE_IF(check)> FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { return write(out, static_cast>(value)); } template ::value)> FMT_CONSTEXPR auto write(OutputIt out, T value, const basic_format_specs& specs = {}, locale_ref = {}) -> OutputIt { return specs.type != presentation_type::none && specs.type != presentation_type::string ? write(out, value ? 1 : 0, specs, {}) : write_bytes(out, value ? "true" : "false", specs); } template FMT_CONSTEXPR auto write(OutputIt out, Char value) -> OutputIt { auto it = reserve(out, 1); *it++ = value; return base_iterator(out, it); } template FMT_CONSTEXPR_CHAR_TRAITS auto write(OutputIt out, const Char* value) -> OutputIt { if (!value) { throw_format_error("string pointer is null"); } else { out = write(out, basic_string_view(value)); } return out; } template ::value)> auto write(OutputIt out, const T* value, const basic_format_specs& specs = {}, locale_ref = {}) -> OutputIt { check_pointer_type_spec(specs.type, error_handler()); return write_ptr(out, bit_cast(value), &specs); } // A write overload that handles implicit conversions. template > FMT_CONSTEXPR auto write(OutputIt out, const T& value) -> enable_if_t< std::is_class::value && !is_string::value && !is_floating_point::value && !std::is_same::value && !std::is_same().map(value))>::value, OutputIt> { return write(out, arg_mapper().map(value)); } template > FMT_CONSTEXPR auto write(OutputIt out, const T& value) -> enable_if_t::value == type::custom_type, OutputIt> { using formatter_type = conditional_t::value, typename Context::template formatter_type, fallback_formatter>; auto ctx = Context(out, {}, {}); return formatter_type().format(value, ctx); } // An argument visitor that formats the argument and writes it via the output // iterator. It's a class and not a generic lambda for compatibility with C++11. template struct default_arg_formatter { using iterator = buffer_appender; using context = buffer_context; iterator out; basic_format_args args; locale_ref loc; template auto operator()(T value) -> iterator { return write(out, value); } auto operator()(typename basic_format_arg::handle h) -> iterator { basic_format_parse_context parse_ctx({}); context format_ctx(out, args, loc); h.format(parse_ctx, format_ctx); return format_ctx.out(); } }; template struct arg_formatter { using iterator = buffer_appender; using context = buffer_context; iterator out; const basic_format_specs& specs; locale_ref locale; template FMT_CONSTEXPR FMT_INLINE auto operator()(T value) -> iterator { return detail::write(out, value, specs, locale); } auto operator()(typename basic_format_arg::handle) -> iterator { // User-defined types are handled separately because they require access // to the parse context. 
return out; } }; template struct custom_formatter { basic_format_parse_context& parse_ctx; buffer_context& ctx; void operator()( typename basic_format_arg>::handle h) const { h.format(parse_ctx, ctx); } template void operator()(T) const {} }; template using is_integer = bool_constant::value && !std::is_same::value && !std::is_same::value && !std::is_same::value>; template class width_checker { public: explicit FMT_CONSTEXPR width_checker(ErrorHandler& eh) : handler_(eh) {} template ::value)> FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { if (is_negative(value)) handler_.on_error("negative width"); return static_cast(value); } template ::value)> FMT_CONSTEXPR auto operator()(T) -> unsigned long long { handler_.on_error("width is not integer"); return 0; } private: ErrorHandler& handler_; }; template class precision_checker { public: explicit FMT_CONSTEXPR precision_checker(ErrorHandler& eh) : handler_(eh) {} template ::value)> FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { if (is_negative(value)) handler_.on_error("negative precision"); return static_cast(value); } template ::value)> FMT_CONSTEXPR auto operator()(T) -> unsigned long long { handler_.on_error("precision is not integer"); return 0; } private: ErrorHandler& handler_; }; template