pax_global_header00006660000000000000000000000064132431706020014510gustar00rootroot0000000000000052 comment=4769f3932c0b93d9e5f929ac8589cef2e4a7d5d6 skiboot-5.10-rc4/000077500000000000000000000000001324317060200135755ustar00rootroot00000000000000skiboot-5.10-rc4/.gitignore000066400000000000000000000040021324317060200155610ustar00rootroot00000000000000*~ *.o *.d *.gcda *.gcno *.rej *.swp *.pyc *.patch .version skiboot-nosection.elf skiboot*.elf skiboot.lds skiboot.lid skiboot.lid.xz skiboot*.map skiboot*.stb skiboot.info coverage-report/ extract-gcov .tags* TAGS tags GTAGS GRTAGS GPATH cscope.out gmon.out compile_commands.json asm/asm-offsets.s include/asm-offsets.h version.c ccan/*/test/*-gcov ccan/*/test/run ccan/build_assert/test/run-BUILD_ASSERT_OR_ZERO ccan/list/test/run-check-corrupt ccan/list/test/run-list_del_from-assert ccan/list/test/run-single-eval ccan/list/test/run-with-debug ccan/short_types/test/run-endian ccan/str/test/run-STR_MAX_CHARS core/test/run-console-log core/test/run-time-utils core/test/run-timebase core/test/run-timer core/test/run-console-log-buf-overrun core/test/run-console-log-pr_fmt core/test/run-mem_range_is_reserved core/test/run-mem_region_next core/test/run-mem_region_reservations ccan/*/test/gmon.out core/test/run-device core/test/run-malloc core/test/run-malloc-speed core/test/run-mem_region core/test/run-mem_region_init core/test/run-mem_region_release_unused core/test/run-mem_region_release_unused_noalloc core/test/run-msg core/test/run-nvram-format core/test/run-pel core/test/run-pool core/test/run-trace core/test/run-api-test core/test/run-bitmap core/test/run-buddy core/test/run-flash-subpartition core/test/*-gcov debian-jessie-vmlinux debian-jessie-initrd.gz external/dump_trace external/mambo/skiboot-boot_test.dump external/mambo/skiboot-hello_world.dump external/mambo/mambo-socket-proxy external/memboot/memboot hdata/test/hdata_to_dt hdata/test/hdata_to_dt-gcov hw/test/phys-map-test hw/ipmi/test/run-fru hw/ipmi/test/*-gcov libc/test/run-time libc/test/run-time-gcov libflash/test/test-blocklevel libflash/test/test-flash libflash/test/test-ecc libflash/test/test-mbox libflash/test/test-flash-gcov libstb/create-container libstb/test/run-stb-container libstb/test/print-stb-container libstb/test/*-gcov test/hello_world/hello_kernel/hello_kernel test/sreset_world/sreset_kernel/sreset_kernel zImage.epapr vgcore.* doc/_build skiboot-5.10-rc4/.travis.yml000066400000000000000000000050511324317060200157070ustar00rootroot00000000000000language: c cache: ccache env: matrix: - RUN_ON_CONTAINER=ubuntu-12.04 - RUN_ON_CONTAINER=ubuntu-16.04 - RUN_ON_CONTAINER=ubuntu-latest - RUN_ON_CONTAINER=centos6 - RUN_ON_CONTAINER=centos7 - RUN_ON_CONTAINER=fedora24 - RUN_ON_CONTAINER=fedora25 - RUN_ON_CONTAINER=fedora26 - RUN_ON_CONTAINER=fedora27 - RUN_ON_CONTAINER=fedora-rawhide - RUN_ON_CONTAINER=debian-stretch - RUN_ON_CONTAINER=debian-unstable global: # The next declaration is the encrypted COVERITY_SCAN_TOKEN, created # via the "travis encrypt" command using the project repo's public key secure: "MpNEGFa1VrF/vsQq24n5UgfRbz1wVC6B8mubFnyK4gX0IuQ9xhWuTzMLUQF9UJxe5jnC2DTmVUvYTYN/hggw+PpYwbOOAE0QGR5pmPHA4PSRmc5pxt1q18/sv7EPFw66GFyWJq94nWjpigyKQ8KGtA67j1xFqrDoS43OA76WZgo=" matrix: allow_failures: - env: RUN_ON_CONTAINER=fedora-rawhide - env: RUN_ON_CONTAINER=debian-unstable sudo: required services: docker dist: trusty before_install: - echo -n | openssl s_client -connect scan.coverity.com:443 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | sudo tee -a 
/etc/ssl/certs/ca-certificates.crt script: - if [ ${COVERITY_SCAN_BRANCH} != 1 ]; then docker build --pull -t ${RUN_ON_CONTAINER} -f opal-ci/Dockerfile-${RUN_ON_CONTAINER} . && docker run --volume $HOME/.ccache:/root/.ccache --rm -t $RUN_ON_CONTAINER; fi - if [ ${COVERITY_SCAN_BRANCH} == 1 ]; then sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test && sudo apt-get update -qq && sudo apt-get install -y gcc-4.8 libstdc++6 valgrind expect xterm && sudo apt-get install -y gcc-arm-linux-gnueabi gcc-powerpc64le-linux-gnu gcc && sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 50 && wget https://www.kernel.org/pub/tools/crosstool/files/bin/x86_64/4.8.0/x86_64-gcc-4.8.0-nolibc_powerpc64-linux.tar.xz && sudo mkdir /opt/cross && sudo tar -C /opt/cross -xvf x86_64-gcc-4.8.0-nolibc_powerpc64-linux.tar.xz; fi addons: coverity_scan: project: name: "open-power/skiboot" description: "Build submitted via Travis CI" notification_email: stewart@linux.vnet.ibm.com build_command_prepend: "cov-configure --comptype gcc --compiler powerpc64-linux-gcc --template; cov-configure --comptype gcc --compiler powerpc64le-linux-gnu-gcc; cov-configure --comptype gcc --compiler arm-linux-gnueabi-gcc; cov-configure --comptype gcc --compiler x86_64-linux-gnu-gcc" build_command: "make -j4 all check" branch_pattern: coverity_scan skiboot-5.10-rc4/CONTRIBUTING.md000066400000000000000000000056431324317060200160360ustar00rootroot00000000000000Contributing to skiboot ======================= skiboot is OPAL (OpenPOWER Abstraction Layer) boot and runtime firmware for POWER. If you haven't already, join us on IRC (#openpower on Freenode) and on the mailing list ( skiboot@lists.ozlabs.org - subscribe by going to https://lists.ozlabs.org/listinfo/skiboot ) While we do use GitHub Issues, patches are accepted via the mailing list. We use GitHub Issues and Pull Requests for tracking contributions. We expect participants to adhere to the GitHub Community Guidelines (found at https://help.github.com/articles/github-community-guidelines/ ). If you are unable or unwilling to use GitHub, we can accept contributions via the mailing list. All contributions should have a Developer Certificate of Origin (see below). Development Environment ----------------------- A host GCC of at least 4.9 is recommended (all modern Linux distributions provide this). You can build on x86-64, ppc64 or ppc64le, you just need a powerpc64 (BE) cross compiler. The powerpc64le cross compilers packaged in Linux distributions can build BE code, so they are fine. Developer Certificate of Origin ------------------------------- Contributions to this project should conform to the `Developer Certificate of Origin` as defined at http://elinux.org/Developer_Certificate_Of_Origin. Commits to this project need to contain the following line to indicate the submitter accepts the DCO: ``` Signed-off-by: Your Name ``` By contributing in this way, you agree to the terms as follows: ``` Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` skiboot-5.10-rc4/LICENCE000066400000000000000000000261361324317060200145720ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. skiboot-5.10-rc4/Makefile000066400000000000000000000023141324317060200152350ustar00rootroot00000000000000# If you want to build in another directory copy this file there and # fill in the following values # # Prefix of cross toolchain, if anything # Example: CROSS= powerpc64-unknown-linux-gnu- # ARCH = $(shell uname -m) ifdef CROSS_COMPILE CROSS ?= $(CROSS_COMPILE) endif ifeq ("$(ARCH)", "ppc64") CROSS ?= else CROSS ?= powerpc64-linux- endif # # Main debug switch # DEBUG ?= 0 # Run tests under valgrind? USE_VALGRIND ?= 1 # # Optional location of embedded linux kernel file # This can be a raw vmlinux, stripped vmlinux or # zImage.epapr # KERNEL ?= # # Optional build with advanced stack checking # STACK_CHECK ?= $(DEBUG) # # Experimental (unsupported) build options # # Little-endian does not yet build. Include it here to set ELF ABI. 
LITTLE_ENDIAN ?= 0 # ELF v2 ABI is more efficient and compact ELF_ABI_v2 ?= $(LITTLE_ENDIAN) # Discard unreferenced code and data at link-time DEAD_CODE_ELIMINATION ?= 0 # # Where is the source directory, must be a full path (no ~) # Example: SRC= /home/me/skiboot # SRC=$(CURDIR) # # Where to get information about this machine (subdir name) # DEVSRC=hdata # # default config file, see include config_*.h for more specifics # CONFIG := config.h include $(SRC)/Makefile.main skiboot-5.10-rc4/Makefile.main000066400000000000000000000223671324317060200161720ustar00rootroot00000000000000# -*-Makefile-*- # # This is the main Makefile # Target tools CC=$(CROSS)gcc$(POSTFIX) LD=$(CROSS)ld$(POSTFIX) AS=$(CROSS)as AR=$(CROSS)ar NM=$(CROSS)nm OBJCOPY=$(CROSS)objcopy OBJDUMP=$(CROSS)objdump SIZE=$(CROSS)size LD_TEXT=0x0 NM += --synthetic try = $(shell set -e; if ($(1)) >/dev/null 2>&1; \ then echo "$(2)"; \ else echo "$(3)"; fi ) try-cflag = $(call try,$(1) $(2) -x c -c /dev/null -o /dev/null,$(2)) test_cflag = $(call try,$(1) $(2) -x c -c /dev/null -o /dev/null,1,0) # Base warnings CWARNS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -Werror-implicit-function-declaration -Wdeclaration-after-statement \ -Wno-pointer-sign -Wextra -Wno-sign-compare \ -Wmissing-prototypes -Wmissing-declarations \ -Wwrite-strings -Wcast-align \ -Winit-self \ -Wframe-larger-than=1024 \ -Werror # Host tools and options HOSTCC=gcc HOSTEND=$(shell uname -m | sed -e 's/^i.*86$$/LITTLE/' -e 's/^x86.*/LITTLE/' -e 's/^ppc64le/LITTLE/' -e 's/^ppc.*/BIG/') HOSTCFLAGS=-O1 $(CWARNS) -DHAVE_$(HOSTEND)_ENDIAN -MMD HOSTCFLAGS += $(call try-cflag,$(HOSTCC),-std=gnu11) HOSTCFLAGS += $(call try-cflag,$(HOSTCC),-m64) HOSTCFLAGS += $(call try-cflag,$(HOSTCC),-Wjump-misses-init) \ $(call try-cflag,$(HOSTCC),-Wsuggest-attribute=const) \ $(call try-cflag,$(HOSTCC),-Wsuggest-attribute=noreturn) HOSTCFLAGS += -DDEBUG -DCCAN_LIST_DEBUG HOSTGCOVCFLAGS = -fprofile-arcs -ftest-coverage -lgcov -O0 -g -pg VALGRIND := valgrind -q --show-reachable=yes --error-exitcode=99 # Target options OPTS=-Os DBG=-g CPPFLAGS := -I$(SRC)/include -Iinclude -MMD -include $(SRC)/include/config.h CPPFLAGS += -I$(SRC)/libfdt -I$(SRC)/libflash -I$(SRC)/libxz -I$(SRC)/libc/include -I$(SRC) CPPFLAGS += -I$(SRC)/libpore CPPFLAGS += -D__SKIBOOT__ -nostdinc CPPFLAGS += -isystem $(shell $(CC) -print-file-name=include) CPPFLAGS += -DBITS_PER_LONG=64 -DHAVE_BIG_ENDIAN # We might want to remove our copy of stdint.h # but that means uint64_t becomes an ulong instead of an ullong # causing all our printf's to warn CPPFLAGS += -ffreestanding ifeq ($(DEBUG),1) CPPFLAGS += -DDEBUG -DCCAN_LIST_DEBUG endif CFLAGS := -fno-strict-aliasing -pie -fpie -fno-pic -mbig-endian -m64 CFLAGS += -mcpu=power7 CFLAGS += -Wl,--oformat,elf64-powerpc CFLAGS += -ffixed-r13 CFLAGS += $(call try-cflag,$(CC),-std=gnu11) ifeq ($(ELF_ABI_v2),1) CFLAGS += $(call try-cflag,$(CC),-mabi=elfv2) else CFLAGS += $(call try-cflag,$(CC),-mabi=elfv1) endif ifeq ($(DEAD_CODE_ELIMINATION),1) CFLAGS += -ffunction-sections -fdata-sections endif ifeq ($(SKIBOOT_GCOV),1) CFLAGS += -fprofile-arcs -ftest-coverage -DSKIBOOT_GCOV=1 endif ifeq ($(USE_VALGRIND),1) CFLAGS += -DUSE_VALGRIND=1 else VALGRIND := endif # Check if the new parametrized stack protector option is supported # by gcc, otherwise disable stack protector STACK_PROT_CFLAGS := -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 STACK_PROT_CFLAGS += -mstack-protector-guard-offset=0 HAS_STACK_PROT := $(call test_cflag,$(CC),$(STACK_PROT_CFLAGS)) ifeq 
($(STACK_CHECK),1) STACK_PROT_CFLAGS += -fstack-protector-all CPPFLAGS += -DSTACK_CHECK_ENABLED CFLAGS += -pg else STACK_PROT_CFLAGS += -fstack-protector STACK_PROT_CFLAGS += $(call try-cflag,$(CC),-fstack-protector-strong) endif ifeq ($(HAS_STACK_PROT),1) CPPFLAGS += -DHAS_STACK_PROT CFLAGS += $(STACK_PROT_CFLAGS) else CFLAGS += -fno-stack-protector endif CFLAGS += $(call try-cflag,$(CC),-Wjump-misses-init) \ $(call try-cflag,$(CC),-Wsuggest-attribute=const) \ $(call try-cflag,$(CC),-Wsuggest-attribute=noreturn) \ $(call try-cflag,$(CC),-Wstack-usage=1024) CFLAGS += $(CWARNS) $(OPTS) $(DBG) LDFLAGS := -m64 -static -nostdlib -pie LDFLAGS += -Wl,-pie LDFLAGS += -Wl,-Ttext-segment,$(LD_TEXT) -Wl,-N -Wl,--build-id=none LDFLAGS += -Wl,--no-multi-toc LDFLAGS += -mcpu=power7 -mbig-endian -Wl,--oformat,elf64-powerpc LDRFLAGS=-melf64ppc # Debug stuff #LDFLAGS += -Wl,-v -Wl,-Map,foomap ifeq ($(DEAD_CODE_ELIMINATION),1) LDFLAGS += -Wl,--gc-sections endif AFLAGS := -D__ASSEMBLY__ -mbig-endian -m64 ifeq ($(ELF_ABI_v2),1) AFLAGS += $(call try-cflag,$(CC),-mabi=elfv2) else AFLAGS += $(call try-cflag,$(CC),-mabi=elfv1) endif # Special tool flags: # Do not use the floating point unit CFLAGS += -msoft-float # Do not use string instructions CFLAGS += -mno-string # do not use load/store multiple word instrcutions CFLAGS += -mno-multiple # do not use any automatic vector foo # While it would be safe during boot, we don't save/restore across OPAL calls CFLAGS += $(call try-cflag,$(CC),-mno-vsx) \ $(call try-cflag,$(CC),-mno-direct-move) \ $(call try-cflag,$(CC),-mno-altivec) # Do not use load/store update. You REALLY do not want to use this! # The async safety of the ABI stack depends on the atomicity # of update on store. #CFLAGS += -mno-update ifneq ($(KERNEL),) CPPFLAGS += -DBUILTIN_KERNEL="\"$(KERNEL)\"" endif CHECK = sparse CHECKFLAGS := $(CF) CHECK_CFLAGS_SKIP = -std=gnu11 .SECONDARY: vpath %.c $(SRC) vpath %.C $(SRC) vpath %.S $(SRC) default: all include/asm-offsets.h: asm/asm-offsets.s @mkdir -p include $(call Q,GN, $(SRC)/make_offsets.sh $< >$@, $@) TARGET = skiboot include $(SRC)/asm/Makefile.inc include $(SRC)/core/Makefile.inc include $(SRC)/hw/Makefile.inc include $(SRC)/platforms/Makefile.inc include $(SRC)/libfdt/Makefile.inc include $(SRC)/libflash/Makefile.inc include $(SRC)/libxz/Makefile.inc include $(SRC)/libpore/Makefile.inc include $(SRC)/libc/Makefile.inc include $(SRC)/ccan/Makefile.inc include $(SRC)/$(DEVSRC)/Makefile.inc include $(SRC)/libstb/Makefile.inc # hack for travis-ci and coverity gard: (cd external/gard; CROSS_COMPILE="" CFLAGS="$(HOSTCFLAGS) $(HOSTGCOVCFLAGS)" make) pflash: (cd external/pflash; CROSS_COMPILE="" CFLAGS="$(HOSTCFLAGS) $(HOSTGCOVCFLAGS)" make) pflash-coverity: (cd external/pflash; ./build-all-arch.sh) all: $(SUBDIRS) $(TARGET).lid $(TARGET).lid.xz $(TARGET).map extract-gcov all: $(TARGET).lid.stb $(TARGET).lid.xz.stb OBJS := $(ASM) $(CORE) $(HW) $(PLATFORMS) $(LIBFDT) $(LIBXZ) $(LIBFLASH) $(LIBSTB) OBJS += $(LIBC) $(CCAN) $(DEVSRC_OBJ) $(LIBPORE) OBJS_NO_VER = $(OBJS) ALL_OBJS = $(OBJS) version.o ALL_OBJS_1 = $(ALL_OBJS) asm/dummy_map.o ALL_OBJS_2 = $(ALL_OBJS) asm/real_map.o $(TARGET).lid.xz: $(TARGET).lid $(call Q,XZ, cat $^ | xz -9 -C crc32 > $@, $@) $(TARGET).lid: $(TARGET).elf $(call Q,OBJCOPY, $(OBJCOPY) -O binary -S $^ $@, $@) $(TARGET).lid.stb: $(TARGET).lid libstb/create-container $(call Q,STB-DEVELOPMENT-SIGNED-CONTAINER,$(SRC)/libstb/sign-with-local-keys.sh $< $@ $(SRC)/libstb/keys/ PAYLOAD,$@) $(TARGET).lid.xz.stb: $(TARGET).lid.xz 
libstb/create-container $(call Q,STB-DEVELOPMENT-SIGNED-CONTAINER,$(SRC)/libstb/sign-with-local-keys.sh $< $@ $(SRC)/libstb/keys/ PAYLOAD,$@) $(TARGET).tmp.elf: $(ALL_OBJS_1) $(TARGET).lds $(KERNEL) $(call Q,LD, $(CC) $(LDFLAGS) -T $(TARGET).lds $(ALL_OBJS_1) -o $@, $@) asm/real_map.o : $(TARGET).tmp.map $(TARGET).elf: $(ALL_OBJS_2) $(TARGET).lds $(KERNEL) $(call Q,LD, $(CC) $(LDFLAGS) -T $(TARGET).lds $(ALL_OBJS_2) -o $@, $@) $(SUBDIRS): $(call Q,MKDIR,mkdir -p $@, $@) -include $(wildcard *.d) -include $(wildcard $(SUBDIRS:%=%/*.d)) # Set V=1 if you want to see everything. include $(SRC)/Makefile.rules VERSION ?= $(shell cd $(SRC); GIT_DIR=$(SRC)/.git $(SRC)/make_version.sh) .PHONY: VERSION-always .version: VERSION-always @echo $(VERSION) > $@.tmp @cmp -s $@ $@.tmp || cp $@.tmp $@ @rm -f $@.tmp version.c: $(SRC)/make_version.sh $(OBJS_NO_VER) .version @(if [ "a$(VERSION)" = "a" ]; then \ echo "#error You need to set SKIBOOT_VERSION environment variable" > $@ ;\ else \ echo "const char version[] = \"$(VERSION)\";" ;\ fi) > $@ .PHONY: coverage include $(shell find $(SRC)/* -name Makefile.check) extract-gcov: extract-gcov.c $(call Q, HOSTCC ,$(HOSTCC) $(HOSTCFLAGS) \ -DTARGET__GNUC__=`echo '__GNUC__'|$(CC) -E -|grep -v '^#'` \ -DTARGET__GNUC_MINOR__=`echo '__GNUC_MINOR__'|$(CC) -E -|grep -v '^#'` \ -Wpadded -O0 -g -I$(SRC) -o $@ $<,$<) coverage-report: skiboot.info genhtml --branch-coverage -q -o $@ $< # A rather nasty hack here that relies on the lcov file format external/pflash/pflash.info: coverage (cd external/pflash; lcov -q -c -d . -o pflash.info --rc lcov_branch_coverage=1; sed -i -e 's%external/pflash/libflash%libflash%; s%external/pflash/ccan%ccan%' pflash.info) external/gard/gard.info: coverage (cd external/gard; lcov -q -c -d . -o gard.info --rc lcov_branch_coverage=1; sed -i -e 's%external/gard/libflash%libflash%; s%external/gard/ccan%ccan%' gard.info) skiboot.info: coverage external/pflash/pflash.info external/gard/gard.info lcov -q -c -d . $(LCOV_DIRS) -o $@ --rc lcov_branch_coverage=1 lcov -q -r $@ 'external/pflash/*' -o $@ lcov -q -r $@ 'external/gard/*' -o $@ lcov -q -a $@ -a external/pflash/pflash.info -o $@ lcov -q -a $@ -a external/gard/gard.info -o $@ lcov -q -r $@ $(LCOV_EXCLUDE) -o $@ --rc lcov_branch_coverage=1 tags: find . -name '*.[chS]' | xargs ctags TAGS: find . -name '*.[chS]' | xargs etags .PHONY: tags TAGS check coverage cscope: find . 
-name '*.[chS]' | xargs cscope clean: $(RM) *.[odsa] $(SUBDIRS:%=%/*.[odsa]) $(RM) *.elf $(TARGET).lid *.map $(TARGET).lds $(TARGET).lid.xz $(RM) include/asm-offsets.h version.c .version $(RM) skiboot.info external/gard/gard.info external/pflash/pflash.info $(RM) extract-gcov $(TARGET).lid.stb $(TARGET).lid.xz.stb distclean: clean $(RM) *~ $(SUBDIRS:%=%/*~) include/*~ skiboot-5.10-rc4/Makefile.rules000066400000000000000000000031401324317060200163640ustar00rootroot00000000000000# # These allow for the build to be less verbose # ifdef V VERBOSE:= $(V) else VERBOSE:= 0 endif ifeq ($(VERBOSE),1) define Q $(2) endef else ifneq ($(filter s% -s%,$(MAKEFLAGS)),) define Q @$(2) endef else define Q @echo " [$1] $(3)" @$(2) endef endif endif define QTEST $(call Q,$1, ./test/run.sh $2, $3) endef define cook_aflags $(filter-out $(AFLAGS_SKIP_$(1)), $(CPPFLAGS) $(AFLAGS)) $(AFLAGS_$(1)) endef define cook_cflags $(filter-out $(CFLAGS_SKIP_$(1)), $(CPPFLAGS) $(CFLAGS)) $(CFLAGS_$(1)) endef ifeq ($(C),1) ifeq ($(VERBOSE),1) cmd_check = $(CHECK) $(CHECKFLAGS) $(filter-out $(CHECK_CFLAGS_SKIP),$(call cook_cflags,$@)) $< else cmd_check = @$(CHECK) $(CHECKFLAGS) $(filter-out $(CHECK_CFLAGS_SKIP),$(call cook_cflags,$@)) $< endif endif %.o : %.S include/asm-offsets.h $(call Q,AS, $(CC) $(call cook_aflags,$@) -c $< -o $@, $@) %.s : %.S include/asm-offsets.h $(call Q,CC, $(CC) $(call cook_aflags,$@) -E -c $< -o $@, $@) %.o : %.c $(call cmd_check) $(call Q,CC, $(CC) $(call cook_cflags,$@) -c $< -o $@, $@) # Force the use of the C compiler, not C++ for the .C files in libpore %.o : %.C $(call cmd_check) $(call Q,CC, $(CC) $(call cook_cflags,$@) -x c -c $< -o $@, $@) %.s : %.c $(call Q,CC, $(CC) $(call cook_cflags,$@) -S -c $< -o $@, $@) %.i : %.c $(call Q,CC, $(CC) $(call cook_cflags,$@) -E -c $< -o $@, $@) %built-in.o : $(call Q,LD, $(LD) $(LDRFLAGS) -r $^ -o $@, $@) %.lds : %.lds.S $(call Q,CC, $(CC) $(CPPFLAGS) -P -E $< -o $@, $@) %.map: %.elf $(call Q,NM, $(NM) --synthetic -n $< | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)' > $@, $@) skiboot-5.10-rc4/README.md000066400000000000000000000104411324317060200150540ustar00rootroot00000000000000# skiboot Firmware for OpenPower systems. Source: https://github.com/open-power/skiboot Mailing list: skiboot@lists.ozlabs.org Info/subscribe: https://lists.ozlabs.org/listinfo/skiboot Archives: https://lists.ozlabs.org/pipermail/skiboot/ Patchwork: http://patchwork.ozlabs.org/project/skiboot/list/ ## Overview OPAL firmware (OpenPower Abstraction Layer) comes in several parts. A simplified flow of what happens when the power button is pressed is: 1. The baseboard management controller (BMC) powers the system on. 2. The BMC selects the master chip and releases the self-boot engines (SBEs) on the POWER8 chips, master last. 3. The BMC relinquishes control of the flexible service interface (FSI) SCAN/SCOM engines. 4. The hostboot firmware IPLs the system. It initiates a secondary power-on sequence through a digital power systems sweep (DPSS). 5. The hostboot firmware loads the OPAL image and moves all processors to their execution starting points. Here, the OPAL image is three parts: 1. skiboot (includes OPAL runtime services) 2. skiroot - the bootloader environment * kernel * initramfs (containing petitboot bootloader) They may be all part of one payload or three separate images (depending on platform). The bootloader will kexec a host kernel (probably linux). The host OS can make OPAL calls. 
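To make "the host OS can make OPAL calls" concrete, here is a minimal, hypothetical sketch of what one such call looks like from the OS side. It only illustrates the register convention documented at `opal_entry` in asm/head.S (token in r0, OPAL base in r2, arguments in r3..r10, r13..r31 preserved); the `opal_base`/`opal_entry` variables, the wrapper name, and the MSR/endian handling a real host kernel performs around the call are assumptions for illustration, not skiboot or Linux API.

```c
/*
 * Illustrative sketch only -- not the actual Linux wrapper.
 * Register convention per opal_entry in asm/head.S:
 *   r0 = token, r2 = OPAL base, r3..r10 = arguments, r13..r31 preserved.
 * Discovering opal_base/opal_entry (device tree in practice), switching
 * MSR/endian state, and restoring the caller's TOC (r2) are omitted here.
 */
#include <stdint.h>

extern uint64_t opal_base;	/* assumed: provided to the OS at boot */
extern uint64_t opal_entry;	/* assumed: likewise */

static inline int64_t opal_call_1arg(uint64_t token, uint64_t arg0)
{
	register uint64_t r0  asm("r0")  = token;
	register uint64_t r2  asm("r2")  = opal_base;
	register uint64_t r3  asm("r3")  = arg0;
	register uint64_t r12 asm("r12") = opal_entry;

	/* Branch to the OPAL entry point; status comes back in r3. */
	asm volatile("mtctr %3 ; bctrl"
		     : "+r"(r3), "+r"(r0), "+r"(r2), "+r"(r12)
		     :
		     : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11",
		       "ctr", "lr", "cc", "memory");
	return (int64_t)r3;
}
```

In practice the host kernel generates one such stub per OPAL token in assembly, because the caller's TOC pointer (r2) is replaced by the OPAL base and the endian/MSR switch around the call cannot be expressed cleanly in plain C; the sketch above only shows the register plumbing.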
The OPAL API is documented in doc/opal-api/ (there are missing parts, patches are welcome!)

See doc/overview.rst for a more in-depth overview of skiboot.

## Building

Any host OS can build and test skiboot provided it has a C cross compiler for *big endian* powerpc64. All good Linux distributions (and several bad ones) provide a packaged compiler that can be installed through the usual package management tools.

To build on Ubuntu:

```
apt-get install gcc-powerpc64le-linux-gnu gcc valgrind \
  expect libssl-dev device-tree-compiler make \
  xz-utils
CROSS=powerpc64le-linux-gnu- make -j`nproc`
```

To build on Fedora:

```
dnf install gcc-powerpc64le-linux-gnu binutils-powerpc64-linux-gnu gcc make \
  diffutils findutils expect valgrind-devel dtc openssl-devel xz
CROSS=powerpc64le-linux-gnu- make -j`nproc`
```

(The little-endian powerpc64le compilers in Ubuntu and Fedora are actually bi-endian and can compile skiboot even though it's big-endian. We recommend installing a little-endian toolchain if you plan on building other projects.)

On any POWER system with a bi-endian system compiler:

```
CROSS="" make -j`nproc`
```

Alternatively, pre-built cross compilers for x86 systems can be downloaded from here: https://www.kernel.org/pub/tools/crosstool/

When using these compilers add /opt/cross/gcc-4.8.0-nolibc/powerpc64-linux/bin/ to your PATH. Once this is done, skiboot can be compiled by just running `make`.

## Testing

Skiboot comes with a set of unit tests that can be run on your desktop. They can be run with:

```
make check
```

To test in a simulator, install the IBM POWER8 Functional Simulator from: http://www-304.ibm.com/support/customercare/sas/f/pwrfs/home.html

Also see external/mambo/README.md

Qemu (as of 2.2.0) is not suitable as it does not (yet) implement the HyperVisor mode of the POWER8 processor. See https://www.flamingspork.com/blog/2015/08/28/running-opal-in-qemu-the-powernv-platform/ for instructions on how to use a work-in-progress patchset to qemu that may be suitable for some work.

To run a boot-to-bootloader test, you'll need a zImage.epapr built using the mambo_defconfig config for op-build. See https://github.com/open-power/op-build/ for how to build it. Drop zImage.epapr in the skiboot directory and the skiboot test suite will automatically pick it up.

See opal-ci/README for further testing instructions.

To test on real hardware, you will need to understand how to flash new skiboot onto your system. This will vary from platform to platform. You may want to start with external/boot-tests/boot_test.sh as it can (provided the correct usernames/passwords) automatically flash a new skiboot onto ASTBMC based OpenPower machines.

## Hacking

All patches should be sent to the mailing list with linux-kernel style 'Signed-Off-By'. The following git commands are your friends:

```
git commit -s
git format-patch
```

You probably want to read the Linux guide at https://kernel.org/doc/html/latest/process/submitting-patches.html as much of it applies to skiboot.
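As one possible example of that workflow (the patch file name below is a placeholder, and `git send-email` needs your SMTP details configured in git beforehand), the patches produced by `git format-patch` can be mailed straight to the list:

```
git send-email --to skiboot@lists.ozlabs.org 0001-my-change.patch
```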
## License See LICENSE skiboot-5.10-rc4/asm/000077500000000000000000000000001324317060200143555ustar00rootroot00000000000000skiboot-5.10-rc4/asm/Makefile.inc000066400000000000000000000003241324317060200165640ustar00rootroot00000000000000# -*-Makefile-*- SUBDIRS += asm ASM_OBJS = head.o misc.o kernel-wrapper.o cvc_entry.o ASM=asm/built-in.o # Add extra dependency to the kernel wrapper kernel_wrapper.o : $(KERNEL) $(ASM): $(ASM_OBJS:%=asm/%) skiboot-5.10-rc4/asm/asm-offsets.c000066400000000000000000000066401324317060200167560ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include "../hdata/spira.h" #include #include #include #define DEFINE(sym, val) \ asm volatile("\n#define " #sym " %0 /* " #val " */" : : "i" (val)) #define OFFSET(sym, str, mem) \ DEFINE(sym, offsetof(struct str, mem)) int main(void); int main(void) { OFFSET(SPIRA_ACTUAL_SIZE, spira, reserved); OFFSET(CPUTHREAD_PIR, cpu_thread, pir); OFFSET(CPUTHREAD_SAVE_R1, cpu_thread, save_r1); OFFSET(CPUTHREAD_STATE, cpu_thread, state); OFFSET(CPUTHREAD_CUR_TOKEN, cpu_thread, current_token); DEFINE(CPUTHREAD_GAP, sizeof(struct cpu_thread) + STACK_SAFETY_GAP); #ifdef STACK_CHECK_ENABLED OFFSET(CPUTHREAD_STACK_BOT_MARK, cpu_thread, stack_bot_mark); OFFSET(CPUTHREAD_STACK_BOT_PC, cpu_thread, stack_bot_pc); OFFSET(CPUTHREAD_STACK_BOT_TOK, cpu_thread, stack_bot_tok); #endif OFFSET(STACK_TYPE, stack_frame, type); OFFSET(STACK_LOCALS, stack_frame, locals); OFFSET(STACK_GPR0, stack_frame, gpr[0]); OFFSET(STACK_GPR1, stack_frame, gpr[1]); OFFSET(STACK_GPR2, stack_frame, gpr[2]); OFFSET(STACK_GPR3, stack_frame, gpr[3]); OFFSET(STACK_GPR4, stack_frame, gpr[4]); OFFSET(STACK_GPR5, stack_frame, gpr[5]); OFFSET(STACK_GPR6, stack_frame, gpr[6]); OFFSET(STACK_GPR7, stack_frame, gpr[7]); OFFSET(STACK_GPR8, stack_frame, gpr[8]); OFFSET(STACK_GPR9, stack_frame, gpr[9]); OFFSET(STACK_GPR10, stack_frame, gpr[10]); OFFSET(STACK_GPR11, stack_frame, gpr[11]); OFFSET(STACK_GPR12, stack_frame, gpr[12]); OFFSET(STACK_GPR13, stack_frame, gpr[13]); OFFSET(STACK_GPR14, stack_frame, gpr[14]); OFFSET(STACK_GPR15, stack_frame, gpr[15]); OFFSET(STACK_GPR16, stack_frame, gpr[16]); OFFSET(STACK_GPR17, stack_frame, gpr[17]); OFFSET(STACK_GPR18, stack_frame, gpr[18]); OFFSET(STACK_GPR19, stack_frame, gpr[19]); OFFSET(STACK_GPR20, stack_frame, gpr[20]); OFFSET(STACK_GPR21, stack_frame, gpr[21]); OFFSET(STACK_GPR22, stack_frame, gpr[22]); OFFSET(STACK_GPR23, stack_frame, gpr[23]); OFFSET(STACK_GPR24, stack_frame, gpr[24]); OFFSET(STACK_GPR25, stack_frame, gpr[25]); OFFSET(STACK_GPR26, stack_frame, gpr[26]); OFFSET(STACK_GPR27, stack_frame, gpr[27]); OFFSET(STACK_GPR28, stack_frame, gpr[28]); OFFSET(STACK_GPR29, stack_frame, gpr[29]); OFFSET(STACK_GPR30, stack_frame, gpr[30]); OFFSET(STACK_GPR31, stack_frame, gpr[31]); OFFSET(STACK_CR, stack_frame, cr); OFFSET(STACK_XER, stack_frame, xer); OFFSET(STACK_DSISR, stack_frame, dsisr); OFFSET(STACK_CTR, stack_frame, ctr); OFFSET(STACK_LR, stack_frame, 
lr); OFFSET(STACK_PC, stack_frame, pc); OFFSET(STACK_CFAR, stack_frame, cfar); OFFSET(STACK_SRR0, stack_frame, srr0); OFFSET(STACK_SRR1, stack_frame, srr1); OFFSET(STACK_HSRR0, stack_frame, hsrr0); OFFSET(STACK_HSRR1, stack_frame, hsrr1); OFFSET(STACK_DAR, stack_frame, dar); DEFINE(STACK_FRAMESIZE, sizeof(struct stack_frame)); return 0; } skiboot-5.10-rc4/asm/cvc_entry.S000066400000000000000000000022741324317060200165020ustar00rootroot00000000000000# IBM_PROLOG_BEGIN_TAG # This is an automatically generated prolog. # # $Source: src/usr/secureboot/base/rom_entry.S $ # # OpenPOWER HostBoot Project # # COPYRIGHT International Business Machines Corp. 2013,2016 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. # # IBM_PROLOG_END_TAG #.include "kernel/ppcconsts.S" .section .text .global __cvc_verify_v1 .global __cvc_sha512_v1 __cvc_verify_v1: __cvc_sha512_v1: call_rom_entry: std %r2, 40(%r1) mflr %r0 std %r0, 16(%r1) stdu %r1, -128(%r1) li %r2, 0 mtctr %r3 mr %r3, %r4 mr %r4, %r5 mr %r5, %r6 mr %r6, %r7 mr %r7, %r8 bctrl addi %r1, %r1, 128 ld %r2, 40(%r1) ld %r0, 16(%r1) mtlr %r0 blr skiboot-5.10-rc4/asm/dummy_map.S000066400000000000000000000000431324317060200164660ustar00rootroot00000000000000 .section ".sym_map","a" .byte 0 skiboot-5.10-rc4/asm/head.S000066400000000000000000000526671324317060200154220ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #define EPAPR_MAGIC 0x65504150 /* Power management instructions */ #define PPC_INST_NAP .long 0x4c000364 #define PPC_INST_SLEEP .long 0x4c0003a4 #define PPC_INST_RVWINKLE .long 0x4c0003e4 #define PPC_INST_STOP .long 0x4c0002e4 #define GET_STACK(stack_reg,pir_reg) \ sldi stack_reg,pir_reg,STACK_SHIFT; \ addis stack_reg,stack_reg,CPU_STACKS_OFFSET@ha; \ addi stack_reg,stack_reg,CPU_STACKS_OFFSET@l; #define GET_CPU() \ clrrdi %r13,%r1,STACK_SHIFT #define SAVE_GPR(reg,sp) std %r##reg,STACK_GPR##reg(sp) #define REST_GPR(reg,sp) ld %r##reg,STACK_GPR##reg(sp) .section ".head","ax" . = 0 .global __head __head: /* * When booting a P7 machine in OPAL mode this pointer is used to * find the opal variant of the NACA. Unused on other machines. */ .llong opal_naca /* This entry point is used when booting with a flat device-tree * pointer in r3 */ . = 0x10 .global fdt_entry fdt_entry: mr %r27,%r3 b boot_entry /* This is a pointer to a descriptor used by debugging tools * on the service processor to get to various trace buffers */ . 
= 0x80 .llong debug_descriptor /* This is our boot semaphore used for CPUs to sync, it has to be * at an easy to locate address (without relocation) since we * need to get at it very early, before we apply our relocs */ . = 0xf0 boot_sem: .long 0 /* And this is a boot flag used to kick secondaries into the * main code. */ boot_flag: .long 0 /* This is used to trigger an assert() and in turn an ATTN * in skiboot when a special sequence is written at this * address. For testing purposes only. */ . = 0xf8 .global attn_trigger attn_trigger: .long 0 /* This is the host initiated reset trigger for test */ . = 0xfc .global hir_trigger hir_trigger: .long 0 /* * At 0x100 and 0x180 reside our entry points. Once started, * we will ovewrite them with our actual 0x100 exception handler * used for recovering from rvw or nap mode */ . = 0x100 /* BML entry, load up r3 with device tree location */ li %r3, 0 oris %r3, %r3, 0xa b fdt_entry /* hack for lab boot */ /* Entry point set by the FSP */ .= 0x180 hdat_entry: li %r27,0 b boot_entry #define EXCEPTION(nr) \ .= nr ;\ mtsprg0 %r3 ;\ mfspr %r3,SPR_CFAR ;\ mtsprg1 %r4 ;\ li %r4,nr ;\ b _exception /* More exception stubs */ EXCEPTION(0x200) EXCEPTION(0x300) EXCEPTION(0x380) EXCEPTION(0x400) EXCEPTION(0x480) EXCEPTION(0x500) EXCEPTION(0x600) EXCEPTION(0x700) EXCEPTION(0x800) EXCEPTION(0x900) EXCEPTION(0x980) EXCEPTION(0xa00) EXCEPTION(0xb00) EXCEPTION(0xc00) EXCEPTION(0xd00) EXCEPTION(0xe00) EXCEPTION(0xe20) EXCEPTION(0xe40) EXCEPTION(0xe60) EXCEPTION(0xe80) EXCEPTION(0xf00) EXCEPTION(0xf20) EXCEPTION(0xf40) EXCEPTION(0xf60) EXCEPTION(0xf80) EXCEPTION(0x1000) EXCEPTION(0x1100) EXCEPTION(0x1200) EXCEPTION(0x1300) EXCEPTION(0x1400) EXCEPTION(0x1500) EXCEPTION(0x1600) .= 0x1e00 _exception: stdu %r1,-STACK_FRAMESIZE(%r1) std %r3,STACK_CFAR(%r1) std %r4,STACK_TYPE(%r1) mfsprg0 %r3 mfsprg1 %r4 SAVE_GPR(0,%r1) SAVE_GPR(1,%r1) SAVE_GPR(2,%r1) SAVE_GPR(3,%r1) SAVE_GPR(4,%r1) SAVE_GPR(5,%r1) SAVE_GPR(6,%r1) SAVE_GPR(7,%r1) SAVE_GPR(8,%r1) SAVE_GPR(9,%r1) SAVE_GPR(10,%r1) SAVE_GPR(11,%r1) SAVE_GPR(12,%r1) SAVE_GPR(13,%r1) SAVE_GPR(14,%r1) SAVE_GPR(15,%r1) SAVE_GPR(16,%r1) SAVE_GPR(17,%r1) SAVE_GPR(18,%r1) SAVE_GPR(19,%r1) SAVE_GPR(20,%r1) SAVE_GPR(21,%r1) SAVE_GPR(22,%r1) SAVE_GPR(23,%r1) SAVE_GPR(24,%r1) SAVE_GPR(25,%r1) SAVE_GPR(26,%r1) SAVE_GPR(27,%r1) SAVE_GPR(28,%r1) SAVE_GPR(29,%r1) SAVE_GPR(30,%r1) SAVE_GPR(31,%r1) mfcr %r3 mfxer %r4 mfctr %r5 mflr %r6 stw %r3,STACK_CR(%r1) stw %r4,STACK_XER(%r1) std %r5,STACK_CTR(%r1) std %r6,STACK_LR(%r1) mfspr %r3,SPR_SRR0 mfspr %r4,SPR_SRR1 mfspr %r5,SPR_HSRR0 mfspr %r6,SPR_HSRR1 std %r3,STACK_SRR0(%r1) std %r4,STACK_SRR1(%r1) std %r5,STACK_HSRR0(%r1) std %r6,STACK_HSRR1(%r1) mfspr %r3,SPR_DSISR mfspr %r4,SPR_DAR stw %r3,STACK_DSISR(%r1) std %r4,STACK_DAR(%r1) mr %r3,%r1 LOAD_IMM64(%r4, SKIBOOT_BASE) LOAD_IMM32(%r5, exception_entry_foo - __head) add %r4,%r4,%r5 mtctr %r4 bctrl b . exception_entry_foo: b exception_entry .= 0x2000 /* This is the OPAL branch table. It's populated at boot time * with function pointers to the various OPAL functions from * the content of the .opal_table section, indexed by Token. */ .global opal_branch_table opal_branch_table: .space 8 * (OPAL_LAST + 1) /* Stores the offset we were started from. 
Used later on if we want to * read any unrelocated code/data such as the built-in kernel image */ .global boot_offset boot_offset: .llong 0 /* * * Boot time entry point from FSP * * All CPUs come here * * Boot code NV register usage: * * r31 : Boot PIR * r30 : Current running offset * r29 : Target address * r28 : PVR * r27 : DTB pointer (or NULL) * r26 : PIR thread mask */ .global boot_entry boot_entry: /* Check PVR and set some CR bits */ mfspr %r28,SPR_PVR li %r26,3 /* Default to SMT4 */ srdi %r3,%r28,16 cmpwi cr0,%r3,PVR_TYPE_P7 beq 1f cmpwi cr0,%r3,PVR_TYPE_P7P beq 1f cmpwi cr0,%r3,PVR_TYPE_P8 beq 2f cmpwi cr0,%r3,PVR_TYPE_P8E beq 2f cmpwi cr0,%r3,PVR_TYPE_P8NVL beq 2f cmpwi cr0,%r3,PVR_TYPE_P9 beq 1f attn /* Unsupported CPU type... what do we do ? */ b . /* loop here, just in case attn is disabled */ /* P8 -> 8 threads */ 2: li %r26,7 /* Get our reloc offset into r30 */ 1: bcl 20,31,$+4 1: mflr %r30 subi %r30,%r30,(1b - __head) /* Store reloc offset in boot_offset */ LOAD_IMM32(%r3, boot_offset - __head) add %r3,%r3,%r30 std %r30,0(%r3) /* Get ourselves a TOC & relocate it to our target address */ LOAD_IMM32(%r2,__toc_start - __head) LOAD_IMM64(%r29, SKIBOOT_BASE) add %r2,%r2,%r29 /* Fixup our MSR (remove TA) */ LOAD_IMM64(%r3, (MSR_HV | MSR_SF)) mtmsrd %r3,0 /* Check our PIR, avoid threads */ mfspr %r31,SPR_PIR and. %r0,%r31,%r26 bne secondary_wait /* Initialize per-core SPRs */ bl init_shared_sprs /* Pick a boot CPU, cpu index in r31 */ LOAD_IMM32(%r3, boot_sem - __head) add %r3,%r3,%r30 1: lwarx %r4,0,%r3 addi %r0,%r4,1 stwcx. %r0,0,%r3 bne 1b isync cmpwi cr0,%r4,0 bne secondary_wait /* Make sure we are in SMT medium */ smt_medium /* Initialize thread SPRs */ bl init_replicated_sprs /* Save the initial offset. The secondary threads will spin on boot_flag * before relocation so we need to keep track of its location to wake * them up. */ mr %r15,%r30 /* Check if we need to copy ourselves up and update %r30 to * be our new offset */ cmpd %r29,%r30 beq 2f LOAD_IMM32(%r3, _sbss - __head) srdi %r3,%r3,3 mtctr %r3 mr %r4,%r30 mr %r30,%r29 /* copy the skiboot image to the new offset */ 1: ld %r0,0(%r4) std %r0,0(%r29) addi %r29,%r29,8 addi %r4,%r4,8 bdnz 1b /* flush caches, etc */ sync icbi 0,%r29 sync isync /* branch to the new image location and continue */ LOAD_IMM32(%r3, 2f - __head) add %r3,%r3,%r30 mtctr %r3 bctr /* Get ready for C code: get a stack */ 2: GET_STACK(%r1,%r31) /* Clear up initial frame */ li %r3,0 std %r3,0(%r1) std %r3,8(%r1) std %r3,16(%r1) /* Relocate ourselves */ bl call_relocate /* Tell secondaries to move to second stage (relocated) spin loop */ LOAD_IMM32(%r3, boot_flag - __head) add %r3,%r3,%r15 li %r0,1 stw %r0,0(%r3) /* Clear BSS */ li %r0,0 LOAD_ADDR_FROM_TOC(%r3, _sbss) LOAD_ADDR_FROM_TOC(%r4, _ebss) subf %r4,%r3,%r4 srdi %r4,%r4,3 mtctr %r4 1: std %r0,0(%r3) addi %r3,%r3,8 bdnz 1b /* Get our per-cpu pointer into r13 */ GET_CPU() #ifdef STACK_CHECK_ENABLED /* Initialize stack bottom mark to 0, it will be updated in C code */ li %r0,0 std %r0,CPUTHREAD_STACK_BOT_MARK(%r13) #endif /* Initialize the stack guard */ LOAD_IMM64(%r3,STACK_CHECK_GUARD_BASE); xor %r3,%r3,%r31 std %r3,0(%r13) /* Jump to C */ mr %r3,%r27 bl main_cpu_entry b . 
/* Secondary CPUs wait here r31 is PIR */ secondary_wait: /* The primary might be in the middle of relocating us, * so first we spin on the boot_flag */ LOAD_IMM32(%r3, boot_flag - __head) add %r3,%r3,%r30 1: smt_lowest lwz %r0,0(%r3) cmpdi %r0,0 beq 1b /* Init some registers */ bl init_replicated_sprs /* Switch to new runtime address */ mr %r30,%r29 LOAD_IMM32(%r3, 1f - __head) add %r3,%r3,%r30 mtctr %r3 isync bctr 1: /* Now wait for cpu_secondary_start to be set */ LOAD_ADDR_FROM_TOC(%r3, cpu_secondary_start) 1: smt_lowest ld %r0,0(%r3) cmpdi %r0,0 beq 1b smt_medium /* Check our PIR is in bound */ LOAD_ADDR_FROM_TOC(%r5, cpu_max_pir) lwz %r5,0(%r5) cmpw %r31,%r5 bgt- secondary_not_found /* Get our stack, cpu thread, and jump to C */ GET_STACK(%r1,%r31) li %r0,0 std %r0,0(%r1) std %r0,16(%r1) GET_CPU() bl secondary_cpu_entry b . /* Not found... what to do ? set some global error ? */ secondary_not_found: smt_lowest b . call_relocate: mflr %r14 LOAD_IMM32(%r4,__dynamic_start - __head) LOAD_IMM32(%r5,__rela_dyn_start - __head) add %r4,%r4,%r30 add %r5,%r5,%r30 mr %r3,%r30 bl relocate cmpwi %r3,0 bne 1f mtlr %r14 blr 1: /* Fatal relocate failure */ attn #define FIXUP_ENDIAN \ tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \ b $+36; /* Skip trampoline if endian is good */ \ .long 0x05009f42; /* bcl 20,31,$+4 */ \ .long 0xa602487d; /* mflr r10 */ \ .long 0x1c004a39; /* addi r10,r10,28 */ \ .long 0xa600607d; /* mfmsr r11 */ \ .long 0x01006b69; /* xori r11,r11,1 */ \ .long 0xa6035a7d; /* mtsrr0 r10 */ \ .long 0xa6037b7d; /* mtsrr1 r11 */ \ .long 0x2400004c /* rfid */ .global enable_machine_check enable_machine_check: mflr %r0 bcl 20,31,$+4 0: mflr %r3 addi %r3,%r3,(1f - 0b) mtspr SPR_HSRR0,%r3 mfmsr %r3 ori %r3,%r3,MSR_ME mtspr SPR_HSRR1,%r3 hrfid 1: mtlr %r0 blr .global disable_machine_check disable_machine_check: mflr %r0 bcl 20,31,$+4 0: mflr %r3 addi %r3,%r3,(1f - 0b) mtspr SPR_HSRR0,%r3 mfmsr %r3 li %r4,MSR_ME andc %r3,%r3,%r4 mtspr SPR_HSRR1,%r3 hrfid 1: mtlr %r0 blr pm_save_regs: SAVE_GPR(2,%r1) SAVE_GPR(14,%r1) SAVE_GPR(15,%r1) SAVE_GPR(16,%r1) SAVE_GPR(17,%r1) SAVE_GPR(18,%r1) SAVE_GPR(19,%r1) SAVE_GPR(20,%r1) SAVE_GPR(21,%r1) SAVE_GPR(22,%r1) SAVE_GPR(23,%r1) SAVE_GPR(24,%r1) SAVE_GPR(25,%r1) SAVE_GPR(26,%r1) SAVE_GPR(27,%r1) SAVE_GPR(28,%r1) SAVE_GPR(29,%r1) SAVE_GPR(30,%r1) SAVE_GPR(31,%r1) mfcr %r4 mfxer %r5 mfspr %r6,SPR_HSPRG0 mfspr %r7,SPR_HSPRG1 stw %r4,STACK_CR(%r1) stw %r5,STACK_XER(%r1) std %r6,STACK_GPR0(%r1) std %r7,STACK_GPR1(%r1) blr .global enter_p8_pm_state enter_p8_pm_state: /* Before entering map or rvwinkle, we create a stack frame * and save our non-volatile registers. * * We also save these SPRs: * * - HSPRG0 in GPR0 slot * - HSPRG1 in GPR1 slot * * - xxx TODO: HIDs * - TODO: Mask MSR:ME during the process * * On entry, r3 indicates: * * 0 = nap * 1 = rvwinkle */ mflr %r0 std %r0,16(%r1) stdu %r1,-STACK_FRAMESIZE(%r1) bl pm_save_regs /* Save stack pointer in struct cpu_thread */ std %r1,CPUTHREAD_SAVE_R1(%r13) /* Winkle or nap ? */ cmpli %cr0,0,%r3,0 bne 1f /* nap sequence */ ptesync 0: ld %r0,CPUTHREAD_SAVE_R1(%r13) cmpd cr0,%r0,%r0 bne 0b PPC_INST_NAP b . /* rvwinkle sequence */ 1: ptesync 0: ld %r0,CPUTHREAD_SAVE_R1(%r13) cmpd cr0,%r0,%r0 bne 0b PPC_INST_RVWINKLE b . 
.global enter_p9_pm_lite_state enter_p9_pm_lite_state: mtspr SPR_PSSCR,%r3 PPC_INST_STOP blr .global enter_p9_pm_state enter_p9_pm_state: mflr %r0 std %r0,16(%r1) stdu %r1,-STACK_FRAMESIZE(%r1) bl pm_save_regs /* Save stack pointer in struct cpu_thread */ std %r1,CPUTHREAD_SAVE_R1(%r13) mtspr SPR_PSSCR,%r3 PPC_INST_STOP b . /* This is a little piece of code that is copied down to * 0x100 for handling power management wakeups */ .global reset_patch_start reset_patch_start: FIXUP_ENDIAN smt_medium LOAD_IMM64(%r30, SKIBOOT_BASE) LOAD_IMM32(%r3, reset_wakeup - __head) add %r3,%r30,%r3 mtctr %r3 bctr .global reset_patch_end reset_patch_end: reset_wakeup: /* Get PIR */ mfspr %r31,SPR_PIR /* Get that CPU stack base and use it to restore r13 */ GET_STACK(%r1,%r31) GET_CPU() /* Restore original stack pointer */ ld %r3,CPUTHREAD_SAVE_R1(%r13) /* If it's 0, we are doing a fast reboot */ cmpldi %r3,0 beq fast_reset_entry mr %r1,%r3 /* Restore more stuff */ lwz %r3,STACK_CR(%r1) lwz %r4,STACK_XER(%r1) ld %r5,STACK_GPR0(%r1) ld %r6,STACK_GPR1(%r1) mtcr %r3 mtxer %r4 mtspr SPR_HSPRG0,%r5 mtspr SPR_HSPRG1,%r6 REST_GPR(2,%r1) REST_GPR(14,%r1) REST_GPR(15,%r1) REST_GPR(16,%r1) REST_GPR(17,%r1) REST_GPR(18,%r1) REST_GPR(19,%r1) REST_GPR(20,%r1) REST_GPR(21,%r1) REST_GPR(22,%r1) REST_GPR(23,%r1) REST_GPR(24,%r1) REST_GPR(25,%r1) REST_GPR(26,%r1) REST_GPR(27,%r1) REST_GPR(28,%r1) REST_GPR(29,%r1) REST_GPR(30,%r1) REST_GPR(31,%r1) /* Get LR back, pop stack and return */ addi %r1,%r1,STACK_FRAMESIZE ld %r0,16(%r1) mtlr %r0 blr /* Fast reset code. We clean up the TLB and a few SPRs and * return to C code. All CPUs do that, the CPU triggering the * reset does it to itself last. The C code will sort out who * the master is. We come from the trampoline above with * r30 containing SKIBOOT_BASE */ fast_reset_entry: /* Clear out SLB */ li %r6,0 slbmte %r6,%r6 slbia ptesync /* Dummy stack frame */ li %r3,0 std %r3,0(%r1) std %r3,8(%r1) std %r3,16(%r1) /* Get our TOC */ addis %r2,%r30,(__toc_start - __head)@ha addi %r2,%r2,(__toc_start - __head)@l /* Go to C ! */ bl fast_reboot_entry b . /* Functions to initialize replicated and shared SPRs to sane * values. This is called at boot and on soft-reset */ .global init_shared_sprs init_shared_sprs: li %r0,0 mtspr SPR_AMOR, %r0 mfspr %r3,SPR_PVR srdi %r3,%r3,16 cmpwi cr0,%r3,PVR_TYPE_P7 beq 1f cmpwi cr0,%r3,PVR_TYPE_P7P beq 2f cmpwi cr0,%r3,PVR_TYPE_P8E beq 3f cmpwi cr0,%r3,PVR_TYPE_P8 beq 3f cmpwi cr0,%r3,PVR_TYPE_P8NVL beq 3f cmpwi cr0,%r3,PVR_TYPE_P9 beq 4f /* Unsupported CPU type... what do we do ? */ b 9f 1: /* P7 */ mtspr SPR_SDR1, %r0 /* TSCR: Value from pHyp */ LOAD_IMM32(%r3,0x880DE880) mtspr SPR_TSCR, %r3 b 9f 2: /* P7+ */ mtspr SPR_SDR1, %r0 /* TSCR: Recommended value by HW folks */ LOAD_IMM32(%r3,0x88CDE880) mtspr SPR_TSCR, %r3 b 9f 3: /* P8E/P8 */ mtspr SPR_SDR1, %r0 /* TSCR: Recommended value by HW folks */ LOAD_IMM32(%r3,0x8ACC6880) mtspr SPR_TSCR, %r3 /* HID0: Clear bit 13 (enable core recovery) * Clear bit 19 (HILE) */ mfspr %r3,SPR_HID0 li %r0,1 sldi %r4,%r0,(63-13) sldi %r5,%r0,(63-19) or %r0,%r4,%r5, andc %r3,%r3,%r0 sync mtspr SPR_HID0,%r3 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 isync /* HMEER: Enable HMIs for core recovery and TOD errors. 
*/ LOAD_IMM64(%r0,SPR_HMEER_HMI_ENABLE_MASK) mfspr %r3,SPR_HMEER or %r3,%r3,%r0 sync mtspr SPR_HMEER,%r3 isync /* RPR (per-LPAR but let's treat it as replicated for now) */ LOAD_IMM64(%r3,0x00000103070F1F3F) mtspr SPR_RPR,%r3 b 9f 4: /* P9 */ /* TSCR: Recommended value by HW folks */ LOAD_IMM32(%r3,0x80287880) mtspr SPR_TSCR, %r3 /* HID0: Clear bit 5 (enable core recovery) * Clear bit 4 (HILE) */ mfspr %r3,SPR_HID0 li %r0,1 sldi %r4,%r0,(63-5) sldi %r5,%r0,(63-4) or %r0,%r4,%r5, andc %r3,%r3,%r0 sync mtspr SPR_HID0,%r3 isync /* HMEER: Enable HMIs for core recovery and TOD errors. */ LOAD_IMM64(%r0,SPR_HMEER_HMI_ENABLE_MASK) mfspr %r3,SPR_HMEER or %r3,%r3,%r0 sync mtspr SPR_HMEER,%r3 isync LOAD_IMM64(%r3,0x00000103070F1F3F) mtspr SPR_RPR,%r3 9: blr .global init_replicated_sprs init_replicated_sprs: mfspr %r3,SPR_PVR srdi %r3,%r3,16 cmpwi cr0,%r3,PVR_TYPE_P7 beq 1f cmpwi cr0,%r3,PVR_TYPE_P7P beq 1f cmpwi cr0,%r3,PVR_TYPE_P8E beq 3f cmpwi cr0,%r3,PVR_TYPE_P8 beq 3f cmpwi cr0,%r3,PVR_TYPE_P8NVL beq 3f cmpwi cr0,%r3,PVR_TYPE_P9 beq 4f /* Unsupported CPU type... what do we do ? */ b 9f 1: /* P7, P7+ */ /* LPCR: sane value */ LOAD_IMM64(%r3,0x0040000000000004) mtspr SPR_LPCR, %r3 sync isync LOAD_IMM64(%r3,0x0) mtspr SPR_DSCR,%r3 b 9f 3: /* P8, P8E */ /* LPCR: sane value */ LOAD_IMM64(%r3,0x0040000000000000) mtspr SPR_LPCR, %r3 sync isync LOAD_IMM64(%r3,0x0) mtspr SPR_DSCR,%r3 b 9f 4: /* P9 */ /* LPCR: sane value */ LOAD_IMM64(%r3,0x0040000000000000) mtspr SPR_LPCR, %r3 sync isync /* DSCR: Stride-N Stream Enable */ LOAD_IMM64(%r3,0x0000000000000010) mtspr SPR_DSCR,%r3 9: blr .global enter_nap enter_nap: std %r0,0(%r1) ptesync ld %r0,0(%r1) 1: cmp %cr0,0,%r0,%r0 bne 1b nap b . /* * * NACA structure, accessed by the FPS to find the SPIRA * */ . = 0x4000 .global naca naca: .llong spirah /* 0x0000 : SPIRA-H */ .llong 0 /* 0x0008 : Reserved */ .llong 0 /* 0x0010 : Reserved */ .llong hv_release_data /* 0x0018 : HV release data */ .llong 0 /* 0x0020 : Reserved */ .llong 0 /* 0x0028 : Reserved */ .llong spira /* 0x0030 : SP Interface Root */ .llong hv_lid_load_table /* 0x0038 : LID load table */ .llong 0 /* 0x0040 : Reserved */ .space 68 .long 0 /* 0x008c : Reserved */ .space 16 .long SPIRA_ACTUAL_SIZE /* 0x00a0 : Actual size of SPIRA */ .space 28 .llong 0 /* 0x00c0 : resident module loadmap */ .space 136 .llong 0 /* 0x0150 : reserved */ .space 40 .llong 0 /* 0x0180 : reserved */ .space 36 .long 0 /* 0x01ac : control flags */ .byte 0 /* 0x01b0 : reserved */ .space 4 .byte 0 /* 0x01b5 : default state for SW attn */ .space 1 .byte 0x01 /* 0x01b7 : PCIA format */ .llong hdat_entry /* 0x01b8 : Primary thread entry */ .llong hdat_entry /* 0x01c0 : Secondary thread entry */ .space 0xe38 .balign 0x10 hv_release_data: .space 58 .llong 0x666 /* VRM ? */ .balign 0x10 hv_lid_load_table: .long 0x10 .long 0x10 .long 0 .long 0 /* * * OPAL variant of NACA. This is only used when booting a P7 in OPAL mode. * */ .global opal_naca opal_naca: .llong opal_boot_trampoline /* Primary entry (used ?) */ .llong opal_boot_trampoline /* Secondary entry (used ?) */ .llong spira /* Spira pointer */ .llong 0 /* Load address */ .llong opal_boot_trampoline /* 0x180 trampoline */ .llong 0 /* More stuff as seen in objdump ...*/ .llong 0 .llong 0 .llong 0 /* The FSP seems to ignore our primary/secondary entry * points and instead copy that bit down to 0x180 and * patch the first instruction to get our expected * boot CPU number. We ignore that patching for now and * got to the same entry we use for pHyp and FDT HB. 
*/ opal_boot_trampoline: li %r27,-1 ba boot_entry - __head /* * * OPAL entry point from operating system * * Register usage: * * r0: Token * r2: OPAL Base * r3..r10: Args * r12: Scratch * r13..r31: Preserved * */ .balign 0x10 .global opal_entry opal_entry: /* Get our per CPU stack */ mfspr %r12,SPR_PIR GET_STACK(%r12,%r12) stdu %r12,-STACK_FRAMESIZE(%r12) /* Save caller r1, establish new r1 */ std %r1,STACK_GPR1(%r12) mr %r1,%r12 /* Save arguments because we call C */ std %r3,STACK_GPR3(%r1) std %r4,STACK_GPR4(%r1) std %r5,STACK_GPR5(%r1) std %r6,STACK_GPR6(%r1) std %r7,STACK_GPR7(%r1) std %r8,STACK_GPR8(%r1) std %r9,STACK_GPR9(%r1) std %r10,STACK_GPR10(%r1) /* Save Token (r0), LR and r13 */ mflr %r12 std %r0,STACK_GPR0(%r1) std %r13,STACK_GPR13(%r1) std %r12,STACK_LR(%r1) /* Get the CPU thread */ GET_CPU() /* Store token in CPU thread */ std %r0,CPUTHREAD_CUR_TOKEN(%r13) /* Mark the stack frame */ li %r12,STACK_ENTRY_OPAL_API std %r12,STACK_TYPE(%r1) /* Get our TOC */ addis %r2,%r2,(__toc_start - __head)@ha addi %r2,%r2,(__toc_start - __head)@l /* Check entry */ mr %r3,%r1 bl opal_entry_check cmpdi %r3,0 bne 1f ld %r0,STACK_GPR0(%r1) ld %r3,STACK_GPR3(%r1) ld %r4,STACK_GPR4(%r1) ld %r5,STACK_GPR5(%r1) ld %r6,STACK_GPR6(%r1) ld %r7,STACK_GPR7(%r1) ld %r8,STACK_GPR8(%r1) ld %r9,STACK_GPR9(%r1) ld %r10,STACK_GPR10(%r1) /* Convert our token into a table entry and get the * function pointer. Also check the token. * For ELFv2 ABI, the local entry point is used so no need for r12. */ sldi %r0,%r0,3 LOAD_ADDR_FROM_TOC(%r12, opal_branch_table) ldx %r0,%r12,%r0 mtctr %r0 /* Jump ! */ bctrl mr %r4,%r1 bl opal_exit_check 1: ld %r12,STACK_LR(%r1) mtlr %r12 ld %r13,STACK_GPR13(%r1) ld %r1,STACK_GPR1(%r1) blr .global start_kernel start_kernel: sync icbi 0,%r3 sync isync mtctr %r3 mr %r3,%r4 LOAD_IMM64(%r8,SKIBOOT_BASE); LOAD_IMM32(%r10, opal_entry - __head) add %r9,%r8,%r10 LOAD_IMM32(%r6, EPAPR_MAGIC) addi %r7,%r5,1 li %r4,0 li %r5,0 bctr .global start_kernel32 start_kernel32: mfmsr %r10 clrldi %r10,%r10,1 mtmsrd %r10,0 sync isync b start_kernel .global start_kernel_secondary start_kernel_secondary: sync isync mtctr %r3 mfspr %r3,SPR_PIR bctr skiboot-5.10-rc4/asm/kernel-wrapper.S000066400000000000000000000012731324317060200174420ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ .section ".builtin_kernel","a" .balign 0x10000 #ifdef BUILTIN_KERNEL .incbin BUILTIN_KERNEL #endif skiboot-5.10-rc4/asm/misc.S000066400000000000000000000044171324317060200154420ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #define OLD_BINUTILS 1 .section ".text","ax" .balign 0x10 /* void set_hid0(unsigned long hid0) */ .global set_hid0 set_hid0: sync mtspr SPR_HID0,%r3 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 mfspr %r3,SPR_HID0 isync blr .global __trigger_attn __trigger_attn: sync isync attn blr #ifdef STACK_CHECK_ENABLED .global _mcount _mcount: mr %r3,%r1 mflr %r4 b __mcount_stack_check #endif .global cleanup_local_tlb cleanup_local_tlb: /* Clean the TLB */ li %r3,512 mtctr %r3 li %r4,0xc00 /* IS field = 0b11 */ ptesync 1: tlbiel %r4 addi %r4,%r4,0x1000 bdnz 1b ptesync blr .global cleanup_global_tlb cleanup_global_tlb: /* Only supported on P9 for now */ mfspr %r3,SPR_PVR srdi %r3,%r3,16 cmpwi cr0,%r3,PVR_TYPE_P9 bnelr /* Sync out previous updates */ ptesync #ifndef OLD_BINUTILS .machine "power9" #endif /* Lead RB with IS=11 */ li %r3,3 sldi %r3,%r3,10 li %r0,0 /* Blow up radix partition scoped translations */ #ifdef OLD_BINUTILS .long 0x7c0b1a64 #else tlbie %r3, %r0 /* rs */, 2 /* ric */, 1 /* prs */, 1 /* r */ #endif eieio tlbsync ptesync #ifdef OLD_BINUTILS .long 0x7c091a64 #else tlbie %r3, %r0 /* rs */, 2 /* ric */, 0 /* prs */, 1 /* r */ #endif eieio tlbsync ptesync /* Blow up hash partition scoped translations */ #ifdef OLD_BINUTILS .long 0x7c0a1a64 #else tlbie %r3, %r0 /* rs */, 2 /* ric */, 1 /* prs */, 0 /* r */ #endif eieio tlbsync ptesync #ifdef OLD_BINUTILS .long 0x7c081a64 #else tlbie %r3, %r0 /* rs */, 2 /* ric */, 0 /* prs */, 0 /* r */ #endif eieio tlbsync ptesync blr skiboot-5.10-rc4/asm/real_map.S000066400000000000000000000012131324317060200162560ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ .section ".sym_map","a" .incbin "skiboot.tmp.map" skiboot-5.10-rc4/ccan/000077500000000000000000000000001324317060200145015ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/Makefile.check000066400000000000000000000022531324317060200172170ustar00rootroot00000000000000CCAN_TEST_SRC := $(wildcard ccan/*/test/run*.c) LCOV_EXCLUDE += $(CCAN_TEST_SRC) ccan/list/test/helper.c CCAN_TEST := $(CCAN_TEST_SRC:%.c=%) .PHONY: $(CCAN_TEST:%=%-gcov-run) ccan-check ccan-check: $(CCAN_TEST:%=%-check) check: ccan-check $(CCAN_TEST:%=%-gcov-run) .PHONY: ccan-coverage coverage: ccan-coverage ccan-coverage: $(CCAN_TEST:%=%-gcov-run) $(CCAN_TEST:%=%-gcov-run) : %-run: % $(eval LCOV_DIRS += -d $(dir $<) ) $(call Q, TEST-COVERAGE , (cd $(dir $<); GCOV_PREFIX_STRIP=`(c=0; while [ "\`pwd\`" != '/' ]; do cd ..; c=\`expr 1 + $$c\`; done; echo $$c)` ./$(notdir $<) ), $< ) $(CCAN_TEST:%=%-check) : %-check: % $(call Q, RUN-TEST , $(VALGRIND) $<, $<) $(CCAN_TEST) : % : %.c $(call Q, HOSTCC ,$(HOSTCC) $(HOSTCFLAGS) -O0 -g -I . 
-Iccan/ -o $@ $<,$<) $(CCAN_TEST:%=%-gcov): %-gcov : %.c $(call Q, HOSTCC , (cd $(dir $<); $(HOSTCC) $(HOSTCFLAGS) $(HOSTGCOVCFLAGS) -I $(shell pwd) -I$(shell pwd)/./ccan/ -o $(notdir $@) $(notdir $<) ), $<) -include $(wildcard ccan/*/test/*.d) clean: ccan-test-clean ccan-test-clean: $(RM) -f $(CCAN_TEST) \ $(CCAN_TEST:%=%-gcov) \ $(CCAN_TEST:%=%.d) \ $(CCAN_TEST:%=%.o) \ $(CCAN_TEST:%=%.gcda) \ $(CCAN_TEST:%=%.gcno) skiboot-5.10-rc4/ccan/Makefile.inc000066400000000000000000000002151324317060200167070ustar00rootroot00000000000000# -*-Makefile-*- SUBDIRS += ccan ccan/list ccan/str CCAN_OBJS = list/list.o str/str.o CCAN=ccan/built-in.o $(CCAN): $(CCAN_OBJS:%=ccan/%) skiboot-5.10-rc4/ccan/array_size/000077500000000000000000000000001324317060200166515ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/array_size/LICENSE000066400000000000000000000143571324317060200176700ustar00rootroot00000000000000Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; moral rights retained by the original author(s) and/or performer(s); publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; rights protecting the extraction, dissemination, use and reuse of data in a Work; database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. 
To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. 
Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. skiboot-5.10-rc4/ccan/array_size/_info000066400000000000000000000020061324317060200176640ustar00rootroot00000000000000#include #include #include "config.h" /** * array_size - routine for safely deriving the size of a visible array. * * This provides a simple ARRAY_SIZE() macro, which (given a good compiler) * will also break compile if you try to use it on a pointer. * * This can ensure your code is robust to changes, without needing a gratuitous * macro or constant. * * Example: * // Outputs "Initialized 32 values" * #include * #include * #include * * // We currently use 32 random values. * static unsigned int vals[32]; * * int main(void) * { * unsigned int i; * for (i = 0; i < ARRAY_SIZE(vals); i++) * vals[i] = random(); * printf("Initialized %u values\n", i); * return 0; * } * * License: CC0 (Public domain) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/build_assert\n"); return 0; } return 1; } skiboot-5.10-rc4/ccan/array_size/array_size.h000066400000000000000000000015711324317060200211760ustar00rootroot00000000000000/* CC0 (Public domain) - see LICENSE file for details */ #ifndef CCAN_ARRAY_SIZE_H #define CCAN_ARRAY_SIZE_H #include "config.h" #include /** * ARRAY_SIZE - get the number of elements in a visible array * @arr: the array whose size you want. * * This does not work on pointers, or arrays declared as [], or * function parameters. With correct compiler support, such usage * will cause a build error (see build_assert). */ #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + _array_size_chk(arr)) #if HAVE_BUILTIN_TYPES_COMPATIBLE_P && HAVE_TYPEOF /* Two gcc extensions. * &a[0] degrades to a pointer: a different type from an array */ #define _array_size_chk(arr) \ BUILD_ASSERT_OR_ZERO(!__builtin_types_compatible_p(typeof(arr), \ typeof(&(arr)[0]))) #else #define _array_size_chk(arr) 0 #endif #endif /* CCAN_ALIGNOF_H */ skiboot-5.10-rc4/ccan/array_size/test/000077500000000000000000000000001324317060200176305ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/array_size/test/compile_fail-function-param.c000066400000000000000000000007651324317060200253500ustar00rootroot00000000000000#include #include struct foo { unsigned int a, b; }; int check_parameter(const struct foo array[4]); int check_parameter(const struct foo array[4]) { #ifdef FAIL return (ARRAY_SIZE(array) == 4); #if !HAVE_TYPEOF || !HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if _array_size_chk is a noop." #endif #else return sizeof(array) == 4 * sizeof(struct foo); #endif } int main(int argc, char *argv[]) { return check_parameter(NULL); } skiboot-5.10-rc4/ccan/array_size/test/compile_fail.c000066400000000000000000000005021324317060200224140ustar00rootroot00000000000000#include int main(int argc, char *argv[8]) { char array[100]; #ifdef FAIL return ARRAY_SIZE(argv) + ARRAY_SIZE(array); #if !HAVE_TYPEOF || !HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if _array_size_chk is a noop." 
#endif #else return ARRAY_SIZE(array); #endif } skiboot-5.10-rc4/ccan/array_size/test/run.c000066400000000000000000000014531324317060200206030ustar00rootroot00000000000000#include #include static char array1[1]; static int array2[2]; static unsigned long array3[3][5]; struct foo { unsigned int a, b; char string[100]; }; static struct foo array4[4]; /* Make sure they can be used in initializers. */ static int array1_size = ARRAY_SIZE(array1); static int array2_size = ARRAY_SIZE(array2); static int array3_size = ARRAY_SIZE(array3); static int array4_size = ARRAY_SIZE(array4); int __attribute__((const)) main(int argc, char *argv[]) { (void)argc; (void)argv; plan_tests(8); ok1(array1_size == 1); ok1(array2_size == 2); ok1(array3_size == 3); ok1(array4_size == 4); ok1(ARRAY_SIZE(array1) == 1); ok1(ARRAY_SIZE(array2) == 2); ok1(ARRAY_SIZE(array3) == 3); ok1(ARRAY_SIZE(array4) == 4); return exit_status(); } skiboot-5.10-rc4/ccan/build_assert/000077500000000000000000000000001324317060200171615ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/build_assert/LICENSE000066400000000000000000000143571324317060200202000ustar00rootroot00000000000000Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). 
Copyright and Related Rights include, but are not limited to, the following: the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; moral rights retained by the original author(s) and/or performer(s); publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; rights protecting the extraction, dissemination, use and reuse of data in a Work; database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. 
Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. skiboot-5.10-rc4/ccan/build_assert/_info000066400000000000000000000025061324317060200202010ustar00rootroot00000000000000#include #include #include "config.h" /** * build_assert - routines for build-time assertions * * This code provides routines which will cause compilation to fail should some * assertion be untrue: such failures are preferable to run-time assertions, * but much more limited since they can only depends on compile-time constants. * * These assertions are most useful when two parts of the code must be kept in * sync: it is better to avoid such cases if possible, but seconds best is to * detect invalid changes at build time. * * For example, a tricky piece of code might rely on a certain element being at * the start of the structure. To ensure that future changes don't break it, * you would catch such changes in your code like so: * * Example: * #include * #include * * struct foo { * char string[5]; * int x; * }; * * static char *foo_string(struct foo *foo) * { * // This trick requires that the string be first in the structure * BUILD_ASSERT(offsetof(struct foo, string) == 0); * return (char *)foo; * } * * License: CC0 (Public domain) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) /* Nothing. */ return 0; return 1; } skiboot-5.10-rc4/ccan/build_assert/build_assert.h000066400000000000000000000023141324317060200220120ustar00rootroot00000000000000/* CC0 (Public domain) - see LICENSE file for details */ #ifndef CCAN_BUILD_ASSERT_H #define CCAN_BUILD_ASSERT_H /** * BUILD_ASSERT - assert a build-time dependency. * @cond: the compile-time condition which must be true. * * Your compile will fail if the condition isn't true, or can't be evaluated * by the compiler. This can only be used within a function. * * Example: * #include * ... 
* static char *foo_to_char(struct foo *foo) * { * // This code needs string to be at start of foo. * BUILD_ASSERT(offsetof(struct foo, string) == 0); * return (char *)foo; * } */ #define BUILD_ASSERT(cond) \ do { (void) sizeof(char [1 - 2*!(cond)]); } while(0) /** * BUILD_ASSERT_OR_ZERO - assert a build-time dependency, as an expression. * @cond: the compile-time condition which must be true. * * Your compile will fail if the condition isn't true, or can't be evaluated * by the compiler. This can be used in an expression: its value is "0". * * Example: * #define foo_to_char(foo) \ * ((char *)(foo) \ * + BUILD_ASSERT_OR_ZERO(offsetof(struct foo, string) == 0)) */ #define BUILD_ASSERT_OR_ZERO(cond) \ (sizeof(char [1 - 2*!(cond)]) - 1) #endif /* CCAN_BUILD_ASSERT_H */ skiboot-5.10-rc4/ccan/build_assert/test/000077500000000000000000000000001324317060200201405ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/build_assert/test/compile_fail-expr.c000066400000000000000000000002341324317060200237020ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { #ifdef FAIL return BUILD_ASSERT_OR_ZERO(1 == 0); #else return 0; #endif } skiboot-5.10-rc4/ccan/build_assert/test/compile_fail.c000066400000000000000000000002071324317060200227260ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { #ifdef FAIL BUILD_ASSERT(1 == 0); #endif return 0; } skiboot-5.10-rc4/ccan/build_assert/test/compile_ok.c000066400000000000000000000001641324317060200224260ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { BUILD_ASSERT(1 == 1); return 0; } skiboot-5.10-rc4/ccan/build_assert/test/run-BUILD_ASSERT_OR_ZERO.c000066400000000000000000000003561324317060200242710ustar00rootroot00000000000000#include #include int __attribute__((const)) main(int argc, char *argv[]) { (void)argc; (void)argv; plan_tests(1); ok1(BUILD_ASSERT_OR_ZERO(1 == 1) == 0); return exit_status(); } skiboot-5.10-rc4/ccan/check_type/000077500000000000000000000000001324317060200166175ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/check_type/LICENSE000066400000000000000000000143571324317060200176360ustar00rootroot00000000000000Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. 
For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; moral rights retained by the original author(s) and/or performer(s); publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; rights protecting the extraction, dissemination, use and reuse of data in a Work; database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. 
In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. skiboot-5.10-rc4/ccan/check_type/_info000066400000000000000000000015111324317060200176320ustar00rootroot00000000000000#include #include #include "config.h" /** * check_type - routines for compile time type checking * * C has fairly weak typing: ints get automatically converted to longs, signed * to unsigned, etc. There are some cases where this is best avoided, and * these macros provide methods for evoking warnings (or build errors) when * a precise type isn't used. * * On compilers which don't support typeof() these routines are less effective, * since they have to use sizeof() which can only distiguish between types of * different size. * * License: CC0 (Public domain) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { #if !HAVE_TYPEOF printf("ccan/build_assert\n"); #endif return 0; } return 1; } skiboot-5.10-rc4/ccan/check_type/check_type.h000066400000000000000000000045051324317060200211120ustar00rootroot00000000000000/* CC0 (Public domain) - see LICENSE file for details */ #ifndef CCAN_CHECK_TYPE_H #define CCAN_CHECK_TYPE_H #include "config.h" /** * check_type - issue a warning or build failure if type is not correct. 
* @expr: the expression whose type we should check (not evaluated). * @type: the exact type we expect the expression to be. * * This macro is usually used within other macros to try to ensure that a macro * argument is of the expected type. No type promotion of the expression is * done: an unsigned int is not the same as an int! * * check_type() always evaluates to 0. * * If your compiler does not support typeof, then the best we can do is fail * to compile if the sizes of the types are unequal (a less complete check). * * Example: * // They should always pass a 64-bit value to _set_some_value! * #define set_some_value(expr) \ * _set_some_value((check_type((expr), uint64_t), (expr))) */ /** * check_types_match - issue a warning or build failure if types are not same. * @expr1: the first expression (not evaluated). * @expr2: the second expression (not evaluated). * * This macro is usually used within other macros to try to ensure that * arguments are of identical types. No type promotion of the expressions is * done: an unsigned int is not the same as an int! * * check_types_match() always evaluates to 0. * * If your compiler does not support typeof, then the best we can do is fail * to compile if the sizes of the types are unequal (a less complete check). * * Example: * // Do subtraction to get to enclosing type, but make sure that * // pointer is of correct type for that member. * #define container_of(mbr_ptr, encl_type, mbr) \ * (check_types_match((mbr_ptr), &((encl_type *)0)->mbr), \ * ((encl_type *) \ * ((char *)(mbr_ptr) - offsetof(enclosing_type, mbr)))) */ #if HAVE_TYPEOF #define check_type(expr, type) \ ((typeof(expr) *)0 != (type *)0) #define check_types_match(expr1, expr2) \ ((typeof(expr1) *)0 != (typeof(expr2) *)0) #else #include /* Without typeof, we can only test the sizes. 
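   For example, with only sizeof() to go on, check_type(1U, int) cannot be
   rejected, since unsigned int and int have the same size; the typeof-based
   version above would warn about that mismatch.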
*/ #define check_type(expr, type) \ BUILD_ASSERT_OR_ZERO(sizeof(expr) == sizeof(type)) #define check_types_match(expr1, expr2) \ BUILD_ASSERT_OR_ZERO(sizeof(expr1) == sizeof(expr2)) #endif /* HAVE_TYPEOF */ #endif /* CCAN_CHECK_TYPE_H */ skiboot-5.10-rc4/ccan/check_type/test/000077500000000000000000000000001324317060200175765ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/check_type/test/compile_fail-check_type.c000066400000000000000000000002051324317060200244760ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { #ifdef FAIL check_type(argc, char); #endif return 0; } skiboot-5.10-rc4/ccan/check_type/test/compile_fail-check_type_unsigned.c000066400000000000000000000003751324317060200264020ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { #ifdef FAIL #if HAVE_TYPEOF check_type(argc, unsigned int); #else /* This doesn't work without typeof, so just fail */ #error "Fail without typeof" #endif #endif return 0; } skiboot-5.10-rc4/ccan/check_type/test/compile_fail-check_types_match.c000066400000000000000000000002421324317060200260360ustar00rootroot00000000000000#include int main(int argc, char *argv[]) { unsigned char x = argc; #ifdef FAIL check_types_match(argc, x); #endif return x; } skiboot-5.10-rc4/ccan/check_type/test/run.c000066400000000000000000000011441324317060200205460ustar00rootroot00000000000000#include #include int __attribute__((const)) main(int argc, char *argv[]) { int x = 0, y = 0; (void)argc; (void)argv; plan_tests(9); ok1(check_type(argc, int) == 0); ok1(check_type(&argc, int *) == 0); ok1(check_types_match(argc, argc) == 0); ok1(check_types_match(argc, x) == 0); ok1(check_types_match(&argc, &x) == 0); ok1(check_type(x++, int) == 0); ok(x == 0, "check_type does not evaluate expression"); ok1(check_types_match(x++, y++) == 0); ok(x == 0 && y == 0, "check_types_match does not evaluate expressions"); return exit_status(); } skiboot-5.10-rc4/ccan/config.h000066400000000000000000000015521324317060200161220ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Dummy config.h for CCAN test suite */ #define HAVE_BUILTIN_TYPES_COMPATIBLE_P 1 #define HAVE_TYPEOF 1 #ifndef HAVE_BIG_ENDIAN #define HAVE_BIG_ENDIAN 0 #endif #ifndef HAVE_LITTLE_ENDIAN #define HAVE_LITTLE_ENDIAN 0 #endif #define HAVE_BYTESWAP_H 0 #define HAVE_BSWAP_64 0 skiboot-5.10-rc4/ccan/container_of/000077500000000000000000000000001324317060200171475ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/container_of/LICENSE000066400000000000000000000143571324317060200201660ustar00rootroot00000000000000Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). 
Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; moral rights retained by the original author(s) and/or performer(s); publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; rights protecting the extraction, dissemination, use and reuse of data in a Work; database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. 
Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. skiboot-5.10-rc4/ccan/container_of/_info000066400000000000000000000024741324317060200201730ustar00rootroot00000000000000#include #include #include "config.h" /** * container_of - routine for upcasting * * It is often convenient to create code where the caller registers a pointer * to a generic structure and a callback. The callback might know that the * pointer points to within a larger structure, and container_of gives a * convenient and fairly type-safe way of returning to the enclosing structure. * * This idiom is an alternative to providing a void * pointer for every * callback. * * Example: * #include * #include * * struct timer { * void *members; * }; * * struct info { * int my_stuff; * struct timer timer; * }; * * static void register_timer(struct timer *timer) * { * //... 
* } * * static void my_timer_callback(struct timer *timer) * { * struct info *info = container_of(timer, struct info, timer); * printf("my_stuff is %u\n", info->my_stuff); * } * * int main(void) * { * struct info info = { .my_stuff = 1 }; * * register_timer(&info.timer); * // ... * return 0; * } * * License: CC0 (Public domain) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/check_type\n"); return 0; } return 1; } skiboot-5.10-rc4/ccan/container_of/container_of.h000066400000000000000000000061201324317060200217650ustar00rootroot00000000000000/* CC0 (Public domain) - see LICENSE file for details */ #ifndef CCAN_CONTAINER_OF_H #define CCAN_CONTAINER_OF_H #include #include "config.h" #include /** * container_of - get pointer to enclosing structure * @member_ptr: pointer to the structure member * @containing_type: the type this member is within * @member: the name of this member within the structure. * * Given a pointer to a member of a structure, this macro does pointer * subtraction to return the pointer to the enclosing type. * * Example: * struct foo { * int fielda, fieldb; * // ... * }; * struct info { * int some_other_field; * struct foo my_foo; * }; * * static struct info *foo_to_info(struct foo *foo) * { * return container_of(foo, struct info, my_foo); * } */ #define container_of(member_ptr, containing_type, member) \ ((containing_type *) \ ((char *)(member_ptr) \ - container_off(containing_type, member)) \ + check_types_match(*(member_ptr), ((containing_type *)0)->member)) /** * container_off - get offset to enclosing structure * @containing_type: the type this member is within * @member: the name of this member within the structure. * * Given a pointer to a member of a structure, this macro does * typechecking and figures out the offset to the enclosing type. * * Example: * struct foo { * int fielda, fieldb; * // ... * }; * struct info { * int some_other_field; * struct foo my_foo; * }; * * static struct info *foo_to_info(struct foo *foo) * { * size_t off = container_off(struct info, my_foo); * return (void *)((char *)foo - off); * } */ #define container_off(containing_type, member) \ offsetof(containing_type, member) /** * container_of_var - get pointer to enclosing structure using a variable * @member_ptr: pointer to the structure member * @container_var: a pointer of same type as this member's container * @member: the name of this member within the structure. * * Given a pointer to a member of a structure, this macro does pointer * subtraction to return the pointer to the enclosing type. * * Example: * static struct info *foo_to_i(struct foo *foo) * { * struct info *i = container_of_var(foo, i, my_foo); * return i; * } */ #if HAVE_TYPEOF #define container_of_var(member_ptr, container_var, member) \ container_of(member_ptr, typeof(*container_var), member) #else #define container_of_var(member_ptr, container_var, member) \ ((void *)((char *)(member_ptr) - \ container_off_var(container_var, member))) #endif /** * container_off_var - get offset of a field in enclosing structure * @container_var: a pointer to a container structure * @member: the name of a member within the structure. * * Given (any) pointer to a structure and a its member name, this * macro does pointer subtraction to return offset of member in a * structure memory layout. 
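 *
 * Example (a minimal sketch reusing the struct info / my_foo layout from the
 * container_of() example above; my_foo_off is a hypothetical helper):
 *	static size_t my_foo_off(struct info *info)
 *	{
 *		return container_off_var(info, my_foo);
 *	}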
* */ #if HAVE_TYPEOF #define container_off_var(var, member) \ container_off(typeof(*var), member) #else #define container_off_var(var, member) \ ((char *)&(var)->member - (char *)(var)) #endif #endif /* CCAN_CONTAINER_OF_H */ skiboot-5.10-rc4/ccan/container_of/test/000077500000000000000000000000001324317060200201265ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/container_of/test/compile_fail-bad-type.c000066400000000000000000000005511324317060200244210ustar00rootroot00000000000000#include #include struct foo { int a; char b; }; int main(int argc, char *argv[]) { struct foo foo = { .a = 1, .b = 2 }; int *intp = &foo.a; char *p; #ifdef FAIL /* p is a char *, but this gives a struct foo * */ p = container_of(intp, struct foo, a); #else p = (char *)intp; #endif return p == NULL; } skiboot-5.10-rc4/ccan/container_of/test/compile_fail-types.c000066400000000000000000000006321324317060200240600ustar00rootroot00000000000000#include #include struct foo { int a; char b; }; int main(int argc, char *argv[]) { struct foo foo = { .a = 1, .b = 2 }, *foop; int *intp = &foo.a; #ifdef FAIL /* b is a char, but intp is an int * */ foop = container_of(intp, struct foo, b); #else foop = NULL; #endif (void) foop; /* Suppress unused-but-set-variable warning. */ return intp == NULL; } skiboot-5.10-rc4/ccan/container_of/test/compile_fail-var-types.c000066400000000000000000000007561324317060200246550ustar00rootroot00000000000000#include #include struct foo { int a; char b; }; int main(int argc, char *argv[]) { struct foo foo = { .a = 1, .b = 2 }, *foop; int *intp = &foo.a; #ifdef FAIL /* b is a char, but intp is an int * */ foop = container_of_var(intp, foop, b); #if !HAVE_TYPEOF #error "Unfortunately we don't fail if we don't have typeof." #endif #else foop = NULL; #endif (void) foop; /* Suppress unused-but-set-variable warning. */ return intp == NULL; } skiboot-5.10-rc4/ccan/container_of/test/run.c000066400000000000000000000011451324317060200210770ustar00rootroot00000000000000#include #include struct foo { int a; char b; }; int __attribute__((const)) main(int argc, char *argv[]) { struct foo foo = { .a = 1, .b = 2 }; int *intp = &foo.a; char *charp = &foo.b; (void)argc; (void)argv; plan_tests(6); ok1(container_of(intp, struct foo, a) == &foo); ok1(container_of(charp, struct foo, b) == &foo); ok1(container_of_var(intp, &foo, a) == &foo); ok1(container_of_var(charp, &foo, b) == &foo); ok1(container_off(struct foo, a) == 0); ok1(container_off(struct foo, b) == offsetof(struct foo, b)); return exit_status(); } skiboot-5.10-rc4/ccan/endian/000077500000000000000000000000001324317060200157375ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/endian/.depends000066400000000000000000000000001324317060200173500ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/endian/LICENSE000066400000000000000000000143571324317060200167560ustar00rootroot00000000000000Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). 
Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; moral rights retained by the original author(s) and/or performer(s); publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; rights protecting the extraction, dissemination, use and reuse of data in a Work; database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. 
Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. skiboot-5.10-rc4/ccan/endian/_info000066400000000000000000000026131324317060200167560ustar00rootroot00000000000000#include "config.h" #include #include /** * endian - endian conversion macros for simple types * * Portable protocols (such as on-disk formats, or network protocols) * are often defined to be a particular endian: little-endian (least * significant bytes first) or big-endian (most significant bytes * first). * * Similarly, some CPUs lay out values in memory in little-endian * order (most commonly, Intel's 8086 and derivatives), or big-endian * order (almost everyone else). * * This module provides conversion routines, inspired by the linux kernel. * It also provides leint32_t, beint32_t etc typedefs, which are annotated for * the sparse checker. 
* * Example: * #include * #include * #include * * // * int main(int argc, char *argv[]) * { * uint32_t value; * * if (argc != 2) * errx(1, "Usage: %s ", argv[0]); * * value = atoi(argv[1]); * printf("native: %08x\n", value); * printf("little-endian: %08x\n", cpu_to_le32(value)); * printf("big-endian: %08x\n", cpu_to_be32(value)); * printf("byte-reversed: %08x\n", bswap_32(value)); * exit(0); * } * * License: CC0 (Public domain) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) /* Nothing */ return 0; return 1; } skiboot-5.10-rc4/ccan/endian/endian.h000066400000000000000000000222521324317060200173510ustar00rootroot00000000000000/* CC0 (Public domain) - see LICENSE file for details */ #ifndef CCAN_ENDIAN_H #define CCAN_ENDIAN_H #include #include "config.h" /** * BSWAP_16 - reverse bytes in a constant uint16_t value. * @val: constant value whose bytes to swap. * * Designed to be usable in constant-requiring initializers. * * Example: * struct mystruct { * char buf[BSWAP_16(0x1234)]; * }; */ #define BSWAP_16(val) \ ((((uint16_t)(val) & 0x00ff) << 8) \ | (((uint16_t)(val) & 0xff00) >> 8)) /** * BSWAP_32 - reverse bytes in a constant uint32_t value. * @val: constant value whose bytes to swap. * * Designed to be usable in constant-requiring initializers. * * Example: * struct mystruct { * char buf[BSWAP_32(0xff000000)]; * }; */ #define BSWAP_32(val) \ ((((uint32_t)(val) & 0x000000ff) << 24) \ | (((uint32_t)(val) & 0x0000ff00) << 8) \ | (((uint32_t)(val) & 0x00ff0000) >> 8) \ | (((uint32_t)(val) & 0xff000000) >> 24)) /** * BSWAP_64 - reverse bytes in a constant uint64_t value. * @val: constant value whose bytes to swap. * * Designed to be usable in constant-requiring initializers. * * Example: * struct mystruct { * char buf[BSWAP_64(0xff00000000000000ULL)]; * }; */ #define BSWAP_64(val) \ ((((uint64_t)(val) & 0x00000000000000ffULL) << 56) \ | (((uint64_t)(val) & 0x000000000000ff00ULL) << 40) \ | (((uint64_t)(val) & 0x0000000000ff0000ULL) << 24) \ | (((uint64_t)(val) & 0x00000000ff000000ULL) << 8) \ | (((uint64_t)(val) & 0x000000ff00000000ULL) >> 8) \ | (((uint64_t)(val) & 0x0000ff0000000000ULL) >> 24) \ | (((uint64_t)(val) & 0x00ff000000000000ULL) >> 40) \ | (((uint64_t)(val) & 0xff00000000000000ULL) >> 56)) #if HAVE_BYTESWAP_H #include #else /** * bswap_16 - reverse bytes in a uint16_t value. * @val: value whose bytes to swap. * * Example: * // Output contains "1024 is 4 as two bytes reversed" * printf("1024 is %u as two bytes reversed\n", bswap_16(1024)); */ static inline uint16_t bswap_16(uint16_t val) { return BSWAP_16(val); } /** * bswap_32 - reverse bytes in a uint32_t value. * @val: value whose bytes to swap. * * Example: * // Output contains "1024 is 262144 as four bytes reversed" * printf("1024 is %u as four bytes reversed\n", bswap_32(1024)); */ static inline uint32_t bswap_32(uint32_t val) { return BSWAP_32(val); } #endif /* !HAVE_BYTESWAP_H */ #if !HAVE_BSWAP_64 /** * bswap_64 - reverse bytes in a uint64_t value. * @val: value whose bytes to swap. * * Example: * // Output contains "1024 is 1125899906842624 as eight bytes reversed" * printf("1024 is %llu as eight bytes reversed\n", * (unsigned long long)bswap_64(1024)); */ static inline uint64_t bswap_64(uint64_t val) { return BSWAP_64(val); } #endif /* Needed for Glibc-like endianness check */ #define __LITTLE_ENDIAN 1234 #define __BIG_ENDIAN 4321 /* Sanity check the defines. We don't handle weird endianness. 
*/ #if !HAVE_LITTLE_ENDIAN && !HAVE_BIG_ENDIAN #error "Unknown endian" #elif HAVE_LITTLE_ENDIAN && HAVE_BIG_ENDIAN #error "Can't compile for both big and little endian." #elif HAVE_LITTLE_ENDIAN #define __BYTE_ORDER __LITTLE_ENDIAN #elif HAVE_BIG_ENDIAN #define __BYTE_ORDER __BIG_ENDIAN #endif #ifdef __CHECKER__ /* sparse needs forcing to remove bitwise attribute from ccan/short_types */ #define ENDIAN_CAST __attribute__((force)) #define ENDIAN_TYPE __attribute__((bitwise)) #else #define ENDIAN_CAST #define ENDIAN_TYPE #endif typedef uint64_t ENDIAN_TYPE leint64_t; typedef uint64_t ENDIAN_TYPE beint64_t; typedef uint32_t ENDIAN_TYPE leint32_t; typedef uint32_t ENDIAN_TYPE beint32_t; typedef uint16_t ENDIAN_TYPE leint16_t; typedef uint16_t ENDIAN_TYPE beint16_t; #if HAVE_LITTLE_ENDIAN /** * CPU_TO_LE64 - convert a constant uint64_t value to little-endian * @native: constant to convert */ #define CPU_TO_LE64(native) ((ENDIAN_CAST leint64_t)(native)) /** * CPU_TO_LE32 - convert a constant uint32_t value to little-endian * @native: constant to convert */ #define CPU_TO_LE32(native) ((ENDIAN_CAST leint32_t)(native)) /** * CPU_TO_LE16 - convert a constant uint16_t value to little-endian * @native: constant to convert */ #define CPU_TO_LE16(native) ((ENDIAN_CAST leint16_t)(native)) /** * LE64_TO_CPU - convert a little-endian uint64_t constant * @le_val: little-endian constant to convert */ #define LE64_TO_CPU(le_val) ((ENDIAN_CAST uint64_t)(le_val)) /** * LE32_TO_CPU - convert a little-endian uint32_t constant * @le_val: little-endian constant to convert */ #define LE32_TO_CPU(le_val) ((ENDIAN_CAST uint32_t)(le_val)) /** * LE16_TO_CPU - convert a little-endian uint16_t constant * @le_val: little-endian constant to convert */ #define LE16_TO_CPU(le_val) ((ENDIAN_CAST uint16_t)(le_val)) #else /* ... HAVE_BIG_ENDIAN */ #define CPU_TO_LE64(native) ((ENDIAN_CAST leint64_t)BSWAP_64(native)) #define CPU_TO_LE32(native) ((ENDIAN_CAST leint32_t)BSWAP_32(native)) #define CPU_TO_LE16(native) ((ENDIAN_CAST leint16_t)BSWAP_16(native)) #define LE64_TO_CPU(le_val) BSWAP_64((ENDIAN_CAST uint64_t)le_val) #define LE32_TO_CPU(le_val) BSWAP_32((ENDIAN_CAST uint32_t)le_val) #define LE16_TO_CPU(le_val) BSWAP_16((ENDIAN_CAST uint16_t)le_val) #endif /* HAVE_BIG_ENDIAN */ #if HAVE_BIG_ENDIAN /** * CPU_TO_BE64 - convert a constant uint64_t value to big-endian * @native: constant to convert */ #define CPU_TO_BE64(native) ((ENDIAN_CAST beint64_t)(native)) /** * CPU_TO_BE32 - convert a constant uint32_t value to big-endian * @native: constant to convert */ #define CPU_TO_BE32(native) ((ENDIAN_CAST beint32_t)(native)) /** * CPU_TO_BE16 - convert a constant uint16_t value to big-endian * @native: constant to convert */ #define CPU_TO_BE16(native) ((ENDIAN_CAST beint16_t)(native)) /** * BE64_TO_CPU - convert a big-endian uint64_t constant * @le_val: big-endian constant to convert */ #define BE64_TO_CPU(le_val) ((ENDIAN_CAST uint64_t)(le_val)) /** * BE32_TO_CPU - convert a big-endian uint32_t constant * @le_val: big-endian constant to convert */ #define BE32_TO_CPU(le_val) ((ENDIAN_CAST uint32_t)(le_val)) /** * BE16_TO_CPU - convert a big-endian uint16_t constant * @le_val: big-endian constant to convert */ #define BE16_TO_CPU(le_val) ((ENDIAN_CAST uint16_t)(le_val)) #else /* ... 
HAVE_LITTLE_ENDIAN */ #define CPU_TO_BE64(native) ((ENDIAN_CAST beint64_t)BSWAP_64(native)) #define CPU_TO_BE32(native) ((ENDIAN_CAST beint32_t)BSWAP_32(native)) #define CPU_TO_BE16(native) ((ENDIAN_CAST beint16_t)BSWAP_16(native)) #define BE64_TO_CPU(le_val) BSWAP_64((ENDIAN_CAST uint64_t)le_val) #define BE32_TO_CPU(le_val) BSWAP_32((ENDIAN_CAST uint32_t)le_val) #define BE16_TO_CPU(le_val) BSWAP_16((ENDIAN_CAST uint16_t)le_val) #endif /* HAVE_LITTLE_ENDIAN */ /** * cpu_to_le64 - convert a uint64_t value to little-endian * @native: value to convert */ static inline leint64_t cpu_to_le64(uint64_t native) { return CPU_TO_LE64(native); } /** * cpu_to_le32 - convert a uint32_t value to little-endian * @native: value to convert */ static inline leint32_t cpu_to_le32(uint32_t native) { return CPU_TO_LE32(native); } /** * cpu_to_le16 - convert a uint16_t value to little-endian * @native: value to convert */ static inline leint16_t cpu_to_le16(uint16_t native) { return CPU_TO_LE16(native); } /** * le64_to_cpu - convert a little-endian uint64_t value * @le_val: little-endian value to convert */ static inline uint64_t le64_to_cpu(leint64_t le_val) { return LE64_TO_CPU(le_val); } /** * le32_to_cpu - convert a little-endian uint32_t value * @le_val: little-endian value to convert */ static inline uint32_t le32_to_cpu(leint32_t le_val) { return LE32_TO_CPU(le_val); } /** * le16_to_cpu - convert a little-endian uint16_t value * @le_val: little-endian value to convert */ static inline uint16_t le16_to_cpu(leint16_t le_val) { return LE16_TO_CPU(le_val); } /** * cpu_to_be64 - convert a uint64_t value to big endian. * @native: value to convert */ static inline beint64_t cpu_to_be64(uint64_t native) { return CPU_TO_BE64(native); } /** * cpu_to_be32 - convert a uint32_t value to big endian. * @native: value to convert */ static inline beint32_t cpu_to_be32(uint32_t native) { return CPU_TO_BE32(native); } /** * cpu_to_be16 - convert a uint16_t value to big endian. * @native: value to convert */ static inline beint16_t cpu_to_be16(uint16_t native) { return CPU_TO_BE16(native); } /** * be64_to_cpu - convert a big-endian uint64_t value * @be_val: big-endian value to convert */ static inline uint64_t be64_to_cpu(beint64_t be_val) { return BE64_TO_CPU(be_val); } /** * be32_to_cpu - convert a big-endian uint32_t value * @be_val: big-endian value to convert */ static inline uint32_t be32_to_cpu(beint32_t be_val) { return BE32_TO_CPU(be_val); } /** * be16_to_cpu - convert a big-endian uint16_t value * @be_val: big-endian value to convert */ static inline uint16_t be16_to_cpu(beint16_t be_val) { return BE16_TO_CPU(be_val); } /* Whichever they include first, they get these definitions. */ #ifdef CCAN_SHORT_TYPES_H /** * be64/be32/be16 - 64/32/16 bit big-endian representation. */ typedef beint64_t be64; typedef beint32_t be32; typedef beint16_t be16; /** * le64/le32/le16 - 64/32/16 bit little-endian representation. 
*/ typedef leint64_t le64; typedef leint32_t le32; typedef leint16_t le16; #endif #endif /* CCAN_ENDIAN_H */ skiboot-5.10-rc4/ccan/endian/test/000077500000000000000000000000001324317060200167165ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/endian/test/compile_ok-constant.c000066400000000000000000000002741324317060200230350ustar00rootroot00000000000000#include struct foo { char one[BSWAP_16(0xFF00)]; char two[BSWAP_32(0xFF000000)]; char three[BSWAP_64(0xFF00000000000000ULL)]; }; int main(void) { return 0; } skiboot-5.10-rc4/ccan/endian/test/run.c000066400000000000000000000052711324317060200176730ustar00rootroot00000000000000#include #include #include #include int main(int argc, char *argv[]) { union { uint64_t u64; unsigned char u64_bytes[8]; } u64; union { uint32_t u32; unsigned char u32_bytes[4]; } u32; union { uint16_t u16; unsigned char u16_bytes[2]; } u16; (void)argc; (void)argv; plan_tests(48); /* Straight swap tests. */ u64.u64_bytes[0] = 0x00; u64.u64_bytes[1] = 0x11; u64.u64_bytes[2] = 0x22; u64.u64_bytes[3] = 0x33; u64.u64_bytes[4] = 0x44; u64.u64_bytes[5] = 0x55; u64.u64_bytes[6] = 0x66; u64.u64_bytes[7] = 0x77; u64.u64 = bswap_64(u64.u64); ok1(u64.u64_bytes[7] == 0x00); ok1(u64.u64_bytes[6] == 0x11); ok1(u64.u64_bytes[5] == 0x22); ok1(u64.u64_bytes[4] == 0x33); ok1(u64.u64_bytes[3] == 0x44); ok1(u64.u64_bytes[2] == 0x55); ok1(u64.u64_bytes[1] == 0x66); ok1(u64.u64_bytes[0] == 0x77); u32.u32_bytes[0] = 0x00; u32.u32_bytes[1] = 0x11; u32.u32_bytes[2] = 0x22; u32.u32_bytes[3] = 0x33; u32.u32 = bswap_32(u32.u32); ok1(u32.u32_bytes[3] == 0x00); ok1(u32.u32_bytes[2] == 0x11); ok1(u32.u32_bytes[1] == 0x22); ok1(u32.u32_bytes[0] == 0x33); u16.u16_bytes[0] = 0x00; u16.u16_bytes[1] = 0x11; u16.u16 = bswap_16(u16.u16); ok1(u16.u16_bytes[1] == 0x00); ok1(u16.u16_bytes[0] == 0x11); /* Endian tests. 
*/ u64.u64 = cpu_to_le64(0x0011223344556677ULL); ok1(u64.u64_bytes[0] == 0x77); ok1(u64.u64_bytes[1] == 0x66); ok1(u64.u64_bytes[2] == 0x55); ok1(u64.u64_bytes[3] == 0x44); ok1(u64.u64_bytes[4] == 0x33); ok1(u64.u64_bytes[5] == 0x22); ok1(u64.u64_bytes[6] == 0x11); ok1(u64.u64_bytes[7] == 0x00); ok1(le64_to_cpu(u64.u64) == 0x0011223344556677ULL); u64.u64 = cpu_to_be64(0x0011223344556677ULL); ok1(u64.u64_bytes[7] == 0x77); ok1(u64.u64_bytes[6] == 0x66); ok1(u64.u64_bytes[5] == 0x55); ok1(u64.u64_bytes[4] == 0x44); ok1(u64.u64_bytes[3] == 0x33); ok1(u64.u64_bytes[2] == 0x22); ok1(u64.u64_bytes[1] == 0x11); ok1(u64.u64_bytes[0] == 0x00); ok1(be64_to_cpu(u64.u64) == 0x0011223344556677ULL); u32.u32 = cpu_to_le32(0x00112233); ok1(u32.u32_bytes[0] == 0x33); ok1(u32.u32_bytes[1] == 0x22); ok1(u32.u32_bytes[2] == 0x11); ok1(u32.u32_bytes[3] == 0x00); ok1(le32_to_cpu(u32.u32) == 0x00112233); u32.u32 = cpu_to_be32(0x00112233); ok1(u32.u32_bytes[3] == 0x33); ok1(u32.u32_bytes[2] == 0x22); ok1(u32.u32_bytes[1] == 0x11); ok1(u32.u32_bytes[0] == 0x00); ok1(be32_to_cpu(u32.u32) == 0x00112233); u16.u16 = cpu_to_le16(0x0011); ok1(u16.u16_bytes[0] == 0x11); ok1(u16.u16_bytes[1] == 0x00); ok1(le16_to_cpu(u16.u16) == 0x0011); u16.u16 = cpu_to_be16(0x0011); ok1(u16.u16_bytes[1] == 0x11); ok1(u16.u16_bytes[0] == 0x00); ok1(be16_to_cpu(u16.u16) == 0x0011); exit(exit_status()); } skiboot-5.10-rc4/ccan/list/000077500000000000000000000000001324317060200154545ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/list/LICENSE000066400000000000000000000017771324317060200164750ustar00rootroot00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. skiboot-5.10-rc4/ccan/list/_info000066400000000000000000000027011324317060200164710ustar00rootroot00000000000000#include #include #include "config.h" /** * list - double linked list routines * * The list header contains routines for manipulating double linked lists. * It defines two types: struct list_head used for anchoring lists, and * struct list_node which is usually embedded in the structure which is placed * in the list. 
* * Example: * #include * #include * #include * #include * * struct parent { * const char *name; * struct list_head children; * unsigned int num_children; * }; * * struct child { * const char *name; * struct list_node list; * }; * * int main(int argc, char *argv[]) * { * struct parent p; * struct child *c; * unsigned int i; * * if (argc < 2) * errx(1, "Usage: %s parent children...", argv[0]); * * p.name = argv[1]; * list_head_init(&p.children); * p.num_children = 0; * for (i = 2; i < argc; i++) { * c = malloc(sizeof(*c)); * c->name = argv[i]; * list_add(&p.children, &c->list); * p.num_children++; * } * * printf("%s has %u children:", p.name, p.num_children); * list_for_each(&p.children, c, list) * printf("%s ", c->name); * printf("\n"); * return 0; * } * * License: BSD-MIT * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/container_of\n"); return 0; } return 1; } skiboot-5.10-rc4/ccan/list/list.c000066400000000000000000000017501324317060200165760ustar00rootroot00000000000000/* Licensed under BSD-MIT - see LICENSE file for details */ #include #include #include "list.h" static void *corrupt(const char *abortstr, const struct list_node *head, const struct list_node *node, unsigned int count) { if (abortstr) { fprintf(stderr, "%s: prev corrupt in node %p (%u) of %p\n", abortstr, node, count, head); abort(); } return NULL; } struct list_node *list_check_node(const struct list_node *node, const char *abortstr) { const struct list_node *p, *n; int count = 0; for (p = node, n = node->next; n != node; p = n, n = n->next) { count++; if (n->prev != p) return corrupt(abortstr, node, n, count); } /* Check prev on head node. */ if (node->prev != p) return corrupt(abortstr, node, node, 0); return (struct list_node *)node; } struct list_head *list_check(const struct list_head *h, const char *abortstr) { if (!list_check_node(&h->n, abortstr)) return NULL; return (struct list_head *)h; } skiboot-5.10-rc4/ccan/list/list.h000066400000000000000000000355551324317060200166150ustar00rootroot00000000000000/* Licensed under BSD-MIT - see LICENSE file for details */ #ifndef CCAN_LIST_H #define CCAN_LIST_H #include #include #include #include /** * struct list_node - an entry in a doubly-linked list * @next: next entry (self if empty) * @prev: previous entry (self if empty) * * This is used as an entry in a linked list. * Example: * struct child { * const char *name; * // Linked list of all us children. * struct list_node list; * }; */ struct list_node { struct list_node *next, *prev; }; /** * struct list_head - the head of a doubly-linked list * @h: the list_head (containing next and prev pointers) * * This is used as the head of a linked list. * Example: * struct parent { * const char *name; * struct list_head children; * unsigned int num_children; * }; */ struct list_head { struct list_node n; }; /** * list_check - check head of a list for consistency * @h: the list_head * @abortstr: the location to print on aborting, or NULL. * * Because list_nodes have redundant information, consistency checking between * the back and forward links can be done. This is useful as a debugging check. * If @abortstr is non-NULL, that will be printed in a diagnostic if the list * is inconsistent, and the function will abort. * * Returns the list head if the list is consistent, NULL if not (it * can never return NULL if @abortstr is set). 
* * See also: list_check_node() * * Example: * static void dump_parent(struct parent *p) * { * struct child *c; * * printf("%s (%u children):\n", p->name, p->num_children); * list_check(&p->children, "bad child list"); * list_for_each(&p->children, c, list) * printf(" -> %s\n", c->name); * } */ struct list_head *list_check(const struct list_head *h, const char *abortstr); /** * list_check_node - check node of a list for consistency * @n: the list_node * @abortstr: the location to print on aborting, or NULL. * * Check consistency of the list node is in (it must be in one). * * See also: list_check() * * Example: * static void dump_child(const struct child *c) * { * list_check_node(&c->list, "bad child list"); * printf("%s\n", c->name); * } */ struct list_node *list_check_node(const struct list_node *n, const char *abortstr); #ifdef CCAN_LIST_DEBUG #define list_debug(h) list_check((h), __func__) #define list_debug_node(n) list_check_node((n), __func__) #else #define list_debug(h) (h) #define list_debug_node(n) (n) #endif /** * LIST_HEAD_INIT - initializer for an empty list_head * @name: the name of the list. * * Explicit initializer for an empty list. * * See also: * LIST_HEAD, list_head_init() * * Example: * static struct list_head my_list = LIST_HEAD_INIT(my_list); */ #define LIST_HEAD_INIT(name) { { &name.n, &name.n } } /** * LIST_HEAD - define and initialize an empty list_head * @name: the name of the list. * * The LIST_HEAD macro defines a list_head and initializes it to an empty * list. It can be prepended by "static" to define a static list_head. * * See also: * LIST_HEAD_INIT, list_head_init() * * Example: * static LIST_HEAD(my_global_list); */ #define LIST_HEAD(name) \ struct list_head name = LIST_HEAD_INIT(name) /** * list_head_init - initialize a list_head * @h: the list_head to set to the empty list * * Example: * ... * struct parent *parent = malloc(sizeof(*parent)); * * list_head_init(&parent->children); * parent->num_children = 0; */ static inline void list_head_init(struct list_head *h) { h->n.next = h->n.prev = &h->n; } /** * list_add - add an entry at the start of a linked list. * @h: the list_head to add the node to * @n: the list_node to add to the list. * * The list_node does not need to be initialized; it will be overwritten. * Example: * struct child *child = malloc(sizeof(*child)); * * child->name = "marvin"; * list_add(&parent->children, &child->list); * parent->num_children++; */ static inline void list_add(struct list_head *h, struct list_node *n) { n->next = h->n.next; n->prev = &h->n; h->n.next->prev = n; h->n.next = n; (void)list_debug(h); } /** * list_add_before - add an entry before another entry. * @h: the list_head to add the node to (we use it for debug purposes, can be NULL) * @n: the list_node to add to the list. * @p: the list_node of the other entry * * The list_node does not need to be initialized; it will be overwritten. */ static inline void list_add_before(struct list_head *h, struct list_node *n, struct list_node *p) { n->next = p; n->prev = p->prev; p->prev = n; n->prev->next = n; if (h) (void)list_debug(h); } /** * list_add_tail - add an entry at the end of a linked list. * @h: the list_head to add the node to * @n: the list_node to add to the list. * * The list_node does not need to be initialized; it will be overwritten. 
* Example: * list_add_tail(&parent->children, &child->list); * parent->num_children++; */ static inline void list_add_tail(struct list_head *h, struct list_node *n) { n->next = &h->n; n->prev = h->n.prev; h->n.prev->next = n; h->n.prev = n; (void)list_debug(h); } /** * list_empty - is a list empty? * @h: the list_head * * If the list is empty, returns true. * * Example: * assert(list_empty(&parent->children) == (parent->num_children == 0)); */ static inline bool list_empty(const struct list_head *h) { (void)list_debug(h); return h->n.next == &h->n; } /** * list_empty_nocheck - is a list empty? * @h: the list_head * * If the list is empty, returns true. This doesn't perform any * debug check for list consistency, so it can be called without * locks, racing with the list being modified. This is ok for * checks where an incorrect result is not an issue (optimized * bail out path for example). */ static inline bool list_empty_nocheck(const struct list_head *h) { return h->n.next == &h->n; } /** * list_del - delete an entry from an (unknown) linked list. * @n: the list_node to delete from the list. * * Note that this leaves @n in an undefined state; it can be added to * another list, but not deleted again. * * See also: * list_del_from() * * Example: * list_del(&child->list); * parent->num_children--; */ static inline void list_del(struct list_node *n) { (void)list_debug_node(n); n->next->prev = n->prev; n->prev->next = n->next; #ifdef CCAN_LIST_DEBUG /* Catch use-after-del. */ n->next = n->prev = NULL; #endif } /** * list_del_from - delete an entry from a known linked list. * @h: the list_head the node is in. * @n: the list_node to delete from the list. * * This explicitly indicates which list a node is expected to be in, * which is better documentation and can catch more bugs. * * See also: list_del() * * Example: * list_del_from(&parent->children, &child->list); * parent->num_children--; */ static inline void list_del_from(struct list_head *h, struct list_node *n) { #ifdef CCAN_LIST_DEBUG { /* Thorough check: make sure it was in list! */ struct list_node *i; for (i = h->n.next; i != n; i = i->next) assert(i != &h->n); } #endif /* CCAN_LIST_DEBUG */ /* Quick test that catches a surprising number of bugs. */ assert(!list_empty(h)); list_del(n); } /** * list_entry - convert a list_node back into the structure containing it. * @n: the list_node * @type: the type of the entry * @member: the list_node member of the type * * Example: * // First list entry is children.next; convert back to child. * child = list_entry(parent->children.n.next, struct child, list); * * See Also: * list_top(), list_for_each() */ #define list_entry(n, type, member) container_of(n, type, member) /** * list_top - get the first entry in a list * @h: the list_head * @type: the type of the entry * @member: the list_node member of the type * * If the list is empty, returns NULL. 
* * Example: * struct child *first; * first = list_top(&parent->children, struct child, list); * if (!first) * printf("Empty list!\n"); */ #define list_top(h, type, member) \ ((type *)list_top_((h), list_off_(type, member))) static inline const void *list_top_(const struct list_head *h, size_t off) { if (list_empty(h)) return NULL; return (const char *)h->n.next - off; } /** * list_pop - get the first entry in a list and dequeue it * @h: the list_head * @type: the type of the entry * @member: the list_node member of the type */ #define list_pop(h, type, member) \ ((type *)list_pop_((h), list_off_(type, member))) static inline const void *list_pop_(struct list_head *h, size_t off) { struct list_node *n; if (list_empty(h)) return NULL; n = h->n.next; list_del(n); return (const char *)n - off; } /** * list_tail - get the last entry in a list * @h: the list_head * @type: the type of the entry * @member: the list_node member of the type * * If the list is empty, returns NULL. * * Example: * struct child *last; * last = list_tail(&parent->children, struct child, list); * if (!last) * printf("Empty list!\n"); */ #define list_tail(h, type, member) \ ((type *)list_tail_((h), list_off_(type, member))) static inline const void *list_tail_(const struct list_head *h, size_t off) { if (list_empty(h)) return NULL; return (const char *)h->n.prev - off; } /** * list_for_each - iterate through a list. * @h: the list_head (warning: evaluated multiple times!) * @i: the structure containing the list_node * @member: the list_node member of the structure * * This is a convenient wrapper to iterate @i over the entire list. It's * a for loop, so you can break and continue as normal. * * Example: * list_for_each(&parent->children, child, list) * printf("Name: %s\n", child->name); */ #define list_for_each(h, i, member) \ list_for_each_off(h, i, list_off_var_(i, member)) /** * list_for_each_rev - iterate through a list backwards. * @h: the list_head * @i: the structure containing the list_node * @member: the list_node member of the structure * * This is a convenient wrapper to iterate @i over the entire list. It's * a for loop, so you can break and continue as normal. * * Example: * list_for_each_rev(&parent->children, child, list) * printf("Name: %s\n", child->name); */ #define list_for_each_rev(h, i, member) \ for (i = container_of_var(list_debug(h)->n.prev, i, member); \ &i->member != &(h)->n; \ i = container_of_var(i->member.prev, i, member)) /** * list_for_each_safe - iterate through a list, maybe during deletion * @h: the list_head * @i: the structure containing the list_node * @nxt: the structure containing the list_node * @member: the list_node member of the structure * * This is a convenient wrapper to iterate @i over the entire list. It's * a for loop, so you can break and continue as normal. The extra variable * @nxt is used to hold the next element, so you can delete @i from the list. * * Example: * struct child *next; * list_for_each_safe(&parent->children, child, next, list) { * list_del(&child->list); * parent->num_children--; * } */ #define list_for_each_safe(h, i, nxt, member) \ list_for_each_safe_off(h, i, nxt, list_off_var_(i, member)) /** * list_for_each_off - iterate through a list of memory regions. * @h: the list_head * @i: the pointer to a memory region which contains list node data. * @off: offset (relative to @i) at which list node data resides. * * This is a low-level wrapper to iterate @i over the entire list, used to * implement all other, more high-level, for-each constructs. 
It's a for loop, * so you can break and continue as normal. * * WARNING! Being the low-level macro that it is, this wrapper doesn't know * nor care about the type of @i. The only assumption made is that @i points * to a chunk of memory that at some @offset, relative to @i, contains a * properly filled `struct list_node' which in turn contains pointers to * memory chunks and it's turtles all the way down. With all that in mind * remember that given the wrong pointer/offset couple this macro will * happily churn all your memory until SEGFAULT stops it, in other words * caveat emptor. * * It is worth mentioning that one of the legitimate use-cases for that wrapper * is operation on opaque types with known offset for `struct list_node' * member (preferably 0), because it allows you not to disclose the type of * @i. * * Example: * list_for_each_off(&parent->children, child, * offsetof(struct child, list)) * printf("Name: %s\n", child->name); */ #define list_for_each_off(h, i, off) \ for (i = list_node_to_off_(list_debug(h)->n.next, (off)); \ list_node_from_off_((void *)i, (off)) != &(h)->n; \ i = list_node_to_off_(list_node_from_off_((void *)i, (off))->next, \ (off))) /** * list_for_each_safe_off - iterate through a list of memory regions, maybe * during deletion * @h: the list_head * @i: the pointer to a memory region which contains list node data. * @nxt: the structure containing the list_node * @off: offset (relative to @i) at which list node data resides. * * For details see `list_for_each_off' and `list_for_each_safe' * descriptions. * * Example: * list_for_each_safe_off(&parent->children, child, * next, offsetof(struct child, list)) * printf("Name: %s\n", child->name); */ #define list_for_each_safe_off(h, i, nxt, off) \ for (i = list_node_to_off_(list_debug(h)->n.next, (off)), \ nxt = list_node_to_off_(list_node_from_off_(i, (off))->next, \ (off)); \ list_node_from_off_(i, (off)) != &(h)->n; \ i = nxt, \ nxt = list_node_to_off_(list_node_from_off_(i, (off))->next, \ (off))) /* Other -off variants. */ #define list_entry_off(n, type, off) \ ((type *)list_node_from_off_((n), (off))) #define list_head_off(h, type, off) \ ((type *)list_head_off((h), (off))) #define list_tail_off(h, type, off) \ ((type *)list_tail_((h), (off))) #define list_add_off(h, n, off) \ list_add((h), list_node_from_off_((n), (off))) #define list_del_off(n, off) \ list_del(list_node_from_off_((n), (off))) #define list_del_from_off(h, n, off) \ list_del_from(h, list_node_from_off_((n), (off))) /* Offset helper functions so we only single-evaluate. */ static inline void *list_node_to_off_(struct list_node *node, size_t off) { return (void *)((char *)node - off); } static inline struct list_node *list_node_from_off_(void *ptr, size_t off) { return (struct list_node *)((char *)ptr + off); } /* Get the offset of the member, but make sure it's a list_node. 
*/ #define list_off_(type, member) \ (container_off(type, member) + \ check_type(((type *)0)->member, struct list_node)) #define list_off_var_(var, member) \ (container_off_var(var, member) + \ check_type(var->member, struct list_node)) #endif /* CCAN_LIST_H */ skiboot-5.10-rc4/ccan/list/test/000077500000000000000000000000001324317060200164335ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/list/test/compile_ok-constant.c000066400000000000000000000015641324317060200225550ustar00rootroot00000000000000#include #include #include #include #include struct child { const char *name; struct list_node list; }; static bool children(const struct list_head *list) { return !list_empty(list); } static const struct child *first_child(const struct list_head *list) { return list_top(list, struct child, list); } static const struct child *last_child(const struct list_head *list) { return list_tail(list, struct child, list); } static void check_children(const struct list_head *list) { list_check(list, "bad child list"); } static void print_children(const struct list_head *list) { const struct child *c; list_for_each(list, c, list) printf("%s\n", c->name); } int main(void) { LIST_HEAD(h); children(&h); first_child(&h); last_child(&h); check_children(&h); print_children(&h); return 0; } skiboot-5.10-rc4/ccan/list/test/helper.c000066400000000000000000000024261324317060200200620ustar00rootroot00000000000000#include #include #include #include #include "helper.h" #define ANSWER_TO_THE_ULTIMATE_QUESTION_OF_LIFE_THE_UNIVERSE_AND_EVERYTHING \ (42) struct opaque { struct list_node list; size_t secret_offset; char secret_drawer[42]; }; static bool not_randomized = true; struct opaque *create_opaque_blob(void) { struct opaque *blob = calloc(1, sizeof(struct opaque)); if (not_randomized) { srandom((int)time(NULL)); not_randomized = false; } blob->secret_offset = random() % (sizeof(blob->secret_drawer)); blob->secret_drawer[blob->secret_offset] = ANSWER_TO_THE_ULTIMATE_QUESTION_OF_LIFE_THE_UNIVERSE_AND_EVERYTHING; return blob; } bool if_blobs_know_the_secret(struct opaque *blob) { bool answer = true; int i; for (i = 0; i < sizeof(blob->secret_drawer) / sizeof(blob->secret_drawer[0]); i++) if (i != blob->secret_offset) answer = answer && (blob->secret_drawer[i] == 0); else answer = answer && (blob->secret_drawer[blob->secret_offset] == ANSWER_TO_THE_ULTIMATE_QUESTION_OF_LIFE_THE_UNIVERSE_AND_EVERYTHING); return answer; } void destroy_opaque_blob(struct opaque *blob) { free(blob); } skiboot-5.10-rc4/ccan/list/test/helper.h000066400000000000000000000003711324317060200200640ustar00rootroot00000000000000/* These are in a separate C file so we can test undefined structures. */ struct opaque; typedef struct opaque opaque_t; opaque_t *create_opaque_blob(void); bool if_blobs_know_the_secret(opaque_t *blob); void destroy_opaque_blob(opaque_t *blob); skiboot-5.10-rc4/ccan/list/test/run-check-corrupt.c000066400000000000000000000041151324317060200221530ustar00rootroot00000000000000#include #include #include #include #include #include /* We don't actually want it to exit... */ static jmp_buf aborted; #define abort() longjmp(aborted, 1) #define fprintf my_fprintf static char printf_buffer[1000]; static int my_fprintf(FILE *stream, const char *format, ...) 
{ va_list ap; int ret; (void)stream; va_start(ap, format); ret = vsnprintf(printf_buffer, sizeof(printf_buffer), format, ap); va_end(ap); return ret; } #include #include #include int main(int argc, char *argv[]) { struct list_head list; struct list_node n1; char expect[100]; (void)argc; (void)argv; plan_tests(9); /* Empty list. */ list.n.next = &list.n; list.n.prev = &list.n; ok1(list_check(&list, NULL) == &list); /* Bad back ptr */ list.n.prev = &n1; /* Non-aborting version. */ ok1(list_check(&list, NULL) == NULL); /* Aborting version. */ sprintf(expect, "test message: prev corrupt in node %p (0) of %p\n", &list, &list); if (setjmp(aborted) == 0) { list_check(&list, "test message"); fail("list_check on empty with bad back ptr didn't fail!"); } else { ok1(strcmp(printf_buffer, expect) == 0); } /* n1 in list. */ list.n.next = &n1; list.n.prev = &n1; n1.prev = &list.n; n1.next = &list.n; ok1(list_check(&list, NULL) == &list); ok1(list_check_node(&n1, NULL) == &n1); /* Bad back ptr */ n1.prev = &n1; ok1(list_check(&list, NULL) == NULL); ok1(list_check_node(&n1, NULL) == NULL); /* Aborting version. */ sprintf(expect, "test message: prev corrupt in node %p (1) of %p\n", &n1, &list); if (setjmp(aborted) == 0) { list_check(&list, "test message"); fail("list_check on n1 bad back ptr didn't fail!"); } else { ok1(strcmp(printf_buffer, expect) == 0); } sprintf(expect, "test message: prev corrupt in node %p (0) of %p\n", &n1, &n1); if (setjmp(aborted) == 0) { list_check_node(&n1, "test message"); fail("list_check_node on n1 bad back ptr didn't fail!"); } else { ok1(strcmp(printf_buffer, expect) == 0); } return exit_status(); } skiboot-5.10-rc4/ccan/list/test/run-list_del_from-assert.c000066400000000000000000000014361324317060200235260ustar00rootroot00000000000000#define CCAN_LIST_DEBUG 1 #include #include #include #include #include #include #include int main(int argc, char *argv[]) { struct list_head list1, list2; struct list_node n1, n2, n3; pid_t child; int status; (void)argc; (void)argv; plan_tests(1); list_head_init(&list1); list_head_init(&list2); list_add(&list1, &n1); list_add(&list2, &n2); list_add_tail(&list2, &n3); child = fork(); if (child) { wait(&status); } else { close(2); /* Close stderr so we don't print confusing assert */ /* This should abort. */ list_del_from(&list1, &n3); exit(0); } ok1(WIFSIGNALED(status) && WTERMSIG(status) == SIGABRT); list_del_from(&list2, &n3); return exit_status(); } skiboot-5.10-rc4/ccan/list/test/run-single-eval.c000066400000000000000000000112041324317060200216050ustar00rootroot00000000000000/* Make sure macros only evaluate their args once. 
*/ #include #include #include struct parent { const char *name; struct list_head children; unsigned int num_children; int eval_count; }; struct child { const char *name; struct list_node list; }; static LIST_HEAD(static_list); #define ref(obj, counter) ((counter)++, (obj)) int main(int argc, char *argv[]) { struct parent parent; struct child c1, c2, c3, *c, *n; unsigned int i; unsigned int static_count = 0, parent_count = 0, list_count = 0, node_count = 0; struct list_head list = LIST_HEAD_INIT(list); (void)argc; (void)argv; plan_tests(74); /* Test LIST_HEAD, LIST_HEAD_INIT, list_empty and check_list */ ok1(list_empty(ref(&static_list, static_count))); ok1(static_count == 1); ok1(list_check(ref(&static_list, static_count), NULL)); ok1(static_count == 2); ok1(list_empty(ref(&list, list_count))); ok1(list_count == 1); ok1(list_check(ref(&list, list_count), NULL)); ok1(list_count == 2); parent.num_children = 0; list_head_init(ref(&parent.children, parent_count)); ok1(parent_count == 1); /* Test list_head_init */ ok1(list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 2); ok1(list_check(ref(&parent.children, parent_count), NULL)); ok1(parent_count == 3); c2.name = "c2"; list_add(ref(&parent.children, parent_count), &c2.list); ok1(parent_count == 4); /* Test list_add and !list_empty. */ ok1(!list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 5); ok1(c2.list.next == &parent.children.n); ok1(c2.list.prev == &parent.children.n); ok1(parent.children.n.next == &c2.list); ok1(parent.children.n.prev == &c2.list); /* Test list_check */ ok1(list_check(ref(&parent.children, parent_count), NULL)); ok1(parent_count == 6); c1.name = "c1"; list_add(ref(&parent.children, parent_count), &c1.list); ok1(parent_count == 7); /* Test list_add and !list_empty. */ ok1(!list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 8); ok1(c2.list.next == &parent.children.n); ok1(c2.list.prev == &c1.list); ok1(parent.children.n.next == &c1.list); ok1(parent.children.n.prev == &c2.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.n); /* Test list_check */ ok1(list_check(ref(&parent.children, parent_count), NULL)); ok1(parent_count == 9); c3.name = "c3"; list_add_tail(ref(&parent.children, parent_count), &c3.list); ok1(parent_count == 10); /* Test list_add_tail and !list_empty. */ ok1(!list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 11); ok1(parent.children.n.next == &c1.list); ok1(parent.children.n.prev == &c3.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.n); ok1(c2.list.next == &c3.list); ok1(c2.list.prev == &c1.list); ok1(c3.list.next == &parent.children.n); ok1(c3.list.prev == &c2.list); /* Test list_check */ ok1(list_check(ref(&parent.children, parent_count), NULL)); ok1(parent_count == 12); /* Test list_check_node */ ok1(list_check_node(&c1.list, NULL)); ok1(list_check_node(&c2.list, NULL)); ok1(list_check_node(&c3.list, NULL)); /* Test list_top */ ok1(list_top(ref(&parent.children, parent_count), struct child, list) == &c1); ok1(parent_count == 13); /* Test list_tail */ ok1(list_tail(ref(&parent.children, parent_count), struct child, list) == &c3); ok1(parent_count == 14); /* Test list_for_each. */ i = 0; list_for_each(&parent.children, c, list) { switch (i++) { case 0: ok1(c == &c1); break; case 1: ok1(c == &c2); break; case 2: ok1(c == &c3); break; } if (i > 2) break; } ok1(i == 3); /* Test list_for_each_safe, list_del and list_del_from. 
*/ i = 0; list_for_each_safe(&parent.children, c, n, list) { switch (i++) { case 0: ok1(c == &c1); list_del(ref(&c->list, node_count)); ok1(node_count == 1); break; case 1: ok1(c == &c2); list_del_from(ref(&parent.children, parent_count), ref(&c->list, node_count)); ok1(node_count == 2); break; case 2: ok1(c == &c3); list_del_from(ref(&parent.children, parent_count), ref(&c->list, node_count)); ok1(node_count == 3); break; } ok1(list_check(ref(&parent.children, parent_count), NULL)); if (i > 2) break; } ok1(i == 3); ok1(parent_count == 19); ok1(list_empty(ref(&parent.children, parent_count))); ok1(parent_count == 20); /* Test list_top/list_tail on empty list. */ ok1(list_top(ref(&parent.children, parent_count), struct child, list) == NULL); ok1(parent_count == 21); ok1(list_tail(ref(&parent.children, parent_count), struct child, list) == NULL); ok1(parent_count == 22); return exit_status(); } skiboot-5.10-rc4/ccan/list/test/run-with-debug.c000066400000000000000000000001641324317060200214410ustar00rootroot00000000000000/* Just like run.c, but with all debug checks enabled. */ #define CCAN_LIST_DEBUG 1 #include skiboot-5.10-rc4/ccan/list/test/run.c000066400000000000000000000112431324317060200174040ustar00rootroot00000000000000#include #include #include #include #include "helper.h" struct parent { const char *name; struct list_head children; unsigned int num_children; }; struct child { const char *name; struct list_node list; }; static LIST_HEAD(static_list); int main(int argc, char *argv[]) { struct parent parent; struct child c1, c2, c3, *c, *n; unsigned int i; struct list_head list = LIST_HEAD_INIT(list); opaque_t *q, *nq; struct list_head opaque_list = LIST_HEAD_INIT(opaque_list); (void)argc; (void)argv; plan_tests(65); /* Test LIST_HEAD, LIST_HEAD_INIT, list_empty and check_list */ ok1(list_empty(&static_list)); ok1(list_check(&static_list, NULL)); ok1(list_empty(&list)); ok1(list_check(&list, NULL)); parent.num_children = 0; list_head_init(&parent.children); /* Test list_head_init */ ok1(list_empty(&parent.children)); ok1(list_check(&parent.children, NULL)); c2.name = "c2"; list_add(&parent.children, &c2.list); /* Test list_add and !list_empty. */ ok1(!list_empty(&parent.children)); ok1(c2.list.next == &parent.children.n); ok1(c2.list.prev == &parent.children.n); ok1(parent.children.n.next == &c2.list); ok1(parent.children.n.prev == &c2.list); /* Test list_check */ ok1(list_check(&parent.children, NULL)); c1.name = "c1"; list_add(&parent.children, &c1.list); /* Test list_add and !list_empty. */ ok1(!list_empty(&parent.children)); ok1(c2.list.next == &parent.children.n); ok1(c2.list.prev == &c1.list); ok1(parent.children.n.next == &c1.list); ok1(parent.children.n.prev == &c2.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.n); /* Test list_check */ ok1(list_check(&parent.children, NULL)); c3.name = "c3"; list_add_tail(&parent.children, &c3.list); /* Test list_add_tail and !list_empty. 
*/ ok1(!list_empty(&parent.children)); ok1(parent.children.n.next == &c1.list); ok1(parent.children.n.prev == &c3.list); ok1(c1.list.next == &c2.list); ok1(c1.list.prev == &parent.children.n); ok1(c2.list.next == &c3.list); ok1(c2.list.prev == &c1.list); ok1(c3.list.next == &parent.children.n); ok1(c3.list.prev == &c2.list); /* Test list_check */ ok1(list_check(&parent.children, NULL)); /* Test list_check_node */ ok1(list_check_node(&c1.list, NULL)); ok1(list_check_node(&c2.list, NULL)); ok1(list_check_node(&c3.list, NULL)); /* Test list_top */ ok1(list_top(&parent.children, struct child, list) == &c1); /* Test list_tail */ ok1(list_tail(&parent.children, struct child, list) == &c3); /* Test list_for_each. */ i = 0; list_for_each(&parent.children, c, list) { switch (i++) { case 0: ok1(c == &c1); break; case 1: ok1(c == &c2); break; case 2: ok1(c == &c3); break; } if (i > 2) break; } ok1(i == 3); /* Test list_for_each_rev. */ i = 0; list_for_each_rev(&parent.children, c, list) { switch (i++) { case 0: ok1(c == &c3); break; case 1: ok1(c == &c2); break; case 2: ok1(c == &c1); break; } if (i > 2) break; } ok1(i == 3); /* Test list_for_each_safe, list_del and list_del_from. */ i = 0; list_for_each_safe(&parent.children, c, n, list) { switch (i++) { case 0: ok1(c == &c1); list_del(&c->list); break; case 1: ok1(c == &c2); list_del_from(&parent.children, &c->list); break; case 2: ok1(c == &c3); list_del_from(&parent.children, &c->list); break; } ok1(list_check(&parent.children, NULL)); if (i > 2) break; } ok1(i == 3); ok1(list_empty(&parent.children)); /* Test list_for_each_off. */ list_add_tail(&opaque_list, (struct list_node *)create_opaque_blob()); list_add_tail(&opaque_list, (struct list_node *)create_opaque_blob()); list_add_tail(&opaque_list, (struct list_node *)create_opaque_blob()); i = 0; list_for_each_off(&opaque_list, q, 0) { i++; ok1(if_blobs_know_the_secret(q)); } ok1(i == 3); /* Test list_for_each_safe_off, list_del_off and list_del_from_off. */ i = 0; list_for_each_safe_off(&opaque_list, q, nq, 0) { switch (i++) { case 0: ok1(if_blobs_know_the_secret(q)); list_del_off(q, 0); destroy_opaque_blob(q); break; case 1: ok1(if_blobs_know_the_secret(q)); list_del_from_off(&opaque_list, q, 0); destroy_opaque_blob(q); break; case 2: ok1(c == &c3); list_del_from_off(&opaque_list, q, 0); destroy_opaque_blob(q); break; } ok1(list_check(&opaque_list, NULL)); if (i > 2) break; } ok1(i == 3); ok1(list_empty(&opaque_list)); /* Test list_top/list_tail on empty list. */ ok1(list_top(&parent.children, struct child, list) == NULL); ok1(list_tail(&parent.children, struct child, list) == NULL); return exit_status(); } skiboot-5.10-rc4/ccan/short_types/000077500000000000000000000000001324317060200170645ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/short_types/LICENSE000066400000000000000000000143571324317060200201030ustar00rootroot00000000000000Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). 
Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; moral rights retained by the original author(s) and/or performer(s); publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; rights protecting the extraction, dissemination, use and reuse of data in a Work; database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. 
Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. skiboot-5.10-rc4/ccan/short_types/_info000066400000000000000000000051631324317060200201060ustar00rootroot00000000000000#include "config.h" #include #include /** * short_types - shorter names for standard integer types * * "C is a Spartan language, and so should your naming be." * -- Linus Torvalds * * The short_types header provides for convenient abbreviations for the * posixly-damned uint32_t types. If ccan/endian/endian.h is included, * it also provides be32/le32 for explicitly annotating types of specific * endian. * * Include this header, if only to stop people using these identifiers * for other things! * * Example: * #include * #include * #include * #include * * // Print nonsensical numerical comparison of POSIX vs. short_types. 
* #define stringify_1(x) #x * #define stringify(x) stringify_1(x) * * static void evaluate(size_t size, const char *posix, const char *sht, * unsigned int *posix_total, unsigned int *sht_total, * unsigned int *size_total) * { * printf("\t%ssigned %s: POSIX %zu%%, short %zu%%\n", * sht[0] == 'u' ? "un" : "", * sht+1, * strlen(posix)*100 / size, * strlen(sht)*100 / size); * *posix_total += strlen(posix); * *sht_total += strlen(sht); * *size_total += size; * } * * #define EVALUATE(psx, short, pt, st, t) \ * evaluate(sizeof(psx), stringify(psx), stringify(sht), pt, st, t) * * int main(void) * { * unsigned int posix_total = 0, sht_total = 0, size_total = 0; * * printf("Comparing size of type vs size of name:\n"); * * EVALUATE(uint8_t, u8, &posix_total, &sht_total, &size_total); * EVALUATE(int8_t, s8, &posix_total, &sht_total, &size_total); * EVALUATE(uint16_t, u16, &posix_total, &sht_total, &size_total); * EVALUATE(int16_t, s16, &posix_total, &sht_total, &size_total); * EVALUATE(uint32_t, u32, &posix_total, &sht_total, &size_total); * EVALUATE(int32_t, s32, &posix_total, &sht_total, &size_total); * EVALUATE(uint64_t, u64, &posix_total, &sht_total, &size_total); * EVALUATE(int64_t, s64, &posix_total, &sht_total, &size_total); * * printf("Conclusion:\n" * "\tPOSIX is %u%% LESS efficient than binary.\n" * "\tshort_types.h is %u%% MORE efficient than binary.\n", * (posix_total - size_total) * 100 / size_total, * (size_total - sht_total) * 100 / size_total); * return 0; * } * * License: CC0 (Public domain) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { return 0; } if (strcmp(argv[1], "testdepends") == 0) { printf("ccan/endian\n"); return 0; } return 1; } skiboot-5.10-rc4/ccan/short_types/short_types.h000066400000000000000000000014311324317060200216170ustar00rootroot00000000000000/* CC0 (Public domain) - see LICENSE file for details */ #ifndef CCAN_SHORT_TYPES_H #define CCAN_SHORT_TYPES_H #include /** * u64/s64/u32/s32/u16/s16/u8/s8 - short names for explicitly-sized types. */ typedef uint64_t u64; typedef int64_t s64; typedef uint32_t u32; typedef int32_t s32; typedef uint16_t u16; typedef int16_t s16; typedef uint8_t u8; typedef int8_t s8; /* Whichever they include first, they get these definitions. */ #ifdef CCAN_ENDIAN_H /** * be64/be32/be16 - 64/32/16 bit big-endian representation. */ typedef beint64_t be64; typedef beint32_t be32; typedef beint16_t be16; /** * le64/le32/le16 - 64/32/16 bit little-endian representation. 
*/ typedef leint64_t le64; typedef leint32_t le32; typedef leint16_t le16; #endif #endif /* CCAN_SHORT_TYPES_H */ skiboot-5.10-rc4/ccan/short_types/test/000077500000000000000000000000001324317060200200435ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/short_types/test/run-endian.c000066400000000000000000000005641324317060200222540ustar00rootroot00000000000000#include #include #include #include #include int __attribute__((const)) main(void) { plan_tests(6); ok1(sizeof(be64) == 8); ok1(sizeof(be32) == 4); ok1(sizeof(be16) == 2); ok1(sizeof(le64) == 8); ok1(sizeof(le32) == 4); ok1(sizeof(le16) == 2); return exit_status(); } skiboot-5.10-rc4/ccan/short_types/test/run.c000066400000000000000000000010541324317060200210130ustar00rootroot00000000000000#include #include #include #include int __attribute__((const)) main(void) { plan_tests(16); ok1(sizeof(u64) == 8); ok1(sizeof(s64) == 8); ok1(sizeof(u32) == 4); ok1(sizeof(s32) == 4); ok1(sizeof(u16) == 2); ok1(sizeof(s16) == 2); ok1(sizeof(u8) == 1); ok1(sizeof(s8) == 1); /* Signedness tests. */ ok1((u64)-1 > 0); ok1((u32)-1 > 0); ok1((u16)-1 > 0); ok1((u8)-1 > 0); ok1((s64)-1 < 0); ok1((s32)-1 < 0); ok1((s16)-1 < 0); ok1((s8)-1 < 0); return exit_status(); } skiboot-5.10-rc4/ccan/str/000077500000000000000000000000001324317060200153115ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/str/LICENSE000066400000000000000000000143571324317060200163300ustar00rootroot00000000000000Statement of Purpose The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. 1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). 
Copyright and Related Rights include, but are not limited to, the following: the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; moral rights retained by the original author(s) and/or performer(s); publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; rights protecting the extraction, dissemination, use and reuse of data in a Work; database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. 2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. 3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. 
Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. 4. Limitations and Disclaimers. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. skiboot-5.10-rc4/ccan/str/_info000066400000000000000000000025101324317060200163240ustar00rootroot00000000000000#include #include #include "config.h" /** * str - string helper routines * * This is a grab bag of functions for string operations, designed to enhance * the standard string.h. * * Note that if you define CCAN_STR_DEBUG, you will get extra compile * checks on common misuses of the following functions (they will now * be out-of-line, so there is a runtime penalty!). * * strstr, strchr, strrchr: * Return const char * if first argument is const (gcc only). * * isalnum, isalpha, isascii, isblank, iscntrl, isdigit, isgraph, * islower, isprint, ispunct, isspace, isupper, isxdigit: * Static and runtime check that input is EOF or an *unsigned* * char, as per C standard (really!). 
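 *
 * Editor's note (not in the original file): with CCAN_STR_DEBUG defined,
 * passing a plain char to these ctype wrappers becomes a compile-time
 * error on signed-char targets, which is what the compile_fail-is*.c
 * tests below exercise. Roughly:
 *	char c = argv[0][0];
 *	isalnum(c);	// rejected under CCAN_STR_DEBUG; use (unsigned char)c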
* * Example: * #include * #include * * int main(int argc, char *argv[]) * { * if (argv[1] && streq(argv[1], "--verbose")) * printf("verbose set\n"); * if (argv[1] && strstarts(argv[1], "--")) * printf("Some option set\n"); * if (argv[1] && strends(argv[1], "cow-powers")) * printf("Magic option set\n"); * return 0; * } * * License: CC0 (Public domain) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/build_assert\n"); return 0; } return 1; } skiboot-5.10-rc4/ccan/str/str.c000066400000000000000000000004331324317060200162650ustar00rootroot00000000000000/* CC0 (Public domain) - see LICENSE file for details */ #include size_t strcount(const char *haystack, const char *needle) { size_t i = 0, nlen = strlen(needle); while ((haystack = strstr(haystack, needle)) != NULL) { i++; haystack += nlen; } return i; } skiboot-5.10-rc4/ccan/str/str.h000066400000000000000000000064301324317060200162750ustar00rootroot00000000000000/* CC0 (Public domain) - see LICENSE file for details */ #ifndef CCAN_STR_H #define CCAN_STR_H #include "config.h" #include #include #include #include /** * streq - Are two strings equal? * @a: first string * @b: first string * * This macro is arguably more readable than "!strcmp(a, b)". * * Example: * if (streq(somestring, "")) * printf("String is empty!\n"); */ #define streq(a,b) (strcmp((a),(b)) == 0) /** * strstarts - Does this string start with this prefix? * @str: string to test * @prefix: prefix to look for at start of str * * Example: * if (strstarts(somestring, "foo")) * printf("String %s begins with 'foo'!\n", somestring); */ #define strstarts(str,prefix) (strncmp((str),(prefix),strlen(prefix)) == 0) /** * strends - Does this string end with this postfix? * @str: string to test * @postfix: postfix to look for at end of str * * Example: * if (strends(somestring, "foo")) * printf("String %s end with 'foo'!\n", somestring); */ static inline bool strends(const char *str, const char *postfix) { if (strlen(str) < strlen(postfix)) return false; return streq(str + strlen(str) - strlen(postfix), postfix); } /** * stringify - Turn expression into a string literal * @expr: any C expression * * Example: * #define PRINT_COND_IF_FALSE(cond) \ * ((cond) || printf("%s is false!", stringify(cond))) */ #define stringify(expr) stringify_1(expr) /* Double-indirection required to stringify expansions */ #define stringify_1(expr) #expr /** * strcount - Count number of (non-overlapping) occurrences of a substring. * @haystack: a C string * @needle: a substring * * Example: * assert(strcount("aaa aaa", "a") == 6); * assert(strcount("aaa aaa", "ab") == 0); * assert(strcount("aaa aaa", "aa") == 2); */ size_t strcount(const char *haystack, const char *needle); /** * STR_MAX_CHARS - Maximum possible size of numeric string for this type. * @type_or_expr: a pointer or integer type or expression. * * This provides enough space for a nul-terminated string which represents the * largest possible value for the type or expression. * * Note: The implementation adds extra space so hex values or negative * values will fit (eg. sprintf(... "%p"). ) * * Example: * char str[STR_MAX_CHARS(int)]; * * sprintf(str, "%i", 7); */ #define STR_MAX_CHARS(type_or_expr) \ ((sizeof(type_or_expr) * CHAR_BIT + 8) / 9 * 3 + 2 \ + STR_MAX_CHARS_TCHECK_(type_or_expr)) #if HAVE_TYPEOF /* Only a simple type can have 0 assigned, so test that. 
*/ #define STR_MAX_CHARS_TCHECK_(type_or_expr) \ ({ typeof(type_or_expr) x = 0; (void)x; 0; }) #else #define STR_MAX_CHARS_TCHECK_(type_or_expr) 0 #endif /* These checks force things out of line, hence they are under DEBUG. */ #ifdef CCAN_STR_DEBUG #if HAVE_TYPEOF /* With GNU magic, we can make const-respecting standard string functions. */ #undef strstr #undef strchr #undef strrchr /* + 0 is needed to decay array into pointer. */ #define strstr(haystack, needle) \ ((typeof((haystack) + 0))str_strstr((haystack), (needle))) #define strchr(haystack, c) \ ((typeof((haystack) + 0))str_strchr((haystack), (c))) #define strrchr(haystack, c) \ ((typeof((haystack) + 0))str_strrchr((haystack), (c))) #endif #endif /* CCAN_STR_DEBUG */ #endif /* CCAN_STR_H */ skiboot-5.10-rc4/ccan/str/test/000077500000000000000000000000001324317060200162705ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/str/test/compile_fail-STR_MAX_CHARS.c000066400000000000000000000004561324317060200231570ustar00rootroot00000000000000#include struct s { int val; }; int main(int argc, char *argv[]) { struct s #ifdef FAIL #if !HAVE_TYPEOF #error We need typeof to check STR_MAX_CHARS. #endif #else /* A pointer is OK. */ * #endif val; char str[STR_MAX_CHARS(val)]; str[0] = '\0'; return str[0] ? 0 : 1; } skiboot-5.10-rc4/ccan/str/test/compile_fail-isalnum.c000066400000000000000000000005611324317060200225270ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isalnum. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isalnum(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-isalpha.c000066400000000000000000000005611324317060200225000ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isalpha. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isalpha(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-isascii.c000066400000000000000000000005611324317060200225030ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isascii. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isascii(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-isblank.c000066400000000000000000000006531324317060200225040ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF || !HAVE_ISBLANK #error We need typeof to check isblank. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif #if HAVE_ISBLANK return isblank(c); #else return c; #endif } skiboot-5.10-rc4/ccan/str/test/compile_fail-iscntrl.c000066400000000000000000000005611324317060200225350ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check iscntrl. 
#endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return iscntrl(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-isdigit.c000066400000000000000000000005611324317060200225130ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isdigit. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isdigit(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-islower.c000066400000000000000000000005611324317060200225430ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check islower. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return islower(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-isprint.c000066400000000000000000000005611324317060200225470ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isprint. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isprint(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-ispunct.c000066400000000000000000000005611324317060200225440ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check ispunct. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return ispunct(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-isspace.c000066400000000000000000000005611324317060200225060ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isspace. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isspace(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-isupper.c000066400000000000000000000005611324317060200225460ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isupper. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. */ BUILD_ASSERT((char)255 < 0); #endif return isupper(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-isxdigit.c000066400000000000000000000005631324317060200227050ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_BUILTIN_TYPES_COMPATIBLE_P || !HAVE_TYPEOF #error We need typeof to check isxdigit. #endif char #else unsigned char #endif c = argv[0][0]; #ifdef FAIL /* Fake fail on unsigned char platforms. 
*/ BUILD_ASSERT((char)255 < 0); #endif return isxdigit(c); } skiboot-5.10-rc4/ccan/str/test/compile_fail-strchr.c000066400000000000000000000004211324317060200223570ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_TYPEOF #error We need typeof to check strstr. #endif #else const #endif char *ret; const char *str = "hello"; ret = strchr(str, 'l'); return ret ? 0 : 1; } skiboot-5.10-rc4/ccan/str/test/compile_fail-strrchr.c000066400000000000000000000004221324317060200225420ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_TYPEOF #error We need typeof to check strstr. #endif #else const #endif char *ret; const char *str = "hello"; ret = strrchr(str, 'l'); return ret ? 0 : 1; } skiboot-5.10-rc4/ccan/str/test/compile_fail-strstr.c000066400000000000000000000004241324317060200224160ustar00rootroot00000000000000#define CCAN_STR_DEBUG 1 #include int main(int argc, char *argv[]) { #ifdef FAIL #if !HAVE_TYPEOF #error We need typeof to check strstr. #endif #else const #endif char *ret; const char *str = "hello"; ret = strstr(str, "hell"); return ret ? 0 : 1; } skiboot-5.10-rc4/ccan/str/test/debug.c000066400000000000000000000003241324317060200175210ustar00rootroot00000000000000/* We can't use the normal "#include the .c file" trick, since this is contaminated by str.h's macro overrides. So we put it in all tests like this. */ #define CCAN_STR_DEBUG 1 #include skiboot-5.10-rc4/ccan/str/test/run-STR_MAX_CHARS.c000066400000000000000000000034131324317060200213340ustar00rootroot00000000000000#include #include #include #include #include int main(int argc, char *argv[]) { char *str = (char*)malloc(sizeof(char)*1000); struct { uint8_t u1byte; int8_t s1byte; uint16_t u2byte; int16_t s2byte; uint32_t u4byte; int32_t s4byte; uint64_t u8byte; int64_t s8byte; void *ptr; } types; (void)argc; (void)argv; assert(str); plan_tests(13); memset(&types, 0xFF, sizeof(types)); /* Hex versions */ sprintf(str, "0x%llx", (unsigned long long)types.u1byte); ok1(strlen(str) < STR_MAX_CHARS(types.u1byte)); sprintf(str, "0x%llx", (unsigned long long)types.u2byte); ok1(strlen(str) < STR_MAX_CHARS(types.u2byte)); sprintf(str, "0x%llx", (unsigned long long)types.u4byte); ok1(strlen(str) < STR_MAX_CHARS(types.u4byte)); sprintf(str, "0x%llx", (unsigned long long)types.u8byte); ok1(strlen(str) < STR_MAX_CHARS(types.u8byte)); /* Decimal versions */ sprintf(str, "%u", types.u1byte); ok1(strlen(str) < STR_MAX_CHARS(types.u1byte)); sprintf(str, "%d", types.s1byte); ok1(strlen(str) < STR_MAX_CHARS(types.s1byte)); sprintf(str, "%u", types.u2byte); ok1(strlen(str) < STR_MAX_CHARS(types.u2byte)); sprintf(str, "%d", types.s2byte); ok1(strlen(str) < STR_MAX_CHARS(types.s2byte)); sprintf(str, "%u", types.u4byte); ok1(strlen(str) < STR_MAX_CHARS(types.u4byte)); sprintf(str, "%d", types.s4byte); ok1(strlen(str) < STR_MAX_CHARS(types.s4byte)); sprintf(str, "%llu", (unsigned long long)types.u8byte); ok1(strlen(str) < STR_MAX_CHARS(types.u8byte)); sprintf(str, "%lld", (long long)types.s8byte); ok1(strlen(str) < STR_MAX_CHARS(types.s8byte)); /* Pointer version. 
*/ sprintf(str, "%p", types.ptr); ok1(strlen(str) < STR_MAX_CHARS(types.ptr)); free(str); return exit_status(); } skiboot-5.10-rc4/ccan/str/test/run.c000066400000000000000000000051651324317060200172470ustar00rootroot00000000000000#include #include #include #include #include #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0])) static const char *substrings[] = { "far", "bar", "baz", "b", "ba", "z", "ar", NULL }; #define NUM_SUBSTRINGS (ARRAY_SIZE(substrings) - 1) static char *strdup_rev(const char *s) { char *ret = strdup(s); unsigned int i; for (i = 0; i < strlen(s); i++) ret[i] = s[strlen(s) - i - 1]; return ret; } int main(int argc, char *argv[]) { unsigned int i, j, n; char *strings[NUM_SUBSTRINGS * NUM_SUBSTRINGS]; (void)argc; (void)argv; n = 0; for (i = 0; i < NUM_SUBSTRINGS; i++) { for (j = 0; j < NUM_SUBSTRINGS; j++) { strings[n] = malloc(strlen(substrings[i]) + strlen(substrings[j]) + 1); sprintf(strings[n++], "%s%s", substrings[i], substrings[j]); } } plan_tests(n * n * 5 + 16); for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { unsigned int k, identical = 0; char *reva, *revb; /* Find first difference. */ for (k = 0; strings[i][k]==strings[j][k]; k++) { if (k == strlen(strings[i])) { identical = 1; break; } } if (identical) ok1(streq(strings[i], strings[j])); else ok1(!streq(strings[i], strings[j])); /* Postfix test should be equivalent to prefix * test on reversed string. */ reva = strdup_rev(strings[i]); revb = strdup_rev(strings[j]); if (!strings[i][k]) { ok1(strstarts(strings[j], strings[i])); ok1(strends(revb, reva)); } else { ok1(!strstarts(strings[j], strings[i])); ok1(!strends(revb, reva)); } if (!strings[j][k]) { ok1(strstarts(strings[i], strings[j])); ok1(strends(reva, revb)); } else { ok1(!strstarts(strings[i], strings[j])); ok1(!strends(reva, revb)); } free(reva); free(revb); } } for (i = 0; i < n; i++) free(strings[i]); ok1(streq(stringify(NUM_SUBSTRINGS), "((sizeof(substrings) / sizeof(substrings[0])) - 1)")); ok1(streq(stringify(ARRAY_SIZE(substrings)), "(sizeof(substrings) / sizeof(substrings[0]))")); ok1(streq(stringify(i == 0), "i == 0")); ok1(strcount("aaaaaa", "b") == 0); ok1(strcount("aaaaaa", "a") == 6); ok1(strcount("aaaaaa", "aa") == 3); ok1(strcount("aaaaaa", "aaa") == 2); ok1(strcount("aaaaaa", "aaaa") == 1); ok1(strcount("aaaaaa", "aaaaa") == 1); ok1(strcount("aaaaaa", "aaaaaa") == 1); ok1(strcount("aaa aaa", "b") == 0); ok1(strcount("aaa aaa", "a") == 6); ok1(strcount("aaa aaa", "aa") == 2); ok1(strcount("aaa aaa", "aaa") == 2); ok1(strcount("aaa aaa", "aaaa") == 0); ok1(strcount("aaa aaa", "aaaaa") == 0); return exit_status(); } skiboot-5.10-rc4/ccan/tap/000077500000000000000000000000001324317060200152655ustar00rootroot00000000000000skiboot-5.10-rc4/ccan/tap/tap.h000066400000000000000000000014351324317060200162250ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Dummy tap.h for ccan tests */ #include #define plan_tests(x) do { } while(0) #define ok1(e) assert(e) #define ok(e, ...) 
assert(e) #define fail(...) assert(0) #define exit_status() (0) skiboot-5.10-rc4/core/000077500000000000000000000000001324317060200145255ustar00rootroot00000000000000skiboot-5.10-rc4/core/Makefile.inc000066400000000000000000000017361324317060200167440ustar00rootroot00000000000000# -*-Makefile-*- SUBDIRS += core CORE_OBJS = relocate.o console.o stack.o init.o chip.o mem_region.o CORE_OBJS += malloc.o lock.o cpu.o utils.o fdt.o opal.o interrupts.o timebase.o CORE_OBJS += opal-msg.o pci.o pci-iov.o pci-virt.o pci-slot.o pcie-slot.o CORE_OBJS += pci-opal.o fast-reboot.o device.o exceptions.o trace.o affinity.o CORE_OBJS += vpd.o hostservices.o platform.o nvram.o nvram-format.o hmi.o CORE_OBJS += console-log.o ipmi.o time-utils.o pel.o pool.o errorlog.o CORE_OBJS += timer.o i2c.o rtc.o flash.o sensor.o ipmi-opal.o CORE_OBJS += flash-subpartition.o bitmap.o buddy.o pci-quirk.o powercap.o psr.o CORE_OBJS += pci-dt-slot.o direct-controls.o cpufeatures.o ifeq ($(SKIBOOT_GCOV),1) CORE_OBJS += gcov-profiling.o endif CORE=core/built-in.o CFLAGS_SKIP_core/relocate.o = -pg -fstack-protector-all CFLAGS_SKIP_core/relocate.o += -fstack-protector -fstack-protector-strong CFLAGS_SKIP_core/relocate.o += -fprofile-arcs -ftest-coverage $(CORE): $(CORE_OBJS:%=core/%) skiboot-5.10-rc4/core/affinity.c000066400000000000000000000101021324317060200164740ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * * We currently construct our associativity properties as such: * * - For "chip" devices (bridges, memory, ...), 4 entries: * * - CCM node ID * - HW card ID * - HW module ID * - Chip ID * * The information is constructed based on the chip ID which (unlike * pHyp) is our HW chip ID (aka "XSCOM" chip ID). We use it to retrieve * the other properties from the corresponding chip/xscom node in the * device-tree. If those properties are absent, 0 is used. * * - For "core" devices, we add a 5th entry: * * - Core ID * * Here too, we do not use the "cooked" HW processor ID from HDAT but * instead use the real HW core ID which is basically the interrupt * server number of thread 0 on that core. * * * The ibm,associativity-reference-points property is currently set to * 4,4 indicating that the chip ID is our only reference point. This * should be extended to encompass the node IDs eventually. */ #include #include #include #include #include #include #include #include static uint32_t get_chip_node_id(struct proc_chip *chip) { /* If the xscom node has an ibm,ccm-node-id property, use it */ if (dt_has_node_property(chip->devnode, "ibm,ccm-node-id", NULL)) return dt_prop_get_u32(chip->devnode, "ibm,ccm-node-id"); /* * Else use the 3 top bits of the chip ID which should be * the node on both P7 and P8 */ return chip->id >> 3; } void add_associativity_ref_point(void) { int ref2 = 0x4; /* * Note about our use of reference points: * * Linux currently supports up to three levels of NUMA. 
We use the * first reference point for the node ID and the second reference * point for a second level of affinity. We always use the chip ID * (4) for the first reference point. * * Choosing the second level of affinity is model specific * unfortunately. Current POWER8E models should use the DCM * as a second level of NUMA. * * If there is a way to obtain this information from the FSP * that would be ideal, but for now hardwire our POWER8E setting. * * For GPU nodes we add a third level of NUMA, such that the * distance of the GPU node from all other nodes is uniformly * the highest. */ if (PVR_TYPE(mfspr(SPR_PVR)) == PVR_TYPE_P8E) ref2 = 0x3; dt_add_property_cells(opal_node, "ibm,associativity-reference-points", 0x4, ref2, 0x2); } void add_chip_dev_associativity(struct dt_node *dev) { uint32_t chip_id = dt_get_chip_id(dev); struct proc_chip *chip = get_chip(chip_id); uint32_t hw_cid, hw_mid; if (!chip) return; hw_cid = dt_prop_get_u32_def(chip->devnode, "ibm,hw-card-id", 0); hw_mid = dt_prop_get_u32_def(chip->devnode, "ibm,hw-module-id", 0); dt_add_property_cells(dev, "ibm,associativity", 4, get_chip_node_id(chip), hw_cid, hw_mid, chip_id); } void add_core_associativity(struct cpu_thread *cpu) { struct proc_chip *chip = get_chip(cpu->chip_id); uint32_t hw_cid, hw_mid, core_id; if (!chip) return; if (proc_gen == proc_gen_p7) core_id = (cpu->pir >> 2) & 0x7; else if (proc_gen == proc_gen_p8) core_id = (cpu->pir >> 3) & 0xf; else if (proc_gen == proc_gen_p9) core_id = (cpu->pir >> 2) & 0x1f; else return; hw_cid = dt_prop_get_u32_def(chip->devnode, "ibm,hw-card-id", 0); hw_mid = dt_prop_get_u32_def(chip->devnode, "ibm,hw-module-id", 0); dt_add_property_cells(cpu->node, "ibm,associativity", 5, get_chip_node_id(chip), hw_cid, hw_mid, chip->id, core_id); } skiboot-5.10-rc4/core/bitmap.c000066400000000000000000000027061324317060200161520ustar00rootroot00000000000000/* Copyright 2016 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bitmap.h" static int __bitmap_find_bit(bitmap_t map, unsigned int start, unsigned int count, bool value) { unsigned int el, first_bit; unsigned int end = start + count; bitmap_elem_t e, ev; int b; ev = value ? -1ul : 0; el = BITMAP_ELEM(start); first_bit = BITMAP_BIT(start); while (start < end) { e = map[el] ^ ev; e |= ((1ul << first_bit) - 1); if (~e) break; start = (start + BITMAP_ELSZ) & ~(BITMAP_ELSZ - 1); first_bit = 0; el++; } for (b = first_bit; b < BITMAP_ELSZ && start < end; b++,start++) { if ((e & (1ull << b)) == 0) return start; } return -1; } int bitmap_find_zero_bit(bitmap_t map, unsigned int start, unsigned int count) { return __bitmap_find_bit(map, start, count, false); } int bitmap_find_one_bit(bitmap_t map, unsigned int start, unsigned int count) { return __bitmap_find_bit(map, start, count, true); } skiboot-5.10-rc4/core/buddy.c000066400000000000000000000163541324317060200160110ustar00rootroot00000000000000/* Copyright 2016 IBM Corp. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include "buddy.h" #define BUDDY_DEBUG #undef BUDDY_VERBOSE #ifdef BUDDY_VERBOSE #define BUDDY_NOISE(fmt...) printf(fmt) #else #define BUDDY_NOISE(fmt...) do { } while(0) #endif static inline unsigned int buddy_map_size(struct buddy *b) { return 1u << (b->max_order + 1); } static inline unsigned int buddy_order_start(struct buddy *b, unsigned int order) { unsigned int level = b->max_order - order; /* Starting bit of index for order */ return 1u << level; } static inline unsigned int buddy_index_to_node(struct buddy *b, unsigned int index, unsigned int order) { /* Ensure the index is a multiple of the order */ assert((index & ((1u << order) - 1)) == 0); return buddy_order_start(b, order) + (index >> order); } static inline unsigned int buddy_node_to_index(struct buddy *b, unsigned int node, unsigned int order) { unsigned int start = buddy_order_start(b, order); return (node - start) << order; } #ifdef BUDDY_DEBUG static void buddy_check_alloc(struct buddy *b, unsigned int node) { assert(bitmap_tst_bit(b->map, node)); } static void buddy_check_alloc_down(struct buddy *b, unsigned int node) { unsigned int i, count = 1; while (node < buddy_map_size(b)) { for (i = 0; i < count; i++) buddy_check_alloc(b, node + i); /* Down one level */ node <<= 1; count <<= 1; } } #else static inline void buddy_check_alloc(struct buddy *b, unsigned int node) {} static inline void buddy_check_alloc_down(struct buddy *b, unsigned int node) {} #endif int buddy_alloc(struct buddy *b, unsigned int order) { unsigned int o; int node, index; BUDDY_NOISE("buddy_alloc(%d)\n", order); /* * Find the first order up the tree from our requested order that * has at least one free node. */ for (o = order; o <= b->max_order; o++) { if (b->freecounts[o] > 0) break; } /* Nothing found ? fail */ if (o > b->max_order) { BUDDY_NOISE(" no free nodes !\n"); return -1; } BUDDY_NOISE(" %d free node(s) at order %d, bits %d(%d)\n", b->freecounts[o], o, buddy_order_start(b, o), 1u << (b->max_order - o)); /* Now find a free node */ node = bitmap_find_zero_bit(b->map, buddy_order_start(b, o), 1u << (b->max_order - o)); /* There should always be one */ assert(node >= 0); /* Mark it allocated and decrease free count */ bitmap_set_bit(b->map, node); b->freecounts[o]--; /* We know that node was free which means all its children must have * been marked "allocated". Double check. */ buddy_check_alloc_down(b, node); /* We have a node, we've marked it allocated, now we need to go down * the tree until we reach "order" which is the order we need. For * each level along the way, we mark the buddy free and leave the * first child allocated. 
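 *
 * Worked example (editor's sketch, not from the original source): with
 * max_order = 2 and a completely free tree, buddy_alloc(b, 0) finds the
 * free root (node 1, order 2) and marks it busy. Walking down, it keeps
 * node 2 and frees its buddy node 3 (order 1), then keeps node 4 and
 * frees its buddy node 5 (order 0). Node 4 maps back to index 0, which
 * is returned, leaving indices 1-3 still allocatable.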
*/ while (o > order) { /* Next level down */ o--; node <<= 1; BUDDY_NOISE(" order %d, using %d marking %d free\n", o, node, node ^ 1); bitmap_clr_bit(b->map, node ^ 1); b->freecounts[o]++; assert(bitmap_tst_bit(b->map, node)); } index = buddy_node_to_index(b, node, order); BUDDY_NOISE(" result is index %d (node %d)\n", index, node); /* We have a node, convert it to an element number */ return index; } bool buddy_reserve(struct buddy *b, unsigned int index, unsigned int order) { unsigned int node, freenode, o; assert(index < (1u << b->max_order)); BUDDY_NOISE("buddy_reserve(%d,%d)\n", index, order); /* Get bit number for node */ node = buddy_index_to_node(b, index, order); BUDDY_NOISE(" node=%d\n", node); /* Find something free */ for (freenode = node, o = order; freenode > 0; freenode >>= 1, o++) if (!bitmap_tst_bit(b->map, freenode)) break; BUDDY_NOISE(" freenode=%d order %d\n", freenode, o); /* Nothing free, error out */ if (!freenode) return false; /* We sit on a free node, mark it busy */ bitmap_set_bit(b->map, freenode); assert(b->freecounts[o]); b->freecounts[o]--; /* We know that node was free which means all its children must have * been marked "allocated". Double check. */ buddy_check_alloc_down(b, freenode); /* Reverse-walk the path and break down nodes */ while (o > order) { /* Next level down */ o--; freenode <<= 1; /* Find the right one on the path to node */ if (node & (1u << (o - order))) freenode++; BUDDY_NOISE(" order %d, using %d marking %d free\n", o, freenode, freenode ^ 1); bitmap_clr_bit(b->map, freenode ^ 1); b->freecounts[o]++; assert(bitmap_tst_bit(b->map, node)); } assert(node == freenode); return true; } void buddy_free(struct buddy *b, unsigned int index, unsigned int order) { unsigned int node; assert(index < (1u << b->max_order)); BUDDY_NOISE("buddy_free(%d,%d)\n", index, order); /* Get bit number for node */ node = buddy_index_to_node(b, index, order); BUDDY_NOISE(" node=%d\n", node); /* We assume that anything freed was fully allocated, ie, * there is no child node of that allocation index/order * that is already free. * * BUDDY_DEBUG will verify it at the cost of performances */ buddy_check_alloc_down(b, node); /* Propagate if buddy is free */ while (order < b->max_order && !bitmap_tst_bit(b->map, node ^ 1)) { BUDDY_NOISE(" order %d node %d buddy %d free, propagating\n", order, node, node ^ 1); /* Mark buddy busy (we are already marked busy) */ bitmap_set_bit(b->map, node ^ 1); /* Reduce free count */ assert(b->freecounts[order] > 0); b->freecounts[order]--; /* Get parent */ node >>= 1; order++; /* It must be busy already ! */ buddy_check_alloc(b, node); BUDDY_NOISE(" testing order %d node %d\n", order, node ^ 1); } /* No more coalescing, mark it free */ bitmap_clr_bit(b->map, node); /* Increase the freelist count for that level */ b->freecounts[order]++; BUDDY_NOISE(" free count at order %d is %d\n", order, b->freecounts[order]); } void buddy_reset(struct buddy *b) { unsigned int bsize = BITMAP_BYTES(1u << (b->max_order + 1)); BUDDY_NOISE("buddy_reset()\n"); /* We fill the bitmap with 1's to make it completely "busy" */ memset(b->map, 0xff, bsize); memset(b->freecounts, 0, sizeof(b->freecounts)); /* We mark the root of the tree free, this is entry 1 as entry 0 * is unused. 
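 *
 * Minimal usage sketch for the allocator as a whole (editor's addition,
 * not part of the original file):
 *	struct buddy *b = buddy_create(4);	// manages 1 << 4 = 16 units
 *	int idx = buddy_alloc(b, 2);		// a 4-unit block, or -1 if full
 *	bool ok = buddy_reserve(b, 8, 3);	// pin units 8-15 if still free
 *	buddy_free(b, idx, 2);			// hand the first block back
 *	buddy_destroy(b);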
*/ buddy_free(b, 0, b->max_order); } struct buddy *buddy_create(unsigned int max_order) { struct buddy *b; unsigned int bsize; assert(max_order <= BUDDY_MAX_ORDER); bsize = BITMAP_BYTES(1u << (max_order + 1)); b = zalloc(sizeof(struct buddy) + bsize); if (!b) return NULL; b->max_order = max_order; BUDDY_NOISE("Map @%p, size: %d bytes\n", b->map, bsize); buddy_reset(b); return b; } void buddy_destroy(struct buddy *b) { free(b); } skiboot-5.10-rc4/core/chip.c000066400000000000000000000075121324317060200156210ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include static struct proc_chip *chips[MAX_CHIPS]; enum proc_chip_quirks proc_chip_quirks; uint32_t pir_to_chip_id(uint32_t pir) { if (proc_gen == proc_gen_p9) return P9_PIR2GCID(pir); else if (proc_gen == proc_gen_p8) return P8_PIR2GCID(pir); else return P7_PIR2GCID(pir); } uint32_t pir_to_core_id(uint32_t pir) { if (proc_gen == proc_gen_p9) return P9_PIR2COREID(pir); else if (proc_gen == proc_gen_p8) return P8_PIR2COREID(pir); else return P7_PIR2COREID(pir); } uint32_t pir_to_thread_id(uint32_t pir) { if (proc_gen == proc_gen_p9) return P9_PIR2THREADID(pir); else if (proc_gen == proc_gen_p8) return P8_PIR2THREADID(pir); else return P7_PIR2THREADID(pir); } struct proc_chip *next_chip(struct proc_chip *chip) { unsigned int i; for (i = chip ? (chip->id + 1) : 0; i < MAX_CHIPS; i++) if (chips[i]) return chips[i]; return NULL; } struct proc_chip *get_chip(uint32_t chip_id) { if (chip_id >= MAX_CHIPS) return NULL; return chips[chip_id]; } static void init_chip(struct dt_node *dn) { struct proc_chip *chip; uint32_t id; const char *lc = NULL; id = dt_get_chip_id(dn); assert(id < MAX_CHIPS); assert(chips[id] == NULL); chip = zalloc(sizeof(struct proc_chip)); assert(chip); chip->id = id; chip->devnode = dn; chip->dbob_id = dt_prop_get_u32_def(dn, "ibm,dbob-id", 0xffffffff); chip->pcid = dt_prop_get_u32_def(dn, "ibm,proc-chip-id", 0xffffffff); if (dt_prop_get_u32_def(dn, "ibm,occ-functional-state", 0)) chip->occ_functional = true; else chip->occ_functional = false; list_head_init(&chip->i2cms); /* Update the location code for this chip. 
*/ if (dt_has_node_property(dn, "ibm,loc-code", NULL)) lc = dt_prop_get(dn, "ibm,loc-code"); else if (dt_has_node_property(dn, "ibm,slot-location-code", NULL)) lc = dt_prop_get(dn, "ibm,slot-location-code"); if (lc) chip->loc_code = strdup(lc); prlog(PR_INFO, "CHIP: Initialised chip %d from %s\n", id, dn->name); chips[id] = chip; } void init_chips(void) { struct dt_node *xn; /* Detect mambo chip */ if (dt_find_by_path(dt_root, "/mambo")) { proc_chip_quirks |= QUIRK_NO_CHIPTOD | QUIRK_MAMBO_CALLOUTS | QUIRK_NO_F000F | QUIRK_NO_PBA | QUIRK_NO_OCC_IRQ | QUIRK_NO_RNG; enable_mambo_console(); prlog(PR_NOTICE, "CHIP: Detected Mambo simulator\n"); dt_for_each_compatible(dt_root, xn, "ibm,mambo-chip") init_chip(xn); } /* Detect simics */ if (dt_find_by_path(dt_root, "/simics")) { proc_chip_quirks |= QUIRK_SIMICS | QUIRK_NO_PBA | QUIRK_NO_OCC_IRQ | QUIRK_SLOW_SIM; tb_hz = 512000; prlog(PR_NOTICE, "CHIP: Detected Simics simulator\n"); } /* Detect Awan emulator */ if (dt_find_by_path(dt_root, "/awan")) { proc_chip_quirks |= QUIRK_NO_CHIPTOD | QUIRK_NO_F000F | QUIRK_NO_PBA | QUIRK_NO_OCC_IRQ | QUIRK_SLOW_SIM; tb_hz = 512000; prlog(PR_NOTICE, "CHIP: Detected Awan emulator\n"); } /* Detect Qemu */ if (dt_node_is_compatible(dt_root, "qemu,powernv")) { proc_chip_quirks |= QUIRK_NO_CHIPTOD | QUIRK_NO_PBA | QUIRK_NO_DIRECT_CTL; prlog(PR_NOTICE, "CHIP: Detected Qemu simulator\n"); } /* We walk the chips based on xscom nodes in the tree */ dt_for_each_compatible(dt_root, xn, "ibm,xscom") { init_chip(xn); } } skiboot-5.10-rc4/core/console-log.c000066400000000000000000000043731324317060200171210ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Console Log routines * Wraps libc and console lower level functions * does fancy-schmancy things like timestamps and priorities * Doesn't make waffles. */ #include "skiboot.h" #include "unistd.h" #include "stdio.h" #include "console.h" #include "timebase.h" static int vprlog(int log_level, const char *fmt, va_list ap) { int count; char buffer[320]; bool flush_to_drivers = true; unsigned long tb = mftb(); /* It's safe to return 0 when we "did" something here * as only printf cares about how much we wrote, and * if you change log_level to below PR_PRINTF then you * get everything you deserve. * By default, only PR_DEBUG and higher are stored in memory. * PR_TRACE and PR_INSANE are for those having a bad day. */ if (log_level > (debug_descriptor.console_log_levels >> 4)) return 0; count = snprintf(buffer, sizeof(buffer), "[%5lu.%09lu,%d] ", tb_to_secs(tb), tb_remaining_nsecs(tb), log_level); count+= vsnprintf(buffer+count, sizeof(buffer)-count, fmt, ap); if (log_level > (debug_descriptor.console_log_levels & 0x0f)) flush_to_drivers = false; console_write(flush_to_drivers, buffer, count); return count; } /* we don't return anything as what on earth are we going to do * if we actually fail to print a log message? Print a log message about it? 
* Callers shouldn't care, prlog and friends should do something generically * sane in such crazy situations. */ void _prlog(int log_level, const char* fmt, ...) { va_list ap; va_start(ap, fmt); vprlog(log_level, fmt, ap); va_end(ap); } int _printf(const char* fmt, ...) { int count; va_list ap; va_start(ap, fmt); count = vprlog(PR_PRINTF, fmt, ap); va_end(ap); return count; } skiboot-5.10-rc4/core/console.c000066400000000000000000000245311324317060200163400ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Console IO routine for use by libc * * fd is the classic posix 0,1,2 (stdin, stdout, stderr) */ #include #include #include #include #include #include #include static char *con_buf = (char *)INMEM_CON_START; static size_t con_in; static size_t con_out; static bool con_wrapped; /* Internal console driver ops */ static struct con_ops *con_driver; /* External (OPAL) console driver ops */ static struct opal_con_ops *opal_con_driver = &dummy_opal_con; static struct lock con_lock = LOCK_UNLOCKED; /* This is mapped via TCEs so we keep it alone in a page */ struct memcons memcons __section(".data.memcons") = { .magic = MEMCONS_MAGIC, .obuf_phys = INMEM_CON_START, .ibuf_phys = INMEM_CON_START + INMEM_CON_OUT_LEN, .obuf_size = INMEM_CON_OUT_LEN, .ibuf_size = INMEM_CON_IN_LEN, }; static bool dummy_console_enabled(void) { #ifdef FORCE_DUMMY_CONSOLE return true; #else return dt_has_node_property(dt_chosen, "sapphire,enable-dummy-console", NULL); #endif } /* * Helper function for adding /ibm,opal/consoles/serial@ nodes */ struct dt_node *add_opal_console_node(int index, const char *type, uint32_t write_buffer_size) { struct dt_node *con, *consoles; char buffer[32]; consoles = dt_find_by_name(opal_node, "consoles"); if (!consoles) { consoles = dt_new(opal_node, "consoles"); assert(consoles); dt_add_property_cells(consoles, "#address-cells", 1); dt_add_property_cells(consoles, "#size-cells", 0); } con = dt_new_addr(consoles, "serial", index); assert(con); snprintf(buffer, sizeof(buffer), "ibm,opal-console-%s", type); dt_add_property_string(con, "compatible", buffer); dt_add_property_cells(con, "#write-buffer-size", write_buffer_size); dt_add_property_cells(con, "reg", index); dt_add_property_string(con, "device_type", "serial"); return con; } void clear_console(void) { memset(con_buf, 0, INMEM_CON_LEN); } /* * Flush the console buffer into the driver, returns true * if there is more to go. * Optionally can skip flushing to drivers, leaving messages * just in memory console. */ static bool __flush_console(bool flush_to_drivers) { struct cpu_thread *cpu = this_cpu(); size_t req, len = 0; static bool in_flush, more_flush; /* Is there anything to flush ? 
Bail out early if not */ if (con_in == con_out || !con_driver) return false; /* * Console flushing is suspended on this CPU, typically because * some critical locks are held that would potentially cause a * flush to deadlock */ if (cpu->con_suspend) { cpu->con_need_flush = true; return false; } cpu->con_need_flush = false; /* * We must call the underlying driver with the console lock * dropped otherwise we get some deadlocks if anything down * that path tries to printf() something. * * So instead what we do is we keep a static in_flush flag * set/released with the lock held, which is used to prevent * concurrent attempts at flushing the same chunk of buffer * by other processors. */ if (in_flush) { more_flush = true; return false; } in_flush = true; /* * NB: this must appear after the in_flush check since it modifies * con_out. */ if (!flush_to_drivers) { con_out = con_in; in_flush = false; return false; } do { more_flush = false; if (con_out > con_in) { req = INMEM_CON_OUT_LEN - con_out; more_flush = true; } else req = con_in - con_out; unlock(&con_lock); len = con_driver->write(con_buf + con_out, req); lock(&con_lock); con_out = (con_out + len) % INMEM_CON_OUT_LEN; /* write error? */ if (len < req) break; } while(more_flush); in_flush = false; return con_out != con_in; } bool flush_console(void) { bool ret; lock(&con_lock); ret = __flush_console(true); unlock(&con_lock); return ret; } static void inmem_write(char c) { uint32_t opos; if (!c) return; con_buf[con_in++] = c; if (con_in >= INMEM_CON_OUT_LEN) { con_in = 0; con_wrapped = true; } /* * We must always re-generate memcons.out_pos because * under some circumstances, the console script will * use a broken putmemproc that does RMW on the full * 8 bytes containing out_pos and in_prod, thus corrupting * out_pos */ opos = con_in; if (con_wrapped) opos |= MEMCONS_OUT_POS_WRAP; lwsync(); memcons.out_pos = opos; /* If head reaches tail, push tail around & drop chars */ if (con_in == con_out) con_out = (con_in + 1) % INMEM_CON_OUT_LEN; } static size_t inmem_read(char *buf, size_t req) { size_t read = 0; char *ibuf = (char *)memcons.ibuf_phys; while (req && memcons.in_prod != memcons.in_cons) { *(buf++) = ibuf[memcons.in_cons]; lwsync(); memcons.in_cons = (memcons.in_cons + 1) % INMEM_CON_IN_LEN; req--; read++; } return read; } static void write_char(char c) { #ifdef MAMBO_DEBUG_CONSOLE mambo_console_write(&c, 1); #endif inmem_write(c); } ssize_t console_write(bool flush_to_drivers, const void *buf, size_t count) { /* We use recursive locking here as we can get called * from fairly deep debug path */ bool need_unlock = lock_recursive(&con_lock); const char *cbuf = buf; while(count--) { char c = *(cbuf++); if (c == '\n') write_char('\r'); write_char(c); } __flush_console(flush_to_drivers); if (need_unlock) unlock(&con_lock); return count; } ssize_t write(int fd __unused, const void *buf, size_t count) { return console_write(true, buf, count); } ssize_t read(int fd __unused, void *buf, size_t req_count) { bool need_unlock = lock_recursive(&con_lock); size_t count = 0; if (con_driver && con_driver->read) count = con_driver->read(buf, req_count); if (!count) count = inmem_read(buf, req_count); if (need_unlock) unlock(&con_lock); return count; } /* Helper function to perform a full synchronous flush */ void console_complete_flush(void) { /* * Using term 0 here is a dumb hack that works because the UART * only has term 0 and the FSP doesn't have an explicit flush method. 
*/ int64_t ret = opal_con_driver->flush(0); if (ret == OPAL_UNSUPPORTED || ret == OPAL_PARAMETER) return; while (ret != OPAL_SUCCESS) { ret = opal_con_driver->flush(0); } } /* * set_console() * * This sets the driver used internally by Skiboot. This is different to the * OPAL console driver. */ void set_console(struct con_ops *driver) { con_driver = driver; if (driver) flush_console(); } /* * set_opal_console() * * Configure the console driver to handle the console provided by the OPAL API. * They are different to the above in that they are typically buffered, and used * by the host OS rather than skiboot. */ static bool opal_cons_init = false; void set_opal_console(struct opal_con_ops *driver) { assert(!opal_cons_init); opal_con_driver = driver; } void init_opal_console(void) { assert(!opal_cons_init); opal_cons_init = true; if (dummy_console_enabled() && opal_con_driver != &dummy_opal_con) { prlog(PR_WARNING, "OPAL: Dummy console forced, %s ignored\n", opal_con_driver->name); opal_con_driver = &dummy_opal_con; } prlog(PR_NOTICE, "OPAL: Using %s\n", opal_con_driver->name); if (opal_con_driver->init) opal_con_driver->init(); opal_register(OPAL_CONSOLE_READ, opal_con_driver->read, 3); opal_register(OPAL_CONSOLE_WRITE, opal_con_driver->write, 3); opal_register(OPAL_CONSOLE_FLUSH, opal_con_driver->flush, 1); opal_register(OPAL_CONSOLE_WRITE_BUFFER_SPACE, opal_con_driver->space, 2); } void memcons_add_properties(void) { dt_add_property_u64(opal_node, "ibm,opal-memcons", (u64) &memcons); } /* * The default OPAL console. * * In the absence of a "real" OPAL console driver we handle the OPAL_CONSOLE_* * calls by writing into the skiboot log buffer. Reads are a little more * complicated since they can come from the in-memory console (BML) or from the * internal skiboot console driver. 
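 *
 * Editor's sketch of the calling convention the handlers below implement
 * (not text from the original file): the host always uses terminal 0 and
 * passes the byte count by pointer, so a raw write of "ok\r\n" amounts to
 *	int64_t len = 4;
 *	dummy_console_write(0, &len, (const uint8_t *)"ok\r\n");
 * while any other terminal number is rejected with OPAL_PARAMETER.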
*/ static int64_t dummy_console_write(int64_t term_number, int64_t *length, const uint8_t *buffer) { if (term_number != 0) return OPAL_PARAMETER; if (!opal_addr_valid(length) || !opal_addr_valid(buffer)) return OPAL_PARAMETER; write(0, buffer, *length); return OPAL_SUCCESS; } static int64_t dummy_console_write_buffer_space(int64_t term_number, int64_t *length) { if (term_number != 0) return OPAL_PARAMETER; if (!opal_addr_valid(length)) return OPAL_PARAMETER; if (length) *length = INMEM_CON_OUT_LEN; return OPAL_SUCCESS; } static int64_t dummy_console_read(int64_t term_number, int64_t *length, uint8_t *buffer) { if (term_number != 0) return OPAL_PARAMETER; if (!opal_addr_valid(length) || !opal_addr_valid(buffer)) return OPAL_PARAMETER; *length = read(0, buffer, *length); opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT, 0); return OPAL_SUCCESS; } static int64_t dummy_console_flush(int64_t term_number __unused) { return OPAL_UNSUPPORTED; } static void dummy_console_poll(void *data __unused) { bool has_data = false; lock(&con_lock); if (con_driver && con_driver->poll_read) has_data = con_driver->poll_read(); if (memcons.in_prod != memcons.in_cons) has_data = true; if (has_data) opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT, OPAL_EVENT_CONSOLE_INPUT); else opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT, 0); unlock(&con_lock); } void dummy_console_add_nodes(void) { struct dt_property *p; add_opal_console_node(0, "raw", memcons.obuf_size); /* Mambo might have left a crap one, clear it */ p = __dt_find_property(dt_chosen, "linux,stdout-path"); if (p) dt_del_property(dt_chosen, p); dt_add_property_string(dt_chosen, "linux,stdout-path", "/ibm,opal/consoles/serial@0"); opal_add_poller(dummy_console_poll, NULL); } struct opal_con_ops dummy_opal_con = { .name = "Dummy Console", .init = dummy_console_add_nodes, .read = dummy_console_read, .write = dummy_console_write, .space = dummy_console_write_buffer_space, .flush = dummy_console_flush, }; skiboot-5.10-rc4/core/cpu.c000066400000000000000000001020711324317060200154610ustar00rootroot00000000000000/* Copyright 2013-2017 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * TODO: Index array by PIR to be able to catch them easily * from assembly such as machine checks etc... 
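 *
 * Editor's note (not in the original file): the job interface implemented
 * below is typically driven as in this sketch, which queues work on
 * another CPU and synchronously waits for it:
 *	static void say_hi(void *data __unused)
 *	{
 *		printf("hello from PIR 0x%04x\n", this_cpu()->pir);
 *	}
 *	...
 *	struct cpu_job *job = __cpu_queue_job(cpu, "say_hi", say_hi, NULL, false);
 *	cpu_wait_job(job, true);	// poll until complete, then free it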
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* The cpu_threads array is static and indexed by PIR in * order to speed up lookup from asm entry points */ struct cpu_stack { union { uint8_t stack[STACK_SIZE]; struct cpu_thread cpu; }; } __align(STACK_SIZE); static struct cpu_stack *cpu_stacks = (struct cpu_stack *)CPU_STACKS_BASE; unsigned int cpu_thread_count; unsigned int cpu_max_pir; struct cpu_thread *boot_cpu; static struct lock reinit_lock = LOCK_UNLOCKED; static bool hile_supported; static bool radix_supported; static unsigned long hid0_hile; static unsigned long hid0_attn; static bool sreset_enabled; static bool ipi_enabled; static bool pm_enabled; static bool current_hile_mode; static bool current_radix_mode; unsigned long cpu_secondary_start __force_data = 0; struct cpu_job { struct list_node link; void (*func)(void *data); void *data; const char *name; bool complete; bool no_return; }; /* attribute const as cpu_stacks is constant. */ unsigned long __attrconst cpu_stack_bottom(unsigned int pir) { return ((unsigned long)&cpu_stacks[pir]) + sizeof(struct cpu_thread) + STACK_SAFETY_GAP; } unsigned long __attrconst cpu_stack_top(unsigned int pir) { /* This is the top of the MC stack which is above the normal * stack, which means a SP between cpu_stack_bottom() and * cpu_stack_top() can either be a normal stack pointer or * a Machine Check stack pointer */ return ((unsigned long)&cpu_stacks[pir]) + NORMAL_STACK_SIZE - STACK_TOP_GAP; } static void cpu_wake(struct cpu_thread *cpu) { /* Is it idle ? If not, no need to wake */ sync(); if (!cpu->in_idle) return; if (proc_gen == proc_gen_p8 || proc_gen == proc_gen_p7) { /* Poke IPI */ icp_kick_cpu(cpu); } else if (proc_gen == proc_gen_p9) { p9_dbell_send(cpu->pir); } } static struct cpu_thread *cpu_find_job_target(void) { struct cpu_thread *cpu, *best, *me = this_cpu(); uint32_t best_count; /* We try to find a target to run a job. We need to avoid * a CPU that has a "no return" job on its queue as it might * never be able to process anything. * * Additionally we don't check the list but the job count * on the target CPUs, since that is decremented *after* * a job has been completed. */ /* First we scan all available primary threads */ for_each_available_cpu(cpu) { if (cpu == me || !cpu_is_thread0(cpu) || cpu->job_has_no_return) continue; if (cpu->job_count) continue; lock(&cpu->job_lock); if (!cpu->job_count) return cpu; unlock(&cpu->job_lock); } /* Now try again with secondary threads included and keep * track of the one with the less jobs queued up. This is * done in a racy way, but it's just an optimization in case * we are overcommitted on jobs. Could could also just pick * a random one... */ best = NULL; best_count = -1u; for_each_available_cpu(cpu) { if (cpu == me || cpu->job_has_no_return) continue; if (!best || cpu->job_count < best_count) { best = cpu; best_count = cpu->job_count; } if (cpu->job_count) continue; lock(&cpu->job_lock); if (!cpu->job_count) return cpu; unlock(&cpu->job_lock); } /* We haven't found anybody, do we have a bestie ? 
*/ if (best) { lock(&best->job_lock); return best; } /* Go away */ return NULL; } struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu, const char *name, void (*func)(void *data), void *data, bool no_return) { struct cpu_job *job; #ifdef DEBUG_SERIALIZE_CPU_JOBS if (cpu == NULL) cpu = this_cpu(); #endif if (cpu && !cpu_is_available(cpu)) { prerror("CPU: Tried to queue job on unavailable CPU 0x%04x\n", cpu->pir); return NULL; } job = zalloc(sizeof(struct cpu_job)); if (!job) return NULL; job->func = func; job->data = data; job->name = name; job->complete = false; job->no_return = no_return; /* Pick a candidate. Returns with target queue locked */ if (cpu == NULL) cpu = cpu_find_job_target(); else if (cpu != this_cpu()) lock(&cpu->job_lock); else cpu = NULL; /* Can't be scheduled, run it now */ if (cpu == NULL) { func(data); job->complete = true; return job; } /* That's bad, the job will never run */ if (cpu->job_has_no_return) { prlog(PR_WARNING, "WARNING ! Job %s scheduled on CPU 0x%x" " which has a no-return job on its queue !\n", job->name, cpu->pir); backtrace(); } list_add_tail(&cpu->job_queue, &job->link); if (no_return) cpu->job_has_no_return = true; else cpu->job_count++; if (pm_enabled) cpu_wake(cpu); unlock(&cpu->job_lock); return job; } bool cpu_poll_job(struct cpu_job *job) { lwsync(); return job->complete; } void cpu_wait_job(struct cpu_job *job, bool free_it) { unsigned long time_waited = 0; if (!job) return; while (!job->complete) { /* This will call OPAL pollers for us */ time_wait_ms(10); time_waited += 10; lwsync(); } lwsync(); if (time_waited > 1000) prlog(PR_DEBUG, "cpu_wait_job(%s) for %lums\n", job->name, time_waited); if (free_it) free(job); } bool cpu_check_jobs(struct cpu_thread *cpu) { return !list_empty_nocheck(&cpu->job_queue); } void cpu_process_jobs(void) { struct cpu_thread *cpu = this_cpu(); struct cpu_job *job = NULL; void (*func)(void *); void *data; sync(); if (!cpu_check_jobs(cpu)) return; lock(&cpu->job_lock); while (true) { bool no_return; job = list_pop(&cpu->job_queue, struct cpu_job, link); if (!job) break; func = job->func; data = job->data; no_return = job->no_return; unlock(&cpu->job_lock); prlog(PR_TRACE, "running job %s on %x\n", job->name, cpu->pir); if (no_return) free(job); func(data); if (!list_empty(&cpu->locks_held)) { prlog(PR_ERR, "OPAL job %s returning with locks held\n", job->name); drop_my_locks(true); } lock(&cpu->job_lock); if (!no_return) { cpu->job_count--; lwsync(); job->complete = true; } } unlock(&cpu->job_lock); } enum cpu_wake_cause { cpu_wake_on_job, cpu_wake_on_dec, }; static void cpu_idle_p8(enum cpu_wake_cause wake_on) { uint64_t lpcr = mfspr(SPR_LPCR) & ~SPR_LPCR_P8_PECE; struct cpu_thread *cpu = this_cpu(); if (!pm_enabled) { prlog_once(PR_DEBUG, "cpu_idle_p8 called pm disabled\n"); return; } /* Clean up ICP, be ready for IPIs */ icp_prep_for_pm(); /* Synchronize with wakers */ if (wake_on == cpu_wake_on_job) { /* Mark ourselves in idle so other CPUs know to send an IPI */ cpu->in_idle = true; sync(); /* Check for jobs again */ if (cpu_check_jobs(cpu) || !pm_enabled) goto skip_sleep; /* Setup wakup cause in LPCR: EE (for IPI) */ lpcr |= SPR_LPCR_P8_PECE2; mtspr(SPR_LPCR, lpcr); } else { /* Mark outselves sleeping so cpu_set_pm_enable knows to * send an IPI */ cpu->in_sleep = true; sync(); /* Check if PM got disabled */ if (!pm_enabled) goto skip_sleep; /* EE and DEC */ lpcr |= SPR_LPCR_P8_PECE2 | SPR_LPCR_P8_PECE3; mtspr(SPR_LPCR, lpcr); } /* Enter nap */ enter_p8_pm_state(false); skip_sleep: /* Restore */ sync(); 
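/*
 * Editorial note (added comment, not part of the original source): on the
 * skip_sleep path below the thread clears the in_idle/in_sleep flags it set
 * before entering nap, so that cpu_wake() and cpu_pm_disable() stop poking
 * it, and then resets its ICP so interrupts are presented normally again.
 */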
cpu->in_idle = false; cpu->in_sleep = false; reset_cpu_icp(); } static void cpu_idle_p9(enum cpu_wake_cause wake_on) { uint64_t lpcr = mfspr(SPR_LPCR) & ~SPR_LPCR_P9_PECE; uint64_t psscr; struct cpu_thread *cpu = this_cpu(); if (!pm_enabled) { prlog_once(PR_DEBUG, "cpu_idle_p9 called pm disabled\n"); return; } msgclr(); /* flush pending messages */ /* Synchronize with wakers */ if (wake_on == cpu_wake_on_job) { /* Mark ourselves in idle so other CPUs know to send an IPI */ cpu->in_idle = true; sync(); /* Check for jobs again */ if (cpu_check_jobs(cpu) || !pm_enabled) goto skip_sleep; /* HV DBELL for IPI */ lpcr |= SPR_LPCR_P9_PECEL1; } else { /* Mark outselves sleeping so cpu_set_pm_enable knows to * send an IPI */ cpu->in_sleep = true; sync(); /* Check if PM got disabled */ if (!pm_enabled) goto skip_sleep; /* HV DBELL and DEC */ lpcr |= SPR_LPCR_P9_PECEL1 | SPR_LPCR_P9_PECEL3; mtspr(SPR_LPCR, lpcr); } mtspr(SPR_LPCR, lpcr); if (sreset_enabled) { /* stop with EC=1 (sreset) and ESL=1 (enable thread switch). */ /* PSSCR SD=0 ESL=1 EC=1 PSSL=0 TR=3 MTL=0 RL=3 */ psscr = PPC_BIT(42) | PPC_BIT(43) | PPC_BITMASK(54, 55) | PPC_BITMASK(62,63); enter_p9_pm_state(psscr); } else { /* stop with EC=0 (resumes) which does not require sreset. */ /* PSSCR SD=0 ESL=0 EC=0 PSSL=0 TR=3 MTL=0 RL=3 */ psscr = PPC_BITMASK(54, 55) | PPC_BITMASK(62,63); enter_p9_pm_lite_state(psscr); } skip_sleep: /* Restore */ cpu->in_idle = false; cpu->in_sleep = false; p9_dbell_receive(); } static void cpu_idle_pm(enum cpu_wake_cause wake_on) { switch(proc_gen) { case proc_gen_p8: cpu_idle_p8(wake_on); break; case proc_gen_p9: cpu_idle_p9(wake_on); break; default: prlog_once(PR_DEBUG, "cpu_idle_pm called with bad processor type\n"); break; } } void cpu_idle_job(void) { if (pm_enabled) { cpu_idle_pm(cpu_wake_on_job); } else { struct cpu_thread *cpu = this_cpu(); smt_lowest(); /* Check for jobs again */ while (!cpu_check_jobs(cpu)) { if (pm_enabled) break; barrier(); } smt_medium(); } } void cpu_idle_delay(unsigned long delay) { unsigned long now = mftb(); unsigned long end = now + delay; unsigned long min_pm = usecs_to_tb(10); if (pm_enabled && delay > min_pm) { pm: for (;;) { if (delay >= 0x7fffffff) delay = 0x7fffffff; mtspr(SPR_DEC, delay); cpu_idle_pm(cpu_wake_on_dec); now = mftb(); if (tb_compare(now, end) == TB_AAFTERB) break; delay = end - now; if (!(pm_enabled && delay > min_pm)) goto no_pm; } } else { no_pm: smt_lowest(); for (;;) { now = mftb(); if (tb_compare(now, end) == TB_AAFTERB) break; delay = end - now; if (pm_enabled && delay > min_pm) { smt_medium(); goto pm; } } smt_medium(); } } static void cpu_pm_disable(void) { struct cpu_thread *cpu; pm_enabled = false; sync(); if (proc_gen == proc_gen_p8) { for_each_available_cpu(cpu) { while (cpu->in_sleep || cpu->in_idle) { icp_kick_cpu(cpu); cpu_relax(); } } } else if (proc_gen == proc_gen_p9) { for_each_available_cpu(cpu) { if (cpu->in_sleep || cpu->in_idle) p9_dbell_send(cpu->pir); } smt_lowest(); for_each_available_cpu(cpu) { while (cpu->in_sleep || cpu->in_idle) barrier(); } smt_medium(); } } void cpu_set_sreset_enable(bool enabled) { if (sreset_enabled == enabled) return; if (proc_gen == proc_gen_p8) { /* Public P8 Mambo has broken NAP */ if (chip_quirk(QUIRK_MAMBO_CALLOUTS)) return; sreset_enabled = enabled; sync(); if (!enabled) { cpu_pm_disable(); } else { if (ipi_enabled) pm_enabled = true; } } else if (proc_gen == proc_gen_p9) { /* Don't use sreset idle on DD1 (has a number of bugs) */ uint32_t version = mfspr(SPR_PVR); if (is_power9n(version) && 
(PVR_VERS_MAJ(version) == 1)) return; sreset_enabled = enabled; sync(); /* * Kick everybody out of PM so they can adjust the PM * mode they are using (EC=0/1). */ cpu_pm_disable(); if (ipi_enabled) pm_enabled = true; } } void cpu_set_ipi_enable(bool enabled) { if (ipi_enabled == enabled) return; if (proc_gen == proc_gen_p8) { ipi_enabled = enabled; sync(); if (!enabled) { cpu_pm_disable(); } else { if (sreset_enabled) pm_enabled = true; } } else if (proc_gen == proc_gen_p9) { /* Don't use doorbell on DD1 (requires darn for msgsync) */ uint32_t version = mfspr(SPR_PVR); if (is_power9n(version) && (PVR_VERS_MAJ(version) == 1)) return; ipi_enabled = enabled; sync(); if (!enabled) cpu_pm_disable(); else pm_enabled = true; } } void cpu_process_local_jobs(void) { struct cpu_thread *cpu = first_available_cpu(); while (cpu) { if (cpu != this_cpu()) return; cpu = next_available_cpu(cpu); } if (!cpu) cpu = first_available_cpu(); /* No CPU to run on, just run synchro */ if (cpu == this_cpu()) { prlog_once(PR_DEBUG, "Processing jobs synchronously\n"); cpu_process_jobs(); opal_run_pollers(); } } struct dt_node *get_cpu_node(u32 pir) { struct cpu_thread *t = find_cpu_by_pir(pir); return t ? t->node : NULL; } /* This only covers primary, active cpus */ struct cpu_thread *find_cpu_by_chip_id(u32 chip_id) { struct cpu_thread *t; for_each_available_cpu(t) { if (t->is_secondary) continue; if (t->chip_id == chip_id) return t; } return NULL; } struct cpu_thread *find_cpu_by_node(struct dt_node *cpu) { struct cpu_thread *t; for_each_available_cpu(t) { if (t->node == cpu) return t; } return NULL; } struct cpu_thread *find_cpu_by_pir(u32 pir) { if (pir > cpu_max_pir) return NULL; return &cpu_stacks[pir].cpu; } struct cpu_thread *find_cpu_by_server(u32 server_no) { struct cpu_thread *t; for_each_cpu(t) { if (t->server_no == server_no) return t; } return NULL; } struct cpu_thread *next_cpu(struct cpu_thread *cpu) { struct cpu_stack *s = container_of(cpu, struct cpu_stack, cpu); unsigned int index; if (cpu == NULL) index = 0; else index = s - cpu_stacks + 1; for (; index <= cpu_max_pir; index++) { cpu = &cpu_stacks[index].cpu; if (cpu->state != cpu_state_no_cpu) return cpu; } return NULL; } struct cpu_thread *first_cpu(void) { return next_cpu(NULL); } struct cpu_thread *next_available_cpu(struct cpu_thread *cpu) { do { cpu = next_cpu(cpu); } while(cpu && !cpu_is_available(cpu)); return cpu; } struct cpu_thread *first_available_cpu(void) { return next_available_cpu(NULL); } struct cpu_thread *next_present_cpu(struct cpu_thread *cpu) { do { cpu = next_cpu(cpu); } while(cpu && !cpu_is_present(cpu)); return cpu; } struct cpu_thread *first_present_cpu(void) { return next_present_cpu(NULL); } struct cpu_thread *next_ungarded_cpu(struct cpu_thread *cpu) { do { cpu = next_cpu(cpu); } while(cpu && cpu->state == cpu_state_unavailable); return cpu; } struct cpu_thread *first_ungarded_cpu(void) { return next_ungarded_cpu(NULL); } struct cpu_thread *next_ungarded_primary(struct cpu_thread *cpu) { do { cpu = next_cpu(cpu); } while(cpu && cpu->state == cpu_state_unavailable && cpu->primary != cpu); return cpu; } struct cpu_thread *first_ungarded_primary(void) { return next_ungarded_primary(NULL); } u8 get_available_nr_cores_in_chip(u32 chip_id) { struct cpu_thread *core; u8 nr_cores = 0; for_each_available_core_in_chip(core, chip_id) nr_cores++; return nr_cores; } struct cpu_thread *next_available_core_in_chip(struct cpu_thread *core, u32 chip_id) { do { core = next_cpu(core); } while(core && (!cpu_is_available(core) || 
core->chip_id != chip_id || core->is_secondary)); return core; } struct cpu_thread *first_available_core_in_chip(u32 chip_id) { return next_available_core_in_chip(NULL, chip_id); } uint32_t cpu_get_core_index(struct cpu_thread *cpu) { return pir_to_core_id(cpu->pir); } void cpu_remove_node(const struct cpu_thread *t) { struct dt_node *i; /* Find this cpu node */ dt_for_each_node(dt_root, i) { const struct dt_property *p; if (!dt_has_node_property(i, "device_type", "cpu")) continue; p = dt_find_property(i, "ibm,pir"); if (!p) continue; if (dt_property_get_cell(p, 0) == t->pir) { dt_free(i); return; } } prerror("CPU: Could not find cpu node %i to remove!\n", t->pir); abort(); } void cpu_disable_all_threads(struct cpu_thread *cpu) { unsigned int i; struct dt_property *p; for (i = 0; i <= cpu_max_pir; i++) { struct cpu_thread *t = &cpu_stacks[i].cpu; if (t->primary == cpu->primary) t->state = cpu_state_disabled; } /* Mark this core as bad so that Linux kernel don't use this CPU. */ prlog(PR_DEBUG, "CPU: Mark CPU bad (PIR 0x%04x)...\n", cpu->pir); p = __dt_find_property(cpu->node, "status"); if (p) dt_del_property(cpu->node, p); dt_add_property_string(cpu->node, "status", "bad"); /* XXX Do something to actually stop the core */ } static void init_cpu_thread(struct cpu_thread *t, enum cpu_thread_state state, unsigned int pir) { init_lock(&t->dctl_lock); init_lock(&t->job_lock); list_head_init(&t->job_queue); list_head_init(&t->locks_held); t->stack_guard = STACK_CHECK_GUARD_BASE ^ pir; t->state = state; t->pir = pir; #ifdef STACK_CHECK_ENABLED t->stack_bot_mark = LONG_MAX; #endif assert(pir == container_of(t, struct cpu_stack, cpu) - cpu_stacks); } static void enable_attn(void) { unsigned long hid0; hid0 = mfspr(SPR_HID0); hid0 |= hid0_attn; set_hid0(hid0); } static void disable_attn(void) { unsigned long hid0; hid0 = mfspr(SPR_HID0); hid0 &= ~hid0_attn; set_hid0(hid0); } extern void __trigger_attn(void); void trigger_attn(void) { enable_attn(); __trigger_attn(); } static void init_hid(void) { /* attn is enabled even when HV=0, so make sure it's off */ disable_attn(); } void __nomcount pre_init_boot_cpu(void) { struct cpu_thread *cpu = this_cpu(); /* We skip the stack guard ! 
*/ memset(((void *)cpu) + 8, 0, sizeof(struct cpu_thread) - 8); } void init_boot_cpu(void) { unsigned int i, pir, pvr; pir = mfspr(SPR_PIR); pvr = mfspr(SPR_PVR); /* Get CPU family and other flags based on PVR */ switch(PVR_TYPE(pvr)) { case PVR_TYPE_P7: case PVR_TYPE_P7P: proc_gen = proc_gen_p7; break; case PVR_TYPE_P8E: case PVR_TYPE_P8: proc_gen = proc_gen_p8; hile_supported = PVR_VERS_MAJ(mfspr(SPR_PVR)) >= 2; hid0_hile = SPR_HID0_POWER8_HILE; hid0_attn = SPR_HID0_POWER8_ENABLE_ATTN; break; case PVR_TYPE_P8NVL: proc_gen = proc_gen_p8; hile_supported = true; hid0_hile = SPR_HID0_POWER8_HILE; hid0_attn = SPR_HID0_POWER8_ENABLE_ATTN; break; case PVR_TYPE_P9: proc_gen = proc_gen_p9; hile_supported = true; radix_supported = true; hid0_hile = SPR_HID0_POWER9_HILE; hid0_attn = SPR_HID0_POWER9_ENABLE_ATTN; break; default: proc_gen = proc_gen_unknown; } /* Get a CPU thread count and an initial max PIR based on family */ switch(proc_gen) { case proc_gen_p7: cpu_thread_count = 4; cpu_max_pir = SPR_PIR_P7_MASK; prlog(PR_INFO, "CPU: P7 generation processor" " (max %d threads/core)\n", cpu_thread_count); break; case proc_gen_p8: cpu_thread_count = 8; cpu_max_pir = SPR_PIR_P8_MASK; prlog(PR_INFO, "CPU: P8 generation processor" " (max %d threads/core)\n", cpu_thread_count); break; case proc_gen_p9: cpu_thread_count = 4; cpu_max_pir = SPR_PIR_P9_MASK; prlog(PR_INFO, "CPU: P9 generation processor" " (max %d threads/core)\n", cpu_thread_count); break; default: prerror("CPU: Unknown PVR, assuming 1 thread\n"); cpu_thread_count = 1; cpu_max_pir = mfspr(SPR_PIR); } prlog(PR_DEBUG, "CPU: Boot CPU PIR is 0x%04x PVR is 0x%08x\n", pir, pvr); prlog(PR_DEBUG, "CPU: Initial max PIR set to 0x%x\n", cpu_max_pir); /* * Adjust top of RAM to include CPU stacks. While we *could* have * less RAM than this... during early boot, it's enough of a check * until we start parsing device tree / hdat and find out for sure */ top_of_ram += (cpu_max_pir + 1) * STACK_SIZE; /* Clear the CPU structs */ for (i = 0; i <= cpu_max_pir; i++) { /* boot CPU already cleared and we don't want to clobber * its stack guard value. */ if (i == pir) continue; memset(&cpu_stacks[i].cpu, 0, sizeof(struct cpu_thread)); } /* Setup boot CPU state */ boot_cpu = &cpu_stacks[pir].cpu; init_cpu_thread(boot_cpu, cpu_state_active, pir); init_boot_tracebuf(boot_cpu); assert(this_cpu() == boot_cpu); init_hid(); } static void enable_large_dec(bool on) { u64 lpcr = mfspr(SPR_LPCR); if (on) lpcr |= SPR_LPCR_P9_LD; else lpcr &= ~SPR_LPCR_P9_LD; mtspr(SPR_LPCR, lpcr); } #define HIGH_BIT (1ull << 63) static int find_dec_bits(void) { int bits = 65; /* we always decrement once */ u64 mask = ~0ull; if (proc_gen < proc_gen_p9) return 32; /* The ISA doesn't specify the width of the decrementer register so we * need to discover it. When in large mode (LPCR.LD = 1) reads from the * DEC SPR are sign extended to 64 bits and writes are truncated to the * physical register width. We can use this behaviour to detect the * width by starting from an all 1s value and left shifting until we * read a value from the DEC with it's high bit cleared. 
*/ enable_large_dec(true); do { bits--; mask = mask >> 1; mtspr(SPR_DEC, mask); } while (mfspr(SPR_DEC) & HIGH_BIT); enable_large_dec(false); prlog(PR_DEBUG, "CPU: decrementer bits %d\n", bits); return bits; } void init_all_cpus(void) { struct dt_node *cpus, *cpu; unsigned int thread, new_max_pir = 0; int dec_bits = find_dec_bits(); cpus = dt_find_by_path(dt_root, "/cpus"); assert(cpus); /* Iterate all CPUs in the device-tree */ dt_for_each_child(cpus, cpu) { unsigned int pir, server_no, chip_id; enum cpu_thread_state state; const struct dt_property *p; struct cpu_thread *t, *pt; /* Skip cache nodes */ if (strcmp(dt_prop_get(cpu, "device_type"), "cpu")) continue; server_no = dt_prop_get_u32(cpu, "reg"); /* If PIR property is absent, assume it's the same as the * server number */ pir = dt_prop_get_u32_def(cpu, "ibm,pir", server_no); /* We should always have an ibm,chip-id property */ chip_id = dt_get_chip_id(cpu); /* Only use operational CPUs */ if (!strcmp(dt_prop_get(cpu, "status"), "okay")) state = cpu_state_present; else state = cpu_state_unavailable; prlog(PR_INFO, "CPU: CPU from DT PIR=0x%04x Server#=0x%x" " State=%d\n", pir, server_no, state); /* Setup thread 0 */ assert(pir <= cpu_max_pir); t = pt = &cpu_stacks[pir].cpu; if (t != boot_cpu) { init_cpu_thread(t, state, pir); /* Each cpu gets its own later in init_trace_buffers */ t->trace = boot_cpu->trace; } t->server_no = server_no; t->primary = t; t->node = cpu; t->chip_id = chip_id; t->icp_regs = NULL; /* Will be set later */ t->core_hmi_state = 0; t->core_hmi_state_ptr = &t->core_hmi_state; t->thread_mask = 1; /* Add associativity properties */ add_core_associativity(t); /* Add the decrementer width property */ dt_add_property_cells(cpu, "ibm,dec-bits", dec_bits); /* Adjust max PIR */ if (new_max_pir < (pir + cpu_thread_count - 1)) new_max_pir = pir + cpu_thread_count - 1; /* Iterate threads */ p = dt_find_property(cpu, "ibm,ppc-interrupt-server#s"); if (!p) continue; for (thread = 1; thread < (p->len / 4); thread++) { prlog(PR_TRACE, "CPU: secondary thread %d found\n", thread); t = &cpu_stacks[pir + thread].cpu; init_cpu_thread(t, state, pir + thread); t->trace = boot_cpu->trace; t->server_no = ((const u32 *)p->prop)[thread]; t->is_secondary = true; t->primary = pt; t->node = cpu; t->chip_id = chip_id; t->core_hmi_state_ptr = &pt->core_hmi_state; t->thread_mask = 1 << thread; } prlog(PR_INFO, "CPU: %d secondary threads\n", thread); } cpu_max_pir = new_max_pir; prlog(PR_DEBUG, "CPU: New max PIR set to 0x%x\n", new_max_pir); adjust_cpu_stacks_alloc(); } void cpu_bringup(void) { struct cpu_thread *t; uint32_t count = 0; prlog(PR_INFO, "CPU: Setting up secondary CPU state\n"); op_display(OP_LOG, OP_MOD_CPU, 0x0000); /* Tell everybody to chime in ! */ prlog(PR_INFO, "CPU: Calling in all processors...\n"); cpu_secondary_start = 1; sync(); op_display(OP_LOG, OP_MOD_CPU, 0x0002); for_each_cpu(t) { if (t->state != cpu_state_present && t->state != cpu_state_active) continue; /* Add a callin timeout ? If so, call cpu_remove_node(t). 
*/ while (t->state != cpu_state_active) { smt_lowest(); sync(); } smt_medium(); count++; } prlog(PR_NOTICE, "CPU: All %d processors called in...\n", count); op_display(OP_LOG, OP_MOD_CPU, 0x0003); } void cpu_callin(struct cpu_thread *cpu) { sync(); cpu->state = cpu_state_active; sync(); cpu->job_has_no_return = false; init_hid(); } static void opal_start_thread_job(void *data) { cpu_give_self_os(); /* We do not return, so let's mark the job as * complete */ start_kernel_secondary((uint64_t)data); } static int64_t opal_start_cpu_thread(uint64_t server_no, uint64_t start_address) { struct cpu_thread *cpu; struct cpu_job *job; if (!opal_addr_valid((void *)start_address)) return OPAL_PARAMETER; cpu = find_cpu_by_server(server_no); if (!cpu) { prerror("OPAL: Start invalid CPU 0x%04llx !\n", server_no); return OPAL_PARAMETER; } prlog(PR_DEBUG, "OPAL: Start CPU 0x%04llx (PIR 0x%04x) -> 0x%016llx\n", server_no, cpu->pir, start_address); lock(&reinit_lock); if (!cpu_is_available(cpu)) { unlock(&reinit_lock); prerror("OPAL: CPU not active in OPAL !\n"); return OPAL_WRONG_STATE; } if (cpu->in_reinit) { unlock(&reinit_lock); prerror("OPAL: CPU being reinitialized !\n"); return OPAL_WRONG_STATE; } job = __cpu_queue_job(cpu, "start_thread", opal_start_thread_job, (void *)start_address, true); unlock(&reinit_lock); if (!job) { prerror("OPAL: Failed to create CPU start job !\n"); return OPAL_INTERNAL_ERROR; } return OPAL_SUCCESS; } opal_call(OPAL_START_CPU, opal_start_cpu_thread, 2); static int64_t opal_query_cpu_status(uint64_t server_no, uint8_t *thread_status) { struct cpu_thread *cpu; if (!opal_addr_valid(thread_status)) return OPAL_PARAMETER; cpu = find_cpu_by_server(server_no); if (!cpu) { prerror("OPAL: Query invalid CPU 0x%04llx !\n", server_no); return OPAL_PARAMETER; } if (!cpu_is_available(cpu) && cpu->state != cpu_state_os) { prerror("OPAL: CPU not active in OPAL nor OS !\n"); return OPAL_PARAMETER; } switch(cpu->state) { case cpu_state_os: *thread_status = OPAL_THREAD_STARTED; break; case cpu_state_active: /* Active in skiboot -> inactive in OS */ *thread_status = OPAL_THREAD_INACTIVE; break; default: *thread_status = OPAL_THREAD_UNAVAILABLE; } return OPAL_SUCCESS; } opal_call(OPAL_QUERY_CPU_STATUS, opal_query_cpu_status, 2); static int64_t opal_return_cpu(void) { prlog(PR_DEBUG, "OPAL: Returning CPU 0x%04x\n", this_cpu()->pir); this_cpu()->in_opal_call--; if (this_cpu()->in_opal_call != 0) { printf("OPAL in_opal_call=%u\n", this_cpu()->in_opal_call); } __secondary_cpu_entry(); return OPAL_HARDWARE; /* Should not happen */ } opal_call(OPAL_RETURN_CPU, opal_return_cpu, 0); struct hid0_change_req { uint64_t clr_bits; uint64_t set_bits; }; static void cpu_change_hid0(void *__req) { struct hid0_change_req *req = __req; unsigned long hid0, new_hid0; hid0 = new_hid0 = mfspr(SPR_HID0); new_hid0 &= ~req->clr_bits; new_hid0 |= req->set_bits; prlog(PR_DEBUG, "CPU: [%08x] HID0 change 0x%016lx -> 0x%016lx\n", this_cpu()->pir, hid0, new_hid0); set_hid0(new_hid0); } static int64_t cpu_change_all_hid0(struct hid0_change_req *req) { struct cpu_thread *cpu; for_each_available_cpu(cpu) { if (!cpu_is_thread0(cpu)) continue; if (cpu == this_cpu()) { cpu_change_hid0(req); continue; } cpu_wait_job(cpu_queue_job(cpu, "cpu_change_hid0", cpu_change_hid0, req), true); } return OPAL_SUCCESS; } void cpu_set_radix_mode(void) { struct hid0_change_req req; if (!radix_supported) return; req.clr_bits = 0; req.set_bits = SPR_HID0_POWER9_RADIX; cleanup_global_tlb(); current_radix_mode = true; cpu_change_all_hid0(&req); } 
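/*
 * Editorial note (added comment, not part of the original source): HID0
 * updates such as HILE or radix mode changes must reach every core, so
 * cpu_change_all_hid0() applies cpu_change_hid0() locally and queues it as
 * a job on thread 0 of each other available core, waiting for each job to
 * complete. cpu_set_radix_mode() above is a minimal illustration of the
 * pattern:
 *
 *	struct hid0_change_req req = { .clr_bits = 0,
 *				       .set_bits = SPR_HID0_POWER9_RADIX };
 *	cpu_change_all_hid0(&req);
 */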
static void cpu_cleanup_one(void *param __unused) { mtspr(SPR_AMR, 0); mtspr(SPR_IAMR, 0); } static int64_t cpu_cleanup_all(void) { struct cpu_thread *cpu; for_each_available_cpu(cpu) { if (cpu == this_cpu()) { cpu_cleanup_one(NULL); continue; } cpu_wait_job(cpu_queue_job(cpu, "cpu_cleanup", cpu_cleanup_one, NULL), true); } return OPAL_SUCCESS; } void cpu_fast_reboot_complete(void) { /* Fast reboot will have cleared HID0:HILE */ current_hile_mode = false; /* On P9, restore radix mode */ cpu_set_radix_mode(); } static int64_t opal_reinit_cpus(uint64_t flags) { struct hid0_change_req req = { 0, 0 }; struct cpu_thread *cpu; int64_t rc = OPAL_SUCCESS; int i; prlog(PR_DEBUG, "OPAL: CPU re-init with flags: 0x%llx\n", flags); if (flags & OPAL_REINIT_CPUS_HILE_LE) prlog(PR_NOTICE, "OPAL: Switch to little-endian OS\n"); else if (flags & OPAL_REINIT_CPUS_HILE_BE) prlog(PR_NOTICE, "OPAL: Switch to big-endian OS\n"); again: lock(&reinit_lock); for (cpu = first_cpu(); cpu; cpu = next_cpu(cpu)) { if (cpu == this_cpu() || cpu->in_reinit) continue; if (cpu->state == cpu_state_os) { unlock(&reinit_lock); /* * That might be a race with return CPU during kexec * where we are still, wait a bit and try again */ for (i = 0; (i < 1000) && (cpu->state == cpu_state_os); i++) { time_wait_ms(1); } if (cpu->state == cpu_state_os) { prerror("OPAL: CPU 0x%x not in OPAL !\n", cpu->pir); return OPAL_WRONG_STATE; } goto again; } cpu->in_reinit = true; } /* * Now we need to mark ourselves "active" or we'll be skipped * by the various "for_each_active_..." calls done by slw_reinit() */ this_cpu()->state = cpu_state_active; this_cpu()->in_reinit = true; unlock(&reinit_lock); /* * This cleans up a few things left over by Linux * that can cause problems in cases such as radix->hash * transitions. Ideally Linux should do it but doing it * here works around existing broken kernels. */ cpu_cleanup_all(); /* If HILE change via HID0 is supported ... */ if (hile_supported && (flags & (OPAL_REINIT_CPUS_HILE_BE | OPAL_REINIT_CPUS_HILE_LE))) { bool hile = !!(flags & OPAL_REINIT_CPUS_HILE_LE); flags &= ~(OPAL_REINIT_CPUS_HILE_BE | OPAL_REINIT_CPUS_HILE_LE); if (hile != current_hile_mode) { if (hile) req.set_bits |= hid0_hile; else req.clr_bits |= hid0_hile; current_hile_mode = hile; } } /* If MMU mode change is supported */ if (radix_supported && (flags & (OPAL_REINIT_CPUS_MMU_HASH | OPAL_REINIT_CPUS_MMU_RADIX))) { bool radix = !!(flags & OPAL_REINIT_CPUS_MMU_RADIX); flags &= ~(OPAL_REINIT_CPUS_MMU_HASH | OPAL_REINIT_CPUS_MMU_RADIX); if (radix != current_radix_mode) { if (radix) req.set_bits |= SPR_HID0_POWER9_RADIX; else req.clr_bits |= SPR_HID0_POWER9_RADIX; current_radix_mode = radix; } } /* Cleanup the TLB. We do that unconditionally, this works * around issues where OSes fail to invalidate the PWC in Radix * mode for example. This only works on P9 and later, but we * also know we don't have a problem with Linux cleanups on * P8 so this isn't a problem. If we wanted to cleanup the * TLB on P8 as well, we'd have to use jobs to do it locally * on each CPU. 
*/ cleanup_global_tlb(); /* Apply HID bits changes if any */ if (req.set_bits || req.clr_bits) cpu_change_all_hid0(&req); /* If we have a P7, error out for LE switch, do nothing for BE */ if (proc_gen < proc_gen_p8) { if (flags & OPAL_REINIT_CPUS_HILE_LE) rc = OPAL_UNSUPPORTED; flags &= ~(OPAL_REINIT_CPUS_HILE_BE | OPAL_REINIT_CPUS_HILE_LE); } if (flags & OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED) { flags &= ~OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED; /* * Pending a hostboot change we can't determine the status of * this, so it always fails. */ rc = OPAL_UNSUPPORTED; } /* Handle P8 DD1 SLW reinit */ if (flags != 0 && proc_gen == proc_gen_p8 && !hile_supported) rc = slw_reinit(flags); else if (flags != 0) rc = OPAL_UNSUPPORTED; /* And undo the above */ lock(&reinit_lock); this_cpu()->state = cpu_state_os; for (cpu = first_cpu(); cpu; cpu = next_cpu(cpu)) cpu->in_reinit = false; unlock(&reinit_lock); return rc; } opal_call(OPAL_REINIT_CPUS, opal_reinit_cpus, 1); #define NMMU_XLAT_CTL_PTCR 0xb static int64_t nmmu_set_ptcr(uint64_t chip_id, struct dt_node *node, uint64_t ptcr) { uint32_t nmmu_base_addr; nmmu_base_addr = dt_get_address(node, 0, NULL); return xscom_write(chip_id, nmmu_base_addr + NMMU_XLAT_CTL_PTCR, ptcr); } /* * Setup the the Nest MMU PTCR register for all chips in the system or * the specified chip id. * * The PTCR value may be overwritten so long as all users have been * quiesced. If it is set to an invalid memory address the system will * checkstop if anything attempts to use it. * * Returns OPAL_UNSUPPORTED if no nest mmu was found. */ static int64_t opal_nmmu_set_ptcr(uint64_t chip_id, uint64_t ptcr) { struct dt_node *node; int64_t rc = OPAL_UNSUPPORTED; if (chip_id == -1ULL) dt_for_each_compatible(dt_root, node, "ibm,power9-nest-mmu") { chip_id = dt_get_chip_id(node); if ((rc = nmmu_set_ptcr(chip_id, node, ptcr))) return rc; } else dt_for_each_compatible_on_chip(dt_root, node, "ibm,power9-nest-mmu", chip_id) if ((rc = nmmu_set_ptcr(chip_id, node, ptcr))) return rc; return rc; } opal_call(OPAL_NMMU_SET_PTCR, opal_nmmu_set_ptcr, 2); skiboot-5.10-rc4/core/cpufeatures.c000066400000000000000000000465741324317060200172370ustar00rootroot00000000000000/* Copyright 2017-2018 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * This file deals with setup of /cpus/ibm,powerpc-cpu-features dt */ #include #include #include #include #include #ifdef DEBUG #define DBG(fmt, a...) prlog(PR_DEBUG, "CPUFT: " fmt, ##a) #else #define DBG(fmt, a...) 
#endif /* Device-tree visible constants follow */ #define ISA_V2_07B 2070 #define ISA_V3_0B 3000 #define USABLE_PR (1U << 0) #define USABLE_OS (1U << 1) #define USABLE_HV (1U << 2) #define HV_SUPPORT_HFSCR (1U << 0) #define OS_SUPPORT_FSCR (1U << 0) /* Following are definitions for the match tables, not the DT binding itself */ #define ISA_BASE 0 #define HV_NONE 0 #define HV_CUSTOM 1 #define HV_HFSCR 2 #define OS_NONE 0 #define OS_CUSTOM 1 #define OS_FSCR 2 /* CPU bitmasks for match table */ #define CPU_P8_DD1 (1U << 0) #define CPU_P8_DD2 (1U << 1) #define CPU_P9_DD1 (1U << 2) #define CPU_P9_DD2 (1U << 3) #define CPU_P8 (CPU_P8_DD1|CPU_P8_DD2) #define CPU_P9 (CPU_P9_DD1|CPU_P9_DD2) #define CPU_ALL (CPU_P8|CPU_P9) struct cpu_feature { const char *name; uint32_t cpus_supported; uint32_t isa; uint32_t usable_privilege; uint32_t hv_support; uint32_t os_support; uint32_t hfscr_bit_nr; uint32_t fscr_bit_nr; uint32_t hwcap_bit_nr; const char *dependencies_names; /* space-delimited names */ }; /* * The base (or NULL) cpu feature set is the CPU features available * when no child nodes of the /cpus/ibm,powerpc-cpu-features node exist. The * base feature set is POWER8 (ISAv2.07B), less features that are listed * explicitly. * * XXX: currently, the feature dependencies are not necessarily captured * exactly or completely. This is somewhat acceptable because all * implementations must be aware of all these features. */ static const struct cpu_feature cpu_features_table[] = { /* * Big endian as in ISAv2.07B, MSR_LE=0 */ { "big-endian", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * Little endian as in ISAv2.07B, MSR_LE=1. * * When both big and little endian are defined, there is an LPCR ILE * bit and implementation specific way to switch HILE mode, MSR_SLE, * etc. */ { "little-endian", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * MSR_HV=1 mode as in ISAv2.07B (i.e., hypervisor privileged * instructions and registers). */ { "hypervisor", CPU_ALL, ISA_BASE, USABLE_HV, HV_CUSTOM, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv2.07B interrupt vectors, registers, and control registers * (e.g., AIL, ILE, HV, etc LPCR bits). * * This does not necessarily specify all possible interrupt types. * floating-point, for example requires some ways to handle floating * point exceptions, but the low level details of interrupt handler * is not a dependency there. There will always be *some* interrupt * handler, (and some way to provide memory magagement, etc.). */ { "interrupt-facilities", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, { "smt", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, -1, -1, 14, NULL, }, /* * ISAv2.07B Program Priority Registers (PPR) * PPR and associated control registers (e.g. RPR, PSPB), * priority "or" instructions, etc. */ { "program-priority-register", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv2.07B Book3S Chapter 5.7.9.1. Virtual Page Class Key Protecion * AMR, IAMR, AMOR, UAMOR, etc registers and MMU key bits. 
*/ { "virtual-page-class-key-protection", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv2.07B SAO storage control attribute */ { "strong-access-ordering", CPU_ALL & ~CPU_P9_DD1, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv2.07B no-execute storage control attribute */ { "no-execute", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * Cache inhibited attribute supported on large pages. */ { "cache-inhibited-large-page", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv2.07B Book3S Chapter 8. Debug Facilities * CIEA, CIABR, DEAW, MEte, trace interrupt, etc. * Except CFAR, branch tracing. */ { "debug-facilities", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv2.07B CFAR */ { "come-from-address-register", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, "debug-facilities", }, /* * ISAv2.07B Branch tracing (optional in ISA) */ { "branch-tracing", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, "debug-facilities", }, /* * ISAv2.07B Floating-point Facility */ { "floating-point", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, PPC_BITLSHIFT(63), -1, 27, NULL, }, /* * ISAv2.07B Vector Facility (VMX) */ { "vector", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, PPC_BITLSHIFT(62), -1, 28, "floating-point", }, /* * ISAv2.07B Vector-scalar Facility (VSX) */ { "vector-scalar", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, -1, -1, 7, "vector", }, { "vector-crypto", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, 57, "vector", }, /* * ISAv2.07B Quadword Load and Store instructions * including lqarx/stdqcx. instructions. 
*/ { "quadword-load-store", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv2.07B Binary Coded Decimal (BCD) * BCD fixed point instructions */ { "decimal-integer", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv2.07B Decimal floating-point Facility (DFP) */ { "decimal-floating-point", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, 10, "floating-point", }, /* * ISAv2.07B * DSCR, default data prefetch LPCR, etc */ { "data-stream-control-register", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, PPC_BITLSHIFT(61), PPC_BITLSHIFT(61), 61, NULL, }, /* * ISAv2.07B Branch History Rolling Buffer (BHRB) */ { "branch-history-rolling-buffer", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, PPC_BITLSHIFT(59), -1, -1, NULL, }, /* * ISAv2.07B Transactional Memory Facility (TM or HTM) */ { "transactional-memory", CPU_P8, /* P9 support is not enabled yet */ ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, PPC_BITLSHIFT(58), -1, 62, NULL, }, /* * ISAv3.0B TM additions * TEXASR bit 17, self-induced vs external footprint overflow */ { "transactional-memory-v3", 0, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, "transactional-memory", }, /* * ISAv2.07B Event-Based Branch Facility (EBB) */ { "event-based-branch", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, PPC_BITLSHIFT(56), PPC_BITLSHIFT(56), 60, NULL, }, /* * ISAv2.07B Target Address Register (TAR) */ { "target-address-register", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_CUSTOM, OS_CUSTOM, PPC_BITLSHIFT(55), PPC_BITLSHIFT(55), 58, NULL, }, /* * ISAv2.07B Control Register (CTRL) */ { "control-register", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv2.07B Book3S Chapter 11. Processor Control. * msgsnd, msgsndp, doorbell, etc. * * ISAv3.0B is not compatible (different addressing, HFSCR required * for msgsndp). */ { "processor-control-facility", CPU_P8_DD2, /* P8 DD1 has no dbell */ ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv2.07B PURR, SPURR registers */ { "processor-utilization-of-resources-register", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * POWER8 initiate coprocessor store word indexed (icswx) instruction */ { "coprocessor-icswx", CPU_P8, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv2.07B hash based MMU and all instructions, registers, * data structures, exceptions, etc. */ { "mmu-hash", CPU_P8, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * POWER8 MCE / machine check exception. */ { "machine-check-power8", CPU_P8, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * POWER8 PMU / performance monitor unit. */ { "performance-monitor-power8", CPU_P8, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv2.07B alignment interrupts set DSISR register * * POWER CPUs do not used this, and it's removed from ISAv3.0B. */ { "alignment-interrupt-dsisr", 0, ISA_BASE, USABLE_HV|USABLE_OS, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv2.07B / POWER8 doze, nap, sleep, winkle instructions * XXX: is Linux we using some BookIV specific implementation details * in nap handling? We have no POWER8 specific key here. 
*/ { "idle-nap", CPU_P8, ISA_BASE, USABLE_HV, HV_CUSTOM, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv2.07B wait instruction */ { "wait", CPU_P8, ISA_BASE, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, { "subcore", CPU_P8, ISA_BASE, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, "smt", }, /* * ISAv3.0B radix based MMU */ { "mmu-radix", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv3.0B hash based MMU, new hash pte format, PCTR, etc */ { "mmu-hash-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv3.0B wait instruction */ { "wait-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv3.0B stop idle instructions and registers * XXX: Same question as for idle-nap */ { "idle-stop", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv3.0B Hypervisor Virtualization Interrupt * Also associated system registers, LPCR EE, HEIC, HVICE, * system reset SRR1 reason, etc. */ { "hypervisor-virtualization-interrupt", CPU_P9, ISA_V3_0B, USABLE_HV, HV_CUSTOM, OS_NONE, -1, -1, -1, NULL, }, /* * POWER9 MCE / machine check exception. */ { "machine-check-power9", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * POWER9 PMU / performance monitor unit. */ { "performance-monitor-power9", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_CUSTOM, -1, -1, -1, NULL, }, /* * ISAv3.0B scv/rfscv system call instructions and exceptions, fscr bit * etc. */ { "system-call-vectored", CPU_P9, ISA_V3_0B, USABLE_OS|USABLE_PR, HV_NONE, OS_CUSTOM, -1, PPC_BITLSHIFT(51), -1, NULL, }, /* * ISAv3.0B Book3S Chapter 10. Processor Control. * global msgsnd, msgsndp, msgsync, doorbell, etc. */ { "processor-control-facility-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS, HV_CUSTOM, OS_NONE, PPC_BITLSHIFT(53), -1, -1, NULL, }, /* * ISAv3.0B addpcis instruction */ { "pc-relative-addressing", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv2.07B Book3S Chapter 7. Timer Facilities * TB, VTB, DEC, HDEC, IC, etc registers and exceptions. * Not including PURR or SPURR registers. */ { "timer-facilities", CPU_ALL, ISA_BASE, USABLE_HV|USABLE_OS, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv3.0B Book3S Chapter 7. 
Timer Facilities * Large decrementer and hypervisor decrementer */ { "timer-facilities-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS, HV_NONE, OS_NONE, -1, -1, -1, "timer-facilities", }, /* * ISAv3.0B deliver a random number instruction (darn) */ { "random-number-generator", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv3.0B fixed point instructions and registers * multiply-add, modulo, count trailing zeroes, cmprb, cmpeqb, * extswsli, mfvsrld, mtvsrdd, mtvsrws, addex, CA32, OV32, * mcrxrx, setb */ { "fixed-point-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, { "decimal-integer-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, "fixed-point-v3 decimal-integer", }, /* * ISAv3.0B lightweight mffs */ { "floating-point-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, "floating-point", }, { "decimal-floating-point-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, "floating-point-v3 decimal-floating-point", }, { "vector-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, "vector", }, { "vector-scalar-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, "vector-v3 vector-scalar" }, { "vector-binary128", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, 54, "vector-scalar-v3", }, { "vector-binary16", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, "vector-v3", }, /* * ISAv3.0B external exception for EBB */ { "event-based-branch-v3", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, "event-based-branch", }, /* * ISAv3.0B Atomic Memory Operations (AMO) */ { "atomic-memory-operations", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv3.0B Copy-Paste Facility */ { "copy-paste", CPU_P9, ISA_V3_0B, USABLE_HV|USABLE_OS|USABLE_PR, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, /* * ISAv3.0B GSR SPR register * POWER9 does not implement it */ { "group-start-register", 0, ISA_V3_0B, USABLE_HV|USABLE_OS, HV_NONE, OS_NONE, -1, -1, -1, NULL, }, }; static void add_cpu_feature_nodeps(struct dt_node *features, const struct cpu_feature *f) { struct dt_node *feature; feature = dt_new(features, f->name); assert(feature); dt_add_property_cells(feature, "isa", f->isa); dt_add_property_cells(feature, "usable-privilege", f->usable_privilege); if (f->usable_privilege & USABLE_HV) { if (f->hv_support != HV_NONE) { uint32_t s = 0; if (f->hv_support == HV_HFSCR) s |= HV_SUPPORT_HFSCR; dt_add_property_cells(feature, "hv-support", s); if (f->hfscr_bit_nr != -1) dt_add_property_cells(feature, "hfscr-bit-nr", f->hfscr_bit_nr); } else { assert(f->hfscr_bit_nr == -1); } } if (f->usable_privilege & USABLE_OS) { if (f->os_support != OS_NONE) { uint32_t s = 0; if (f->os_support == OS_FSCR) s |= OS_SUPPORT_FSCR; dt_add_property_cells(feature, "os-support", s); if (f->fscr_bit_nr != -1) dt_add_property_cells(feature, "fscr-bit-nr", f->fscr_bit_nr); } else { assert(f->fscr_bit_nr == -1); } } if (f->usable_privilege & USABLE_PR) { if (f->hwcap_bit_nr != -1) dt_add_property_cells(feature, "hwcap-bit-nr", f->hwcap_bit_nr); } if (f->dependencies_names) dt_add_property(feature, "dependencies", NULL, 0); } static void add_cpufeatures_dependencies(struct dt_node *features) { struct dt_node *feature; dt_for_each_node(features, feature) { const struct 
cpu_feature *f = NULL; const char *deps_names; struct dt_property *deps; int nr_deps; int i; /* Find features with dependencies */ deps = __dt_find_property(feature, "dependencies"); if (!deps) continue; /* Find the matching cpu table */ for (i = 0; i < ARRAY_SIZE(cpu_features_table); i++) { f = &cpu_features_table[i]; if (!strcmp(f->name, feature->name)) break; } assert(f); assert(f->dependencies_names); /* * Count number of depended features and allocate space * for phandles in the property. */ deps_names = f->dependencies_names; nr_deps = strcount(deps_names, " ") + 1; dt_resize_property(&deps, nr_deps * sizeof(u32)); deps->len = nr_deps * sizeof(u32); DBG("feature %s has %d dependencies (%s)\n", f->name, nr_deps, deps_names); /* * For each one, find the depended feature then advance to * next name. */ for (i = 0; i < nr_deps; i++) { struct dt_node *dep; int len; if (nr_deps - i == 1) len = strlen(deps_names); else len = strchr(deps_names, ' ') - deps_names; dt_for_each_node(features, dep) { if (!strncmp(deps_names, dep->name, len)) goto found_dep; } prlog(PR_ERR, "CPUFT: feature %s dependencies not found\n", f->name); break; found_dep: DBG(" %s found dep (%s)\n", f->name, dep->name); dt_property_set_cell(deps, i, dep->phandle); /* Advance over the name + delimiter */ deps_names += len + 1; } } } static void add_cpufeatures(struct dt_node *cpus, uint32_t cpu_feature_isa, uint32_t cpu_feature_cpu, const char *cpu_name) { struct dt_node *features; int i; DBG("creating cpufeatures for cpu:%d isa:%d\n", cpu_feature_cpu, cpu_feature_isa); features = dt_new(cpus, "ibm,powerpc-cpu-features"); assert(features); dt_add_property_cells(features, "isa", cpu_feature_isa); dt_add_property_string(features, "device_type", "cpu-features"); dt_add_property_string(features, "compatible", "ibm,powerpc-cpu-features"); dt_add_property_string(features, "display-name", cpu_name); /* add without dependencies */ for (i = 0; i < ARRAY_SIZE(cpu_features_table); i++) { const struct cpu_feature *f = &cpu_features_table[i]; if (f->cpus_supported & cpu_feature_cpu) { DBG(" '%s'\n", f->name); add_cpu_feature_nodeps(features, f); } } /* dependency construction pass */ add_cpufeatures_dependencies(features); } void dt_add_cpufeatures(struct dt_node *root) { int version; uint32_t cpu_feature_isa = 0; uint32_t cpu_feature_cpu = 0; struct dt_node *cpus; const char *cpu_name = NULL; version = mfspr(SPR_PVR); switch(PVR_TYPE(version)) { case PVR_TYPE_P8: if (!cpu_name) cpu_name = "POWER8"; /* fallthrough */ case PVR_TYPE_P8E: if (!cpu_name) cpu_name = "POWER8E"; /* fallthrough */ case PVR_TYPE_P8NVL: if (!cpu_name) cpu_name = "POWER8NVL"; cpu_feature_isa = ISA_V2_07B; if (PVR_VERS_MAJ(version) == 1) cpu_feature_cpu = CPU_P8_DD1; else cpu_feature_cpu = CPU_P8_DD2; break; case PVR_TYPE_P9: if (!cpu_name) cpu_name = "POWER9"; cpu_feature_isa = ISA_V3_0B; if (is_power9n(version) && (PVR_VERS_MAJ(version) == 1)) { /* P9N DD1 */ cpu_feature_cpu = CPU_P9_DD1; } else if (is_power9n(version) && (PVR_VERS_MAJ(version) == 2)) { /* P9N DD2.x */ cpu_feature_cpu = CPU_P9_DD2; } else { assert(0); } break; default: return; } cpus = dt_new_check(root, "cpus"); add_cpufeatures(cpus, cpu_feature_isa, cpu_feature_cpu, cpu_name); } skiboot-5.10-rc4/core/device.c000066400000000000000000000607001324317060200161330ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include /* Used to give unique handles. */ u32 last_phandle = 0; struct dt_node *dt_root; struct dt_node *dt_chosen; static const char *take_name(const char *name) { if (!is_rodata(name) && !(name = strdup(name))) { prerror("Failed to allocate copy of name"); abort(); } return name; } static void free_name(const char *name) { if (!is_rodata(name)) free((char *)name); } static struct dt_node *new_node(const char *name) { struct dt_node *node = malloc(sizeof *node); if (!node) { prerror("Failed to allocate node\n"); abort(); } node->name = take_name(name); node->parent = NULL; list_head_init(&node->properties); list_head_init(&node->children); /* FIXME: locking? */ node->phandle = new_phandle(); return node; } struct dt_node *dt_new_root(const char *name) { return new_node(name); } static const char *get_unitname(const struct dt_node *node) { const char *c = strchr(node->name, '@'); if (!c) return NULL; return c + 1; } int dt_cmp_subnodes(const struct dt_node *a, const struct dt_node *b) { const char *a_unit = get_unitname(a); const char *b_unit = get_unitname(b); ptrdiff_t basenamelen = a_unit - a->name; /* sort hex unit addresses by number */ if (a_unit && b_unit && !strncmp(a->name, b->name, basenamelen)) { unsigned long long a_num, b_num; char *a_end, *b_end; a_num = strtoul(a_unit, &a_end, 16); b_num = strtoul(b_unit, &b_end, 16); /* only compare if the unit addr parsed correctly */ if (*a_end == 0 && *b_end == 0) return (a_num > b_num) - (a_num < b_num); } return strcmp(a->name, b->name); } bool dt_attach_root(struct dt_node *parent, struct dt_node *root) { struct dt_node *node; assert(!root->parent); if (list_empty(&parent->children)) { list_add(&parent->children, &root->list); root->parent = parent; return true; } dt_for_each_child(parent, node) { int cmp = dt_cmp_subnodes(node, root); /* Look for duplicates */ if (cmp == 0) { prerror("DT: %s failed, duplicate %s\n", __func__, root->name); return false; } /* insert before the first node that's larger * the the node we're inserting */ if (cmp > 0) break; } list_add_before(&parent->children, &root->list, &node->list); root->parent = parent; return true; } static inline void dt_destroy(struct dt_node *dn) { if (!dn) return; free_name(dn->name); free(dn); } struct dt_node *dt_new(struct dt_node *parent, const char *name) { struct dt_node *new; assert(parent); new = new_node(name); if (!dt_attach_root(parent, new)) { dt_destroy(new); return NULL; } return new; } /* * low level variant, we export this because there are "weird" address * formats, such as LPC/ISA bus addresses which have a letter to identify * which bus space the address is inside of. 
*/ struct dt_node *__dt_find_by_name_addr(struct dt_node *parent, const char *name, const char *addr) { struct dt_node *node; if (list_empty(&parent->children)) return NULL; dt_for_each_child(parent, node) { const char *unit = get_unitname(node); int len; if (!unit) continue; /* match the name */ len = (int) (unit - node->name) - 1; if (strncmp(node->name, name, len)) continue; /* match the unit */ if (strcmp(unit, addr) == 0) return node; } dt_for_each_child(parent, node) { struct dt_node *ret = __dt_find_by_name_addr(node, name, addr); if (ret) return ret; } return NULL; } struct dt_node *dt_find_by_name_addr(struct dt_node *parent, const char *name, uint64_t addr) { char addr_str[16 + 1]; /* max size of a 64bit int */ snprintf(addr_str, sizeof(addr_str), "%" PRIx64, addr); return __dt_find_by_name_addr(parent, name, addr_str); } struct dt_node *dt_new_addr(struct dt_node *parent, const char *name, uint64_t addr) { char *lname; struct dt_node *new; size_t len; assert(parent); len = strlen(name) + STR_MAX_CHARS(addr) + 2; lname = malloc(len); if (!lname) return NULL; snprintf(lname, len, "%s@%llx", name, (long long)addr); new = new_node(lname); free(lname); if (!dt_attach_root(parent, new)) { dt_destroy(new); return NULL; } return new; } struct dt_node *dt_new_2addr(struct dt_node *parent, const char *name, uint64_t addr0, uint64_t addr1) { char *lname; struct dt_node *new; size_t len; assert(parent); len = strlen(name) + 2*STR_MAX_CHARS(addr0) + 3; lname = malloc(len); if (!lname) return NULL; snprintf(lname, len, "%s@%llx,%llx", name, (long long)addr0, (long long)addr1); new = new_node(lname); free(lname); if (!dt_attach_root(parent, new)) { dt_destroy(new); return NULL; } return new; } static struct dt_node *__dt_copy(struct dt_node *node, struct dt_node *parent, bool root) { struct dt_property *prop, *new_prop; struct dt_node *new_node, *child; new_node = dt_new(parent, node->name); if (!new_node) return NULL; list_for_each(&node->properties, prop, list) { new_prop = dt_add_property(new_node, prop->name, prop->prop, prop->len); if (!new_prop) goto fail; } list_for_each(&node->children, child, list) { child = __dt_copy(child, new_node, false); if (!child) goto fail; } return new_node; fail: /* dt_free will recurse for us, so only free when we unwind to the * top-level failure */ if (root) dt_free(new_node); return NULL; } struct dt_node *dt_copy(struct dt_node *node, struct dt_node *parent) { return __dt_copy(node, parent, true); } char *dt_get_path(const struct dt_node *node) { unsigned int len = 0; const struct dt_node *n; char *path, *p; /* Dealing with NULL is for test/debug purposes */ if (!node) return strdup(""); for (n = node; n; n = n->parent) { len += strlen(n->name); if (n->parent || n == node) len++; } path = zalloc(len + 1); assert(path); p = path + len; for (n = node; n; n = n->parent) { len = strlen(n->name); p -= len; memcpy(p, n->name, len); if (n->parent || n == node) *(--p) = '/'; } assert(p == path); return p; } static const char *__dt_path_split(const char *p, const char **namep, unsigned int *namel, const char **addrp, unsigned int *addrl) { const char *at, *sl; *namel = *addrl = 0; /* Skip initial '/' */ while (*p == '/') p++; /* Check empty path */ if (*p == 0) return p; at = strchr(p, '@'); sl = strchr(p, '/'); if (sl == NULL) sl = p + strlen(p); if (sl < at) at = NULL; if (at) { *addrp = at + 1; *addrl = sl - at - 1; } *namep = p; *namel = at ? 
(at - p) : (sl - p); return sl; } struct dt_node *dt_find_by_path(struct dt_node *root, const char *path) { struct dt_node *n; const char *pn, *pa, *p = path, *nn, *na; unsigned int pnl, pal, nnl, nal; bool match; /* Walk path components */ while (*p) { /* Extract next path component */ p = __dt_path_split(p, &pn, &pnl, &pa, &pal); if (pnl == 0 && pal == 0) break; /* Compare with each child node */ match = false; list_for_each(&root->children, n, list) { match = true; __dt_path_split(n->name, &nn, &nnl, &na, &nal); if (pnl && (pnl != nnl || strncmp(pn, nn, pnl))) match = false; if (pal && (pal != nal || strncmp(pa, na, pal))) match = false; if (match) { root = n; break; } } /* No child match */ if (!match) return NULL; } return root; } struct dt_node *dt_find_by_name(struct dt_node *root, const char *name) { struct dt_node *child, *match; list_for_each(&root->children, child, list) { if (!strcmp(child->name, name)) return child; match = dt_find_by_name(child, name); if (match) return match; } return NULL; } struct dt_node *dt_new_check(struct dt_node *parent, const char *name) { struct dt_node *node = dt_find_by_name(parent, name); if (!node) { node = dt_new(parent, name); assert(node); } return node; } struct dt_node *dt_find_by_phandle(struct dt_node *root, u32 phandle) { struct dt_node *node; dt_for_each_node(root, node) if (node->phandle == phandle) return node; return NULL; } static struct dt_property *new_property(struct dt_node *node, const char *name, size_t size) { struct dt_property *p = malloc(sizeof(*p) + size); char *path; if (!p) { path = dt_get_path(node); prerror("Failed to allocate property \"%s\" for %s of %zu bytes\n", name, path, size); free(path); abort(); } if (dt_find_property(node, name)) { path = dt_get_path(node); prerror("Duplicate property \"%s\" in node %s\n", name, path); free(path); abort(); } p->name = take_name(name); p->len = size; list_add_tail(&node->properties, &p->list); return p; } struct dt_property *dt_add_property(struct dt_node *node, const char *name, const void *val, size_t size) { struct dt_property *p; /* * Filter out phandle properties, we re-generate them * when flattening */ if (strcmp(name, "linux,phandle") == 0 || strcmp(name, "phandle") == 0) { assert(size == 4); node->phandle = *(const u32 *)val; if (node->phandle >= last_phandle) set_last_phandle(node->phandle); return NULL; } p = new_property(node, name, size); if (size) memcpy(p->prop, val, size); return p; } void dt_resize_property(struct dt_property **prop, size_t len) { size_t new_len = sizeof(**prop) + len; *prop = realloc(*prop, new_len); /* Fix up linked lists in case we moved. (note: not an empty list). */ (*prop)->list.next->prev = &(*prop)->list; (*prop)->list.prev->next = &(*prop)->list; } struct dt_property *dt_add_property_string(struct dt_node *node, const char *name, const char *value) { return dt_add_property(node, name, value, strlen(value)+1); } struct dt_property *dt_add_property_nstr(struct dt_node *node, const char *name, const char *value, unsigned int vlen) { struct dt_property *p; char *tmp = zalloc(vlen + 1); if (!tmp) return NULL; strncpy(tmp, value, vlen); p = dt_add_property(node, name, tmp, strlen(tmp)+1); free(tmp); return p; } struct dt_property *__dt_add_property_cells(struct dt_node *node, const char *name, int count, ...) 
{ struct dt_property *p; u32 *val; unsigned int i; va_list args; p = new_property(node, name, count * sizeof(u32)); val = (u32 *)p->prop; va_start(args, count); for (i = 0; i < count; i++) val[i] = cpu_to_fdt32(va_arg(args, u32)); va_end(args); return p; } struct dt_property *__dt_add_property_u64s(struct dt_node *node, const char *name, int count, ...) { struct dt_property *p; u64 *val; unsigned int i; va_list args; p = new_property(node, name, count * sizeof(u64)); val = (u64 *)p->prop; va_start(args, count); for (i = 0; i < count; i++) val[i] = cpu_to_fdt64(va_arg(args, u64)); va_end(args); return p; } struct dt_property *__dt_add_property_strings(struct dt_node *node, const char *name, int count, ...) { struct dt_property *p; unsigned int i, size; va_list args; const char *sstr; char *s; va_start(args, count); for (i = size = 0; i < count; i++) { sstr = va_arg(args, const char *); if (sstr) size += strlen(sstr) + 1; } va_end(args); if (!size) size = 1; p = new_property(node, name, size); s = (char *)p->prop; *s = 0; va_start(args, count); for (i = 0; i < count; i++) { sstr = va_arg(args, const char *); if (sstr) { strcpy(s, sstr); s = s + strlen(sstr) + 1; } } va_end(args); return p; } void dt_del_property(struct dt_node *node, struct dt_property *prop) { list_del_from(&node->properties, &prop->list); free_name(prop->name); free(prop); } u32 dt_property_get_cell(const struct dt_property *prop, u32 index) { assert(prop->len >= (index+1)*sizeof(u32)); /* Always aligned, so this works. */ return fdt32_to_cpu(((const u32 *)prop->prop)[index]); } void dt_property_set_cell(struct dt_property *prop, u32 index, u32 val) { assert(prop->len >= (index+1)*sizeof(u32)); /* Always aligned, so this works. */ ((u32 *)prop->prop)[index] = cpu_to_fdt32(val); } /* First child of this node. */ struct dt_node *dt_first(const struct dt_node *root) { return list_top(&root->children, struct dt_node, list); } /* Return next node, or NULL. */ struct dt_node *dt_next(const struct dt_node *root, const struct dt_node *prev) { if (!prev) { struct dt_node *first = dt_first(root); if (!first) return NULL; else return first; } /* Children? */ if (!list_empty(&prev->children)) return dt_first(prev); do { /* More siblings? */ if (prev->list.next != &prev->parent->children.n) return list_entry(prev->list.next, struct dt_node,list); /* No more siblings, move up to parent. 
*/ prev = prev->parent; } while (prev != root); return NULL; } struct dt_property *__dt_find_property(struct dt_node *node, const char *name) { struct dt_property *i; list_for_each(&node->properties, i, list) if (strcmp(i->name, name) == 0) return i; return NULL; } const struct dt_property *dt_find_property(const struct dt_node *node, const char *name) { const struct dt_property *i; list_for_each(&node->properties, i, list) if (strcmp(i->name, name) == 0) return i; return NULL; } void dt_check_del_prop(struct dt_node *node, const char *name) { struct dt_property *p; p = __dt_find_property(node, name); if (p) dt_del_property(node, p); } const struct dt_property *dt_require_property(const struct dt_node *node, const char *name, int wanted_len) { const struct dt_property *p = dt_find_property(node, name); if (!p) { const char *path = dt_get_path(node); prerror("DT: Missing required property %s/%s\n", path, name); assert(false); } if (wanted_len >= 0 && p->len != wanted_len) { const char *path = dt_get_path(node); prerror("DT: Unexpected property length %s/%s\n", path, name); prerror("DT: Expected len: %d got len: %zu\n", wanted_len, p->len); assert(false); } return p; } bool dt_has_node_property(const struct dt_node *node, const char *name, const char *val) { const struct dt_property *p = dt_find_property(node, name); if (!p) return false; if (!val) return true; return p->len == strlen(val) + 1 && memcmp(p->prop, val, p->len) == 0; } bool dt_prop_find_string(const struct dt_property *p, const char *s) { const char *c, *end; if (!p) return false; c = p->prop; end = c + p->len; while(c < end) { if (!strcasecmp(s, c)) return true; c += strlen(c) + 1; } return false; } bool dt_node_is_compatible(const struct dt_node *node, const char *compat) { const struct dt_property *p = dt_find_property(node, "compatible"); return dt_prop_find_string(p, compat); } struct dt_node *dt_find_compatible_node(struct dt_node *root, struct dt_node *prev, const char *compat) { struct dt_node *node = prev; while ((node = dt_next(root, node))) if (dt_node_is_compatible(node, compat)) return node; return NULL; } u64 dt_prop_get_u64(const struct dt_node *node, const char *prop) { const struct dt_property *p = dt_require_property(node, prop, 8); return ((u64)dt_property_get_cell(p, 0) << 32) | dt_property_get_cell(p, 1); } u64 dt_prop_get_u64_def(const struct dt_node *node, const char *prop, u64 def) { const struct dt_property *p = dt_find_property(node, prop); if (!p) return def; return ((u64)dt_property_get_cell(p, 0) << 32) | dt_property_get_cell(p, 1); } u32 dt_prop_get_u32(const struct dt_node *node, const char *prop) { const struct dt_property *p = dt_require_property(node, prop, 4); return dt_property_get_cell(p, 0); } u32 dt_prop_get_u32_def(const struct dt_node *node, const char *prop, u32 def) { const struct dt_property *p = dt_find_property(node, prop); if (!p) return def; return dt_property_get_cell(p, 0); } const void *dt_prop_get(const struct dt_node *node, const char *prop) { const struct dt_property *p = dt_require_property(node, prop, -1); return p->prop; } const void *dt_prop_get_def(const struct dt_node *node, const char *prop, void *def) { const struct dt_property *p = dt_find_property(node, prop); return p ? p->prop : def; } const void *dt_prop_get_def_size(const struct dt_node *node, const char *prop, void *def, size_t *len) { const struct dt_property *p = dt_find_property(node, prop); *len = 0; if (p) *len = p->len; return p ? 
p->prop : def; } u32 dt_prop_get_cell(const struct dt_node *node, const char *prop, u32 cell) { const struct dt_property *p = dt_require_property(node, prop, -1); return dt_property_get_cell(p, cell); } u32 dt_prop_get_cell_def(const struct dt_node *node, const char *prop, u32 cell, u32 def) { const struct dt_property *p = dt_find_property(node, prop); if (!p) return def; return dt_property_get_cell(p, cell); } void dt_free(struct dt_node *node) { struct dt_node *child; struct dt_property *p; while ((child = list_top(&node->children, struct dt_node, list))) dt_free(child); while ((p = list_pop(&node->properties, struct dt_property, list))) { free_name(p->name); free(p); } if (node->parent) list_del_from(&node->parent->children, &node->list); dt_destroy(node); } int dt_expand_node(struct dt_node *node, const void *fdt, int fdt_node) { const struct fdt_property *prop; int offset, nextoffset, err; struct dt_node *child; const char *name; uint32_t tag; if (((err = fdt_check_header(fdt)) != 0) || ((err = _fdt_check_node_offset(fdt, fdt_node)) < 0)) { prerror("FDT: Error %d parsing node 0x%x\n", err, fdt_node); return -1; } nextoffset = err; do { offset = nextoffset; tag = fdt_next_tag(fdt, offset, &nextoffset); switch (tag) { case FDT_PROP: prop = _fdt_offset_ptr(fdt, offset); name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff)); dt_add_property(node, name, prop->data, fdt32_to_cpu(prop->len)); break; case FDT_BEGIN_NODE: name = fdt_get_name(fdt, offset, NULL); child = dt_new_root(name); assert(child); nextoffset = dt_expand_node(child, fdt, offset); /* * This may fail in case of duplicate, keep it * going for now, we may ultimately want to * assert */ if (!dt_attach_root(node, child)) /** * @fwts-label DTHasDuplicateNodeID * @fwts-advice OPAL will parse the Flattened * Device Tree(FDT), which can be generated * from different firmware sources. During * expansion of FDT, OPAL observed a node * assigned multiple times (a duplicate). This * indicates either a Hostboot bug *OR*, more * likely, a bug in the platform XML. Check * the platform XML for duplicate IDs for * this type of device. Because of this * duplicate node, OPAL won't add the hardware * device found with a duplicate node ID into * DT, rendering the corresponding device not * functional. 
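* The prlog() that follows names the duplicated node, which is normally
* enough to locate the offending entry.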
*/ prlog(PR_ERR, "DT: Found duplicate node: %s\n", child->name); break; case FDT_END: return -1; } } while (tag != FDT_END_NODE); return nextoffset; } void dt_expand(const void *fdt) { prlog(PR_DEBUG, "FDT: Parsing fdt @%p\n", fdt); if (dt_expand_node(dt_root, fdt, 0) < 0) abort(); } u64 dt_get_number(const void *pdata, unsigned int cells) { const u32 *p = pdata; u64 ret = 0; while(cells--) ret = (ret << 32) | be32_to_cpu(*(p++)); return ret; } u32 dt_n_address_cells(const struct dt_node *node) { if (!node->parent) return 0; return dt_prop_get_u32_def(node->parent, "#address-cells", 2); } u32 dt_n_size_cells(const struct dt_node *node) { if (!node->parent) return 0; return dt_prop_get_u32_def(node->parent, "#size-cells", 1); } u64 dt_get_address(const struct dt_node *node, unsigned int index, u64 *out_size) { const struct dt_property *p; u32 na = dt_n_address_cells(node); u32 ns = dt_n_size_cells(node); u32 pos, n; p = dt_require_property(node, "reg", -1); n = (na + ns) * sizeof(u32); pos = n * index; assert((pos + n) <= p->len); if (out_size) *out_size = dt_get_number(p->prop + pos + na * sizeof(u32), ns); return dt_get_number(p->prop + pos, na); } static u32 __dt_get_chip_id(const struct dt_node *node) { const struct dt_property *prop; for (; node; node = node->parent) { prop = dt_find_property(node, "ibm,chip-id"); if (prop) return dt_property_get_cell(prop, 0); } return 0xffffffff; } u32 dt_get_chip_id(const struct dt_node *node) { u32 id = __dt_get_chip_id(node); assert(id != 0xffffffff); return id; } struct dt_node *dt_find_compatible_node_on_chip(struct dt_node *root, struct dt_node *prev, const char *compat, uint32_t chip_id) { struct dt_node *node = prev; while ((node = dt_next(root, node))) { u32 cid = __dt_get_chip_id(node); if (cid == chip_id && dt_node_is_compatible(node, compat)) return node; } return NULL; } unsigned int dt_count_addresses(const struct dt_node *node) { const struct dt_property *p; u32 na = dt_n_address_cells(node); u32 ns = dt_n_size_cells(node); u32 n; p = dt_require_property(node, "reg", -1); n = (na + ns) * sizeof(u32); if (n == 0) return 0; return p->len / n; } /* Translates an address from the given bus into its parent's address space */ static u64 dt_translate_one(const struct dt_node *bus, u64 addr) { u32 ranges_count, na, ns, parent_na; const struct dt_property *p; const u32 *ranges; int i, stride; assert(bus->parent); na = dt_prop_get_u32_def(bus, "#address-cells", 2); ns = dt_prop_get_u32_def(bus, "#size-cells", 2); parent_na = dt_n_address_cells(bus); stride = na + ns + parent_na; /* * FIXME: We should handle arbitrary length addresses, rather than * limiting it to 64bit. 
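* (Worked example, assumed values: with na = 2, ns = 2 and parent_na = 2,
* each ranges entry below is six cells, <child_hi child_lo parent_hi
* parent_lo size_hi size_lo>; an address inside [child_base, child_base +
* size) is rebased onto parent_base, and anything unmatched falls through
* to the return of 0 at the end.)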
If someone wants/needs that they * can implement the bignum math for it :) */ assert(na <= 2); assert(parent_na <= 2); /* We should never be trying to translate an address without a ranges */ p = dt_require_property(bus, "ranges", -1); ranges = (u32 *) &p->prop; ranges_count = (p->len / 4) / (na + parent_na + ns); /* An empty ranges property implies 1-1 translation */ if (ranges_count == 0) return addr; for (i = 0; i < ranges_count; i++, ranges += stride) { /* ranges format: <child base> <parent base> <size> */ u64 child_base = dt_get_number(ranges, na); u64 parent_base = dt_get_number(ranges + na, parent_na); u64 size = dt_get_number(ranges + na + parent_na, ns); if (addr >= child_base && addr < child_base + size) return (addr - child_base) + parent_base; } /* input address was outside any of our mapped ranges */ return 0; } u64 dt_translate_address(const struct dt_node *node, unsigned int index, u64 *out_size) { u64 addr = dt_get_address(node, index, NULL); struct dt_node *bus = node->parent; /* FIXME: One day we will probably want to use this, but for now just * force it to be zero since we only support returning a u64 or u32 */ assert(!out_size); /* apply each translation until we hit the root bus */ while (bus->parent) { addr = dt_translate_one(bus, addr); bus = bus->parent; } return addr; } bool dt_node_is_enabled(struct dt_node *node) { const struct dt_property *p = dt_find_property(node, "status"); if (!p) return true; return p->len > 1 && p->prop[0] == 'o' && p->prop[1] == 'k'; } /* * Function to fixup the phandle in the subtree. */ void dt_adjust_subtree_phandle(struct dt_node *dev, const char** (get_properties_to_fix)(struct dt_node *n)) { struct dt_node *node; struct dt_property *prop; u32 phandle, max_phandle = 0, import_phandle = new_phandle(); const char **name; dt_for_each_node(dev, node) { const char **props_to_update; node->phandle += import_phandle; /* * calculate max_phandle(new_tree), needed to update * last_phandle. */ if (node->phandle >= max_phandle) max_phandle = node->phandle; props_to_update = get_properties_to_fix(node); if (!props_to_update) continue; for (name = props_to_update; *name != NULL; name++) { prop = __dt_find_property(node, *name); if (!prop) continue; phandle = dt_prop_get_u32(node, *name); phandle += import_phandle; memcpy((char *)&prop->prop, &phandle, prop->len); } } set_last_phandle(max_phandle); } skiboot-5.10-rc4/core/direct-controls.c000066400000000000000000000501741324317060200200130ustar00rootroot00000000000000/* Copyright 2017 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #include #include #include /**************** mambo direct controls ****************/ extern unsigned long callthru_tcl(const char *str, int len); static void mambo_sreset_cpu(struct cpu_thread *cpu) { uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t thread_id = pir_to_thread_id(cpu->pir); char tcl_cmd[50]; snprintf(tcl_cmd, sizeof(tcl_cmd), "mysim cpu 0:%i:%i start_thread 0x100", core_id, thread_id); callthru_tcl(tcl_cmd, strlen(tcl_cmd)); } static void mambo_stop_cpu(struct cpu_thread *cpu) { uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t thread_id = pir_to_thread_id(cpu->pir); char tcl_cmd[50]; snprintf(tcl_cmd, sizeof(tcl_cmd), "mysim cpu 0:%i:%i stop_thread", core_id, thread_id); callthru_tcl(tcl_cmd, strlen(tcl_cmd)); } /**************** POWER8 direct controls ****************/ #define P8_EX_TCTL_DIRECT_CONTROLS(t) (0x10013000 + (t) * 0x10) #define P8_DIRECT_CTL_STOP PPC_BIT(63) #define P8_DIRECT_CTL_PRENAP PPC_BIT(47) #define P8_DIRECT_CTL_SRESET PPC_BIT(60) static int p8_core_set_special_wakeup(struct cpu_thread *cpu) { uint64_t val, poll_target, stamp; uint32_t core_id; int rc; /* * Note: HWP checks for checkstops, but I assume we don't need to * as we wouldn't be running if one was present */ /* Grab core ID once */ core_id = pir_to_core_id(cpu->pir); prlog(PR_DEBUG, "RESET Waking up core 0x%x\n", core_id); /* * The original HWp reads the XSCOM first but ignores the result * and error, let's do the same until I know for sure that is * not necessary */ xscom_read(cpu->chip_id, XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP), &val); /* Then we write special wakeup */ rc = xscom_write(cpu->chip_id, XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP), PPC_BIT(0)); if (rc) { prerror("RESET: XSCOM error %d asserting special" " wakeup on 0x%x\n", rc, cpu->pir); return rc; } /* * HWP uses the history for Perf register here, dunno why it uses * that one instead of the pHyp one, maybe to avoid clobbering it... * * In any case, it does that to check for run/nap vs.sleep/winkle/other * to decide whether to poll on checkstop or not. Since we don't deal * with checkstop conditions here, we ignore that part. */ /* * Now poll for completion of special wakeup. The HWP is nasty here, * it will poll at 5ms intervals for up to 200ms. This is not quite * acceptable for us at runtime, at least not until we have the * ability to "context switch" HBRT. In practice, because we don't * winkle, it will never take that long, so we increase the polling * frequency to 1us per poll. However we do have to keep the same * timeout. * * We don't use time_wait_ms() either for now as we don't want to * poll the FSP here. */ stamp = mftb(); poll_target = stamp + msecs_to_tb(200); val = 0; while (!(val & EX_PM_GP0_SPECIAL_WAKEUP_DONE)) { /* Wait 1 us */ time_wait_us(1); /* Read PM state */ rc = xscom_read(cpu->chip_id, XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_GP0), &val); if (rc) { prerror("RESET: XSCOM error %d reading PM state on" " 0x%x\n", rc, cpu->pir); return rc; } /* Check timeout */ if (mftb() > poll_target) break; } /* Success ? */ if (val & EX_PM_GP0_SPECIAL_WAKEUP_DONE) { uint64_t now = mftb(); prlog(PR_TRACE, "RESET: Special wakeup complete after %ld us\n", tb_to_usecs(now - stamp)); return 0; } /* * We timed out ... * * HWP has a complex workaround for HW255321 which affects * Murano DD1 and Venice DD1. 
Ignore that for now * * Instead we just dump some XSCOMs for error logging */ prerror("RESET: Timeout on special wakeup of 0x%0x\n", cpu->pir); prerror("RESET: PM0 = 0x%016llx\n", val); val = -1; xscom_read(cpu->chip_id, XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP), &val); prerror("RESET: SPC_WKUP = 0x%016llx\n", val); val = -1; xscom_read(cpu->chip_id, XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_IDLE_STATE_HISTORY_PHYP), &val); prerror("RESET: HISTORY = 0x%016llx\n", val); return OPAL_HARDWARE; } static int p8_core_clear_special_wakeup(struct cpu_thread *cpu) { uint64_t val; uint32_t core_id; int rc; /* * Note: HWP checks for checkstops, but I assume we don't need to * as we wouldn't be running if one was present */ /* Grab core ID once */ core_id = pir_to_core_id(cpu->pir); prlog(PR_DEBUG, "RESET: Releasing core 0x%x wakeup\n", core_id); /* * The original HWp reads the XSCOM first but ignores the result * and error, let's do the same until I know for sure that is * not necessary */ xscom_read(cpu->chip_id, XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP), &val); /* Then we write special wakeup */ rc = xscom_write(cpu->chip_id, XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP), 0); if (rc) { prerror("RESET: XSCOM error %d deasserting" " special wakeup on 0x%x\n", rc, cpu->pir); return rc; } /* * The original HWp reads the XSCOM again with the comment * "This puts an inherent delay in the propagation of the reset * transition" */ xscom_read(cpu->chip_id, XSCOM_ADDR_P8_EX_SLAVE(core_id, EX_PM_SPECIAL_WAKEUP_PHYP), &val); return 0; } static int p8_stop_thread(struct cpu_thread *cpu) { uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t chip_id = pir_to_chip_id(cpu->pir); uint32_t thread_id = pir_to_thread_id(cpu->pir); uint32_t xscom_addr; xscom_addr = XSCOM_ADDR_P8_EX(core_id, P8_EX_TCTL_DIRECT_CONTROLS(thread_id)); if (xscom_write(chip_id, xscom_addr, P8_DIRECT_CTL_STOP)) { prlog(PR_ERR, "Could not stop thread %u:%u:%u:" " Unable to write EX_TCTL_DIRECT_CONTROLS.\n", chip_id, core_id, thread_id); return OPAL_HARDWARE; } return OPAL_SUCCESS; } static int p8_sreset_thread(struct cpu_thread *cpu) { uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t chip_id = pir_to_chip_id(cpu->pir); uint32_t thread_id = pir_to_thread_id(cpu->pir); uint32_t xscom_addr; xscom_addr = XSCOM_ADDR_P8_EX(core_id, P8_EX_TCTL_DIRECT_CONTROLS(thread_id)); if (xscom_write(chip_id, xscom_addr, P8_DIRECT_CTL_PRENAP)) { prlog(PR_ERR, "Could not prenap thread %u:%u:%u:" " Unable to write EX_TCTL_DIRECT_CONTROLS.\n", chip_id, core_id, thread_id); return OPAL_HARDWARE; } if (xscom_write(chip_id, xscom_addr, P8_DIRECT_CTL_SRESET)) { prlog(PR_ERR, "Could not sreset thread %u:%u:%u:" " Unable to write EX_TCTL_DIRECT_CONTROLS.\n", chip_id, core_id, thread_id); return OPAL_HARDWARE; } return OPAL_SUCCESS; } /**************** POWER9 direct controls ****************/ #define P9_RAS_STATUS 0x10a02 #define P9_THREAD_QUIESCED(t) PPC_BITMASK(0 + 8*(t), 3 + 8*(t)) /* Long running instructions may take time to complete. 
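The stop and sreset paths below poll the per-thread QUIESCED bits in RAS_STATUS at the interval below until the thread reports quiesced.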
Timeout 100ms */ #define P9_QUIESCE_POLL_INTERVAL 100 #define P9_QUIESCE_TIMEOUT 100000 #define P9_EC_DIRECT_CONTROLS 0x10a9c #define P9_THREAD_STOP(t) PPC_BIT(7 + 8*(t)) #define P9_THREAD_CONT(t) PPC_BIT(6 + 8*(t)) #define P9_THREAD_SRESET(t) PPC_BIT(4 + 8*(t)) #define P9_THREAD_PWR(t) PPC_BIT(32 + 8*(t)) /* EC_PPM_SPECIAL_WKUP_HYP */ #define P9_SPWKUP_SET PPC_BIT(0) #define P9_EC_PPM_SSHHYP 0x0114 #define P9_CORE_GATED PPC_BIT(0) #define P9_SPECIAL_WKUP_DONE PPC_BIT(1) /* Waking may take up to 5ms for deepest sleep states. Set timeout to 100ms */ #define P9_SPWKUP_POLL_INTERVAL 100 #define P9_SPWKUP_TIMEOUT 100000 /* * This implements direct control facilities of processor cores and threads * using scom registers. */ static int p9_core_set_special_wakeup(struct cpu_thread *cpu) { uint32_t chip_id = pir_to_chip_id(cpu->pir); uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t swake_addr; uint32_t sshhyp_addr; uint64_t val; int i; swake_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, EC_PPM_SPECIAL_WKUP_HYP); sshhyp_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, P9_EC_PPM_SSHHYP); if (xscom_write(chip_id, swake_addr, P9_SPWKUP_SET)) { prlog(PR_ERR, "Could not set special wakeup on %u:%u:" " Unable to write PPM_SPECIAL_WKUP_HYP.\n", chip_id, core_id); return OPAL_HARDWARE; } for (i = 0; i < P9_SPWKUP_TIMEOUT / P9_SPWKUP_POLL_INTERVAL; i++) { if (xscom_read(chip_id, sshhyp_addr, &val)) { prlog(PR_ERR, "Could not set special wakeup on %u:%u:" " Unable to read PPM_SSHHYP.\n", chip_id, core_id); goto out_fail; } if (val & P9_SPECIAL_WKUP_DONE) return 0; time_wait_us(P9_SPWKUP_POLL_INTERVAL); } prlog(PR_ERR, "Could not set special wakeup on %u:%u:" " timeout waiting for SPECIAL_WKUP_DONE.\n", chip_id, core_id); out_fail: /* De-assert special wakeup after a small delay. */ time_wait_us(1); xscom_write(chip_id, swake_addr, 0); return OPAL_HARDWARE; } static int p9_core_clear_special_wakeup(struct cpu_thread *cpu) { uint32_t chip_id = pir_to_chip_id(cpu->pir); uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t swake_addr; uint32_t sshhyp_addr; uint64_t val; int i; swake_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, EC_PPM_SPECIAL_WKUP_HYP); sshhyp_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, P9_EC_PPM_SSHHYP); /* * De-assert special wakeup after a small delay. * The delay may help avoid problems setting and clearing special * wakeup back-to-back. This should be confirmed. 
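* Callers are expected to pair each set with a clear; the generic
* dctl_set/clear_special_wakeup() wrappers later in this file refcount
* requests per core, so the scom is only written on the first set and the
* final clear.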
*/ time_wait_us(1); if (xscom_write(chip_id, swake_addr, 0)) { prlog(PR_ERR, "Could not clear special wakeup on %u:%u:" " Unable to write PPM_SPECIAL_WKUP_HYP.\n", chip_id, core_id); return OPAL_HARDWARE; } time_wait_us(1); for (i = 0; i < P9_SPWKUP_TIMEOUT / P9_SPWKUP_POLL_INTERVAL; i++) { if (xscom_read(chip_id, sshhyp_addr, &val)) { prlog(PR_ERR, "Could not clear special wakeup on %u:%u:" " Unable to read PPM_SSHHYP.\n", chip_id, core_id); return OPAL_HARDWARE; } if (!(val & P9_SPECIAL_WKUP_DONE)) return 0; time_wait_us(P9_SPWKUP_POLL_INTERVAL); } prlog(PR_ERR, "Could not clear special wakeup on %u:%u:" " timeout waiting for clear of SPECIAL_WKUP_DONE.\n", chip_id, core_id); return OPAL_HARDWARE; } static int p9_thread_quiesced(struct cpu_thread *cpu) { uint32_t chip_id = pir_to_chip_id(cpu->pir); uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t thread_id = pir_to_thread_id(cpu->pir); uint32_t ras_addr; uint64_t ras_status; ras_addr = XSCOM_ADDR_P9_EC(core_id, P9_RAS_STATUS); if (xscom_read(chip_id, ras_addr, &ras_status)) { prlog(PR_ERR, "Could not check thread state on %u:%u:" " Unable to read RAS_STATUS.\n", chip_id, core_id); return OPAL_HARDWARE; } /* * This returns true when the thread is quiesced and all * instructions completed. For sreset this may not be necessary, * but we may want to use instruction ramming or stepping * direct controls where it is important. */ if ((ras_status & P9_THREAD_QUIESCED(thread_id)) == P9_THREAD_QUIESCED(thread_id)) return 1; return 0; } static int p9_stop_thread(struct cpu_thread *cpu) { uint32_t chip_id = pir_to_chip_id(cpu->pir); uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t thread_id = pir_to_thread_id(cpu->pir); uint32_t dctl_addr; int rc; int i; dctl_addr = XSCOM_ADDR_P9_EC(core_id, P9_EC_DIRECT_CONTROLS); rc = p9_thread_quiesced(cpu); if (rc < 0) return rc; if (rc) prlog(PR_WARNING, "Stopping thread %u:%u:%u warning:" " thread is quiesced already.\n", chip_id, core_id, thread_id); if (xscom_write(chip_id, dctl_addr, P9_THREAD_STOP(thread_id))) { prlog(PR_ERR, "Could not stop thread %u:%u:%u:" " Unable to write EC_DIRECT_CONTROLS.\n", chip_id, core_id, thread_id); return OPAL_HARDWARE; } for (i = 0; i < P9_QUIESCE_TIMEOUT / P9_QUIESCE_POLL_INTERVAL; i++) { int rc = p9_thread_quiesced(cpu); if (rc < 0) break; if (rc) return 0; time_wait_us(P9_QUIESCE_POLL_INTERVAL); } prlog(PR_ERR, "Could not stop thread %u:%u:%u:" " Unable to quiesce thread.\n", chip_id, core_id, thread_id); if (xscom_write(chip_id, dctl_addr, P9_THREAD_CONT(thread_id))) { prlog(PR_ERR, "Could not resume thread %u:%u:%u:" " Unable to write EC_DIRECT_CONTROLS.\n", chip_id, core_id, thread_id); } return OPAL_HARDWARE; } static int p9_cont_thread(struct cpu_thread *cpu) { uint32_t chip_id = pir_to_chip_id(cpu->pir); uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t thread_id = pir_to_thread_id(cpu->pir); uint32_t dctl_addr; dctl_addr = XSCOM_ADDR_P9_EC(core_id, P9_EC_DIRECT_CONTROLS); if (xscom_write(chip_id, dctl_addr, P9_THREAD_CONT(thread_id))) { prlog(PR_ERR, "Could not resume thread %u:%u:%u:" " Unable to write EC_DIRECT_CONTROLS.\n", chip_id, core_id, thread_id); } return 0; } static int p9_sreset_thread(struct cpu_thread *cpu) { uint32_t chip_id = pir_to_chip_id(cpu->pir); uint32_t core_id = pir_to_core_id(cpu->pir); uint32_t thread_id = pir_to_thread_id(cpu->pir); uint32_t dctl_addr; dctl_addr = XSCOM_ADDR_P9_EC(core_id, P9_EC_DIRECT_CONTROLS); if (xscom_write(chip_id, dctl_addr, P9_THREAD_SRESET(thread_id))) { prlog(PR_ERR, "Could not sreset thread 
%u:%u:%u:" " Unable to write EC_DIRECT_CONTROLS.\n", chip_id, core_id, thread_id); return OPAL_HARDWARE; } return 0; } /**************** generic direct controls ****************/ int dctl_set_special_wakeup(struct cpu_thread *t) { struct cpu_thread *c = t->primary; int rc = OPAL_SUCCESS; if (proc_gen != proc_gen_p9 && proc_gen != proc_gen_p8) return OPAL_UNSUPPORTED; lock(&c->dctl_lock); if (c->special_wakeup_count == 0) { if (proc_gen == proc_gen_p9) rc = p9_core_set_special_wakeup(c); else /* (proc_gen == proc_gen_p8) */ rc = p8_core_set_special_wakeup(c); } if (!rc) c->special_wakeup_count++; unlock(&c->dctl_lock); return rc; } int dctl_clear_special_wakeup(struct cpu_thread *t) { struct cpu_thread *c = t->primary; int rc = OPAL_SUCCESS; if (proc_gen != proc_gen_p9 && proc_gen != proc_gen_p8) return OPAL_UNSUPPORTED; lock(&c->dctl_lock); if (!c->special_wakeup_count) goto out; if (c->special_wakeup_count == 1) { if (proc_gen == proc_gen_p9) rc = p9_core_clear_special_wakeup(c); else /* (proc_gen == proc_gen_p8) */ rc = p8_core_clear_special_wakeup(c); } if (!rc) c->special_wakeup_count--; out: unlock(&c->dctl_lock); return rc; } int dctl_core_is_gated(struct cpu_thread *t) { struct cpu_thread *c = t->primary; uint32_t chip_id = pir_to_chip_id(c->pir); uint32_t core_id = pir_to_core_id(c->pir); uint32_t sshhyp_addr; uint64_t val; if (proc_gen != proc_gen_p9) return OPAL_UNSUPPORTED; sshhyp_addr = XSCOM_ADDR_P9_EC_SLAVE(core_id, P9_EC_PPM_SSHHYP); if (xscom_read(chip_id, sshhyp_addr, &val)) { prlog(PR_ERR, "Could not query core gated on %u:%u:" " Unable to read PPM_SSHHYP.\n", chip_id, core_id); return OPAL_HARDWARE; } return !!(val & P9_CORE_GATED); } static int dctl_stop(struct cpu_thread *t) { struct cpu_thread *c = t->primary; int rc; if (proc_gen != proc_gen_p9 && proc_gen != proc_gen_p8) return OPAL_UNSUPPORTED; lock(&c->dctl_lock); if (t->dctl_stopped) { unlock(&c->dctl_lock); return OPAL_BUSY; } if (proc_gen == proc_gen_p9) rc = p9_stop_thread(t); else /* (proc_gen == proc_gen_p8) */ rc = p8_stop_thread(t); if (!rc) t->dctl_stopped = true; unlock(&c->dctl_lock); return rc; } static int dctl_cont(struct cpu_thread *t) { struct cpu_thread *c = t->primary; int rc; if (proc_gen != proc_gen_p9) return OPAL_UNSUPPORTED; lock(&c->dctl_lock); if (!t->dctl_stopped) { unlock(&c->dctl_lock); return OPAL_BUSY; } rc = p9_cont_thread(t); if (!rc) t->dctl_stopped = false; unlock(&c->dctl_lock); return rc; } /* * NOTE: * The POWER8 sreset does not provide SRR registers, so it can be used * for fast reboot, but not OPAL_SIGNAL_SYSTEM_RESET or anywhere that is * expected to return. For now, callers beware. */ static int dctl_sreset(struct cpu_thread *t) { struct cpu_thread *c = t->primary; int rc; if (proc_gen != proc_gen_p9 && proc_gen != proc_gen_p8) return OPAL_UNSUPPORTED; lock(&c->dctl_lock); if (!t->dctl_stopped) { unlock(&c->dctl_lock); return OPAL_BUSY; } if (proc_gen == proc_gen_p9) rc = p9_sreset_thread(t); else /* (proc_gen == proc_gen_p8) */ rc = p8_sreset_thread(t); if (!rc) t->dctl_stopped = false; unlock(&c->dctl_lock); return rc; } /**************** fast reboot API ****************/ int sreset_all_prepare(void) { struct cpu_thread *cpu; prlog(PR_DEBUG, "RESET: Resetting from cpu: 0x%x (core 0x%x)\n", this_cpu()->pir, pir_to_core_id(this_cpu()->pir)); if (chip_quirk(QUIRK_MAMBO_CALLOUTS)) { for_each_ungarded_cpu(cpu) { if (cpu == this_cpu()) continue; mambo_stop_cpu(cpu); } return OPAL_SUCCESS; } /* Assert special wakup on all cores. Only on operational cores. 
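Special wakeup is asserted first because a core sitting in a deep idle/stop state may not reliably accept the per-thread direct-control scoms; waking the core makes the stop and sreset steps below dependable.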
*/ for_each_ungarded_primary(cpu) { if (dctl_set_special_wakeup(cpu) != OPAL_SUCCESS) return OPAL_HARDWARE; } prlog(PR_DEBUG, "RESET: Stopping the world...\n"); /* Put everybody in stop except myself */ for_each_ungarded_cpu(cpu) { if (cpu == this_cpu()) continue; if (dctl_stop(cpu) != OPAL_SUCCESS) return OPAL_HARDWARE; } return OPAL_SUCCESS; } void sreset_all_finish(void) { struct cpu_thread *cpu; if (chip_quirk(QUIRK_MAMBO_CALLOUTS)) return; for_each_ungarded_primary(cpu) dctl_clear_special_wakeup(cpu); } int sreset_all_others(void) { struct cpu_thread *cpu; prlog(PR_DEBUG, "RESET: Resetting all threads but self...\n"); /* * mambo should actually implement stop as well, and implement * the dctl_ helpers properly. Currently it's racy just sresetting. */ if (chip_quirk(QUIRK_MAMBO_CALLOUTS)) { for_each_ungarded_cpu(cpu) { if (cpu == this_cpu()) continue; mambo_sreset_cpu(cpu); } return OPAL_SUCCESS; } for_each_ungarded_cpu(cpu) { if (cpu == this_cpu()) continue; if (dctl_sreset(cpu) != OPAL_SUCCESS) return OPAL_HARDWARE; } return OPAL_SUCCESS; } /**************** OPAL_SIGNAL_SYSTEM_RESET API ****************/ /* * This provides a way for the host to raise system reset exceptions * on other threads using direct control scoms on POWER9. * * We assert special wakeup on the core first. * Then stop target thread and wait for it to quiesce. * Then sreset the target thread, which resumes execution on that thread. * Then de-assert special wakeup on the core. */ static int64_t p9_sreset_cpu(struct cpu_thread *cpu) { int rc; if (this_cpu() == cpu) { prlog(PR_ERR, "SRESET: Unable to reset self\n"); return OPAL_PARAMETER; } rc = dctl_set_special_wakeup(cpu); if (rc) return rc; rc = dctl_stop(cpu); if (rc) goto out_spwk; rc = dctl_sreset(cpu); if (rc) goto out_cont; dctl_clear_special_wakeup(cpu); return 0; out_cont: dctl_cont(cpu); out_spwk: dctl_clear_special_wakeup(cpu); return rc; } static struct lock sreset_lock = LOCK_UNLOCKED; int64_t opal_signal_system_reset(int cpu_nr) { struct cpu_thread *cpu; int64_t ret; if (proc_gen != proc_gen_p9) return OPAL_UNSUPPORTED; /* * Broadcasts unsupported. Not clear what threads should be * signaled, so it's better for the OS to perform one-at-a-time * for now. */ if (cpu_nr < 0) return OPAL_CONSTRAINED; /* Reset a single CPU */ cpu = find_cpu_by_server(cpu_nr); if (!cpu) { prlog(PR_ERR, "SRESET: could not find cpu by server %d\n", cpu_nr); return OPAL_PARAMETER; } lock(&sreset_lock); ret = p9_sreset_cpu(cpu); unlock(&sreset_lock); return ret; } void direct_controls_init(void) { uint32_t version; if (chip_quirk(QUIRK_MAMBO_CALLOUTS)) return; if (proc_gen != proc_gen_p9) return; /* DD1 has some sreset quirks we do not support */ version = mfspr(SPR_PVR); if (is_power9n(version) && PVR_VERS_MAJ(version) == 1) return; opal_register(OPAL_SIGNAL_SYSTEM_RESET, opal_signal_system_reset, 1); } skiboot-5.10-rc4/core/errorlog.c000066400000000000000000000125701324317060200165310ustar00rootroot00000000000000/* Copyright 2013-2017 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ /* This file contains the front end for OPAL error logging. It is used * to construct a struct errorlog representing the event/error to be * logged which is then passed to the platform specific backend to log * the actual errors. */ #include #include #include #include /* * Maximum number buffers that are pre-allocated * to hold elogs that are reported on Sapphire and * PowerNV. */ #define ELOG_WRITE_MAX_RECORD 64 /* Platform log id as per the spec */ static uint32_t sapphire_elog_id = 0xB0000000; /* Reserved for future use */ /* static uint32_t powernv_elog_id = 0xB1000000; */ /* Pool to allocate elog messages from */ static struct pool elog_pool; static struct lock elog_lock = LOCK_UNLOCKED; static bool elog_available = false; static struct errorlog *get_write_buffer(int opal_event_severity) { struct errorlog *buf; if (!elog_available) return NULL; lock(&elog_lock); if (opal_event_severity == OPAL_ERROR_PANIC) buf = pool_get(&elog_pool, POOL_HIGH); else buf = pool_get(&elog_pool, POOL_NORMAL); unlock(&elog_lock); return buf; } /* Reporting of error via struct errorlog */ struct errorlog *opal_elog_create(struct opal_err_info *e_info, uint32_t tag) { struct errorlog *buf; buf = get_write_buffer(e_info->sev); if (buf) { buf->error_event_type = e_info->err_type; buf->component_id = e_info->cmp_id; buf->subsystem_id = e_info->subsystem; buf->event_severity = e_info->sev; buf->event_subtype = e_info->event_subtype; buf->reason_code = e_info->reason_code; buf->elog_origin = ORG_SAPPHIRE; lock(&elog_lock); buf->plid = ++sapphire_elog_id; unlock(&elog_lock); /* Initialise the first user dump section */ log_add_section(buf, tag); } return buf; } /* Add a new user data section to an existing error log */ void log_add_section(struct errorlog *buf, uint32_t tag) { size_t size = sizeof(struct elog_user_data_section) - 1; struct elog_user_data_section *tmp; if (!buf) { prerror("ELOG: Cannot add user data section. " "Buffer is invalid\n"); return; } if ((buf->user_section_size + size) > OPAL_LOG_MAX_DUMP) { prerror("ELOG: Size of dump data overruns buffer\n"); return; } tmp = (struct elog_user_data_section *)(buf->user_data_dump + buf->user_section_size); /* Use DESC if no other tag provided */ tmp->tag = tag ? tag : 0x44455343; tmp->size = size; buf->user_section_size += tmp->size; buf->user_section_count++; } void opal_elog_complete(struct errorlog *buf, bool success) { if (!success) printf("Unable to log error\n"); lock(&elog_lock); pool_free_object(&elog_pool, buf); unlock(&elog_lock); } void log_commit(struct errorlog *elog) { int rc; if (!elog) return; if (platform.elog_commit) { rc = platform.elog_commit(elog); if (rc) prerror("ELOG: Platform commit error %d\n", rc); return; } opal_elog_complete(elog, false); } void log_append_data(struct errorlog *buf, unsigned char *data, uint16_t size) { struct elog_user_data_section *section; uint8_t n_sections; char *buffer; if (!buf) { prerror("ELOG: Cannot update user data. 
Buffer is invalid\n"); return; } if ((buf->user_section_size + size) > OPAL_LOG_MAX_DUMP) { prerror("ELOG: Size of dump data overruns buffer\n"); return; } /* Step through user sections to find latest dump section */ buffer = buf->user_data_dump; n_sections = buf->user_section_count; if (!n_sections) { prerror("ELOG: User section invalid\n"); return; } while (--n_sections) { section = (struct elog_user_data_section *)buffer; buffer += section->size; } section = (struct elog_user_data_section *)buffer; buffer += section->size; memcpy(buffer, data, size); section->size += size; buf->user_section_size += size; } void log_append_msg(struct errorlog *buf, const char *fmt, ...) { char err_msg[250]; va_list list; if (!buf) { prerror("Tried to append log to NULL buffer\n"); return; } va_start(list, fmt); vsnprintf(err_msg, sizeof(err_msg), fmt, list); va_end(list); /* Log the error on to Sapphire console */ prerror("%s", err_msg); log_append_data(buf, err_msg, strlen(err_msg)); } uint32_t log_simple_error(struct opal_err_info *e_info, const char *fmt, ...) { struct errorlog *buf; va_list list; char err_msg[250]; va_start(list, fmt); vsnprintf(err_msg, sizeof(err_msg), fmt, list); va_end(list); /* Log the error on to Sapphire console */ prerror("%s", err_msg); buf = opal_elog_create(e_info, 0); if (buf == NULL) { prerror("ELOG: Error getting buffer to log error\n"); return -1; } log_append_data(buf, err_msg, strlen(err_msg)); log_commit(buf); return buf->plid; } int elog_init(void) { /* Pre-allocate memory for records */ if (pool_init(&elog_pool, sizeof(struct errorlog), ELOG_WRITE_MAX_RECORD, 1)) return OPAL_RESOURCE; elog_available = true; return 0; } skiboot-5.10-rc4/core/exceptions.c000066400000000000000000000050331324317060200170530ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #include #define REG "%016llx" #define REG32 "%08x" #define REGS_PER_LINE 4 static void dump_regs(struct stack_frame *stack) { unsigned int i; prerror("CFAR : "REG"\n", stack->cfar); prerror("SRR0 : "REG" SRR1 : "REG"\n", stack->srr0, stack->srr1); prerror("HSRR0: "REG" HSRR1: "REG"\n", stack->hsrr0, stack->hsrr1); prerror("DSISR: "REG32" DAR : "REG"\n", stack->dsisr, stack->dar); prerror("LR : "REG" CTR : "REG"\n", stack->lr, stack->ctr); prerror("CR : "REG32" XER : "REG32"\n", stack->cr, stack->xer); for (i = 0; i < 16; i++) prerror("GPR%02d: "REG" GPR%02d: "REG"\n", i, stack->gpr[i], i + 16, stack->gpr[i + 16]); } /* Called from head.S, thus no prototype */ void exception_entry(struct stack_frame *stack) __noreturn; void exception_entry(struct stack_frame *stack) { const size_t max = 320; char buf[max]; size_t l; prerror("***********************************************\n"); if (stack->type == 0x200) { l = 0; l += snprintf(buf + l, max - l, "Fatal MCE at "REG" ", stack->srr0); l += snprintf_symbol(buf + l, max - l, stack->srr0); prerror("%s\n", buf); } else { uint64_t nip; switch (stack->type) { case 0x500: case 0x980: case 0xe00: case 0xe20: case 0xe40: case 0xe60: case 0xe80: case 0xea0: case 0xf80: nip = stack->hsrr0; break; default: nip = stack->srr0; break; } l = 0; l += snprintf(buf + l, max - l, "Fatal Exception 0x%llx at "REG" ", stack->type, nip); l += snprintf_symbol(buf + l, max - l, nip); prerror("%s\n", buf); } dump_regs(stack); abort(); } static int64_t opal_register_exc_handler(uint64_t opal_exception __unused, uint64_t handler_address __unused, uint64_t glue_cache_line __unused) { /* This interface is deprecated */ return OPAL_UNSUPPORTED; } opal_call(OPAL_REGISTER_OPAL_EXCEPTION_HANDLER, opal_register_exc_handler, 3); skiboot-5.10-rc4/core/fast-reboot.c000066400000000000000000000207731324317060200171270ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Flag tested by the OPAL entry code */ static volatile bool fast_boot_release; static bool cpu_state_wait_all_others(enum cpu_thread_state state, unsigned long timeout_tb) { struct cpu_thread *cpu; unsigned long end = mftb() + timeout_tb; sync(); for_each_ungarded_cpu(cpu) { if (cpu == this_cpu()) continue; if (cpu->state != state) { smt_lowest(); while (cpu->state != state) { barrier(); if (timeout_tb && (tb_compare(mftb(), end) == TB_AAFTERB)) { smt_medium(); return false; } } smt_medium(); } } sync(); return true; } static const char *fast_reboot_disabled = NULL; void disable_fast_reboot(const char *reason) { fast_reboot_disabled = reason; } void fast_reboot(void) { struct cpu_thread *cpu; static int fast_reboot_count = 0; if (proc_gen == proc_gen_p9) { if (!nvram_query_eq("experimental-fast-reset","feeling-lucky")) return; } if (!chip_quirk(QUIRK_MAMBO_CALLOUTS) && (proc_gen != proc_gen_p8 && proc_gen != proc_gen_p9)) { prlog(PR_DEBUG, "RESET: Fast reboot not available on this CPU\n"); return; } if (chip_quirk(QUIRK_NO_DIRECT_CTL)) { prlog(PR_DEBUG, "RESET: Fast reboot disabled by quirk\n"); return; } /* * Ensure all other CPUs have left OPAL calls. */ if (!opal_quiesce(QUIESCE_HOLD, -1)) { prlog(PR_NOTICE, "RESET: Fast reboot disabled because OPAL " "quiesce timed out\n"); return; } if (fast_reboot_disabled) { prlog(PR_DEBUG, "RESET: Fast reboot disabled because %s\n", fast_reboot_disabled); opal_quiesce(QUIESCE_RESUME, -1); return; } /* Should mem_check() all regions before allowing fast reboot? */ prlog(PR_NOTICE, "RESET: Initiating fast reboot %d...\n", ++fast_reboot_count); fast_boot_release = false; sync(); /* Put everybody in stop except myself */ if (sreset_all_prepare()) { prlog(PR_NOTICE, "RESET: Fast reboot failed to prepare " "secondaries for system reset\n"); opal_quiesce(QUIESCE_RESUME, -1); return; } /* * There is no point clearing special wakeup or un-quiesce due to * failure after this point, because we will be going to full IPL. * Less cleanup work means less opportunity to fail. */ for_each_ungarded_cpu(cpu) { /* Also make sure that saved_r1 is 0 ! That's what will * make our reset vector jump to fast_reboot_entry */ cpu->save_r1 = 0; } /* Restore skiboot vectors */ copy_exception_vectors(); setup_reset_vector(); /* Send everyone else to 0x100 */ if (sreset_all_others() != OPAL_SUCCESS) { prlog(PR_NOTICE, "RESET: Fast reboot failed to system reset " "secondaries\n"); return; } /* Ensure all the sresets get through */ if (!cpu_state_wait_all_others(cpu_state_present, msecs_to_tb(100))) { prlog(PR_NOTICE, "RESET: Fast reboot timed out waiting for " "secondaries to call in\n"); return; } prlog(PR_DEBUG, "RESET: Releasing special wakeups...\n"); sreset_all_finish(); /* This resets our quiesce state ready to enter the new kernel. */ opal_quiesce(QUIESCE_RESUME_FAST_REBOOT, -1); asm volatile("ba 0x100\n\t" : : : "memory"); for (;;) ; } static void cleanup_cpu_state(void) { struct cpu_thread *cpu = this_cpu(); /* Per core cleanup */ if (cpu_is_thread0(cpu)) { /* Shared SPRs whacked back to normal */ /* XXX Update the SLW copies ! Also dbl check HIDs etc... 
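The SPRs still hold whatever the previous kernel programmed at this point, so init_shared_sprs()/init_replicated_sprs() below put them back to skiboot's boot-time defaults.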
*/ init_shared_sprs(); if (proc_gen == proc_gen_p8) { /* If somebody was in fast_sleep, we may have a * workaround to undo */ if (cpu->in_fast_sleep) { prlog(PR_DEBUG, "RESET: CPU 0x%04x in fast sleep" " undoing workarounds...\n", cpu->pir); fast_sleep_exit(); } /* The TLB surely contains garbage. * P9 clears TLBs in cpu_fast_reboot_complete */ cleanup_local_tlb(); } /* And we might have lost TB sync */ chiptod_wakeup_resync(); } /* Per-thread additional cleanup */ init_replicated_sprs(); // XXX Cleanup SLW, check HIDs ... } void __noreturn enter_nap(void); static void check_split_core(void) { struct cpu_thread *cpu; u64 mask, hid0; hid0 = mfspr(SPR_HID0); mask = SPR_HID0_POWER8_4LPARMODE | SPR_HID0_POWER8_2LPARMODE; if ((hid0 & mask) == 0) return; prlog(PR_INFO, "RESET: CPU 0x%04x is split !\n", this_cpu()->pir); /* If it's a secondary thread, just send it to nap */ if (this_cpu()->pir & 7) { /* Prepare to be woken up */ icp_prep_for_pm(); /* Setup LPCR to wakeup on external interrupts only */ mtspr(SPR_LPCR, ((mfspr(SPR_LPCR) & ~SPR_LPCR_P8_PECE) | SPR_LPCR_P8_PECE2)); /* Go to nap (doesn't return) */ enter_nap(); } prlog(PR_INFO, "RESET: Primary, unsplitting... \n"); /* Trigger unsplit operation and update SLW image */ hid0 &= ~SPR_HID0_POWER8_DYNLPARDIS; set_hid0(hid0); opal_slw_set_reg(this_cpu()->pir, SPR_HID0, hid0); /* Wait for unsplit */ while (mfspr(SPR_HID0) & mask) cpu_relax(); /* Now the guys are sleeping, wake'em up. They will come back * via reset and continue the fast reboot process normally. * No need to wait. */ prlog(PR_INFO, "RESET: Waking unsplit secondaries... \n"); for_each_cpu(cpu) { if (!cpu_is_sibling(cpu, this_cpu()) || (cpu == this_cpu())) continue; icp_kick_cpu(cpu); } } /* Entry from asm after a fast reset */ void __noreturn fast_reboot_entry(void); void __noreturn fast_reboot_entry(void) { prlog(PR_DEBUG, "RESET: CPU 0x%04x reset in\n", this_cpu()->pir); if (proc_gen == proc_gen_p9) { reset_cpu_xive(); } else if (proc_gen == proc_gen_p8) { /* We reset our ICP first ! Otherwise we might get stray * interrupts when unsplitting */ reset_cpu_icp(); /* If we are split, we need to unsplit. Since that can send us * to NAP, which will come back via reset, we do it now */ check_split_core(); } sync(); this_cpu()->state = cpu_state_present; sync(); /* Are we the original boot CPU ? If not, we spin waiting * for a relase signal from CPU 1, then we clean ourselves * up and go processing jobs. */ if (this_cpu() != boot_cpu) { if (!fast_boot_release) { smt_lowest(); while (!fast_boot_release) barrier(); smt_medium(); } sync(); cleanup_cpu_state(); __secondary_cpu_entry(); } prlog(PR_INFO, "RESET: Boot CPU waiting for everybody...\n"); /* We are the original boot CPU, wait for secondaries to * be captured. 
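* Each secondary comes in through fast_reboot_entry() above and marks
* itself cpu_state_present, so this wait completes once every ungarded
* thread has checked in.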
*/ cpu_state_wait_all_others(cpu_state_present, 0); if (proc_gen == proc_gen_p9) { xive_reset(); } prlog(PR_INFO, "RESET: Releasing secondaries...\n"); /* Release everybody */ sync(); fast_boot_release = true; /* Cleanup ourselves */ cleanup_cpu_state(); /* Set our state to active */ sync(); this_cpu()->state = cpu_state_active; sync(); /* Wait for them to respond */ cpu_state_wait_all_others(cpu_state_active, 0); sync(); prlog(PR_INFO, "RESET: All done, cleaning up...\n"); /* Clear release flag for next time */ fast_boot_release = false; /* Let the CPU layer do some last minute global cleanups */ cpu_fast_reboot_complete(); /* We can now do NAP mode */ cpu_set_sreset_enable(true); cpu_set_ipi_enable(true); /* Start preloading kernel and ramdisk */ start_preload_kernel(); /* Poke the consoles (see comments in the code there) */ fsp_console_reset(); if (proc_gen == proc_gen_p8) { /* XXX */ /* Reset/EOI the PSI interrupt */ psi_irq_reset(); } /* Remove all PCI devices */ if (pci_reset()) { prlog(PR_NOTICE, "RESET: Fast reboot failed to reset PCI\n"); /* * Can't return to caller here because we're past no-return. * Attempt an IPL here which is what the caller would do. */ if (platform.cec_reboot) platform.cec_reboot(); for (;;) ; } ipmi_set_fw_progress_sensor(IPMI_FW_PCI_INIT); /* Load and boot payload */ load_and_boot_kernel(true); } skiboot-5.10-rc4/core/fdt.c000066400000000000000000000130741324317060200154530ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include static int fdt_error; #undef DEBUG_FDT #ifdef DEBUG_FDT #define FDT_DBG(fmt, a...) prlog(PR_DEBUG, "FDT: " fmt, ##a) #else #define FDT_DBG(fmt, a...) #endif static void __save_err(int err, const char *str) { FDT_DBG("rc: %d from \"%s\"\n", err, str); if (err && !fdt_error) { prerror("FDT: Error %d from \"%s\"\n", err, str); fdt_error = err; } } #define save_err(...) 
__save_err(__VA_ARGS__, #__VA_ARGS__) static void dt_property_cell(void *fdt, const char *name, u32 cell) { save_err(fdt_property_cell(fdt, name, cell)); } static void dt_begin_node(void *fdt, const struct dt_node *dn) { save_err(fdt_begin_node(fdt, dn->name)); dt_property_cell(fdt, "phandle", dn->phandle); } static void dt_property(void *fdt, const struct dt_property *p) { save_err(fdt_property(fdt, p->name, p->prop, p->len)); } static void dt_end_node(void *fdt) { save_err(fdt_end_node(fdt)); } #ifdef DEBUG_FDT static void dump_fdt(void *fdt) { int i, off, depth, err; prlog(PR_INFO, "Device tree %u@%p\n", fdt_totalsize(fdt), fdt); err = fdt_check_header(fdt); if (err) { prerror("fdt_check_header: %s\n", fdt_strerror(err)); return; } prlog(PR_INFO, "fdt_check_header passed\n"); prlog(PR_INFO, "fdt_num_mem_rsv = %u\n", fdt_num_mem_rsv(fdt)); for (i = 0; i < fdt_num_mem_rsv(fdt); i++) { u64 addr, size; err = fdt_get_mem_rsv(fdt, i, &addr, &size); if (err) { prlog(PR_INFO, " ERR %s\n", fdt_strerror(err)); return; } prlog(PR_INFO, " mem_rsv[%i] = %lu@%#lx\n", i, (long)addr, (long)size); } for (off = fdt_next_node(fdt, 0, &depth); off > 0; off = fdt_next_node(fdt, off, &depth)) { int len; const char *name; name = fdt_get_name(fdt, off, &len); if (!name) { prerror("fdt: offset %i no name!\n", off); return; } prlog(PR_INFO, "name: %s [%u]\n", name, off); } } #else static inline void dump_fdt(void *fdt __unused) { } #endif static void flatten_dt_properties(void *fdt, const struct dt_node *dn) { const struct dt_property *p; list_for_each(&dn->properties, p, list) { if (strstarts(p->name, DT_PRIVATE)) continue; FDT_DBG(" prop: %s size: %ld\n", p->name, p->len); dt_property(fdt, p); } } static void flatten_dt_node(void *fdt, const struct dt_node *root, bool exclusive) { const struct dt_node *i; if (!exclusive) { FDT_DBG("node: %s\n", root->name); dt_begin_node(fdt, root); flatten_dt_properties(fdt, root); } list_for_each(&root->children, i, list) flatten_dt_node(fdt, i, false); if (!exclusive) dt_end_node(fdt); } static void create_dtb_reservemap(void *fdt, const struct dt_node *root) { uint64_t base, size; const uint64_t *ranges; const struct dt_property *prop; int i; /* Duplicate the reserved-ranges property into the fdt reservemap */ prop = dt_find_property(root, "reserved-ranges"); if (prop) { ranges = (const void *)prop->prop; for (i = 0; i < prop->len / (sizeof(uint64_t) * 2); i++) { base = *(ranges++); size = *(ranges++); save_err(fdt_add_reservemap_entry(fdt, base, size)); } } save_err(fdt_finish_reservemap(fdt)); } static int __create_dtb(void *fdt, size_t len, const struct dt_node *root, bool exclusive) { fdt_create(fdt, len); if (root == dt_root && !exclusive) create_dtb_reservemap(fdt, root); else fdt_finish_reservemap(fdt); flatten_dt_node(fdt, root, exclusive); save_err(fdt_finish(fdt)); if (fdt_error) { prerror("dtb: error %s\n", fdt_strerror(fdt_error)); return fdt_error; } dump_fdt(fdt); return 0; } void *create_dtb(const struct dt_node *root, bool exclusive) { void *fdt = NULL; size_t len = DEVICE_TREE_MAX_SIZE; uint32_t old_last_phandle = get_last_phandle(); int ret; do { set_last_phandle(old_last_phandle); fdt_error = 0; fdt = malloc(len); if (!fdt) { prerror("dtb: could not malloc %lu\n", (long)len); return NULL; } ret = __create_dtb(fdt, len, root, exclusive); if (ret) { free(fdt); fdt = NULL; } len *= 2; } while (ret == -FDT_ERR_NOSPACE); return fdt; } static int64_t opal_get_device_tree(uint32_t phandle, uint64_t buf, uint64_t len) { struct dt_node *root; void *fdt = (void 
*)buf; uint32_t old_last_phandle; int64_t totalsize; int ret; if (!opal_addr_valid(fdt)) return OPAL_PARAMETER; root = dt_find_by_phandle(dt_root, phandle); if (!root) return OPAL_PARAMETER; if (!fdt) { fdt = create_dtb(root, true); if (!fdt) return OPAL_INTERNAL_ERROR; totalsize = fdt_totalsize(fdt); free(fdt); return totalsize; } if (!len) return OPAL_PARAMETER; fdt_error = 0; old_last_phandle = get_last_phandle(); ret = __create_dtb(fdt, len, root, true); if (ret) { set_last_phandle(old_last_phandle); if (ret == -FDT_ERR_NOSPACE) return OPAL_NO_MEM; return OPAL_EMPTY; } return OPAL_SUCCESS; } opal_call(OPAL_GET_DEVICE_TREE, opal_get_device_tree, 3); skiboot-5.10-rc4/core/flash-subpartition.c000066400000000000000000000063051324317060200205130ustar00rootroot00000000000000/* Copyright 2013-2016 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include struct flash_hostboot_toc { be32 ec; be32 offset; /* From start of header. 4K aligned */ be32 size; }; #define FLASH_HOSTBOOT_TOC_MAX_ENTRIES ((FLASH_SUBPART_HEADER_SIZE - 8) \ /sizeof(struct flash_hostboot_toc)) struct flash_hostboot_header { char eyecatcher[4]; be32 version; struct flash_hostboot_toc toc[FLASH_HOSTBOOT_TOC_MAX_ENTRIES]; }; int flash_subpart_info(void *part_header, uint32_t header_len, uint32_t part_size, uint32_t *part_actual, uint32_t subid, uint32_t *offset, uint32_t *size) { struct flash_hostboot_header *header; char eyecatcher[5]; uint32_t i, ec, o, s; bool subpart_found; if (!part_header || ( !offset && !size && !part_actual)) { prlog(PR_ERR, "FLASH: invalid parameters: ph %p of %p sz %p " "tsz %p\n", part_header, offset, size, part_actual); return OPAL_PARAMETER; } if (header_len < FLASH_SUBPART_HEADER_SIZE) { prlog(PR_ERR, "FLASH: subpartition header too small 0x%x\n", header_len); return OPAL_PARAMETER; } header = (struct flash_hostboot_header*) part_header; /* Perform sanity */ i = be32_to_cpu(header->version); if (i != 1) { prerror("FLASH: flash subpartition TOC version unknown %i\n", i); return OPAL_RESOURCE; } /* NULL terminate eyecatcher */ strncpy(eyecatcher, header->eyecatcher, 4); eyecatcher[4] = '\0'; prlog(PR_DEBUG, "FLASH: flash subpartition eyecatcher %s\n", eyecatcher); subpart_found = false; *part_actual = 0; for (i = 0; i < FLASH_HOSTBOOT_TOC_MAX_ENTRIES; i++) { ec = be32_to_cpu(header->toc[i].ec); o = be32_to_cpu(header->toc[i].offset); s = be32_to_cpu(header->toc[i].size); /* Check for null terminating entry */ if (!ec && !o && !s) break; /* Sanity check the offset and size. */ if (o + s > part_size) { prerror("FLASH: flash subpartition too big: %i\n", i); return OPAL_RESOURCE; } if (!s) { prerror("FLASH: flash subpartition zero size: %i\n", i); return OPAL_RESOURCE; } if (o < FLASH_SUBPART_HEADER_SIZE) { prerror("FLASH: flash subpartition offset too small: " "%i\n", i); return OPAL_RESOURCE; } /* * Subpartitions content are different, but multiple toc entries * may point to the same subpartition. 
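* part_actual therefore tracks the highest offset + size seen so far,
* rounded up to the subpartition header alignment, i.e. the space the
* subpartitions really occupy rather than a sum of overlapping entries.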
*/ if (ALIGN_UP(o + s, FLASH_SUBPART_HEADER_SIZE) > *part_actual) *part_actual = ALIGN_UP(o + s, FLASH_SUBPART_HEADER_SIZE); if (ec == subid) { if (offset) *offset += o; if (size) *size = s; subpart_found = true; } } if (!subpart_found && (offset || size)) { prerror("FLASH: flash subpartition not found.\n"); return OPAL_RESOURCE; } return OPAL_SUCCESS; } skiboot-5.10-rc4/core/flash.c000066400000000000000000000546121324317060200157760ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct flash { struct list_node list; bool busy; bool no_erase; struct blocklevel_device *bl; uint64_t size; uint32_t block_size; int id; }; static LIST_HEAD(flashes); static struct flash *system_flash; /* Using a single lock as we only have one flash at present. */ static struct lock flash_lock; /* nvram-on-flash support */ static struct flash *nvram_flash; static u32 nvram_offset, nvram_size; /* ibm,firmware-versions support */ static char *version_buf; static size_t version_buf_size = 0x1000; bool flash_reserve(void) { bool rc = false; if (!try_lock(&flash_lock)) return false; if (!system_flash->busy) { system_flash->busy = true; rc = true; } unlock(&flash_lock); return rc; } void flash_release(void) { lock(&flash_lock); system_flash->busy = false; unlock(&flash_lock); } static int flash_nvram_info(uint32_t *total_size) { int rc; lock(&flash_lock); if (!nvram_flash) { rc = OPAL_HARDWARE; } else if (nvram_flash->busy) { rc = OPAL_BUSY; } else { *total_size = nvram_size; rc = OPAL_SUCCESS; } unlock(&flash_lock); return rc; } static int flash_nvram_start_read(void *dst, uint32_t src, uint32_t len) { int rc; if (!try_lock(&flash_lock)) return OPAL_BUSY; if (!nvram_flash) { rc = OPAL_HARDWARE; goto out; } if (nvram_flash->busy) { rc = OPAL_BUSY; goto out; } if ((src + len) > nvram_size) { prerror("FLASH_NVRAM: read out of bound (0x%x,0x%x)\n", src, len); rc = OPAL_PARAMETER; goto out; } rc = blocklevel_read(nvram_flash->bl, nvram_offset + src, dst, len); out: unlock(&flash_lock); if (!rc) nvram_read_complete(true); return rc; } static int flash_nvram_write(uint32_t dst, void *src, uint32_t len) { int rc; if (!try_lock(&flash_lock)) return OPAL_BUSY; if (nvram_flash->busy) { rc = OPAL_BUSY; goto out; } /* TODO: When we have async jobs for PRD, turn this into one */ if ((dst + len) > nvram_size) { prerror("FLASH_NVRAM: write out of bound (0x%x,0x%x)\n", dst, len); rc = OPAL_PARAMETER; goto out; } rc = blocklevel_write(nvram_flash->bl, nvram_offset + dst, src, len); out: unlock(&flash_lock); return rc; } static void __flash_dt_add_fw_version(struct dt_node *fw_version, char* data) { static bool first = true; char *prop; int version_len, i; int len = strlen(data); const char * version_str[] = {"open-power", "buildroot", "skiboot", "hostboot-binaries", "hostboot", "linux", "petitboot", "occ", "capp-ucode", "sbe", "machine-xml"}; if (first) { first 
= false; /* Increment past "key-" */ if (memcmp(data, "open-power", strlen("open-power")) == 0) prop = data + strlen("open-power"); else prop = strchr(data, '-'); if (!prop) { prlog(PR_DEBUG, "FLASH: Invalid fw version format (%s)\n", data); return; } prop++; dt_add_property_string(fw_version, "version", prop); return; } /* * PNOR version strings are not easily consumable. Split them into * property, value. * * Example input from PNOR : * "open-power-firestone-v1.8" * "linux-4.4.6-openpower1-8420e0f" * * Desired output in device tree: * open-power = "firestone-v1.8"; * linux = "4.4.6-openpower1-8420e0f"; */ for(i = 0; i < ARRAY_SIZE(version_str); i++) { version_len = strlen(version_str[i]); if (len < version_len) continue; if (memcmp(data, version_str[i], version_len) != 0) continue; /* Found a match, add property */ if (dt_find_property(fw_version, version_str[i])) continue; /* Increment past "key-" */ prop = data + version_len + 1; dt_add_property_string(fw_version, version_str[i], prop); } } void flash_dt_add_fw_version(void) { uint8_t version_data[80]; int rc; int numbytes = 0, i = 0; struct dt_node *fw_version; if (version_buf == NULL) return; rc = wait_for_resource_loaded(RESOURCE_ID_VERSION, RESOURCE_SUBID_NONE); if (rc != OPAL_SUCCESS) { prlog(PR_WARNING, "FLASH: Failed to load VERSION data\n"); free(version_buf); return; } fw_version = dt_new(dt_root, "ibm,firmware-versions"); assert(fw_version); for ( ; (numbytes < version_buf_size) && version_buf[numbytes]; numbytes++) { if (version_buf[numbytes] == '\n') { version_data[i] = '\0'; __flash_dt_add_fw_version(fw_version, version_data); memset(version_data, 0, sizeof(version_data)); i = 0; continue; } else if (version_buf[numbytes] == '\t') { continue; /* skip tabs */ } version_data[i++] = version_buf[numbytes]; } free(version_buf); } void flash_fw_version_preload(void) { int rc; if (proc_gen < proc_gen_p9) return; prlog(PR_INFO, "FLASH: Loading VERSION section\n"); version_buf = malloc(version_buf_size); if (!version_buf) { prlog(PR_WARNING, "FLASH: Failed to allocate memory\n"); return; } rc = start_preload_resource(RESOURCE_ID_VERSION, RESOURCE_SUBID_NONE, version_buf, &version_buf_size); if (rc != OPAL_SUCCESS) { prlog(PR_WARNING, "FLASH: Failed to start loading VERSION data\n"); free(version_buf); version_buf = NULL; } } static int flash_nvram_probe(struct flash *flash, struct ffs_handle *ffs) { uint32_t start, size, part; bool ecc; int rc; prlog(PR_INFO, "FLASH: probing for NVRAM\n"); rc = ffs_lookup_part(ffs, "NVRAM", &part); if (rc) { prlog(PR_WARNING, "FLASH: no NVRAM partition found\n"); return OPAL_HARDWARE; } rc = ffs_part_info(ffs, part, NULL, &start, &size, NULL, &ecc); if (rc) { /** * @fwts-label NVRAMNoPartition * @fwts-advice OPAL could not find an NVRAM partition * on the system flash. Check that the system flash * has a valid partition table, and that the firmware * build process has added a NVRAM partition. */ prlog(PR_ERR, "FLASH: Can't parse ffs info for NVRAM\n"); return OPAL_HARDWARE; } nvram_flash = flash; nvram_offset = start; nvram_size = ecc ? 
ecc_buffer_size_minus_ecc(size) : size; platform.nvram_info = flash_nvram_info; platform.nvram_start_read = flash_nvram_start_read; platform.nvram_write = flash_nvram_write; return 0; } /* core flash support */ static struct dt_node *flash_add_dt_node(struct flash *flash, int id) { struct dt_node *flash_node; flash_node = dt_new_addr(opal_node, "flash", id); dt_add_property_strings(flash_node, "compatible", "ibm,opal-flash"); dt_add_property_cells(flash_node, "ibm,opal-id", id); dt_add_property_u64(flash_node, "reg", flash->size); dt_add_property_cells(flash_node, "ibm,flash-block-size", flash->block_size); if (flash->no_erase) dt_add_property(flash_node, "no-erase", NULL, 0); /* we fix to 32-bits */ dt_add_property_cells(flash_node, "#address-cells", 1); dt_add_property_cells(flash_node, "#size-cells", 1); return flash_node; } static void setup_system_flash(struct flash *flash, struct dt_node *node, const char *name, struct ffs_handle *ffs) { char *path; if (!ffs) return; if (system_flash) { /** * @fwts-label SystemFlashMultiple * @fwts-advice OPAL Found multiple system flash. * Since we've already found a system flash we are * going to use that one but this ordering is not * guaranteed so may change in future. */ prlog(PR_WARNING, "FLASH: Attempted to register multiple system " "flash: %s\n", name); return; } prlog(PR_NOTICE, "FLASH: Found system flash: %s id:%i\n", name, flash->id); system_flash = flash; path = dt_get_path(node); dt_add_property_string(dt_chosen, "ibm,system-flash", path); free(path); prlog(PR_INFO, "FLASH: registered system flash device %s\n", name); flash_nvram_probe(flash, ffs); } static int num_flashes(void) { struct flash *flash; int i = 0; list_for_each(&flashes, flash, list) i++; return i; } int flash_register(struct blocklevel_device *bl) { uint64_t size; uint32_t block_size; struct ffs_handle *ffs; struct dt_node *node; struct flash *flash; const char *name; int rc; rc = blocklevel_get_info(bl, &name, &size, &block_size); if (rc) return rc; prlog(PR_INFO, "FLASH: registering flash device %s " "(size 0x%llx, blocksize 0x%x)\n", name ?: "(unnamed)", size, block_size); lock(&flash_lock); flash = malloc(sizeof(struct flash)); if (!flash) { prlog(PR_ERR, "FLASH: Error allocating flash structure\n"); unlock(&flash_lock); return OPAL_RESOURCE; } flash->busy = false; flash->bl = bl; flash->no_erase = !(bl->flags & WRITE_NEED_ERASE); flash->size = size; flash->block_size = block_size; flash->id = num_flashes(); list_add(&flashes, &flash->list); rc = ffs_init(0, flash->size, bl, &ffs, 1); if (rc) { /** * @fwts-label NoFFS * @fwts-advice System flash isn't formatted as expected. * This could mean several OPAL utilities do not function * as expected. e.g. gard, pflash. 
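 * Without a parseable FFS table the device is still registered and exposed to the host as a raw ibm,opal-flash node, but named partitions (NVRAM, BOOTKERNEL, VERSION, ...) cannot be located, so NVRAM-on-flash and resource loading will not be available from it.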
*/ prlog(PR_WARNING, "FLASH: No ffs info; " "using raw device only\n"); ffs = NULL; } node = flash_add_dt_node(flash, flash->id); setup_system_flash(flash, node, name, ffs); if (ffs) ffs_close(ffs); unlock(&flash_lock); return OPAL_SUCCESS; } enum flash_op { FLASH_OP_READ, FLASH_OP_WRITE, FLASH_OP_ERASE, }; static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset, uint64_t buf, uint64_t size, uint64_t token) { struct flash *flash = NULL; int rc; if (!try_lock(&flash_lock)) return OPAL_BUSY; list_for_each(&flashes, flash, list) if (flash->id == id) break; if (flash->id != id) { /* Couldn't find the flash */ rc = OPAL_PARAMETER; goto err; } if (flash->busy) { rc = OPAL_BUSY; goto err; } if (size >= flash->size || offset >= flash->size || offset + size > flash->size) { rc = OPAL_PARAMETER; goto err; } /* * These ops intentionally have no smarts (ecc correction or erase * before write) to them. * Skiboot is simply exposing the PNOR flash to the host. * The host is expected to understand that this is a raw flash * device and treat it as such. */ switch (op) { case FLASH_OP_READ: rc = blocklevel_raw_read(flash->bl, offset, (void *)buf, size); break; case FLASH_OP_WRITE: rc = blocklevel_raw_write(flash->bl, offset, (void *)buf, size); break; case FLASH_OP_ERASE: rc = blocklevel_erase(flash->bl, offset, size); break; default: assert(0); } if (rc) { rc = OPAL_HARDWARE; goto err; } unlock(&flash_lock); opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, token, rc); return OPAL_ASYNC_COMPLETION; err: unlock(&flash_lock); return rc; } static int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf, uint64_t size, uint64_t token) { if (!opal_addr_valid((void *)buf)) return OPAL_PARAMETER; return opal_flash_op(FLASH_OP_READ, id, offset, buf, size, token); } static int64_t opal_flash_write(uint64_t id, uint64_t offset, uint64_t buf, uint64_t size, uint64_t token) { if (!opal_addr_valid((void *)buf)) return OPAL_PARAMETER; return opal_flash_op(FLASH_OP_WRITE, id, offset, buf, size, token); } static int64_t opal_flash_erase(uint64_t id, uint64_t offset, uint64_t size, uint64_t token) { return opal_flash_op(FLASH_OP_ERASE, id, offset, 0L, size, token); } opal_call(OPAL_FLASH_READ, opal_flash_read, 5); opal_call(OPAL_FLASH_WRITE, opal_flash_write, 5); opal_call(OPAL_FLASH_ERASE, opal_flash_erase, 4); /* flash resource API */ static struct { enum resource_id id; uint32_t subid; char name[PART_NAME_MAX+1]; } part_name_map[] = { { RESOURCE_ID_KERNEL, RESOURCE_SUBID_NONE, "BOOTKERNEL" }, { RESOURCE_ID_INITRAMFS,RESOURCE_SUBID_NONE, "ROOTFS" }, { RESOURCE_ID_CAPP, RESOURCE_SUBID_SUPPORTED, "CAPP" }, { RESOURCE_ID_IMA_CATALOG, RESOURCE_SUBID_SUPPORTED, "IMA_CATALOG" }, { RESOURCE_ID_VERSION, RESOURCE_SUBID_NONE, "VERSION" }, }; const char *flash_map_resource_name(enum resource_id id) { int i; for (i = 0; i < ARRAY_SIZE(part_name_map); i++) { if (part_name_map[i].id == id) return part_name_map[i].name; } return NULL; } static size_t sizeof_elf_from_hdr(void *buf) { struct elf_hdr *elf = (struct elf_hdr*) buf; size_t sz = 0; BUILD_ASSERT(SECURE_BOOT_HEADERS_SIZE > sizeof(struct elf_hdr)); BUILD_ASSERT(SECURE_BOOT_HEADERS_SIZE > sizeof(struct elf64_hdr)); BUILD_ASSERT(SECURE_BOOT_HEADERS_SIZE > sizeof(struct elf32_hdr)); if (elf->ei_ident == ELF_IDENT) { if (elf->ei_class == ELF_CLASS_64) { struct elf64_hdr *elf64 = (struct elf64_hdr*) buf; sz = le64_to_cpu(elf64->e_shoff) + ((uint32_t)le16_to_cpu(elf64->e_shentsize) * (uint32_t)le16_to_cpu(elf64->e_shnum)); } else if (elf->ei_class == 
ELF_CLASS_32) { struct elf32_hdr *elf32 = (struct elf32_hdr*) buf; sz = le32_to_cpu(elf32->e_shoff) + (le16_to_cpu(elf32->e_shentsize) * le16_to_cpu(elf32->e_shnum)); } } return sz; } /* * load a resource from FLASH * buf and len shouldn't account for ECC even if partition is ECCed. * * The API here is a bit strange. * If resource has a STB container, buf will contain it * If loading subpartition with STB container, buff will *NOT* contain it * For trusted boot, the whole partition containing the subpart is measured. * * Additionally, the logic to work out how much to read from flash is insane. */ static int flash_load_resource(enum resource_id id, uint32_t subid, void *buf, size_t *len) { int i; int rc = OPAL_RESOURCE; struct ffs_handle *ffs; struct flash *flash; const char *name; bool status = false; bool ecc; bool part_signed = false; void *bufp = buf; size_t bufsz = *len; int ffs_part_num, ffs_part_start, ffs_part_size; int content_size = 0; int offset = 0; lock(&flash_lock); if (!system_flash) { /** * @fwts-label SystemFlashNotFound * @fwts-advice No system flash was found. Check for missing * calls flash_register(...). */ prlog(PR_WARNING, "FLASH: Can't load resource id:%i. " "No system flash found\n", id); goto out_unlock; } flash = system_flash; if (flash->busy) goto out_unlock; for (i = 0, name = NULL; i < ARRAY_SIZE(part_name_map); i++) { if (part_name_map[i].id == id) { name = part_name_map[i].name; break; } } if (!name) { prerror("FLASH: Couldn't find partition for id %d\n", id); goto out_unlock; } /* * If partition doesn't have a subindex but the caller specifies one, * we fail. eg. kernel partition doesn't have a subindex */ if ((part_name_map[i].subid == RESOURCE_SUBID_NONE) && (subid != RESOURCE_SUBID_NONE)) { prerror("PLAT: Partition %s doesn't have subindex\n", name); goto out_unlock; } rc = ffs_init(0, flash->size, flash->bl, &ffs, 1); if (rc) { prerror("FLASH: Can't open ffs handle\n"); goto out_unlock; } rc = ffs_lookup_part(ffs, name, &ffs_part_num); if (rc) { /* This is not an error per-se, some partitions * are purposefully absent, don't spam the logs */ prlog(PR_DEBUG, "FLASH: No %s partition\n", name); goto out_free_ffs; } rc = ffs_part_info(ffs, ffs_part_num, NULL, &ffs_part_start, NULL, &ffs_part_size, &ecc); if (rc) { prerror("FLASH: Failed to get %s partition info\n", name); goto out_free_ffs; } prlog(PR_DEBUG,"FLASH: %s partition %s ECC\n", name, ecc ? "has" : "doesn't have"); if ((ecc ? ecc_buffer_size_minus_ecc(ffs_part_size) : ffs_part_size) < SECURE_BOOT_HEADERS_SIZE) { prerror("FLASH: secboot headers bigger than " "partition size 0x%x\n", ffs_part_size); goto out_free_ffs; } rc = blocklevel_read(flash->bl, ffs_part_start, bufp, SECURE_BOOT_HEADERS_SIZE); if (rc) { prerror("FLASH: failed to read the first 0x%x from " "%s partition, rc %d\n", SECURE_BOOT_HEADERS_SIZE, name, rc); goto out_free_ffs; } part_signed = stb_is_container(bufp, SECURE_BOOT_HEADERS_SIZE); prlog(PR_DEBUG, "FLASH: %s partition %s signed\n", name, part_signed ? "is" : "isn't"); /* * part_start/size are raw pointers into the partition. * ie. they will account for ECC if included. 
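 * (When ECC is present, each 8 bytes of data carries one extra ECC byte in the raw partition, which is why ecc_size() is added to ffs_part_start below when skipping over the secure boot header.)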
*/ if (part_signed) { bufp += SECURE_BOOT_HEADERS_SIZE; bufsz -= SECURE_BOOT_HEADERS_SIZE; content_size = stb_sw_payload_size(buf, SECURE_BOOT_HEADERS_SIZE); *len = content_size + SECURE_BOOT_HEADERS_SIZE; if (content_size > bufsz) { prerror("FLASH: content size > buffer size\n"); rc = OPAL_PARAMETER; goto out_free_ffs; } ffs_part_start += SECURE_BOOT_HEADERS_SIZE; if (ecc) ffs_part_start += ecc_size(SECURE_BOOT_HEADERS_SIZE); rc = blocklevel_read(flash->bl, ffs_part_start, bufp, content_size); if (rc) { prerror("FLASH: failed to read content size %d" " %s partition, rc %d\n", content_size, name, rc); goto out_free_ffs; } if (subid == RESOURCE_SUBID_NONE) goto done_reading; rc = flash_subpart_info(bufp, content_size, ffs_part_size, NULL, subid, &offset, &content_size); if (rc) { prerror("FLASH: Failed to parse subpart info for %s\n", name); goto out_free_ffs; } bufp += offset; goto done_reading; } else /* stb_signed */ { /* * Back to the old way of doing things, no STB header. */ if (subid == RESOURCE_SUBID_NONE) { if (id == RESOURCE_ID_KERNEL) { /* * Because actualSize is a lie, we compute the * size of the BOOTKERNEL based on what the ELF * headers say. Otherwise we end up reading more * than we should */ content_size = sizeof_elf_from_hdr(buf); if (!content_size) { prerror("FLASH: Invalid ELF header part" " %s\n", name); rc = OPAL_RESOURCE; goto out_free_ffs; } } else { content_size = ffs_part_size; } if (content_size > bufsz) { prerror("FLASH: %s content size %d > " " buffer size %lu\n", name, content_size, bufsz); rc = OPAL_PARAMETER; goto out_free_ffs; } prlog(PR_DEBUG, "FLASH: computed %s size %u\n", name, content_size); rc = blocklevel_read(flash->bl, ffs_part_start, buf, content_size); if (rc) { prerror("FLASH: failed to read content size %d" " %s partition, rc %d\n", content_size, name, rc); goto out_free_ffs; } *len = content_size; goto done_reading; } BUILD_ASSERT(FLASH_SUBPART_HEADER_SIZE <= SECURE_BOOT_HEADERS_SIZE); rc = flash_subpart_info(bufp, SECURE_BOOT_HEADERS_SIZE, ffs_part_size, &ffs_part_size, subid, &offset, &content_size); if (rc) { prerror("FLASH: FAILED reading subpart info. rc=%d\n", rc); goto out_free_ffs; } *len = ffs_part_size; prlog(PR_DEBUG, "FLASH: Computed %s partition size: %u " "(subpart %u size %u offset %u)\n", name, ffs_part_size, subid, content_size, offset); /* * For a sub partition, we read the whole (computed) * partition, and then measure that. * Afterwards, we memmove() things back into place for * the caller. */ rc = blocklevel_read(flash->bl, ffs_part_start, buf, ffs_part_size); bufp += offset; } done_reading: /* * Verify and measure the retrieved PNOR partition as part of the * secure boot and trusted boot requirements */ secureboot_verify(id, buf, *len); trustedboot_measure(id, buf, *len); /* Find subpartition */ if (subid != RESOURCE_SUBID_NONE) { memmove(buf, bufp, content_size); *len = content_size; } status = true; out_free_ffs: ffs_close(ffs); out_unlock: unlock(&flash_lock); return status ? 
OPAL_SUCCESS : rc; } struct flash_load_resource_item { enum resource_id id; uint32_t subid; int result; void *buf; size_t *len; struct list_node link; }; static LIST_HEAD(flash_load_resource_queue); static LIST_HEAD(flash_loaded_resources); static struct lock flash_load_resource_lock = LOCK_UNLOCKED; static struct cpu_job *flash_load_job = NULL; int flash_resource_loaded(enum resource_id id, uint32_t subid) { struct flash_load_resource_item *resource = NULL; struct flash_load_resource_item *r; int rc = OPAL_BUSY; lock(&flash_load_resource_lock); list_for_each(&flash_loaded_resources, r, link) { if (r->id == id && r->subid == subid) { resource = r; break; } } if (resource) { rc = resource->result; list_del(&resource->link); free(resource); } if (list_empty(&flash_load_resource_queue) && flash_load_job) { cpu_wait_job(flash_load_job, true); flash_load_job = NULL; } unlock(&flash_load_resource_lock); return rc; } static void flash_load_resources(void *data __unused) { struct flash_load_resource_item *r; int result; lock(&flash_load_resource_lock); do { if (list_empty(&flash_load_resource_queue)) { break; } r = list_top(&flash_load_resource_queue, struct flash_load_resource_item, link); if (r->result != OPAL_EMPTY) prerror("flash_load_resources() list_top unexpected " " result %d\n", r->result); r->result = OPAL_BUSY; unlock(&flash_load_resource_lock); result = flash_load_resource(r->id, r->subid, r->buf, r->len); lock(&flash_load_resource_lock); r = list_pop(&flash_load_resource_queue, struct flash_load_resource_item, link); r->result = result; list_add_tail(&flash_loaded_resources, &r->link); } while(true); unlock(&flash_load_resource_lock); } static void start_flash_load_resource_job(void) { if (flash_load_job) cpu_wait_job(flash_load_job, true); flash_load_job = cpu_queue_job(NULL, "flash_load_resources", flash_load_resources, NULL); cpu_process_local_jobs(); } int flash_start_preload_resource(enum resource_id id, uint32_t subid, void *buf, size_t *len) { struct flash_load_resource_item *r; bool start_thread = false; r = malloc(sizeof(struct flash_load_resource_item)); assert(r != NULL); r->id = id; r->subid = subid; r->buf = buf; r->len = len; r->result = OPAL_EMPTY; prlog(PR_DEBUG, "FLASH: Queueing preload of %x/%x\n", r->id, r->subid); lock(&flash_load_resource_lock); if (list_empty(&flash_load_resource_queue)) { start_thread = true; } list_add_tail(&flash_load_resource_queue, &r->link); unlock(&flash_load_resource_lock); if (start_thread) start_flash_load_resource_job(); return OPAL_SUCCESS; } skiboot-5.10-rc4/core/gcov-profiling.c000066400000000000000000000062101324317060200176150ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include typedef long gcov_type; /* * This is GCC internal data structure. See GCC libgcc/libgcov.h for * details. * * If gcc changes this, we have to change it. 
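 * In particular, the size of the merge[] callback array (GCOV_COUNTERS) differs between GCC releases, which is why it is chosen below based on the compiler version.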
*/ typedef unsigned int gcov_unsigned_int; #if __GNUC__ == 4 && __GNUC_MINOR__ >= 9 #define GCOV_COUNTERS 9 #else #define GCOV_COUNTERS 8 #endif struct gcov_info { gcov_unsigned_int version; struct gcov_info *next; gcov_unsigned_int stamp; const char *filename; void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int); unsigned int n_functions; struct gcov_fn_info **functions; }; /* We have a list of all gcov info set up at startup */ struct gcov_info *gcov_info_list; void __gcov_init(struct gcov_info* f); void skiboot_gcov_done(void); void __gcov_flush(void) __attrconst; void __gcov_merge_add(gcov_type *counters, unsigned int n_counters) __attrconst; void __gcov_merge_single(gcov_type *counters, unsigned int n_counters) __attrconst; void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters) __attrconst; void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters) __attrconst; void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters) __attrconst; void __gcov_exit(void) __attrconst; void __gcov_init(struct gcov_info* f) { static gcov_unsigned_int version = 0; if (version == 0) { printf("GCOV version: %u\n", f->version); version = f->version; } if (gcov_info_list) f->next = gcov_info_list; gcov_info_list = f; return; } void skiboot_gcov_done(void) { struct gcov_info *i = gcov_info_list; if (i->filename) printf("GCOV: gcov_info_list looks sane (first file: %s)\n", i->filename); else prlog(PR_WARNING, "GCOV: gcov_info_list doesn't look sane. " "i->filename == NULL."); printf("GCOV: gcov_info_list at 0x%p\n", gcov_info_list); } void __gcov_merge_add(gcov_type *counters, unsigned int n_counters) { (void)counters; (void)n_counters; return; } void __gcov_flush(void) { return; } void __gcov_merge_single(gcov_type *counters, unsigned int n_counters) { (void)counters; (void)n_counters; return; } void __gcov_merge_delta(gcov_type *counters, unsigned int n_counters) { (void)counters; (void)n_counters; return; } void __gcov_merge_ior(gcov_type *counters, unsigned int n_counters) { (void)counters; (void)n_counters; return; } void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters) { (void)counters; (void)n_counters; } void __gcov_exit(void) { } skiboot-5.10-rc4/core/hmi.c000066400000000000000000001161761324317060200154620ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define pr_fmt(fmt) "HMI: " fmt #include #include #include #include #include #include #include #include #include #include #include #include #include /* * HMER register layout: * +===+==========+============================+========+===================+ * |Bit|Name |Description |PowerKVM|Action | * | | | |HMI | | * | | | |enabled | | * | | | |for this| | * | | | |bit ? 
| | * +===+==========+============================+========+===================+ * |0 |malfunctio|A processor core in the |Yes |Raise attn from | * | |n_allert |system has checkstopped | |sapphire resulting | * | | |(failed recovery) and has | |xstop | * | | |requested a CP Sparing | | | * | | |to occur. This is | | | * | | |broadcasted to every | | | * | | |processor in the system | | | * |---+----------+----------------------------+--------+-------------------| * |1 |Reserved |reserved |n/a | | * |---+----------+----------------------------+--------+-------------------| * |2 |proc_recv_|Processor recovery occurred |Yes |Log message and | * | |done |error-bit in fir not masked | |continue working. | * | | |(see bit 11) | | | * |---+----------+----------------------------+--------+-------------------| * |3 |proc_recv_|Processor went through |Yes |Log message and | * | |error_mask|recovery for an error which | |continue working. | * | |ed |is actually masked for | | | * | | |reporting | | | * |---+----------+----------------------------+--------+-------------------| * |4 | |Timer facility experienced |Yes |Raise attn from | * | |tfac_error|an error. | |sapphire resulting | * | | |TB, DEC, HDEC, PURR or SPURR| |xstop | * | | |may be corrupted (details in| | | * | | |TFMR) | | | * |---+----------+----------------------------+--------+-------------------| * |5 | |TFMR SPR itself is |Yes |Raise attn from | * | |tfmr_parit|corrupted. | |sapphire resulting | * | |y_error |Entire timing facility may | |xstop | * | | |be compromised. | | | * |---+----------+----------------------------+--------+-------------------| * |6 |ha_overflo| UPS (Uniterrupted Power |No |N/A | * | |w_warning |System) Overflow indication | | | * | | |indicating that the UPS | | | * | | |DirtyAddrTable has | | | * | | |reached a limit where it | | | * | | |requires PHYP unload support| | | * |---+----------+----------------------------+--------+-------------------| * |7 |reserved |reserved |n/a |n/a | * |---+----------+----------------------------+--------+-------------------| * |8 |xscom_fail|An XSCOM operation caused by|No |We handle it by | * | | |a cache inhibited load/store| |manually reading | * | | |from this thread failed. A | |HMER register. | * | | |trap register is | | | * | | |available. | | | * | | | | | | * |---+----------+----------------------------+--------+-------------------| * |9 |xscom_done|An XSCOM operation caused by|No |We handle it by | * | | |a cache inhibited load/store| |manually reading | * | | |from this thread completed. | |HMER register. | * | | |If hypervisor | | | * | | |intends to use this bit, it | | | * | | |is responsible for clearing | | | * | | |it before performing the | | | * | | |xscom operation. | | | * | | |NOTE: this bit should always| | | * | | |be masked in HMEER | | | * |---+----------+----------------------------+--------+-------------------| * |10 |reserved |reserved |n/a |n/a | * |---+----------+----------------------------+--------+-------------------| * |11 |proc_recv_|Processor recovery occurred |y |Log message and | * | |again |again before bit2 or bit3 | |continue working. 
| * | | |was cleared | | | * |---+----------+----------------------------+--------+-------------------| * |12-|reserved |was temperature sensor |n/a |n/a | * |15 | |passed the critical point on| | | * | | |the way up | | | * |---+----------+----------------------------+--------+-------------------| * |16 | |SCOM has set a reserved FIR |No |n/a | * | |scom_fir_h|bit to cause recovery | | | * | |m | | | | * |---+----------+----------------------------+--------+-------------------| * |17 |trig_fir_h|Debug trigger has set a |No |n/a | * | |mi |reserved FIR bit to cause | | | * | | |recovery | | | * |---+----------+----------------------------+--------+-------------------| * |18 |reserved |reserved |n/a |n/a | * |---+----------+----------------------------+--------+-------------------| * |19 |reserved |reserved |n/a |n/a | * |---+----------+----------------------------+--------+-------------------| * |20 |hyp_resour|A hypervisor resource error |y |Raise attn from | * | |ce_err |occurred: data parity error | |sapphire resulting | * | | |on, SPRC0:3; SPR_Modereg or | |xstop. | * | | |HMEER. | | | * | | |Note: this bit will cause an| | | * | | |check_stop when (HV=1, PR=0 | | | * | | |and EE=0) | | | * |---+----------+----------------------------+--------+-------------------| * |21-| |if bit 8 is active, the |No |We handle it by | * |23 |xscom_stat|reason will be detailed in | |Manually reading | * | |us |these bits. see chapter 11.1| |HMER register. | * | | |This bits are information | | | * | | |only and always masked | | | * | | |(mask = '0') | | | * | | |If hypervisor intends to use| | | * | | |this bit, it is responsible | | | * | | |for clearing it before | | | * | | |performing the xscom | | | * | | |operation. | | | * |---+----------+----------------------------+--------+-------------------| * |24-|Not |Not implemented |n/a |n/a | * |63 |implemente| | | | * | |d | | | | * +-- +----------+----------------------------+--------+-------------------+ * * Above HMER bits can be enabled/disabled by modifying * SPR_HMEER_HMI_ENABLE_MASK #define in include/processor.h * If you modify support for any of the bits listed above, please make sure * you change the above table to refelct that. * * NOTE: Per Dave Larson, never enable 8,9,21-23 */ /* Used for tracking cpu threads inside hmi handling. */ #define HMI_STATE_CLEANUP_DONE 0x100 #define CORE_THREAD_MASK 0x0ff #define SUBCORE_THREAD_MASK(s_id, t_count) \ ((((1UL) << (t_count)) - 1) << ((s_id) * (t_count))) #define SINGLE_THREAD_MASK(t_id) ((1UL) << (t_id)) /* xscom addresses for core FIR (Fault Isolation Register) */ #define P8_CORE_FIR 0x10013100 #define P9_CORE_FIR 0x20010A40 /* xscom addresses for pMisc Receive Malfunction Alert Register */ #define P8_MALFUNC_ALERT 0x02020011 #define P9_MALFUNC_ALERT 0x00090022 #define P8_NX_STATUS_REG 0x02013040 /* NX status register */ #define P8_NX_DMA_ENGINE_FIR 0x02013100 /* DMA & Engine FIR Data Register */ #define P8_NX_PBI_FIR 0x02013080 /* PowerBus Interface FIR Register */ #define P9_NX_STATUS_REG 0x02011040 /* NX status register */ #define P9_NX_DMA_ENGINE_FIR 0x02011100 /* DMA & Engine FIR Data Register */ #define P9_NX_PBI_FIR 0x02011080 /* PowerBus Interface FIR Register */ /* * Bit 54 from NX status register is set to 1 when HMI interrupt is triggered * due to NX checksop. */ #define NX_HMI_ACTIVE PPC_BIT(54) /* Number of iterations for the various timeouts */ #define TIMEOUT_LOOPS 20000000 /* TFMR other errors. 
(other than bit 26 and 45) */ #define SPR_TFMR_OTHER_ERRORS \ (SPR_TFMR_TBST_CORRUPT | SPR_TFMR_TB_MISSING_SYNC | \ SPR_TFMR_TB_MISSING_STEP | SPR_TFMR_FW_CONTROL_ERR | \ SPR_TFMR_PURR_PARITY_ERR | SPR_TFMR_SPURR_PARITY_ERR | \ SPR_TFMR_DEC_PARITY_ERR | SPR_TFMR_TFMR_CORRUPT | \ SPR_TFMR_CHIP_TOD_INTERRUPT) static const struct core_xstop_bit_info { uint8_t bit; /* CORE FIR bit number */ enum OpalHMI_CoreXstopReason reason; } xstop_bits[] = { { 3, CORE_CHECKSTOP_IFU_REGFILE }, { 5, CORE_CHECKSTOP_IFU_LOGIC }, { 8, CORE_CHECKSTOP_PC_DURING_RECOV }, { 10, CORE_CHECKSTOP_ISU_REGFILE }, { 12, CORE_CHECKSTOP_ISU_LOGIC }, { 21, CORE_CHECKSTOP_FXU_LOGIC }, { 25, CORE_CHECKSTOP_VSU_LOGIC }, { 26, CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE }, { 32, CORE_CHECKSTOP_LSU_REGFILE }, { 36, CORE_CHECKSTOP_PC_FWD_PROGRESS }, { 38, CORE_CHECKSTOP_LSU_LOGIC }, { 45, CORE_CHECKSTOP_PC_LOGIC }, { 48, CORE_CHECKSTOP_PC_HYP_RESOURCE }, { 52, CORE_CHECKSTOP_PC_HANG_RECOV_FAILED }, { 54, CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED }, { 60, CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ }, { 63, CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ }, }; static const struct nx_xstop_bit_info { uint8_t bit; /* NX FIR bit number */ enum OpalHMI_NestAccelXstopReason reason; } nx_dma_xstop_bits[] = { { 1, NX_CHECKSTOP_SHM_INVAL_STATE_ERR }, { 15, NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1 }, { 16, NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2 }, { 20, NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR }, { 21, NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR }, { 22, NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR }, { 23, NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR }, { 24, NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR }, { 25, NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR }, { 26, NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR }, { 27, NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR }, { 31, NX_CHECKSTOP_DMA_CRB_UE }, { 32, NX_CHECKSTOP_DMA_CRB_SUE }, }; static const struct nx_xstop_bit_info nx_pbi_xstop_bits[] = { { 12, NX_CHECKSTOP_PBI_ISN_UE }, }; static struct lock hmi_lock = LOCK_UNLOCKED; static uint32_t malf_alert_scom; static uint32_t nx_status_reg; static uint32_t nx_dma_engine_fir; static uint32_t nx_pbi_fir; static int setup_scom_addresses(void) { switch (proc_gen) { case proc_gen_p8: malf_alert_scom = P8_MALFUNC_ALERT; nx_status_reg = P8_NX_STATUS_REG; nx_dma_engine_fir = P8_NX_DMA_ENGINE_FIR; nx_pbi_fir = P8_NX_PBI_FIR; return 1; case proc_gen_p9: malf_alert_scom = P9_MALFUNC_ALERT; nx_status_reg = P9_NX_STATUS_REG; nx_dma_engine_fir = P9_NX_DMA_ENGINE_FIR; nx_pbi_fir = P9_NX_PBI_FIR; return 1; default: prerror("%s: Unknown CPU type\n", __func__); break; } return 0; } static int queue_hmi_event(struct OpalHMIEvent *hmi_evt, int recover) { size_t num_params; /* Don't queue up event if recover == -1 */ if (recover == -1) return 0; /* set disposition */ if (recover == 1) hmi_evt->disposition = OpalHMI_DISPOSITION_RECOVERED; else if (recover == 0) hmi_evt->disposition = OpalHMI_DISPOSITION_NOT_RECOVERED; /* * V2 of struct OpalHMIEvent is of (5 * 64 bits) size and well packed * structure. Hence use uint64_t pointer to pass entire structure * using 5 params in generic message format. Instead of hard coding * num_params divide the struct size by 8 bytes to get exact * num_params value. */ num_params = ALIGN_UP(sizeof(*hmi_evt), sizeof(u64)) / sizeof(u64); /* queue up for delivery to host. 
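 * (For the V2 event of 5 * 64 bits, num_params works out to 5, i.e. the whole structure is passed as five u64 message parameters.)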
*/ return _opal_queue_msg(OPAL_MSG_HMI_EVT, NULL, NULL, num_params, (uint64_t *)hmi_evt); } static int read_core_fir(uint32_t chip_id, uint32_t core_id, uint64_t *core_fir) { int rc; switch (proc_gen) { case proc_gen_p8: rc = xscom_read(chip_id, XSCOM_ADDR_P8_EX(core_id, P8_CORE_FIR), core_fir); break; case proc_gen_p9: rc = xscom_read(chip_id, XSCOM_ADDR_P9_EC(core_id, P9_CORE_FIR), core_fir); break; default: rc = OPAL_HARDWARE; } return rc; } static bool decode_core_fir(struct cpu_thread *cpu, struct OpalHMIEvent *hmi_evt) { uint64_t core_fir; uint32_t core_id; int i; bool found = false; int64_t ret; const char *loc; /* Sanity check */ if (!cpu || !hmi_evt) return false; core_id = pir_to_core_id(cpu->pir); /* Get CORE FIR register value. */ ret = read_core_fir(cpu->chip_id, core_id, &core_fir); if (ret == OPAL_HARDWARE) { prerror("XSCOM error reading CORE FIR\n"); /* If the FIR can't be read, we should checkstop. */ return true; } else if (ret == OPAL_WRONG_STATE) { /* * CPU is asleep, so it probably didn't cause the checkstop. * If no other HMI cause is found a "catchall" checkstop * will be raised, so if this CPU should've been awake the * error will be handled appropriately. */ prlog(PR_DEBUG, "FIR read failed, chip %d core %d asleep\n", cpu->chip_id, core_id); return false; } if (!core_fir) return false; loc = chip_loc_code(cpu->chip_id); prlog(PR_INFO, "[Loc: %s]: CHIP ID: %x, CORE ID: %x, FIR: %016llx\n", loc ? loc : "Not Available", cpu->chip_id, core_id, core_fir); /* Check CORE FIR bits and populate HMI event with error info. */ for (i = 0; i < ARRAY_SIZE(xstop_bits); i++) { if (core_fir & PPC_BIT(xstop_bits[i].bit)) { found = true; hmi_evt->u.xstop_error.xstop_reason |= xstop_bits[i].reason; } } return found; } static void find_core_checkstop_reason(struct OpalHMIEvent *hmi_evt, bool *event_generated) { struct cpu_thread *cpu; /* Initialize HMI event */ hmi_evt->severity = OpalHMI_SEV_FATAL; hmi_evt->type = OpalHMI_ERROR_MALFUNC_ALERT; hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_CORE; /* * Check CORE FIRs and find the reason for core checkstop. * Send a separate HMI event for each core that has checkstopped. */ for_each_cpu(cpu) { /* GARDed CPUs are marked unavailable. Skip them. */ if (cpu->state == cpu_state_unavailable) continue; /* Only check on primaries (ie. core), not threads */ if (cpu->is_secondary) continue; /* Initialize xstop_error fields. */ hmi_evt->u.xstop_error.xstop_reason = 0; hmi_evt->u.xstop_error.u.pir = cpu->pir; if (decode_core_fir(cpu, hmi_evt)) { queue_hmi_event(hmi_evt, 0); *event_generated = 1; } } } static void find_capp_checkstop_reason(int flat_chip_id, struct OpalHMIEvent *hmi_evt, bool *event_generated) { struct capp_info info; struct phb *phb; uint64_t capp_fir; uint64_t capp_fir_mask; uint64_t capp_fir_action0; uint64_t capp_fir_action1; uint64_t reg; int64_t rc; /* Find the CAPP on the chip associated with the HMI. 
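 * Each PHB is probed with capp_get_info(); PHBs on other chips (or without a CAPP) return OPAL_PARAMETER and are skipped, and only unmasked FIR bits (capp_fir & ~capp_fir_mask) are treated as a real CAPP error.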
*/ for_each_phb(phb) { /* get the CAPP info */ rc = capp_get_info(flat_chip_id, phb, &info); if (rc == OPAL_PARAMETER) continue; if (xscom_read(flat_chip_id, info.capp_fir_reg, &capp_fir) || xscom_read(flat_chip_id, info.capp_fir_mask_reg, &capp_fir_mask) || xscom_read(flat_chip_id, info.capp_fir_action0_reg, &capp_fir_action0) || xscom_read(flat_chip_id, info.capp_fir_action1_reg, &capp_fir_action1)) { prerror("CAPP: Couldn't read CAPP#%d (PHB:#%x) FIR registers by XSCOM!\n", info.capp_index, info.phb_index); continue; } if (!(capp_fir & ~capp_fir_mask)) continue; prlog(PR_DEBUG, "CAPP#%d (PHB:#%x): FIR 0x%016llx mask 0x%016llx\n", info.capp_index, info.phb_index, capp_fir, capp_fir_mask); prlog(PR_DEBUG, "CAPP#%d (PHB:#%x): ACTION0 0x%016llx, ACTION1 0x%016llx\n", info.capp_index, info.phb_index, capp_fir_action0, capp_fir_action1); /* * If this bit is set (=1) a Recoverable Error has been * detected */ xscom_read(flat_chip_id, info.capp_err_status_ctrl_reg, ®); if ((reg & PPC_BIT(0)) != 0) { phb_lock(phb); phb->ops->set_capp_recovery(phb); phb_unlock(phb); hmi_evt->severity = OpalHMI_SEV_NO_ERROR; hmi_evt->type = OpalHMI_ERROR_CAPP_RECOVERY; queue_hmi_event(hmi_evt, 1); *event_generated = true; return; } } } static void find_nx_checkstop_reason(int flat_chip_id, struct OpalHMIEvent *hmi_evt, bool *event_generated) { uint64_t nx_status; uint64_t nx_dma_fir; uint64_t nx_pbi_fir_val; int i; /* Get NX status register value. */ if (xscom_read(flat_chip_id, nx_status_reg, &nx_status) != 0) { prerror("XSCOM error reading NX_STATUS_REG\n"); return; } /* Check if NX has driven an HMI interrupt. */ if (!(nx_status & NX_HMI_ACTIVE)) return; /* Initialize HMI event */ hmi_evt->severity = OpalHMI_SEV_FATAL; hmi_evt->type = OpalHMI_ERROR_MALFUNC_ALERT; hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_NX; hmi_evt->u.xstop_error.u.chip_id = flat_chip_id; /* Get DMA & Engine FIR data register value. */ if (xscom_read(flat_chip_id, nx_dma_engine_fir, &nx_dma_fir) != 0) { prerror("XSCOM error reading NX_DMA_ENGINE_FIR\n"); return; } /* Get PowerBus Interface FIR data register value. */ if (xscom_read(flat_chip_id, nx_pbi_fir, &nx_pbi_fir_val) != 0) { prerror("XSCOM error reading NX_PBI_FIR\n"); return; } /* Find NX checkstop reason and populate HMI event with error info. */ for (i = 0; i < ARRAY_SIZE(nx_dma_xstop_bits); i++) if (nx_dma_fir & PPC_BIT(nx_dma_xstop_bits[i].bit)) hmi_evt->u.xstop_error.xstop_reason |= nx_dma_xstop_bits[i].reason; for (i = 0; i < ARRAY_SIZE(nx_pbi_xstop_bits); i++) if (nx_pbi_fir_val & PPC_BIT(nx_pbi_xstop_bits[i].bit)) hmi_evt->u.xstop_error.xstop_reason |= nx_pbi_xstop_bits[i].reason; /* * Set NXDMAENGFIR[38] to signal PRD that service action is required. * Without this inject, PRD will not be able to do NX unit checkstop * error analysis. NXDMAENGFIR[38] is a spare bit and used to report * a software initiated attention. * * The behavior of this bit and all FIR bits are documented in * RAS spreadsheet. */ xscom_write(flat_chip_id, nx_dma_engine_fir, PPC_BIT(38)); /* Send an HMI event. 
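 * The event is queued with recover = 0 (not recovered): an NX unit checkstop is fatal, and PRD performs the detailed analysis using the FIR bit injected above.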
*/ queue_hmi_event(hmi_evt, 0); *event_generated = true; } static void find_npu2_checkstop_reason(int flat_chip_id, struct OpalHMIEvent *hmi_evt, bool *event_generated) { struct phb *phb; struct npu *p = NULL; int i; uint64_t npu2_fir; uint64_t npu2_fir_mask; uint64_t npu2_fir_action0; uint64_t npu2_fir_action1; uint64_t npu2_fir_addr; uint64_t npu2_fir_mask_addr; uint64_t npu2_fir_action0_addr; uint64_t npu2_fir_action1_addr; uint64_t fatal_errors; int total_errors = 0; /* Find the NPU on the chip associated with the HMI. */ for_each_phb(phb) { /* NOTE: if a chip ever has >1 NPU this will need adjusting */ if (dt_node_is_compatible(phb->dt_node, "ibm,power9-npu-pciex") && (dt_get_chip_id(phb->dt_node) == flat_chip_id)) { p = phb_to_npu(phb); break; } } /* If we didn't find a NPU on the chip, it's not our checkstop. */ if (p == NULL) return; npu2_fir_addr = NPU2_FIR_REGISTER_0; npu2_fir_mask_addr = NPU2_FIR_REGISTER_0 + NPU2_FIR_MASK_OFFSET; npu2_fir_action0_addr = NPU2_FIR_REGISTER_0 + NPU2_FIR_ACTION0_OFFSET; npu2_fir_action1_addr = NPU2_FIR_REGISTER_0 + NPU2_FIR_ACTION1_OFFSET; for (i = 0; i < NPU2_TOTAL_FIR_REGISTERS; i++) { /* Read all the registers necessary to find a checkstop condition. */ if (xscom_read(flat_chip_id, npu2_fir_addr, &npu2_fir) || xscom_read(flat_chip_id, npu2_fir_mask_addr, &npu2_fir_mask) || xscom_read(flat_chip_id, npu2_fir_action0_addr, &npu2_fir_action0) || xscom_read(flat_chip_id, npu2_fir_action1_addr, &npu2_fir_action1)) { prerror("HMI: Couldn't read NPU FIR register%d with XSCOM\n", i); continue; } fatal_errors = npu2_fir & ~npu2_fir_mask & npu2_fir_action0 & npu2_fir_action1; if (fatal_errors) { prlog(PR_ERR, "NPU: FIR#%d FIR 0x%016llx mask 0x%016llx\n", i, npu2_fir, npu2_fir_mask); prlog(PR_ERR, "NPU: ACTION0 0x%016llx, ACTION1 0x%016llx\n", npu2_fir_action0, npu2_fir_action1); total_errors++; } /* Can't do a fence yet, we are just logging fir information for now */ npu2_fir_addr += NPU2_FIR_OFFSET; npu2_fir_mask_addr += NPU2_FIR_OFFSET; npu2_fir_action0_addr += NPU2_FIR_OFFSET; npu2_fir_action1_addr += NPU2_FIR_OFFSET; } if (!total_errors) return; /* Set up the HMI event */ hmi_evt->severity = OpalHMI_SEV_WARNING; hmi_evt->type = OpalHMI_ERROR_MALFUNC_ALERT; hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_NPU; hmi_evt->u.xstop_error.u.chip_id = flat_chip_id; /* Marking the event as recoverable so that we don't crash */ queue_hmi_event(hmi_evt, 1); *event_generated = true; } static void find_npu_checkstop_reason(int flat_chip_id, struct OpalHMIEvent *hmi_evt, bool *event_generated) { struct phb *phb; struct npu *p = NULL; uint64_t npu_fir; uint64_t npu_fir_mask; uint64_t npu_fir_action0; uint64_t npu_fir_action1; uint64_t fatal_errors; /* Only check for NPU errors if the chip has a NPU */ if (PVR_TYPE(mfspr(SPR_PVR)) != PVR_TYPE_P8NVL) return find_npu2_checkstop_reason(flat_chip_id, hmi_evt, event_generated); /* Find the NPU on the chip associated with the HMI. */ for_each_phb(phb) { /* NOTE: if a chip ever has >1 NPU this will need adjusting */ if (dt_node_is_compatible(phb->dt_node, "ibm,power8-npu-pciex") && (dt_get_chip_id(phb->dt_node) == flat_chip_id)) { p = phb_to_npu(phb); break; } } /* If we didn't find a NPU on the chip, it's not our checkstop. */ if (p == NULL) return; /* Read all the registers necessary to find a checkstop condition. 
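 * A FIR bit is treated as fatal here only when it is set, not masked, and has both ACTION0 and ACTION1 set, i.e. fir & ~mask & action0 & action1 is non-zero.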
*/ if (xscom_read(flat_chip_id, p->at_xscom + NX_FIR, &npu_fir) || xscom_read(flat_chip_id, p->at_xscom + NX_FIR_MASK, &npu_fir_mask) || xscom_read(flat_chip_id, p->at_xscom + NX_FIR_ACTION0, &npu_fir_action0) || xscom_read(flat_chip_id, p->at_xscom + NX_FIR_ACTION1, &npu_fir_action1)) { prerror("Couldn't read NPU registers with XSCOM\n"); return; } fatal_errors = npu_fir & ~npu_fir_mask & npu_fir_action0 & npu_fir_action1; /* If there's no errors, we don't need to do anything. */ if (!fatal_errors) return; prlog(PR_DEBUG, "NPU: FIR 0x%016llx mask 0x%016llx\n", npu_fir, npu_fir_mask); prlog(PR_DEBUG, "NPU: ACTION0 0x%016llx, ACTION1 0x%016llx\n", npu_fir_action0, npu_fir_action1); /* Set the NPU to fenced since it can't recover. */ npu_set_fence_state(p, true); /* Set up the HMI event */ hmi_evt->severity = OpalHMI_SEV_WARNING; hmi_evt->type = OpalHMI_ERROR_MALFUNC_ALERT; hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_NPU; hmi_evt->u.xstop_error.u.chip_id = flat_chip_id; /* The HMI is "recoverable" because it shouldn't crash the system */ queue_hmi_event(hmi_evt, 1); *event_generated = true; } static void decode_malfunction(struct OpalHMIEvent *hmi_evt) { int i; uint64_t malf_alert; bool event_generated = false; if (!setup_scom_addresses()) { prerror("Failed to setup scom addresses\n"); /* Send an unknown HMI event. */ hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_UNKNOWN; hmi_evt->u.xstop_error.xstop_reason = 0; queue_hmi_event(hmi_evt, false); return; } xscom_read(this_cpu()->chip_id, malf_alert_scom, &malf_alert); if (!malf_alert) return; for (i = 0; i < 64; i++) { if (malf_alert & PPC_BIT(i)) { xscom_write(this_cpu()->chip_id, malf_alert_scom, ~PPC_BIT(i)); find_capp_checkstop_reason(i, hmi_evt, &event_generated); find_nx_checkstop_reason(i, hmi_evt, &event_generated); find_npu_checkstop_reason(i, hmi_evt, &event_generated); } } find_core_checkstop_reason(hmi_evt, &event_generated); /* * If we fail to find checkstop reason, send an unknown HMI event. */ if (!event_generated) { hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_UNKNOWN; hmi_evt->u.xstop_error.xstop_reason = 0; queue_hmi_event(hmi_evt, false); } } static void wait_for_cleanup_complete(void) { uint64_t timeout = 0; smt_lowest(); while (!(*(this_cpu()->core_hmi_state_ptr) & HMI_STATE_CLEANUP_DONE)) { /* * We use a fixed number of TIMEOUT_LOOPS rather * than using the timebase to do a pseudo-wall time * timeout due to the fact that timebase may not actually * work at this point in time. */ if (++timeout >= (TIMEOUT_LOOPS*3)) { /* * Break out the loop here and fall through * recovery code. If recovery fails, kernel will get * informed about the failure. This way we can avoid * looping here if other threads are stuck. */ prlog(PR_DEBUG, "TB pre-recovery timeout\n"); break; } barrier(); } smt_medium(); } /* * For successful recovery of TB residue error, remove dirty data * from TB/HDEC register in each active partition (subcore). Writing * zero's to TB/HDEC will achieve the same. */ static void timer_facility_do_cleanup(uint64_t tfmr) { /* * Workaround for HW logic bug in Power9. Do not reset the * TB register if TB is valid and running. */ if ((tfmr & SPR_TFMR_TB_RESIDUE_ERR) && !(tfmr & SPR_TFMR_TB_VALID)) { /* Reset the TB register to clear the dirty data. 
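 * Zeroing TBU/TBL through SPR_TBWU/SPR_TBWL only discards the residue; the real timebase value is expected to be restored later by chiptod_recover_tb_errors() during HMI handling.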
*/ mtspr(SPR_TBWU, 0); mtspr(SPR_TBWL, 0); } if (tfmr & SPR_TFMR_HDEC_PARITY_ERROR) { /* Reset HDEC register */ mtspr(SPR_HDEC, 0); } } static int get_split_core_mode(void) { uint64_t hid0; hid0 = mfspr(SPR_HID0); if (hid0 & SPR_HID0_POWER8_2LPARMODE) return 2; else if (hid0 & SPR_HID0_POWER8_4LPARMODE) return 4; return 1; } /* * Certain TB/HDEC errors leaves dirty data in timebase and hdec register * which need to cleared before we initiate clear_tb_errors through TFMR[24]. * The cleanup has to be done by once by any one thread from core or subcore. * * In split core mode, it is required to clear the dirty data from TB/HDEC * register by all subcores (active partitions) before we clear tb errors * through TFMR[24]. The HMI recovery would fail even if one subcore do * not cleanup the respective TB/HDEC register. * * For un-split core, any one thread can do the cleanup. * For split core, any one thread from each subcore can do the cleanup. * * Errors that required pre-recovery cleanup: * - SPR_TFMR_TB_RESIDUE_ERR * - SPR_TFMR_HDEC_PARITY_ERROR */ static void pre_recovery_cleanup_p8(void) { uint64_t hmer; uint64_t tfmr; uint32_t sibling_thread_mask; int split_core_mode, subcore_id, thread_id, threads_per_core; int i; hmer = mfspr(SPR_HMER); /* exit if it is not Time facility error. */ if (!(hmer & SPR_HMER_TFAC_ERROR)) return; /* * Exit if it is not the error that leaves dirty data in timebase * or HDEC register. OR this may be the thread which came in very * late and recovery is been already done. * * TFMR is per [sub]core register. If any one thread on the [sub]core * does the recovery it reflects in TFMR register and applicable to * all threads in that [sub]core. Hence take a lock before checking * TFMR errors. Once a thread from a [sub]core completes the * recovery, all other threads on that [sub]core will return from * here. * * If TFMR does not show error that we are looking for, return * from here. We would just fall through recovery code which would * check for other errors on TFMR and fix them. */ lock(&hmi_lock); tfmr = mfspr(SPR_TFMR); if (!(tfmr & (SPR_TFMR_TB_RESIDUE_ERR | SPR_TFMR_HDEC_PARITY_ERROR))) { unlock(&hmi_lock); return; } /* Gather split core information. */ split_core_mode = get_split_core_mode(); threads_per_core = cpu_thread_count / split_core_mode; /* Prepare core/subcore sibling mask */ thread_id = cpu_get_thread_index(this_cpu()); subcore_id = thread_id / threads_per_core; sibling_thread_mask = SUBCORE_THREAD_MASK(subcore_id, threads_per_core); /* * First thread on the core ? * if yes, setup the hmi cleanup state to !DONE */ if ((*(this_cpu()->core_hmi_state_ptr) & CORE_THREAD_MASK) == 0) *(this_cpu()->core_hmi_state_ptr) &= ~HMI_STATE_CLEANUP_DONE; /* * First thread on subcore ? * if yes, do cleanup. * * Clear TB and wait for other threads (one from each subcore) to * finish its cleanup work. */ if ((*(this_cpu()->core_hmi_state_ptr) & sibling_thread_mask) == 0) timer_facility_do_cleanup(tfmr); /* * Mark this thread bit. This bit will stay on until this thread * exit from handle_hmi_exception(). */ *(this_cpu()->core_hmi_state_ptr) |= this_cpu()->thread_mask; /* * Check if each subcore has completed the cleanup work. * if yes, then notify all the threads that we are done with cleanup. 
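 * core_hmi_state uses the low CORE_THREAD_MASK bits as per-thread cleanup flags and HMI_STATE_CLEANUP_DONE (0x100) as the all-done marker; for example, in 2-way split mode with 4 threads per subcore, SUBCORE_THREAD_MASK(1, 4) is 0xf0, the flags of the second subcore's threads.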
*/ for (i = 0; i < split_core_mode; i++) { uint32_t subcore_thread_mask = SUBCORE_THREAD_MASK(i, threads_per_core); if (!(*(this_cpu()->core_hmi_state_ptr) & subcore_thread_mask)) break; } if (i == split_core_mode) *(this_cpu()->core_hmi_state_ptr) |= HMI_STATE_CLEANUP_DONE; unlock(&hmi_lock); /* Wait for other subcore to complete the cleanup. */ wait_for_cleanup_complete(); } /* * Certain TB/HDEC errors leaves dirty data in timebase and hdec register * which need to cleared before we initiate clear_tb_errors through TFMR[24]. * The cleanup has to be done by all the threads from core in p9. * * On TB/HDEC errors, all 4 threads on the affected receives HMI. On power9, * every thread on the core has its own copy of TB and hence every thread * has to clear the dirty data from its own TB register before we clear tb * errors through TFMR[24]. The HMI recovery would fail even if one thread * do not cleanup the respective TB/HDEC register. * * There is no split core mode in power9. * * Errors that required pre-recovery cleanup: * - SPR_TFMR_TB_RESIDUE_ERR * - SPR_TFMR_HDEC_PARITY_ERROR */ static void pre_recovery_cleanup_p9(void) { uint64_t hmer; uint64_t tfmr; int threads_per_core = cpu_thread_count; int i; hmer = mfspr(SPR_HMER); /* exit if it is not Time facility error. */ if (!(hmer & SPR_HMER_TFAC_ERROR)) return; /* * Exit if it is not the error that leaves dirty data in timebase * or HDEC register. OR this may be the thread which came in very * late and recovery is been already done. * * TFMR is per core register. Ideally if any one thread on the core * does the recovery it should reflect in TFMR register and * applicable to all threads in that core. Hence take a lock before * checking TFMR errors. Once a thread from a core completes the * recovery, all other threads on that core will return from * here. * * If TFMR does not show error that we are looking for, return * from here. We would just fall through recovery code which would * check for other errors on TFMR and fix them. */ lock(&hmi_lock); tfmr = mfspr(SPR_TFMR); if (!(tfmr & (SPR_TFMR_TB_RESIDUE_ERR | SPR_TFMR_HDEC_PARITY_ERROR))) { unlock(&hmi_lock); return; } /* * Due to a HW logic bug in p9, TFMR bit 26 and 45 always set * once TB residue or HDEC errors occurs at first time. Hence for HMI * on subsequent TB errors add additional check as workaround to * identify validity of the errors and decide whether pre-recovery * is required or not. Exit pre-recovery if there are other TB * errors also present on TFMR. */ if (tfmr & SPR_TFMR_OTHER_ERRORS) { unlock(&hmi_lock); return; } /* * First thread on the core ? * if yes, setup the hmi cleanup state to !DONE */ if ((*(this_cpu()->core_hmi_state_ptr) & CORE_THREAD_MASK) == 0) *(this_cpu()->core_hmi_state_ptr) &= ~HMI_STATE_CLEANUP_DONE; /* * Clear TB and wait for other threads to finish its cleanup work. */ timer_facility_do_cleanup(tfmr); /* * Mark this thread bit. This bit will stay on until this thread * exit from handle_hmi_exception(). */ *(this_cpu()->core_hmi_state_ptr) |= this_cpu()->thread_mask; /* * Check if each thread has completed the cleanup work. * if yes, then notify all the threads that we are done with cleanup. */ for (i = 0; i < threads_per_core; i++) { uint32_t thread_mask = SINGLE_THREAD_MASK(i); if (!(*(this_cpu()->core_hmi_state_ptr) & thread_mask)) break; } if (i == threads_per_core) *(this_cpu()->core_hmi_state_ptr) |= HMI_STATE_CLEANUP_DONE; unlock(&hmi_lock); /* Wait for other threads to complete the cleanup. 
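 * wait_for_cleanup_complete() spins until HMI_STATE_CLEANUP_DONE is set, bounded by a fixed iteration count (TIMEOUT_LOOPS * 3) so a stuck sibling cannot wedge this thread forever.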
*/ wait_for_cleanup_complete(); } static void pre_recovery_cleanup(void) { if (proc_gen == proc_gen_p9) return pre_recovery_cleanup_p9(); else return pre_recovery_cleanup_p8(); } static void hmi_exit(void) { /* unconditionally unset the thread bit */ *(this_cpu()->core_hmi_state_ptr) &= ~(this_cpu()->thread_mask); } static void hmi_print_debug(const uint8_t *msg) { uint64_t hmer = mfspr(SPR_HMER); const char *loc; uint32_t core_id, thread_index; core_id = pir_to_core_id(this_cpu()->pir); thread_index = cpu_get_thread_index(this_cpu()); loc = chip_loc_code(this_cpu()->chip_id); if (!loc) loc = "Not Available"; if (hmer & (SPR_HMER_TFAC_ERROR | SPR_HMER_TFMR_PARITY_ERROR)) { prlog(PR_DEBUG, "[Loc: %s]: P:%d C:%d T:%d: TFMR(%016lx) %s\n", loc, this_cpu()->chip_id, core_id, thread_index, mfspr(SPR_TFMR), msg); } else { prlog(PR_DEBUG, "[Loc: %s]: P:%d C:%d T:%d: %s\n", loc, this_cpu()->chip_id, core_id, thread_index, msg); } } int handle_hmi_exception(uint64_t hmer, struct OpalHMIEvent *hmi_evt) { int recover = 1; uint64_t tfmr; /* * In case of split core, some of the Timer facility errors need * cleanup to be done before we proceed with the error recovery. */ pre_recovery_cleanup(); lock(&hmi_lock); /* * Not all HMIs would move TB into invalid state. Set the TB state * looking at TFMR register. TFMR will tell us correct state of * TB register. */ this_cpu()->tb_invalid = !(mfspr(SPR_TFMR) & SPR_TFMR_TB_VALID); prlog(PR_DEBUG, "Received HMI interrupt: HMER = 0x%016llx\n", hmer); if (hmi_evt) hmi_evt->hmer = hmer; if (hmer & SPR_HMER_PROC_RECV_DONE) { hmer &= ~SPR_HMER_PROC_RECV_DONE; if (hmi_evt) { hmi_evt->severity = OpalHMI_SEV_NO_ERROR; hmi_evt->type = OpalHMI_ERROR_PROC_RECOV_DONE; queue_hmi_event(hmi_evt, recover); } hmi_print_debug("Processor recovery Done."); } if (hmer & SPR_HMER_PROC_RECV_ERROR_MASKED) { hmer &= ~SPR_HMER_PROC_RECV_ERROR_MASKED; if (hmi_evt) { hmi_evt->severity = OpalHMI_SEV_NO_ERROR; hmi_evt->type = OpalHMI_ERROR_PROC_RECOV_MASKED; queue_hmi_event(hmi_evt, recover); } hmi_print_debug("Processor recovery Done (masked)."); } if (hmer & SPR_HMER_PROC_RECV_AGAIN) { hmer &= ~SPR_HMER_PROC_RECV_AGAIN; if (hmi_evt) { hmi_evt->severity = OpalHMI_SEV_NO_ERROR; hmi_evt->type = OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN; queue_hmi_event(hmi_evt, recover); } hmi_print_debug("Processor recovery occurred again before" "bit2 was cleared\n"); } /* Assert if we see malfunction alert, we can not continue. */ if (hmer & SPR_HMER_MALFUNCTION_ALERT) { hmer &= ~SPR_HMER_MALFUNCTION_ALERT; hmi_print_debug("Malfunction Alert"); if (hmi_evt) decode_malfunction(hmi_evt); } /* Assert if we see Hypervisor resource error, we can not continue. */ if (hmer & SPR_HMER_HYP_RESOURCE_ERR) { hmer &= ~SPR_HMER_HYP_RESOURCE_ERR; hmi_print_debug("Hypervisor resource error"); recover = 0; if (hmi_evt) { hmi_evt->severity = OpalHMI_SEV_FATAL; hmi_evt->type = OpalHMI_ERROR_HYP_RESOURCE; queue_hmi_event(hmi_evt, recover); } } /* * Assert for now for all TOD errors. In future we need to decode * TFMR and take corrective action wherever required. 
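 * For now chiptod_recover_tb_errors() attempts the recovery and its return value sets the disposition (recovered or not) of the HMI event queued to the host.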
*/ if (hmer & SPR_HMER_TFAC_ERROR) { tfmr = mfspr(SPR_TFMR); /* save original TFMR */ hmi_print_debug("Timer Facility Error"); hmer &= ~SPR_HMER_TFAC_ERROR; recover = chiptod_recover_tb_errors(); if (hmi_evt) { hmi_evt->severity = OpalHMI_SEV_ERROR_SYNC; hmi_evt->type = OpalHMI_ERROR_TFAC; hmi_evt->tfmr = tfmr; queue_hmi_event(hmi_evt, recover); } } if (hmer & SPR_HMER_TFMR_PARITY_ERROR) { tfmr = mfspr(SPR_TFMR); /* save original TFMR */ hmer &= ~SPR_HMER_TFMR_PARITY_ERROR; hmi_print_debug("TFMR parity Error"); recover = chiptod_recover_tb_errors(); if (hmi_evt) { hmi_evt->severity = OpalHMI_SEV_FATAL; hmi_evt->type = OpalHMI_ERROR_TFMR_PARITY; hmi_evt->tfmr = tfmr; queue_hmi_event(hmi_evt, recover); } } if (recover == 0) disable_fast_reboot("Unrecoverable HMI"); /* * HMER bits are sticky, once set to 1 they remain set to 1 until * they are set to 0. Reset the error source bit to 0, otherwise * we keep getting HMI interrupt again and again. */ mtspr(SPR_HMER, hmer); hmi_exit(); /* Set the TB state looking at TFMR register before we head out. */ this_cpu()->tb_invalid = !(mfspr(SPR_TFMR) & SPR_TFMR_TB_VALID); unlock(&hmi_lock); return recover; } static int64_t opal_handle_hmi(void) { uint64_t hmer; struct OpalHMIEvent hmi_evt; /* * Compiled time check to see size of OpalHMIEvent do not exceed * that of struct opal_msg. */ BUILD_ASSERT(sizeof(struct opal_msg) >= sizeof(struct OpalHMIEvent)); memset(&hmi_evt, 0, sizeof(struct OpalHMIEvent)); hmi_evt.version = OpalHMIEvt_V2; hmer = mfspr(SPR_HMER); /* Get HMER register value */ handle_hmi_exception(hmer, &hmi_evt); return OPAL_SUCCESS; } opal_call(OPAL_HANDLE_HMI, opal_handle_hmi, 0); skiboot-5.10-rc4/core/hostservices.c000066400000000000000000000467141324317060200174260ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define HOSTBOOT_RUNTIME_INTERFACE_VERSION 1 struct host_interfaces { /** Interface version. */ uint64_t interface_version; /** Put a string to the console. */ void (*puts)(const char*); /** Critical failure in runtime execution. */ void (*assert)(void); /** OPTIONAL. Hint to environment that the page may be executed. 
*/ int (*set_page_execute)(void*); /** malloc */ void *(*malloc)(size_t); /** free */ void (*free)(void*); /** realloc */ void *(*realloc)(void*, size_t); /** sendErrorLog * @param[in] plid Platform Log identifier * @param[in] data size in bytes * @param[in] pointer to data * @return 0 on success else error code */ int (*send_error_log)(uint32_t,uint32_t,void *); /** Scan communication read * @param[in] chip_id (based on devtree defn) * @param[in] address * @param[in] pointer to 8-byte data buffer * @return 0 on success else return code */ int (*scom_read)(uint64_t, uint64_t, void*); /** Scan communication write * @param[in] chip_id (based on devtree defn) * @param[in] address * @param[in] pointer to 8-byte data buffer * @return 0 on success else return code */ int (*scom_write)(uint64_t, uint64_t, const void *); /** lid_load * Load a LID from PNOR, FSP, etc. * * @param[in] LID number. * @param[out] Allocated buffer for LID. * @param[out] Size of LID (in bytes). * * @return 0 on success, else RC. */ int (*lid_load)(uint32_t lid, void **buf, size_t *len); /** lid_unload * Release memory from previously loaded LID. * * @param[in] Allocated buffer for LID to release. * * @return 0 on success, else RC. */ int (*lid_unload)(void *buf); /** Get the address of a reserved memory region by its devtree name. * * @param[in] Devtree name (ex. "ibm,hbrt-vpd-image") * @return physical address of region (or NULL). **/ uint64_t (*get_reserved_mem)(const char*); /** * @brief Force a core to be awake, or clear the force * @param[in] i_core Core to wake up (pid) * @param[in] i_mode 0=force awake * 1=clear force * 2=clear all previous forces * @return rc non-zero on error */ int (*wakeup)( uint32_t i_core, uint32_t i_mode ); /** * @brief Delay/sleep for at least the time given * @param[in] seconds * @param[in] nano seconds */ void (*nanosleep)(uint64_t i_seconds, uint64_t i_nano_seconds); // Reserve some space for future growth. void (*reserved[32])(void); }; struct runtime_interfaces { /** Interface version. */ uint64_t interface_version; /** Execute CxxTests that may be contained in the image. * * @param[in] - Pointer to CxxTestStats structure for results reporting. */ void (*cxxtestExecute)(void *); /** Get a list of lids numbers of the lids known to HostBoot * * @param[out] o_num - the number of lids in the list * @return a pointer to the list */ const uint32_t * (*get_lid_list)(size_t * o_num); /** Load OCC Image and common data into mainstore, also setup OCC BARSs * * @param[in] i_homer_addr_phys - The physical mainstore address of the * start of the HOMER image * @param[in] i_homer_addr_va - Virtual memory address of the HOMER image * @param[in] i_common_addr_phys - The physical mainstore address of the * OCC common area. 
* @param[in] i_common_addr_va - Virtual memory address of the common area * @param[in] i_chip - The HW chip id (XSCOM chip ID) * @return 0 on success else return code */ int(*loadOCC)(uint64_t i_homer_addr_phys, uint64_t i_homer_addr_va, uint64_t i_common_addr_phys, uint64_t i_common_addr_va, uint64_t i_chip); /** Start OCC on all chips, by module * * @param[in] i_chip - Array of functional HW chip ids * @Note The caller must include a complete modules worth of chips * @param[in] i_num_chips - Number of chips in the array * @return 0 on success else return code */ int (*startOCCs)(uint64_t* i_chip, size_t i_num_chips); /** Stop OCC hold OCCs in reset * * @param[in] i_chip - Array of functional HW chip ids * @Note The caller must include a complete modules worth of chips * @param[in] i_num_chips - Number of chips in the array * @return 0 on success else return code */ int (*stopOCCs)(uint64_t* i_chip, size_t i_num_chips); /* Reserve some space for future growth. */ void (*reserved[32])(void); }; static struct runtime_interfaces *hservice_runtime; static char *hbrt_con_buf = (char *)HBRT_CON_START; static size_t hbrt_con_pos; static bool hbrt_con_wrapped; #define HBRT_CON_IN_LEN 0 #define HBRT_CON_OUT_LEN (HBRT_CON_LEN - HBRT_CON_IN_LEN) static struct memcons hbrt_memcons __section(".data.memcons") = { .magic = MEMCONS_MAGIC, .obuf_phys = HBRT_CON_START, .ibuf_phys = HBRT_CON_START + HBRT_CON_OUT_LEN, .obuf_size = HBRT_CON_OUT_LEN, .ibuf_size = HBRT_CON_IN_LEN, }; static void hservice_putc(char c) { uint32_t opos; hbrt_con_buf[hbrt_con_pos++] = c; if (hbrt_con_pos >= HBRT_CON_OUT_LEN) { hbrt_con_pos = 0; hbrt_con_wrapped = true; } /* * We must always re-generate memcons.out_pos because * under some circumstances, the console script will * use a broken putmemproc that does RMW on the full * 8 bytes containing out_pos and in_prod, thus corrupting * out_pos */ opos = hbrt_con_pos; if (hbrt_con_wrapped) opos |= MEMCONS_OUT_POS_WRAP; lwsync(); hbrt_memcons.out_pos = opos; } static void hservice_puts(const char *str) { char c; while((c = *(str++)) != 0) hservice_putc(c); hservice_putc(10); } static void hservice_mark(void) { hservice_puts("--------------------------------------------------" "--------------------------------------------------\n"); } static void hservice_assert(void) { /** * @fwts-label HBRTassert * @fwts-advice HBRT triggered assert: you need to debug HBRT */ prlog(PR_EMERG, "HBRT: Assertion from hostservices\n"); abort(); } static void *hservice_malloc(size_t size) { return malloc(size); } static void hservice_free(void *ptr) { free(ptr); } static void *hservice_realloc(void *ptr, size_t size) { return realloc(ptr, size); } struct hbrt_elog_ent { void *buf; unsigned int size; unsigned int plid; struct list_node link; }; static LIST_HEAD(hbrt_elogs); static struct lock hbrt_elog_lock = LOCK_UNLOCKED; static bool hbrt_elog_sending; static void hservice_start_elog_send(void); static void hservice_elog_write_complete(struct fsp_msg *msg) { struct hbrt_elog_ent *ent = msg->user_data; lock(&hbrt_elog_lock); prlog(PR_DEBUG, "HBRT: Completed send of PLID 0x%08x\n", ent->plid); hbrt_elog_sending = false; fsp_tce_unmap(PSI_DMA_HBRT_LOG_WRITE_BUF, PSI_DMA_HBRT_LOG_WRITE_BUF_SZ); free(ent->buf); free(ent); fsp_freemsg(msg); hservice_start_elog_send(); unlock(&hbrt_elog_lock); } static void hservice_start_elog_send(void) { struct fsp_msg *msg; struct hbrt_elog_ent *ent; again: if (list_empty(&hbrt_elogs)) return; ent = list_pop(&hbrt_elogs, struct hbrt_elog_ent, link); hbrt_elog_sending = 
true; prlog(PR_DEBUG, "HBRT: Starting send of PLID 0x%08x\n", ent->plid); fsp_tce_map(PSI_DMA_HBRT_LOG_WRITE_BUF, ent->buf, PSI_DMA_HBRT_LOG_WRITE_BUF_SZ); msg = fsp_mkmsg(FSP_CMD_WRITE_SP_DATA, 6, FSP_DATASET_HBRT_BLOB, 0, 0, 0, PSI_DMA_HBRT_LOG_WRITE_BUF, ent->size); if (!msg) { prerror("HBRT: Failed to create error msg log to FSP\n"); goto error; } msg->user_data = ent; if (!fsp_queue_msg(msg, hservice_elog_write_complete)) return; prerror("FSP: Error queueing elog update\n"); error: if (msg) fsp_freemsg(msg); fsp_tce_unmap(PSI_DMA_HBRT_LOG_WRITE_BUF, PSI_DMA_HBRT_LOG_WRITE_BUF_SZ); free(ent->buf); free(ent); hbrt_elog_sending = false; goto again; } int hservice_send_error_log(uint32_t plid, uint32_t dsize, void *data) { struct hbrt_elog_ent *ent; void *abuf; prlog(PR_ERR, "HBRT: Error log generated with plid 0x%08x\n", plid); /* We only know how to send error logs to FSP */ if (!fsp_present()) { prerror("HBRT: Warning, error log from HBRT discarded !\n"); return OPAL_UNSUPPORTED; } if (dsize > PSI_DMA_HBRT_LOG_WRITE_BUF_SZ) { prerror("HBRT: Warning, error log from HBRT too big (%d) !\n", dsize); dsize = PSI_DMA_HBRT_LOG_WRITE_BUF_SZ; } lock(&hbrt_elog_lock); /* Create and populate a tracking structure */ ent = zalloc(sizeof(struct hbrt_elog_ent)); if (!ent) { unlock(&hbrt_elog_lock); return OPAL_NO_MEM; } /* Grab a 4k aligned page */ abuf = memalign(0x1000, PSI_DMA_HBRT_LOG_WRITE_BUF_SZ); if (!abuf) { free(ent); unlock(&hbrt_elog_lock); return OPAL_NO_MEM; } memset(abuf, 0, PSI_DMA_HBRT_LOG_WRITE_BUF_SZ); memcpy(abuf, data, dsize); ent->buf = abuf; ent->size = dsize; ent->plid = plid; list_add_tail(&hbrt_elogs, &ent->link); if (!hbrt_elog_sending) hservice_start_elog_send(); unlock(&hbrt_elog_lock); return 0; } static int hservice_scom_read(uint64_t chip_id, uint64_t addr, void *buf) { return xscom_read(chip_id, addr, buf); } static int hservice_scom_write(uint64_t chip_id, uint64_t addr, const void *buf) { uint64_t val; memcpy(&val, buf, sizeof(val)); return xscom_write(chip_id, addr, val); } struct hbrt_lid { void *load_addr; size_t len; uint32_t id; struct list_node link; }; static LIST_HEAD(hbrt_lid_list); static bool hbrt_lid_preload_complete = false; bool hservices_lid_preload_complete(void) { return hbrt_lid_preload_complete; } /* TODO: Few of the following routines can be generalized */ static int __hservice_lid_load(uint32_t lid, void **buf, size_t *len) { int rc; /* Adjust LID side first or we get a cache mismatch */ lid = fsp_adjust_lid_side(lid); /* * Allocate a new buffer and load the LID into it * XXX: We currently use the same size for each HBRT lid. */ *buf = malloc(HBRT_LOAD_LID_SIZE); *len = HBRT_LOAD_LID_SIZE; rc = fsp_preload_lid(lid, *buf, len); rc = fsp_wait_lid_loaded(lid); if (rc != 0) /* Take advantage of realloc corner case here. 
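 * (We shrink the allocation to zero bytes on failure: with the usual
 *  realloc() semantics, realloc(buf, 0) releases the old buffer and
 *  returns NULL, so the caller is left with *buf == NULL and *len == 0
 *  rather than a stale, partially-filled LID buffer.)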
*/ *len = 0; *buf = realloc(*buf, *len); prlog(PR_DEBUG, "HBRT: LID 0x%08x successfully loaded, len=0x%lx\n", lid, (unsigned long)len); return rc; } static int __hservice_lid_preload(const uint32_t lid) { struct hbrt_lid *hlid; void *buf; size_t len; int rc; hlid = zalloc(sizeof(struct hbrt_lid)); if (!hlid) { prerror("HBRT: Could not allocate struct hbrt_lid\n"); return OPAL_NO_MEM; } rc = __hservice_lid_load(lid, &buf, &len); if (rc) { free(hlid); return rc; } hlid->load_addr = buf; hlid->len = len; hlid->id = lid; list_add_tail(&hbrt_lid_list, &hlid->link); return 0; } /* Find and preload all lids needed by hostservices */ void hservices_lid_preload(void) { const uint32_t *lid_list = NULL; size_t num_lids; int i; if (!hservice_runtime) return; lid_list = (const uint32_t *)hservice_runtime->get_lid_list(&num_lids); if (!lid_list) { prerror("HBRT: get_lid_list() returned NULL\n"); return; } prlog(PR_INFO, "HBRT: %d lids to load\n", (int)num_lids); /* Currently HBRT needs only one (OCC) lid */ for (i = 0; i < num_lids; i++) __hservice_lid_preload(lid_list[i]); hbrt_lid_preload_complete = true; occ_poke_load_queue(); } static int hservice_lid_load(uint32_t lid, void **buf, size_t *len) { struct hbrt_lid *hlid; prlog(PR_INFO, "HBRT: Lid load request for 0x%08x\n", lid); if (list_empty(&hbrt_lid_list)) { /* Should not happen */ /** * @fwts-label HBRTlidLoadFail * @fwts-advice Firmware should have aborted boot */ prlog(PR_CRIT, "HBRT: LID Load failed\n"); abort(); } list_for_each(&hbrt_lid_list, hlid, link) { if (hlid->id == lid) { *buf = hlid->load_addr; *len = hlid->len; prlog(PR_DEBUG, "HBRT: LID Serviced from cache," " %x, len=0x%lx\n", hlid->id, hlid->len); return 0; } } return -ENOENT; } static int hservice_lid_unload(void *buf __unused) { /* We do nothing as the LID is held in cache */ return 0; } static uint64_t hservice_get_reserved_mem(const char *name) { struct mem_region *region; uint64_t ret; /* We assume it doesn't change after we've unlocked it, but * lock ensures list is safe to walk. */ lock(&mem_region_lock); region = find_mem_region(name); ret = region ? region->start : 0; unlock(&mem_region_lock); if (!ret) prlog(PR_WARNING, "HBRT: Mem region '%s' not found !\n", name); return ret; } static void hservice_nanosleep(uint64_t i_seconds, uint64_t i_nano_seconds) { struct timespec ts; ts.tv_sec = i_seconds; ts.tv_nsec = i_nano_seconds; nanosleep_nopoll(&ts, NULL); } int hservice_wakeup(uint32_t i_core, uint32_t i_mode) { struct cpu_thread *cpu; int rc = OPAL_SUCCESS; switch (proc_gen) { case proc_gen_p8: /* * Mask out the top nibble of i_core since it may contain * 0x4 (which we use for XSCOM targeting) */ i_core &= 0x0fffffff; i_core <<= 3; break; case proc_gen_p9: i_core &= SPR_PIR_P9_MASK; i_core <<= 2; break; default: return OPAL_UNSUPPORTED; } /* What do we need to do ? 
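 * i_mode follows the host interface contract documented above:
 *   0 = assert a special wakeup on the target core,
 *   1 = release one previously asserted wakeup,
 *   2 = clear all outstanding wakeups on every core.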
*/ switch(i_mode) { case 0: /* Assert special wakeup */ cpu = find_cpu_by_pir(i_core); if (!cpu) return OPAL_PARAMETER; prlog(PR_DEBUG, "HBRT: Special wakeup assert for core 0x%x," " count=%d\n", i_core, cpu->hbrt_spec_wakeup); if (cpu->hbrt_spec_wakeup == 0) rc = dctl_set_special_wakeup(cpu); if (rc == 0) cpu->hbrt_spec_wakeup++; return rc; case 1: /* Deassert special wakeup */ cpu = find_cpu_by_pir(i_core); if (!cpu) return OPAL_PARAMETER; prlog(PR_DEBUG, "HBRT: Special wakeup release for core" " 0x%x, count=%d\n", i_core, cpu->hbrt_spec_wakeup); if (cpu->hbrt_spec_wakeup == 0) { prerror("HBRT: Special wakeup clear" " on core 0x%x with count=0\n", i_core); return OPAL_WRONG_STATE; } /* What to do with count on errors ? */ cpu->hbrt_spec_wakeup--; if (cpu->hbrt_spec_wakeup == 0) rc = dctl_clear_special_wakeup(cpu); return rc; case 2: /* Clear all special wakeups */ prlog(PR_DEBUG, "HBRT: Special wakeup release for all cores\n"); for_each_cpu(cpu) { if (cpu->hbrt_spec_wakeup) { cpu->hbrt_spec_wakeup = 0; /* What to do on errors ? */ dctl_clear_special_wakeup(cpu); } } return OPAL_SUCCESS; default: return OPAL_PARAMETER; } } static struct host_interfaces hinterface = { .interface_version = HOSTBOOT_RUNTIME_INTERFACE_VERSION, .puts = hservice_puts, .assert = hservice_assert, .malloc = hservice_malloc, .free = hservice_free, .realloc = hservice_realloc, .send_error_log = hservice_send_error_log, .scom_read = hservice_scom_read, .scom_write = hservice_scom_write, .lid_load = hservice_lid_load, .lid_unload = hservice_lid_unload, .get_reserved_mem = hservice_get_reserved_mem, .wakeup = hservice_wakeup, .nanosleep = hservice_nanosleep, }; int host_services_occ_load(void) { struct proc_chip *chip; int rc = 0; prlog(PR_DEBUG, "HBRT: OCC Load requested\n"); if (!(hservice_runtime && hservice_runtime->loadOCC)) { prerror("HBRT: No hservice_runtime->loadOCC\n"); return -ENOENT; } for_each_chip(chip) { prlog(PR_DEBUG, "HBRT: Calling loadOCC() homer" " %016llx, occ_common_area %016llx, chip %04x\n", chip->homer_base, chip->occ_common_base, chip->id); rc = hservice_runtime->loadOCC(chip->homer_base, chip->homer_base, chip->occ_common_base, chip->occ_common_base, chip->id); hservice_mark(); prlog(PR_DEBUG, "HBRT: -> rc = %d\n", rc); } return rc; } int host_services_occ_start(void) { struct proc_chip *chip; int i, rc = 0, nr_chips=0; uint64_t chipids[MAX_CHIPS]; prlog(PR_INFO, "HBRT: OCC Start requested\n"); if (!(hservice_runtime && hservice_runtime->startOCCs)) { prerror("HBRT: No hservice_runtime->startOCCs\n"); return -ENOENT; } for_each_chip(chip) { chipids[nr_chips++] = chip->id; } for (i = 0; i < nr_chips; i++) prlog(PR_TRACE, "HBRT: Calling startOCC() for %04llx\n", chipids[i]); /* Lets start all OCC */ rc = hservice_runtime->startOCCs(chipids, nr_chips); hservice_mark(); prlog(PR_DEBUG, "HBRT: startOCCs() rc = %d\n", rc); return rc; } int host_services_occ_stop(void) { int i, rc = 0, nr_slaves = 0, nr_masters = 0; uint64_t *master_chipids = NULL, *slave_chipids = NULL; prlog(PR_INFO, "HBRT: OCC Stop requested\n"); if (!(hservice_runtime && hservice_runtime->stopOCCs)) { prerror("HBRT: No hservice_runtime->stopOCCs\n"); return -ENOENT; } rc = find_master_and_slave_occ(&master_chipids, &slave_chipids, &nr_masters, &nr_slaves); if (rc) goto out; for (i = 0; i < nr_slaves; i++) prlog(PR_TRACE, "HBRT: Calling stopOCC() for %04llx ", slave_chipids[i]); if (!nr_slaves) goto master; /* Lets STOP all the slave OCC */ rc = hservice_runtime->stopOCCs(slave_chipids, nr_slaves); prlog(PR_DEBUG, "HBRT: 
stopOCCs() slave rc = %d\n", rc); master: for (i = 0; i < nr_masters; i++) prlog(PR_TRACE, "HBRT: Calling stopOCC() for %04llx ", master_chipids[i]); /* Lets STOP all the master OCC */ rc = hservice_runtime->stopOCCs(master_chipids, nr_masters); hservice_mark(); prlog(PR_DEBUG, "HBRT: stopOCCs() master rc = %d\n", rc); out: free(master_chipids); free(slave_chipids); return rc; } void host_services_occ_base_setup(void) { struct proc_chip *chip; uint64_t occ_common; chip = next_chip(NULL); /* Frist chip */ occ_common = (uint64_t) local_alloc(chip->id, OCC_COMMON_SIZE, OCC_COMMON_SIZE); for_each_chip(chip) { chip->occ_common_base = occ_common; chip->occ_common_size = OCC_COMMON_SIZE; chip->homer_base = (uint64_t) local_alloc(chip->id, HOMER_IMAGE_SIZE, HOMER_IMAGE_SIZE); chip->homer_size = HOMER_IMAGE_SIZE; memset((void *)chip->homer_base, 0, chip->homer_size); prlog(PR_DEBUG, "HBRT: Chip %d HOMER base %016llx : %08llx\n", chip->id, chip->homer_base, chip->homer_size); prlog(PR_DEBUG, "HBRT: OCC common base %016llx : %08llx\n", chip->occ_common_base, chip->occ_common_size); } } bool hservices_init(void) { void *code = NULL; struct runtime_interfaces *(*hbrt_init)(struct host_interfaces *); struct function_descriptor { void *addr; void *toc; } fdesc; code = (void *)hservice_get_reserved_mem("ibm,hbrt-code-image"); if (!code) { prerror("HBRT: No ibm,hbrt-code-image found.\n"); return false; } if (memcmp(code, "HBRTVERS", 8) != 0) { prerror("HBRT: Bad eyecatcher for ibm,hbrt-code-image!\n"); return false; } prlog(PR_INFO, "HBRT: Found HostBoot Runtime version %llu\n", ((u64 *)code)[1]); /* We enter at 0x100 into the image. */ fdesc.addr = code + 0x100; /* It doesn't care about TOC */ fdesc.toc = NULL; hbrt_init = (void *)&fdesc; hservice_runtime = hbrt_init(&hinterface); hservice_mark(); if (!hservice_runtime) { prerror("HBRT: Host services init failed\n"); return false; } prlog(PR_INFO, "HBRT: Interface version %llu\n", hservice_runtime->interface_version); return true; } skiboot-5.10-rc4/core/i2c.c000066400000000000000000000136521324317060200153550ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #include #include static LIST_HEAD(i2c_bus_list); /* Used to assign OPAL IDs */ static uint32_t i2c_next_bus; void i2c_add_bus(struct i2c_bus *bus) { bus->opal_id = ++i2c_next_bus; dt_add_property_cells(bus->dt_node, "ibm,opal-id", bus->opal_id); list_add_tail(&i2c_bus_list, &bus->link); } struct i2c_bus *i2c_find_bus_by_id(uint32_t opal_id) { struct i2c_bus *bus; list_for_each(&i2c_bus_list, bus, link) { if (bus->opal_id == opal_id) return bus; } return NULL; } static void opal_i2c_request_complete(int rc, struct i2c_request *req) { uint64_t token = (uint64_t)(unsigned long)req->user_data; opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, token, rc); i2c_free_req(req); } static int opal_i2c_request(uint64_t async_token, uint32_t bus_id, struct opal_i2c_request *oreq) { struct i2c_bus *bus = NULL; struct i2c_request *req; int rc; if (!opal_addr_valid(oreq)) return OPAL_PARAMETER; if (oreq->flags & OPAL_I2C_ADDR_10) return OPAL_UNSUPPORTED; bus = i2c_find_bus_by_id(bus_id); if (!bus) { /** * @fwts-label I2CInvalidBusID * @fwts-advice opal_i2c_request was passed an invalid bus * ID. This has likely come from the OS rather than OPAL * and thus could indicate an OS bug rather than an OPAL * bug. */ prlog(PR_ERR, "I2C: Invalid 'bus_id' passed to the OPAL\n"); return OPAL_PARAMETER; } req = i2c_alloc_req(bus); if (!req) { /** * @fwts-label I2CFailedAllocation * @fwts-advice OPAL failed to allocate memory for an * i2c_request. This points to an OPAL bug as OPAL ran * out of memory and this should never happen. */ prlog(PR_ERR, "I2C: Failed to allocate 'i2c_request'\n"); return OPAL_NO_MEM; } switch(oreq->type) { case OPAL_I2C_RAW_READ: req->op = I2C_READ; break; case OPAL_I2C_RAW_WRITE: req->op = I2C_WRITE; break; case OPAL_I2C_SM_READ: req->op = SMBUS_READ; req->offset = oreq->subaddr; req->offset_bytes = oreq->subaddr_sz; break; case OPAL_I2C_SM_WRITE: req->op = SMBUS_WRITE; req->offset = oreq->subaddr; req->offset_bytes = oreq->subaddr_sz; break; default: bus->free_req(req); return OPAL_PARAMETER; } req->dev_addr = oreq->addr; req->rw_len = oreq->size; req->rw_buf = (void *)oreq->buffer_ra; req->completion = opal_i2c_request_complete; req->user_data = (void *)(unsigned long)async_token; req->bus = bus; if (i2c_check_quirk(req, &rc)) { i2c_free_req(req); return rc; } /* Finally, queue the OPAL i2c request and return */ rc = i2c_queue_req(req); if (rc) { i2c_free_req(req); return rc; } return OPAL_ASYNC_COMPLETION; } opal_call(OPAL_I2C_REQUEST, opal_i2c_request, 3); #define MAX_NACK_RETRIES 2 #define REQ_COMPLETE_POLLING 5 /* Check if req is complete in 5ms interval */ struct i2c_sync_userdata { int rc; bool done; }; static void i2c_sync_request_complete(int rc, struct i2c_request *req) { struct i2c_sync_userdata *ud = req->user_data; ud->rc = rc; ud->done = true; } /** * i2c_request_send - send request to i2c bus synchronously * @bus_id: i2c bus id * @dev_addr: address of the device * @read_write: SMBUS_READ or SMBUS_WRITE * @offset: any of the I2C interface offset defined * @offset_bytes: offset size in bytes * @buf: data to be read or written * @buflen: buf length * @timeout: request timeout in milliseconds * * Send an I2C request to a device synchronously * * Returns: Zero on success otherwise a negative error code */ int i2c_request_send(int bus_id, int dev_addr, int read_write, uint32_t offset, uint32_t offset_bytes, void* buf, size_t buflen, int timeout) { int rc, waited, retries; struct i2c_request *req; struct i2c_bus *bus; uint64_t time_to_wait = 0; 
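	/*
	 * Illustrative caller sketch -- the bus id, device address, offset
	 * and timeout below are hypothetical values, not taken from this
	 * file:
	 *
	 *	uint8_t data[4];
	 *	rc = i2c_request_send(2, 0x50, SMBUS_READ, 0x10, 1,
	 *			      data, sizeof(data), 100);
	 *
	 * The call blocks: the request is queued and then polled to
	 * completion (with NACK retries) in the loop below.
	 */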
struct i2c_sync_userdata ud; bus = i2c_find_bus_by_id(bus_id); if (!bus) { /** * @fwts-label I2CInvalidBusID * @fwts-advice i2c_request_send was passed an invalid bus * ID. This indicates a bug. */ prlog(PR_ERR, "I2C: Invalid bus_id=%x\n", bus_id); return OPAL_PARAMETER; } req = i2c_alloc_req(bus); if (!req) { /** * @fwts-label I2CAllocationFailed * @fwts-advice OPAL failed to allocate memory for an * i2c_request. This points to an OPAL bug as OPAL run out of * memory and this should never happen. */ prlog(PR_ERR, "I2C: i2c_alloc_req failed\n"); return OPAL_INTERNAL_ERROR; } req->dev_addr = dev_addr; req->op = read_write; req->offset = offset; req->offset_bytes = offset_bytes; req->rw_buf = (void*) buf; req->rw_len = buflen; req->completion = i2c_sync_request_complete; ud.done = false; req->user_data = &ud; for (retries = 0; retries <= MAX_NACK_RETRIES; retries++) { waited = 0; i2c_set_req_timeout(req, timeout); i2c_queue_req(req); do { time_to_wait = i2c_run_req(req); if (!time_to_wait) time_to_wait = REQ_COMPLETE_POLLING; time_wait(time_to_wait); waited += time_to_wait; } while (!ud.done); rc = ud.rc; if (rc == OPAL_I2C_NACK_RCVD) continue; else /* error or success */ break; } prlog(PR_DEBUG, "I2C: %s req op=%x offset=%x buf=%016llx buflen=%d " "delay=%lu/%d rc=%d\n", (rc) ? "!!!!" : "----", req->op, req->offset, *(uint64_t*) buf, req->rw_len, tb_to_msecs(waited), timeout, rc); i2c_free_req(req); if (rc) return OPAL_HARDWARE; return OPAL_SUCCESS; } skiboot-5.10-rc4/core/init.c000066400000000000000000000717701324317060200156500ustar00rootroot00000000000000/* Copyright 2013-2016 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include enum proc_gen proc_gen; unsigned int pcie_max_link_speed; static uint64_t kernel_entry; static size_t kernel_size; static bool kernel_32bit; /* We backup the previous vectors here before copying our own */ static uint8_t old_vectors[0x2000]; #ifdef SKIBOOT_GCOV void skiboot_gcov_done(void); #endif struct debug_descriptor debug_descriptor = { .eye_catcher = "OPALdbug", .version = DEBUG_DESC_VERSION, .state_flags = 0, .memcons_phys = (uint64_t)&memcons, .trace_mask = 0, /* All traces disabled by default */ /* console log level: * high 4 bits in memory, low 4 bits driver (e.g. uart). 
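 * i.e. the byte is packed as (memory_level << 4) | driver_level; with
 * purely illustrative numbers, a value of 0x85 would mean "level 8 to
 * the in-memory console, level 5 to the UART driver".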
*/ #ifdef DEBUG .console_log_levels = (PR_TRACE << 4) | PR_DEBUG, #else .console_log_levels = (PR_DEBUG << 4) | PR_NOTICE, #endif }; static bool try_load_elf64_le(struct elf_hdr *header) { struct elf64_hdr *kh = (struct elf64_hdr *)header; uint64_t load_base = (uint64_t)kh; struct elf64_phdr *ph; unsigned int i; printf("INIT: 64-bit LE kernel discovered\n"); /* Look for a loadable program header that has our entry in it * * Note that we execute the kernel in-place, we don't actually * obey the load informations in the headers. This is expected * to work for the Linux Kernel because it's a fairly dumb ELF * but it will not work for any ELF binary. */ ph = (struct elf64_phdr *)(load_base + le64_to_cpu(kh->e_phoff)); for (i = 0; i < le16_to_cpu(kh->e_phnum); i++, ph++) { if (le32_to_cpu(ph->p_type) != ELF_PTYPE_LOAD) continue; if (le64_to_cpu(ph->p_vaddr) > le64_to_cpu(kh->e_entry) || (le64_to_cpu(ph->p_vaddr) + le64_to_cpu(ph->p_memsz)) < le64_to_cpu(kh->e_entry)) continue; /* Get our entry */ kernel_entry = le64_to_cpu(kh->e_entry) - le64_to_cpu(ph->p_vaddr) + le64_to_cpu(ph->p_offset); break; } if (!kernel_entry) { prerror("INIT: Failed to find kernel entry !\n"); return false; } kernel_entry += load_base; kernel_32bit = false; kernel_size = le64_to_cpu(kh->e_shoff) + ((uint32_t)le16_to_cpu(kh->e_shentsize) * (uint32_t)le16_to_cpu(kh->e_shnum)); prlog(PR_DEBUG, "INIT: 64-bit kernel entry at 0x%llx, size 0x%lx\n", kernel_entry, kernel_size); return true; } static bool try_load_elf64(struct elf_hdr *header) { struct elf64_hdr *kh = (struct elf64_hdr *)header; uint64_t load_base = (uint64_t)kh; struct elf64_phdr *ph; struct elf64_shdr *sh; unsigned int i; /* Check it's a ppc64 LE ELF */ if (kh->ei_ident == ELF_IDENT && kh->ei_data == ELF_DATA_LSB && kh->e_machine == le16_to_cpu(ELF_MACH_PPC64)) { return try_load_elf64_le(header); } /* Check it's a ppc64 ELF */ if (kh->ei_ident != ELF_IDENT || kh->ei_data != ELF_DATA_MSB || kh->e_machine != ELF_MACH_PPC64) { prerror("INIT: Kernel doesn't look like an ppc64 ELF\n"); return false; } /* Look for a loadable program header that has our entry in it * * Note that we execute the kernel in-place, we don't actually * obey the load informations in the headers. This is expected * to work for the Linux Kernel because it's a fairly dumb ELF * but it will not work for any ELF binary. */ ph = (struct elf64_phdr *)(load_base + kh->e_phoff); for (i = 0; i < kh->e_phnum; i++, ph++) { if (ph->p_type != ELF_PTYPE_LOAD) continue; if (ph->p_vaddr > kh->e_entry || (ph->p_vaddr + ph->p_memsz) < kh->e_entry) continue; /* Get our entry */ kernel_entry = kh->e_entry - ph->p_vaddr + ph->p_offset; break; } if (!kernel_entry) { prerror("INIT: Failed to find kernel entry !\n"); return false; } /* For the normal big-endian ELF ABI, the kernel entry points * to a function descriptor in the data section. Linux instead * has it point directly to code. Test whether it is pointing * into an executable section or not to figure this out. Default * to assuming it obeys the ABI. 
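 * For reference, a ppc64 ELFv1 (big-endian ABI) function descriptor
 * looks roughly like this (illustrative only, no such struct is
 * defined here):
 *
 *	struct func_desc {
 *		uint64_t entry;	// address of the actual code
 *		uint64_t toc;	// TOC base for the callee
 *		uint64_t env;	// environment pointer, unused here
 *	};
 *
 * so when e_entry points into a non-executable section we dereference
 * it once below to obtain the real code address.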
*/ sh = (struct elf64_shdr *)(load_base + kh->e_shoff); for (i = 0; i < kh->e_shnum; i++, sh++) { if (sh->sh_addr <= kh->e_entry && (sh->sh_addr + sh->sh_size) > kh->e_entry) break; } if (i == kh->e_shnum || !(sh->sh_flags & ELF_SFLAGS_X)) { kernel_entry = *(uint64_t *)(kernel_entry + load_base); kernel_entry = kernel_entry - ph->p_vaddr + ph->p_offset; } kernel_entry += load_base; kernel_32bit = false; kernel_size = kh->e_shoff + ((uint32_t)kh->e_shentsize * (uint32_t)kh->e_shnum); printf("INIT: 64-bit kernel entry at 0x%llx, size 0x%lx\n", kernel_entry, kernel_size); return true; } static bool try_load_elf32_le(struct elf_hdr *header) { struct elf32_hdr *kh = (struct elf32_hdr *)header; uint64_t load_base = (uint64_t)kh; struct elf32_phdr *ph; unsigned int i; printf("INIT: 32-bit LE kernel discovered\n"); /* Look for a loadable program header that has our entry in it * * Note that we execute the kernel in-place, we don't actually * obey the load informations in the headers. This is expected * to work for the Linux Kernel because it's a fairly dumb ELF * but it will not work for any ELF binary. */ ph = (struct elf32_phdr *)(load_base + le32_to_cpu(kh->e_phoff)); for (i = 0; i < le16_to_cpu(kh->e_phnum); i++, ph++) { if (le32_to_cpu(ph->p_type) != ELF_PTYPE_LOAD) continue; if (le32_to_cpu(ph->p_vaddr) > le32_to_cpu(kh->e_entry) || (le32_to_cpu(ph->p_vaddr) + le32_to_cpu(ph->p_memsz)) < le32_to_cpu(kh->e_entry)) continue; /* Get our entry */ kernel_entry = le32_to_cpu(kh->e_entry) - le32_to_cpu(ph->p_vaddr) + le32_to_cpu(ph->p_offset); break; } if (!kernel_entry) { prerror("INIT: Failed to find kernel entry !\n"); return false; } kernel_entry += load_base; kernel_32bit = true; printf("INIT: 32-bit kernel entry at 0x%llx\n", kernel_entry); return true; } static bool try_load_elf32(struct elf_hdr *header) { struct elf32_hdr *kh = (struct elf32_hdr *)header; uint64_t load_base = (uint64_t)kh; struct elf32_phdr *ph; unsigned int i; /* Check it's a ppc32 LE ELF */ if (header->ei_ident == ELF_IDENT && header->ei_data == ELF_DATA_LSB && header->e_machine == le16_to_cpu(ELF_MACH_PPC32)) { return try_load_elf32_le(header); } /* Check it's a ppc32 ELF */ if (header->ei_ident != ELF_IDENT || header->ei_data != ELF_DATA_MSB || header->e_machine != ELF_MACH_PPC32) { prerror("INIT: Kernel doesn't look like an ppc32 ELF\n"); return false; } /* Look for a loadable program header that has our entry in it * * Note that we execute the kernel in-place, we don't actually * obey the load informations in the headers. This is expected * to work for the Linux Kernel because it's a fairly dumb ELF * but it will not work for any ELF binary. 
*/ ph = (struct elf32_phdr *)(load_base + kh->e_phoff); for (i = 0; i < kh->e_phnum; i++, ph++) { if (ph->p_type != ELF_PTYPE_LOAD) continue; if (ph->p_vaddr > kh->e_entry || (ph->p_vaddr + ph->p_memsz) < kh->e_entry) continue; /* Get our entry */ kernel_entry = kh->e_entry - ph->p_vaddr + ph->p_offset; break; } if (!kernel_entry) { prerror("INIT: Failed to find kernel entry !\n"); return false; } kernel_entry += load_base; kernel_32bit = true; printf("INIT: 32-bit kernel entry at 0x%llx\n", kernel_entry); return true; } extern char __builtin_kernel_start[]; extern char __builtin_kernel_end[]; extern uint64_t boot_offset; static size_t initramfs_size; bool start_preload_kernel(void) { int loaded; /* Try to load an external kernel payload through the platform hooks */ kernel_size = KERNEL_LOAD_SIZE; loaded = start_preload_resource(RESOURCE_ID_KERNEL, RESOURCE_SUBID_NONE, KERNEL_LOAD_BASE, &kernel_size); if (loaded != OPAL_SUCCESS) { printf("INIT: platform start load kernel failed\n"); kernel_size = 0; return false; } initramfs_size = INITRAMFS_LOAD_SIZE; loaded = start_preload_resource(RESOURCE_ID_INITRAMFS, RESOURCE_SUBID_NONE, INITRAMFS_LOAD_BASE, &initramfs_size); if (loaded != OPAL_SUCCESS) { printf("INIT: platform start load initramfs failed\n"); initramfs_size = 0; return false; } return true; } static bool load_kernel(void) { void *stb_container = NULL; struct elf_hdr *kh; int loaded; prlog(PR_NOTICE, "INIT: Waiting for kernel...\n"); loaded = wait_for_resource_loaded(RESOURCE_ID_KERNEL, RESOURCE_SUBID_NONE); if (loaded != OPAL_SUCCESS) { printf("INIT: platform wait for kernel load failed\n"); kernel_size = 0; } /* Try embedded kernel payload */ if (!kernel_size) { kernel_size = __builtin_kernel_end - __builtin_kernel_start; if (kernel_size) { /* Move the built-in kernel up */ uint64_t builtin_base = ((uint64_t)__builtin_kernel_start) - SKIBOOT_BASE + boot_offset; printf("Using built-in kernel\n"); memmove(KERNEL_LOAD_BASE, (void*)builtin_base, kernel_size); } } if (dt_has_node_property(dt_chosen, "kernel-base-address", NULL)) { kernel_entry = dt_prop_get_u64(dt_chosen, "kernel-base-address"); prlog(PR_DEBUG, "INIT: Kernel image at 0x%llx\n", kernel_entry); kh = (struct elf_hdr *)kernel_entry; /* * If the kernel is at 0, restore it as it was overwritten * by our vectors. */ if (kernel_entry < 0x2000) { cpu_set_sreset_enable(false); memcpy(NULL, old_vectors, 0x2000); sync_icache(); } } else { if (!kernel_size) { printf("INIT: Assuming kernel at %p\n", KERNEL_LOAD_BASE); /* Hack for STB in Mambo, assume at least 4kb in mem */ kernel_size = SECURE_BOOT_HEADERS_SIZE; } if (stb_is_container(KERNEL_LOAD_BASE, kernel_size)) { stb_container = KERNEL_LOAD_BASE; kh = (struct elf_hdr *) (KERNEL_LOAD_BASE + SECURE_BOOT_HEADERS_SIZE); } else kh = (struct elf_hdr *) (KERNEL_LOAD_BASE); } prlog(PR_DEBUG, "INIT: Kernel loaded, size: %zu bytes (0 = unknown preload)\n", kernel_size); if (kh->ei_ident != ELF_IDENT) { prerror("INIT: ELF header not found. 
Assuming raw binary.\n"); return true; } if (kh->ei_class == ELF_CLASS_64) { if (!try_load_elf64(kh)) return false; } else if (kh->ei_class == ELF_CLASS_32) { if (!try_load_elf32(kh)) return false; } else { prerror("INIT: Neither ELF32 not ELF64 ?\n"); return false; } if (chip_quirk(QUIRK_MAMBO_CALLOUTS)) { secureboot_verify(RESOURCE_ID_KERNEL, stb_container, SECURE_BOOT_HEADERS_SIZE + kernel_size); trustedboot_measure(RESOURCE_ID_KERNEL, stb_container, SECURE_BOOT_HEADERS_SIZE + kernel_size); } trustedboot_exit_boot_services(); return true; } static void load_initramfs(void) { int loaded; loaded = wait_for_resource_loaded(RESOURCE_ID_INITRAMFS, RESOURCE_SUBID_NONE); if (loaded != OPAL_SUCCESS || !initramfs_size) return; dt_check_del_prop(dt_chosen, "linux,initrd-start"); dt_check_del_prop(dt_chosen, "linux,initrd-end"); printf("INIT: Initramfs loaded, size: %zu bytes\n", initramfs_size); dt_add_property_u64(dt_chosen, "linux,initrd-start", (uint64_t)INITRAMFS_LOAD_BASE); dt_add_property_u64(dt_chosen, "linux,initrd-end", (uint64_t)INITRAMFS_LOAD_BASE + initramfs_size); } int64_t mem_dump_free(void); void *fdt; void __noreturn load_and_boot_kernel(bool is_reboot) { const struct dt_property *memprop; const char *cmdline, *stdoutp; uint64_t mem_top; memprop = dt_find_property(dt_root, DT_PRIVATE "maxmem"); if (memprop) mem_top = (u64)dt_property_get_cell(memprop, 0) << 32 | dt_property_get_cell(memprop, 1); else /* XXX HB hack, might want to calc it */ mem_top = 0x40000000; op_display(OP_LOG, OP_MOD_INIT, 0x000A); if (platform.exit) platform.exit(); /* Load kernel LID */ if (!load_kernel()) { op_display(OP_FATAL, OP_MOD_INIT, 1); abort(); } load_initramfs(); ipmi_set_fw_progress_sensor(IPMI_FW_OS_BOOT); occ_pstates_init(); if (!is_reboot) { /* We wait for the nvram read to complete here so we can * grab stuff from there such as the kernel arguments */ nvram_wait_for_load(); /* Wait for FW VPD data read to complete */ fsp_code_update_wait_vpd(true); /* * OCC takes few secs to boot. Call this as late as * as possible to avoid delay. */ occ_sensors_init(); } else { /* fdt will be rebuilt */ free(fdt); fdt = NULL; nvram_reinit(); } fsp_console_select_stdout(); /* Use nvram bootargs over device tree */ cmdline = nvram_query("bootargs"); if (cmdline) { dt_check_del_prop(dt_chosen, "bootargs"); dt_add_property_string(dt_chosen, "bootargs", cmdline); prlog(PR_DEBUG, "INIT: Command line from NVRAM: %s\n", cmdline); } op_display(OP_LOG, OP_MOD_INIT, 0x000B); /* Create the device tree blob to boot OS. */ fdt = create_dtb(dt_root, false); if (!fdt) { op_display(OP_FATAL, OP_MOD_INIT, 2); abort(); } op_display(OP_LOG, OP_MOD_INIT, 0x000C); /* Start the kernel */ if (!is_reboot) op_panel_disable_src_echo(); /* Clear SRCs on the op-panel when Linux starts */ op_panel_clear_src(); cpu_give_self_os(); mem_dump_free(); /* Take processours out of nap */ cpu_set_sreset_enable(false); cpu_set_ipi_enable(false); /* Dump the selected console */ stdoutp = dt_prop_get_def(dt_chosen, "linux,stdout-path", NULL); prlog(PR_DEBUG, "INIT: stdout-path: %s\n", stdoutp ? stdoutp : ""); printf("INIT: Starting kernel at 0x%llx, fdt at %p %u bytes)\n", kernel_entry, fdt, fdt_totalsize(fdt)); debug_descriptor.state_flags |= OPAL_BOOT_COMPLETE; fdt_set_boot_cpuid_phys(fdt, this_cpu()->pir); if (kernel_32bit) start_kernel32(kernel_entry, fdt, mem_top); start_kernel(kernel_entry, fdt, mem_top); } static void dt_fixups(void) { struct dt_node *n; struct dt_node *primary_lpc = NULL; /* lpc node missing #address/size cells. 
Also pick one as * primary for now (TBD: How to convey that from HB) */ dt_for_each_compatible(dt_root, n, "ibm,power8-lpc") { if (!primary_lpc || dt_has_node_property(n, "primary", NULL)) primary_lpc = n; if (dt_has_node_property(n, "#address-cells", NULL)) break; dt_add_property_cells(n, "#address-cells", 2); dt_add_property_cells(n, "#size-cells", 1); dt_add_property_strings(n, "status", "ok"); } /* Missing "primary" property in LPC bus */ if (primary_lpc && !dt_has_node_property(primary_lpc, "primary", NULL)) dt_add_property(primary_lpc, "primary", NULL, 0); /* Missing "scom-controller" */ dt_for_each_compatible(dt_root, n, "ibm,xscom") { if (!dt_has_node_property(n, "scom-controller", NULL)) dt_add_property(n, "scom-controller", NULL, 0); } } static void add_arch_vector(void) { /** * vec5 = a PVR-list : Number-of-option-vectors : * option-vectors[Number-of-option-vectors + 1] */ uint8_t vec5[] = {0x05, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00}; if (dt_has_node_property(dt_chosen, "ibm,architecture-vec-5", NULL)) return; dt_add_property(dt_chosen, "ibm,architecture-vec-5", vec5, sizeof(vec5)); } static void dt_init_misc(void) { /* Check if there's a /chosen node, if not, add one */ dt_chosen = dt_find_by_path(dt_root, "/chosen"); if (!dt_chosen) dt_chosen = dt_new(dt_root, "chosen"); assert(dt_chosen); /* Add IBM architecture vectors if needed */ add_arch_vector(); /* Add the "OPAL virtual ICS*/ add_ics_node(); /* Additional fixups. TODO: Move into platform */ dt_fixups(); } static u8 console_get_level(const char *s) { if (strcmp(s, "emerg") == 0) return PR_EMERG; if (strcmp(s, "alert") == 0) return PR_ALERT; if (strcmp(s, "crit") == 0) return PR_CRIT; if (strcmp(s, "err") == 0) return PR_ERR; if (strcmp(s, "warning") == 0) return PR_WARNING; if (strcmp(s, "notice") == 0) return PR_NOTICE; if (strcmp(s, "printf") == 0) return PR_PRINTF; if (strcmp(s, "info") == 0) return PR_INFO; if (strcmp(s, "debug") == 0) return PR_DEBUG; if (strcmp(s, "trace") == 0) return PR_TRACE; if (strcmp(s, "insane") == 0) return PR_INSANE; /* Assume it's a number instead */ return atoi(s); } static void console_log_level(void) { const char *s; u8 level; /* console log level: * high 4 bits in memory, low 4 bits driver (e.g. uart). */ s = nvram_query("log-level-driver"); if (s) { level = console_get_level(s); debug_descriptor.console_log_levels = (debug_descriptor.console_log_levels & 0xf0 ) | (level & 0x0f); prlog(PR_NOTICE, "console: Setting driver log level to %i\n", level & 0x0f); } s = nvram_query("log-level-memory"); if (s) { level = console_get_level(s); debug_descriptor.console_log_levels = (debug_descriptor.console_log_levels & 0x0f ) | ((level & 0x0f) << 4); prlog(PR_NOTICE, "console: Setting memory log level to %i\n", level & 0x0f); } } typedef void (*ctorcall_t)(void); static void __nomcount do_ctors(void) { extern ctorcall_t __ctors_start[], __ctors_end[]; ctorcall_t *call; for (call = __ctors_start; call < __ctors_end; call++) (*call)(); } #ifndef PPC64_ELF_ABI_v2 static void branch_null(void) { assert_fail("Branch to NULL !"); } static void setup_branch_null_catcher(void) { void (*bn)(void) = branch_null; /* * FIXME: This copies the function descriptor (16 bytes) for * ABI v1 (ie. big endian). This will be broken if we ever * move to ABI v2 (ie little endian) */ memcpy(0, bn, 16); } #else static void setup_branch_null_catcher(void) { } #endif void setup_reset_vector(void) { uint32_t *src, *dst; /* Copy the reset code over the entry point. 
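 * 0x100 is the PowerPC system reset exception vector, so once this
 * patch is in place an sreset delivered to a CPU lands directly in
 * skiboot's reset handler rather than in whatever code was copied
 * there earlier during boot.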
*/ src = &reset_patch_start; dst = (uint32_t *)0x100; while(src < &reset_patch_end) *(dst++) = *(src++); sync_icache(); cpu_set_sreset_enable(true); } void copy_exception_vectors(void) { /* Backup previous vectors as this could contain a kernel * image. */ memcpy(old_vectors, NULL, 0x2000); /* Copy from 0x100 to 0x2000, avoid below 0x100 as this is * the boot flag used by CPUs still potentially entering * skiboot. */ BUILD_ASSERT((&reset_patch_end - &reset_patch_start) < 0x1f00); memcpy((void *)0x100, (void *)(SKIBOOT_BASE + 0x100), 0x1f00); sync_icache(); } static void per_thread_sanity_checks(void) { struct cpu_thread *cpu = this_cpu(); /** * @fwts-label NonZeroHRMOR * @fwts-advice The contents of the hypervisor real mode offset register * (HRMOR) is bitwise orded with the address of any hypervisor real mode * (i.e Skiboot) memory accesses. Skiboot does not support operating * with a non-zero HRMOR and setting it will break some things (e.g * XSCOMs) in hard-to-debug ways. */ assert(mfspr(SPR_HRMOR) == 0); /** * @fwts-label UnknownSecondary * @fwts-advice The boot CPU attampted to call in a secondary thread * without initialising the corresponding cpu_thread structure. This may * happen if the HDAT or devicetree reports too few threads or cores for * this processor. */ assert(cpu->state != cpu_state_no_cpu); } static void pci_nvram_init(void) { const char *nvram_speed; pcie_max_link_speed = 0; nvram_speed = nvram_query("pcie-max-link-speed"); if (nvram_speed) { pcie_max_link_speed = atoi(nvram_speed); prlog(PR_NOTICE, "PHB: NVRAM set max link speed to GEN%i\n", pcie_max_link_speed); } } /* Called from head.S, thus no prototype. */ void main_cpu_entry(const void *fdt); void __noreturn __nomcount main_cpu_entry(const void *fdt) { /* * WARNING: At this point. the timebases have * *not* been synchronized yet. Do not use any timebase * related functions for timeouts etc... unless you can cope * with the speed being some random core clock divider and * the value jumping backward when the synchronization actually * happens (in chiptod_init() below). * * Also the current cpu_thread() struct is not initialized * either so we need to clear it out first thing first (without * putting any other useful info in there jus yet) otherwise * printf an locks are going to play funny games with "con_suspend" */ pre_init_boot_cpu(); /* * Before first printk, ensure console buffer is clear or * reading tools might think it has wrapped */ clear_console(); /* * Some boot firmwares enter OPAL with MSR[ME]=1, as they presumably * handle machine checks until we take over. As we overwrite the * previous exception vectors with our own handlers, disable MSR[ME]. * This could be done atomically by patching in a branch then patching * it out last, but that's a lot of effort. */ disable_machine_check(); /* Copy all vectors down to 0 */ copy_exception_vectors(); /* * Enable MSR[ME] bit so we can take MCEs. We don't currently * recover, but we print some useful information. 
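 * (Conceptually this is just toggling MSR[ME]; in illustrative
 *  pseudo-code, not the actual helpers skiboot uses:
 *
 *	msr = read_msr();
 *	write_msr(msr | MSR_ME);	// take machine checks again
 *
 *  the earlier disable/enable pair ensures we never run with the
 *  exception vectors half-copied while machine checks are enabled.)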
*/ enable_machine_check(); /* Setup a NULL catcher to catch accidental NULL ptr calls */ setup_branch_null_catcher(); /* Call library constructors */ do_ctors(); prlog(PR_NOTICE, "OPAL %s%s starting...\n", version, #ifdef DEBUG "-debug" #else "" #endif ); prlog(PR_DEBUG, "initial console log level: memory %d, driver %d\n", (debug_descriptor.console_log_levels >> 4), (debug_descriptor.console_log_levels & 0x0f)); prlog(PR_TRACE, "You will not see this\n"); #ifdef SKIBOOT_GCOV skiboot_gcov_done(); #endif /* Initialize boot cpu's cpu_thread struct */ init_boot_cpu(); /* Now locks can be used */ init_locks(); /* Create the OPAL call table early on, entries can be overridden * later on (FSP console code for example) */ opal_table_init(); /* Init the physical map table so we can start mapping things */ phys_map_init(); /* * If we are coming in with a flat device-tree, we expand it * now. Else look for HDAT and create a device-tree from them * * Hack alert: When entering via the OPAL entry point, fdt * is set to -1, we record that and pass it to parse_hdat */ dt_root = dt_new_root(""); if (fdt == (void *)-1ul) { if (parse_hdat(true) < 0) abort(); } else if (fdt == NULL) { if (parse_hdat(false) < 0) abort(); } else { dt_expand(fdt); } dt_add_cpufeatures(dt_root); /* Now that we have a full devicetree, verify that we aren't on fire. */ per_thread_sanity_checks(); /* * From there, we follow a fairly strict initialization order. * * First we need to build up our chip data structures and initialize * XSCOM which will be needed for a number of susbequent things. * * We want XSCOM available as early as the platform probe in case the * probe requires some HW accesses. * * We also initialize the FSI master at that point in case we need * to access chips via that path early on. */ init_chips(); xscom_init(); mfsi_init(); /* * Direct controls facilities provides some controls over CPUs * using scoms. */ direct_controls_init(); /* * Put various bits & pieces in device-tree that might not * already be there such as the /chosen node if not there yet, * the ICS node, etc... This can potentially use XSCOM */ dt_init_misc(); /* * Initialize LPC (P8 only) so we can get to UART, BMC and * other system controller. This is done before probe_platform * so that the platform probing code can access an external * BMC if needed. */ lpc_init(); /* * Now, we init our memory map from the device-tree, and immediately * reserve areas which we know might contain data coming from * HostBoot. We need to do these things before we start doing * allocations outside of our heap, such as chip local allocs, * otherwise we might clobber those data. */ mem_region_init(); /* Reserve HOMER and OCC area */ homer_init(); /* Add the /opal node to the device-tree */ add_opal_node(); /* * We probe the platform now. This means the platform probe gets * the opportunity to reserve additional areas of memory if needed. * * Note: Timebases still not synchronized. */ probe_platform(); /* Initialize the rest of the cpu thread structs */ init_all_cpus(); if (proc_gen == proc_gen_p9) cpu_set_ipi_enable(true); /* Allocate our split trace buffers now. 
Depends add_opal_node() */ init_trace_buffers(); /* On P7/P8, get the ICPs and make sure they are in a sane state */ init_interrupts(); if (proc_gen == proc_gen_p7 || proc_gen == proc_gen_p8) cpu_set_ipi_enable(true); /* On P9, initialize XIVE */ init_xive(); /* Grab centaurs from device-tree if present (only on FSP-less) */ centaur_init(); /* Initialize PSI (depends on probe_platform being called) */ psi_init(); /* Initialize/enable LPC interrupts. This must be done after the * PSI interface has been initialized since it serves as an interrupt * source for LPC interrupts. */ lpc_init_interrupts(); /* Call in secondary CPUs */ cpu_bringup(); /* We can now overwrite the 0x100 vector as we are no longer being * entered there. */ setup_reset_vector(); /* We can now do NAP mode */ cpu_set_sreset_enable(true); /* * Synchronize time bases. Prior to chiptod_init() the timebase * is free-running at a frequency based on the core clock rather * than being synchronised to the ChipTOD network. This means * that the timestamps in early boot might be a little off compared * to wall clock time. */ chiptod_init(); /* Initialize i2c */ p8_i2c_init(); /* Register routine to dispatch and read sensors */ sensor_init(); /* * Initialize the opal messaging before platform.init as we are * getting request to queue occ load opal message when host services * got load occ request from FSP */ opal_init_msg(); /* * We have initialized the basic HW, we can now call into the * platform to perform subsequent inits, such as establishing * communication with the FSP or starting IPMI. */ if (platform.init) platform.init(); /* Read in NVRAM and set it up */ nvram_init(); /* Set the console level */ console_log_level(); /* Secure/Trusted Boot init. We look for /ibm,secureboot in DT */ secureboot_init(); trustedboot_init(); /* preload the IMC catalog dtb */ imc_catalog_preload(); /* Install the OPAL Console handlers */ init_opal_console(); /* Init SLW related stuff, including fastsleep */ slw_init(); op_display(OP_LOG, OP_MOD_INIT, 0x0002); pci_nvram_init(); preload_io_vpd(); preload_capp_ucode(); start_preload_kernel(); /* Virtual Accelerator Switchboard */ vas_init(); /* NX init */ nx_init(); /* Init In-Memory Collection related stuff (load the IMC dtb into memory) */ imc_init(); /* Probe IO hubs */ probe_p7ioc(); /* Probe PHB3 on P8 */ probe_phb3(); /* Probe PHB4 on P9 */ probe_phb4(); /* Probe NPUs */ probe_npu(); probe_npu2(); /* Initialize PCI */ pci_init_slots(); /* Add OPAL timer related properties */ late_init_timers(); ipmi_set_fw_progress_sensor(IPMI_FW_PCI_INIT); /* * These last few things must be done as late as possible * because they rely on various other things having been setup, * for example, add_opal_interrupts() will add all the interrupt * sources that are going to the firmware. We can't add a new one * after that call. Similarly, the mem_region calls will construct * the reserve maps in the DT so we shouldn't affect the memory * regions after that */ /* Create the LPC bus interrupt-map on P9 */ lpc_finalize_interrupts(); /* Add the list of interrupts going to OPAL */ add_opal_interrupts(); /* Now release parts of memory nodes we haven't used ourselves... */ mem_region_release_unused(); /* ... and add remaining reservations to the DT */ mem_region_add_dt_reserved(); /* * Update /ibm,secureboot/ibm,cvc/memory-region to point to * /reserved-memory/secure-crypt-algo-code instead of * /ibm,hostboot/reserved-memory/secure-crypt-algo-code. 
*/ cvc_update_reserved_memory_phandle(); prd_register_reserved_memory(); /* On P9, switch to radix mode by default */ cpu_set_radix_mode(); load_and_boot_kernel(false); } void __noreturn __secondary_cpu_entry(void) { struct cpu_thread *cpu = this_cpu(); /* Secondary CPU called in */ cpu_callin(cpu); /* Some XIVE setup */ xive_cpu_callin(cpu); /* Wait for work to do */ while(true) { if (cpu_check_jobs(cpu)) cpu_process_jobs(); else cpu_idle_job(); } } /* Called from head.S, thus no prototype. */ void secondary_cpu_entry(void); void __noreturn __nomcount secondary_cpu_entry(void) { struct cpu_thread *cpu = this_cpu(); per_thread_sanity_checks(); prlog(PR_DEBUG, "INIT: CPU PIR 0x%04x called in\n", cpu->pir); __secondary_cpu_entry(); } skiboot-5.10-rc4/core/interrupts.c000066400000000000000000000273301324317060200171150ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include /* ICP registers */ #define ICP_XIRR 0x4 /* 32-bit access */ #define ICP_CPPR 0x4 /* 8-bit access */ #define ICP_MFRR 0xc /* 8-bit access */ static LIST_HEAD(irq_sources); static LIST_HEAD(irq_sources2); static struct lock irq_lock = LOCK_UNLOCKED; void __register_irq_source(struct irq_source *is, bool secondary) { struct irq_source *is1; struct list_head *list = secondary ? &irq_sources2 : &irq_sources; prlog(PR_DEBUG, "IRQ: Registering %04x..%04x ops @%p (data %p)%s\n", is->start, is->end - 1, is->ops, is->data, secondary ? " [secondary]" : ""); lock(&irq_lock); list_for_each(list, is1, link) { if (is->end > is1->start && is->start < is1->end) { prerror("register IRQ source overlap !\n"); prerror(" new: %x..%x old: %x..%x\n", is->start, is->end - 1, is1->start, is1->end - 1); assert(0); } } list_add_tail(list, &is->link); unlock(&irq_lock); } void register_irq_source(const struct irq_source_ops *ops, void *data, uint32_t start, uint32_t count) { struct irq_source *is; is = zalloc(sizeof(struct irq_source)); assert(is); is->start = start; is->end = start + count; is->ops = ops; is->data = data; __register_irq_source(is, false); } void unregister_irq_source(uint32_t start, uint32_t count) { struct irq_source *is; /* Note: We currently only unregister from the primary sources */ lock(&irq_lock); list_for_each(&irq_sources, is, link) { if (start >= is->start && start < is->end) { if (start != is->start || count != (is->end - is->start)) { prerror("unregister IRQ source mismatch !\n"); prerror("start:%x, count: %x match: %x..%x\n", start, count, is->start, is->end); assert(0); } list_del(&is->link); unlock(&irq_lock); /* XXX Add synchronize / RCU */ free(is); return; } } unlock(&irq_lock); prerror("unregister IRQ source not found !\n"); prerror("start:%x, count: %x\n", start, count); assert(0); } struct irq_source *irq_find_source(uint32_t isn) { struct irq_source *is; lock(&irq_lock); /* * XXX This really needs some kind of caching ! 
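 * One possible shape for such a cache -- an illustrative sketch only,
 * nothing below exists in this file:
 *
 *	static struct irq_source *last_hit;	// hypothetical one-entry cache
 *
 *	if (last_hit && isn >= last_hit->start && isn < last_hit->end) {
 *		unlock(&irq_lock);
 *		return last_hit;
 *	}
 *
 * with the linear walks below as the miss path, updating last_hit
 * before each successful return.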
*/ list_for_each(&irq_sources, is, link) { if (isn >= is->start && isn < is->end) { unlock(&irq_lock); return is; } } list_for_each(&irq_sources2, is, link) { if (isn >= is->start && isn < is->end) { unlock(&irq_lock); return is; } } unlock(&irq_lock); return NULL; } void irq_for_each_source(void (*cb)(struct irq_source *, void *), void *data) { struct irq_source *is; lock(&irq_lock); list_for_each(&irq_sources, is, link) cb(is, data); list_for_each(&irq_sources2, is, link) cb(is, data); unlock(&irq_lock); } /* * This takes a 6-bit chip id and returns a 20 bit value representing * the PSI interrupt. This includes all the fields above, ie, is a * global interrupt number. * * For P8, this returns the base of the 8-interrupts block for PSI */ uint32_t get_psi_interrupt(uint32_t chip_id) { uint32_t irq; switch(proc_gen) { case proc_gen_p7: /* Get the chip ID into position, it already has * the T bit so all we need is room for the GX * bit, 9 bit BUID and 4 bit level */ irq = chip_id << (1 + 9 + 4); /* Add in the BUID */ irq |= P7_PSI_IRQ_BUID << 4; break; case proc_gen_p8: irq = p8_chip_irq_block_base(chip_id, P8_IRQ_BLOCK_MISC); irq += P8_IRQ_MISC_PSI_BASE; break; default: assert(false); }; return irq; } struct dt_node *add_ics_node(void) { struct dt_node *ics = dt_new_addr(dt_root, "interrupt-controller", 0); bool has_xive; if (!ics) return NULL; has_xive = proc_gen >= proc_gen_p9; dt_add_property_cells(ics, "reg", 0, 0, 0, 0); dt_add_property_strings(ics, "compatible", has_xive ? "ibm,opal-xive-vc" : "IBM,ppc-xics", "IBM,opal-xics"); dt_add_property_cells(ics, "#address-cells", 0); dt_add_property_cells(ics, "#interrupt-cells", 2); dt_add_property_string(ics, "device_type", "PowerPC-Interrupt-Source-Controller"); dt_add_property(ics, "interrupt-controller", NULL, 0); return ics; } uint32_t get_ics_phandle(void) { struct dt_node *i; for (i = dt_first(dt_root); i; i = dt_next(dt_root, i)) { if (streq(i->name, "interrupt-controller@0")) { return i->phandle; } } abort(); } void add_opal_interrupts(void) { struct irq_source *is; unsigned int i, ns, tns = 0, count = 0; uint32_t *irqs = NULL, isn; char *names = NULL; lock(&irq_lock); list_for_each(&irq_sources, is, link) { /* * Don't even consider sources that don't have an interrupts * callback or don't have an attributes one. */ if (!is->ops->interrupt || !is->ops->attributes) continue; for (isn = is->start; isn < is->end; isn++) { uint64_t attr = is->ops->attributes(is, isn); char *name; if (attr & IRQ_ATTR_TARGET_LINUX) continue; name = is->ops->name ? is->ops->name(is, isn) : NULL; ns = name ? strlen(name) : 0; prlog(PR_DEBUG, "irq %x name: %s (%d/%d)\n", isn, name ? name : "", ns, tns); names = realloc(names, tns + ns + 1); if (name) { strcpy(names + tns, name); tns += (ns + 1); free(name); } else names[tns++] = 0; i = count++; irqs = realloc(irqs, 4 * count); irqs[i] = isn; } } unlock(&irq_lock); /* The opal-interrupts property has one cell per interrupt, * it is not a standard interrupt property. * * Note: Even if empty, create it, otherwise some bogus error * handling in Linux can cause problems. */ dt_add_property(opal_node, "opal-interrupts", irqs, count * 4); dt_add_property(opal_node, "opal-interrupts-names", names, tns); free(irqs); free(names); } /* * This is called at init time (and one fast reboot) to sanitize the * ICP. We set our priority to 0 to mask all interrupts and make sure * no IPI is on the way. 
This is also called on wakeup from nap */ void reset_cpu_icp(void) { void *icp = this_cpu()->icp_regs; if (!icp) return; /* Dummy fetch */ in_be32(icp + ICP_XIRR); /* Clear pending IPIs */ out_8(icp + ICP_MFRR, 0xff); /* Set priority to max, ignore all incoming interrupts, EOI IPIs */ out_be32(icp + ICP_XIRR, 2); } /* Used by the PSI code to send an EOI during reset. This will also * set the CPPR to 0 which should already be the case anyway */ void icp_send_eoi(uint32_t interrupt) { void *icp = this_cpu()->icp_regs; if (!icp) return; /* Set priority to max, ignore all incoming interrupts */ out_be32(icp + ICP_XIRR, interrupt & 0xffffff); } /* This is called before winkle or nap, we clear pending IPIs and * set our priority to 1 to mask all but the IPI. */ void icp_prep_for_pm(void) { void *icp = this_cpu()->icp_regs; if (!icp) return; /* Clear pending IPIs */ out_8(icp + ICP_MFRR, 0xff); /* Set priority to 1, ignore all incoming interrupts, EOI IPIs */ out_be32(icp + ICP_XIRR, 0x01000002); } /* This is called to wakeup somebody from winkle */ void icp_kick_cpu(struct cpu_thread *cpu) { void *icp = cpu->icp_regs; if (!icp) return; /* Send high priority IPI */ out_8(icp + ICP_MFRR, 0); } /* Returns the number of chip ID bits used for interrupt numbers */ static uint32_t p8_chip_id_bits(uint32_t chip) { struct proc_chip *proc_chip = get_chip(chip); assert(proc_chip); switch (proc_chip->type) { case PROC_CHIP_P8_MURANO: case PROC_CHIP_P8_VENICE: return 6; break; case PROC_CHIP_P8_NAPLES: return 5; break; default: /* This shouldn't be called on non-P8 based systems */ assert(0); return 0; break; } } /* The chip id mask is the upper p8_chip_id_bits of the irq number */ static uint32_t chip_id_mask(uint32_t chip) { uint32_t chip_id_bits = p8_chip_id_bits(chip); uint32_t chip_id_mask; chip_id_mask = ((1 << chip_id_bits) - 1); chip_id_mask <<= P8_IRQ_BITS - chip_id_bits; return chip_id_mask; } /* The block mask is what remains of the 19 bit irq number after * removing the upper 5 or 6 bits for the chip# and the lower 11 bits * for the number of bits per block. */ static uint32_t block_mask(uint32_t chip) { uint32_t chip_id_bits = p8_chip_id_bits(chip); uint32_t irq_block_mask; irq_block_mask = P8_IRQ_BITS - chip_id_bits - P8_IVE_BITS; irq_block_mask = ((1 << irq_block_mask) - 1) << P8_IVE_BITS; return irq_block_mask; } uint32_t p8_chip_irq_block_base(uint32_t chip, uint32_t block) { uint32_t irq; assert(chip < (1 << p8_chip_id_bits(chip))); irq = SETFIELD(chip_id_mask(chip), 0, chip); irq = SETFIELD(block_mask(chip), irq, block); return irq; } uint32_t p8_chip_irq_phb_base(uint32_t chip, uint32_t phb) { assert(chip < (1 << p8_chip_id_bits(chip))); return p8_chip_irq_block_base(chip, phb + P8_IRQ_BLOCK_PHB_BASE); } uint32_t p8_irq_to_chip(uint32_t irq) { /* This assumes we only have one type of cpu in a system, * which should be ok. 
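 *
 * An illustrative worked example of the layout decoded here (assuming a
 * Murano/Venice chip, i.e. p8_chip_id_bits() == 6, so with the 19-bit
 * P8 interrupt number and 11 IVE bits the fields are
 * | chip:6 | block:2 | ive:11 |):
 *
 *   p8_chip_irq_block_base(1, 2) == (1 << 13) | (2 << 11) == 0x3000
 *   p8_irq_to_chip(0x3123)  == 1
 *   p8_irq_to_block(0x3123) == 2
 *
 * The same chip_id_mask()/block_mask() fields are simply read back below.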
*/ return GETFIELD(chip_id_mask(this_cpu()->chip_id), irq); } uint32_t p8_irq_to_block(uint32_t irq) { return GETFIELD(block_mask(this_cpu()->chip_id), irq); } uint32_t p8_irq_to_phb(uint32_t irq) { return p8_irq_to_block(irq) - P8_IRQ_BLOCK_PHB_BASE; } bool __irq_source_eoi(struct irq_source *is, uint32_t isn) { if (!is->ops->eoi) return false; is->ops->eoi(is, isn); return true; } bool irq_source_eoi(uint32_t isn) { struct irq_source *is = irq_find_source(isn); if (!is) return false; return __irq_source_eoi(is, isn); } static int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority) { struct irq_source *is = irq_find_source(isn); if (!is || !is->ops->set_xive) return OPAL_PARAMETER; return is->ops->set_xive(is, isn, server, priority); } opal_call(OPAL_SET_XIVE, opal_set_xive, 3); static int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority) { struct irq_source *is = irq_find_source(isn); if (!opal_addr_valid(server)) return OPAL_PARAMETER; if (!is || !is->ops->get_xive) return OPAL_PARAMETER; return is->ops->get_xive(is, isn, server, priority); } opal_call(OPAL_GET_XIVE, opal_get_xive, 3); static int64_t opal_handle_interrupt(uint32_t isn, __be64 *outstanding_event_mask) { struct irq_source *is = irq_find_source(isn); int64_t rc = OPAL_SUCCESS; if (!opal_addr_valid(outstanding_event_mask)) return OPAL_PARAMETER; /* No source ? return */ if (!is || !is->ops->interrupt) { rc = OPAL_PARAMETER; goto bail; } /* Run it */ is->ops->interrupt(is, isn); /* Check timers if SLW timer isn't working */ if (!slw_timer_ok()) check_timers(true); /* Update output events */ bail: if (outstanding_event_mask) *outstanding_event_mask = cpu_to_be64(opal_pending_events); return rc; } opal_call(OPAL_HANDLE_INTERRUPT, opal_handle_interrupt, 2); void init_interrupts(void) { struct dt_node *icp; const struct dt_property *sranges; struct cpu_thread *cpu; u32 base, count, i; u64 addr, size; dt_for_each_compatible(dt_root, icp, "ibm,ppc-xicp") { sranges = dt_require_property(icp, "ibm,interrupt-server-ranges", -1); base = dt_get_number(sranges->prop, 1); count = dt_get_number(sranges->prop + 4, 1); for (i = 0; i < count; i++) { addr = dt_get_address(icp, i, &size); cpu = find_cpu_by_server(base + i); if (cpu) cpu->icp_regs = (void *)addr; } } } skiboot-5.10-rc4/core/ipmi-opal.c000066400000000000000000000075671324317060200165770ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #include #include #include static struct lock msgq_lock = LOCK_UNLOCKED; static struct list_head msgq = LIST_HEAD_INIT(msgq); static void opal_send_complete(struct ipmi_msg *msg) { lock(&msgq_lock); list_add_tail(&msgq, &msg->link); opal_update_pending_evt(ipmi_backend->opal_event_ipmi_recv, ipmi_backend->opal_event_ipmi_recv); unlock(&msgq_lock); } static int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *opal_ipmi_msg, uint64_t msg_len) { struct ipmi_msg *msg; if (opal_ipmi_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) { prerror("OPAL IPMI: Incorrect version\n"); return OPAL_UNSUPPORTED; } msg_len -= sizeof(struct opal_ipmi_msg); if (msg_len > IPMI_MAX_REQ_SIZE) { prerror("OPAL IPMI: Invalid request length\n"); return OPAL_PARAMETER; } prlog(PR_TRACE, "opal_ipmi_send(cmd: 0x%02x netfn: 0x%02x len: 0x%02llx)\n", opal_ipmi_msg->cmd, opal_ipmi_msg->netfn >> 2, msg_len); msg = ipmi_mkmsg(interface, IPMI_CODE(opal_ipmi_msg->netfn >> 2, opal_ipmi_msg->cmd), opal_send_complete, NULL, opal_ipmi_msg->data, msg_len, IPMI_MAX_RESP_SIZE); if (!msg) return OPAL_RESOURCE; msg->complete = opal_send_complete; msg->error = opal_send_complete; return ipmi_queue_msg(msg); } static int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *opal_ipmi_msg, uint64_t *msg_len) { struct ipmi_msg *msg; int64_t rc; lock(&msgq_lock); msg = list_top(&msgq, struct ipmi_msg, link); if (!msg) { rc = OPAL_EMPTY; goto out_unlock; } if (opal_ipmi_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) { prerror("OPAL IPMI: Incorrect version\n"); rc = OPAL_UNSUPPORTED; goto out_del_msg; } if (interface != IPMI_DEFAULT_INTERFACE) { prerror("IPMI: Invalid interface 0x%llx in opal_ipmi_recv\n", interface); rc = OPAL_PARAMETER; goto out_del_msg; } if (*msg_len - sizeof(struct opal_ipmi_msg) < msg->resp_size + 1) { rc = OPAL_RESOURCE; goto out_del_msg; } list_del(&msg->link); if (list_empty(&msgq)) opal_update_pending_evt(ipmi_backend->opal_event_ipmi_recv, 0); unlock(&msgq_lock); opal_ipmi_msg->cmd = msg->cmd; opal_ipmi_msg->netfn = msg->netfn; opal_ipmi_msg->data[0] = msg->cc; memcpy(&opal_ipmi_msg->data[1], msg->data, msg->resp_size); prlog(PR_TRACE, "opal_ipmi_recv(cmd: 0x%02x netfn: 0x%02x resp_size: 0x%02x)\n", msg->cmd, msg->netfn >> 2, msg->resp_size); /* Add one as the completion code is returned in the message data */ *msg_len = msg->resp_size + sizeof(struct opal_ipmi_msg) + 1; ipmi_free_msg(msg); return OPAL_SUCCESS; out_del_msg: list_del(&msg->link); if (list_empty(&msgq)) opal_update_pending_evt(ipmi_backend->opal_event_ipmi_recv, 0); ipmi_free_msg(msg); out_unlock: unlock(&msgq_lock); return rc; } void ipmi_opal_init(void) { struct dt_node *opal_ipmi; opal_ipmi = dt_new(opal_node, "ipmi"); dt_add_property_strings(opal_ipmi, "compatible", "ibm,opal-ipmi"); dt_add_property_cells(opal_ipmi, "ibm,ipmi-interface-id", IPMI_DEFAULT_INTERFACE); dt_add_property_cells(opal_ipmi, "interrupts", ilog2(ipmi_backend->opal_event_ipmi_recv)); opal_register(OPAL_IPMI_SEND, opal_ipmi_send, 3); opal_register(OPAL_IPMI_RECV, opal_ipmi_recv, 3); } skiboot-5.10-rc4/core/ipmi.c000066400000000000000000000142641324317060200156360ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include struct ipmi_backend *ipmi_backend = NULL; static struct lock sync_lock = LOCK_UNLOCKED; static struct ipmi_msg *sync_msg = NULL; void ipmi_free_msg(struct ipmi_msg *msg) { /* ipmi_free_msg frees messages allocated by the * backend. Without a backend we couldn't have allocated * messages to free (we don't support removing backends * yet). */ if (!ipmi_present()) { prerror("IPMI: Trying to free message without backend\n"); return; } msg->backend->free_msg(msg); } void ipmi_init_msg(struct ipmi_msg *msg, int interface, uint32_t code, void (*complete)(struct ipmi_msg *), void *user_data, size_t req_size, size_t resp_size) { /* We don't actually support multiple interfaces at the moment. */ assert(interface == IPMI_DEFAULT_INTERFACE); msg->backend = ipmi_backend; msg->cmd = IPMI_CMD(code); msg->netfn = IPMI_NETFN(code) << 2; msg->req_size = req_size; msg->resp_size = resp_size; msg->complete = complete; msg->user_data = user_data; } struct ipmi_msg *ipmi_mkmsg_simple(uint32_t code, void *req_data, size_t req_size) { return ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, code, ipmi_free_msg, NULL, req_data, req_size, 0); } struct ipmi_msg *ipmi_mkmsg(int interface, uint32_t code, void (*complete)(struct ipmi_msg *), void *user_data, void *req_data, size_t req_size, size_t resp_size) { struct ipmi_msg *msg; if (!ipmi_present()) return NULL; msg = ipmi_backend->alloc_msg(req_size, resp_size); if (!msg) return NULL; ipmi_init_msg(msg, interface, code, complete, user_data, req_size, resp_size); /* Commands are free to over ride this if they want to handle errors */ msg->error = ipmi_free_msg; if (req_data) memcpy(msg->data, req_data, req_size); return msg; } int ipmi_queue_msg_head(struct ipmi_msg *msg) { if (!ipmi_present()) return OPAL_HARDWARE; if (!msg) { prerror("%s: Attempting to queue NULL message\n", __func__); return OPAL_PARAMETER; } return msg->backend->queue_msg_head(msg); } int ipmi_queue_msg(struct ipmi_msg *msg) { /* Here we could choose which interface to use if we want to support multiple interfaces. 
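 *
 * A minimal usage sketch of this API (illustrative values only; the netfn
 * and command below are the standard IPMI "Get Device ID" pair, and
 * my_done() is a hypothetical completion handler):
 *
 *   static void my_done(struct ipmi_msg *msg)
 *   {
 *           prlog(PR_INFO, "cc=0x%02x\n", msg->cc);
 *           ipmi_free_msg(msg);
 *   }
 *
 *   msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, IPMI_CODE(0x06, 0x01),
 *                    my_done, NULL, NULL, 0, 16);
 *   if (msg)
 *           ipmi_queue_msg(msg);
 *
 * The completion (or error) callback owns the message and is expected to
 * free it, as the read-event path in this file does.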
*/ if (!ipmi_present()) return OPAL_HARDWARE; if (!msg) { prerror("%s: Attempting to queue NULL message\n", __func__); return OPAL_PARAMETER; } return msg->backend->queue_msg(msg); } int ipmi_dequeue_msg(struct ipmi_msg *msg) { if (!ipmi_present()) return OPAL_HARDWARE; if (!msg) { prerror("%s: Attempting to dequeue NULL message\n", __func__); return OPAL_PARAMETER; } return msg->backend->dequeue_msg(msg); } void ipmi_cmd_done(uint8_t cmd, uint8_t netfn, uint8_t cc, struct ipmi_msg *msg) { msg->cc = cc; if (msg->cmd != cmd) { prerror("IPMI: Incorrect cmd 0x%02x in response\n", cmd); cc = IPMI_ERR_UNSPECIFIED; } if ((msg->netfn >> 2) + 1 != (netfn >> 2)) { prerror("IPMI: Incorrect netfn 0x%02x in response\n", netfn >> 2); cc = IPMI_ERR_UNSPECIFIED; } msg->netfn = netfn; if (cc != IPMI_CC_NO_ERROR) { prlog(PR_DEBUG, "IPMI: Got error response 0x%02x\n", msg->cc); assert(msg->error); msg->error(msg); } else if (msg->complete) msg->complete(msg); /* At this point the message has should have been freed by the completion functions. */ /* If this is a synchronous message flag that we are done */ if (msg == sync_msg) sync_msg = NULL; } void ipmi_queue_msg_sync(struct ipmi_msg *msg) { if (!ipmi_present()) return; if (!msg) { prerror("%s: Attempting to queue NULL message\n", __func__); return; } lock(&sync_lock); while (sync_msg); sync_msg = msg; ipmi_queue_msg(msg); unlock(&sync_lock); while (sync_msg == msg) time_wait_ms(100); } static void ipmi_read_event_complete(struct ipmi_msg *msg) { prlog(PR_DEBUG, "IPMI read event %02x complete: %d bytes. cc: %02x\n", msg->cmd, msg->resp_size, msg->cc); /* Handle power control & PNOR handshake events */ ipmi_parse_sel(msg); ipmi_free_msg(msg); } static void ipmi_get_message_flags_complete(struct ipmi_msg *msg) { uint8_t flags = msg->data[0]; ipmi_free_msg(msg); prlog(PR_DEBUG, "IPMI Get Message Flags: %02x\n", flags); /* Once we see an interrupt we assume the payload has * booted. We disable the wdt and let the OS setup its own * wdt. * * This is also where we consider the OS to be booted, so we set * the boot count sensor */ if (flags & IPMI_MESSAGE_FLAGS_WATCHDOG_PRE_TIMEOUT) { ipmi_wdt_stop(); ipmi_set_boot_count(); } /* Message available in the event buffer? Queue a Read Event command * to retrieve it. The flag is cleared by performing a read */ if (flags & IPMI_MESSAGE_FLAGS_EVENT_BUFFER) { msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, IPMI_READ_EVENT, ipmi_read_event_complete, NULL, NULL, 0, 16); ipmi_queue_msg(msg); } } void ipmi_sms_attention(void) { struct ipmi_msg *msg; if (!ipmi_present()) return; /* todo: when we handle multiple IPMI interfaces, we'll need to * ensure that this message is associated with the appropriate * backend. */ msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, IPMI_GET_MESSAGE_FLAGS, ipmi_get_message_flags_complete, NULL, NULL, 0, 1); ipmi_queue_msg(msg); } void ipmi_register_backend(struct ipmi_backend *backend) { /* We only support one backend at the moment */ assert(backend->alloc_msg); assert(backend->free_msg); assert(backend->queue_msg); assert(backend->dequeue_msg); ipmi_backend = backend; ipmi_backend->opal_event_ipmi_recv = opal_dynamic_event_alloc(); } bool ipmi_present(void) { return ipmi_backend != NULL; } skiboot-5.10-rc4/core/lock.c000066400000000000000000000073301324317060200156240ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include /* Set to bust locks. Note, this is initialized to true because our * lock debugging code is not going to work until we have the per * CPU data initialized */ bool bust_locks = true; #ifdef DEBUG_LOCKS static void lock_error(struct lock *l, const char *reason, uint16_t err) { bust_locks = true; fprintf(stderr, "LOCK ERROR: %s @%p (state: 0x%016llx)\n", reason, l, l->lock_val); op_display(OP_FATAL, OP_MOD_LOCK, err); abort(); } static void lock_check(struct lock *l) { if ((l->lock_val & 1) && (l->lock_val >> 32) == this_cpu()->pir) lock_error(l, "Invalid recursive lock", 0); } static void unlock_check(struct lock *l) { if (!(l->lock_val & 1)) lock_error(l, "Unlocking unlocked lock", 1); if ((l->lock_val >> 32) != this_cpu()->pir) lock_error(l, "Unlocked non-owned lock", 2); if (l->in_con_path && this_cpu()->con_suspend == 0) lock_error(l, "Unlock con lock with console not suspended", 3); if (list_empty(&this_cpu()->locks_held)) lock_error(l, "Releasing lock we don't hold depth", 4); } #else static inline void lock_check(struct lock *l) { }; static inline void unlock_check(struct lock *l) { }; #endif /* DEBUG_LOCKS */ bool lock_held_by_me(struct lock *l) { uint64_t pir64 = this_cpu()->pir; return l->lock_val == ((pir64 << 32) | 1); } static inline bool __try_lock(struct cpu_thread *cpu, struct lock *l) { uint64_t val; val = cpu->pir; val <<= 32; val |= 1; barrier(); if (__cmpxchg64(&l->lock_val, 0, val) == 0) { sync(); return true; } return false; } bool try_lock_caller(struct lock *l, const char *owner) { struct cpu_thread *cpu = this_cpu(); if (bust_locks) return true; if (__try_lock(cpu, l)) { l->owner = owner; if (l->in_con_path) cpu->con_suspend++; list_add(&cpu->locks_held, &l->list); return true; } return false; } void lock_caller(struct lock *l, const char *owner) { if (bust_locks) return; lock_check(l); for (;;) { if (try_lock_caller(l, owner)) break; smt_lowest(); while (l->lock_val) barrier(); smt_medium(); } } void unlock(struct lock *l) { struct cpu_thread *cpu = this_cpu(); if (bust_locks) return; unlock_check(l); l->owner = NULL; list_del(&l->list); lwsync(); l->lock_val = 0; /* WARNING: On fast reboot, we can be reset right at that * point, so the reset_lock in there cannot be in the con path */ if (l->in_con_path) { cpu->con_suspend--; if (cpu->con_suspend == 0 && cpu->con_need_flush) flush_console(); } } bool lock_recursive_caller(struct lock *l, const char *caller) { if (bust_locks) return false; if (lock_held_by_me(l)) return false; lock_caller(l, caller); return true; } void init_locks(void) { bust_locks = false; } void dump_locks_list(void) { struct lock *l; prlog(PR_ERR, "Locks held:\n"); list_for_each(&this_cpu()->locks_held, l, list) prlog(PR_ERR, " %s\n", l->owner); } void drop_my_locks(bool warn) { struct lock *l; while((l = list_pop(&this_cpu()->locks_held, struct lock, list)) != NULL) { if (warn) prlog(PR_ERR, " %s\n", l->owner); unlock(l); } } skiboot-5.10-rc4/core/malloc.c000066400000000000000000000040101324317060200161330ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Wrappers for malloc, et. al. */ #include #include #include #include #define DEFAULT_ALIGN __alignof__(long) void *__memalign(size_t blocksize, size_t bytes, const char *location) { void *p; lock(&skiboot_heap.free_list_lock); p = mem_alloc(&skiboot_heap, bytes, blocksize, location); unlock(&skiboot_heap.free_list_lock); return p; } void *__malloc(size_t bytes, const char *location) { return __memalign(DEFAULT_ALIGN, bytes, location); } void __free(void *p, const char *location) { lock(&skiboot_heap.free_list_lock); mem_free(&skiboot_heap, p, location); unlock(&skiboot_heap.free_list_lock); } void *__realloc(void *ptr, size_t size, const char *location) { void *newptr; /* Two classic malloc corner cases. */ if (!size) { __free(ptr, location); return NULL; } if (!ptr) return __malloc(size, location); lock(&skiboot_heap.free_list_lock); if (mem_resize(&skiboot_heap, ptr, size, location)) { newptr = ptr; } else { newptr = mem_alloc(&skiboot_heap, size, DEFAULT_ALIGN, location); if (newptr) { size_t copy = mem_allocated_size(ptr); if (copy > size) copy = size; memcpy(newptr, ptr, copy); mem_free(&skiboot_heap, ptr, location); } } unlock(&skiboot_heap.free_list_lock); return newptr; } void *__zalloc(size_t bytes, const char *location) { void *p = __malloc(bytes, location); if (p) memset(p, 0, bytes); return p; } skiboot-5.10-rc4/core/mem_region.c000066400000000000000000001003221324317060200170100ustar00rootroot00000000000000/* Copyright 2013-2017 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include int64_t mem_dump_free(void); void mem_dump_allocs(void); /* Memory poisoning on free (if POISON_MEM_REGION set to 1) */ #ifdef DEBUG #define POISON_MEM_REGION 1 #else #define POISON_MEM_REGION 0 #endif #define POISON_MEM_REGION_WITH 0x99 #define POISON_MEM_REGION_LIMIT 1*1024*1024*1024 /* Locking: The mem_region_lock protects the regions list from concurrent * updates. Additions to, or removals from, the region list must be done * with this lock held. This is typically done when we're establishing * the memory & reserved regions. * * Each region has a lock (region->free_list_lock) to protect the free list * from concurrent modification. This lock is used when we're allocating * memory out of a specific region. 
* * If both locks are needed (eg, __local_alloc, where we need to find a region, * then allocate from it), the mem_region_lock must be acquired before (and * released after) the per-region lock. */ struct lock mem_region_lock = LOCK_UNLOCKED; static struct list_head regions = LIST_HEAD_INIT(regions); static struct list_head early_reserves = LIST_HEAD_INIT(early_reserves); static bool mem_region_init_done = false; static bool mem_regions_finalised = false; unsigned long top_of_ram = SKIBOOT_BASE + SKIBOOT_SIZE; static struct mem_region skiboot_os_reserve = { .name = "ibm,os-reserve", .start = 0, .len = SKIBOOT_BASE, .type = REGION_OS, }; struct mem_region skiboot_heap = { .name = "ibm,firmware-heap", .start = HEAP_BASE, .len = HEAP_SIZE, .type = REGION_SKIBOOT_HEAP, }; static struct mem_region skiboot_code_and_text = { .name = "ibm,firmware-code", .start = SKIBOOT_BASE, .len = HEAP_BASE - SKIBOOT_BASE, .type = REGION_SKIBOOT_FIRMWARE, }; static struct mem_region skiboot_after_heap = { .name = "ibm,firmware-data", .start = HEAP_BASE + HEAP_SIZE, .len = SKIBOOT_BASE + SKIBOOT_SIZE - (HEAP_BASE + HEAP_SIZE), .type = REGION_SKIBOOT_FIRMWARE, }; static struct mem_region skiboot_cpu_stacks = { .name = "ibm,firmware-stacks", .start = CPU_STACKS_BASE, .len = 0, /* TBA */ .type = REGION_SKIBOOT_FIRMWARE, }; struct alloc_hdr { bool free : 1; bool prev_free : 1; unsigned long num_longs : BITS_PER_LONG-2; /* Including header. */ const char *location; }; struct free_hdr { struct alloc_hdr hdr; struct list_node list; /* ... unsigned long tailer; */ }; #define ALLOC_HDR_LONGS (sizeof(struct alloc_hdr) / sizeof(long)) #define ALLOC_MIN_LONGS (sizeof(struct free_hdr) / sizeof(long) + 1) /* Avoid ugly casts. */ static void *region_start(const struct mem_region *region) { return (void *)(unsigned long)region->start; } /* Each free block has a tailer, so we can walk backwards. */ static unsigned long *tailer(struct free_hdr *f) { return (unsigned long *)f + f->hdr.num_longs - 1; } /* This walks forward to the next hdr (or NULL if at the end). */ static struct alloc_hdr *next_hdr(const struct mem_region *region, const struct alloc_hdr *hdr) { void *next; next = ((unsigned long *)hdr + hdr->num_longs); if (next >= region_start(region) + region->len) next = NULL; return next; } #if POISON_MEM_REGION == 1 static void mem_poison(struct free_hdr *f) { size_t poison_size = (void*)tailer(f) - (void*)(f+1); /* We only poison up to a limit, as otherwise boot is * kinda slow */ if (poison_size > POISON_MEM_REGION_LIMIT) poison_size = POISON_MEM_REGION_LIMIT; memset(f+1, POISON_MEM_REGION_WITH, poison_size); } #else static inline void mem_poison(struct free_hdr *f __unused) { } #endif /* Creates free block covering entire region. 
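 *
 * As a sketch of the block layout managed here (assuming 64-bit longs and
 * the structures above): every block starts with an alloc_hdr whose
 * num_longs counts the whole block, header included, and a *free* block
 * additionally ends with a one-long tailer repeating num_longs so that
 * make_free() can walk backwards and coalesce:
 *
 *   [ alloc_hdr | payload ........................... ]   allocated block
 *   [ alloc_hdr | list_node | unused ...... | tailer  ]   free block
 *
 * e.g. a 64-byte free block has num_longs = 8, with the tailer in the
 * eighth long holding the value 8.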
*/ static void init_allocatable_region(struct mem_region *region) { struct free_hdr *f = region_start(region); assert(region->type == REGION_SKIBOOT_HEAP || region->type == REGION_MEMORY); f->hdr.num_longs = region->len / sizeof(long); f->hdr.free = true; f->hdr.prev_free = false; *tailer(f) = f->hdr.num_longs; list_head_init(&region->free_list); list_add(&region->free_list, &f->list); mem_poison(f); } static void make_free(struct mem_region *region, struct free_hdr *f, const char *location, bool skip_poison) { struct alloc_hdr *next; if (!skip_poison) mem_poison(f); if (f->hdr.prev_free) { struct free_hdr *prev; unsigned long *prev_tailer = (unsigned long *)f - 1; assert(*prev_tailer); prev = (void *)((unsigned long *)f - *prev_tailer); assert(prev->hdr.free); assert(!prev->hdr.prev_free); /* Expand to cover the one we just freed. */ prev->hdr.num_longs += f->hdr.num_longs; f = prev; } else { f->hdr.free = true; f->hdr.location = location; list_add(&region->free_list, &f->list); } /* Fix up tailer. */ *tailer(f) = f->hdr.num_longs; /* If next is free, coalesce it */ next = next_hdr(region, &f->hdr); if (next) { next->prev_free = true; if (next->free) { struct free_hdr *next_free = (void *)next; list_del_from(&region->free_list, &next_free->list); /* Maximum of one level of recursion */ make_free(region, next_free, location, true); } } } /* Can we fit this many longs with this alignment in this free block? */ static bool fits(struct free_hdr *f, size_t longs, size_t align, size_t *offset) { *offset = 0; while (f->hdr.num_longs >= *offset + longs) { size_t addr; addr = (unsigned long)f + (*offset + ALLOC_HDR_LONGS) * sizeof(long); if ((addr & (align - 1)) == 0) return true; /* Don't make tiny chunks! */ if (*offset == 0) *offset = ALLOC_MIN_LONGS; else (*offset)++; } return false; } static void discard_excess(struct mem_region *region, struct alloc_hdr *hdr, size_t alloc_longs, const char *location, bool skip_poison) { /* Do we have excess? */ if (hdr->num_longs > alloc_longs + ALLOC_MIN_LONGS) { struct free_hdr *post; /* Set up post block. */ post = (void *)hdr + alloc_longs * sizeof(long); post->hdr.num_longs = hdr->num_longs - alloc_longs; post->hdr.prev_free = false; /* Trim our block. */ hdr->num_longs = alloc_longs; /* This coalesces as required. */ make_free(region, post, location, skip_poison); } } static const char *hdr_location(const struct alloc_hdr *hdr) { /* Corrupt: step carefully! */ if (is_rodata(hdr->location)) return hdr->location; return "*CORRUPT*"; } static void bad_header(const struct mem_region *region, const struct alloc_hdr *hdr, const char *during, const char *location) { /* Corrupt: step carefully!
*/ if (is_rodata(hdr->location)) prerror("%p (in %s) %s at %s, previously %s\n", hdr-1, region->name, during, location, hdr->location); else prerror("%p (in %s) %s at %s, previously %p\n", hdr-1, region->name, during, location, hdr->location); abort(); } static bool region_is_reservable(struct mem_region *region) { return region->type != REGION_OS; } static bool region_is_reserved(struct mem_region *region) { return region->type != REGION_OS && region->type != REGION_MEMORY; } void mem_dump_allocs(void) { struct mem_region *region; struct alloc_hdr *hdr; /* Second pass: populate property data */ prlog(PR_INFO, "Memory regions:\n"); list_for_each(&regions, region, list) { if (!(region->type == REGION_SKIBOOT_HEAP || region->type == REGION_MEMORY)) continue; prlog(PR_INFO, " 0x%012llx..%012llx : %s\n", (long long)region->start, (long long)(region->start + region->len - 1), region->name); if (region->free_list.n.next == NULL) { prlog(PR_INFO, " no allocs\n"); continue; } for (hdr = region_start(region); hdr; hdr = next_hdr(region, hdr)) { if (hdr->free) continue; prlog(PR_INFO, " 0x%.8lx %s\n", hdr->num_longs * sizeof(long), hdr_location(hdr)); } } } int64_t mem_dump_free(void) { struct mem_region *region; struct alloc_hdr *hdr; int64_t total_free; int64_t region_free; total_free = 0; prlog(PR_INFO, "Free space in HEAP memory regions:\n"); list_for_each(&regions, region, list) { if (!(region->type == REGION_SKIBOOT_HEAP || region->type == REGION_MEMORY)) continue; region_free = 0; if (region->free_list.n.next == NULL) { continue; } for (hdr = region_start(region); hdr; hdr = next_hdr(region, hdr)) { if (!hdr->free) continue; region_free+= hdr->num_longs * sizeof(long); } prlog(PR_INFO, "Region %s free: %"PRIx64"\n", region->name, region_free); total_free += region_free; } prlog(PR_INFO, "Total free: %"PRIu64"\n", total_free); return total_free; } static void *__mem_alloc(struct mem_region *region, size_t size, size_t align, const char *location) { size_t alloc_longs, offset; struct free_hdr *f; struct alloc_hdr *next; /* Align must be power of 2. */ assert(!((align - 1) & align)); /* This should be a constant. */ assert(is_rodata(location)); /* Unallocatable region? */ if (!(region->type == REGION_SKIBOOT_HEAP || region->type == REGION_MEMORY)) return NULL; /* First allocation? */ if (region->free_list.n.next == NULL) init_allocatable_region(region); /* Don't do screwy sizes. */ if (size > region->len) return NULL; /* Don't do tiny alignments, we deal in long increments. */ if (align < sizeof(long)) align = sizeof(long); /* Convert size to number of longs, too. */ alloc_longs = (size + sizeof(long)-1) / sizeof(long) + ALLOC_HDR_LONGS; /* Can't be too small for when we free it, either. */ if (alloc_longs < ALLOC_MIN_LONGS) alloc_longs = ALLOC_MIN_LONGS; /* Walk free list. */ list_for_each(&region->free_list, f, list) { /* We may have to skip some to meet alignment. */ if (fits(f, alloc_longs, align, &offset)) goto found; } return NULL; found: assert(f->hdr.free); assert(!f->hdr.prev_free); /* This block is no longer free. */ list_del_from(&region->free_list, &f->list); f->hdr.free = false; f->hdr.location = location; next = next_hdr(region, &f->hdr); if (next) { assert(next->prev_free); next->prev_free = false; } if (offset != 0) { struct free_hdr *pre = f; f = (void *)f + offset * sizeof(long); assert(f >= pre + 1); /* Set up new header. */ f->hdr.num_longs = pre->hdr.num_longs - offset; /* f->hdr.prev_free will be set by make_free below.
*/ f->hdr.free = false; f->hdr.location = location; /* Fix up old header. */ pre->hdr.num_longs = offset; pre->hdr.prev_free = false; /* This coalesces as required. */ make_free(region, pre, location, true); } /* We might be too long; put the rest back. */ discard_excess(region, &f->hdr, alloc_longs, location, true); /* Clear tailer for debugging */ *tailer(f) = 0; /* Their pointer is immediately after header. */ return &f->hdr + 1; } void *mem_alloc(struct mem_region *region, size_t size, size_t align, const char *location) { void *r; assert(lock_held_by_me(&region->free_list_lock)); r = __mem_alloc(region, size, align, location); if (r) return r; prerror("mem_alloc(0x%lx, 0x%lx, \"%s\") failed !\n", size, align, location); mem_dump_allocs(); return NULL; } void mem_free(struct mem_region *region, void *mem, const char *location) { struct alloc_hdr *hdr; /* This should be a constant. */ assert(is_rodata(location)); assert(lock_held_by_me(&region->free_list_lock)); /* Freeing NULL is always a noop. */ if (!mem) return; /* Your memory is in the region, right? */ assert(mem >= region_start(region) + sizeof(*hdr)); assert(mem < region_start(region) + region->len); /* Grab header. */ hdr = mem - sizeof(*hdr); if (hdr->free) bad_header(region, hdr, "re-freed", location); make_free(region, (struct free_hdr *)hdr, location, false); } size_t mem_allocated_size(const void *ptr) { const struct alloc_hdr *hdr = ptr - sizeof(*hdr); return hdr->num_longs * sizeof(long) - sizeof(struct alloc_hdr); } bool mem_resize(struct mem_region *region, void *mem, size_t len, const char *location) { struct alloc_hdr *hdr, *next; struct free_hdr *f; /* This should be a constant. */ assert(is_rodata(location)); assert(lock_held_by_me(&region->free_list_lock)); /* Get header. */ hdr = mem - sizeof(*hdr); if (hdr->free) bad_header(region, hdr, "resize", location); /* Round up size to multiple of longs. */ len = (sizeof(*hdr) + len + sizeof(long) - 1) / sizeof(long); /* Can't be too small for when we free it, either. */ if (len < ALLOC_MIN_LONGS) len = ALLOC_MIN_LONGS; /* Shrinking is simple. */ if (len <= hdr->num_longs) { hdr->location = location; discard_excess(region, hdr, len, location, false); return true; } /* Check if we can expand. */ next = next_hdr(region, hdr); if (!next || !next->free || hdr->num_longs + next->num_longs < len) return false; /* OK, it's free and big enough, absorb it. */ f = (struct free_hdr *)next; list_del_from(&region->free_list, &f->list); hdr->num_longs += next->num_longs; hdr->location = location; /* Update next prev_free */ next = next_hdr(region, &f->hdr); if (next) { assert(next->prev_free); next->prev_free = false; } /* Clear tailer for debugging */ *tailer(f) = 0; /* Now we might have *too* much. */ discard_excess(region, hdr, len, location, true); return true; } bool mem_check(const struct mem_region *region) { size_t frees = 0; struct alloc_hdr *hdr, *prev_free = NULL; struct free_hdr *f; /* Check it's sanely aligned. */ if (region->start % sizeof(struct alloc_hdr)) { prerror("Region '%s' not sanely aligned (%llx)\n", region->name, (unsigned long long)region->start); return false; } if ((long)region->len % sizeof(struct alloc_hdr)) { prerror("Region '%s' not sane length (%llu)\n", region->name, (unsigned long long)region->len); return false; } /* Not ours to play with, or empty? Don't do anything. */ if (!(region->type == REGION_MEMORY || region->type == REGION_SKIBOOT_HEAP) || region->free_list.n.next == NULL) return true; /* Walk linearly.
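 * Cross-check sketch: during this walk every free block's offset is XORed
 * into 'frees'; the free-list walk further down XORs the same offsets
 * again, so a consistent region cancels back to zero.  For example, free
 * blocks at offsets 0x40 and 0x200 give 0x40 ^ 0x200 = 0x240 after the
 * linear walk and 0x240 ^ 0x40 ^ 0x200 = 0 after the list walk; any
 * nonzero remainder means the two views of the free list disagree.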
*/ for (hdr = region_start(region); hdr; hdr = next_hdr(region, hdr)) { if (hdr->num_longs < ALLOC_MIN_LONGS) { prerror("Region '%s' %s %p (%s) size %zu\n", region->name, hdr->free ? "free" : "alloc", hdr, hdr_location(hdr), hdr->num_longs * sizeof(long)); return false; } if ((unsigned long)hdr + hdr->num_longs * sizeof(long) > region->start + region->len) { prerror("Region '%s' %s %p (%s) oversize %zu\n", region->name, hdr->free ? "free" : "alloc", hdr, hdr_location(hdr), hdr->num_longs * sizeof(long)); return false; } if (hdr->free) { if (hdr->prev_free || prev_free) { prerror("Region '%s' free %p (%s) has prev_free" " %p (%s) %sset?\n", region->name, hdr, hdr_location(hdr), prev_free, prev_free ? hdr_location(prev_free) : "NULL", hdr->prev_free ? "" : "un"); return false; } prev_free = hdr; frees ^= (unsigned long)hdr - region->start; } else { if (hdr->prev_free != (bool)prev_free) { prerror("Region '%s' alloc %p (%s) has" " prev_free %p %sset?\n", region->name, hdr, hdr_location(hdr), prev_free, hdr->prev_free ? "" : "un"); return false; } prev_free = NULL; } } /* Now walk free list. */ list_for_each(&region->free_list, f, list) frees ^= (unsigned long)f - region->start; if (frees) { prerror("Region '%s' free list and walk do not match!\n", region->name); return false; } return true; } static struct mem_region *new_region(const char *name, uint64_t start, uint64_t len, struct dt_node *node, enum mem_region_type type) { struct mem_region *region; region = malloc(sizeof(*region)); if (!region) return NULL; region->name = name; region->start = start; region->len = len; region->node = node; region->type = type; region->free_list.n.next = NULL; init_lock(&region->free_list_lock); return region; } /* We always split regions, so we only have to replace one. */ static struct mem_region *split_region(struct mem_region *head, uint64_t split_at, enum mem_region_type type) { struct mem_region *tail; uint64_t end = head->start + head->len; tail = new_region(head->name, split_at, end - split_at, head->node, type); /* Original region becomes head. */ if (tail) head->len -= tail->len; return tail; } static bool intersects(const struct mem_region *region, uint64_t addr) { return addr > region->start && addr < region->start + region->len; } static bool maybe_split(struct mem_region *r, uint64_t split_at) { struct mem_region *tail; if (!intersects(r, split_at)) return true; tail = split_region(r, split_at, r->type); if (!tail) return false; /* Tail add is important: we may need to split again! */ list_add_tail(&regions, &tail->list); return true; } static bool overlaps(const struct mem_region *r1, const struct mem_region *r2) { return (r1->start + r1->len > r2->start && r1->start < r2->start + r2->len); } static bool contains(const struct mem_region *r1, const struct mem_region *r2) { u64 r1_end = r1->start + r1->len; u64 r2_end = r2->start + r2->len; return (r1->start <= r2->start && r2_end <= r1_end); } static struct mem_region *get_overlap(const struct mem_region *region) { struct mem_region *i; list_for_each(&regions, i, list) { if (overlaps(region, i)) return i; } return NULL; } static bool add_region(struct mem_region *region) { struct mem_region *r; if (mem_regions_finalised) { prerror("MEM: add_region(%s@0x%"PRIx64") called after finalise!\n", region->name, region->start); return false; } /* First split any regions which intersect. */ list_for_each(&regions, r, list) { /* * The new region should be fully contained by an existing one.
* If it's not then we have a problem where reservations * partially overlap which is probably broken. * * NB: There *might* be situations where this is legitimate, * but the region handling does not currently support this. */ if (overlaps(r, region) && !contains(r, region)) { prerror("MEM: Partial overlap detected between regions:\n"); prerror("MEM: %s [0x%"PRIx64"-0x%"PRIx64"] (new)\n", region->name, region->start, region->start + region->len); prerror("MEM: %s [0x%"PRIx64"-0x%"PRIx64"]\n", r->name, r->start, r->start + r->len); return false; } if (!maybe_split(r, region->start) || !maybe_split(r, region->start + region->len)) return false; } /* Now we have only whole overlaps, if any. */ while ((r = get_overlap(region)) != NULL) { assert(r->start == region->start); assert(r->len == region->len); list_del_from(&regions, &r->list); free(r); } /* Finally, add in our own region. */ list_add(&regions, &region->list); return true; } static void mem_reserve(enum mem_region_type type, const char *name, uint64_t start, uint64_t len) { struct mem_region *region; bool added = true; lock(&mem_region_lock); region = new_region(name, start, len, NULL, type); assert(region); if (!mem_region_init_done) list_add(&early_reserves, &region->list); else added = add_region(region); assert(added); unlock(&mem_region_lock); } void mem_reserve_fw(const char *name, uint64_t start, uint64_t len) { mem_reserve(REGION_FW_RESERVED, name, start, len); } void mem_reserve_hwbuf(const char *name, uint64_t start, uint64_t len) { mem_reserve(REGION_RESERVED, name, start, len); } static bool matches_chip_id(const __be32 ids[], size_t num, u32 chip_id) { size_t i; for (i = 0; i < num; i++) if (be32_to_cpu(ids[i]) == chip_id) return true; return false; } void *__local_alloc(unsigned int chip_id, size_t size, size_t align, const char *location) { struct mem_region *region; void *p = NULL; bool use_local = true; lock(&mem_region_lock); restart: list_for_each(&regions, region, list) { const struct dt_property *prop; const __be32 *ids; if (!(region->type == REGION_SKIBOOT_HEAP || region->type == REGION_MEMORY)) continue; /* Don't allocate from normal heap. */ if (region == &skiboot_heap) continue; /* First pass, only match node local regions */ if (use_local) { if (!region->node) continue; prop = dt_find_property(region->node, "ibm,chip-id"); ids = (const __be32 *)prop->prop; if (!matches_chip_id(ids, prop->len/sizeof(u32), chip_id)) continue; } /* Second pass, match anything */ lock(&region->free_list_lock); p = mem_alloc(region, size, align, location); unlock(&region->free_list_lock); if (p) break; } /* * If we can't allocate the memory block from the expected * node, we bail to any one that can accommodate our request. */ if (!p && use_local) { use_local = false; goto restart; } unlock(&mem_region_lock); return p; } struct mem_region *find_mem_region(const char *name) { struct mem_region *region; list_for_each(&regions, region, list) { if (streq(region->name, name)) return region; } return NULL; } bool mem_range_is_reserved(uint64_t start, uint64_t size) { uint64_t end = start + size; struct mem_region *region; struct list_head *search; /* We may have the range covered by a number of regions, which could * appear in any order. So, we look for a region that covers the * start address, and bump start up to the end of that region. * * We repeat until we've either bumped past the end of the range, * or we didn't find a matching region. * * This has a worst-case of O(n^2), but n is well bounded by the * small number of reservations.
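 *
 * Worked example (illustrative addresses): checking [0x1000, 0x3000) when
 * the reservations are [0x1000, 0x2000) and [0x2000, 0x3000) takes two
 * passes; the first bumps start to 0x2000, the second to 0x3000, which is
 * >= end, so the range is reported as fully reserved.  If instead there
 * were a hole at [0x2000, 0x2100), the second pass would find no region
 * covering 0x2000 and we would return false.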
*/ if (!mem_region_init_done) search = &early_reserves; else search = &regions; for (;;) { bool found = false; list_for_each(search, region, list) { if (!region_is_reserved(region)) continue; /* does this region overlap the start address, and * have a non-zero size? */ if (region->start <= start && region->start + region->len > start && region->len) { start = region->start + region->len; found = true; } } /* 'end' is the first byte outside of the range */ if (start >= end) return true; if (!found) break; } return false; } void adjust_cpu_stacks_alloc(void) { /* CPU stacks start at 0, then when we know max possible PIR, * we adjust, then when we bring all CPUs online we know the * runtime max PIR, so we adjust this a few times during boot. */ skiboot_cpu_stacks.len = (cpu_max_pir + 1) * STACK_SIZE; } static void mem_region_parse_reserved_properties(void) { const struct dt_property *names, *ranges; struct mem_region *region; prlog(PR_DEBUG, "MEM: parsing reserved memory from " "reserved-names/-ranges properties\n"); names = dt_find_property(dt_root, "reserved-names"); ranges = dt_find_property(dt_root, "reserved-ranges"); if (names && ranges) { const uint64_t *range; int n, len; range = (const void *)ranges->prop; for (n = 0; n < names->len; n += len, range += 2) { char *name; len = strlen(names->prop + n) + 1; name = strdup(names->prop + n); region = new_region(name, dt_get_number(range, 2), dt_get_number(range + 1, 2), NULL, REGION_FW_RESERVED); if (!add_region(region)) { prerror("Couldn't add mem_region %s\n", name); abort(); } } } else if (names || ranges) { prerror("Invalid properties: reserved-names=%p " "with reserved-ranges=%p\n", names, ranges); abort(); } else { return; } } static bool mem_region_parse_reserved_nodes(const char *path) { struct dt_node *parent, *node; parent = dt_find_by_path(dt_root, path); if (!parent) return false; prlog(PR_INFO, "MEM: parsing reserved memory from node %s\n", path); dt_for_each_child(parent, node) { const struct dt_property *reg; struct mem_region *region; int type; reg = dt_find_property(node, "reg"); if (!reg) { char *nodepath = dt_get_path(node); prerror("node %s has no reg property, ignoring\n", nodepath); free(nodepath); continue; } if (dt_has_node_property(node, "no-map", NULL)) type = REGION_RESERVED; else type = REGION_FW_RESERVED; region = new_region(strdup(node->name), dt_get_number(reg->prop, 2), dt_get_number(reg->prop + sizeof(u64), 2), node, type); if (!add_region(region)) { char *nodepath = dt_get_path(node); prerror("node %s failed to add_region()\n", nodepath); free(nodepath); } } return true; } /* Trawl through device tree, create memory regions from nodes. */ void mem_region_init(void) { struct mem_region *region, *next; struct dt_node *i; bool rc; /* Ensure we have no collision between skiboot core and our heap */ extern char _end[]; BUILD_ASSERT(HEAP_BASE >= (uint64_t)_end); /* * Add associativity properties outside of the lock * to avoid recursive locking caused by allocations * done by add_chip_dev_associativity() */ dt_for_each_node(dt_root, i) { if (!dt_has_node_property(i, "device_type", "memory")) continue; /* Add associativity properties */ add_chip_dev_associativity(i); } /* Add each memory node.
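 *
 * For illustration (addresses are examples only): a device-tree node such
 * as
 *
 *   memory@0 {
 *           device_type = "memory";
 *           reg = <0x0 0x0 0x2 0x0>;    // 8GB at address 0
 *   };
 *
 * becomes a REGION_MEMORY region named "ibm,firmware-allocs-memory@0"
 * covering that range, and top_of_ram is raised to include it.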
*/ dt_for_each_node(dt_root, i) { uint64_t start, len; char *rname; #define NODE_REGION_PREFIX "ibm,firmware-allocs-" if (!dt_has_node_property(i, "device_type", "memory")) continue; rname = zalloc(strlen(i->name) + strlen(NODE_REGION_PREFIX) + 1); assert(rname); strcat(rname, NODE_REGION_PREFIX); strcat(rname, i->name); start = dt_get_address(i, 0, &len); lock(&mem_region_lock); region = new_region(rname, start, len, i, REGION_MEMORY); if (!region) { prerror("MEM: Could not add mem region %s!\n", i->name); abort(); } list_add(&regions, &region->list); if ((start + len) > top_of_ram) top_of_ram = start + len; unlock(&mem_region_lock); } adjust_cpu_stacks_alloc(); lock(&mem_region_lock); /* Now carve out our own reserved areas. */ if (!add_region(&skiboot_os_reserve) || !add_region(&skiboot_code_and_text) || !add_region(&skiboot_heap) || !add_region(&skiboot_after_heap) || !add_region(&skiboot_cpu_stacks)) { prerror("Out of memory adding skiboot reserved areas\n"); abort(); } /* Add reserved ranges from HDAT */ list_for_each_safe(&early_reserves, region, next, list) { bool added; list_del(&region->list); added = add_region(region); assert(added); } /* Add reserved ranges from the DT */ rc = mem_region_parse_reserved_nodes("/reserved-memory"); if (!rc) rc = mem_region_parse_reserved_nodes( "/ibm,hostboot/reserved-memory"); if (!rc) mem_region_parse_reserved_properties(); mem_region_init_done = true; unlock(&mem_region_lock); } static uint64_t allocated_length(const struct mem_region *r) { struct free_hdr *f, *last = NULL; /* No allocations at all? */ if (r->free_list.n.next == NULL) return 0; /* Find last free block. */ list_for_each(&r->free_list, f, list) if (f > last) last = f; /* No free blocks? */ if (!last) return r->len; /* Last free block isn't at end? */ if (next_hdr(r, &last->hdr)) return r->len; return (unsigned long)last - r->start; } /* Separate out allocated sections into their own region. */ void mem_region_release_unused(void) { struct mem_region *r; lock(&mem_region_lock); assert(!mem_regions_finalised); prlog(PR_INFO, "Releasing unused memory:\n"); list_for_each(&regions, r, list) { uint64_t used_len; /* If it's not allocatable, ignore it. */ if (!(r->type == REGION_SKIBOOT_HEAP || r->type == REGION_MEMORY)) continue; used_len = allocated_length(r); prlog(PR_INFO, " %s: %llu/%llu used\n", r->name, (long long)used_len, (long long)r->len); /* We keep the skiboot heap. */ if (r == &skiboot_heap) continue; /* Nothing used? Whole thing is for Linux. */ if (used_len == 0) r->type = REGION_OS; /* Partially used? Split region. */ else if (used_len != r->len) { struct mem_region *for_linux; struct free_hdr *last = region_start(r) + used_len; /* Remove the final free block. */ list_del_from(&r->free_list, &last->list); for_linux = split_region(r, r->start + used_len, REGION_OS); if (!for_linux) { prerror("OOM splitting mem node %s for linux\n", r->name); abort(); } list_add(&regions, &for_linux->list); } } unlock(&mem_region_lock); } static void mem_region_add_dt_reserved_node(struct dt_node *parent, struct mem_region *region) { char *name, *p; /* If a reserved region was established before skiboot, it may be * referenced by a device-tree node with extra data. In that case, * copy the node to /reserved-memory/, unless it's already there. * * We update region->node to the new copy here, as the prd code may * update regions' device-tree nodes, and we want those updates to * apply to the nodes in /reserved-memory/.
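 *
 * The result, sketched here with made-up addresses, is one child node per
 * reservation under /reserved-memory, e.g.:
 *
 *   ibm,firmware-heap@30000000 {
 *           reg = <0x0 0x30000000 0x0 0x00100000>;
 *   };
 *
 * plus a "no-map" property when the region is REGION_RESERVED (hardware
 * buffers the OS must leave unmapped).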
*/ if (region->type == REGION_FW_RESERVED && region->node) { if (region->node->parent != parent) region->node = dt_copy(region->node, parent); return; } name = strdup(region->name); assert(name); /* remove any cell addresses in the region name; we have our own cell * addresses here */ p = strchr(name, '@'); if (p) *p = '\0'; region->node = dt_new_addr(parent, name, region->start); assert(region->node); dt_add_property_u64s(region->node, "reg", region->start, region->len); /* * This memory is used by hardware and may need special handling. Ask * the host kernel not to map it by default. */ if (region->type == REGION_RESERVED) dt_add_property(region->node, "no-map", NULL, 0); free(name); } void mem_region_add_dt_reserved(void) { int names_len, ranges_len, len; const struct dt_property *prop; struct mem_region *region; void *names, *ranges; struct dt_node *node; uint64_t *range; char *name; names_len = 0; ranges_len = 0; /* Finalise the region list, so we know that the regions list won't be * altered after this point. The regions' free lists may change after * we drop the lock, but we don't access those. */ lock(&mem_region_lock); mem_regions_finalised = true; /* establish top-level reservation node */ node = dt_find_by_path(dt_root, "reserved-memory"); if (!node) { node = dt_new(dt_root, "reserved-memory"); dt_add_property_cells(node, "#address-cells", 2); dt_add_property_cells(node, "#size-cells", 2); dt_add_property(node, "ranges", NULL, 0); } prlog(PR_INFO, "Reserved regions:\n"); /* First pass, create /reserved-memory/ nodes for each reservation, * and calculate the length for the /reserved-names and * /reserved-ranges properties */ list_for_each(&regions, region, list) { if (!region_is_reservable(region)) continue; prlog(PR_INFO, " 0x%012llx..%012llx : %s\n", (long long)region->start, (long long)(region->start + region->len - 1), region->name); mem_region_add_dt_reserved_node(node, region); /* calculate the size of the properties populated later */ names_len += strlen(region->node->name) + 1; ranges_len += 2 * sizeof(uint64_t); } name = names = malloc(names_len); range = ranges = malloc(ranges_len); /* Second pass: populate the old-style reserved-names and * reserved-regions arrays based on the node data */ list_for_each(&regions, region, list) { if (!region_is_reservable(region)) continue; len = strlen(region->node->name) + 1; memcpy(name, region->node->name, len); name += len; range[0] = cpu_to_fdt64(region->start); range[1] = cpu_to_fdt64(region->len); range += 2; } unlock(&mem_region_lock); prop = dt_find_property(dt_root, "reserved-names"); if (prop) dt_del_property(dt_root, (struct dt_property *)prop); prop = dt_find_property(dt_root, "reserved-ranges"); if (prop) dt_del_property(dt_root, (struct dt_property *)prop); dt_add_property(dt_root, "reserved-names", names, names_len); dt_add_property(dt_root, "reserved-ranges", ranges, ranges_len); free(names); free(ranges); } struct mem_region *mem_region_next(struct mem_region *region) { struct list_node *node; assert(lock_held_by_me(&mem_region_lock)); node = region ? &region->list : &regions.n; if (node->next == &regions.n) return NULL; return list_entry(node->next, struct mem_region, list); } skiboot-5.10-rc4/core/nvram-format.c000066400000000000000000000161321324317060200173050ustar00rootroot00000000000000/* Copyright 2013-2017 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include /* * NVRAM Format as specified in PAPR */ struct chrp_nvram_hdr { uint8_t sig; uint8_t cksum; be16 len; char name[12]; }; static struct chrp_nvram_hdr *skiboot_part_hdr; #define NVRAM_SIG_FW_PRIV 0x51 #define NVRAM_SIG_SYSTEM 0x70 #define NVRAM_SIG_FREE 0x7f #define NVRAM_NAME_COMMON "common" #define NVRAM_NAME_FW_PRIV "ibm,skiboot" #define NVRAM_NAME_FREE "wwwwwwwwwwww" /* 64k should be enough, famous last words... */ #define NVRAM_SIZE_COMMON 0x10000 /* 4k should be enough, famous last words... */ #define NVRAM_SIZE_FW_PRIV 0x1000 static uint8_t chrp_nv_cksum(struct chrp_nvram_hdr *hdr) { struct chrp_nvram_hdr h_copy = *hdr; uint8_t b_data, i_sum, c_sum; uint8_t *p = (uint8_t *)&h_copy; unsigned int nbytes = sizeof(h_copy); h_copy.cksum = 0; for (c_sum = 0; nbytes; nbytes--) { b_data = *(p++); i_sum = c_sum + b_data; if (i_sum < c_sum) i_sum++; c_sum = i_sum; } return c_sum; } int nvram_format(void *nvram_image, uint32_t nvram_size) { struct chrp_nvram_hdr *h; unsigned int offset = 0; prerror("NVRAM: Re-initializing (size: 0x%08x)\n", nvram_size); memset(nvram_image, 0, nvram_size); /* Create private partition */ if (nvram_size - offset < NVRAM_SIZE_FW_PRIV) return -1; h = nvram_image + offset; h->sig = NVRAM_SIG_FW_PRIV; h->len = cpu_to_be16(NVRAM_SIZE_FW_PRIV >> 4); strcpy(h->name, NVRAM_NAME_FW_PRIV); h->cksum = chrp_nv_cksum(h); prlog(PR_DEBUG, "NVRAM: Created '%s' partition at 0x%08x" " for size 0x%08x with cksum 0x%02x\n", NVRAM_NAME_FW_PRIV, offset, be16_to_cpu(h->len), h->cksum); offset += NVRAM_SIZE_FW_PRIV; /* Create common partition */ if (nvram_size - offset < NVRAM_SIZE_COMMON) return -1; h = nvram_image + offset; h->sig = NVRAM_SIG_SYSTEM; h->len = cpu_to_be16(NVRAM_SIZE_COMMON >> 4); strcpy(h->name, NVRAM_NAME_COMMON); h->cksum = chrp_nv_cksum(h); prlog(PR_DEBUG, "NVRAM: Created '%s' partition at 0x%08x" " for size 0x%08x with cksum 0x%02x\n", NVRAM_NAME_COMMON, offset, be16_to_cpu(h->len), h->cksum); offset += NVRAM_SIZE_COMMON; /* Create free space partition */ if (nvram_size - offset < sizeof(struct chrp_nvram_hdr)) return -1; h = nvram_image + offset; h->sig = NVRAM_SIG_FREE; h->len = cpu_to_be16((nvram_size - offset) >> 4); /* We have the full 12 bytes here */ memcpy(h->name, NVRAM_NAME_FREE, 12); h->cksum = chrp_nv_cksum(h); prlog(PR_DEBUG, "NVRAM: Created '%s' partition at 0x%08x" " for size 0x%08x with cksum 0x%02x\n", NVRAM_NAME_FREE, offset, be16_to_cpu(h->len), h->cksum); return 0; } /* * Check that the nvram partition layout is sane and that it * contains our required partitions. 
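 *
 * A worked example of the layout nvram_format() creates (sizes taken from
 * the constants above; each chrp_nvram_hdr is 16 bytes and 'len' counts
 * 16-byte blocks):
 *
 *   0x00000: sig 0x51 "ibm,skiboot"  len 0x0100  (0x1000 bytes)
 *   0x01000: sig 0x70 "common"       len 0x1000  (0x10000 bytes)
 *   0x11000: sig 0x7f "wwwwwwwwwwww" len covering the rest of NVRAM
 *
 * We expect to find at least the common and ibm,skiboot partitions, each
 * with a valid header checksum.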
If not, we re-format the * lot of it */ int nvram_check(void *nvram_image, const uint32_t nvram_size) { unsigned int offset = 0; bool found_common = false; skiboot_part_hdr = NULL; while (offset + sizeof(struct chrp_nvram_hdr) < nvram_size) { struct chrp_nvram_hdr *h = nvram_image + offset; if (chrp_nv_cksum(h) != h->cksum) { prerror("NVRAM: Partition at offset 0x%x" " has bad checksum: 0x%02x vs 0x%02x\n", offset, h->cksum, chrp_nv_cksum(h)); goto failed; } if (be16_to_cpu(h->len) < 1) { prerror("NVRAM: Partition at offset 0x%x" " has incorrect 0 length\n", offset); goto failed; } if (h->sig == NVRAM_SIG_SYSTEM && strcmp(h->name, NVRAM_NAME_COMMON) == 0) found_common = true; if (h->sig == NVRAM_SIG_FW_PRIV && strcmp(h->name, NVRAM_NAME_FW_PRIV) == 0) skiboot_part_hdr = h; offset += be16_to_cpu(h->len) << 4; if (offset > nvram_size) { prerror("NVRAM: Partition at offset 0x%x" " extends beyond end of nvram !\n", offset); goto failed; } } if (!found_common) { prlog_once(PR_ERR, "NVRAM: Common partition not found !\n"); goto failed; } if (!skiboot_part_hdr) { prlog_once(PR_ERR, "NVRAM: Skiboot private partition not found !\n"); goto failed; } else { /* * The OF NVRAM format requires config strings to be NUL * terminated and unused memory to be set to zero. Well behaved * software should ensure this is done for us, but we should * always check. */ const char *last_byte = (const char *) skiboot_part_hdr + be16_to_cpu(skiboot_part_hdr->len) * 16 - 1; if (*last_byte != 0) { prerror("NVRAM: Skiboot private partition is not NUL terminated"); goto failed; } } prlog(PR_INFO, "NVRAM: Layout appears sane\n"); return 0; failed: return -1; } static const char *find_next_key(const char *start, const char *end) { /* * Unused parts of the partition are set to NUL. If we hit two * NULs in a row then we assume that we have hit the end of the * partition. */ if (*start == 0) return NULL; while (start < end) { if (*start == 0) return start + 1; start++; } return NULL; } /* * nvram_query() - Searches skiboot NVRAM partition for a key=value pair. * * Returns a pointer to a NUL terminated string that contains the value * associated with the given key. */ const char *nvram_query(const char *key) { const char *part_end, *start; int key_len = strlen(key); if (!nvram_has_loaded()) { prlog(PR_WARNING, "NVRAM: Query before is done loading\n"); prlog(PR_WARNING, "NVRAM: Waiting for load\n"); if (!nvram_wait_for_load()) { prlog(PR_CRIT, "NVRAM: Failed to load\n"); return NULL; } } /* * The running OS can modify the NVRAM as it pleases so we need to be * a little paranoid and check that it's ok before we try parse it. 
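 *
 * Illustrative content of the ibm,skiboot partition body (the keys shown
 * are examples only): NUL-terminated "key=value" strings packed back to
 * back, with the unused remainder zeroed, e.g.
 *
 *   "fast-reset=1\0pci-tracing=true\0\0....."
 *
 * find_next_key() above steps from one string to the next and stops when
 * it reaches an empty string, which is how the end of the data is found.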
* * NB: nvram_validate() can update skiboot_part_hdr */ if (!nvram_validate()) return NULL; part_end = (const char *) skiboot_part_hdr + be16_to_cpu(skiboot_part_hdr->len) * 16 - 1; start = (const char *) skiboot_part_hdr + sizeof(*skiboot_part_hdr); if (!key_len) { prlog(PR_WARNING, "NVRAM: search key is empty!\n"); return NULL; } if (key_len > 32) prlog(PR_WARNING, "NVRAM: search key '%s' is longer than 32 chars\n", key); while (start) { int remaining = part_end - start; prlog(PR_TRACE, "NVRAM: '%s' (%lu)\n", start, strlen(start)); if (key_len + 1 > remaining) return NULL; if (!strncmp(key, start, key_len) && start[key_len] == '=') { const char *value = &start[key_len + 1]; prlog(PR_DEBUG, "NVRAM: Searched for '%s' found '%s'\n", key, value); return value; } start = find_next_key(start, part_end); } prlog(PR_DEBUG, "NVRAM: '%s' not found\n", key); return NULL; } bool nvram_query_eq(const char *key, const char *value) { const char *s = nvram_query(key); if (!s) return false; return !strcmp(s, value); } skiboot-5.10-rc4/core/nvram.c000066400000000000000000000107151324317060200160200ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include static void *nvram_image; static uint32_t nvram_size; static bool nvram_ready; /* has the nvram been loaded? */ static bool nvram_valid; /* is the nvram format ok? */ static int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset) { if (!nvram_ready) return OPAL_HARDWARE; if (!opal_addr_valid((void *)buffer)) return OPAL_PARAMETER; if (offset >= nvram_size || (offset + size) > nvram_size) return OPAL_PARAMETER; memcpy((void *)buffer, nvram_image + offset, size); return OPAL_SUCCESS; } opal_call(OPAL_READ_NVRAM, opal_read_nvram, 3); static int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset) { if (!nvram_ready) return OPAL_HARDWARE; if (!opal_addr_valid((void *)buffer)) return OPAL_PARAMETER; if (offset >= nvram_size || (offset + size) > nvram_size) return OPAL_PARAMETER; memcpy(nvram_image + offset, (void *)buffer, size); if (platform.nvram_write) platform.nvram_write(offset, nvram_image + offset, size); /* The host OS has written to the NVRAM so we can't be sure that it's * well formatted. */ nvram_valid = false; return OPAL_SUCCESS; } opal_call(OPAL_WRITE_NVRAM, opal_write_nvram, 3); bool nvram_validate(void) { if (!nvram_valid) nvram_valid = !nvram_check(nvram_image, nvram_size); return nvram_valid; } static void nvram_reformat(void) { if (nvram_format(nvram_image, nvram_size)) { prerror("NVRAM: Failed to format NVRAM!\n"); nvram_valid = false; return; } /* Write the whole thing back */ if (platform.nvram_write) platform.nvram_write(0, nvram_image, nvram_size); nvram_valid = true; } void nvram_reinit(void) { /* It's possible we failed to load nvram at boot. 
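 * (If so, nvram_init() below kicks off the platform read again; when
 * that read completes, nvram_read_complete() re-validates the image
 * and reformats it if necessary.)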
*/ if (!nvram_ready) nvram_init(); else if (!nvram_validate()) nvram_reformat(); } void nvram_read_complete(bool success) { struct dt_node *np; /* Read not successful, error out and free the buffer */ if (!success) { free(nvram_image); nvram_size = 0; return; } if (!nvram_validate()) nvram_reformat(); /* Add nvram node */ np = dt_new(opal_node, "nvram"); dt_add_property_cells(np, "#bytes", nvram_size); dt_add_property_string(np, "compatible", "ibm,opal-nvram"); /* Mark ready */ nvram_ready = true; } bool nvram_wait_for_load(void) { /* Short cut */ if (nvram_ready) return true; /* Tell the caller it will never happen */ if (!platform.nvram_info) return false; /* * One of two things has happened here. * 1. nvram_wait_for_load() was called before nvram_init() * 2. The read of NVRAM failed. * Either way, this is quite a bad event. */ if (!nvram_image && !nvram_size) { prlog(PR_CRIT, "NVRAM: Possible wait before nvram_init()!\n"); return false; } while (!nvram_ready) { opal_run_pollers(); /* If the read fails, tell the caller */ if (!nvram_image && !nvram_size) return false; } return true; } bool nvram_has_loaded(void) { return nvram_ready; } void nvram_init(void) { int rc; if (!platform.nvram_info) return; rc = platform.nvram_info(&nvram_size); if (rc) { prerror("NVRAM: Error %d retrieving nvram info\n", rc); return; } printf("NVRAM: Size is %d KB\n", nvram_size >> 10); if (nvram_size > 0x100000) { printf("NVRAM: Cropping to 1MB !\n"); nvram_size = 0x100000; } /* * We allocate the nvram image with 4k alignment to make the * FSP backend job's easier */ nvram_image = memalign(0x1000, nvram_size); if (!nvram_image) { prerror("NVRAM: Failed to allocate nvram image\n"); nvram_size = 0; return; } /* Read it in */ rc = platform.nvram_start_read(nvram_image, 0, nvram_size); if (rc) { prerror("NVRAM: Failed to read NVRAM from FSP !\n"); nvram_size = 0; free(nvram_image); return; } /* * We'll get called back later (or recursively from * nvram_start_read) in nvram_read_complete() */ } skiboot-5.10-rc4/core/opal-msg.c000066400000000000000000000102511324317060200164070ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #define pr_fmt(fmt) "opalmsg: " fmt #include #include #include #include #define OPAL_MAX_MSGS (OPAL_MSG_TYPE_MAX + OPAL_MAX_ASYNC_COMP - 1) struct opal_msg_entry { struct list_node link; void (*consumed)(void *data); void *data; struct opal_msg msg; }; static LIST_HEAD(msg_free_list); static LIST_HEAD(msg_pending_list); static struct lock opal_msg_lock = LOCK_UNLOCKED; int _opal_queue_msg(enum opal_msg_type msg_type, void *data, void (*consumed)(void *data), size_t num_params, const u64 *params) { struct opal_msg_entry *entry; lock(&opal_msg_lock); entry = list_pop(&msg_free_list, struct opal_msg_entry, link); if (!entry) { prerror("No available node in the free list, allocating\n"); entry = zalloc(sizeof(struct opal_msg_entry)); if (!entry) { prerror("Allocation failed\n"); unlock(&opal_msg_lock); return OPAL_RESOURCE; } } entry->consumed = consumed; entry->data = data; entry->msg.msg_type = cpu_to_be32(msg_type); if (num_params > ARRAY_SIZE(entry->msg.params)) { prerror("Discarding extra parameters\n"); num_params = ARRAY_SIZE(entry->msg.params); } memcpy(entry->msg.params, params, num_params*sizeof(u64)); list_add_tail(&msg_pending_list, &entry->link); opal_update_pending_evt(OPAL_EVENT_MSG_PENDING, OPAL_EVENT_MSG_PENDING); unlock(&opal_msg_lock); return 0; } static int64_t opal_get_msg(uint64_t *buffer, uint64_t size) { struct opal_msg_entry *entry; void (*callback)(void *data); void *data; if (size < sizeof(struct opal_msg) || !buffer) return OPAL_PARAMETER; if (!opal_addr_valid(buffer)) return OPAL_PARAMETER; lock(&opal_msg_lock); entry = list_pop(&msg_pending_list, struct opal_msg_entry, link); if (!entry) { unlock(&opal_msg_lock); return OPAL_RESOURCE; } memcpy(buffer, &entry->msg, sizeof(entry->msg)); callback = entry->consumed; data = entry->data; list_add(&msg_free_list, &entry->link); if (list_empty(&msg_pending_list)) opal_update_pending_evt(OPAL_EVENT_MSG_PENDING, 0); unlock(&opal_msg_lock); if (callback) callback(data); return OPAL_SUCCESS; } opal_call(OPAL_GET_MSG, opal_get_msg, 2); static int64_t opal_check_completion(uint64_t *buffer, uint64_t size, uint64_t token) { struct opal_msg_entry *entry, *next_entry; void (*callback)(void *data) = NULL; int rc = OPAL_BUSY; void *data = NULL; if (!opal_addr_valid(buffer)) return OPAL_PARAMETER; lock(&opal_msg_lock); list_for_each_safe(&msg_pending_list, entry, next_entry, link) { if (entry->msg.msg_type == OPAL_MSG_ASYNC_COMP && be64_to_cpu(entry->msg.params[0]) == token) { list_del(&entry->link); callback = entry->consumed; data = entry->data; list_add(&msg_free_list, &entry->link); if (list_empty(&msg_pending_list)) opal_update_pending_evt(OPAL_EVENT_MSG_PENDING, 0); rc = OPAL_SUCCESS; break; } } if (rc == OPAL_SUCCESS && size >= sizeof(struct opal_msg)) memcpy(buffer, &entry->msg, sizeof(entry->msg)); unlock(&opal_msg_lock); if (callback) callback(data); return rc; } opal_call(OPAL_CHECK_ASYNC_COMPLETION, opal_check_completion, 3); void opal_init_msg(void) { struct opal_msg_entry *entry; int i; for (i = 0; i < OPAL_MAX_MSGS; i++, entry++) { entry = zalloc(sizeof(*entry)); if (!entry) goto err; list_add_tail(&msg_free_list, &entry->link); } return; err: for (; i > 0; i--) { entry = list_pop(&msg_free_list, struct opal_msg_entry, link); if (entry) free(entry); } } skiboot-5.10-rc4/core/opal.c000066400000000000000000000442771324317060200156420ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Pending events to signal via opal_poll_events */ uint64_t opal_pending_events; /* OPAL dispatch table defined in head.S */ extern uint64_t opal_branch_table[]; /* Number of args expected for each call. */ static u8 opal_num_args[OPAL_LAST+1]; /* OPAL anchor node */ struct dt_node *opal_node; /* mask of dynamic vs fixed events; opal_allocate_dynamic_event will * only allocate from this range */ static const uint64_t opal_dynamic_events_mask = 0xffffffff00000000ul; static uint64_t opal_dynamic_events; extern uint32_t attn_trigger; extern uint32_t hir_trigger; /* We make this look like a Surveillance error, even though it really * isn't one. */ DEFINE_LOG_ENTRY(OPAL_INJECTED_HIR, OPAL_MISC_ERR_EVT, OPAL_SURVEILLANCE, OPAL_SURVEILLANCE_ERR, OPAL_PREDICTIVE_ERR_GENERAL, OPAL_MISCELLANEOUS_INFO_ONLY); void opal_table_init(void) { struct opal_table_entry *s = __opal_table_start; struct opal_table_entry *e = __opal_table_end; prlog(PR_DEBUG, "OPAL table: %p .. %p, branch table: %p\n", s, e, opal_branch_table); while(s < e) { opal_branch_table[s->token] = function_entry_address(s->func); opal_num_args[s->token] = s->nargs; s++; } } /* Called from head.S, thus no prototype */ long opal_bad_token(uint64_t token); long opal_bad_token(uint64_t token) { /** * @fwts-label OPALBadToken * @fwts-advice OPAL was called with a bad token. On POWER8 and * earlier, Linux kernels had a bug where they wouldn't check * if firmware supported particular OPAL calls before making them. * It is, in fact, harmless for these cases. On systems newer than * POWER8, this should never happen and indicates a kernel bug * where OPAL_CHECK_TOKEN isn't being called where it should be. */ prlog(PR_ERR, "OPAL: Called with bad token %lld !\n", token); return OPAL_PARAMETER; } static void opal_trace_entry(struct stack_frame *eframe __unused) { #ifdef OPAL_TRACE_ENTRY union trace t; unsigned nargs, i; if (eframe->gpr[0] > OPAL_LAST) nargs = 0; else nargs = opal_num_args[eframe->gpr[0]]; t.opal.token = cpu_to_be64(eframe->gpr[0]); t.opal.lr = cpu_to_be64(eframe->lr); t.opal.sp = cpu_to_be64(eframe->gpr[1]); for(i=0; igpr[3+i]); trace_add(&t, TRACE_OPAL, offsetof(struct trace_opal, r3_to_11[nargs])); #endif } /* * opal_quiesce_state is used as a lock. Don't use an actual lock to avoid * lock busting. */ static uint32_t opal_quiesce_state; /* 0 or QUIESCE_HOLD/QUIESCE_REJECT */ static int32_t opal_quiesce_owner; /* PIR */ static int32_t opal_quiesce_target; /* -1 or PIR */ static int64_t opal_check_token(uint64_t token); /* Called from head.S, thus no prototype */ int64_t opal_entry_check(struct stack_frame *eframe); int64_t opal_entry_check(struct stack_frame *eframe) { struct cpu_thread *cpu = this_cpu(); uint64_t token = eframe->gpr[0]; if (cpu->pir != mfspr(SPR_PIR)) { printf("CPU MISMATCH ! 
PIR=%04lx cpu @%p -> pir=%04x token=%llu\n", mfspr(SPR_PIR), cpu, cpu->pir, token); abort(); } opal_trace_entry(eframe); if (!opal_check_token(token)) return opal_bad_token(token); if (!opal_quiesce_state && cpu->in_opal_call) { printf("CPU ATTEMPT TO RE-ENTER FIRMWARE! PIR=%04lx cpu @%p -> pir=%04x token=%llu\n", mfspr(SPR_PIR), cpu, cpu->pir, token); return OPAL_BUSY; } again: cpu->in_opal_call++; /* * Order the store in_opal_call vs load quiesce_opal_call. * This also provides an acquire barrier for opal entry vs * another thread quiescing opal. In this way, quiescing * can behave as mutual exclusion. */ sync(); if (cpu->quiesce_opal_call) { cpu->in_opal_call--; if (opal_quiesce_state == QUIESCE_REJECT) return OPAL_BUSY; smt_lowest(); while (cpu->quiesce_opal_call) barrier(); smt_medium(); goto again; } return OPAL_SUCCESS; } int64_t opal_exit_check(int64_t retval, struct stack_frame *eframe); int64_t opal_exit_check(int64_t retval, struct stack_frame *eframe) { struct cpu_thread *cpu = this_cpu(); uint64_t token = eframe->gpr[0]; if (!cpu->in_opal_call) { printf("CPU UN-ACCOUNTED FIRMWARE ENTRY! PIR=%04lx cpu @%p -> pir=%04x token=%llu retval=%lld\n", mfspr(SPR_PIR), cpu, cpu->pir, token, retval); } else { if (!list_empty(&cpu->locks_held)) { prlog(PR_ERR, "OPAL exiting with locks held, token=%llu retval=%lld\n", token, retval); drop_my_locks(true); } sync(); /* release barrier vs quiescing */ cpu->in_opal_call--; } return retval; } int64_t opal_quiesce(uint32_t quiesce_type, int32_t cpu_target) { struct cpu_thread *cpu = this_cpu(); struct cpu_thread *target = NULL; struct cpu_thread *c; uint64_t end; bool stuck = false; if (cpu_target >= 0) { target = find_cpu_by_server(cpu_target); if (!target) return OPAL_PARAMETER; } else if (cpu_target != -1) { return OPAL_PARAMETER; } if (quiesce_type == QUIESCE_HOLD || quiesce_type == QUIESCE_REJECT) { if (cmpxchg32(&opal_quiesce_state, 0, quiesce_type) != 0) { if (opal_quiesce_owner != cpu->pir) { /* * Nested is allowed for now just for * internal uses, so an error is returned * for OS callers, but no error message * printed if we are nested. 
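 *
 * (For reference, the overall flow handled below: the owner takes the
 * quiesce state with QUIESCE_HOLD or QUIESCE_REJECT, other CPUs are
 * then held at, or rejected from, OPAL entry; QUIESCE_LOCK_BREAK may
 * be used once all CPUs (target -1) are quiesced, and QUIESCE_RESUME
 * releases everyone and clears the state.)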
*/ printf("opal_quiesce already quiescing\n"); } return OPAL_BUSY; } opal_quiesce_owner = cpu->pir; opal_quiesce_target = cpu_target; } if (opal_quiesce_owner != cpu->pir) { printf("opal_quiesce CPU does not own quiesce state (must call QUIESCE_HOLD or QUIESCE_REJECT)\n"); return OPAL_BUSY; } /* Okay now we own the quiesce state */ if (quiesce_type == QUIESCE_RESUME || quiesce_type == QUIESCE_RESUME_FAST_REBOOT) { bust_locks = false; sync(); /* release barrier vs opal entry */ if (target) { target->quiesce_opal_call = false; } else { for_each_cpu(c) { if (quiesce_type == QUIESCE_RESUME_FAST_REBOOT) c->in_opal_call = 0; if (c == cpu) { assert(!c->quiesce_opal_call); continue; } c->quiesce_opal_call = false; } } sync(); opal_quiesce_state = 0; return OPAL_SUCCESS; } if (quiesce_type == QUIESCE_LOCK_BREAK) { if (opal_quiesce_target != -1) { printf("opal_quiesce has not quiesced all CPUs (must target -1)\n"); return OPAL_BUSY; } bust_locks = true; return OPAL_SUCCESS; } if (target) { target->quiesce_opal_call = true; } else { for_each_cpu(c) { if (c == cpu) continue; c->quiesce_opal_call = true; } } sync(); /* Order stores to quiesce_opal_call vs loads of in_opal_call */ end = mftb() + msecs_to_tb(1000); smt_lowest(); if (target) { while (target->in_opal_call) { if (tb_compare(mftb(), end) == TB_AAFTERB) { printf("OPAL quiesce CPU:%04x stuck in OPAL\n", target->pir); stuck = true; break; } barrier(); } } else { for_each_cpu(c) { if (c == cpu) continue; while (c->in_opal_call) { if (tb_compare(mftb(), end) == TB_AAFTERB) { printf("OPAL quiesce CPU:%04x stuck in OPAL\n", c->pir); stuck = true; break; } barrier(); } } } smt_medium(); sync(); /* acquire barrier vs opal entry */ if (stuck) { printf("OPAL quiesce could not kick all CPUs out of OPAL\n"); return OPAL_PARTIAL; } return OPAL_SUCCESS; } opal_call(OPAL_QUIESCE, opal_quiesce, 2); void __opal_register(uint64_t token, void *func, unsigned int nargs) { assert(token <= OPAL_LAST); opal_branch_table[token] = function_entry_address(func); opal_num_args[token] = nargs; } /* * add_opal_firmware_exports_node: adds properties to the device-tree which * the OS will then change into sysfs nodes. * The properties must be placed under /ibm,opal/firmware/exports. * The new sysfs nodes are created under /opal/exports. * To be correctly exported the properties must contain: * name * base memory location (u64) * size (u64) */ static void add_opal_firmware_exports_node(struct dt_node *node) { struct dt_node *exports = dt_new(node, "exports"); uint64_t sym_start = (uint64_t)__sym_map_start; uint64_t sym_size = (uint64_t)__sym_map_end - sym_start; /* * These property names will be used by Linux as the user-visible file * name, so make them meaningful if possible. We use _ as the separator * here to remain consistent with existing file names in /sys/opal. 
*/ dt_add_property_u64s(exports, "symbol_map", sym_start, sym_size); dt_add_property_u64s(exports, "hdat_map", SPIRA_HEAP_BASE, SPIRA_HEAP_SIZE); } static void add_opal_firmware_node(void) { struct dt_node *firmware = dt_new(opal_node, "firmware"); uint64_t sym_start = (uint64_t)__sym_map_start; uint64_t sym_size = (uint64_t)__sym_map_end - sym_start; dt_add_property_string(firmware, "compatible", "ibm,opal-firmware"); dt_add_property_string(firmware, "name", "firmware"); dt_add_property_string(firmware, "version", version); /* * As previous OS versions use symbol-map located at * /ibm,opal/firmware we will keep a copy of symbol-map here * for backwards compatibility */ dt_add_property_u64s(firmware, "symbol-map", sym_start, sym_size); add_opal_firmware_exports_node(firmware); } void add_opal_node(void) { uint64_t base, entry, size; extern uint32_t opal_entry; struct dt_node *opal_event; /* XXX TODO: Reorg this. We should create the base OPAL * node early on, and have the various sub modules populate * their own entries (console etc...) * * The logic of which console backend to use should be * extracted */ entry = (uint64_t)&opal_entry; base = SKIBOOT_BASE; size = (CPU_STACKS_BASE + (uint64_t)(cpu_max_pir + 1) * STACK_SIZE) - SKIBOOT_BASE; opal_node = dt_new_check(dt_root, "ibm,opal"); dt_add_property_cells(opal_node, "#address-cells", 0); dt_add_property_cells(opal_node, "#size-cells", 0); if (proc_gen < proc_gen_p9) dt_add_property_strings(opal_node, "compatible", "ibm,opal-v2", "ibm,opal-v3"); else dt_add_property_strings(opal_node, "compatible", "ibm,opal-v3"); dt_add_property_cells(opal_node, "opal-msg-async-num", OPAL_MAX_ASYNC_COMP); dt_add_property_cells(opal_node, "opal-msg-size", sizeof(struct opal_msg)); dt_add_property_u64(opal_node, "opal-base-address", base); dt_add_property_u64(opal_node, "opal-entry-address", entry); dt_add_property_u64(opal_node, "opal-runtime-size", size); /* Add irqchip interrupt controller */ opal_event = dt_new(opal_node, "event"); dt_add_property_strings(opal_event, "compatible", "ibm,opal-event"); dt_add_property_cells(opal_event, "#interrupt-cells", 0x1); dt_add_property(opal_event, "interrupt-controller", 0, 0); add_opal_firmware_node(); add_associativity_ref_point(); memcons_add_properties(); } static struct lock evt_lock = LOCK_UNLOCKED; void opal_update_pending_evt(uint64_t evt_mask, uint64_t evt_values) { uint64_t new_evts; lock(&evt_lock); new_evts = (opal_pending_events & ~evt_mask) | evt_values; if (opal_pending_events != new_evts) { uint64_t tok; #ifdef OPAL_TRACE_EVT_CHG printf("OPAL: Evt change: 0x%016llx -> 0x%016llx\n", opal_pending_events, new_evts); #endif /* * If an event gets *set* while we are in a different call chain * than opal_handle_interrupt() or opal_handle_hmi(), then we * artificially generate an interrupt (OCC interrupt specifically) * to ensure that Linux properly broadcast the event change internally */ if ((new_evts & ~opal_pending_events) != 0) { tok = this_cpu()->current_token; if (tok != OPAL_HANDLE_INTERRUPT && tok != OPAL_HANDLE_HMI) occ_send_dummy_interrupt(); } opal_pending_events = new_evts; } unlock(&evt_lock); } uint64_t opal_dynamic_event_alloc(void) { uint64_t new_event; int n; lock(&evt_lock); /* Create the event mask. This set-bit will be within the event mask * iff there are free events, or out of the mask if there are no free * events. If opal_dynamic_events is all ones (ie, all events are * dynamic, and allocated), then ilog2 will return -1, and we'll have a * zero mask. 
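 *
 * Worked example: with nothing allocated, ~opal_dynamic_events is all
 * ones, ilog2() returns 63 and we hand out bit 63, which lies inside
 * opal_dynamic_events_mask (bits 63..32). Once all 32 dynamic bits are
 * taken, the highest clear bit is bit 31, which falls outside the
 * mask, so the range check below fails and we return 0.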
*/ n = ilog2(~opal_dynamic_events); new_event = 1ull << n; /* Ensure we're still within the allocatable dynamic events range */ if (new_event & opal_dynamic_events_mask) opal_dynamic_events |= new_event; else new_event = 0; unlock(&evt_lock); return new_event; } void opal_dynamic_event_free(uint64_t event) { lock(&evt_lock); opal_dynamic_events &= ~event; unlock(&evt_lock); } static uint64_t opal_test_func(uint64_t arg) { printf("OPAL: Test function called with arg 0x%llx\n", arg); return 0xfeedf00d; } opal_call(OPAL_TEST, opal_test_func, 1); struct opal_poll_entry { struct list_node link; void (*poller)(void *data); void *data; }; static struct list_head opal_pollers = LIST_HEAD_INIT(opal_pollers); static struct lock opal_poll_lock = LOCK_UNLOCKED; void opal_add_poller(void (*poller)(void *data), void *data) { struct opal_poll_entry *ent; ent = zalloc(sizeof(struct opal_poll_entry)); assert(ent); ent->poller = poller; ent->data = data; lock(&opal_poll_lock); list_add_tail(&opal_pollers, &ent->link); unlock(&opal_poll_lock); } void opal_del_poller(void (*poller)(void *data)) { struct opal_poll_entry *ent; /* XXX This is currently unused. To solve various "interesting" * locking issues, the pollers are run locklessly, so if we were * to free them, we would have to be careful, using something * akin to RCU to synchronize with other OPAL entries. For now * if anybody uses it, print a warning and leak the entry, don't * free it. */ /** * @fwts-label UnsupportedOPALdelpoller * @fwts-advice Currently removing a poller is DANGEROUS and * MUST NOT be done in production firmware. */ prlog(PR_ALERT, "WARNING: Unsupported opal_del_poller." " Interesting locking issues, don't call this.\n"); lock(&opal_poll_lock); list_for_each(&opal_pollers, ent, link) { if (ent->poller == poller) { list_del(&ent->link); /* free(ent); */ break; } } unlock(&opal_poll_lock); } void opal_run_pollers(void) { struct opal_poll_entry *poll_ent; static int pollers_with_lock_warnings = 0; static int poller_recursion = 0; /* Don't re-enter on this CPU */ if (this_cpu()->in_poller && poller_recursion < 16) { /** * @fwts-label OPALPollerRecursion * @fwts-advice Recursion detected in opal_run_pollers(). This * indicates a bug in OPAL where a poller ended up running * pollers, which doesn't lead anywhere good. */ prlog(PR_ERR, "OPAL: Poller recursion detected.\n"); backtrace(); poller_recursion++; if (poller_recursion == 16) prlog(PR_ERR, "OPAL: Squashing future poller recursion warnings (>16).\n"); return; } this_cpu()->in_poller = true; if (!list_empty(&this_cpu()->locks_held) && pollers_with_lock_warnings < 64) { /** * @fwts-label OPALPollerWithLock * @fwts-advice opal_run_pollers() was called with a lock * held, which could lead to deadlock if not excessively * lucky/careful. */ prlog(PR_ERR, "Running pollers with lock held !\n"); dump_locks_list(); backtrace(); pollers_with_lock_warnings++; if (pollers_with_lock_warnings == 64) { /** * @fwts-label OPALPollerWithLock64 * @fwts-advice Your firmware is buggy, see the 64 * messages complaining about opal_run_pollers with * lock held. 
*/ prlog(PR_ERR, "opal_run_pollers with lock run 64 " "times, disabling warning.\n"); } } /* We run the timers first */ check_timers(false); /* The pollers are run lokelessly, see comment in opal_del_poller */ list_for_each(&opal_pollers, poll_ent, link) poll_ent->poller(poll_ent->data); /* Disable poller flag */ this_cpu()->in_poller = false; /* On debug builds, print max stack usage */ check_stacks(); } static int64_t opal_poll_events(__be64 *outstanding_event_mask) { if (!opal_addr_valid(outstanding_event_mask)) return OPAL_PARAMETER; /* Check if we need to trigger an attn for test use */ if (attn_trigger == 0xdeadbeef) { prlog(PR_EMERG, "Triggering attn\n"); assert(false); } /* Test the host initiated reset */ if (hir_trigger == 0xdeadbeef) { uint32_t plid = log_simple_error(&e_info(OPAL_INJECTED_HIR), "SURV: Injected HIR, initiating FSP R/R\n"); fsp_trigger_reset(plid); hir_trigger = 0; } opal_run_pollers(); if (outstanding_event_mask) *outstanding_event_mask = cpu_to_be64(opal_pending_events); return OPAL_SUCCESS; } opal_call(OPAL_POLL_EVENTS, opal_poll_events, 1); static int64_t opal_check_token(uint64_t token) { if (token > OPAL_LAST) return OPAL_TOKEN_ABSENT; if (opal_branch_table[token]) return OPAL_TOKEN_PRESENT; return OPAL_TOKEN_ABSENT; } opal_call(OPAL_CHECK_TOKEN, opal_check_token, 1); struct opal_sync_entry { struct list_node link; bool (*notify)(void *data); void *data; }; static struct list_head opal_syncers = LIST_HEAD_INIT(opal_syncers); void opal_add_host_sync_notifier(bool (*notify)(void *data), void *data) { struct opal_sync_entry *ent; ent = zalloc(sizeof(struct opal_sync_entry)); assert(ent); ent->notify = notify; ent->data = data; list_add_tail(&opal_syncers, &ent->link); } void opal_del_host_sync_notifier(bool (*notify)(void *data)) { struct opal_sync_entry *ent; list_for_each(&opal_syncers, ent, link) { if (ent->notify == notify) { list_del(&ent->link); free(ent); return; } } } /* * OPAL call to handle host kexec'ing scenario */ static int64_t opal_sync_host_reboot(void) { struct opal_sync_entry *ent; bool ret = true; list_for_each(&opal_syncers, ent, link) ret &= ent->notify(ent->data); if (ret) return OPAL_SUCCESS; else return OPAL_BUSY_EVENT; } opal_call(OPAL_SYNC_HOST_REBOOT, opal_sync_host_reboot, 0); skiboot-5.10-rc4/core/pci-dt-slot.c000066400000000000000000000132031324317060200170270ustar00rootroot00000000000000/* Copyright 2017 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #undef pr_fmt #define pr_fmt(fmt) "DT-SLOT: " fmt #define PCIDBG(_p, _bdfn, fmt, a...) 
\ prlog(PR_DEBUG, "PHB#%04x:%02x:%02x.%x " fmt, \ (_p)->opal_id, \ ((_bdfn) >> 8) & 0xff, \ ((_bdfn) >> 3) & 0x1f, (_bdfn) & 0x7, ## a) struct dt_node *dt_slots; static struct dt_node *map_phb_to_slot(struct phb *phb) { uint32_t chip_id = dt_get_chip_id(phb->dt_node); uint32_t phb_idx = dt_prop_get_u32_def(phb->dt_node, "ibm,phb-index", 0); struct dt_node *slot_node; if (!dt_slots) dt_slots = dt_find_by_path(dt_root, "/ibm,pcie-slots"); dt_for_each_child(dt_slots, slot_node) { u32 reg[2]; if (!dt_node_is_compatible(slot_node, "ibm,pcie-root-port")) continue; reg[0] = dt_prop_get_cell(slot_node, "reg", 0); reg[1] = dt_prop_get_cell(slot_node, "reg", 1); if (reg[0] == chip_id && reg[1] == phb_idx) return slot_node; } return NULL; } static struct dt_node *map_downport_to_slot(struct phb *phb, struct pci_device *pd) { struct dt_node *bus_node, *child; struct pci_device *cursor; uint32_t port_dev_id; /* * Downports are a little bit special since we need to figure * out which PCI device corresponds to which down port in the * slot map. * * XXX: I'm assuming the ordering of port IDs and probed * PCIe switch downstream devices is the same. We should * check what we actually get in the HDAT. */ list_for_each(&pd->parent->children, cursor, link) if (cursor == pd) break; /* the child should always be on the parent's child list */ assert(cursor); port_dev_id = (cursor->bdfn >> 3) & 0x1f; bus_node = map_pci_dev_to_slot(phb, pd->parent); if (!bus_node) return NULL; dt_for_each_child(bus_node, child) if (dt_prop_get_u32(child, "reg") == port_dev_id) return child; /* unused downport */ return NULL; } static struct dt_node *__map_pci_dev_to_slot(struct phb *phb, struct pci_device *pd) { struct dt_node *child, *bus_node, *wildcard= NULL; if (!pd || !pd->parent || pd->dev_type == PCIE_TYPE_ROOT_PORT) return map_phb_to_slot(phb); if (pd->dev_type == PCIE_TYPE_SWITCH_DNPORT) return map_downport_to_slot(phb, pd); /* * For matching against devices always use the 0th function. * This is necessary since some functions may have a different * VDID to the base device. e.g. The DMA engines in PLX switches */ if (pd->bdfn & 0x7) { struct pci_device *cursor; PCIDBG(phb, pd->bdfn, "mapping fn %x to 0th fn (%x)\n", pd->bdfn, pd->bdfn & (~0x7)); list_for_each(&pd->parent->children, cursor, link) if ((pd->bdfn & ~0x7) == cursor->bdfn) return map_pci_dev_to_slot(phb, cursor); return NULL; } /* No slot information for this device. Might be a firmware bug */ bus_node = map_pci_dev_to_slot(phb, pd->parent); if (!bus_node) return NULL; /* * If this PCI device is mounted on a card the parent "bus" * may actually be a slot or builtin. */ if (list_empty(&bus_node->children)) return bus_node; /* find the device in the parent bus node */ dt_for_each_child(bus_node, child) { u32 vdid; /* "pluggable" and "builtin" without unit addrs are wildcards */ if (!dt_has_node_property(child, "reg", NULL)) { if (wildcard) { prerror("Duplicate wildcard entry! Already have %s, found %s", wildcard->name, child->name); assert(0); } wildcard = child; continue; } /* NB: the pci_device vdid is did,vid rather than vid,did */ vdid = dt_prop_get_cell(child, "reg", 1) << 16 | dt_prop_get_cell(child, "reg", 0); if (vdid == pd->vdid) return child; } if (!wildcard) PCIDBG(phb, pd->bdfn, "Unable to find a slot for device %.4x:%.4x\n", (pd->vdid & 0xffff0000) >> 16, pd->vdid & 0xffff); return wildcard; } struct dt_node *map_pci_dev_to_slot(struct phb *phb, struct pci_device *pd) { uint32_t bdfn = pd ? 
pd->bdfn : 0; struct dt_node *n; char *path; if (pd && pd->slot && pd->slot->data) return pd->slot->data; PCIDBG(phb, bdfn, "Finding slot\n"); n = __map_pci_dev_to_slot(phb, pd); if (!n) { PCIDBG(phb, bdfn, "No slot found!\n"); } else { path = dt_get_path(n); PCIDBG(phb, bdfn, "Slot found %s\n", path); free(path); } return n; } int __print_slot(struct phb *phb, struct pci_device *pd, void *userdata); int __print_slot(struct phb *phb, struct pci_device *pd, void __unused *userdata) { struct dt_node *node; struct dt_node *pnode; char *c = NULL; u32 phandle = 0; if (!pd) return 0; node = map_pci_dev_to_slot(phb, pd); /* at this point all node associations should be done */ if (pd->dn && dt_has_node_property(pd->dn, "ibm,pcie-slot", NULL)) { phandle = dt_prop_get_u32(pd->dn, "ibm,pcie-slot"); pnode = dt_find_by_phandle(dt_root, phandle); assert(node == pnode); } if (node) c = dt_get_path(node); PCIDBG(phb, pd->bdfn, "Mapped to slot %s (%x)\n", c ? c : "", phandle); free(c); return 0; } skiboot-5.10-rc4/core/pci-iov.c000066400000000000000000000160371324317060200162460ustar00rootroot00000000000000/* Copyright 2013-2016 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include /* * Tackle the VF's MPS in PCIe capability. The field is read only. * This function caches what is written and returns the cached * MPS on read. */ static int64_t pci_iov_vf_devctl(void *dev, struct pci_cfg_reg_filter *pcrf, uint32_t offset, uint32_t len, uint32_t *data, bool write) { struct pci_device *vf = (struct pci_device *)dev; uint32_t pos = pci_cap(vf, PCI_CFG_CAP_ID_EXP, false); uint8_t *pcache; if (offset != (pos + PCICAP_EXP_DEVCTL)) return OPAL_PARTIAL; pcache = &pcrf->data[0]; if (write) { *pcache = ((uint8_t)(*data >> (8 * (4 - len)))) & PCICAP_EXP_DEVCTL_MPS; } else { *data &= ~(PCICAP_EXP_DEVCTL_MPS << (8 * (4 - len))); *data |= (((uint32_t)(*pcache & PCICAP_EXP_DEVCTL_MPS)) << (8 * (4 - len))); } return OPAL_SUCCESS; } static void pci_iov_vf_quirk(struct phb *phb, struct pci_device *vf) { struct pci_cfg_reg_filter *pcrf; uint32_t pos; if (!pci_has_cap(vf, PCI_CFG_CAP_ID_EXP, false)) return; /* * On Mellanox MT27500 Family [ConnectX-3], its VF's MPS field in * the corresponding config register is readonly. The MPS for PF/VF * are usually different. We are introducing a quirk to make them * look same to avoid confusion. */ if (vf->vdid != 0x100315b3) return; pos = pci_cap(vf, PCI_CFG_CAP_ID_EXP, false); pcrf = pci_add_cfg_reg_filter(vf, pos + PCICAP_EXP_DEVCTL, 4, PCI_REG_FLAG_MASK, pci_iov_vf_devctl); if (!pcrf) prlog(PR_WARNING, "%s: Missed DEVCTL filter on %04x:%02x:%02x.%01x\n", __func__, phb->opal_id, (vf->bdfn >> 8), ((vf->bdfn >> 3) & 0x1f), (vf->bdfn & 0x7)); } /* * Update the SRIOV parameters that change when the number of * VFs is configured. 
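 *
 * These parameters determine where the VFs appear in config space:
 * VF i gets routing ID pd->bdfn + offset + stride * i (see
 * pci_iov_change() below). With purely illustrative numbers, a PF at
 * bdfn 0x0800 with offset 4 and stride 2 yields VFs at 0x0804,
 * 0x0806, and so on.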
*/ static bool pci_iov_update_parameters(struct pci_iov *iov) { struct phb *phb = iov->phb; uint16_t bdfn = iov->pd->bdfn; uint32_t pos = iov->pos; uint16_t val; bool enabled; pci_cfg_read16(phb, bdfn, pos + PCIECAP_SRIOV_CTRL, &val); enabled = !!(val & PCIECAP_SRIOV_CTRL_VFE); if (iov->enabled == enabled) return false; if (enabled) { pci_cfg_read16(phb, bdfn, pos + PCIECAP_SRIOV_INITIAL_VF, &iov->init_VFs); pci_cfg_read16(phb, bdfn, pos + PCIECAP_SRIOV_NUM_VF, &iov->num_VFs); pci_cfg_read16(phb, bdfn, pos + PCIECAP_SRIOV_VF_OFFSET, &iov->offset); pci_cfg_read16(phb, bdfn, pos + PCIECAP_SRIOV_VF_STRIDE, &iov->stride); } else { iov->init_VFs = 0; iov->num_VFs = 0; iov->offset = 0; iov->stride = 0; } iov->enabled = enabled; return true; } static int64_t pci_iov_change(void *dev __unused, struct pci_cfg_reg_filter *pcrf, uint32_t offset __unused, uint32_t len __unused, uint32_t *data __unused, bool write __unused) { struct pci_iov *iov = (struct pci_iov *)pcrf->data; struct phb *phb = iov->phb; struct pci_device *pd = iov->pd; struct pci_device *vf, *tmp; uint32_t i; bool changed; /* Update SRIOV variable parameters */ changed = pci_iov_update_parameters(iov); if (!changed) return OPAL_PARTIAL; /* Remove all VFs that have been attached to the parent */ if (!iov->enabled) { list_for_each_safe(&pd->children, vf, tmp, link) list_del(&vf->link); return OPAL_PARTIAL; } /* Initialize the VFs and attach them to parent */ for (changed = false, i = 0; i < iov->num_VFs; i++) { vf = &iov->VFs[i]; vf->bdfn = pd->bdfn + iov->offset + iov->stride * i; list_add_tail(&pd->children, &vf->link); /* * We don't populate the capabilities again if they have * been existing, to save time. Also, we need delay for * 100ms before the VF's config space becomes ready. */ if (!pci_has_cap(vf, PCI_CFG_CAP_ID_EXP, false)) { if (!changed) { changed = !changed; time_wait_ms(100); } pci_init_capabilities(phb, vf); pci_iov_vf_quirk(phb, vf); } /* Call PHB hook */ if (phb->ops->device_init) phb->ops->device_init(phb, pd, NULL); } return OPAL_PARTIAL; } /* * This function is called with disabled SRIOV capability. So the VF's * config address isn't finalized and its config space isn't accessible. 
*/ static void pci_iov_init_VF(struct pci_device *pd, struct pci_device *vf) { vf->is_bridge = false; vf->is_multifunction = false; vf->is_vf = true; vf->dev_type = PCIE_TYPE_ENDPOINT; vf->scan_map = -1; vf->vdid = pd->vdid; vf->sub_vdid = pd->sub_vdid; vf->class = pd->class; vf->dn = NULL; vf->slot = NULL; vf->parent = pd; vf->phb = pd->phb; list_head_init(&vf->pcrf); list_head_init(&vf->children); } static void pci_free_iov_cap(void *data) { struct pci_iov *iov = data; free(iov->VFs); free(iov); } void pci_init_iov_cap(struct phb *phb, struct pci_device *pd) { int64_t pos; struct pci_iov *iov; struct pci_cfg_reg_filter *pcrf; uint32_t i; /* Search for SRIOV capability */ if (!pci_has_cap(pd, PCI_CFG_CAP_ID_EXP, false)) return; pos = pci_find_ecap(phb, pd->bdfn, PCIECAP_ID_SRIOV, NULL); if (pos <= 0) return; /* Allocate IOV */ iov = zalloc(sizeof(*iov)); if (!iov) { prlog(PR_ERR, "%s: Cannot alloc IOV for %04x:%02x:%02x.%01x\n", __func__, phb->opal_id, (pd->bdfn >> 8), ((pd->bdfn >> 3) & 0x1f), (pd->bdfn & 0x7)); return; } /* Allocate VFs */ pci_cfg_read16(phb, pd->bdfn, pos + PCIECAP_SRIOV_TOTAL_VF, &iov->total_VFs); iov->VFs = zalloc(sizeof(*iov->VFs) * iov->total_VFs); if (!iov->VFs) { prlog(PR_ERR, "%s: Cannot alloc %d VFs for %04x:%02x:%02x.%01x\n", __func__, iov->total_VFs, phb->opal_id, (pd->bdfn >> 8), ((pd->bdfn >> 3) & 0x1f), (pd->bdfn & 0x7)); free(iov); return; } /* Initialize VFs */ for (i = 0; i < iov->total_VFs; i++) pci_iov_init_VF(pd, &iov->VFs[i]); /* Register filter for enabling or disabling SRIOV capability */ pcrf = pci_add_cfg_reg_filter(pd, pos + PCIECAP_SRIOV_CTRL, 2, PCI_REG_FLAG_WRITE, pci_iov_change); if (!pcrf) { prlog(PR_ERR, "%s: Cannot set filter on %04x:%02x:%02x.%01x\n", __func__, phb->opal_id, (pd->bdfn >> 8), ((pd->bdfn >> 3) & 0x1f), (pd->bdfn & 0x7)); free(iov->VFs); free(iov); return; } /* Associate filter and IOV capability */ pcrf->data = (void *)iov; /* * Retrieve the number of VFs and other information if applicable. * Register the SRIOV capability in the mean while. */ iov->phb = phb; iov->pd = pd; iov->pos = pos; iov->enabled = false; pci_iov_update_parameters(iov); pci_set_cap(pd, PCIECAP_ID_SRIOV, pos, iov, pci_free_iov_cap, true); } skiboot-5.10-rc4/core/pci-opal.c000066400000000000000000000637571324317060200164170ustar00rootroot00000000000000/* Copyright 2013-2014 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #include #include #include #include #define OPAL_PCICFG_ACCESS_READ(op, cb, type) \ static int64_t opal_pci_config_##op(uint64_t phb_id, \ uint64_t bus_dev_func, \ uint64_t offset, type data) \ { \ struct phb *phb = pci_get_phb(phb_id); \ int64_t rc; \ \ if (!opal_addr_valid((void *)data)) \ return OPAL_PARAMETER; \ \ if (!phb) \ return OPAL_PARAMETER; \ phb_lock(phb); \ rc = phb->ops->cfg_##cb(phb, bus_dev_func, offset, data); \ phb_unlock(phb); \ \ return rc; \ } #define OPAL_PCICFG_ACCESS_WRITE(op, cb, type) \ static int64_t opal_pci_config_##op(uint64_t phb_id, \ uint64_t bus_dev_func, \ uint64_t offset, type data) \ { \ struct phb *phb = pci_get_phb(phb_id); \ int64_t rc; \ \ if (!phb) \ return OPAL_PARAMETER; \ phb_lock(phb); \ rc = phb->ops->cfg_##cb(phb, bus_dev_func, offset, data); \ phb_unlock(phb); \ \ return rc; \ } OPAL_PCICFG_ACCESS_READ(read_byte, read8, uint8_t *) OPAL_PCICFG_ACCESS_READ(read_half_word, read16, uint16_t *) OPAL_PCICFG_ACCESS_READ(read_word, read32, uint32_t *) OPAL_PCICFG_ACCESS_WRITE(write_byte, write8, uint8_t) OPAL_PCICFG_ACCESS_WRITE(write_half_word, write16, uint16_t) OPAL_PCICFG_ACCESS_WRITE(write_word, write32, uint32_t) opal_call(OPAL_PCI_CONFIG_READ_BYTE, opal_pci_config_read_byte, 4); opal_call(OPAL_PCI_CONFIG_READ_HALF_WORD, opal_pci_config_read_half_word, 4); opal_call(OPAL_PCI_CONFIG_READ_WORD, opal_pci_config_read_word, 4); opal_call(OPAL_PCI_CONFIG_WRITE_BYTE, opal_pci_config_write_byte, 4); opal_call(OPAL_PCI_CONFIG_WRITE_HALF_WORD, opal_pci_config_write_half_word, 4); opal_call(OPAL_PCI_CONFIG_WRITE_WORD, opal_pci_config_write_word, 4); static struct lock opal_eeh_evt_lock = LOCK_UNLOCKED; static uint64_t opal_eeh_evt = 0; void opal_pci_eeh_set_evt(uint64_t phb_id) { lock(&opal_eeh_evt_lock); opal_eeh_evt |= 1ULL << phb_id; opal_update_pending_evt(OPAL_EVENT_PCI_ERROR, OPAL_EVENT_PCI_ERROR); unlock(&opal_eeh_evt_lock); } void opal_pci_eeh_clear_evt(uint64_t phb_id) { lock(&opal_eeh_evt_lock); opal_eeh_evt &= ~(1ULL << phb_id); if (!opal_eeh_evt) opal_update_pending_evt(OPAL_EVENT_PCI_ERROR, 0); unlock(&opal_eeh_evt_lock); } static int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number, uint8_t *freeze_state, uint16_t *pci_error_type, uint64_t *phb_status) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!opal_addr_valid(freeze_state) || !opal_addr_valid(pci_error_type) || !opal_addr_valid(phb_status)) return OPAL_PARAMETER; if (!phb) return OPAL_PARAMETER; if (!phb->ops->eeh_freeze_status) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->eeh_freeze_status(phb, pe_number, freeze_state, pci_error_type, NULL, phb_status); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_EEH_FREEZE_STATUS, opal_pci_eeh_freeze_status, 5); static int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number, uint64_t eeh_action_token) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->eeh_freeze_clear) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->eeh_freeze_clear(phb, pe_number, eeh_action_token); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_EEH_FREEZE_CLEAR, opal_pci_eeh_freeze_clear, 3); static int64_t opal_pci_eeh_freeze_set(uint64_t phb_id, uint64_t pe_number, uint64_t eeh_action_token) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->eeh_freeze_set) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->eeh_freeze_set(phb, pe_number, eeh_action_token); phb_unlock(phb); 
return rc; } opal_call(OPAL_PCI_EEH_FREEZE_SET, opal_pci_eeh_freeze_set, 3); static int64_t opal_pci_err_inject(uint64_t phb_id, uint64_t pe_number, uint32_t type, uint32_t func, uint64_t addr, uint64_t mask) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops || !phb->ops->err_inject) return OPAL_UNSUPPORTED; if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR && type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) return OPAL_PARAMETER; phb_lock(phb); rc = phb->ops->err_inject(phb, pe_number, type, func, addr, mask); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_ERR_INJECT, opal_pci_err_inject, 6); static int64_t opal_pci_phb_mmio_enable(uint64_t phb_id, uint16_t window_type, uint16_t window_num, uint16_t enable) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->phb_mmio_enable) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->phb_mmio_enable(phb, window_type, window_num, enable); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_PHB_MMIO_ENABLE, opal_pci_phb_mmio_enable, 4); static int64_t opal_pci_set_phb_mem_window(uint64_t phb_id, uint16_t window_type, uint16_t window_num, uint64_t addr, uint64_t pci_addr, uint64_t size) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->set_phb_mem_window) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->set_phb_mem_window(phb, window_type, window_num, addr, pci_addr, size); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_SET_PHB_MEM_WINDOW, opal_pci_set_phb_mem_window, 6); static int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint64_t pe_number, uint16_t window_type, uint16_t window_num, uint16_t segment_num) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->map_pe_mmio_window) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->map_pe_mmio_window(phb, pe_number, window_type, window_num, segment_num); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_MAP_PE_MMIO_WINDOW, opal_pci_map_pe_mmio_window, 5); static int64_t opal_pci_set_phb_table_memory(uint64_t phb_id __unused, uint64_t rtt_addr __unused, uint64_t ivt_addr __unused, uint64_t ivt_len __unused, uint64_t rej_array_addr __unused, uint64_t peltv_addr __unused) { /* IODA2 (P8) stuff, TODO */ return OPAL_UNSUPPORTED; } opal_call(OPAL_PCI_SET_PHB_TABLE_MEMORY, opal_pci_set_phb_table_memory, 6); static int64_t opal_pci_set_pe(uint64_t phb_id, uint64_t pe_number, uint64_t bus_dev_func, uint8_t bus_compare, uint8_t dev_compare, uint8_t func_compare, uint8_t pe_action) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->set_pe) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->set_pe(phb, pe_number, bus_dev_func, bus_compare, dev_compare, func_compare, pe_action); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_SET_PE, opal_pci_set_pe, 7); static int64_t opal_pci_set_peltv(uint64_t phb_id, uint32_t parent_pe, uint32_t child_pe, uint8_t state) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->set_peltv) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->set_peltv(phb, parent_pe, child_pe, state); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_SET_PELTV, opal_pci_set_peltv, 4); static int64_t opal_pci_set_mve(uint64_t phb_id, uint32_t mve_number, uint64_t pe_number) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->set_mve) return 
OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->set_mve(phb, mve_number, pe_number); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_SET_MVE, opal_pci_set_mve, 3); static int64_t opal_pci_set_mve_enable(uint64_t phb_id, uint32_t mve_number, uint32_t state) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->set_mve_enable) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->set_mve_enable(phb, mve_number, state); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_SET_MVE_ENABLE, opal_pci_set_mve_enable, 3); static int64_t opal_pci_get_xive_reissue(uint64_t phb_id __unused, uint32_t xive_number __unused, uint8_t *p_bit __unused, uint8_t *q_bit __unused) { /* IODA2 (P8) stuff, TODO */ return OPAL_UNSUPPORTED; } opal_call(OPAL_PCI_GET_XIVE_REISSUE, opal_pci_get_xive_reissue, 4); static int64_t opal_pci_set_xive_reissue(uint64_t phb_id __unused, uint32_t xive_number __unused, uint8_t p_bit __unused, uint8_t q_bit __unused) { /* IODA2 (P8) stuff, TODO */ return OPAL_UNSUPPORTED; } opal_call(OPAL_PCI_SET_XIVE_REISSUE, opal_pci_set_xive_reissue, 4); static int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hwirq) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->pci_msi_eoi) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->pci_msi_eoi(phb, hwirq); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_MSI_EOI, opal_pci_msi_eoi, 2); static int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type, uint64_t pe_number, uint32_t tce_size, uint64_t dma_addr, uint32_t npages) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->tce_kill) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->tce_kill(phb, kill_type, pe_number, tce_size, dma_addr, npages); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_TCE_KILL, opal_pci_tce_kill, 6); static int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint64_t pe_number, uint32_t xive_num) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->set_xive_pe) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->set_xive_pe(phb, pe_number, xive_num); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_SET_XIVE_PE, opal_pci_set_xive_pe, 3); static int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num, int32_t *interrupt_source_number) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!opal_addr_valid(interrupt_source_number)) return OPAL_PARAMETER; if (!phb) return OPAL_PARAMETER; if (!phb->ops->get_xive_source) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->get_xive_source(phb, xive_num, interrupt_source_number); phb_unlock(phb); return rc; } opal_call(OPAL_GET_XIVE_SOURCE, opal_get_xive_source, 3); static int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num, uint8_t msi_range, uint32_t *msi_address, uint32_t *message_data) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!opal_addr_valid(msi_address) || !opal_addr_valid(message_data)) return OPAL_PARAMETER; if (!phb) return OPAL_PARAMETER; if (!phb->ops->get_msi_32) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->get_msi_32(phb, mve_number, xive_num, msi_range, msi_address, message_data); phb_unlock(phb); return rc; } opal_call(OPAL_GET_MSI_32, opal_get_msi_32, 6); static int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num, uint8_t msi_range, uint64_t *msi_address, uint32_t *message_data) { struct phb *phb = pci_get_phb(phb_id); 
int64_t rc; if (!opal_addr_valid(msi_address) || !opal_addr_valid(message_data)) return OPAL_PARAMETER; if (!phb) return OPAL_PARAMETER; if (!phb->ops->get_msi_64) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->get_msi_64(phb, mve_number, xive_num, msi_range, msi_address, message_data); phb_unlock(phb); return rc; } opal_call(OPAL_GET_MSI_64, opal_get_msi_64, 6); static int64_t opal_pci_map_pe_dma_window(uint64_t phb_id, uint64_t pe_number, uint16_t window_id, uint16_t tce_levels, uint64_t tce_table_addr, uint64_t tce_table_size, uint64_t tce_page_size) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->map_pe_dma_window) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->map_pe_dma_window(phb, pe_number, window_id, tce_levels, tce_table_addr, tce_table_size, tce_page_size); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_MAP_PE_DMA_WINDOW, opal_pci_map_pe_dma_window, 7); static int64_t opal_pci_map_pe_dma_window_real(uint64_t phb_id, uint64_t pe_number, uint16_t window_id, uint64_t pci_start_addr, uint64_t pci_mem_size) { struct phb *phb = pci_get_phb(phb_id); int64_t rc; if (!phb) return OPAL_PARAMETER; if (!phb->ops->map_pe_dma_window_real) return OPAL_UNSUPPORTED; phb_lock(phb); rc = phb->ops->map_pe_dma_window_real(phb, pe_number, window_id, pci_start_addr, pci_mem_size); phb_unlock(phb); return rc; } opal_call(OPAL_PCI_MAP_PE_DMA_WINDOW_REAL, opal_pci_map_pe_dma_window_real, 5); static int64_t opal_pci_reset(uint64_t id, uint8_t reset_scope, uint8_t assert_state) { struct pci_slot *slot = pci_slot_find(id); struct phb *phb = slot ? slot->phb : NULL; int64_t rc = OPAL_SUCCESS; if (!slot || !phb) return OPAL_PARAMETER; if (assert_state != OPAL_ASSERT_RESET && assert_state != OPAL_DEASSERT_RESET) return OPAL_PARAMETER; phb_lock(phb); switch(reset_scope) { case OPAL_RESET_PHB_COMPLETE: /* Complete reset is applicable to PHB slot only */ if (!slot->ops.creset || slot->pd) { rc = OPAL_UNSUPPORTED; break; } if (assert_state != OPAL_ASSERT_RESET) break; rc = slot->ops.creset(slot); if (rc < 0) prlog(PR_ERR, "SLOT-%016llx: Error %lld on complete reset\n", slot->id, rc); break; case OPAL_RESET_PCI_FUNDAMENTAL: if (!slot->ops.freset) { rc = OPAL_UNSUPPORTED; break; } /* We need do nothing on deassert time */ if (assert_state != OPAL_ASSERT_RESET) break; rc = slot->ops.freset(slot); if (rc < 0) prlog(PR_ERR, "SLOT-%016llx: Error %lld on fundamental reset\n", slot->id, rc); break; case OPAL_RESET_PCI_HOT: if (!slot->ops.hreset) { rc = OPAL_UNSUPPORTED; break; } /* We need do nothing on deassert time */ if (assert_state != OPAL_ASSERT_RESET) break; rc = slot->ops.hreset(slot); if (rc < 0) prlog(PR_ERR, "SLOT-%016llx: Error %lld on hot reset\n", slot->id, rc); break; case OPAL_RESET_PCI_IODA_TABLE: /* It's allowed on PHB slot only */ if (slot->pd || !phb->ops || !phb->ops->ioda_reset) { rc = OPAL_UNSUPPORTED; break; } if (assert_state != OPAL_ASSERT_RESET) break; rc = phb->ops->ioda_reset(phb, true); break; case OPAL_RESET_PHB_ERROR: /* It's allowed on PHB slot only */ if (slot->pd || !phb->ops || !phb->ops->papr_errinjct_reset) { rc = OPAL_UNSUPPORTED; break; } if (assert_state != OPAL_ASSERT_RESET) break; rc = phb->ops->papr_errinjct_reset(phb); break; default: rc = OPAL_UNSUPPORTED; } phb_unlock(phb); return (rc > 0) ? 
		tb_to_msecs(rc) : rc;
}
opal_call(OPAL_PCI_RESET, opal_pci_reset, 3);

static int64_t opal_pci_reinit(uint64_t phb_id,
			       uint64_t reinit_scope, uint64_t data)
{
	struct phb *phb = pci_get_phb(phb_id);
	int64_t rc;

	if (!phb)
		return OPAL_PARAMETER;
	if (!phb->ops || !phb->ops->pci_reinit)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	rc = phb->ops->pci_reinit(phb, reinit_scope, data);
	phb_unlock(phb);

	return rc;
}
opal_call(OPAL_PCI_REINIT, opal_pci_reinit, 3);

static int64_t opal_pci_poll(uint64_t id)
{
	struct pci_slot *slot = pci_slot_find(id);
	struct phb *phb = slot ? slot->phb : NULL;
	int64_t rc;

	if (!slot || !phb)
		return OPAL_PARAMETER;
	if (!slot->ops.run_sm)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	rc = slot->ops.run_sm(slot);
	phb_unlock(phb);

	/* Return milliseconds for caller to sleep: round up */
	if (rc > 0) {
		rc = tb_to_msecs(rc);
		if (rc == 0)
			rc = 1;
	}

	return rc;
}
opal_call(OPAL_PCI_POLL, opal_pci_poll, 1);

static int64_t opal_pci_get_presence_state(uint64_t id, uint64_t data)
{
	struct pci_slot *slot = pci_slot_find(id);
	struct phb *phb = slot ? slot->phb : NULL;
	uint8_t *presence = (uint8_t *)data;
	int64_t rc;

	if (!opal_addr_valid(presence))
		return OPAL_PARAMETER;

	if (!slot || !phb)
		return OPAL_PARAMETER;
	if (!slot->ops.get_presence_state)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	rc = slot->ops.get_presence_state(slot, presence);
	phb_unlock(phb);

	return rc;
}
opal_call(OPAL_PCI_GET_PRESENCE_STATE, opal_pci_get_presence_state, 2);

static int64_t opal_pci_get_power_state(uint64_t id, uint64_t data)
{
	struct pci_slot *slot = pci_slot_find(id);
	struct phb *phb = slot ? slot->phb : NULL;
	uint8_t *power_state = (uint8_t *)data;
	int64_t rc;

	if (!opal_addr_valid(power_state))
		return OPAL_PARAMETER;

	if (!slot || !phb)
		return OPAL_PARAMETER;
	if (!slot->ops.get_power_state)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	rc = slot->ops.get_power_state(slot, power_state);
	phb_unlock(phb);

	return rc;
}
opal_call(OPAL_PCI_GET_POWER_STATE, opal_pci_get_power_state, 2);

static void set_power_timer(struct timer *t __unused, void *data,
			    uint64_t now __unused)
{
	struct pci_slot *slot = data;
	struct phb *phb = slot->phb;
	struct pci_device *pd = slot->pd;
	struct dt_node *dn = pd->dn;
	uint8_t link;

	switch (slot->state) {
	case PCI_SLOT_STATE_SPOWER_START:
		if (slot->retries-- == 0) {
			pci_slot_set_state(slot, PCI_SLOT_STATE_NORMAL);
			opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
				       slot->async_token, dn->phandle,
				       slot->power_state, OPAL_BUSY);
		} else {
			schedule_timer(&slot->timer, msecs_to_tb(10));
		}

		break;
	case PCI_SLOT_STATE_SPOWER_DONE:
		if (slot->power_state == OPAL_PCI_SLOT_POWER_OFF) {
			pci_remove_bus(phb, &pd->children);
			pci_slot_set_state(slot, PCI_SLOT_STATE_NORMAL);
			opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
				       slot->async_token, dn->phandle,
				       OPAL_PCI_SLOT_POWER_OFF, OPAL_SUCCESS);
			break;
		}

		/* Power on */
		if (slot->ops.get_link_state(slot, &link) != OPAL_SUCCESS)
			link = 0;
		if (link) {
			slot->ops.prepare_link_change(slot, true);
			pci_scan_bus(phb, pd->secondary_bus,
				     pd->subordinate_bus,
				     &pd->children, pd, true);
			pci_add_device_nodes(phb, &pd->children, dn,
					     &phb->lstate, 0);
			pci_slot_set_state(slot, PCI_SLOT_STATE_NORMAL);
			opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
				       slot->async_token, dn->phandle,
				       OPAL_PCI_SLOT_POWER_ON, OPAL_SUCCESS);
		} else if (slot->retries-- == 0) {
			pci_slot_set_state(slot, PCI_SLOT_STATE_NORMAL);
			opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
				       slot->async_token, dn->phandle,
				       OPAL_PCI_SLOT_POWER_ON, OPAL_BUSY);
		} else {
			schedule_timer(&slot->timer, msecs_to_tb(10));
		}

		break;
	default:
		prlog(PR_ERR,
		      "PCI SLOT %016llx: Unexpected state 0x%08x\n",
		      slot->id, slot->state);
	}
}

static int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
					uint64_t data)
{
	struct pci_slot *slot = pci_slot_find(id);
	struct phb *phb = slot ? slot->phb : NULL;
	struct pci_device *pd = slot ? slot->pd : NULL;
	uint8_t *state = (uint8_t *)data;
	int64_t rc;

	if (!slot || !phb)
		return OPAL_PARAMETER;
	if (!opal_addr_valid(state))
		return OPAL_PARAMETER;

	phb_lock(phb);
	switch (*state) {
	case OPAL_PCI_SLOT_POWER_OFF:
		if (!slot->ops.prepare_link_change ||
		    !slot->ops.set_power_state) {
			phb_unlock(phb);
			return OPAL_UNSUPPORTED;
		}

		slot->async_token = async_token;
		slot->ops.prepare_link_change(slot, false);
		rc = slot->ops.set_power_state(slot, PCI_SLOT_POWER_OFF);
		break;
	case OPAL_PCI_SLOT_POWER_ON:
		if (!slot->ops.set_power_state ||
		    !slot->ops.get_link_state) {
			phb_unlock(phb);
			return OPAL_UNSUPPORTED;
		}

		slot->async_token = async_token;
		rc = slot->ops.set_power_state(slot, PCI_SLOT_POWER_ON);
		break;
	case OPAL_PCI_SLOT_OFFLINE:
		if (!pd) {
			phb_unlock(phb);
			return OPAL_PARAMETER;
		}

		pci_remove_bus(phb, &pd->children);
		phb_unlock(phb);
		return OPAL_SUCCESS;
	case OPAL_PCI_SLOT_ONLINE:
		if (!pd) {
			phb_unlock(phb);
			return OPAL_PARAMETER;
		}

		pci_scan_bus(phb, pd->secondary_bus, pd->subordinate_bus,
			     &pd->children, pd, true);
		pci_add_device_nodes(phb, &pd->children, pd->dn,
				     &phb->lstate, 0);
		phb_unlock(phb);
		return OPAL_SUCCESS;
	default:
		rc = OPAL_PARAMETER;
	}

	/*
	 * OPAL_ASYNC_COMPLETION is returned when delay is needed to change
	 * the power state in the backend. When it can be finished without
	 * delay, OPAL_SUCCESS is returned. The PCI topology needs to be
	 * updated in both cases.
	 */
	if (rc == OPAL_ASYNC_COMPLETION) {
		slot->retries = 500;
		init_timer(&slot->timer, set_power_timer, slot);
		schedule_timer(&slot->timer, msecs_to_tb(10));
	} else if (rc == OPAL_SUCCESS) {
		if (*state == OPAL_PCI_SLOT_POWER_OFF) {
			pci_remove_bus(phb, &pd->children);
		} else {
			slot->ops.prepare_link_change(slot, true);
			pci_scan_bus(phb, pd->secondary_bus,
				     pd->subordinate_bus,
				     &pd->children, pd, true);
			pci_add_device_nodes(phb, &pd->children, pd->dn,
					     &phb->lstate, 0);
		}
	}

	phb_unlock(phb);
	return rc;
}
opal_call(OPAL_PCI_SET_POWER_STATE, opal_pci_set_power_state, 3);

static int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id,
					   uint64_t tce_mem_addr,
					   uint64_t tce_mem_size)
{
	struct phb *phb = pci_get_phb(phb_id);
	int64_t rc;

	if (!phb)
		return OPAL_PARAMETER;
	if (!phb->ops->set_phb_tce_memory)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	rc = phb->ops->set_phb_tce_memory(phb, tce_mem_addr, tce_mem_size);
	phb_unlock(phb);

	return rc;
}
opal_call(OPAL_PCI_SET_PHB_TCE_MEMORY, opal_pci_set_phb_tce_memory, 3);

static int64_t opal_pci_get_phb_diag_data(uint64_t phb_id,
					  void *diag_buffer,
					  uint64_t diag_buffer_len)
{
	struct phb *phb = pci_get_phb(phb_id);
	int64_t rc;

	if (!opal_addr_valid(diag_buffer))
		return OPAL_PARAMETER;

	if (!phb)
		return OPAL_PARAMETER;
	if (!phb->ops->get_diag_data)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	rc = phb->ops->get_diag_data(phb, diag_buffer, diag_buffer_len);
	phb_unlock(phb);

	return rc;
}
opal_call(OPAL_PCI_GET_PHB_DIAG_DATA, opal_pci_get_phb_diag_data, 3);

static int64_t opal_pci_get_phb_diag_data2(uint64_t phb_id,
					   void *diag_buffer,
					   uint64_t diag_buffer_len)
{
	struct phb *phb = pci_get_phb(phb_id);
	int64_t rc;

	if (!opal_addr_valid(diag_buffer))
		return OPAL_PARAMETER;

	if (!phb)
		return OPAL_PARAMETER;
	if (!phb->ops->get_diag_data2)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	rc = phb->ops->get_diag_data2(phb, diag_buffer, diag_buffer_len);
	phb_unlock(phb);

	return rc;
}
opal_call(OPAL_PCI_GET_PHB_DIAG_DATA2, opal_pci_get_phb_diag_data2, 3);
static int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
				   uint16_t *pci_error_type,
				   uint16_t *severity)
{
	struct phb *phb = pci_get_phb(phb_id);
	int64_t rc;

	if (!opal_addr_valid(first_frozen_pe) ||
	    !opal_addr_valid(pci_error_type) ||
	    !opal_addr_valid(severity))
		return OPAL_PARAMETER;

	if (!phb)
		return OPAL_PARAMETER;
	if (!phb->ops->next_error)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	opal_pci_eeh_clear_evt(phb_id);
	rc = phb->ops->next_error(phb, first_frozen_pe, pci_error_type,
				  severity);
	phb_unlock(phb);

	return rc;
}
opal_call(OPAL_PCI_NEXT_ERROR, opal_pci_next_error, 4);

static int64_t opal_pci_eeh_freeze_status2(uint64_t phb_id, uint64_t pe_number,
					   uint8_t *freeze_state,
					   uint16_t *pci_error_type,
					   uint16_t *severity,
					   uint64_t *phb_status)
{
	struct phb *phb = pci_get_phb(phb_id);
	int64_t rc;

	if (!opal_addr_valid(freeze_state) ||
	    !opal_addr_valid(pci_error_type) ||
	    !opal_addr_valid(severity) ||
	    !opal_addr_valid(phb_status))
		return OPAL_PARAMETER;

	if (!phb)
		return OPAL_PARAMETER;
	if (!phb->ops->eeh_freeze_status)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	rc = phb->ops->eeh_freeze_status(phb, pe_number, freeze_state,
					 pci_error_type, severity,
					 phb_status);
	phb_unlock(phb);

	return rc;
}
opal_call(OPAL_PCI_EEH_FREEZE_STATUS2, opal_pci_eeh_freeze_status2, 6);

static int64_t opal_pci_set_phb_capi_mode(uint64_t phb_id, uint64_t mode,
					  uint64_t pe_number)
{
	struct phb *phb = pci_get_phb(phb_id);
	int64_t rc;

	if (!phb)
		return OPAL_PARAMETER;
	if (!phb->ops->set_capi_mode)
		return OPAL_UNSUPPORTED;

	phb_lock(phb);
	rc = phb->ops->set_capi_mode(phb, mode, pe_number);
	phb_unlock(phb);

	return rc;
}
opal_call(OPAL_PCI_SET_PHB_CAPI_MODE, opal_pci_set_phb_capi_mode, 3);

static int64_t opal_pci_set_p2p(uint64_t phbid_init, uint64_t phbid_target,
				uint64_t desc, uint16_t pe_number)
{
	struct phb *phb_init = pci_get_phb(phbid_init);
	struct phb *phb_target = pci_get_phb(phbid_target);

	if (!phb_init || !phb_target)
		return OPAL_PARAMETER;

	/*
	 * Having the 2 devices under the same PHB may require tuning
	 * the configuration of intermediate switch(es), more easily
	 * done from linux. And it shouldn't require a PHB config
	 * change.
	 * Return an error for the time being.
	 */
	if (phb_init == phb_target)
		return OPAL_UNSUPPORTED;

	if (!phb_init->ops->set_p2p || !phb_target->ops->set_p2p)
		return OPAL_UNSUPPORTED;

	/*
	 * Loads would be supported on p9 if the 2 devices are under
	 * the same PHB, but we ruled it out above.
	 */
	if (desc & OPAL_PCI_P2P_LOAD)
		return OPAL_UNSUPPORTED;

	phb_lock(phb_init);
	phb_init->ops->set_p2p(phb_init, OPAL_PCI_P2P_INITIATOR, desc,
			       pe_number);
	phb_unlock(phb_init);

	phb_lock(phb_target);
	phb_target->ops->set_p2p(phb_target, OPAL_PCI_P2P_TARGET, desc,
				 pe_number);
	phb_unlock(phb_target);

	return OPAL_SUCCESS;
}
opal_call(OPAL_PCI_SET_P2P, opal_pci_set_p2p, 4);
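/*
 * opal_pci_poll() earlier in this file returns the remaining delay as a
 * millisecond sleep hint, rounding any non-zero remainder up to at least
 * 1ms so the caller never spins on a zero-length sleep. Below is a minimal,
 * standalone sketch of that rounding contract; the 512MHz timebase value
 * and the helper names are illustrative assumptions for the example, not
 * skiboot's actual definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_TB_HZ	512000000UL	/* assumed timebase frequency */

static uint64_t example_tb_to_msecs(uint64_t tb)
{
	return tb / (EXAMPLE_TB_HZ / 1000);
}

static int64_t example_poll_result(int64_t remaining_tb)
{
	int64_t ms;

	if (remaining_tb <= 0)
		return remaining_tb;	/* done, or an error code */

	ms = (int64_t)example_tb_to_msecs((uint64_t)remaining_tb);
	return ms ? ms : 1;		/* round sub-millisecond waits up */
}

int main(void)
{
	/* ~100us worth of timebase ticks still asks the caller to sleep 1ms */
	printf("%lld\n", (long long)example_poll_result(51200));
	/* ~20ms worth of timebase ticks maps to a 20ms sleep */
	printf("%lld\n", (long long)example_poll_result(10240000));
	return 0;
}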
skiboot-5.10-rc4/core/pci-quirk.c

/* Copyright 2017 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <skiboot.h>
#include <pci.h>
#include <pci-quirk.h>
#include <ast.h>

static void quirk_astbmc_vga(struct phb *phb __unused,
			     struct pci_device *pd)
{
	struct dt_node *np = pd->dn;
	uint32_t revision, mcr_configuration, mcr_scu_mpll, mcr_scu_strap;

	/*
	 * These accesses will only work if the BMC address 0x1E6E2180 is set
	 * to 0x7B, which is its default state on current systems. In future,
	 * for security purposes it is proposed to configure this register to
	 * disallow accesses from the host, and provide the properties that
	 * the Linux ast VGA driver used through the device tree instead.
	 * Here we set those properties so we can test how things would work
	 * if the window into BMC memory was closed.
	 *
	 * If both the petitboot kernel and the host kernel have an ast driver
	 * that reads properties from the device tree, setting 0x1E6E2180 to
	 * 0x79 will disable the backdoor into BMC memory and the only way the
	 * ast driver can operate is using the device tree properties.
	 */
	revision = ast_ahb_readl(SCU_REVISION_ID);
	mcr_configuration = ast_ahb_readl(MCR_CONFIGURATION);
	mcr_scu_mpll = ast_ahb_readl(MCR_SCU_MPLL);
	mcr_scu_strap = ast_ahb_readl(MCR_SCU_STRAP);

	dt_add_property_cells(np, "aspeed,scu-revision-id", revision);
	dt_add_property_cells(np, "aspeed,mcr-configuration",
			      mcr_configuration);
	dt_add_property_cells(np, "aspeed,mcr-scu-mpll", mcr_scu_mpll);
	dt_add_property_cells(np, "aspeed,mcr-scu-strap", mcr_scu_strap);

	/*
	 * if
	 *  - the petitboot kernel supports an ast driver that uses DT
	 *  - every host kernel supports an ast driver that uses DT
	 *  - the host can't flash unsigned skiboots
	 *
	 * then enabling the line below will allow the host and the BMC to be
	 * securely isolated from each other, without changing what's running
	 * on the BMC.
	 */
	/* ast_ahb_writel(0x79, 0x1E6E2180); */
}

/* Quirks are: {fixup function, vendor ID, (device ID or PCI_ANY_ID)} */
static const struct pci_quirk quirk_table[] = {
	/* ASPEED 2400 VGA device */
	{ &quirk_astbmc_vga, 0x1a03, 0x2000 },
	{NULL}
};

void pci_handle_quirk(struct phb *phb, struct pci_device *pd)
{
	const struct pci_quirk *quirks = quirk_table;

	while (quirks->vendor_id) {
		if (quirks->vendor_id == PCI_VENDOR_ID(pd->vdid) &&
		    (quirks->device_id == PCI_ANY_ID ||
		     quirks->device_id == PCI_DEVICE_ID(pd->vdid)))
			quirks->fixup(phb, pd);
		quirks++;
	}
}
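/*
 * pci_handle_quirk() above is driven purely by quirk_table: a quirk fires
 * when the vendor ID matches and the device ID either matches exactly or is
 * the PCI_ANY_ID wildcard. The standalone sketch below reproduces just that
 * matching rule so it can be compiled and experimented with in isolation;
 * the VDID packing (vendor in the low 16 bits, device in the high 16 bits)
 * and the 0xffff wildcard value are assumptions for the example, not taken
 * from skiboot's headers.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PCI_ANY_ID		0xffff
#define EX_VENDOR(vdid)		((uint16_t)((vdid) & 0xffff))
#define EX_DEVICE(vdid)		((uint16_t)((vdid) >> 16))

struct ex_quirk {
	void (*fixup)(uint32_t vdid);
	uint16_t vendor_id;
	uint16_t device_id;
};

static void ex_fixup_ast_vga(uint32_t vdid)
{
	printf("applying ASPEED VGA quirk to %08x\n", (unsigned int)vdid);
}

static const struct ex_quirk ex_table[] = {
	{ ex_fixup_ast_vga, 0x1a03, 0x2000 },	/* ASPEED 2400 VGA */
	{ NULL, 0, 0 },
};

static void ex_handle_quirk(uint32_t vdid)
{
	const struct ex_quirk *q;

	for (q = ex_table; q->vendor_id; q++)
		if (q->vendor_id == EX_VENDOR(vdid) &&
		    (q->device_id == EX_PCI_ANY_ID ||
		     q->device_id == EX_DEVICE(vdid)))
			q->fixup(vdid);
}

int main(void)
{
	ex_handle_quirk(0x20001a03);	/* matches: vendor 0x1a03, device 0x2000 */
	ex_handle_quirk(0x12341a03);	/* vendor matches, device does not */
	return 0;
}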
skiboot-5.10-rc4/core/pci-slot.c

/* Copyright 2013-2016 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <skiboot.h>
#include <timebase.h>
#include <pci-cfg.h>
#include <pci.h>
#include <pci-slot.h>

/* Debugging options */
#define PCI_SLOT_PREFIX	"PCI-SLOT-%016llx "
#define PCI_SLOT_DBG(s, fmt, a...)	 \
	prlog(PR_DEBUG, PCI_SLOT_PREFIX fmt, (s)->id, ##a)

static void pci_slot_prepare_link_change(struct pci_slot *slot, bool up)
{
	struct phb *phb = slot->phb;
	struct pci_device *pd = slot->pd;
	uint32_t aercap, mask;

	/*
	 * Mask the link down and receiver error before the link becomes
	 * down. Otherwise, unmask the errors when the link is up.
	 */
	if (pci_has_cap(pd, PCIECAP_ID_AER, true)) {
		aercap = pci_cap(pd, PCIECAP_ID_AER, true);

		/* Mask link surprise down event. The event is always
		 * masked when the associated PCI slot supports PCI
		 * surprise hotplug. We needn't toggle it when the link
		 * bounces caused by reset and just keep it always masked.
		 */
		if (!pd->slot || !pd->slot->surprise_pluggable) {
			pci_cfg_read32(phb, pd->bdfn,
				       aercap + PCIECAP_AER_UE_MASK, &mask);
			if (up)
				mask &= ~PCIECAP_AER_UE_MASK_SURPRISE_DOWN;
			else
				mask |= PCIECAP_AER_UE_MASK_SURPRISE_DOWN;
			pci_cfg_write32(phb, pd->bdfn,
					aercap + PCIECAP_AER_UE_MASK, mask);
		}

		/* Receiver error */
		pci_cfg_read32(phb, pd->bdfn, aercap + PCIECAP_AER_CE_MASK,
			       &mask);
		if (up)
			mask &= ~PCIECAP_AER_CE_RECVR_ERR;
		else
			mask |= PCIECAP_AER_CE_RECVR_ERR;
		pci_cfg_write32(phb, pd->bdfn, aercap + PCIECAP_AER_CE_MASK,
				mask);
	}

	/*
	 * We're coming back from reset. We need restore bus ranges
	 * and reinitialize the affected bridges and devices.
	 */
	if (up) {
		pci_restore_bridge_buses(phb, pd);
		if (phb->ops->device_init)
			pci_walk_dev(phb, pd, phb->ops->device_init, NULL);
	}
}

static int64_t pci_slot_run_sm(struct pci_slot *slot)
{
	uint64_t now = mftb();
	int64_t ret;

	/* Return remaining timeout if we're still waiting */
	if (slot->delay_tgt_tb &&
	    tb_compare(now, slot->delay_tgt_tb) == TB_ABEFOREB)
		return slot->delay_tgt_tb - now;

	slot->delay_tgt_tb = 0;
	switch (slot->state & PCI_SLOT_STATE_MASK) {
	case PCI_SLOT_STATE_LINK:
		ret = slot->ops.poll_link(slot);
		break;
	case PCI_SLOT_STATE_HRESET:
		ret = slot->ops.hreset(slot);
		break;
	case PCI_SLOT_STATE_FRESET:
		ret = slot->ops.freset(slot);
		break;
	case PCI_SLOT_STATE_CRESET:
		ret = slot->ops.creset(slot);
		break;
	default:
		prlog(PR_ERR, PCI_SLOT_PREFIX "Invalid state %08x\n",
		      slot->id, slot->state);
		pci_slot_set_state(slot, PCI_SLOT_STATE_NORMAL);
		return OPAL_HARDWARE;
	}

	return ret;
}

void pci_slot_add_dt_properties(struct pci_slot *slot, struct dt_node *np)
{
	/* Bail without device node */
	if (!np)
		return;

	dt_add_property_cells(np, "ibm,reset-by-firmware", 1);
	dt_add_property_cells(np, "ibm,slot-pluggable", slot->pluggable);
	dt_add_property_cells(np, "ibm,slot-surprise-pluggable",
			      slot->surprise_pluggable);
	if (pci_slot_has_flags(slot, PCI_SLOT_FLAG_BROKEN_PDC))
		dt_add_property_cells(np, "ibm,slot-broken-pdc", 1);
	dt_add_property_cells(np, "ibm,slot-power-ctl", slot->power_ctl);
	dt_add_property_cells(np, "ibm,slot-power-led-ctl",
			      slot->power_led_ctl);
	dt_add_property_cells(np, "ibm,slot-attn-led-ctl",
			      slot->attn_led_ctl);
	dt_add_property_cells(np, "ibm,slot-connector-type",
			      slot->connector_type);
	dt_add_property_cells(np, "ibm,slot-card-desc", slot->card_desc);
	dt_add_property_cells(np, "ibm,slot-card-mech", slot->card_mech);
	dt_add_property_cells(np, "ibm,slot-wired-lanes", slot->wired_lanes);

	if (slot->ops.add_properties)
		slot->ops.add_properties(slot, np);
}
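/*
 * pci_slot_run_sm() above implements a simple cooperative wait: when a slot
 * operation has armed delay_tgt_tb, the state machine is not advanced until
 * the timebase passes that target, and the remaining ticks are returned so
 * opal_pci_poll() can turn them into a sleep hint for the OS. The standalone
 * sketch below models just that wait decision; the field and function names
 * are illustrative stand-ins for the example, not skiboot's structures.
 */
#include <stdint.h>
#include <stdio.h>

struct ex_slot {
	uint64_t delay_tgt_tb;	/* 0 means "no wait armed" */
};

/* Returns >0 remaining ticks while waiting, 0 once the target has passed */
static int64_t ex_run_sm(struct ex_slot *slot, uint64_t now)
{
	if (slot->delay_tgt_tb && now < slot->delay_tgt_tb)
		return (int64_t)(slot->delay_tgt_tb - now);

	slot->delay_tgt_tb = 0;
	/*
	 * The real code would now step the LINK/HRESET/FRESET/CRESET state
	 * machine; the sketch simply reports completion.
	 */
	return 0;
}

int main(void)
{
	struct ex_slot slot = { .delay_tgt_tb = 1000 };

	printf("%lld\n", (long long)ex_run_sm(&slot, 400));	/* 600 ticks left */
	printf("%lld\n", (long long)ex_run_sm(&slot, 1200));	/* target passed: 0 */
	return 0;
}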
struct pci_slot *pci_slot_alloc(struct phb *phb, struct pci_device *pd)
{
	struct pci_slot *slot = NULL;

	/*
	 * The function can be used to allocate either PHB slot or normal
	 * one. For both cases, the @phb should be always valid.
	 */
	if (!phb)
		return NULL;

	/*
	 * When @pd is NULL, we're going to create a PHB slot. Otherwise,
	 * a normal slot will be created. Check if the specified slot
	 * already exists or not.
	 */
	slot = pd ? pd->slot : phb->slot;
	if (slot) {
		prlog(PR_ERR, PCI_SLOT_PREFIX "Already exists\n", slot->id);
		return slot;
	}

	/* Allocate memory chunk */
	slot = zalloc(sizeof(struct pci_slot));
	if (!slot) {
		prlog(PR_ERR, "%s: Out of memory\n", __func__);
		return NULL;
	}

	/*
	 * The polling function shouldn't be overridden by individual
	 * platforms
	 */
	slot->phb = phb;
	slot->pd = pd;
	pci_slot_set_state(slot, PCI_SLOT_STATE_NORMAL);
	slot->power_state = PCI_SLOT_POWER_ON;
	slot->ops.run_sm = pci_slot_run_sm;
	slot->ops.prepare_link_change = pci_slot_prepare_link_change;
	slot->peer_slot = NULL;

	if (!pd) {
		slot->id = PCI_PHB_SLOT_ID(phb);
		phb->slot = slot;
	} else {
		slot->id = PCI_SLOT_ID(phb, pd->bdfn);
		pd->slot = slot;
	}

	return slot;
}

struct pci_slot *pci_slot_find(uint64_t id)
{
	struct phb *phb;
	struct pci_device *pd;
	struct pci_slot *slot;
	uint64_t index;
	uint16_t bdfn;

	index = PCI_SLOT_PHB_INDEX(id);
	phb = pci_get_phb(index);

	/* PHB slot */
	if (!(id & PCI_SLOT_ID_PREFIX)) {
		slot = phb ? phb->slot : NULL;
		return slot;
	}

	/* Normal PCI slot */
	bdfn = PCI_SLOT_BDFN(id);
	pd = phb ? pci_find_dev(phb, bdfn) : NULL;
	slot = pd ? pd->slot : NULL;
	return slot;
}

void pci_slot_add_loc(struct pci_slot *slot,
		      struct dt_node *np, const char *label)
{
	char tmp[8], loc_code[LOC_CODE_SIZE];
	struct pci_device *pd = slot->pd;
	struct phb *phb = slot->phb;

	if (!np)
		return;

	/* didn't get a real slot label? generate one! */
	if (!label) {
		snprintf(tmp, sizeof(tmp), "S%04x%02x", phb->opal_id,
			 pd->secondary_bus);
		label = tmp;
	}

	/* Make a -