pax_global_header00006660000000000000000000000064150307130720014510gustar00rootroot0000000000000052 comment=e6f86fa718d40f2709b48ed57dc775b25bbd7bb5 ipu7-drivers-0~git202507010803.e6f86fa7/000077500000000000000000000000001503071307200170425ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/.github/000077500000000000000000000000001503071307200204025ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/.github/workflows/000077500000000000000000000000001503071307200224375ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/.github/workflows/test.yml000066400000000000000000000067101503071307200241450ustar00rootroot00000000000000name: test on: push: pull_request: schedule: - cron: '30 1 * * *' #every day at 1:30am permissions: {} jobs: Ubuntu-2404-dkms: runs-on: ubuntu-latest container: ubuntu:24.04 steps: - name: Checkout uses: actions/checkout@v4 - name: Prepare environment shell: bash run: | sed -i 's/noble-updates/noble-updates noble-proposed/' /etc/apt/sources.list.d/ubuntu.sources apt-get update --quiet; apt-get install --yes --no-install-recommends dkms gpg-agent kmod software-properties-common sudo - name: Download header files shell: bash run: | # latest generic kernel headers apt-get install --yes linux-headers-generic linux-headers-generic-hwe-24.04-edge # latest oem kernel apt-get install --yes linux-headers-oem-24.04a linux-headers-oem-24.04b - name: Register with dkms shell: bash run: | dkms add . - name: Compile driver shell: bash run: | # run dkms build and all available kernel headers failed="" succeeded="" for kver in /lib/modules/*/build; do # skip the kernel headers from the azure kernel. 
These kernel headers # are preinstalled and of no interest if [[ "$kver" == *"azure"* ]]; then echo "Skipping $kver - This is the kernel of the github runner."; continue; fi; test -d $kver || continue kver=${kver%/build} kver=${kver##*/} echo "=== Testing ${kver} ==="; echo "running: dkms build -m ipu7-drivers/0.0.0 -k ${kver}"; ret=$(sudo dkms build -m ipu7-drivers/0.0.0 -k ${kver} >&2; echo $?); if [ ${ret} -eq 0 ]; then succeeded="${succeeded} ${kver}" else echo "#### Skipped unexpected failure ${kver}"; failed="${failed} ${kver}"; fi; done if [ "x${failed}" != "x" ]; then echo "#### Failed kernels: ${failed}"; exit 1 fi echo "#### Successful builds for kernels: ${succeeded}"; ipu7-drivers-0~git202507010803.e6f86fa7/CODE_OF_CONDUCT.md000066400000000000000000000124761503071307200216530ustar00rootroot00000000000000# Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at CommunityCodeOfConduct AT intel DOT com. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. 
Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq ipu7-drivers-0~git202507010803.e6f86fa7/CONTRIBUTING.md000066400000000000000000000043421503071307200212760ustar00rootroot00000000000000# Contributing ### License ipu7-drivers is licensed under the terms in [GPL-2.0](LICENSE). By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms. ### Sign your work Please use the sign-off line at the end of the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): ``` Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` Then you just add a line to every git commit message: Signed-off-by: Joe Smith Use your real name (sorry, no pseudonyms or anonymous contributions.) If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`. ipu7-drivers-0~git202507010803.e6f86fa7/LICENSE000066400000000000000000000431341503071307200200540ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. 
If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. 
If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. 
(This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. 
Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. 
Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. 
If this is what you want to do, use the GNU Library General Public License instead of this License. ipu7-drivers-0~git202507010803.e6f86fa7/Makefile000066400000000000000000000012151503071307200205010ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2022 Intel Corporation. KERNELRELEASE ?= $(shell uname -r) KERNEL_SRC ?= /lib/modules/$(KERNELRELEASE)/build MODSRC := $(shell pwd) export EXTERNAL_BUILD = 1 export CONFIG_VIDEO_INTEL_IPU7 = m export CONFIG_IPU_BRIDGE = y obj-y += drivers/media/pci/intel/ipu7/ subdir-ccflags-y += -I$(src)/include subdir-ccflags-$(CONFIG_IPU_BRIDGE) += \ -DCONFIG_IPU_BRIDGE subdir-ccflags-y += $(subdir-ccflags-m) all: $(MAKE) -C $(KERNEL_SRC) M=$(MODSRC) modules modules_install: $(MAKE) INSTALL_MOD_DIR=updates -C $(KERNEL_SRC) M=$(MODSRC) modules_install clean: $(MAKE) -C $(KERNEL_SRC) M=$(MODSRC) clean ipu7-drivers-0~git202507010803.e6f86fa7/README.md000066400000000000000000000046411503071307200203260ustar00rootroot00000000000000# ipu7-drivers This repository supports MIPI cameras through the IPU7 on Intel Lunar Lake platform. There are 4 repositories that provide the complete setup: - https://github.com/intel/ipu7-drivers - kernel drivers for the IPU and sensors - https://github.com/intel/ipu7-camera-bins - IPU firmware and proprietary image processing libraries - https://github.com/intel/ipu7-camera-hal - HAL for processing of images in userspace - https://github.com/intel/icamerasrc/tree/icamerasrc_slim_api (branch:icamerasrc_slim_api) - Gstreamer src plugin ## Content of this repository - IPU7 kernel driver - Kernel patches needed ## Dependencies - Kernel config IPU_BRIDGE should be enabled when kernel version >= v6.6. - Kernel config INTEL_SKL_INT3472 should be enabled if camera sensor driver is using discrete power control logic, like ov13b10.c. - The camera sensor drivers you are using should be enabled in kernel config. 
- For other camera sensor drivers and related ipu-bridge changes, please [go to IPU6 release repository](https://github.com/intel/ipu6-drivers). ## Build instructions: Three ways are available: 1. Build with kernel source code tree (in-tree build) 2. Build with kernel headers only (out-of-tree build) 3. Build and install by dkms (DKMS build) ### 1. In-tree build - Tested with kernel v6.8 and v6.10 1. Check out kernel source code 2. Patch kernel source code, using patches under `patch/` and `patch//in-tree-build` depending on what you need. 3. Copy `drivers` and `include` folders to kernel source code. 4. Enable the following settings in .config: ```conf CONFIG_VIDEO_INTEL_IPU7=m CONFIG_IPU_BRIDGE=m ``` And other components, depending on what you need: ```conf CONFIG_PINCTRL_INTEL_PLATFORM=m CONFIG_INTEL_SKL_INT3472=m CONFIG_VIDEO_OV13B10=m CONFIG_VIDEO_OV02C10=m CONFIG_VIDEO_OV05C10=m ``` ```conf To enable HDMI2MIPI camera: CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET=y CONFIG_VIDEO_LT6911GXD=m ``` 5. Build you kernel ### 2. Out-of-tree build - Requires kernel headers installed on build machine - Requires dependencies enabled in kernel, and patches under `patch/` (not `in-tree-build`) applied to the kernel you will run the modules on - To build and install: ```sh make -j`nproc` && sudo make modules_install && sudo depmod -a ``` ### 3. Build with dkms - Register, build and auto install: ```sh sudo dkms add . sudo dkms autoinstall ipu7-drivers/0.0.0 ``` ipu7-drivers-0~git202507010803.e6f86fa7/SECURITY.md000066400000000000000000000006261503071307200206370ustar00rootroot00000000000000# Security Policy Intel is committed to rapidly addressing security vulnerabilities affecting our customers and providing clear guidance on the solution, impact, severity and mitigation. 
## Reporting a Vulnerability Please report any security vulnerabilities in this project utilizing the guidelines [here](https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html). ipu7-drivers-0~git202507010803.e6f86fa7/dkms.conf000066400000000000000000000012071503071307200206470ustar00rootroot00000000000000PACKAGE_NAME=ipu7-drivers PACKAGE_VERSION=0.0.0 MAKE="make KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir" CLEAN="make KERNELRELEASE=$kernelver KERNEL_SRC=$kernel_source_dir clean" AUTOINSTALL="yes" BUILD_EXCLUSIVE_CONFIG="CONFIG_VIDEO_V4L2_I2C" BUILT_MODULE_NAME[0]="intel-ipu7" BUILT_MODULE_LOCATION[0]="drivers/media/pci/intel/ipu7" DEST_MODULE_LOCATION[0]="/updates" BUILT_MODULE_NAME[1]="intel-ipu7-isys" BUILT_MODULE_LOCATION[1]="drivers/media/pci/intel/ipu7" DEST_MODULE_LOCATION[1]="/updates" BUILT_MODULE_NAME[2]="intel-ipu7-psys" BUILT_MODULE_LOCATION[2]="drivers/media/pci/intel/ipu7/psys" DEST_MODULE_LOCATION[2]="/updates"ipu7-drivers-0~git202507010803.e6f86fa7/drivers/000077500000000000000000000000001503071307200205205ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/000077500000000000000000000000001503071307200215775ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/000077500000000000000000000000001503071307200223525ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/000077500000000000000000000000001503071307200234655ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/000077500000000000000000000000001503071307200243515ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/Kconfig000066400000000000000000000025011503071307200256520ustar00rootroot00000000000000config VIDEO_INTEL_IPU7 tristate "Intel IPU7 driver" depends on ACPI || COMPILE_TEST depends on VIDEO_DEV depends on X86 && HAS_DMA depends on IPU_BRIDGE || 
!IPU_BRIDGE # # This driver incorrectly tries to override the dma_ops. It should # never have done that, but for now keep it working on architectures # that use dma ops # depends on ARCH_HAS_DMA_OPS select AUXILIARY_BUS select IOMMU_IOVA select VIDEO_V4L2_SUBDEV_API select MEDIA_CONTROLLER select VIDEOBUF2_DMA_SG select V4L2_FWNODE help This is the 7th Gen Intel Image Processing Unit, found in Intel SoCs and used for capturing images and video from camera sensors. To compile this driver, say Y here! It contains 3 modules - intel_ipu7, intel_ipu7_isys and intel_ipu7_psys. config VIDEO_INTEL_IPU7_MGC bool "Compile for IPU7 MGC driver" depends on VIDEO_INTEL_IPU7 help If selected, MGC device nodes would be created. Recommended for driver developers only. If you want to the MGC devices exposed to user as media entity, you must select this option, otherwise no. config VIDEO_INTEL_IPU7_ISYS_RESET bool "IPU7 ISYS RESET" depends on VIDEO_INTEL_IPU7 default n help This option enables IPU7 ISYS reset feature to support HDMI-MIPI converter hot-plugging. If doubt, say N here. ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/Makefile000066400000000000000000000017001503071307200260070ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2017 - 2025 Intel Corporation. 
is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) ifeq ($(is_kernel_lt_6_10), 1) ifneq ($(EXTERNAL_BUILD), 1) src := $(srctree)/$(src) endif endif intel-ipu7-objs += ipu7.o \ ipu7-bus.o \ ipu7-dma.o \ ipu7-mmu.o \ ipu7-buttress.o \ ipu7-cpd.o \ ipu7-syscom.o \ ipu7-boot.o obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7.o intel-ipu7-isys-objs += ipu7-isys.o \ ipu7-isys-csi2.o \ ipu7-isys-csi-phy.o \ ipu7-fw-isys.o \ ipu7-isys-video.o \ ipu7-isys-queue.o \ ipu7-isys-subdev.o ifdef CONFIG_VIDEO_INTEL_IPU7_MGC intel-ipu7-isys-objs += ipu7-isys-tpg.o endif obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7-isys.o obj-$(CONFIG_VIDEO_INTEL_IPU7) += psys/ ccflags-y += -I$(src)/ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/abi/000077500000000000000000000000001503071307200251045ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/abi/ipu7_fw_boot_abi.h000066400000000000000000000140701503071307200304750ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2025 Intel Corporation */ #ifndef IPU7_FW_BOOT_ABI_H #define IPU7_FW_BOOT_ABI_H #include "ipu7_fw_common_abi.h" #include "ipu7_fw_syscom_abi.h" #define IA_GOFO_FWLOG_SEVERITY_CRIT (0U) #define IA_GOFO_FWLOG_SEVERITY_ERROR (1U) #define IA_GOFO_FWLOG_SEVERITY_WARNING (2U) #define IA_GOFO_FWLOG_SEVERITY_INFO (3U) #define IA_GOFO_FWLOG_SEVERITY_DEBUG (4U) #define IA_GOFO_FWLOG_SEVERITY_VERBOSE (5U) #define IA_GOFO_FWLOG_MAX_LOGGER_SOURCES (64U) #define LOGGER_CONFIG_CHANNEL_ENABLE_HWPRINTF_BITMASK BIT(0) #define LOGGER_CONFIG_CHANNEL_ENABLE_SYSCOM_BITMASK BIT(1) #define LOGGER_CONFIG_CHANNEL_ENABLE_ALL_BITMASK \ (LOGGER_CONFIG_CHANNEL_ENABLE_HWPRINTF_BITMASK | \ LOGGER_CONFIG_CHANNEL_ENABLE_SYSCOM_BITMASK) struct ia_gofo_logger_config { u8 use_source_severity; u8 source_severity[IA_GOFO_FWLOG_MAX_LOGGER_SOURCES]; u8 use_channels_enable_bitmask; u8 
channels_enable_bitmask; u8 padding[1]; ia_gofo_addr_t hw_printf_buffer_base_addr; u32 hw_printf_buffer_size_bytes; }; static inline void ia_gofo_logger_config_abi_test_func(void) { CHECK_ALIGN32(struct ia_gofo_logger_config); } #pragma pack(push, 1) #define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP \ ((u32)IA_GOFO_FW_BOOT_ID_MAX) #define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_IS_OFFSET (0U) #define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PS_OFFSET \ ((IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_IS_OFFSET) + \ (u32)(IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP)) #define IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_PRIMARY_OFFSET (0U) #define IA_GOFO_CCG_IPU_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET (0x3000U / 4U) #define IA_GOFO_HKR_IPU_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET \ (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP * 2U) #define IA_GOFO_HKR_HIF_BUTTRESS_FW_BOOT_PARAMS_SECONDARY_OFFSET \ (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP) #define IA_GOFO_CCG_IPU_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX \ (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP * 4U) #define IA_GOFO_HKR_IPU_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX \ (IA_GOFO_BUTTRESS_FW_BOOT_PARAMS_MAX_REG_IDX_PER_APP * 4U) #define IA_GOFO_BOOT_RESERVED_SIZE (58U) #define IA_GOFO_BOOT_SECONDARY_RESERVED_SIZE (IA_GOFO_BOOT_RESERVED_SIZE) #define IA_GOFO_BOOT_SECONDARY_RESERVED_FIELDS \ (sizeof(ia_gofo_addr_t) + sizeof(ia_gofo_addr_t) + sizeof(u32)) enum ia_gofo_buttress_reg_id { IA_GOFO_FW_BOOT_CONFIG_ID = 0, IA_GOFO_FW_BOOT_STATE_ID = 1, IA_GOFO_FW_BOOT_RESERVED1_ID = IA_GOFO_FW_BOOT_STATE_ID, IA_GOFO_FW_BOOT_SYSCOM_QUEUE_INDICES_BASE_ID = 2, IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID = 3, IA_GOFO_FW_BOOT_RESERVED0_ID = IA_GOFO_FW_BOOT_UNTRUSTED_ADDR_MIN_ID, IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID = 4, IA_GOFO_FW_BOOT_ID_MAX }; enum ia_gofo_boot_uc_tile_frequency_units { IA_GOFO_FW_BOOT_UC_FREQUENCY_UNITS_MHZ = 0, IA_GOFO_FW_BOOT_UC_FREQUENCY_UNITS_KHZ = 1, IA_GOFO_FW_BOOT_UC_FREQUENCY_UNITS_N }; #define 
IA_GOFO_FW_BOOT_STATE_IS_CRITICAL(boot_state) \ (0xdead0000U == ((boot_state) & 0xffff0000U)) struct ia_gofo_boot_config { u32 length; struct ia_gofo_version_s config_version; struct ia_gofo_msg_version_list client_version_support; ia_gofo_addr_t pkg_dir; ia_gofo_addr_t subsys_config; u32 uc_tile_frequency; u16 checksum; u8 uc_tile_frequency_units; u8 padding[1]; u32 reserved[IA_GOFO_BOOT_RESERVED_SIZE]; struct syscom_config_s syscom_context_config; }; struct ia_gofo_secondary_boot_config { u32 length; struct ia_gofo_version_s config_version; struct ia_gofo_msg_version_list client_version_support; u8 reserved1[IA_GOFO_BOOT_SECONDARY_RESERVED_FIELDS]; u16 checksum; u8 padding[2]; u32 reserved2[IA_GOFO_BOOT_SECONDARY_RESERVED_SIZE]; struct syscom_config_s syscom_context_config; }; #pragma pack(pop) static inline void ia_gofo_boot_config_abi_test_func(void) { CHECK_ALIGN32(struct ia_gofo_boot_config); CHECK_ALIGN32(struct ia_gofo_secondary_boot_config); } #define IA_GOFO_WDT_TIMEOUT_ERR 0xdead0401U #define IA_GOFO_MEM_FATAL_DME_ERR 0xdead0801U #define IA_GOFO_MEM_UNCORRECTABLE_LOCAL_ERR 0xdead0802U #define IA_GOFO_MEM_UNCORRECTABLE_DIRTY_ERR 0xdead0803U #define IA_GOFO_MEM_UNCORRECTABLE_DTAG_ERR 0xdead0804U #define IA_GOFO_MEM_UNCORRECTABLE_CACHE_ERR 0xdead0805U #define IA_GOFO_DOUBLE_EXCEPTION_ERR 0xdead0806U #define IA_GOFO_BIST_DMEM_FAULT_DETECTION_ERR 0xdead1000U #define IA_GOFO_BIST_DATA_INTEGRITY_FAILURE 0xdead1010U enum ia_gofo_boot_state { IA_GOFO_FW_BOOT_STATE_SECONDARY_BOOT_CONFIG_READY = 0x57a7b000U, IA_GOFO_FW_BOOT_STATE_UNINIT = 0x57a7e000U, IA_GOFO_FW_BOOT_STATE_STARTING_0 = 0x57a7d000U, IA_GOFO_FW_BOOT_STATE_CACHE_INIT_DONE = 0x57a7d010U, IA_GOFO_FW_BOOT_STATE_MEM_INIT_DONE = 0x57a7d020U, IA_GOFO_FW_BOOT_STATE_STACK_INIT_DONE = 0x57a7d030U, IA_GOFO_FW_BOOT_STATE_EARLY_BOOT_DONE = 0x57a7d100U, IA_GOFO_FW_BOOT_STATE_BOOT_CONFIG_START = 0x57a7d200U, IA_GOFO_FW_BOOT_STATE_QUEUE_INIT_DONE = 0x57a7d300U, IA_GOFO_FW_BOOT_STATE_READY = 0x57a7e100U, 
IA_GOFO_FW_BOOT_STATE_CRIT_UNSPECIFIED = 0xdead0001U, IA_GOFO_FW_BOOT_STATE_CRIT_CFG_PTR = 0xdead0101U, IA_GOFO_FW_BOOT_STATE_CRIT_CFG_VERSION = 0xdead0201U, IA_GOFO_FW_BOOT_STATE_CRIT_MSG_VERSION = 0xdead0301U, IA_GOFO_FW_BOOT_STATE_CRIT_WDT_TIMEOUT = IA_GOFO_WDT_TIMEOUT_ERR, IA_GOFO_FW_BOOT_STATE_WRONG_DATA_SECTION_UNPACKING = 0xdead0501U, IA_GOFO_FW_BOOT_STATE_WRONG_RO_DATA_SECTION_UNPACKING = 0xdead0601U, IA_GOFO_FW_BOOT_STATE_INVALID_UNTRUSTED_ADDR_MIN = 0xdead0701U, IA_GOFO_FW_BOOT_STATE_CRIT_MEM_FATAL_DME = IA_GOFO_MEM_FATAL_DME_ERR, IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_LOCAL = IA_GOFO_MEM_UNCORRECTABLE_LOCAL_ERR, IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_DIRTY = IA_GOFO_MEM_UNCORRECTABLE_DIRTY_ERR, IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_DTAG = IA_GOFO_MEM_UNCORRECTABLE_DTAG_ERR, IA_GOFO_FW_BOOT_STATE_CRIT_MEM_UNCORRECTABLE_CACHE = IA_GOFO_MEM_UNCORRECTABLE_CACHE_ERR, IA_GOFO_FW_BOOT_STATE_CRIT_DOUBLE_EXCEPTION = IA_GOFO_DOUBLE_EXCEPTION_ERR, IA_GOFO_FW_BOOT_STATE_SHUTDOWN_CMD = 0x57a7f001U, IA_GOFO_FW_BOOT_STATE_SHUTDOWN_START = 0x57a7e200U, IA_GOFO_FW_BOOT_STATE_INACTIVE = 0x57a7e300U, IA_GOFO_FW_BOOT_HW_CMD_ACK_TIMEOUT = 0x57a7e400U }; #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/abi/ipu7_fw_common_abi.h000066400000000000000000000136611503071307200310270ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2025 Intel Corporation */ #ifndef IPU7_FW_COMMOM_ABI_H #define IPU7_FW_COMMOM_ABI_H #include #define CHECK_SIZE_ALIGNMENT(struct_type, alignment) do { \ const u8 arr[((sizeof(struct_type) % (alignment)) == 0U) ? 
\ 1 : -1]; (void)arr;\ } while (false) #define CHECK_ALIGN16(struct_type) \ CHECK_SIZE_ALIGNMENT(struct_type, sizeof(u16)) #define CHECK_ALIGN32(struct_type) \ CHECK_SIZE_ALIGNMENT(struct_type, sizeof(u32)) #define CHECK_ALIGN64(struct_type) \ CHECK_SIZE_ALIGNMENT(struct_type, sizeof(u64)) #define IA_GOFO_CL_SIZE (64U) #define CHECK_ALIGN_CL(struct_type) \ CHECK_SIZE_ALIGNMENT(struct_type, IA_GOFO_CL_SIZE) #define IA_GOFO_PAGE_SIZE (4096U) #define IA_GOFO_SRAM_PAGE_SIZE (256U) #define CHECK_ALIGN_PAGE(struct_type) \ CHECK_SIZE_ALIGNMENT(struct_type, IA_GOFO_PAGE_SIZE) #define CHECK_EQUAL(lval, rval) do { \ const u8 arr[((lval) == (rval)) ? 1 : -1]; (void)arr;\ } while (false) #define CHECK_SMALLER_OR_EQUAL(lval, rval) do { \ const u8 arr[((lval) <= (rval)) ? 1 : -1]; (void)arr;\ } while (false) #define CHECK_SIZE(struct_type, size) CHECK_EQUAL(sizeof(struct_type), size) #pragma pack(push, 1) typedef u32 ia_gofo_addr_t; #define IA_GOFO_ADDR_NULL (0U) struct ia_gofo_version_s { u8 patch; u8 subminor; u8 minor; u8 major; }; #define IA_GOFO_MSG_VERSION_INIT(major_val, minor_val, subminor_val, patch_val)\ {.major = (major_val), .minor = (minor_val), .subminor = \ (subminor_val), .patch = (patch_val)} #define IA_GOFO_MSG_VERSION_LIST_MAX_ENTRIES (3U) #define IA_GOFO_MSG_RESERVED_SIZE (3U) struct ia_gofo_msg_version_list { u8 num_versions; u8 reserved[IA_GOFO_MSG_RESERVED_SIZE]; struct ia_gofo_version_s versions[IA_GOFO_MSG_VERSION_LIST_MAX_ENTRIES]; }; #pragma pack(pop) static inline void ia_gofo_common_abi_test_func(void) { CHECK_ALIGN32(struct ia_gofo_version_s); CHECK_ALIGN32(struct ia_gofo_msg_version_list); } #define TLV_TYPE_PADDING (0U) #pragma pack(push, 1) #define IA_GOFO_ABI_BITS_PER_BYTE (8U) struct ia_gofo_tlv_header { u16 tlv_type; u16 tlv_len32; }; struct ia_gofo_tlv_list { u16 num_elems; u16 head_offset; }; #define TLV_ITEM_ALIGNMENT ((u32)sizeof(u32)) #define TLV_MSG_ALIGNMENT ((u32)sizeof(u64)) #define TLV_LIST_ALIGNMENT TLV_ITEM_ALIGNMENT #pragma 
pack(pop) static inline void ia_gofo_msg_tlv_test_func(void) { CHECK_ALIGN16(struct ia_gofo_tlv_header); CHECK_ALIGN32(struct ia_gofo_tlv_list); } #define IA_GOFO_MODULO(dividend, divisor) ((dividend) % (divisor)) #define IA_GOFO_MSG_ERR_MAX_DETAILS (4U) #define IA_GOFO_MSG_ERR_OK (0U) #define IA_GOFO_MSG_ERR_UNSPECIFED (0xffffffffU) #define IA_GOFO_MSG_ERR_GROUP_UNSPECIFIED (0U) #define IA_GOFO_MSG_ERR_IS_OK(err) (IA_GOFO_MSG_ERR_OK == (err).err_code) #pragma pack(push, 1) struct ia_gofo_msg_err { u32 err_group; u32 err_code; u32 err_detail[IA_GOFO_MSG_ERR_MAX_DETAILS]; }; #pragma pack(pop) #define IA_GOFO_MSG_ERR_GROUP_APP_EXT_START (16U) #define IA_GOFO_MSG_ERR_GROUP_MAX (31U) #define IA_GOFO_MSG_ERR_GROUP_INTERNAL_START (IA_GOFO_MSG_ERR_GROUP_MAX + 1U) #define IA_GOFO_MSG_ERR_GROUP_RESERVED IA_GOFO_MSG_ERR_GROUP_UNSPECIFIED #define IA_GOFO_MSG_ERR_GROUP_GENERAL 1 static inline void ia_gofo_msg_err_test_func(void) { CHECK_ALIGN64(struct ia_gofo_msg_err); } enum ia_gofo_msg_err_general { IA_GOFO_MSG_ERR_GENERAL_OK = IA_GOFO_MSG_ERR_OK, IA_GOFO_MSG_ERR_GENERAL_MSG_TOO_SMALL = 1, IA_GOFO_MSG_ERR_GENERAL_MSG_TOO_LARGE = 2, IA_GOFO_MSG_ERR_GENERAL_DEVICE_STATE = 3, IA_GOFO_MSG_ERR_GENERAL_ALIGNMENT = 4, IA_GOFO_MSG_ERR_GENERAL_INDIRECT_REF_PTR_INVALID = 5, IA_GOFO_MSG_ERR_GENERAL_INVALID_MSG_TYPE = 6, IA_GOFO_MSG_ERR_GENERAL_SYSCOM_FAIL = 7, IA_GOFO_MSG_ERR_GENERAL_N }; #pragma pack(push, 1) #define IA_GOFO_MSG_TYPE_RESERVED 0 #define IA_GOFO_MSG_TYPE_INDIRECT 1 #define IA_GOFO_MSG_TYPE_LOG 2 #define IA_GOFO_MSG_TYPE_GENERAL_ERR 3 struct ia_gofo_msg_header { struct ia_gofo_tlv_header tlv_header; struct ia_gofo_tlv_list msg_options; u64 user_token; }; struct ia_gofo_msg_header_ack { struct ia_gofo_msg_header header; struct ia_gofo_msg_err err; }; struct ia_gofo_msg_general_err { struct ia_gofo_msg_header_ack header; }; #pragma pack(pop) static inline void ia_gofo_msg_header_test_func(void) { CHECK_ALIGN64(struct ia_gofo_msg_header); CHECK_ALIGN64(struct 
ia_gofo_msg_header_ack); CHECK_ALIGN64(struct ia_gofo_msg_general_err); } #pragma pack(push, 1) enum ia_gofo_msg_link_streaming_mode { IA_GOFO_MSG_LINK_STREAMING_MODE_SOFF = 0, IA_GOFO_MSG_LINK_STREAMING_MODE_DOFF = 1, IA_GOFO_MSG_LINK_STREAMING_MODE_BCLM = 2, IA_GOFO_MSG_LINK_STREAMING_MODE_BCSM_FIX = 3, IA_GOFO_MSG_LINK_STREAMING_MODE_N }; enum ia_gofo_soc_pbk_instance_id { IA_GOFO_SOC_PBK_ID0 = 0, IA_GOFO_SOC_PBK_ID1 = 1, IA_GOFO_SOC_PBK_ID_N }; #define IA_GOFO_MSG_LINK_PBK_MAX_SLOTS (2U) struct ia_gofo_msg_indirect { struct ia_gofo_msg_header header; struct ia_gofo_tlv_header ref_header; ia_gofo_addr_t ref_msg_ptr; }; #pragma pack(pop) static inline void ia_gofo_msg_indirect_test_func(void) { CHECK_ALIGN64(struct ia_gofo_msg_indirect); } #pragma pack(push, 1) #define IA_GOFO_MSG_LOG_MAX_PARAMS (4U) #define IA_GOFO_MSG_LOG_DOC_FMT_ID_MIN (0U) #define IA_GOFO_MSG_LOG_DOC_FMT_ID_MAX (4095U) #define IA_GOFO_MSG_LOG_FMT_ID_INVALID (0xfffffffU) struct ia_gofo_msg_log_info { u16 log_counter; u8 msg_parameter_types; u8 is_out_of_order; u32 fmt_id; u32 params[IA_GOFO_MSG_LOG_MAX_PARAMS]; }; struct ia_gofo_msg_log_info_ts { u64 msg_ts; struct ia_gofo_msg_log_info log_info; }; struct ia_gofo_msg_log { struct ia_gofo_msg_header header; struct ia_gofo_msg_log_info_ts log_info_ts; }; #pragma pack(pop) static inline void ia_gofo_msg_log_test_func(void) { CHECK_ALIGN64(struct ia_gofo_msg_log); CHECK_ALIGN64(struct ia_gofo_msg_log_info); CHECK_ALIGN64(struct ia_gofo_msg_log_info_ts); } #define IA_GOFO_MSG_ABI_OUT_ACK_QUEUE_ID (0U) #define IA_GOFO_MSG_ABI_OUT_LOG_QUEUE_ID (1U) #define IA_GOFO_MSG_ABI_IN_DEV_QUEUE_ID (2U) #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/abi/ipu7_fw_config_abi.h000066400000000000000000000005341503071307200307770ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2021 - 2025 Intel Corporation */ #ifndef IPU7_FW_CONFIG_ABI_H #define IPU7_FW_CONFIG_ABI_H #include #define 
IPU_CONFIG_ABI_WDT_TIMER_DISABLED 0U #define IPU_CONFIG_ABI_CMD_TIMER_DISABLED 0U struct ipu7_wdt_abi { u32 wdt_timer1_us; u32 wdt_timer2_us; }; #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/abi/ipu7_fw_insys_config_abi.h000066400000000000000000000006201503071307200322200ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2021 - 2025 Intel Corporation */ #ifndef IPU7_FW_INSYS_CONFIG_ABI_H #define IPU7_FW_INSYS_CONFIG_ABI_H #include "ipu7_fw_boot_abi.h" #include "ipu7_fw_config_abi.h" #include "ipu7_fw_isys_abi.h" struct ipu7_insys_config { u32 timeout_val_ms; struct ia_gofo_logger_config logger_config; struct ipu7_wdt_abi wdt_config; }; #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/abi/ipu7_fw_isys_abi.h000066400000000000000000000426631503071307200305320ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2025 Intel Corporation */ #ifndef IPU7_FW_ISYS_ABI_H #define IPU7_FW_ISYS_ABI_H #include "ipu7_fw_common_abi.h" #include "ipu7_fw_isys_abi.h" #define IPU_INSYS_MAX_OUTPUT_QUEUES (3U) #define IPU_INSYS_STREAM_ID_MAX (16U) #define IPU_INSYS_MAX_INPUT_QUEUES (IPU_INSYS_STREAM_ID_MAX + 1U) #define IPU_INSYS_OUTPUT_FIRST_QUEUE (0U) #define IPU_INSYS_OUTPUT_LAST_QUEUE (IPU_INSYS_MAX_OUTPUT_QUEUES - 1U) #define IPU_INSYS_OUTPUT_MSG_QUEUE (IPU_INSYS_OUTPUT_FIRST_QUEUE) #define IPU_INSYS_OUTPUT_LOG_QUEUE (IPU_INSYS_OUTPUT_FIRST_QUEUE + 1U) #define IPU_INSYS_OUTPUT_RESERVED_QUEUE (IPU_INSYS_OUTPUT_LAST_QUEUE) #define IPU_INSYS_INPUT_FIRST_QUEUE (IPU_INSYS_MAX_OUTPUT_QUEUES) #define IPU_INSYS_INPUT_LAST_QUEUE \ (IPU_INSYS_INPUT_FIRST_QUEUE + IPU_INSYS_MAX_INPUT_QUEUES - 1U) #define IPU_INSYS_INPUT_DEV_QUEUE (IPU_INSYS_INPUT_FIRST_QUEUE) #define IPU_INSYS_INPUT_MSG_QUEUE (IPU_INSYS_INPUT_FIRST_QUEUE + 1U) #define IPU_INSYS_INPUT_MSG_MAX_QUEUE (IPU_INSYS_MAX_INPUT_QUEUES - 1U) #define MAX_IPINS (4U) #define MAX_OPINS (4U) #define 
MAX_OPINS_FOR_SINGLE_IPINS (3U) #define DEV_SEND_QUEUE_SIZE (IPU_INSYS_STREAM_ID_MAX) #define PIN_PLANES_MAX (4U) #define INSYS_MSG_ERR_STREAM_INSUFFICIENT_RESOURCES_INPUT \ INSYS_MSG_ERR_STREAM_INSUFFICIENT_RESOURCES typedef u64 ipu7_insys_return_token; enum ipu7_insys_resp_type { IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE = 0, IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK = 1, IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK = 2, IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK = 3, IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK = 4, IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK = 5, IPU_INSYS_RESP_TYPE_PIN_DATA_READY = 6, IPU_INSYS_RESP_TYPE_FRAME_SOF = 7, IPU_INSYS_RESP_TYPE_FRAME_EOF = 8, IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE = 9, IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE = 10, IPU_INSYS_RESP_TYPE_PWM_IRQ = 11, N_IPU_INSYS_RESP_TYPE }; enum ipu7_insys_send_type { IPU_INSYS_SEND_TYPE_STREAM_OPEN = 0, IPU_INSYS_SEND_TYPE_STREAM_START_AND_CAPTURE = 1, IPU_INSYS_SEND_TYPE_STREAM_CAPTURE = 2, IPU_INSYS_SEND_TYPE_STREAM_ABORT = 3, IPU_INSYS_SEND_TYPE_STREAM_FLUSH = 4, IPU_INSYS_SEND_TYPE_STREAM_CLOSE = 5, N_IPU_INSYS_SEND_TYPE }; enum ipu7_insys_mipi_vc { IPU_INSYS_MIPI_VC_0 = 0, IPU_INSYS_MIPI_VC_1 = 1, IPU_INSYS_MIPI_VC_2 = 2, IPU_INSYS_MIPI_VC_3 = 3, IPU_INSYS_MIPI_VC_4 = 4, IPU_INSYS_MIPI_VC_5 = 5, IPU_INSYS_MIPI_VC_6 = 6, IPU_INSYS_MIPI_VC_7 = 7, IPU_INSYS_MIPI_VC_8 = 8, IPU_INSYS_MIPI_VC_9 = 9, IPU_INSYS_MIPI_VC_10 = 10, IPU_INSYS_MIPI_VC_11 = 11, IPU_INSYS_MIPI_VC_12 = 12, IPU_INSYS_MIPI_VC_13 = 13, IPU_INSYS_MIPI_VC_14 = 14, IPU_INSYS_MIPI_VC_15 = 15, N_IPU_INSYS_MIPI_VC }; enum ipu7_insys_mipi_port { IPU_INSYS_MIPI_PORT_0 = 0, IPU_INSYS_MIPI_PORT_1 = 1, IPU_INSYS_MIPI_PORT_2 = 2, IPU_INSYS_MIPI_PORT_3 = 3, IPU_INSYS_MIPI_PORT_4 = 4, IPU_INSYS_MIPI_PORT_5 = 5, NA_IPU_INSYS_MIPI_PORT }; enum ipu7_insys_frame_format_type { IPU_INSYS_FRAME_FORMAT_NV11 = 0, IPU_INSYS_FRAME_FORMAT_NV12 = 1, IPU_INSYS_FRAME_FORMAT_NV12_16 = 2, IPU_INSYS_FRAME_FORMAT_NV12_TILEY = 3, IPU_INSYS_FRAME_FORMAT_NV16 = 4, 
IPU_INSYS_FRAME_FORMAT_NV21 = 5, IPU_INSYS_FRAME_FORMAT_NV61 = 6, IPU_INSYS_FRAME_FORMAT_YV12 = 7, IPU_INSYS_FRAME_FORMAT_YV16 = 8, IPU_INSYS_FRAME_FORMAT_YUV420 = 9, IPU_INSYS_FRAME_FORMAT_YUV420_10 = 10, IPU_INSYS_FRAME_FORMAT_YUV420_12 = 11, IPU_INSYS_FRAME_FORMAT_YUV420_14 = 12, IPU_INSYS_FRAME_FORMAT_YUV420_16 = 13, IPU_INSYS_FRAME_FORMAT_YUV422 = 14, IPU_INSYS_FRAME_FORMAT_YUV422_16 = 15, IPU_INSYS_FRAME_FORMAT_UYVY = 16, IPU_INSYS_FRAME_FORMAT_YUYV = 17, IPU_INSYS_FRAME_FORMAT_YUV444 = 18, IPU_INSYS_FRAME_FORMAT_YUV_LINE = 19, IPU_INSYS_FRAME_FORMAT_RAW8 = 20, IPU_INSYS_FRAME_FORMAT_RAW10 = 21, IPU_INSYS_FRAME_FORMAT_RAW12 = 22, IPU_INSYS_FRAME_FORMAT_RAW14 = 23, IPU_INSYS_FRAME_FORMAT_RAW16 = 24, IPU_INSYS_FRAME_FORMAT_RGB565 = 25, IPU_INSYS_FRAME_FORMAT_PLANAR_RGB888 = 26, IPU_INSYS_FRAME_FORMAT_RGBA888 = 27, IPU_INSYS_FRAME_FORMAT_QPLANE6 = 28, IPU_INSYS_FRAME_FORMAT_BINARY_8 = 29, IPU_INSYS_FRAME_FORMAT_Y_8 = 30, IPU_INSYS_FRAME_FORMAT_ARGB888 = 31, IPU_INSYS_FRAME_FORMAT_BGRA888 = 32, IPU_INSYS_FRAME_FORMAT_ABGR888 = 33, N_IPU_INSYS_FRAME_FORMAT }; #define IPU_INSYS_FRAME_FORMAT_RAW (IPU_INSYS_FRAME_FORMAT_RAW16) enum ipu7_insys_mipi_data_type { IPU_INSYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00, IPU_INSYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01, IPU_INSYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02, IPU_INSYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07, IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08, IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09, IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0a, IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0b, IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0c, IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0d, IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0e, IPU_INSYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0f, IPU_INSYS_MIPI_DATA_TYPE_NULL = 0x10, 
IPU_INSYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11, IPU_INSYS_MIPI_DATA_TYPE_EMBEDDED = 0x12, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17, IPU_INSYS_MIPI_DATA_TYPE_YUV420_8 = 0x18, IPU_INSYS_MIPI_DATA_TYPE_YUV420_10 = 0x19, IPU_INSYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1a, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1b, IPU_INSYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1c, IPU_INSYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1d, IPU_INSYS_MIPI_DATA_TYPE_YUV422_8 = 0x1e, IPU_INSYS_MIPI_DATA_TYPE_YUV422_10 = 0x1f, IPU_INSYS_MIPI_DATA_TYPE_RGB_444 = 0x20, IPU_INSYS_MIPI_DATA_TYPE_RGB_555 = 0x21, IPU_INSYS_MIPI_DATA_TYPE_RGB_565 = 0x22, IPU_INSYS_MIPI_DATA_TYPE_RGB_666 = 0x23, IPU_INSYS_MIPI_DATA_TYPE_RGB_888 = 0x24, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27, IPU_INSYS_MIPI_DATA_TYPE_RAW_6 = 0x28, IPU_INSYS_MIPI_DATA_TYPE_RAW_7 = 0x29, IPU_INSYS_MIPI_DATA_TYPE_RAW_8 = 0x2a, IPU_INSYS_MIPI_DATA_TYPE_RAW_10 = 0x2b, IPU_INSYS_MIPI_DATA_TYPE_RAW_12 = 0x2c, IPU_INSYS_MIPI_DATA_TYPE_RAW_14 = 0x2d, IPU_INSYS_MIPI_DATA_TYPE_RAW_16 = 0x2e, IPU_INSYS_MIPI_DATA_TYPE_BINARY_8 = 0x2f, IPU_INSYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30, IPU_INSYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31, IPU_INSYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32, IPU_INSYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33, IPU_INSYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34, IPU_INSYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35, IPU_INSYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36, IPU_INSYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3a, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3b, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3c, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3d, 
IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3e, IPU_INSYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3f, N_IPU_INSYS_MIPI_DATA_TYPE = 0x40 }; enum ipu7_insys_mipi_dt_rename_mode { IPU_INSYS_MIPI_DT_NO_RENAME = 0, IPU_INSYS_MIPI_DT_RENAMED_MODE = 1, N_IPU_INSYS_MIPI_DT_MODE }; #define IPU_INSYS_SEND_MSG_ENABLED 1U #define IPU_INSYS_SEND_MSG_DISABLED 0U #define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF BIT(0) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF BIT(1) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF BIT(2) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF BIT(3) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF_DISCARDED BIT(4) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF_DISCARDED BIT(5) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF_DISCARDED BIT(6) #define IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF_DISCARDED BIT(7) #define IPU_INSYS_STREAM_SYNC_MSG_ENABLE_MSG_SEND_RESP ( \ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF_DISCARDED | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_EOF_DISCARDED) #define IPU_INSYS_STREAM_SYNC_MSG_ENABLE_MSG_SEND_IRQ ( \ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF_DISCARDED | \ IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_EOF_DISCARDED) #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_OPEN_DONE BIT(0) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_OPEN_DONE BIT(1) #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_START_ACK BIT(2) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_START_ACK BIT(3) #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_CLOSE_ACK BIT(4) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_CLOSE_ACK BIT(5) #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_FLUSH_ACK BIT(6) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_FLUSH_ACK BIT(7) #define IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_ABORT_ACK BIT(8) #define IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_ABORT_ACK BIT(9) #define IPU_INSYS_STREAM_ENABLE_MSG_SEND_RESP ( \ 
IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_OPEN_DONE | \ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_START_ACK | \ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_CLOSE_ACK | \ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_FLUSH_ACK | \ IPU_INSYS_STREAM_MSG_SEND_RESP_STREAM_ABORT_ACK) #define IPU_INSYS_STREAM_ENABLE_MSG_SEND_IRQ ( \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_OPEN_DONE | \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_START_ACK | \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_CLOSE_ACK | \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_FLUSH_ACK | \ IPU_INSYS_STREAM_MSG_SEND_IRQ_STREAM_ABORT_ACK) #define IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_ACK BIT(0) #define IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_ACK BIT(1) #define IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_DONE BIT(2) #define IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_DONE BIT(3) #define IPU_INSYS_FRAME_MSG_SEND_RESP_PIN_DATA_READY BIT(4) #define IPU_INSYS_FRAME_MSG_SEND_IRQ_PIN_DATA_READY BIT(5) #define IPU_INSYS_FRAME_ENABLE_MSG_SEND_RESP ( \ IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_ACK | \ IPU_INSYS_FRAME_MSG_SEND_RESP_CAPTURE_DONE | \ IPU_INSYS_FRAME_MSG_SEND_RESP_PIN_DATA_READY) #define IPU_INSYS_FRAME_ENABLE_MSG_SEND_IRQ ( \ IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_ACK | \ IPU_INSYS_FRAME_MSG_SEND_IRQ_CAPTURE_DONE | \ IPU_INSYS_FRAME_MSG_SEND_IRQ_PIN_DATA_READY) enum ipu7_insys_output_link_dest { IPU_INSYS_OUTPUT_LINK_DEST_MEM = 0, IPU_INSYS_OUTPUT_LINK_DEST_PSYS = 1, IPU_INSYS_OUTPUT_LINK_DEST_IPU_EXTERNAL = 2 }; enum ipu7_insys_dpcm_type { IPU_INSYS_DPCM_TYPE_DISABLED = 0, IPU_INSYS_DPCM_TYPE_10_8_10 = 1, IPU_INSYS_DPCM_TYPE_12_8_12 = 2, IPU_INSYS_DPCM_TYPE_12_10_12 = 3, N_IPU_INSYS_DPCM_TYPE }; enum ipu7_insys_dpcm_predictor { IPU_INSYS_DPCM_PREDICTOR_1 = 0, IPU_INSYS_DPCM_PREDICTOR_2 = 1, N_IPU_INSYS_DPCM_PREDICTOR }; enum ipu7_insys_send_queue_token_flag { IPU_INSYS_SEND_QUEUE_TOKEN_FLAG_NONE = 0, IPU_INSYS_SEND_QUEUE_TOKEN_FLAG_FLUSH_FORCE = 1 }; #pragma pack(push, 1) struct ipu7_insys_resolution { u32 width; u32 height; }; struct 
ipu7_insys_capture_output_pin_payload { u64 user_token; ia_gofo_addr_t addr; u8 pad[4]; }; struct ipu7_insys_output_link { u32 buffer_lines; u16 foreign_key; u16 granularity_pointer_update; u8 msg_link_streaming_mode; u8 pbk_id; u8 pbk_slot_id; u8 dest; u8 use_sw_managed; u8 is_snoop; u8 pad[2]; }; struct ipu7_insys_output_cropping { u16 line_top; u16 line_bottom; #ifdef IPU8_INSYS_NEW_ABI u16 column_left; u16 column_right; #endif }; struct ipu7_insys_output_dpcm { u8 enable; u8 type; u8 predictor; u8 pad; }; #ifdef IPU8_INSYS_NEW_ABI enum ipu_insys_cfa_dim { IPU_INSYS_CFA_DIM_2x2 = 0, IPU_INSYS_CFA_DIM_4x4 = 1, N_IPU_INSYS_CFA_DIM }; #define IPU_INSYS_MAX_BINNING_FACTOR (4U) #define IPU_INSYS_UPIPE_MAX_OUTPUTS (2U) #define IPU_INSYS_UPIPE_MAX_UOB_FIFO_ALLOC (4U) #define IPU_INSYS_UPIPE_STREAM_CFG_BUF_SIZE (32U) #define IPU_INSYS_UPIPE_FRAME_CFG_BUF_SIZE (36U) struct ipu7_insys_upipe_output_pin { ia_gofo_addr_t opaque_pin_cfg; u16 plane_offset_1; u16 plane_offset_2; u8 single_uob_fifo; u8 shared_uob_fifo; u8 pad[2]; }; struct ipu7_insys_capture_output_pin_cfg { struct ipu7_insys_capture_output_pin_payload pin_payload; ia_gofo_addr_t upipe_capture_cfg; }; #endif struct ipu7_insys_output_pin { struct ipu7_insys_output_link link; struct ipu7_insys_output_cropping crop; struct ipu7_insys_output_dpcm dpcm; #ifdef IPU8_INSYS_NEW_ABI struct ipu7_insys_upipe_output_pin upipe_pin_cfg; #endif u32 stride; u16 ft; #ifdef IPU8_INSYS_NEW_ABI u8 upipe_enable; #endif u8 send_irq; u8 input_pin_id; u8 early_ack_en; #ifdef IPU8_INSYS_NEW_ABI u8 cfa_dim; u8 binning_factor; #else u8 pad[3]; #endif }; struct ipu7_insys_input_pin { struct ipu7_insys_resolution input_res; u16 sync_msg_map; u8 dt; u8 disable_mipi_unpacking; u8 dt_rename_mode; u8 mapped_dt; u8 pad[2]; }; struct ipu7_insys_stream_cfg { struct ipu7_insys_input_pin input_pins[MAX_IPINS]; struct ipu7_insys_output_pin output_pins[MAX_OPINS]; u16 stream_msg_map; u8 port_id; u8 vc; u8 nof_input_pins; u8 nof_output_pins; u8 pad[2]; 
}; struct ipu7_insys_buffset { #ifdef IPU8_INSYS_NEW_ABI struct ipu7_insys_capture_output_pin_cfg output_pins[MAX_OPINS]; #else struct ipu7_insys_capture_output_pin_payload output_pins[MAX_OPINS]; #endif u8 capture_msg_map; u8 frame_id; u8 skip_frame; u8 pad[5]; }; struct ipu7_insys_resp { u64 buf_id; struct ipu7_insys_capture_output_pin_payload pin; struct ia_gofo_msg_err error_info; u32 timestamp[2]; u8 type; u8 msg_link_streaming_mode; u8 stream_id; u8 pin_id; u8 frame_id; u8 skip_frame; u8 pad[2]; }; struct ipu7_insys_resp_queue_token { struct ipu7_insys_resp resp_info; }; struct ipu7_insys_send_queue_token { u64 buf_handle; ia_gofo_addr_t addr; u16 stream_id; u8 send_type; u8 flag; }; #pragma pack(pop) static inline void ipu7_insys_types_test_func(void) { CHECK_ALIGN32(struct ipu7_insys_resolution); CHECK_ALIGN64(struct ipu7_insys_capture_output_pin_payload); CHECK_ALIGN32(struct ipu7_insys_output_pin); CHECK_ALIGN32(struct ipu7_insys_input_pin); CHECK_ALIGN32(struct ipu7_insys_output_cropping); CHECK_ALIGN32(struct ipu7_insys_stream_cfg); CHECK_ALIGN64(struct ipu7_insys_buffset); CHECK_ALIGN64(struct ipu7_insys_resp); CHECK_ALIGN64(struct ipu7_insys_resp_queue_token); CHECK_ALIGN64(struct ipu7_insys_send_queue_token); CHECK_ALIGN32(struct ipu7_insys_output_link); } enum insys_msg_err_stream { INSYS_MSG_ERR_STREAM_OK = IA_GOFO_MSG_ERR_OK, INSYS_MSG_ERR_STREAM_STREAM_ID = 1, INSYS_MSG_ERR_STREAM_MAX_OPINS = 2, INSYS_MSG_ERR_STREAM_MAX_IPINS = 3, INSYS_MSG_ERR_STREAM_STREAM_MESSAGES_MAP = 4, INSYS_MSG_ERR_STREAM_SYNC_MESSAGES_MAP = 5, INSYS_MSG_ERR_STREAM_SENSOR_TYPE = 6, INSYS_MSG_ERR_STREAM_FOREIGN_KEY = 7, INSYS_MSG_ERR_STREAM_STREAMING_MODE = 8, INSYS_MSG_ERR_STREAM_DPCM_EN = 9, INSYS_MSG_ERR_STREAM_DPCM_TYPE = 10, INSYS_MSG_ERR_STREAM_DPCM_PREDICTOR = 11, INSYS_MSG_ERR_STREAM_GRANULARITY_POINTER_UPDATE = 12, INSYS_MSG_ERR_STREAM_MPF_LUT_ENTRY_RESOURCES_BUSY = 13, INSYS_MSG_ERR_STREAM_MPF_DEV_ID = 14, INSYS_MSG_ERR_STREAM_BUFFER_LINES = 15, 
INSYS_MSG_ERR_STREAM_IPIN_ID = 16, INSYS_MSG_ERR_STREAM_DATA_TYPE = 17, INSYS_MSG_ERR_STREAM_STREAMING_PROTOCOL_STATE = 18, INSYS_MSG_ERR_STREAM_SYSCOM_FLUSH = 19, INSYS_MSG_ERR_STREAM_MIPI_VC = 20, INSYS_MSG_ERR_STREAM_STREAM_SRC = 21, INSYS_MSG_ERR_STREAM_PBK_ID = 22, INSYS_MSG_ERR_STREAM_CMD_QUEUE_DEALLOCATE = 23, INSYS_MSG_ERR_STREAM_INSUFFICIENT_RESOURCES = 24, INSYS_MSG_ERR_STREAM_IPIN_CONFIGURATION = 25, INSYS_MSG_ERR_STREAM_INVALID_STATE = 26, INSYS_MSG_ERR_STREAM_SW_MANAGED = 27, INSYS_MSG_ERR_STREAM_PBK_SLOT_ID = 28, INSYS_MSG_ERR_STREAM_FLUSH_TIMEOUT = 29, INSYS_MSG_ERR_STREAM_IPIN_WIDTH = 30, INSYS_MSG_ERR_STREAM_IPIN_HEIGHT = 31, INSYS_MSG_ERR_STREAM_OUTPUT_PIN_EARLY_ACK_EN = 32, INSYS_MSG_ERR_STREAM_INCONSISTENT_PARAMS = 33, INSYS_MSG_ERR_STREAM_PLANE_COUNT = 34, INSYS_MSG_ERR_STREAM_FRAME_FORMAT_TYPE = 35, INSYS_MSG_ERR_STREAM_INSUFFICIENT_RESOURCES_OUTPUT = 36, INSYS_MSG_ERR_STREAM_N }; enum insys_msg_err_capture { INSYS_MSG_ERR_CAPTURE_OK = IA_GOFO_MSG_ERR_OK, INSYS_MSG_ERR_CAPTURE_STREAM_ID = 1, INSYS_MSG_ERR_CAPTURE_PAYLOAD_PTR = 2, INSYS_MSG_ERR_CAPTURE_MEM_SLOT = 3, INSYS_MSG_ERR_CAPTURE_STREAMING_MODE = 4, INSYS_MSG_ERR_CAPTURE_AVAILABLE_CMD_SLOT = 5, INSYS_MSG_ERR_CAPTURE_CONSUMED_CMD_SLOT = 6, INSYS_MSG_ERR_CAPTURE_CMD_SLOT_PAYLOAD_PTR = 7, INSYS_MSG_ERR_CAPTURE_CMD_PREPARE = 8, INSYS_MSG_ERR_CAPTURE_OUTPUT_PIN = 9, INSYS_MSG_ERR_CAPTURE_SYNC_FRAME_DROP = 10, INSYS_MSG_ERR_CAPTURE_FRAME_MESSAGES_MAP = 11, INSYS_MSG_ERR_CAPTURE_TIMEOUT = 12, INSYS_MSG_ERR_CAPTURE_INVALID_STREAM_STATE = 13, INSYS_MSG_ERR_CAPTURE_HW_ERR_MULTIBIT_PH_ERROR_DETECTED = 14, INSYS_MSG_ERR_CAPTURE_HW_ERR_PAYLOAD_CRC_ERROR = 15, INSYS_MSG_ERR_CAPTURE_HW_ERR_INPUT_DATA_LOSS_ELASTIC_FIFO_OVFL = 16, INSYS_MSG_ERR_CAPTURE_HW_ERR_PIXEL_BUFFER_OVERFLOW = 17, INSYS_MSG_ERR_CAPTURE_HW_ERR_BAD_FRAME_DIM = 18, INSYS_MSG_ERR_CAPTURE_HW_ERR_PHY_SYNC_ERR = 19, INSYS_MSG_ERR_CAPTURE_HW_ERR_SECURE_TOUCH = 20, INSYS_MSG_ERR_CAPTURE_HW_ERR_MASTER_SLAVE_SYNC_ERR = 21, 
INSYS_MSG_ERR_CAPTURE_FRAME_SKIP_ERR = 22, INSYS_MSG_ERR_CAPTURE_FE_INPUT_FIFO_OVERFLOW_ERR = 23, INSYS_MSG_ERR_CAPTURE_CMD_SUBMIT_TO_HW = 24, INSYS_MSG_ERR_CAPTURE_N }; enum insys_msg_err_groups { INSYS_MSG_ERR_GROUP_RESERVED = IA_GOFO_MSG_ERR_GROUP_RESERVED, INSYS_MSG_ERR_GROUP_GENERAL = IA_GOFO_MSG_ERR_GROUP_GENERAL, INSYS_MSG_ERR_GROUP_STREAM = 2, INSYS_MSG_ERR_GROUP_CAPTURE = 3, INSYS_MSG_ERR_GROUP_N, }; #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/abi/ipu7_fw_msg_abi.h000066400000000000000000000321411503071307200303170ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2025 Intel Corporation */ #ifndef IPU7_FW_MSG_ABI_H #define IPU7_FW_MSG_ABI_H #include "ipu7_fw_common_abi.h" #pragma pack(push, 1) enum ipu7_msg_type { IPU_MSG_TYPE_RESERVED = IA_GOFO_MSG_TYPE_RESERVED, IPU_MSG_TYPE_INDIRECT = IA_GOFO_MSG_TYPE_INDIRECT, IPU_MSG_TYPE_DEV_LOG = IA_GOFO_MSG_TYPE_LOG, IPU_MSG_TYPE_GENERAL_ERR = IA_GOFO_MSG_TYPE_GENERAL_ERR, IPU_MSG_TYPE_DEV_OPEN = 4, IPU_MSG_TYPE_DEV_OPEN_ACK = 5, IPU_MSG_TYPE_GRAPH_OPEN = 6, IPU_MSG_TYPE_GRAPH_OPEN_ACK = 7, IPU_MSG_TYPE_TASK_REQ = 8, IPU_MSG_TYPE_TASK_DONE = 9, IPU_MSG_TYPE_GRAPH_CLOSE = 10, IPU_MSG_TYPE_GRAPH_CLOSE_ACK = 11, IPU_MSG_TYPE_DEV_CLOSE = 12, IPU_MSG_TYPE_DEV_CLOSE_ACK = 13, IPU_MSG_TYPE_TERM_EVENT = 14, IPU_MSG_TYPE_N, }; #define IPU_MSG_MAX_NODE_TERMS (64U) #define IPU_MSG_MAX_FRAGS (7U) enum ipu7_msg_node_type { IPU_MSG_NODE_TYPE_PAD = 0, IPU_MSG_NODE_TYPE_BASE, IPU_MSG_NODE_TYPE_N }; #define IPU_MSG_NODE_MAX_DEVICES (128U) #define DEB_NUM_UINT32 (IPU_MSG_NODE_MAX_DEVICES / (sizeof(u32) * 8U)) typedef u32 ipu7_msg_teb_t[2]; typedef u32 ipu7_msg_deb_t[DEB_NUM_UINT32]; #define IPU_MSG_NODE_MAX_ROUTE_ENABLES (128U) #define RBM_NUM_UINT32 (IPU_MSG_NODE_MAX_ROUTE_ENABLES / (sizeof(u32) * 8U)) typedef u32 ipu7_msg_rbm_t[RBM_NUM_UINT32]; enum ipu7_msg_node_profile_type { IPU_MSG_NODE_PROFILE_TYPE_PAD = 0, IPU_MSG_NODE_PROFILE_TYPE_BASE, 
IPU_MSG_NODE_PROFILE_TYPE_CB, IPU_MSG_NODE_PROFILE_TYPE_N }; struct ipu7_msg_node_profile { struct ia_gofo_tlv_header tlv_header; ipu7_msg_teb_t teb; }; struct ipu7_msg_cb_profile { struct ipu7_msg_node_profile profile_base; ipu7_msg_deb_t deb; ipu7_msg_rbm_t rbm; ipu7_msg_rbm_t reb; }; #define IPU_MSG_NODE_MAX_PROFILES (2U) #define IPU_MSG_NODE_DEF_PROFILE_IDX (0U) #define IPU_MSG_NODE_RSRC_ID_EXT_IP (0xffU) #define IPU_MSG_NODE_DONT_CARE_TEB_HI (0xffffffffU) #define IPU_MSG_NODE_DONT_CARE_TEB_LO (0xffffffffU) #define IPU_MSG_NODE_RSRC_ID_IS (0xfeU) struct ipu7_msg_node { struct ia_gofo_tlv_header tlv_header; u8 node_rsrc_id; u8 node_ctx_id; u8 num_frags; u8 reserved[1]; struct ia_gofo_tlv_list profiles_list; struct ia_gofo_tlv_list terms_list; struct ia_gofo_tlv_list node_options; }; enum ipu7_msg_node_option_types { IPU_MSG_NODE_OPTION_TYPES_PADDING = 0, IPU_MSG_NODE_OPTION_TYPES_N }; #pragma pack(pop) static inline void ipu7_msg_node_test_func(void) { CHECK_ALIGN32(struct ipu7_msg_node); CHECK_ALIGN32(struct ipu7_msg_node_profile); CHECK_ALIGN32(struct ipu7_msg_cb_profile); } #pragma pack(push, 1) enum ipu7_msg_link_type { IPU_MSG_LINK_TYPE_PAD = 0, IPU_MSG_LINK_TYPE_GENERIC = 1, IPU_MSG_LINK_TYPE_N }; enum ipu7_msg_link_option_types { IPU_MSG_LINK_OPTION_TYPES_PADDING = 0, IPU_MSG_LINK_OPTION_TYPES_CMPRS = 1, IPU_MSG_LINK_OPTION_TYPES_N }; enum ipu7_msg_link_cmprs_option_bit_depth { IPU_MSG_LINK_CMPRS_OPTION_8BPP = 0, IPU_MSG_LINK_CMPRS_OPTION_10BPP = 1, IPU_MSG_LINK_CMPRS_OPTION_12BPP = 2, }; #define IPU_MSG_LINK_CMPRS_SPACE_SAVING_DENOM (128U) #define IPU_MSG_LINK_CMPRS_LOSSY_CFG_PAYLOAD_SIZE (5U) #define IPU_MSG_LINK_CMPRS_SPACE_SAVING_NUM_MAX \ (IPU_MSG_LINK_CMPRS_SPACE_SAVING_DENOM - 1U) struct ipu7_msg_link_cmprs_plane_desc { u8 plane_enable; u8 cmprs_enable; u8 encoder_plane_id; u8 decoder_plane_id; u8 cmprs_is_lossy; u8 cmprs_is_footprint; u8 bit_depth; u8 space_saving_numerator; u32 pixels_offset; u32 ts_offset; u32 tile_row_to_tile_row_stride; u32 
rows_of_tiles; u32 lossy_cfg[IPU_MSG_LINK_CMPRS_LOSSY_CFG_PAYLOAD_SIZE]; }; #define IPU_MSG_LINK_CMPRS_MAX_PLANES (2U) #define IPU_MSG_LINK_CMPRS_NO_ALIGN_INTERVAL (0U) #define IPU_MSG_LINK_CMPRS_MIN_ALIGN_INTERVAL (16U) #define IPU_MSG_LINK_CMPRS_MAX_ALIGN_INTERVAL (1024U) struct ipu7_msg_link_cmprs_option { struct ia_gofo_tlv_header header; u32 cmprs_buf_size; u16 align_interval; u8 reserved[2]; struct ipu7_msg_link_cmprs_plane_desc plane_descs[2]; }; struct ipu7_msg_link_ep { u8 node_ctx_id; u8 term_id; }; struct ipu7_msg_link_ep_pair { struct ipu7_msg_link_ep ep_src; struct ipu7_msg_link_ep ep_dst; }; #define IPU_MSG_LINK_FOREIGN_KEY_NONE (65535U) #define IPU_MSG_LINK_FOREIGN_KEY_MAX (64U) #define IPU_MSG_LINK_PBK_ID_DONT_CARE (255U) #define IPU_MSG_LINK_PBK_SLOT_ID_DONT_CARE (255U) #define IPU_MSG_LINK_TERM_ID_DONT_CARE (0xffU) struct ipu7_msg_link { struct ia_gofo_tlv_header tlv_header; struct ipu7_msg_link_ep_pair endpoints; u16 foreign_key; u8 streaming_mode; u8 pbk_id; u8 pbk_slot_id; u8 delayed_link; u8 reserved[2]; struct ia_gofo_tlv_list link_options; }; #pragma pack(pop) static inline void ipu7_msg_abi_link_test_func(void) { CHECK_ALIGN32(struct ipu7_msg_link); CHECK_ALIGN32(struct ipu7_msg_link_cmprs_option); CHECK_ALIGN32(struct ipu7_msg_link_cmprs_plane_desc); } enum ipu7_msg_dev_state { IPU_MSG_DEV_STATE_CLOSED = 0, IPU_MSG_DEV_STATE_OPEN_WAIT = 1, IPU_MSG_DEV_STATE_OPEN = 2, IPU_MSG_DEV_STATE_CLOSE_WAIT = 3, IPU_MSG_DEV_STATE_N }; enum ipu7_msg_graph_state { IPU_MSG_GRAPH_STATE_CLOSED = 0, IPU_MSG_GRAPH_STATE_OPEN_WAIT = 1, IPU_MSG_GRAPH_STATE_OPEN = 2, IPU_MSG_GRAPH_STATE_CLOSE_WAIT = 3, IPU_MSG_GRAPH_STATE_N }; enum ipu7_msg_task_state { IPU_MSG_TASK_STATE_DONE = 0, IPU_MSG_TASK_STATE_WAIT_DONE = 1, IPU_MSG_TASK_STATE_N }; enum ipu7_msg_err_groups { IPU_MSG_ERR_GROUP_RESERVED = IA_GOFO_MSG_ERR_GROUP_RESERVED, IPU_MSG_ERR_GROUP_GENERAL = IA_GOFO_MSG_ERR_GROUP_GENERAL, IPU_MSG_ERR_GROUP_DEVICE = 2, IPU_MSG_ERR_GROUP_GRAPH = 3, 
IPU_MSG_ERR_GROUP_TASK = 4, IPU_MSG_ERR_GROUP_N, }; #pragma pack(push, 1) struct ipu7_msg_task { struct ia_gofo_msg_header header; u8 graph_id; u8 profile_idx; u8 node_ctx_id; u8 frame_id; u8 frag_id; u8 req_done_msg; u8 req_done_irq; u8 reserved[1]; ipu7_msg_teb_t payload_reuse_bm; ia_gofo_addr_t term_buffers[IPU_MSG_MAX_NODE_TERMS]; }; struct ipu7_msg_task_done { struct ia_gofo_msg_header_ack header; u8 graph_id; u8 frame_id; u8 node_ctx_id; u8 profile_idx; u8 frag_id; u8 reserved[3]; }; enum ipu7_msg_err_task { IPU_MSG_ERR_TASK_OK = IA_GOFO_MSG_ERR_OK, IPU_MSG_ERR_TASK_GRAPH_ID = 1, IPU_MSG_ERR_TASK_NODE_CTX_ID = 2, IPU_MSG_ERR_TASK_PROFILE_IDX = 3, IPU_MSG_ERR_TASK_CTX_MEMORY_TASK = 4, IPU_MSG_ERR_TASK_TERM_PAYLOAD_PTR = 5, IPU_MSG_ERR_TASK_FRAME_ID = 6, IPU_MSG_ERR_TASK_FRAG_ID = 7, IPU_MSG_ERR_TASK_EXEC_EXT = 8, IPU_MSG_ERR_TASK_EXEC_SBX = 9, IPU_MSG_ERR_TASK_EXEC_INT = 10, IPU_MSG_ERR_TASK_EXEC_UNKNOWN = 11, IPU_MSG_ERR_TASK_N }; #pragma pack(pop) static inline void ipu7_msg_task_test_func(void) { CHECK_ALIGN64(struct ipu7_msg_task); CHECK_ALIGN64(struct ipu7_msg_task_done); CHECK_ALIGN32(struct ipu7_msg_link_cmprs_option); } #pragma pack(push, 1) enum ipu7_msg_term_type { IPU_MSG_TERM_TYPE_PAD = 0, IPU_MSG_TERM_TYPE_BASE, IPU_MSG_TERM_TYPE_N, }; #define IPU_MSG_TERM_EVENT_TYPE_NONE 0U #define IPU_MSG_TERM_EVENT_TYPE_PROGRESS 1U #define IPU_MSG_TERM_EVENT_TYPE_N (IPU_MSG_TERM_EVENT_TYPE_PROGRESS + 1U) struct ipu7_msg_term { struct ia_gofo_tlv_header tlv_header; u8 term_id; u8 event_req_bm; u8 reserved[2]; u32 payload_size; struct ia_gofo_tlv_list term_options; }; enum ipu7_msg_term_option_types { IPU_MSG_TERM_OPTION_TYPES_PADDING = 0, IPU_MSG_TERM_OPTION_TYPES_N }; struct ipu7_msg_term_event { struct ia_gofo_msg_header header; u8 graph_id; u8 frame_id; u8 node_ctx_id; u8 profile_idx; u8 frag_id; u8 term_id; u8 event_type; u8 reserved[1]; u64 event_ts; }; #pragma pack(pop) static inline void ipu7_msg_term_test_func(void) { CHECK_ALIGN32(struct ipu7_msg_term); 
CHECK_ALIGN64(struct ipu7_msg_term_event); } #pragma pack(push, 1) #define IPU_MSG_DEVICE_SEND_MSG_ENABLED 1U #define IPU_MSG_DEVICE_SEND_MSG_DISABLED 0U #define IPU_MSG_DEVICE_OPEN_SEND_RESP BIT(0) #define IPU_MSG_DEVICE_OPEN_SEND_IRQ BIT(1) #define IPU_MSG_DEVICE_CLOSE_SEND_RESP BIT(0) #define IPU_MSG_DEVICE_CLOSE_SEND_IRQ BIT(1) struct ipu7_msg_dev_open { struct ia_gofo_msg_header header; u32 max_graphs; u8 dev_msg_map; u8 enable_power_gating; u8 reserved[2]; }; struct ipu7_msg_dev_open_ack { struct ia_gofo_msg_header_ack header; }; struct ipu7_msg_dev_close { struct ia_gofo_msg_header header; u8 dev_msg_map; u8 reserved[7]; }; struct ipu7_msg_dev_close_ack { struct ia_gofo_msg_header_ack header; }; enum ipu7_msg_err_device { IPU_MSG_ERR_DEVICE_OK = IA_GOFO_MSG_ERR_OK, IPU_MSG_ERR_DEVICE_MAX_GRAPHS = 1, IPU_MSG_ERR_DEVICE_MSG_MAP = 2, IPU_MSG_ERR_DEVICE_N }; #pragma pack(pop) static inline void ipu7_msg_device_test_func(void) { CHECK_ALIGN32(struct ia_gofo_version_s); CHECK_ALIGN64(struct ipu7_msg_dev_open); CHECK_ALIGN64(struct ipu7_msg_dev_open_ack); CHECK_ALIGN64(struct ipu7_msg_dev_close); CHECK_ALIGN64(struct ipu7_msg_dev_close_ack); } #pragma pack(push, 1) #define IPU_MSG_GRAPH_ID_UNKNOWN (0xffU) #define IPU_MSG_GRAPH_SEND_MSG_ENABLED 1U #define IPU_MSG_GRAPH_SEND_MSG_DISABLED 0U #define IPU_MSG_GRAPH_OPEN_SEND_RESP BIT(0) #define IPU_MSG_GRAPH_OPEN_SEND_IRQ BIT(1) #define IPU_MSG_GRAPH_CLOSE_SEND_RESP BIT(0) #define IPU_MSG_GRAPH_CLOSE_SEND_IRQ BIT(1) struct ipu7_msg_graph_open { struct ia_gofo_msg_header header; struct ia_gofo_tlv_list nodes; struct ia_gofo_tlv_list links; u8 graph_id; u8 graph_msg_map; u8 reserved[6]; }; enum ipu7_msg_graph_ack_option_types { IPU_MSG_GRAPH_ACK_OPTION_TYPES_PADDING = 0, IPU_MSG_GRAPH_ACK_TASK_Q_INFO, IPU_MSG_GRAPH_ACK_OPTION_TYPES_N }; struct ipu7_msg_graph_open_ack_task_q_info { struct ia_gofo_tlv_header header; u8 node_ctx_id; u8 q_id; u8 reserved[2]; }; struct ipu7_msg_graph_open_ack { struct ia_gofo_msg_header_ack 
header; u8 graph_id; u8 reserved[7]; }; struct ipu7_msg_graph_close { struct ia_gofo_msg_header header; u8 graph_id; u8 graph_msg_map; u8 reserved[6]; }; struct ipu7_msg_graph_close_ack { struct ia_gofo_msg_header_ack header; u8 graph_id; u8 reserved[7]; }; enum ipu7_msg_err_graph { IPU_MSG_ERR_GRAPH_OK = IA_GOFO_MSG_ERR_OK, IPU_MSG_ERR_GRAPH_GRAPH_STATE = 1, IPU_MSG_ERR_GRAPH_MAX_GRAPHS = 2, IPU_MSG_ERR_GRAPH_GRAPH_ID = 3, IPU_MSG_ERR_GRAPH_NODE_CTX_ID = 4, IPU_MSG_ERR_GRAPH_NODE_RSRC_ID = 5, IPU_MSG_ERR_GRAPH_PROFILE_IDX = 6, IPU_MSG_ERR_GRAPH_TERM_ID = 7, IPU_MSG_ERR_GRAPH_TERM_PAYLOAD_SIZE = 8, IPU_MSG_ERR_GRAPH_LINK_NODE_CTX_ID = 9, IPU_MSG_ERR_GRAPH_LINK_TERM_ID = 10, IPU_MSG_ERR_GRAPH_PROFILE_TYPE = 11, IPU_MSG_ERR_GRAPH_NUM_FRAGS = 12, IPU_MSG_ERR_GRAPH_QUEUE_ID_USAGE = 13, IPU_MSG_ERR_GRAPH_QUEUE_OPEN = 14, IPU_MSG_ERR_GRAPH_QUEUE_CLOSE = 15, IPU_MSG_ERR_GRAPH_QUEUE_ID_TASK_REQ_MISMATCH = 16, IPU_MSG_ERR_GRAPH_CTX_MEMORY_FGRAPH = 17, IPU_MSG_ERR_GRAPH_CTX_MEMORY_NODE = 18, IPU_MSG_ERR_GRAPH_CTX_MEMORY_NODE_PROFILE = 19, IPU_MSG_ERR_GRAPH_CTX_MEMORY_TERM = 20, IPU_MSG_ERR_GRAPH_CTX_MEMORY_LINK = 21, IPU_MSG_ERR_GRAPH_CTX_MSG_MAP = 22, IPU_MSG_ERR_GRAPH_CTX_FOREIGN_KEY = 23, IPU_MSG_ERR_GRAPH_CTX_STREAMING_MODE = 24, IPU_MSG_ERR_GRAPH_CTX_PBK_RSRC = 25, IPU_MSG_ERR_GRAPH_UNSUPPORTED_EVENT_TYPE = 26, IPU_MSG_ERR_GRAPH_TOO_MANY_EVENTS = 27, IPU_MSG_ERR_GRAPH_CTX_MEMORY_CMPRS = 28, IPU_MSG_ERR_GRAPH_CTX_CMPRS_ALIGN_INTERVAL = 29, IPU_MSG_ERR_GRAPH_CTX_CMPRS_PLANE_ID = 30, IPU_MSG_ERR_GRAPH_CTX_CMPRS_UNSUPPORTED_MODE = 31, IPU_MSG_ERR_GRAPH_CTX_CMPRS_BIT_DEPTH = 32, IPU_MSG_ERR_GRAPH_CTX_CMPRS_STRIDE_ALIGNMENT = 33, IPU_MSG_ERR_GRAPH_CTX_CMPRS_SUB_BUFFER_ALIGNMENT = 34, IPU_MSG_ERR_GRAPH_CTX_CMPRS_LAYOUT_ORDER = 35, IPU_MSG_ERR_GRAPH_CTX_CMPRS_LAYOUT_OVERLAP = 36, IPU_MSG_ERR_GRAPH_CTX_CMPRS_BUFFER_TOO_SMALL = 37, IPU_MSG_ERR_GRAPH_CTX_DELAYED_LINK = 38, IPU_MSG_ERR_GRAPH_N }; #pragma pack(pop) static inline void ipu7_msg_graph_test_func(void) { 
CHECK_ALIGN64(struct ipu7_msg_graph_open); CHECK_ALIGN64(struct ipu7_msg_graph_open_ack); CHECK_ALIGN64(struct ipu7_msg_graph_close); CHECK_ALIGN64(struct ipu7_msg_graph_close_ack); CHECK_ALIGN32(struct ipu7_msg_graph_open_ack_task_q_info); } #define FWPS_MSG_ABI_MAX_INPUT_QUEUES (60U) #define FWPS_MSG_ABI_MAX_OUTPUT_QUEUES (2U) #define FWPS_MSG_ABI_MAX_QUEUES \ (FWPS_MSG_ABI_MAX_OUTPUT_QUEUES + FWPS_MSG_ABI_MAX_INPUT_QUEUES) #define FWPS_MSG_ABI_OUT_ACK_QUEUE_ID (IA_GOFO_MSG_ABI_OUT_ACK_QUEUE_ID) #define FWPS_MSG_ABI_OUT_LOG_QUEUE_ID (IA_GOFO_MSG_ABI_OUT_LOG_QUEUE_ID) #if (FWPS_MSG_ABI_OUT_LOG_QUEUE_ID >= FWPS_MSG_ABI_MAX_OUTPUT_QUEUES) #error "Maximum output queues configuration is too small to fit ACK and LOG \ queues" #endif #define FWPS_MSG_ABI_IN_DEV_QUEUE_ID (IA_GOFO_MSG_ABI_IN_DEV_QUEUE_ID) #define FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID (3U) #define FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID \ (FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID + 1U) #if (FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID >= FWPS_MSG_ABI_MAX_INPUT_QUEUES) #error "Maximum queues configuration is too small to fit minimum number of \ useful queues" #endif #define FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID (FWPS_MSG_ABI_MAX_QUEUES - 1U) #define FWPS_MSG_ABI_IN_MAX_TASK_QUEUES \ (FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID - \ FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID + 1U) #define FWPS_MSG_ABI_OUT_FIRST_QUEUE_ID (FWPS_MSG_ABI_OUT_ACK_QUEUE_ID) #define FWPS_MSG_ABI_OUT_LAST_QUEUE_ID (FWPS_MSG_ABI_MAX_OUTPUT_QUEUES - 1U) #define FWPS_MSG_ABI_IN_FIRST_QUEUE_ID (FWPS_MSG_ABI_IN_DEV_QUEUE_ID) #define FWPS_MSG_ABI_IN_LAST_QUEUE_ID (FWPS_MSG_ABI_IN_LAST_TASK_QUEUE_ID) #define FWPS_MSG_HOST2FW_MAX_SIZE (2U * 1024U) #define FWPS_MSG_FW2HOST_MAX_SIZE (256U) #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/abi/ipu7_fw_psys_config_abi.h000066400000000000000000000007241503071307200320560ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2025 Intel Corporation */ #ifndef 
IPU7_PSYS_CONFIG_ABI_H_INCLUDED__ #define IPU7_PSYS_CONFIG_ABI_H_INCLUDED__ #include #include "ipu7_fw_boot_abi.h" #include "ipu7_fw_config_abi.h" struct ipu7_psys_config { u32 use_debug_manifest; u32 timeout_val_ms; u32 compression_support_enabled; struct ia_gofo_logger_config logger_config; struct ipu7_wdt_abi wdt_config; }; #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/abi/ipu7_fw_syscom_abi.h000066400000000000000000000023231503071307200310450ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2025 Intel Corporation */ #ifndef IPU7_FW_SYSCOM_ABI_H #define IPU7_FW_SYSCOM_ABI_H #include #include "ipu7_fw_common_abi.h" #pragma pack(push, 1) #define SYSCOM_QUEUE_MIN_CAPACITY 2U struct syscom_queue_params_config { ia_gofo_addr_t token_array_mem; u16 token_size_in_bytes; u16 max_capacity; }; struct syscom_config_s { u16 max_output_queues; u16 max_input_queues; }; #pragma pack(pop) static inline struct syscom_queue_params_config * syscom_config_get_queue_configs(struct syscom_config_s *config) { return (struct syscom_queue_params_config *)(&config[1]); } static inline const struct syscom_queue_params_config * syscom_config_get_queue_configs_const(const struct syscom_config_s *config) { return (const struct syscom_queue_params_config *)(&config[1]); } static inline void syscom_abi_test_func(void) { CHECK_ALIGN32(struct syscom_queue_params_config); CHECK_ALIGN32(struct syscom_config_s); } #pragma pack(push, 1) struct syscom_queue_indices_s { u32 read_index; u32 write_index; }; #pragma pack(pop) static inline void syscom_queue_abi_test_func(void) { CHECK_ALIGN64(struct syscom_queue_indices_s); } #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-boot.c000066400000000000000000000330041503071307200263420ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2022 - 2025 Intel Corporation */ #include #include #include #include #include 
#include #include #include #include #include "abi/ipu7_fw_boot_abi.h" #include "ipu7.h" #include "ipu7-boot.h" #include "ipu7-bus.h" #include "ipu7-buttress-regs.h" #include "ipu7-dma.h" #include "ipu7-platform-regs.h" #include "ipu7-syscom.h" #define IPU_FW_START_STOP_TIMEOUT 2000 #define IPU_BOOT_CELL_RESET_TIMEOUT (2 * USEC_PER_SEC) #define BOOT_STATE_IS_CRITICAL(s) IA_GOFO_FW_BOOT_STATE_IS_CRITICAL(s) #define BOOT_STATE_IS_READY(s) ((s) == IA_GOFO_FW_BOOT_STATE_READY) #define BOOT_STATE_IS_INACTIVE(s) ((s) == IA_GOFO_FW_BOOT_STATE_INACTIVE) struct ipu7_boot_context { u32 base; u32 dmem_address; u32 status_ctrl_reg; u32 fw_start_address_reg; u32 fw_code_base_reg; }; static const struct ipu7_boot_context contexts[IPU_SUBSYS_NUM] = { { /* ISYS */ .dmem_address = IPU_ISYS_DMEM_OFFSET, .status_ctrl_reg = BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS, .fw_start_address_reg = BUTTRESS_REG_DRV_IS_UCX_START_ADDR, .fw_code_base_reg = IS_UC_CTRL_BASE }, { /* PSYS */ .dmem_address = IPU_PSYS_DMEM_OFFSET, .status_ctrl_reg = BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS, .fw_start_address_reg = BUTTRESS_REG_DRV_PS_UCX_START_ADDR, .fw_code_base_reg = PS_UC_CTRL_BASE } }; static u32 get_fw_boot_reg_addr(const struct ipu7_bus_device *adev, enum ia_gofo_buttress_reg_id reg) { u32 base = (adev->subsys == IPU_IS) ? 
0U : (u32)IA_GOFO_FW_BOOT_ID_MAX; return BUTTRESS_FW_BOOT_PARAMS_ENTRY(base + (u32)reg); } static void write_fw_boot_param(const struct ipu7_bus_device *adev, enum ia_gofo_buttress_reg_id reg, u32 val) { void __iomem *base = adev->isp->base; dev_dbg(&adev->auxdev.dev, "write boot param reg: %d addr: %x val: 0x%x\n", reg, get_fw_boot_reg_addr(adev, reg), val); writel(val, base + get_fw_boot_reg_addr(adev, reg)); } static u32 read_fw_boot_param(const struct ipu7_bus_device *adev, enum ia_gofo_buttress_reg_id reg) { void __iomem *base = adev->isp->base; return readl(base + get_fw_boot_reg_addr(adev, reg)); } static int ipu7_boot_cell_reset(const struct ipu7_bus_device *adev) { const struct ipu7_boot_context *ctx = &contexts[adev->subsys]; const struct device *dev = &adev->auxdev.dev; u32 ucx_ctrl_status = ctx->status_ctrl_reg; u32 timeout = IPU_BOOT_CELL_RESET_TIMEOUT; void __iomem *base = adev->isp->base; u32 val, val2; int ret; dev_dbg(dev, "cell enter reset...\n"); val = readl(base + ucx_ctrl_status); dev_dbg(dev, "cell_ctrl_reg addr = 0x%x, val = 0x%x\n", ucx_ctrl_status, val); dev_dbg(dev, "force cell reset...\n"); val |= UCX_CTL_RESET; val &= ~UCX_CTL_RUN; dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ucx_ctrl_status, val); writel(val, base + ucx_ctrl_status); ret = readl_poll_timeout(base + ucx_ctrl_status, val2, (val2 & 0x3U) == (val & 0x3U), 100, timeout); if (ret) { dev_err(dev, "cell enter reset timeout. status: 0x%x\n", val2); return -ETIMEDOUT; } dev_dbg(dev, "cell exit reset...\n"); val = readl(base + ucx_ctrl_status); WARN((!(val & UCX_CTL_RESET) || val & UCX_CTL_RUN), "cell status 0x%x", val); val &= ~(UCX_CTL_RESET | UCX_CTL_RUN); dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ucx_ctrl_status, val); writel(val, base + ucx_ctrl_status); ret = readl_poll_timeout(base + ucx_ctrl_status, val2, (val2 & 0x3U) == (val & 0x3U), 100, timeout); if (ret) { dev_err(dev, "cell exit reset timeout. 
status: 0x%x\n", val2); return -ETIMEDOUT; } return 0; } static void ipu7_boot_cell_start(const struct ipu7_bus_device *adev) { const struct ipu7_boot_context *ctx = &contexts[adev->subsys]; void __iomem *base = adev->isp->base; const struct device *dev = &adev->auxdev.dev; u32 val; dev_dbg(dev, "starting cell...\n"); val = readl(base + ctx->status_ctrl_reg); WARN_ON(val & (UCX_CTL_RESET | UCX_CTL_RUN)); val &= ~UCX_CTL_RESET; val |= UCX_CTL_RUN; dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ctx->status_ctrl_reg, val); writel(val, base + ctx->status_ctrl_reg); } static void ipu7_boot_cell_stop(const struct ipu7_bus_device *adev) { const struct ipu7_boot_context *ctx = &contexts[adev->subsys]; void __iomem *base = adev->isp->base; const struct device *dev = &adev->auxdev.dev; u32 val; dev_dbg(dev, "stopping cell...\n"); val = readl(base + ctx->status_ctrl_reg); val &= ~UCX_CTL_RUN; dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ctx->status_ctrl_reg, val); writel(val, base + ctx->status_ctrl_reg); /* Wait for uC transactions complete */ usleep_range(10, 20); val = readl(base + ctx->status_ctrl_reg); val |= UCX_CTL_RESET; dev_dbg(dev, "write status_ctrl_reg(0x%x) to 0x%x\n", ctx->status_ctrl_reg, val); writel(val, base + ctx->status_ctrl_reg); } static int ipu7_boot_cell_init(const struct ipu7_bus_device *adev) { const struct ipu7_boot_context *ctx = &contexts[adev->subsys]; void __iomem *base = adev->isp->base; dev_dbg(&adev->auxdev.dev, "write fw_start_address_reg(0x%x) to 0x%x\n", ctx->fw_start_address_reg, adev->fw_entry); writel(adev->fw_entry, base + ctx->fw_start_address_reg); return ipu7_boot_cell_reset(adev); } static void init_boot_config(struct ia_gofo_boot_config *boot_config, u32 length, u8 major) { /* syscom version, new syscom2 version */ boot_config->length = length; boot_config->config_version.major = 1U; boot_config->config_version.minor = 0U; boot_config->config_version.subminor = 0U; boot_config->config_version.patch = 0U; /* msg 
version for task interface */ boot_config->client_version_support.num_versions = 1U; boot_config->client_version_support.versions[0].major = major; boot_config->client_version_support.versions[0].minor = 0U; boot_config->client_version_support.versions[0].subminor = 0U; boot_config->client_version_support.versions[0].patch = 0U; } int ipu7_boot_init_boot_config(struct ipu7_bus_device *adev, struct syscom_queue_config *qconfigs, int num_queues, u32 uc_freq, dma_addr_t subsys_config, u8 major) { u32 total_queue_size = 0, total_queue_size_aligned = 0; struct ipu7_syscom_context *syscom = adev->syscom; struct ia_gofo_boot_config *boot_config; struct syscom_queue_params_config *cfgs; struct device *dev = &adev->auxdev.dev; struct syscom_config_s *syscfg; dma_addr_t queue_mem_dma_ptr; void *queue_mem_ptr; unsigned int i; dev_dbg(dev, "boot config queues_nr: %d freq: %u sys_conf: 0x%llx\n", num_queues, uc_freq, subsys_config); /* Allocate boot config. */ adev->boot_config_size = sizeof(*cfgs) * num_queues + sizeof(*boot_config); adev->boot_config = ipu7_dma_alloc(adev, adev->boot_config_size, &adev->boot_config_dma_addr, GFP_KERNEL, 0); if (!adev->boot_config) { dev_err(dev, "Failed to allocate boot config.\n"); return -ENOMEM; } boot_config = adev->boot_config; memset(boot_config, 0, sizeof(struct ia_gofo_boot_config)); init_boot_config(boot_config, adev->boot_config_size, major); boot_config->subsys_config = subsys_config; boot_config->uc_tile_frequency = uc_freq; boot_config->uc_tile_frequency_units = IA_GOFO_FW_BOOT_UC_FREQUENCY_UNITS_MHZ; boot_config->syscom_context_config.max_output_queues = syscom->num_output_queues; boot_config->syscom_context_config.max_input_queues = syscom->num_input_queues; ipu7_dma_sync_single(adev, adev->boot_config_dma_addr, adev->boot_config_size); for (i = 0; i < num_queues; i++) { u32 queue_size = qconfigs[i].max_capacity * qconfigs[i].token_size_in_bytes; total_queue_size += queue_size; queue_size = ALIGN(queue_size, IA_GOFO_CL_SIZE); 
total_queue_size_aligned += queue_size; qconfigs[i].queue_size = queue_size; } /* Allocate queue memory */ syscom->queue_mem = ipu7_dma_alloc(adev, total_queue_size_aligned, &syscom->queue_mem_dma_addr, GFP_KERNEL, 0); if (!syscom->queue_mem) { dev_err(dev, "Failed to allocate queue memory.\n"); return -ENOMEM; } syscom->queue_mem_size = total_queue_size_aligned; syscfg = &boot_config->syscom_context_config; cfgs = ipu7_syscom_get_queue_config(syscfg); queue_mem_ptr = syscom->queue_mem; queue_mem_dma_ptr = syscom->queue_mem_dma_addr; for (i = 0; i < num_queues; i++) { cfgs[i].token_array_mem = queue_mem_dma_ptr; cfgs[i].max_capacity = qconfigs[i].max_capacity; cfgs[i].token_size_in_bytes = qconfigs[i].token_size_in_bytes; qconfigs[i].token_array_mem = queue_mem_ptr; queue_mem_dma_ptr += qconfigs[i].queue_size; queue_mem_ptr += qconfigs[i].queue_size; } ipu7_dma_sync_single(adev, syscom->queue_mem_dma_addr, total_queue_size_aligned); return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_boot_init_boot_config, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_boot_init_boot_config, INTEL_IPU7); #endif void ipu7_boot_release_boot_config(struct ipu7_bus_device *adev) { struct ipu7_syscom_context *syscom = adev->syscom; if (syscom->queue_mem) { ipu7_dma_free(adev, syscom->queue_mem_size, syscom->queue_mem, syscom->queue_mem_dma_addr, 0); syscom->queue_mem = NULL; syscom->queue_mem_dma_addr = 0; } if (adev->boot_config) { ipu7_dma_free(adev, adev->boot_config_size, adev->boot_config, adev->boot_config_dma_addr, 0); adev->boot_config = NULL; adev->boot_config_dma_addr = 0; } } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_boot_release_boot_config, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_boot_release_boot_config, INTEL_IPU7); #endif int ipu7_boot_start_fw(const struct ipu7_bus_device *adev) { const struct device *dev = &adev->auxdev.dev; u32 timeout = IPU_FW_START_STOP_TIMEOUT; void __iomem *base = 
adev->isp->base; u32 boot_state, last_boot_state; u32 indices_addr, msg_ver, id; int ret; ret = ipu7_boot_cell_init(adev); if (ret) return ret; dev_dbg(dev, "start booting fw...\n"); /* store "uninit" state to syscom/boot state reg */ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID, IA_GOFO_FW_BOOT_STATE_UNINIT); /* * Set registers to zero * (not strictly required, but recommended for diagnostics) */ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_SYSCOM_QUEUE_INDICES_BASE_ID, 0); write_fw_boot_param(adev, IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID, 0); /* store firmware configuration address */ write_fw_boot_param(adev, IA_GOFO_FW_BOOT_CONFIG_ID, adev->boot_config_dma_addr); /* Kick uC, then wait for boot complete */ ipu7_boot_cell_start(adev); last_boot_state = IA_GOFO_FW_BOOT_STATE_UNINIT; while (timeout--) { boot_state = read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID); if (boot_state != last_boot_state) { dev_dbg(dev, "boot state changed from 0x%x to 0x%x\n", last_boot_state, boot_state); last_boot_state = boot_state; } if (BOOT_STATE_IS_CRITICAL(boot_state) || BOOT_STATE_IS_READY(boot_state)) break; usleep_range(1000, 1200); } if (BOOT_STATE_IS_CRITICAL(boot_state)) { ipu7_dump_fw_error_log(adev); dev_err(dev, "critical boot state error 0x%x\n", boot_state); return -EINVAL; } else if (!BOOT_STATE_IS_READY(boot_state)) { dev_err(dev, "fw boot timeout. state: 0x%x\n", boot_state); return -ETIMEDOUT; } dev_dbg(dev, "fw boot done.\n"); /* Get FW syscom queue indices addr */ id = IA_GOFO_FW_BOOT_SYSCOM_QUEUE_INDICES_BASE_ID; indices_addr = read_fw_boot_param(adev, id); adev->syscom->queue_indices = base + indices_addr; dev_dbg(dev, "fw queue indices offset is 0x%x\n", indices_addr); /* Get message version. 
*/ msg_ver = read_fw_boot_param(adev, IA_GOFO_FW_BOOT_MESSAGING_VERSION_ID); dev_dbg(dev, "ipu message version is 0x%08x\n", msg_ver); return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_boot_start_fw, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_boot_start_fw, INTEL_IPU7); #endif int ipu7_boot_stop_fw(const struct ipu7_bus_device *adev) { const struct device *dev = &adev->auxdev.dev; u32 timeout = IPU_FW_START_STOP_TIMEOUT; u32 boot_state; boot_state = read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID); if (BOOT_STATE_IS_CRITICAL(boot_state) || !BOOT_STATE_IS_READY(boot_state)) { dev_err(dev, "fw not ready for shutdown, state 0x%x\n", boot_state); return -EBUSY; } /* Issue shutdown to start shutdown process */ dev_dbg(dev, "stopping fw...\n"); write_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID, IA_GOFO_FW_BOOT_STATE_SHUTDOWN_CMD); while (timeout--) { boot_state = read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID); if (BOOT_STATE_IS_CRITICAL(boot_state) || BOOT_STATE_IS_INACTIVE(boot_state)) break; usleep_range(1000, 1200); } if (BOOT_STATE_IS_CRITICAL(boot_state)) { ipu7_dump_fw_error_log(adev); dev_err(dev, "critical boot state error 0x%x\n", boot_state); return -EINVAL; } else if (!BOOT_STATE_IS_INACTIVE(boot_state)) { dev_err(dev, "stop fw timeout. 
state: 0x%x\n", boot_state); return -ETIMEDOUT; } ipu7_boot_cell_stop(adev); dev_dbg(dev, "stop fw done.\n"); return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_boot_stop_fw, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_boot_stop_fw, INTEL_IPU7); #endif u32 ipu7_boot_get_boot_state(const struct ipu7_bus_device *adev) { return read_fw_boot_param(adev, IA_GOFO_FW_BOOT_STATE_ID); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_boot_get_boot_state, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_boot_get_boot_state, INTEL_IPU7); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-boot.h000066400000000000000000000014011503071307200263430ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2022 - 2025 Intel Corporation */ #ifndef IPU7_BOOT_H #define IPU7_BOOT_H #include struct ipu7_bus_device; struct syscom_queue_config; #define FW_QUEUE_CONFIG_SIZE(num_queues) \ (sizeof(struct syscom_queue_config) * (num_queues)) int ipu7_boot_init_boot_config(struct ipu7_bus_device *adev, struct syscom_queue_config *qconfigs, int num_queues, u32 uc_freq, dma_addr_t subsys_config, u8 major); void ipu7_boot_release_boot_config(struct ipu7_bus_device *adev); int ipu7_boot_start_fw(const struct ipu7_bus_device *adev); int ipu7_boot_stop_fw(const struct ipu7_bus_device *adev); u32 ipu7_boot_get_boot_state(const struct ipu7_bus_device *adev); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-bus.c000066400000000000000000000064401503071307200261740ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-boot.h" #include "ipu7-dma.h" static int bus_pm_runtime_suspend(struct device *dev) { struct ipu7_bus_device *adev = 
to_ipu7_bus_device(dev); int ret; ret = pm_generic_runtime_suspend(dev); if (ret) return ret; ret = ipu_buttress_powerdown(dev, adev->ctrl); if (!ret) return 0; dev_err(dev, "power down failed!\n"); /* Powering down failed, attempt to resume device now */ ret = pm_generic_runtime_resume(dev); if (!ret) return -EBUSY; return -EIO; } static int bus_pm_runtime_resume(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); int ret; ret = ipu_buttress_powerup(dev, adev->ctrl); if (ret) return ret; ret = pm_generic_runtime_resume(dev); if (ret) goto out_err; return 0; out_err: ipu_buttress_powerdown(dev, adev->ctrl); return -EBUSY; } static struct dev_pm_domain ipu7_bus_pm_domain = { .ops = { .runtime_suspend = bus_pm_runtime_suspend, .runtime_resume = bus_pm_runtime_resume, }, }; static DEFINE_MUTEX(ipu7_bus_mutex); static void ipu7_bus_release(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); kfree(adev->pdata); kfree(adev); } struct ipu7_bus_device * ipu7_bus_initialize_device(struct pci_dev *pdev, struct device *parent, void *pdata, const struct ipu_buttress_ctrl *ctrl, const char *name) { struct auxiliary_device *auxdev; struct ipu7_bus_device *adev; struct ipu7_device *isp = pci_get_drvdata(pdev); int ret; adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) return ERR_PTR(-ENOMEM); adev->isp = isp; adev->ctrl = ctrl; adev->pdata = pdata; auxdev = &adev->auxdev; auxdev->name = name; auxdev->id = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn); auxdev->dev.parent = parent; auxdev->dev.release = ipu7_bus_release; ret = auxiliary_device_init(auxdev); if (ret < 0) { dev_err(&isp->pdev->dev, "auxiliary device init failed (%d)\n", ret); kfree(adev); return ERR_PTR(ret); } dev_pm_domain_set(&auxdev->dev, &ipu7_bus_pm_domain); pm_runtime_forbid(&adev->auxdev.dev); pm_runtime_enable(&adev->auxdev.dev); return adev; } int ipu7_bus_add_device(struct ipu7_bus_device *adev) { struct auxiliary_device 
*auxdev = &adev->auxdev; int ret; ret = auxiliary_device_add(auxdev); if (ret) { auxiliary_device_uninit(auxdev); return ret; } mutex_lock(&ipu7_bus_mutex); list_add(&adev->list, &adev->isp->devices); mutex_unlock(&ipu7_bus_mutex); pm_runtime_allow(&auxdev->dev); return 0; } void ipu7_bus_del_devices(struct pci_dev *pdev) { struct ipu7_device *isp = pci_get_drvdata(pdev); struct ipu7_bus_device *adev, *save; mutex_lock(&ipu7_bus_mutex); list_for_each_entry_safe(adev, save, &isp->devices, list) { pm_runtime_disable(&adev->auxdev.dev); list_del(&adev->list); auxiliary_device_delete(&adev->auxdev); auxiliary_device_uninit(&adev->auxdev); } mutex_unlock(&ipu7_bus_mutex); } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-bus.h000066400000000000000000000033511503071307200261770ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_BUS_H #define IPU7_BUS_H #include #include #include #include #include #include #include #include "abi/ipu7_fw_boot_abi.h" #include "ipu7-syscom.h" struct pci_dev; struct ipu_buttress_ctrl; struct ipu7_mmu; struct ipu7_device; enum ipu7_subsys { IPU_IS = 0, IPU_PS = 1, IPU_SUBSYS_NUM = 2, }; struct ipu7_bus_device { struct auxiliary_device auxdev; const struct auxiliary_driver *auxdrv; const struct ipu7_auxdrv_data *auxdrv_data; struct list_head list; enum ipu7_subsys subsys; void *pdata; struct ipu7_mmu *mmu; struct ipu7_device *isp; const struct ipu_buttress_ctrl *ctrl; u64 dma_mask; struct sg_table fw_sgt; u32 fw_entry; struct ipu7_syscom_context *syscom; struct ia_gofo_boot_config *boot_config; dma_addr_t boot_config_dma_addr; u32 boot_config_size; }; struct ipu7_auxdrv_data { irqreturn_t (*isr)(struct ipu7_bus_device *adev); irqreturn_t (*isr_threaded)(struct ipu7_bus_device *adev); bool wake_isr_thread; }; #define to_ipu7_bus_device(_dev) \ container_of(to_auxiliary_dev(_dev), struct ipu7_bus_device, auxdev) #define 
auxdev_to_adev(_auxdev) \ container_of(_auxdev, struct ipu7_bus_device, auxdev) #define ipu7_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->auxdev.dev) struct ipu7_bus_device * ipu7_bus_initialize_device(struct pci_dev *pdev, struct device *parent, void *pdata, const struct ipu_buttress_ctrl *ctrl, const char *name); int ipu7_bus_add_device(struct ipu7_bus_device *adev); void ipu7_bus_del_devices(struct pci_dev *pdev); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-buttress-regs.h000066400000000000000000000453071503071307200302260ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2025 Intel Corporation */ #ifndef IPU7_BUTTRESS_REGS_H #define IPU7_BUTTRESS_REGS_H #define BUTTRESS_REG_IRQ_STATUS 0x2000 #define BUTTRESS_REG_IRQ_STATUS_UNMASKED 0x2004 #define BUTTRESS_REG_IRQ_ENABLE 0x2008 #define BUTTRESS_REG_IRQ_CLEAR 0x200c #define BUTTRESS_REG_IRQ_MASK 0x2010 #define BUTTRESS_REG_TSC_CMD 0x2014 #define BUTTRESS_REG_TSC_CTL 0x2018 #define BUTTRESS_REG_TSC_LO 0x201c #define BUTTRESS_REG_TSC_HI 0x2020 /* valid for PTL */ #define BUTTRESS_REG_PB_TIMESTAMP_LO 0x2030 #define BUTTRESS_REG_PB_TIMESTAMP_HI 0x2034 #define BUTTRESS_REG_PB_TIMESTAMP_VALID 0x2038 #define BUTTRESS_REG_PS_WORKPOINT_REQ 0x2100 #define BUTTRESS_REG_IS_WORKPOINT_REQ 0x2104 #define BUTTRESS_REG_PS_WORKPOINT_DOMAIN_REQ 0x2108 #define BUTTRESS_REG_PS_DOMAINS_STATUS 0x2110 #define BUTTRESS_REG_PWR_STATUS 0x2114 #define BUTTRESS_REG_PS_WORKPOINT_REQ_SHADOW 0x2120 #define BUTTRESS_REG_IS_WORKPOINT_REQ_SHADOW 0x2124 #define BUTTRESS_REG_PS_WORKPOINT_DOMAIN_REQ_SHADOW 0x2128 #define BUTTRESS_REG_ISPS_WORKPOINT_DOWNLOAD 0x212c #define BUTTRESS_REG_PG_FLOW_OVERRIDE 0x2180 #define BUTTRESS_REG_GLOBAL_OVERRIDE_UNGATE_CTL 0x2184 #define BUTTRESS_REG_PWR_FSM_CTL 0x2188 #define BUTTRESS_REG_IDLE_WDT 0x218c #define BUTTRESS_REG_PS_PWR_DOMAIN_EVENTQ_EN 0x2190 #define BUTTRESS_REG_PS_PWR_DOMAIN_EVENTQ_ADDR 0x2194 #define 
BUTTRESS_REG_PS_PWR_DOMAIN_EVENTQ_DATA 0x2198 #define BUTTRESS_REG_POWER_EN_DELAY 0x219c #define IPU7_BUTTRESS_REG_LTR_CONTROL 0x21a0 #define IPU7_BUTTRESS_REG_NDE_CONTROL 0x21a4 #define IPU7_BUTTRESS_REG_INT_FRM_PUNIT 0x21a8 #define IPU8_BUTTRESS_REG_LTR_CONTROL 0x21a4 #define IPU8_BUTTRESS_REG_NDE_CONTROL 0x21a8 #define IPU8_BUTTRESS_REG_INT_FRM_PUNIT 0x21ac #define BUTTRESS_REG_SLEEP_LEVEL_CFG 0x21b0 #define BUTTRESS_REG_SLEEP_LEVEL_STS 0x21b4 #define BUTTRESS_REG_DVFS_FSM_STATUS 0x21b8 #define BUTTRESS_REG_PS_PLL_ENABLE 0x21bc #define BUTTRESS_REG_D2D_CTL 0x21d4 #define BUTTRESS_REG_IB_CLK_CTL 0x21d8 #define BUTTRESS_REG_IB_CRO_CLK_CTL 0x21dc #define BUTTRESS_REG_FUNC_FUSES 0x21e0 #define BUTTRESS_REG_ISOCH_CTL 0x21e4 #define BUTTRESS_REG_WORKPOINT_CTL 0x21f0 #define BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS 0x2200 #define BUTTRESS_REG_DRV_IS_UCX_START_ADDR 0x2204 #define BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS 0x2208 #define BUTTRESS_REG_DRV_PS_UCX_START_ADDR 0x220c #define BUTTRESS_REG_DRV_UCX_RESET_CFG 0x2210 /* configured by CSE */ #define BUTTRESS_REG_CSE_IS_UCX_CONTROL_STATUS 0x2300 #define BUTTRESS_REG_CSE_IS_UCX_START_ADDR 0x2304 #define BUTTRESS_REG_CSE_PS_UCX_CONTROL_STATUS 0x2308 #define BUTTRESS_REG_CSE_PS_UCX_START_ADDR 0x230c #define BUTTRESS_REG_CAMERA_MASK 0x2310 #define BUTTRESS_REG_FW_CTL 0x2314 #define BUTTRESS_REG_SECURITY_CTL 0x2318 #define BUTTRESS_REG_FUNCTIONAL_FW_SETUP 0x231c #define BUTTRESS_REG_FW_BASE 0x2320 #define BUTTRESS_REG_FW_BASE_LIMIT 0x2324 #define BUTTRESS_REG_FW_SCRATCH_BASE 0x2328 #define BUTTRESS_REG_FW_SCRATCH_LIMIT 0x232c #define BUTTRESS_REG_CSE_ACTION 0x2330 /* configured by SW */ #define BUTTRESS_REG_FW_RESET_CTL 0x2334 #define BUTTRESS_REG_FW_SOURCE_SIZE 0x2338 #define BUTTRESS_REG_FW_SOURCE_BASE 0x233c #define BUTTRESS_REG_IPU_SEC_CP_LSB 0x2400 #define BUTTRESS_REG_IPU_SEC_CP_MSB 0x2404 #define BUTTRESS_REG_IPU_SEC_WAC_LSB 0x2408 #define BUTTRESS_REG_IPU_SEC_WAC_MSB 0x240c #define BUTTRESS_REG_IPU_SEC_RAC_LSB 0x2410 
#define BUTTRESS_REG_IPU_SEC_RAC_MSB 0x2414 #define BUTTRESS_REG_IPU_DRV_CP_LSB 0x2418 #define BUTTRESS_REG_IPU_DRV_CP_MSB 0x241c #define BUTTRESS_REG_IPU_DRV_WAC_LSB 0x2420 #define BUTTRESS_REG_IPU_DRV_WAC_MSB 0x2424 #define BUTTRESS_REG_IPU_DRV_RAC_LSB 0x2428 #define BUTTRESS_REG_IPU_DRV_RAC_MSB 0x242c #define BUTTRESS_REG_IPU_FW_CP_LSB 0x2430 #define BUTTRESS_REG_IPU_FW_CP_MSB 0x2434 #define BUTTRESS_REG_IPU_FW_WAC_LSB 0x2438 #define BUTTRESS_REG_IPU_FW_WAC_MSB 0x243c #define BUTTRESS_REG_IPU_FW_RAC_LSB 0x2440 #define BUTTRESS_REG_IPU_FW_RAC_MSB 0x2444 #define BUTTRESS_REG_IPU_BIOS_SEC_CP_LSB 0x2448 #define BUTTRESS_REG_IPU_BIOS_SEC_CP_MSB 0x244c #define BUTTRESS_REG_IPU_BIOS_SEC_WAC_LSB 0x2450 #define BUTTRESS_REG_IPU_BIOS_SEC_WAC_MSB 0x2454 #define BUTTRESS_REG_IPU_BIOS_SEC_RAC_LSB 0x2458 #define BUTTRESS_REG_IPU_BIOS_SEC_RAC_MSB 0x245c #define BUTTRESS_REG_IPU_DFD_CP_LSB 0x2460 #define BUTTRESS_REG_IPU_DFD_CP_MSB 0x2464 #define BUTTRESS_REG_IPU_DFD_WAC_LSB 0x2468 #define BUTTRESS_REG_IPU_DFD_WAC_MSB 0x246c #define BUTTRESS_REG_IPU_DFD_RAC_LSB 0x2470 #define BUTTRESS_REG_IPU_DFD_RAC_MSB 0x2474 #define BUTTRESS_REG_CSE2IUDB0 0x2500 #define BUTTRESS_REG_CSE2IUDATA0 0x2504 #define BUTTRESS_REG_CSE2IUCSR 0x2508 #define BUTTRESS_REG_IU2CSEDB0 0x250c #define BUTTRESS_REG_IU2CSEDATA0 0x2510 #define BUTTRESS_REG_IU2CSECSR 0x2514 #define BUTTRESS_REG_CSE2IUDB0_CR_SHADOW 0x2520 #define BUTTRESS_REG_CSE2IUDATA0_CR_SHADOW 0x2524 #define BUTTRESS_REG_CSE2IUCSR_CR_SHADOW 0x2528 #define BUTTRESS_REG_IU2CSEDB0_CR_SHADOW 0x252c #define BUTTRESS_REG_DVFS_FSM_SURVIVABILITY 0x2900 #define BUTTRESS_REG_FLOWS_FSM_SURVIVABILITY 0x2904 #define BUTTRESS_REG_FABRICS_FSM_SURVIVABILITY 0x2908 #define BUTTRESS_REG_PS_SUB1_PM_FSM_SURVIVABILITY 0x290c #define BUTTRESS_REG_PS_SUB0_PM_FSM_SURVIVABILITY 0x2910 #define BUTTRESS_REG_PS_PM_FSM_SURVIVABILITY 0x2914 #define BUTTRESS_REG_IS_PM_FSM_SURVIVABILITY 0x2918 #define BUTTRESS_REG_FLR_RST_FSM_SURVIVABILITY 0x291c #define 
BUTTRESS_REG_FW_RST_FSM_SURVIVABILITY 0x2920 #define BUTTRESS_REG_RESETPREP_FSM_SURVIVABILITY 0x2924 #define BUTTRESS_REG_POWER_FSM_DOMAIN_STATUS 0x3000 #define BUTTRESS_REG_IDLEREQ_STATUS1 0x3004 #define BUTTRESS_REG_POWER_FSM_STATUS_IS_PS 0x3008 #define BUTTRESS_REG_POWER_ACK_B_STATUS 0x300c #define BUTTRESS_REG_DOMAIN_RETENTION_CTL 0x3010 #define BUTTRESS_REG_CG_CTRL_BITS 0x3014 #define BUTTRESS_REG_IS_IFC_STATUS0 0x3018 #define BUTTRESS_REG_IS_IFC_STATUS1 0x301c #define BUTTRESS_REG_PS_IFC_STATUS0 0x3020 #define BUTTRESS_REG_PS_IFC_STATUS1 0x3024 #define BUTTRESS_REG_BTRS_IFC_STATUS0 0x3028 #define BUTTRESS_REG_BTRS_IFC_STATUS1 0x302c #define BUTTRESS_REG_IPU_SKU 0x3030 #define BUTTRESS_REG_PS_IDLEACK 0x3034 #define BUTTRESS_REG_IS_IDLEACK 0x3038 #define BUTTRESS_REG_SPARE_REGS_0 0x303c #define BUTTRESS_REG_SPARE_REGS_1 0x3040 #define BUTTRESS_REG_SPARE_REGS_2 0x3044 #define BUTTRESS_REG_SPARE_REGS_3 0x3048 #define BUTTRESS_REG_IUNIT_ACV 0x304c #define BUTTRESS_REG_CHICKEN_BITS 0x3050 #define BUTTRESS_REG_SBENDPOINT_CFG 0x3054 #define BUTTRESS_REG_ECC_ERR_LOG 0x3058 #define BUTTRESS_REG_POWER_FSM_STATUS 0x3070 #define BUTTRESS_REG_RESET_FSM_STATUS 0x3074 #define BUTTRESS_REG_IDLE_STATUS 0x3078 #define BUTTRESS_REG_IDLEACK_STATUS 0x307c #define BUTTRESS_REG_IPU_DEBUG 0x3080 #define BUTTRESS_REG_FW_BOOT_PARAMS0 0x4000 #define BUTTRESS_REG_FW_BOOT_PARAMS1 0x4004 #define BUTTRESS_REG_FW_BOOT_PARAMS2 0x4008 #define BUTTRESS_REG_FW_BOOT_PARAMS3 0x400c #define BUTTRESS_REG_FW_BOOT_PARAMS4 0x4010 #define BUTTRESS_REG_FW_BOOT_PARAMS5 0x4014 #define BUTTRESS_REG_FW_BOOT_PARAMS6 0x4018 #define BUTTRESS_REG_FW_BOOT_PARAMS7 0x401c #define BUTTRESS_REG_FW_BOOT_PARAMS8 0x4020 #define BUTTRESS_REG_FW_BOOT_PARAMS9 0x4024 #define BUTTRESS_REG_FW_BOOT_PARAMS10 0x4028 #define BUTTRESS_REG_FW_BOOT_PARAMS11 0x402c #define BUTTRESS_REG_FW_BOOT_PARAMS12 0x4030 #define BUTTRESS_REG_FW_BOOT_PARAMS13 0x4034 #define BUTTRESS_REG_FW_BOOT_PARAMS14 0x4038 #define 
BUTTRESS_REG_FW_BOOT_PARAMS15 0x403c #define BUTTRESS_FW_BOOT_PARAMS_ENTRY(i) \ (BUTTRESS_REG_FW_BOOT_PARAMS0 + ((i) * 4U)) #define BUTTRESS_REG_FW_GP(i) (0x4040 + 0x4 * (i)) #define BUTTRESS_REG_FPGA_SUPPORT(i) (0x40c0 + 0x4 * (i)) #define BUTTRESS_REG_FW_GP8 0x4060 #define BUTTRESS_REG_FW_GP24 0x40a0 #define BUTTRESS_REG_GPIO_0_PADCFG_ADDR_CR 0x4100 #define BUTTRESS_REG_GPIO_1_PADCFG_ADDR_CR 0x4104 #define BUTTRESS_REG_GPIO_2_PADCFG_ADDR_CR 0x4108 #define BUTTRESS_REG_GPIO_3_PADCFG_ADDR_CR 0x410c #define BUTTRESS_REG_GPIO_4_PADCFG_ADDR_CR 0x4110 #define BUTTRESS_REG_GPIO_5_PADCFG_ADDR_CR 0x4114 #define BUTTRESS_REG_GPIO_6_PADCFG_ADDR_CR 0x4118 #define BUTTRESS_REG_GPIO_7_PADCFG_ADDR_CR 0x411c #define BUTTRESS_REG_GPIO_ENABLE 0x4140 #define BUTTRESS_REG_GPIO_VALUE_CR 0x4144 #define BUTTRESS_REG_IS_MEM_CORRECTABLE_ERROR_STATUS 0x5000 #define BUTTRESS_REG_IS_MEM_FATAL_ERROR_STATUS 0x5004 #define BUTTRESS_REG_IS_MEM_NON_FATAL_ERROR_STATUS 0x5008 #define BUTTRESS_REG_IS_MEM_CHECK_PASSED 0x500c #define BUTTRESS_REG_IS_MEM_ERROR_INJECT 0x5010 #define BUTTRESS_REG_IS_MEM_ERROR_CLEAR 0x5014 #define BUTTRESS_REG_PS_MEM_CORRECTABLE_ERROR_STATUS 0x5040 #define BUTTRESS_REG_PS_MEM_FATAL_ERROR_STATUS 0x5044 #define BUTTRESS_REG_PS_MEM_NON_FATAL_ERROR_STATUS 0x5048 #define BUTTRESS_REG_PS_MEM_CHECK_PASSED 0x504c #define BUTTRESS_REG_PS_MEM_ERROR_INJECT 0x5050 #define BUTTRESS_REG_PS_MEM_ERROR_CLEAR 0x5054 #define BUTTRESS_REG_IS_AB_REGION_MIN_ADDRESS(i) (0x6000 + 0x8 * (i)) #define BUTTRESS_REG_IS_AB_REGION_MAX_ADDRESS(i) (0x6004 + 0x8 * (i)) #define BUTTRESS_REG_IS_AB_VIOLATION_LOG0 0x6080 #define BUTTRESS_REG_IS_AB_VIOLATION_LOG1 0x6084 #define BUTTRESS_REG_PS_AB_REGION_MIN_ADDRESS(i) (0x6100 + 0x8 * (i)) #define BUTTRESS_REG_PS_AB_REGION_MAX_ADDRESS0 (0x6104 + 0x8 * (i)) #define BUTTRESS_REG_PS_AB_VIOLATION_LOG0 0x6180 #define BUTTRESS_REG_PS_AB_VIOLATION_LOG1 0x6184 #define BUTTRESS_REG_PS_DEBUG_AB_VIOLATION_LOG0 0x6200 #define BUTTRESS_REG_PS_DEBUG_AB_VIOLATION_LOG1 0x6204 
#define BUTTRESS_REG_IS_DEBUG_AB_VIOLATION_LOG0 0x6208 #define BUTTRESS_REG_IS_DEBUG_AB_VIOLATION_LOG1 0x620c #define BUTTRESS_REG_IB_DVP_AB_VIOLATION_LOG0 0x6210 #define BUTTRESS_REG_IB_DVP_AB_VIOLATION_LOG1 0x6214 #define BUTTRESS_REG_IB_ATB2DTF_AB_VIOLATION_LOG0 0x6218 #define BUTTRESS_REG_IB_ATB2DTF_AB_VIOLATION_LOG1 0x621c #define BUTTRESS_REG_AB_ENABLE 0x6220 #define BUTTRESS_REG_AB_DEFAULT_ACCESS 0x6230 /* Indicates CSE has received an IPU driver IPC transaction */ #define BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE BIT(0) /* Indicates an IPC transaction from CSE has arrived */ #define BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING BIT(1) /* Indicates a CSR update from CSE has arrived */ #define BUTTRESS_IRQ_CSE_CSR_SET BIT(2) /* Indicates an interrupt set by Punit (not in use at this time) */ #define BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ BIT(3) /* Indicates an SAI violation was detected on access to IB registers */ #define BUTTRESS_IRQ_SAI_VIOLATION BIT(4) /* Indicates a transaction to IS was not able to pass the access blocker */ #define BUTTRESS_IRQ_IS_AB_VIOLATION BIT(5) /* Indicates a transaction to PS was not able to pass the access blocker */ #define BUTTRESS_IRQ_PS_AB_VIOLATION BIT(6) /* Indicates an error response was detected by the IB config NoC */ #define BUTTRESS_IRQ_IB_CFG_NOC_ERR_IRQ BIT(7) /* Indicates an error response was detected by the IB data NoC */ #define BUTTRESS_IRQ_IB_DATA_NOC_ERR_IRQ BIT(8) /* Transaction to DVP regs was not able to pass the access blocker */ #define BUTTRESS_IRQ_IB_DVP_AB_VIOLATION BIT(9) /* Transaction to ATB2DTF regs was not able to pass the access blocker */ #define BUTTRESS_IRQ_ATB2DTF_AB_VIOLATION BIT(10) /* Transaction to IS debug regs was not able to pass the access blocker */ #define BUTTRESS_IRQ_IS_DEBUG_AB_VIOLATION BIT(11) /* Transaction to PS debug regs was not able to pass the access blocker */ #define BUTTRESS_IRQ_PS_DEBUG_AB_VIOLATION BIT(12) /* Indicates timeout occurred waiting for a response from a target */ #define 
BUTTRESS_IRQ_IB_CFG_NOC_TIMEOUT_IRQ BIT(13) /* Set when any correctable ECC error input wire to buttress is set */ #define BUTTRESS_IRQ_ECC_CORRECTABLE BIT(14) /* Any noncorrectable-nonfatal ECC error input wire to buttress is set */ #define BUTTRESS_IRQ_ECC_NONCORRECTABLE_NONFATAL BIT(15) /* Set when any noncorrectable-fatal ECC error input wire to buttress is set */ #define BUTTRESS_IRQ_ECC_NONCORRECTABLE_FATAL BIT(16) /* Set when timeout occurred waiting for a response from a target */ #define BUTTRESS_IRQ_IS_CFG_NOC_TIMEOUT_IRQ BIT(17) #define BUTTRESS_IRQ_PS_CFG_NOC_TIMEOUT_IRQ BIT(18) #define BUTTRESS_IRQ_LB_CFG_NOC_TIMEOUT_IRQ BIT(19) /* IS FW double exception event */ #define BUTTRESS_IRQ_IS_UC_PFATAL_ERROR BIT(26) /* PS FW double exception event */ #define BUTTRESS_IRQ_PS_UC_PFATAL_ERROR BIT(27) /* IS FW watchdog event */ #define BUTTRESS_IRQ_IS_WATCHDOG BIT(28) /* PS FW watchdog event */ #define BUTTRESS_IRQ_PS_WATCHDOG BIT(29) /* IS IRC irq out */ #define BUTTRESS_IRQ_IS_IRQ BIT(30) /* PS IRC irq out */ #define BUTTRESS_IRQ_PS_IRQ BIT(31) /* buttress irq */ enum { BUTTRESS_PWR_STATUS_HH_STATE_IDLE, BUTTRESS_PWR_STATUS_HH_STATE_IN_PRGS, BUTTRESS_PWR_STATUS_HH_STATE_DONE, BUTTRESS_PWR_STATUS_HH_STATE_ERR, }; #define BUTTRESS_TSC_CMD_START_TSC_SYNC BIT(0) #define BUTTRESS_PWR_STATUS_HH_STATUS_SHIFT 11 #define BUTTRESS_PWR_STATUS_HH_STATUS_MASK (0x3U << 11) #define BUTTRESS_TSW_WA_SOFT_RESET BIT(8) /* new for PTL */ #define BUTTRESS_SEL_PB_TIMESTAMP BIT(9) #define BUTTRESS_IRQS (BUTTRESS_IRQ_IS_IRQ | \ BUTTRESS_IRQ_PS_IRQ | \ BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING | \ BUTTRESS_IRQ_CSE_CSR_SET | \ BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE | \ BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ) /* Iunit to CSE regs */ #define BUTTRESS_IU2CSEDB0_BUSY BIT(31) #define BUTTRESS_IU2CSEDB0_SHORT_FORMAT_SHIFT 27 #define BUTTRESS_IU2CSEDB0_CLIENT_ID_SHIFT 10 #define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2 #define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1 #define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2 
#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3 #define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16 #define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE BIT(0) #define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE BIT(1) #define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE BIT(2) #define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE BIT(4) #define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0) #define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1) #define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2) #define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3) #define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4) #define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5) /* 0x20 == NACK, 0xf == unknown command */ #define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20 #define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK 0xffff /* IS/PS freq control */ #define BUTTRESS_IS_FREQ_CTL_RATIO_MASK 0xffU #define BUTTRESS_PS_FREQ_CTL_RATIO_MASK 0xffU #define IPU7_IS_FREQ_MAX 450 #define IPU7_IS_FREQ_MIN 50 #define IPU7_PS_FREQ_MAX 750 #define BUTTRESS_PS_FREQ_RATIO_STEP 25U /* valid for IPU8 */ #define BUTTRESS_IS_FREQ_RATIO_STEP 25U /* IS: 400mhz, PS: 500mhz */ #define IPU7_IS_FREQ_CTL_DEFAULT_RATIO 0x1b #define IPU7_PS_FREQ_CTL_DEFAULT_RATIO 0x14 /* IS: 400mhz, PS: 400mhz */ #define IPU8_IS_FREQ_CTL_DEFAULT_RATIO 0x10 #define IPU8_PS_FREQ_CTL_DEFAULT_RATIO 0x10 #define IPU_FREQ_CTL_CDYN 0x80 #define IPU_FREQ_CTL_RATIO_SHIFT 0x0 #define IPU_FREQ_CTL_CDYN_SHIFT 0x8 /* buttree power status */ #define IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 0 #define IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK \ (0x3U << IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT) #define IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 4 #define IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK \ (0x3U << IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT) #define IPU_BUTTRESS_PWR_STATE_DN_DONE 0x0 #define IPU_BUTTRESS_PWR_STATE_UP_PROCESS 0x1 #define IPU_BUTTRESS_PWR_STATE_DN_PROCESS 0x2 #define IPU_BUTTRESS_PWR_STATE_UP_DONE 0x3 
#define BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3 #define BUTTRESS_PWR_STATE_IS_PWR_MASK (0x3 << 3) #define BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6 #define BUTTRESS_PWR_STATE_PS_PWR_MASK (0x3 << 6) #define PS_FSM_CG BIT(3) #define BUTTRESS_OVERRIDE_IS_CLK BIT(1) #define BUTTRESS_OVERRIDE_PS_CLK BIT(2) /* ps_pll only valid for ipu8 */ #define BUTTRESS_OWN_ACK_PS_PLL BIT(8) #define BUTTRESS_OWN_ACK_IS_CLK BIT(9) #define BUTTRESS_OWN_ACK_PS_CLK BIT(10) /* FW reset ctrl */ #define BUTTRESS_FW_RESET_CTL_START BIT(0) #define BUTTRESS_FW_RESET_CTL_DONE BIT(1) /* security */ #define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16) #define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK GENMASK(4, 0) #define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE BIT(0) #define BUTTRESS_SECURITY_CTL_AUTH_DONE BIT(1) #define BUTTRESS_SECURITY_CTL_AUTH_FAILED BIT(3) /* D2D */ #define BUTTRESS_D2D_PWR_EN BIT(0) #define BUTTRESS_D2D_PWR_ACK BIT(4) /* NDE */ #define NDE_VAL_MASK GENMASK(9, 0) #define NDE_SCALE_MASK GENMASK(12, 10) #define NDE_VALID_MASK BIT(13) #define NDE_RESVEC_MASK GENMASK(19, 16) #define NDE_IN_VBLANK_DIS_MASK BIT(31) #define BUTTRESS_NDE_VAL_ACTIVE 48 #define BUTTRESS_NDE_SCALE_ACTIVE 2 #define BUTTRESS_NDE_VALID_ACTIVE 1 #define BUTTRESS_NDE_VAL_DEFAULT 1023 #define BUTTRESS_NDE_SCALE_DEFAULT 2 #define BUTTRESS_NDE_VALID_DEFAULT 0 /* IS and PS UCX control */ #define UCX_CTL_RESET BIT(0) #define UCX_CTL_RUN BIT(1) #define UCX_CTL_WAKEUP BIT(2) #define UCX_CTL_SPARE GENMASK(7, 3) #define UCX_STS_PWR GENMASK(17, 16) #define UCX_STS_SLEEPING BIT(18) /* offset from PHY base */ #define PHY_CSI_CFG 0xc0 #define PHY_CSI_RCOMP_CONTROL 0xc8 #define PHY_CSI_BSCAN_EXCLUDE 0xd8 #define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x)) #define PHY_DPHY_DLL_OVRD(x) (0x14c + 0x100 * (x)) #define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x)) #define PHY_CPHY_RX_CONTROL2(x) (0x114 + 0x100 * (x)) #define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x)) #define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x)) /* PB registers */ #define 
INTERRUPT_STATUS 0x0 #define BTRS_LOCAL_INTERRUPT_MASK 0x4 #define GLOBAL_INTERRUPT_MASK 0x8 #define HM_ATS 0xc #define ATS_ERROR_LOG1 0x10 #define ATS_ERROR_LOG2 0x14 #define ATS_ERROR_CLEAR 0x18 #define CFI_0_ERROR_LOG 0x1c #define CFI_0_ERROR_CLEAR 0x20 #define HASH_CONFIG 0x2c #define TLBID_HASH_ENABLE_31_0 0x30 #define TLBID_HASH_ENABLE_63_32 0x34 #define TLBID_HASH_ENABLE_95_64 0x38 #define TLBID_HASH_ENABLE_127_96 0x3c #define CFI_1_ERROR_LOGGING 0x40 #define CFI_1_ERROR_CLEAR 0x44 #define IMR_ERROR_LOGGING_LOW 0x48 #define IMR_ERROR_LOGGING_HIGH 0x4c #define IMR_ERROR_CLEAR 0x50 #define PORT_ARBITRATION_WEIGHTS 0x54 #define IMR_ERROR_LOGGING_CFI_1_LOW 0x58 #define IMR_ERROR_LOGGING_CFI_1_HIGH 0x5c #define IMR_ERROR_CLEAR_CFI_1 0x60 #define BAR2_MISC_CONFIG 0x64 #define RSP_ID_CONFIG_AXI2CFI_0 0x68 #define RSP_ID_CONFIG_AXI2CFI_1 0x6c #define PB_DRIVER_PCODE_MAILBOX_STATUS 0x70 #define PB_DRIVER_PCODE_MAILBOX_INTERFACE 0x74 #define PORT_ARBITRATION_WEIGHTS_ATS 0x78 #endif /* IPU7_BUTTRESS_REGS_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-buttress.c000066400000000000000000001020601503071307200272510ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress.h" #include "ipu7-buttress-regs.h" #define BOOTLOADER_STATUS_OFFSET BUTTRESS_REG_FW_BOOT_PARAMS7 #define BOOTLOADER_MAGIC_KEY 0xb00710adU #define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 #define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 #define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE #define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10U #define BUTTRESS_POWER_TIMEOUT_US (200 * USEC_PER_MSEC) #define BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US (5 * 
USEC_PER_SEC) #define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US (10 * USEC_PER_SEC) #define BUTTRESS_CSE_FWRESET_TIMEOUT_US (100 * USEC_PER_MSEC) #define BUTTRESS_IPC_TX_TIMEOUT_MS MSEC_PER_SEC #define BUTTRESS_IPC_RX_TIMEOUT_MS MSEC_PER_SEC #define BUTTRESS_IPC_VALIDITY_TIMEOUT_US (1 * USEC_PER_SEC) #define BUTTRESS_TSC_SYNC_TIMEOUT_US (5 * USEC_PER_MSEC) #define BUTTRESS_IPC_RESET_RETRY 2000U #define BUTTRESS_CSE_IPC_RESET_RETRY 4U #define BUTTRESS_IPC_CMD_SEND_RETRY 1U struct ipu7_ipc_buttress_msg { u32 cmd; u32 expected_resp; bool require_resp; u8 cmd_size; }; static const u32 ipu7_adev_irq_mask[2] = { BUTTRESS_IRQ_IS_IRQ, BUTTRESS_IRQ_PS_IRQ }; int ipu_buttress_ipc_reset(struct ipu7_device *isp, struct ipu_buttress_ipc *ipc) { unsigned int retries = BUTTRESS_IPC_RESET_RETRY; struct ipu_buttress *b = &isp->buttress; struct device *dev = &isp->pdev->dev; u32 val = 0, csr_in_clr; if (!isp->secure_mode) { dev_dbg(dev, "Skip IPC reset for non-secure mode\n"); return 0; } mutex_lock(&b->ipc_mutex); /* Clear-by-1 CSR (all bits), corresponding internal states. */ val = readl(isp->base + ipc->csr_in); writel(val, isp->base + ipc->csr_in); /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ writel(ENTRY, isp->base + ipc->csr_out); /* * Clear-by-1 all CSR bits EXCEPT following * bits: * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. * C. Possibly custom bits, depending on * their role. */ csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ | BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID | BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY; do { usleep_range(400, 500); val = readl(isp->base + ipc->csr_in); switch (val) { case ENTRY | EXIT: case ENTRY | EXIT | QUERY: /* * 1) Clear-by-1 CSR bits * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, * IPC_PEER_COMP_ACTIONS_RST_PHASE2). * 2) Set peer CSR bit * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. 
*/ writel(ENTRY | EXIT, isp->base + ipc->csr_in); writel(QUERY, isp->base + ipc->csr_out); break; case ENTRY: case ENTRY | QUERY: /* * 1) Clear-by-1 CSR bits * (IPC_PEER_COMP_ACTIONS_RST_PHASE1, * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE). * 2) Set peer CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE1. */ writel(ENTRY | QUERY, isp->base + ipc->csr_in); writel(ENTRY, isp->base + ipc->csr_out); break; case EXIT: case EXIT | QUERY: /* * Clear-by-1 CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE2. * 1) Clear incoming doorbell. * 2) Clear-by-1 all CSR bits EXCEPT following * bits: * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1. * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2. * C. Possibly custom bits, depending on * their role. * 3) Set peer CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE2. */ writel(EXIT, isp->base + ipc->csr_in); writel(0, isp->base + ipc->db0_in); writel(csr_in_clr, isp->base + ipc->csr_in); writel(EXIT, isp->base + ipc->csr_out); /* * Read csr_in again to make sure if RST_PHASE2 is done. * If csr_in is QUERY, it should be handled again. */ usleep_range(200, 300); val = readl(isp->base + ipc->csr_in); if (val & QUERY) { dev_dbg(dev, "RST_PHASE2 retry csr_in = %x\n", val); break; } mutex_unlock(&b->ipc_mutex); return 0; case QUERY: /* * 1) Clear-by-1 CSR bit * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE. 
* 2) Set peer CSR bit * IPC_PEER_COMP_ACTIONS_RST_PHASE1 */ writel(QUERY, isp->base + ipc->csr_in); writel(ENTRY, isp->base + ipc->csr_out); break; default: dev_dbg_ratelimited(dev, "Unexpected CSR 0x%x\n", val); break; } } while (retries--); mutex_unlock(&b->ipc_mutex); dev_err(dev, "Timed out while waiting for CSE\n"); return -ETIMEDOUT; } static void ipu_buttress_ipc_validity_close(struct ipu7_device *isp, struct ipu_buttress_ipc *ipc) { writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ, isp->base + ipc->csr_out); } static int ipu_buttress_ipc_validity_open(struct ipu7_device *isp, struct ipu_buttress_ipc *ipc) { unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID; void __iomem *addr; int ret; u32 val; writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ, isp->base + ipc->csr_out); addr = isp->base + ipc->csr_in; ret = readl_poll_timeout(addr, val, val & mask, 200, BUTTRESS_IPC_VALIDITY_TIMEOUT_US); if (ret) { dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val); ipu_buttress_ipc_validity_close(isp, ipc); } return ret; } static void ipu_buttress_ipc_recv(struct ipu7_device *isp, struct ipu_buttress_ipc *ipc, u32 *ipc_msg) { if (ipc_msg) *ipc_msg = readl(isp->base + ipc->data0_in); writel(0, isp->base + ipc->db0_in); } static int ipu_buttress_ipc_send_msg(struct ipu7_device *isp, struct ipu7_ipc_buttress_msg *msg) { unsigned long tx_timeout_jiffies, rx_timeout_jiffies; unsigned int retry = BUTTRESS_IPC_CMD_SEND_RETRY; struct ipu_buttress *b = &isp->buttress; struct ipu_buttress_ipc *ipc = &b->cse; struct device *dev = &isp->pdev->dev; int tout; u32 val; int ret; mutex_lock(&b->ipc_mutex); ret = ipu_buttress_ipc_validity_open(isp, ipc); if (ret) { dev_err(dev, "IPC validity open failed\n"); goto out; } tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT_MS); rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT_MS); try: reinit_completion(&ipc->send_complete); if (msg->require_resp) 
reinit_completion(&ipc->recv_complete); dev_dbg(dev, "IPC command: 0x%x\n", msg->cmd); writel(msg->cmd, isp->base + ipc->data0_out); val = BUTTRESS_IU2CSEDB0_BUSY | msg->cmd_size; writel(val, isp->base + ipc->db0_out); tout = wait_for_completion_timeout(&ipc->send_complete, tx_timeout_jiffies); if (!tout) { dev_err(dev, "send IPC response timeout\n"); if (!retry--) { ret = -ETIMEDOUT; goto out; } /* Try again if CSE is not responding on first try */ writel(0, isp->base + ipc->db0_out); goto try; } if (!msg->require_resp) { ret = -EIO; goto out; } tout = wait_for_completion_timeout(&ipc->recv_complete, rx_timeout_jiffies); if (!tout) { dev_err(dev, "recv IPC response timeout\n"); ret = -ETIMEDOUT; goto out; } if (ipc->nack_mask && (ipc->recv_data & ipc->nack_mask) == ipc->nack) { dev_err(dev, "IPC NACK for cmd 0x%x\n", msg->cmd); ret = -EIO; goto out; } if (ipc->recv_data != msg->expected_resp) { dev_err(dev, "expected resp: 0x%x, IPC response: 0x%x\n", msg->expected_resp, ipc->recv_data); ret = -EIO; goto out; } dev_dbg(dev, "IPC commands done\n"); out: ipu_buttress_ipc_validity_close(isp, ipc); mutex_unlock(&b->ipc_mutex); return ret; } static int ipu_buttress_ipc_send(struct ipu7_device *isp, u32 ipc_msg, u32 size, bool require_resp, u32 expected_resp) { struct ipu7_ipc_buttress_msg msg = { .cmd = ipc_msg, .cmd_size = size, .require_resp = require_resp, .expected_resp = expected_resp, }; return ipu_buttress_ipc_send_msg(isp, &msg); } static irqreturn_t ipu_buttress_call_isr(struct ipu7_bus_device *adev) { irqreturn_t ret = IRQ_WAKE_THREAD; if (!adev || !adev->auxdrv || !adev->auxdrv_data) return IRQ_NONE; if (adev->auxdrv_data->isr) ret = adev->auxdrv_data->isr(adev); if (ret == IRQ_WAKE_THREAD && !adev->auxdrv_data->isr_threaded) ret = IRQ_NONE; return ret; } irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr) { struct ipu7_device *isp = isp_ptr; struct ipu7_bus_device *adev[] = { isp->isys, isp->psys }; struct ipu_buttress *b = &isp->buttress; struct device 
*dev = &isp->pdev->dev; irqreturn_t ret = IRQ_NONE; u32 pb_irq, pb_local_irq; u32 disable_irqs = 0; u32 irq_status; unsigned int i; pm_runtime_get_noresume(dev); pb_irq = readl(isp->pb_base + INTERRUPT_STATUS); writel(pb_irq, isp->pb_base + INTERRUPT_STATUS); /* check btrs ATS, CFI and IMR errors, BIT(0) is unused for IPU */ pb_local_irq = readl(isp->pb_base + BTRS_LOCAL_INTERRUPT_MASK); if (pb_local_irq & ~BIT(0)) { dev_warn(dev, "PB interrupt status 0x%x local 0x%x\n", pb_irq, pb_local_irq); dev_warn(dev, "Details: %x %x %x %x %x %x %x %x\n", readl(isp->pb_base + ATS_ERROR_LOG1), readl(isp->pb_base + ATS_ERROR_LOG2), readl(isp->pb_base + CFI_0_ERROR_LOG), readl(isp->pb_base + CFI_1_ERROR_LOGGING), readl(isp->pb_base + IMR_ERROR_LOGGING_LOW), readl(isp->pb_base + IMR_ERROR_LOGGING_HIGH), readl(isp->pb_base + IMR_ERROR_LOGGING_CFI_1_LOW), readl(isp->pb_base + IMR_ERROR_LOGGING_CFI_1_HIGH)); } irq_status = readl(isp->base + BUTTRESS_REG_IRQ_STATUS); if (!irq_status) { pm_runtime_put_noidle(dev); return IRQ_NONE; } do { writel(irq_status, isp->base + BUTTRESS_REG_IRQ_CLEAR); for (i = 0; i < ARRAY_SIZE(ipu7_adev_irq_mask); i++) { irqreturn_t r = ipu_buttress_call_isr(adev[i]); if (!(irq_status & ipu7_adev_irq_mask[i])) continue; if (r == IRQ_WAKE_THREAD) { ret = IRQ_WAKE_THREAD; disable_irqs |= ipu7_adev_irq_mask[i]; } else if (ret == IRQ_NONE && r == IRQ_HANDLED) { ret = IRQ_HANDLED; } } if (irq_status & (BUTTRESS_IRQS | BUTTRESS_IRQ_SAI_VIOLATION) && ret == IRQ_NONE) ret = IRQ_HANDLED; if (irq_status & BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING) { dev_dbg(dev, "BUTTRESS_IRQ_IPC_FROM_CSE_IS_WAITING\n"); ipu_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data); complete(&b->cse.recv_complete); } if (irq_status & BUTTRESS_IRQ_CSE_CSR_SET) dev_dbg(dev, "BUTTRESS_IRQ_CSE_CSR_SET\n"); if (irq_status & BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE) { dev_dbg(dev, "BUTTRESS_IRQ_IPC_EXEC_DONE_BY_CSE\n"); complete(&b->cse.send_complete); } if (irq_status & BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ) 
dev_dbg(dev, "BUTTRESS_IRQ_PUNIT_2_IUNIT_IRQ\n"); if (irq_status & BUTTRESS_IRQ_SAI_VIOLATION && ipu_buttress_get_secure_mode(isp)) dev_err(dev, "BUTTRESS_IRQ_SAI_VIOLATION\n"); irq_status = readl(isp->base + BUTTRESS_REG_IRQ_STATUS); } while (irq_status); if (disable_irqs) writel(BUTTRESS_IRQS & ~disable_irqs, isp->base + BUTTRESS_REG_IRQ_ENABLE); pm_runtime_put(dev); return ret; } irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr) { struct ipu7_device *isp = isp_ptr; struct ipu7_bus_device *adev[] = { isp->isys, isp->psys }; const struct ipu7_auxdrv_data *drv_data = NULL; irqreturn_t ret = IRQ_NONE; unsigned int i; for (i = 0; i < ARRAY_SIZE(ipu7_adev_irq_mask) && adev[i]; i++) { drv_data = adev[i]->auxdrv_data; if (!drv_data) continue; if (drv_data->wake_isr_thread && drv_data->isr_threaded(adev[i]) == IRQ_HANDLED) ret = IRQ_HANDLED; } writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_ENABLE); return ret; } static int isys_d2d_power(struct device *dev, bool on) { struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; int ret = 0; u32 target = on ? BUTTRESS_D2D_PWR_ACK : 0U; u32 val; dev_dbg(dev, "power %s isys d2d.\n", on ? "UP" : "DOWN"); val = readl(isp->base + BUTTRESS_REG_D2D_CTL); if ((val & BUTTRESS_D2D_PWR_ACK) == target) { dev_info(dev, "d2d already in %s state.\n", on ? "UP" : "DOWN"); return 0; } val = on ? val | BUTTRESS_D2D_PWR_EN : val & (~BUTTRESS_D2D_PWR_EN); writel(val, isp->base + BUTTRESS_REG_D2D_CTL); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_D2D_CTL, val, (val & BUTTRESS_D2D_PWR_ACK) == target, 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) dev_err(dev, "power %s d2d timeout. status: 0x%x\n", on ? 
"UP" : "DOWN", val); return ret; } static void isys_nde_control(struct device *dev, bool on) { struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; u32 val, value, scale, valid, resvec; u32 nde_reg; if (on) { value = BUTTRESS_NDE_VAL_ACTIVE; scale = BUTTRESS_NDE_SCALE_ACTIVE; valid = BUTTRESS_NDE_VALID_ACTIVE; } else { value = BUTTRESS_NDE_VAL_DEFAULT; scale = BUTTRESS_NDE_SCALE_DEFAULT; valid = BUTTRESS_NDE_VALID_DEFAULT; } /* only set the fabrics resource ownership for ipu8 */ nde_reg = is_ipu8(isp->hw_ver) ? IPU8_BUTTRESS_REG_NDE_CONTROL : IPU7_BUTTRESS_REG_NDE_CONTROL; resvec = is_ipu8(isp->hw_ver) ? 0x2 : 0xe; val = FIELD_PREP(NDE_VAL_MASK, value) | FIELD_PREP(NDE_SCALE_MASK, scale) | FIELD_PREP(NDE_VALID_MASK, valid) | FIELD_PREP(NDE_RESVEC_MASK, resvec); writel(val, isp->base + nde_reg); } static int ipu7_buttress_powerup(struct device *dev, const struct ipu_buttress_ctrl *ctrl) { struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; u32 val, exp_sts; int ret = 0; if (!ctrl) return 0; mutex_lock(&isp->buttress.power_mutex); exp_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift; if (ctrl->subsys_id == IPU_IS) { ret = isys_d2d_power(dev, true); if (ret) goto out_power; isys_nde_control(dev, true); } /* request clock resource ownership */ val = readl(isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG); val |= ctrl->ovrd_clk; writel(val, isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SLEEP_LEVEL_STS, val, (val & ctrl->own_clk_ack), 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) dev_warn(dev, "request clk ownership timeout. status 0x%x\n", val); val = ctrl->ratio << ctrl->ratio_shift | ctrl->cdyn << ctrl->cdyn_shift; dev_dbg(dev, "set 0x%x to %s_WORKPOINT_REQ.\n", val, ctrl->subsys_id == IPU_IS ? 
"IS" : "PS"); writel(val, isp->base + ctrl->freq_ctl); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS, val, ((val & ctrl->pwr_sts_mask) == exp_sts), 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) { dev_err(dev, "%s power up timeout with status: 0x%x\n", ctrl->subsys_id == IPU_IS ? "IS" : "PS", val); goto out_power; } dev_dbg(dev, "%s power up successfully. status: 0x%x\n", ctrl->subsys_id == IPU_IS ? "IS" : "PS", val); /* release clock resource ownership */ val = readl(isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG); val &= ~ctrl->ovrd_clk; writel(val, isp->base + BUTTRESS_REG_SLEEP_LEVEL_CFG); out_power: mutex_unlock(&isp->buttress.power_mutex); return ret; } static int ipu7_buttress_powerdown(struct device *dev, const struct ipu_buttress_ctrl *ctrl) { struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; u32 val, exp_sts; int ret = 0; if (!ctrl) return 0; mutex_lock(&isp->buttress.power_mutex); exp_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift; val = 0x8U << ctrl->ratio_shift; dev_dbg(dev, "set 0x%x to %s_WORKPOINT_REQ.\n", val, ctrl->subsys_id == IPU_IS ? "IS" : "PS"); writel(val, isp->base + ctrl->freq_ctl); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS, val, ((val & ctrl->pwr_sts_mask) == exp_sts), 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) { dev_err(dev, "%s power down timeout with status: 0x%x\n", ctrl->subsys_id == IPU_IS ? "IS" : "PS", val); goto out_power; } dev_dbg(dev, "%s power down successfully. status: 0x%x\n", ctrl->subsys_id == IPU_IS ? 
"IS" : "PS", val); out_power: if (ctrl->subsys_id == IPU_IS && !ret) { isys_d2d_power(dev, false); isys_nde_control(dev, false); } mutex_unlock(&isp->buttress.power_mutex); return ret; } static int ipu8_buttress_powerup(struct device *dev, const struct ipu_buttress_ctrl *ctrl) { struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; u32 sleep_level_reg = BUTTRESS_REG_SLEEP_LEVEL_STS; u32 val, exp_sts; int ret = 0; if (!ctrl) return 0; mutex_lock(&isp->buttress.power_mutex); exp_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift; if (ctrl->subsys_id == IPU_IS) { ret = isys_d2d_power(dev, true); if (ret) goto out_power; isys_nde_control(dev, true); } /* request ps_pll when psys freq > 400Mhz */ if (ctrl->subsys_id == IPU_PS && ctrl->ratio > 0x10) { writel(1, isp->base + BUTTRESS_REG_PS_PLL_ENABLE); ret = readl_poll_timeout(isp->base + sleep_level_reg, val, (val & ctrl->own_clk_ack), 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) dev_warn(dev, "ps_pll req ack timeout. status 0x%x\n", val); } val = ctrl->ratio << ctrl->ratio_shift | ctrl->cdyn << ctrl->cdyn_shift; dev_dbg(dev, "set 0x%x to %s_WORKPOINT_REQ.\n", val, ctrl->subsys_id == IPU_IS ? "IS" : "PS"); writel(val, isp->base + ctrl->freq_ctl); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS, val, ((val & ctrl->pwr_sts_mask) == exp_sts), 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) { dev_err(dev, "%s power up timeout with status: 0x%x\n", ctrl->subsys_id == IPU_IS ? "IS" : "PS", val); goto out_power; } dev_dbg(dev, "%s power up successfully. status: 0x%x\n", ctrl->subsys_id == IPU_IS ? 
"IS" : "PS", val); out_power: mutex_unlock(&isp->buttress.power_mutex); return ret; } static int ipu8_buttress_powerdown(struct device *dev, const struct ipu_buttress_ctrl *ctrl) { struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; u32 val, exp_sts; int ret = 0; if (!ctrl) return 0; mutex_lock(&isp->buttress.power_mutex); exp_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift; if (ctrl->subsys_id == IPU_PS) val = 0x10U << ctrl->ratio_shift; else val = 0x8U << ctrl->ratio_shift; dev_dbg(dev, "set 0x%x to %s_WORKPOINT_REQ.\n", val, ctrl->subsys_id == IPU_IS ? "IS" : "PS"); writel(val, isp->base + ctrl->freq_ctl); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS, val, ((val & ctrl->pwr_sts_mask) == exp_sts), 100, BUTTRESS_POWER_TIMEOUT_US); if (ret) { dev_err(dev, "%s power down timeout with status: 0x%x\n", ctrl->subsys_id == IPU_IS ? "IS" : "PS", val); goto out_power; } dev_dbg(dev, "%s power down successfully. status: 0x%x\n", ctrl->subsys_id == IPU_IS ? "IS" : "PS", val); out_power: if (ctrl->subsys_id == IPU_IS && !ret) { isys_d2d_power(dev, false); isys_nde_control(dev, false); } if (ctrl->subsys_id == IPU_PS) { val = readl(isp->base + BUTTRESS_REG_SLEEP_LEVEL_STS); if (val & ctrl->own_clk_ack) writel(0, isp->base + BUTTRESS_REG_PS_PLL_ENABLE); } mutex_unlock(&isp->buttress.power_mutex); return ret; } int ipu_buttress_powerup(struct device *dev, const struct ipu_buttress_ctrl *ctrl) { struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; if (is_ipu8(isp->hw_ver)) return ipu8_buttress_powerup(dev, ctrl); return ipu7_buttress_powerup(dev, ctrl); } int ipu_buttress_powerdown(struct device *dev, const struct ipu_buttress_ctrl *ctrl) { struct ipu7_device *isp = to_ipu7_bus_device(dev)->isp; if (is_ipu8(isp->hw_ver)) return ipu8_buttress_powerdown(dev, ctrl); return ipu7_buttress_powerdown(dev, ctrl); } bool ipu_buttress_get_secure_mode(struct ipu7_device *isp) { u32 val; val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); return val & 
BUTTRESS_SECURITY_CTL_FW_SECURE_MODE; } bool ipu_buttress_auth_done(struct ipu7_device *isp) { u32 val; if (!isp->secure_mode) return true; val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL); val = FIELD_GET(BUTTRESS_SECURITY_CTL_FW_SETUP_MASK, val); return val == BUTTRESS_SECURITY_CTL_AUTH_DONE; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu_buttress_auth_done, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu_buttress_auth_done, INTEL_IPU7); #endif int ipu_buttress_get_isys_freq(struct ipu7_device *isp, u32 *freq) { u32 reg_val; int ret; ret = pm_runtime_get_sync(&isp->isys->auxdev.dev); if (ret < 0) { pm_runtime_put(&isp->isys->auxdev.dev); dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret); return ret; } reg_val = readl(isp->base + BUTTRESS_REG_IS_WORKPOINT_REQ); pm_runtime_put(&isp->isys->auxdev.dev); if (is_ipu8(isp->hw_ver)) *freq = (reg_val & BUTTRESS_IS_FREQ_CTL_RATIO_MASK) * 25; else *freq = (reg_val & BUTTRESS_IS_FREQ_CTL_RATIO_MASK) * 50 / 3; return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu_buttress_get_isys_freq, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu_buttress_get_isys_freq, INTEL_IPU7); #endif int ipu_buttress_get_psys_freq(struct ipu7_device *isp, u32 *freq) { u32 reg_val; int ret; ret = pm_runtime_get_sync(&isp->psys->auxdev.dev); if (ret < 0) { pm_runtime_put(&isp->psys->auxdev.dev); dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret); return ret; } reg_val = readl(isp->base + BUTTRESS_REG_PS_WORKPOINT_REQ); pm_runtime_put(&isp->psys->auxdev.dev); reg_val &= BUTTRESS_PS_FREQ_CTL_RATIO_MASK; *freq = BUTTRESS_PS_FREQ_RATIO_STEP * reg_val; return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu_buttress_get_psys_freq, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu_buttress_get_psys_freq, INTEL_IPU7); #endif int ipu_buttress_reset_authentication(struct ipu7_device *isp) { struct device *dev = &isp->pdev->dev; int ret; u32 val; if 
(!isp->secure_mode) { dev_dbg(dev, "Skip auth for non-secure mode\n"); return 0; } writel(BUTTRESS_FW_RESET_CTL_START, isp->base + BUTTRESS_REG_FW_RESET_CTL); ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val, val & BUTTRESS_FW_RESET_CTL_DONE, 500, BUTTRESS_CSE_FWRESET_TIMEOUT_US); if (ret) { dev_err(dev, "Time out while resetting authentication state\n"); return ret; } dev_dbg(dev, "FW reset for authentication done\n"); writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL); /* leave some time for HW restore */ usleep_range(800, 1000); return 0; } int ipu_buttress_authenticate(struct ipu7_device *isp) { struct ipu_buttress *b = &isp->buttress; struct device *dev = &isp->pdev->dev; u32 data, mask, done, fail; int ret; if (!isp->secure_mode) { dev_dbg(dev, "Skip auth for non-secure mode\n"); return 0; } mutex_lock(&b->auth_mutex); if (ipu_buttress_auth_done(isp)) { ret = 0; goto out_unlock; } /* * BUTTRESS_REG_FW_SOURCE_BASE needs to be set with FW CPD * package address for secure mode. 
*/ writel(isp->cpd_fw->size, isp->base + BUTTRESS_REG_FW_SOURCE_SIZE); writel(sg_dma_address(isp->psys->fw_sgt.sgl), isp->base + BUTTRESS_REG_FW_SOURCE_BASE); /* * Write boot_load into IU2CSEDATA0 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as */ dev_info(dev, "Sending BOOT_LOAD to CSE\n"); ret = ipu_buttress_ipc_send(isp, BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD, 1, true, BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE); if (ret) { dev_err(dev, "CSE boot_load failed\n"); goto out_unlock; } mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK; done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE; fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED; ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, ((data & mask) == done || (data & mask) == fail), 500, BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); if (ret) { dev_err(dev, "CSE boot_load timeout\n"); goto out_unlock; } if ((data & mask) == fail) { dev_err(dev, "CSE auth failed\n"); ret = -EINVAL; goto out_unlock; } ret = readl_poll_timeout(isp->base + BOOTLOADER_STATUS_OFFSET, data, data == BOOTLOADER_MAGIC_KEY, 500, BUTTRESS_CSE_BOOTLOAD_TIMEOUT_US); if (ret) { dev_err(dev, "Unexpected magic number 0x%x\n", data); goto out_unlock; } /* * Write authenticate_run into IU2CSEDATA0 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as */ dev_info(dev, "Sending AUTHENTICATE_RUN to CSE\n"); ret = ipu_buttress_ipc_send(isp, BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN, 1, true, BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE); if (ret) { dev_err(dev, "CSE authenticate_run failed\n"); goto out_unlock; } done = BUTTRESS_SECURITY_CTL_AUTH_DONE; ret = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data, ((data & mask) == done || (data & mask) == fail), 500, BUTTRESS_CSE_AUTHENTICATE_TIMEOUT_US); if (ret) { dev_err(dev, "CSE authenticate timeout\n"); goto out_unlock; } if ((data & mask) == fail) { dev_err(dev, "CSE boot_load failed\n"); ret = -EINVAL; goto out_unlock; } 
dev_info(dev, "CSE authenticate_run done\n"); out_unlock: mutex_unlock(&b->auth_mutex); return ret; } static int ipu_buttress_send_tsc_request(struct ipu7_device *isp) { u32 val, mask, done; int ret; mask = BUTTRESS_PWR_STATUS_HH_STATUS_MASK; writel(BUTTRESS_TSC_CMD_START_TSC_SYNC, isp->base + BUTTRESS_REG_TSC_CMD); val = readl(isp->base + BUTTRESS_REG_PWR_STATUS); val = FIELD_GET(mask, val); if (val == BUTTRESS_PWR_STATUS_HH_STATE_ERR) { dev_err(&isp->pdev->dev, "Start tsc sync failed\n"); return -EINVAL; } done = BUTTRESS_PWR_STATUS_HH_STATE_DONE; ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATUS, val, FIELD_GET(mask, val) == done, 500, BUTTRESS_TSC_SYNC_TIMEOUT_US); if (ret) dev_err(&isp->pdev->dev, "Start tsc sync timeout\n"); return ret; } int ipu_buttress_start_tsc_sync(struct ipu7_device *isp) { void __iomem *base = isp->base; unsigned int i; u32 val; if (is_ipu8(isp->hw_ver)) { for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { val = readl(base + BUTTRESS_REG_PB_TIMESTAMP_VALID); if (val == 1) return 0; usleep_range(40, 50); } dev_err(&isp->pdev->dev, "PB HH sync failed (valid %u)\n", val); return -ETIMEDOUT; } if (is_ipu7p5(isp->hw_ver)) { val = readl(base + BUTTRESS_REG_TSC_CTL); val |= BUTTRESS_SEL_PB_TIMESTAMP; writel(val, base + BUTTRESS_REG_TSC_CTL); for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { val = readl(base + BUTTRESS_REG_PB_TIMESTAMP_VALID); if (val == 1) return 0; usleep_range(40, 50); } dev_err(&isp->pdev->dev, "PB HH sync failed (valid %u)\n", val); return -ETIMEDOUT; } for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) { int ret; ret = ipu_buttress_send_tsc_request(isp); if (ret != -ETIMEDOUT) return ret; val = readl(base + BUTTRESS_REG_TSC_CTL); val = val | BUTTRESS_TSW_WA_SOFT_RESET; writel(val, base + BUTTRESS_REG_TSC_CTL); val = val & (~BUTTRESS_TSW_WA_SOFT_RESET); writel(val, base + BUTTRESS_REG_TSC_CTL); } dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n"); return -ETIMEDOUT; } #if 
LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu_buttress_start_tsc_sync, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu_buttress_start_tsc_sync, INTEL_IPU7); #endif void ipu_buttress_tsc_read(struct ipu7_device *isp, u64 *val) { unsigned long flags; u32 tsc_hi, tsc_lo; local_irq_save(flags); if (is_ipu7(isp->hw_ver)) { tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO); tsc_hi = readl(isp->base + BUTTRESS_REG_TSC_HI); } else { tsc_lo = readl(isp->base + BUTTRESS_REG_PB_TIMESTAMP_LO); tsc_hi = readl(isp->base + BUTTRESS_REG_PB_TIMESTAMP_HI); } *val = (u64)tsc_hi << 32 | tsc_lo; local_irq_restore(flags); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu_buttress_tsc_read, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu_buttress_tsc_read, INTEL_IPU7); #endif u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu7_device *isp) { u64 ns = ticks * 10000; /* * converting TSC tick count to ns is calculated by: * Example (TSC clock frequency is 19.2MHz): * ns = ticks * 1000 000 000 / 19.2Mhz * = ticks * 1000 000 000 / 19200000Hz * = ticks * 10000 / 192 ns */ return div_u64(ns, isp->buttress.ref_clk); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu_buttress_tsc_ticks_to_ns, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu_buttress_tsc_ticks_to_ns, INTEL_IPU7); #endif /* trigger uc control to wakeup fw */ void ipu_buttress_wakeup_is_uc(const struct ipu7_device *isp) { u32 val; val = readl(isp->base + BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS); val |= UCX_CTL_WAKEUP; writel(val, isp->base + BUTTRESS_REG_DRV_IS_UCX_CONTROL_STATUS); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu_buttress_wakeup_is_uc, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu_buttress_wakeup_is_uc, INTEL_IPU7); #endif void ipu_buttress_wakeup_ps_uc(const struct ipu7_device *isp) { u32 val; val = readl(isp->base + BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS); val |= UCX_CTL_WAKEUP; writel(val, isp->base + 
BUTTRESS_REG_DRV_PS_UCX_CONTROL_STATUS); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu_buttress_wakeup_ps_uc, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu_buttress_wakeup_ps_uc, INTEL_IPU7); #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) #define INTEL_PANTHERLAKE_L IFM(6, 0xCC) #endif static const struct x86_cpu_id ipu_misc_cfg_exclusion[] = { #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 14, 0) X86_MATCH_VFM_STEPS(INTEL_PANTHERLAKE_L, 0x1, 0x1, 0), #else X86_MATCH_VFM_STEPPINGS(INTEL_PANTHERLAKE_L, 0x1, 0), #endif {}, }; static void ipu_buttress_setup(struct ipu7_device *isp) { struct device *dev = &isp->pdev->dev; u32 val; /* program PB BAR */ #define WRXREQOP_OVRD_VAL_MASK GENMASK(22, 19) writel(0, isp->pb_base + GLOBAL_INTERRUPT_MASK); val = readl(isp->pb_base + BAR2_MISC_CONFIG); if (is_ipu7(isp->hw_ver) || x86_match_cpu(ipu_misc_cfg_exclusion)) val |= 0x100U; else val |= FIELD_PREP(WRXREQOP_OVRD_VAL_MASK, 0xf) | BIT(18) | 0x100U; writel(val, isp->pb_base + BAR2_MISC_CONFIG); val = readl(isp->pb_base + BAR2_MISC_CONFIG); if (is_ipu8(isp->hw_ver)) { writel(BIT(13), isp->pb_base + TLBID_HASH_ENABLE_63_32); writel(BIT(9), isp->pb_base + TLBID_HASH_ENABLE_95_64); dev_dbg(dev, "IPU8 TLBID_HASH %x %x\n", readl(isp->pb_base + TLBID_HASH_ENABLE_63_32), readl(isp->pb_base + TLBID_HASH_ENABLE_95_64)); } else if (is_ipu7p5(isp->hw_ver)) { writel(BIT(14), isp->pb_base + TLBID_HASH_ENABLE_63_32); writel(BIT(9), isp->pb_base + TLBID_HASH_ENABLE_95_64); dev_dbg(dev, "IPU7P5 TLBID_HASH %x %x\n", readl(isp->pb_base + TLBID_HASH_ENABLE_63_32), readl(isp->pb_base + TLBID_HASH_ENABLE_95_64)); } else { writel(BIT(22), isp->pb_base + TLBID_HASH_ENABLE_63_32); writel(BIT(1), isp->pb_base + TLBID_HASH_ENABLE_127_96); dev_dbg(dev, "TLBID_HASH %x %x\n", readl(isp->pb_base + TLBID_HASH_ENABLE_63_32), readl(isp->pb_base + TLBID_HASH_ENABLE_127_96)); } writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_CLEAR); writel(BUTTRESS_IRQS, isp->base + 
BUTTRESS_REG_IRQ_MASK); writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_IRQ_ENABLE); /* LNL SW workaround for PS PD hang when PS sub-domain during PD */ writel(PS_FSM_CG, isp->base + BUTTRESS_REG_CG_CTRL_BITS); } void ipu_buttress_restore(struct ipu7_device *isp) { struct ipu_buttress *b = &isp->buttress; ipu_buttress_setup(isp); writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_IDLE_WDT); } int ipu_buttress_init(struct ipu7_device *isp) { int ret, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY; struct ipu_buttress *b = &isp->buttress; struct device *dev = &isp->pdev->dev; u32 val; mutex_init(&b->power_mutex); mutex_init(&b->auth_mutex); mutex_init(&b->cons_mutex); mutex_init(&b->ipc_mutex); init_completion(&b->cse.send_complete); init_completion(&b->cse.recv_complete); b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK; b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK; b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR; b->cse.csr_out = BUTTRESS_REG_IU2CSECSR; b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0; b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0; b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0; b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0; isp->secure_mode = ipu_buttress_get_secure_mode(isp); val = readl(isp->base + BUTTRESS_REG_IPU_SKU); dev_info(dev, "IPU%u SKU %u in %s mode mask 0x%x\n", val & 0xfU, (val >> 4) & 0x7U, isp->secure_mode ? 
"secure" : "non-secure", readl(isp->base + BUTTRESS_REG_CAMERA_MASK)); b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_IDLE_WDT); b->ref_clk = 384; ipu_buttress_setup(isp); /* Retry couple of times in case of CSE initialization is delayed */ do { ret = ipu_buttress_ipc_reset(isp, &b->cse); if (ret) { dev_warn(dev, "IPC reset protocol failed, retrying\n"); } else { dev_dbg(dev, "IPC reset done\n"); return 0; } } while (ipc_reset_retry--); dev_err(dev, "IPC reset protocol failed\n"); mutex_destroy(&b->power_mutex); mutex_destroy(&b->auth_mutex); mutex_destroy(&b->cons_mutex); mutex_destroy(&b->ipc_mutex); return ret; } void ipu_buttress_exit(struct ipu7_device *isp) { struct ipu_buttress *b = &isp->buttress; writel(0, isp->base + BUTTRESS_REG_IRQ_ENABLE); mutex_destroy(&b->power_mutex); mutex_destroy(&b->auth_mutex); mutex_destroy(&b->cons_mutex); mutex_destroy(&b->ipc_mutex); } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-buttress.h000066400000000000000000000043631503071307200272650ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_BUTTRESS_H #define IPU7_BUTTRESS_H #include #include #include #include struct device; struct ipu7_device; struct ipu_buttress_ctrl { u32 subsys_id; u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off; u32 ratio; u32 ratio_shift; u32 cdyn; u32 cdyn_shift; u32 ovrd_clk; u32 own_clk_ack; }; struct ipu_buttress_ipc { struct completion send_complete; struct completion recv_complete; u32 nack; u32 nack_mask; u32 recv_data; u32 csr_out; u32 csr_in; u32 db0_in; u32 db0_out; u32 data0_out; u32 data0_in; }; struct ipu_buttress { struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex; struct ipu_buttress_ipc cse; u32 psys_min_freq; u32 wdt_cached_value; u8 psys_force_ratio; bool force_suspend; u32 ref_clk; }; int ipu_buttress_ipc_reset(struct ipu7_device *isp, struct ipu_buttress_ipc *ipc); int 
ipu_buttress_powerup(struct device *dev, const struct ipu_buttress_ctrl *ctrl); int ipu_buttress_powerdown(struct device *dev, const struct ipu_buttress_ctrl *ctrl); bool ipu_buttress_get_secure_mode(struct ipu7_device *isp); int ipu_buttress_authenticate(struct ipu7_device *isp); int ipu_buttress_reset_authentication(struct ipu7_device *isp); bool ipu_buttress_auth_done(struct ipu7_device *isp); int ipu_buttress_get_isys_freq(struct ipu7_device *isp, u32 *freq); int ipu_buttress_get_psys_freq(struct ipu7_device *isp, u32 *freq); int ipu_buttress_start_tsc_sync(struct ipu7_device *isp); void ipu_buttress_tsc_read(struct ipu7_device *isp, u64 *val); u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks, const struct ipu7_device *isp); irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr); irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr); int ipu_buttress_init(struct ipu7_device *isp); void ipu_buttress_exit(struct ipu7_device *isp); void ipu_buttress_csi_port_config(struct ipu7_device *isp, u32 legacy, u32 combo); void ipu_buttress_restore(struct ipu7_device *isp); void ipu_buttress_wakeup_is_uc(const struct ipu7_device *isp); void ipu_buttress_wakeup_ps_uc(const struct ipu7_device *isp); #endif /* IPU7_BUTTRESS_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-cpd.c000066400000000000000000000157301503071307200261530ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-cpd.h" /* $CPD */ #define CPD_HDR_MARK 0x44504324 /* Maximum size is 4K DWORDs or 16KB */ #define MAX_MANIFEST_SIZE (SZ_4K * sizeof(u32)) #define CPD_MANIFEST_IDX 0 #define CPD_BINARY_START_IDX 1U #define CPD_METADATA_START_IDX 2U #define CPD_BINARY_NUM 2U /* ISYS + PSYS */ /* * Entries include: * 1 manifest entry. * 1 metadata entry for each sub system(ISYS and PSYS). 
* 1 binary entry for each sub system(ISYS and PSYS). */ #define CPD_ENTRY_NUM (CPD_BINARY_NUM * 2U + 1U) #define CPD_METADATA_ATTR 0xa #define CPD_METADATA_IPL 0x1c #define ONLINE_METADATA_SIZE 128U #define ONLINE_METADATA_LINES 6U struct ipu7_cpd_hdr { u32 hdr_mark; u32 ent_cnt; u8 hdr_ver; u8 ent_ver; u8 hdr_len; u8 rsvd; u8 partition_name[4]; u32 crc32; } __packed; struct ipu7_cpd_ent { u8 name[12]; u32 offset; u32 len; u8 rsvd[4]; } __packed; struct ipu7_cpd_metadata_hdr { u32 type; u32 len; } __packed; struct ipu7_cpd_metadata_attr { struct ipu7_cpd_metadata_hdr hdr; u8 compression_type; u8 encryption_type; u8 rsvd[2]; u32 uncompressed_size; u32 compressed_size; u32 module_id; u8 hash[48]; } __packed; struct ipu7_cpd_metadata_ipl { struct ipu7_cpd_metadata_hdr hdr; u32 param[4]; u8 rsvd[8]; } __packed; struct ipu7_cpd_metadata { struct ipu7_cpd_metadata_attr attr; struct ipu7_cpd_metadata_ipl ipl; } __packed; static inline struct ipu7_cpd_ent *ipu7_cpd_get_entry(const void *cpd, int idx) { const struct ipu7_cpd_hdr *cpd_hdr = cpd; return ((struct ipu7_cpd_ent *)((u8 *)cpd + cpd_hdr->hdr_len)) + idx; } #define ipu7_cpd_get_manifest(cpd) ipu7_cpd_get_entry(cpd, 0) static struct ipu7_cpd_metadata *ipu7_cpd_get_metadata(const void *cpd, int idx) { struct ipu7_cpd_ent *cpd_ent = ipu7_cpd_get_entry(cpd, CPD_METADATA_START_IDX + idx * 2); return (struct ipu7_cpd_metadata *)((u8 *)cpd + cpd_ent->offset); } static int ipu7_cpd_validate_cpd(struct ipu7_device *isp, const void *cpd, unsigned long data_size) { const struct ipu7_cpd_hdr *cpd_hdr = cpd; struct device *dev = &isp->pdev->dev; struct ipu7_cpd_ent *ent; unsigned int i; u8 len; len = cpd_hdr->hdr_len; /* Ensure cpd hdr is within moduledata */ if (data_size < len) { dev_err(dev, "Invalid CPD moduledata size\n"); return -EINVAL; } /* Check for CPD file marker */ if (cpd_hdr->hdr_mark != CPD_HDR_MARK) { dev_err(dev, "Invalid CPD header marker\n"); return -EINVAL; } /* Sanity check for CPD entry header */ if 
(cpd_hdr->ent_cnt != CPD_ENTRY_NUM) { dev_err(dev, "Invalid CPD entry number %d\n", cpd_hdr->ent_cnt); return -EINVAL; } if ((data_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) { dev_err(dev, "Invalid CPD entry headers\n"); return -EINVAL; } /* Ensure that all entries are within moduledata */ ent = (struct ipu7_cpd_ent *)(((u8 *)cpd_hdr) + len); for (i = 0; i < cpd_hdr->ent_cnt; i++) { if (data_size < ent->offset || data_size - ent->offset < ent->len) { dev_err(dev, "Invalid CPD entry %d\n", i); return -EINVAL; } ent++; } return 0; } static int ipu7_cpd_validate_metadata(struct ipu7_device *isp, const void *cpd, int idx) { const struct ipu7_cpd_ent *cpd_ent = ipu7_cpd_get_entry(cpd, CPD_METADATA_START_IDX + idx * 2); const struct ipu7_cpd_metadata *metadata = ipu7_cpd_get_metadata(cpd, idx); struct device *dev = &isp->pdev->dev; /* Sanity check for metadata size */ if (cpd_ent->len != sizeof(struct ipu7_cpd_metadata)) { dev_err(dev, "Invalid metadata size\n"); return -EINVAL; } /* Validate type and length of metadata sections */ if (metadata->attr.hdr.type != CPD_METADATA_ATTR) { dev_err(dev, "Invalid metadata attr type (%d)\n", metadata->attr.hdr.type); return -EINVAL; } if (metadata->attr.hdr.len != sizeof(struct ipu7_cpd_metadata_attr)) { dev_err(dev, "Invalid metadata attr size (%d)\n", metadata->attr.hdr.len); return -EINVAL; } if (metadata->ipl.hdr.type != CPD_METADATA_IPL) { dev_err(dev, "Invalid metadata ipl type (%d)\n", metadata->ipl.hdr.type); return -EINVAL; } if (metadata->ipl.hdr.len != sizeof(struct ipu7_cpd_metadata_ipl)) { dev_err(dev, "Invalid metadata ipl size (%d)\n", metadata->ipl.hdr.len); return -EINVAL; } return 0; } int ipu7_cpd_validate_cpd_file(struct ipu7_device *isp, const void *cpd_file, unsigned long cpd_file_size) { struct device *dev = &isp->pdev->dev; struct ipu7_cpd_ent *ent; unsigned int i; int ret; char *buf; ret = ipu7_cpd_validate_cpd(isp, cpd_file, cpd_file_size); if (ret) { dev_err(dev, "Invalid CPD in file\n"); return 
-EINVAL; } /* Sanity check for manifest size */ ent = ipu7_cpd_get_manifest(cpd_file); if (ent->len > MAX_MANIFEST_SIZE) { dev_err(dev, "Invalid manifest size\n"); return -EINVAL; } /* Validate metadata */ for (i = 0; i < CPD_BINARY_NUM; i++) { ret = ipu7_cpd_validate_metadata(isp, cpd_file, i); if (ret) { dev_err(dev, "Invalid metadata%d\n", i); return ret; } } /* Get fw binary version. */ buf = kmalloc(ONLINE_METADATA_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < CPD_BINARY_NUM; i++) { char *lines[ONLINE_METADATA_LINES]; char *info = buf; unsigned int l; ent = ipu7_cpd_get_entry(cpd_file, CPD_BINARY_START_IDX + i * 2U); memcpy(info, (u8 *)cpd_file + ent->offset + ent->len - ONLINE_METADATA_SIZE, ONLINE_METADATA_SIZE); for (l = 0; l < ONLINE_METADATA_LINES; l++) { lines[l] = strsep((char **)&info, "\n"); if (!lines[l]) break; } if (l < ONLINE_METADATA_LINES) { dev_err(dev, "Failed to parse fw binary%d info.\n", i); continue; } dev_info(dev, "FW binary%d info:\n", i); dev_info(dev, "Name: %s\n", lines[1]); dev_info(dev, "Version: %s\n", lines[2]); dev_info(dev, "Timestamp: %s\n", lines[3]); dev_info(dev, "Commit: %s\n", lines[4]); } kfree(buf); return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_cpd_validate_cpd_file, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_cpd_validate_cpd_file, INTEL_IPU7); #endif int ipu7_cpd_copy_binary(const void *cpd, const char *name, void *code_region, u32 *entry) { unsigned int i; for (i = 0; i < CPD_BINARY_NUM; i++) { const struct ipu7_cpd_ent *binary = ipu7_cpd_get_entry(cpd, CPD_BINARY_START_IDX + i * 2U); const struct ipu7_cpd_metadata *metadata = ipu7_cpd_get_metadata(cpd, i); if (!strncmp(binary->name, name, sizeof(binary->name))) { memcpy(code_region + metadata->ipl.param[0], cpd + binary->offset, binary->len); *entry = metadata->ipl.param[2]; return 0; } } return -ENOENT; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_cpd_copy_binary, 
"INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_cpd_copy_binary, INTEL_IPU7); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-cpd.h000066400000000000000000000006271503071307200261570ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 - 2025 Intel Corporation */ #ifndef IPU7_CPD_H #define IPU7_CPD_H struct ipu7_device; int ipu7_cpd_validate_cpd_file(struct ipu7_device *isp, const void *cpd_file, unsigned long cpd_file_size); int ipu7_cpd_copy_binary(const void *cpd, const char *name, void *code_region, u32 *entry); #endif /* IPU7_CPD_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-dma.c000066400000000000000000000303601503071307200261420ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-dma.h" #include "ipu7-mmu.h" struct vm_info { struct list_head list; struct page **pages; dma_addr_t ipu7_iova; void *vaddr; unsigned long size; }; static struct vm_info *get_vm_info(struct ipu7_mmu *mmu, dma_addr_t iova) { struct vm_info *info, *save; list_for_each_entry_safe(info, save, &mmu->vma_list, list) { if (iova >= info->ipu7_iova && iova < (info->ipu7_iova + info->size)) return info; } return NULL; } static void __clear_buffer(struct page *page, size_t size, unsigned long attrs) { void *ptr; if (!page) return; /* * Ensure that the allocated pages are zeroed, and that any data * lurking in the kernel direct-mapped region is invalidated. 
*/ ptr = page_address(page); memset(ptr, 0, size); if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) clflush_cache_range(ptr, size); } static struct page **__alloc_buffer(size_t size, gfp_t gfp, unsigned long attrs) { unsigned int count = PHYS_PFN(size); unsigned int array_size = count * sizeof(struct page *); struct page **pages; int i = 0; pages = kvzalloc(array_size, GFP_KERNEL); if (!pages) return NULL; gfp |= __GFP_NOWARN; while (count) { int j, order = __fls(count); pages[i] = alloc_pages(gfp, order); while (!pages[i] && order) pages[i] = alloc_pages(gfp, --order); if (!pages[i]) goto error; if (order) { split_page(pages[i], order); j = 1U << order; while (j--) pages[i + j] = pages[i] + j; } __clear_buffer(pages[i], PAGE_SIZE << order, attrs); i += 1U << order; count -= 1U << order; } return pages; error: while (i--) if (pages[i]) __free_pages(pages[i], 0); kvfree(pages); return NULL; } static void __free_buffer(struct page **pages, size_t size, unsigned long attrs) { unsigned int count = PHYS_PFN(size); unsigned int i; for (i = 0; i < count && pages[i]; i++) { __clear_buffer(pages[i], PAGE_SIZE, attrs); __free_pages(pages[i], 0); } kvfree(pages); } void ipu7_dma_sync_single(struct ipu7_bus_device *sys, dma_addr_t dma_handle, size_t size) { void *vaddr; u32 offset; struct vm_info *info; struct ipu7_mmu *mmu = sys->mmu; info = get_vm_info(mmu, dma_handle); if (WARN_ON(!info)) return; offset = dma_handle - info->ipu7_iova; if (WARN_ON(size > (info->size - offset))) return; vaddr = info->vaddr + offset; clflush_cache_range(vaddr, size); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dma_sync_single, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dma_sync_single, INTEL_IPU7); #endif void ipu7_dma_sync_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist, unsigned int nents) { struct scatterlist *sg; int i; for_each_sg(sglist, sg, nents, i) clflush_cache_range(sg_virt(sg), sg->length); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 
13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dma_sync_sg, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dma_sync_sg, INTEL_IPU7); #endif void ipu7_dma_sync_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt) { ipu7_dma_sync_sg(sys, sgt->sgl, sgt->orig_nents); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dma_sync_sgtable, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dma_sync_sgtable, INTEL_IPU7); #endif void *ipu7_dma_alloc(struct ipu7_bus_device *sys, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { struct device *dev = &sys->auxdev.dev; struct pci_dev *pdev = sys->isp->pdev; dma_addr_t pci_dma_addr, ipu7_iova; struct ipu7_mmu *mmu = sys->mmu; struct vm_info *info; unsigned long count; struct page **pages; struct iova *iova; unsigned int i; int ret; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return NULL; size = PAGE_ALIGN(size); count = PHYS_PFN(size); iova = alloc_iova(&mmu->dmap->iovad, count, PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); if (!iova) goto out_kfree; pages = __alloc_buffer(size, gfp, attrs); if (!pages) goto out_free_iova; dev_dbg(dev, "dma_alloc: size %zu iova low pfn %lu, high pfn %lu\n", size, iova->pfn_lo, iova->pfn_hi); for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) { pci_dma_addr = dma_map_page_attrs(&pdev->dev, pages[i], 0, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); dev_dbg(dev, "dma_alloc: mapped pci_dma_addr %pad\n", &pci_dma_addr); if (dma_mapping_error(&pdev->dev, pci_dma_addr)) { dev_err(dev, "pci_dma_mapping for page[%d] failed", i); goto out_unmap; } ret = ipu7_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo + i), pci_dma_addr, PAGE_SIZE); if (ret) { dev_err(dev, "ipu7_mmu_map for pci_dma[%d] %pad failed", i, &pci_dma_addr); dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); goto out_unmap; } } info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL); if (!info->vaddr) goto out_unmap; *dma_handle = PFN_PHYS(iova->pfn_lo); 
info->pages = pages; info->ipu7_iova = *dma_handle; info->size = size; list_add(&info->list, &mmu->vma_list); return info->vaddr; out_unmap: while (i--) { ipu7_iova = PFN_PHYS(iova->pfn_lo + i); pci_dma_addr = ipu7_mmu_iova_to_phys(mmu->dmap->mmu_info, ipu7_iova); dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); ipu7_mmu_unmap(mmu->dmap->mmu_info, ipu7_iova, PAGE_SIZE); } __free_buffer(pages, size, attrs); out_free_iova: __free_iova(&mmu->dmap->iovad, iova); out_kfree: kfree(info); return NULL; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dma_alloc, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dma_alloc, INTEL_IPU7); #endif void ipu7_dma_free(struct ipu7_bus_device *sys, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { struct ipu7_mmu *mmu = sys->mmu; struct pci_dev *pdev = sys->isp->pdev; struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(dma_handle)); dma_addr_t pci_dma_addr, ipu7_iova; struct vm_info *info; struct page **pages; unsigned int i; if (WARN_ON(!iova)) return; info = get_vm_info(mmu, dma_handle); if (WARN_ON(!info)) return; if (WARN_ON(!info->vaddr)) return; if (WARN_ON(!info->pages)) return; list_del(&info->list); size = PAGE_ALIGN(size); pages = info->pages; vunmap(vaddr); for (i = 0; i < PHYS_PFN(size); i++) { ipu7_iova = PFN_PHYS(iova->pfn_lo + i); pci_dma_addr = ipu7_mmu_iova_to_phys(mmu->dmap->mmu_info, ipu7_iova); dma_unmap_page_attrs(&pdev->dev, pci_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL, attrs); } ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), PFN_PHYS(iova_size(iova))); __free_buffer(pages, size, attrs); mmu->tlb_invalidate(mmu); __free_iova(&mmu->dmap->iovad, iova); kfree(info); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dma_free, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dma_free, INTEL_IPU7); #endif int ipu7_dma_mmap(struct ipu7_bus_device *sys, struct vm_area_struct *vma, void *addr, 
dma_addr_t iova, size_t size, unsigned long attrs) { struct ipu7_mmu *mmu = sys->mmu; size_t count = PFN_UP(size); struct vm_info *info; size_t i; int ret; info = get_vm_info(mmu, iova); if (!info) return -EFAULT; if (!info->vaddr) return -EFAULT; if (vma->vm_start & ~PAGE_MASK) return -EINVAL; if (size > info->size) return -EFAULT; for (i = 0; i < count; i++) { ret = vm_insert_page(vma, vma->vm_start + PFN_PHYS(i), info->pages[i]); if (ret < 0) return ret; } return 0; } void ipu7_dma_unmap_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist, int nents, enum dma_data_direction dir, unsigned long attrs) { struct device *dev = &sys->auxdev.dev; struct ipu7_mmu *mmu = sys->mmu; struct iova *iova = find_iova(&mmu->dmap->iovad, PHYS_PFN(sg_dma_address(sglist))); struct scatterlist *sg; dma_addr_t pci_dma_addr; unsigned int i; if (!nents) return; if (WARN_ON(!iova)) return; /* * Before IPU7 mmu unmap, return the pci dma address back to sg * assume the nents is less than orig_nents as the least granule * is 1 SZ_4K page */ dev_dbg(dev, "trying to unmap concatenated %u ents\n", nents); for_each_sg(sglist, sg, nents, i) { dev_dbg(dev, "unmap sg[%d] %pad size %u\n", i, &sg_dma_address(sg), sg_dma_len(sg)); pci_dma_addr = ipu7_mmu_iova_to_phys(mmu->dmap->mmu_info, sg_dma_address(sg)); dev_dbg(dev, "return pci_dma_addr %pad back to sg[%d]\n", &pci_dma_addr, i); sg_dma_address(sg) = pci_dma_addr; } dev_dbg(dev, "ipu7_mmu_unmap low pfn %lu high pfn %lu\n", iova->pfn_lo, iova->pfn_hi); ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo), PFN_PHYS(iova_size(iova))); mmu->tlb_invalidate(mmu); __free_iova(&mmu->dmap->iovad, iova); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dma_unmap_sg, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dma_unmap_sg, INTEL_IPU7); #endif int ipu7_dma_map_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist, int nents, enum dma_data_direction dir, unsigned long attrs) { struct device *dev = 
&sys->auxdev.dev; struct ipu7_mmu *mmu = sys->mmu; struct scatterlist *sg; struct iova *iova; size_t npages = 0; unsigned long iova_addr; int i; for_each_sg(sglist, sg, nents, i) { if (sg->offset) { dev_err(dev, "Unsupported non-zero sg[%d].offset %x\n", i, sg->offset); return -EFAULT; } } for_each_sg(sglist, sg, nents, i) npages += PFN_UP(sg_dma_len(sg)); dev_dbg(dev, "dmamap trying to map %d ents %zu pages\n", nents, npages); if (attrs & DMA_ATTR_RESERVE_REGION) { /* * Reserve iova with size aligned to IPU_FW_CODE_REGION_SIZE. * Only apply for non-secure mode. */ unsigned long lo, hi; lo = iova_pfn(&mmu->dmap->iovad, IPU_FW_CODE_REGION_START); hi = iova_pfn(&mmu->dmap->iovad, IPU_FW_CODE_REGION_END) - 1U; iova = reserve_iova(&mmu->dmap->iovad, lo, hi); if (!iova) { dev_err(dev, "Reserve iova[%lx:%lx] failed.\n", lo, hi); return -ENOMEM; } dev_dbg(dev, "iova[%lx:%lx] reserved for FW code.\n", lo, hi); } else { iova = alloc_iova(&mmu->dmap->iovad, npages, PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0); if (!iova) return 0; } dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo, iova->pfn_hi); iova_addr = iova->pfn_lo; for_each_sg(sglist, sg, nents, i) { phys_addr_t iova_pa; int ret; iova_pa = PFN_PHYS(iova_addr); dev_dbg(dev, "mapping entry %d: iova %pap phy %pap size %d\n", i, &iova_pa, &sg_dma_address(sg), sg_dma_len(sg)); ret = ipu7_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr), sg_dma_address(sg), PAGE_ALIGN(sg_dma_len(sg))); if (ret) goto out_fail; sg_dma_address(sg) = PFN_PHYS(iova_addr); iova_addr += PFN_UP(sg_dma_len(sg)); } dev_dbg(dev, "dmamap %d ents %zu pages mapped\n", nents, npages); return nents; out_fail: ipu7_dma_unmap_sg(sys, sglist, i, dir, attrs); return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dma_map_sg, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dma_map_sg, INTEL_IPU7); #endif int ipu7_dma_map_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt, enum dma_data_direction 
dir, unsigned long attrs) { int nents; nents = ipu7_dma_map_sg(sys, sgt->sgl, sgt->nents, dir, attrs); if (nents < 0) return nents; sgt->nents = nents; return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dma_map_sgtable, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dma_map_sgtable, INTEL_IPU7); #endif void ipu7_dma_unmap_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs) { ipu7_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dma_unmap_sgtable, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dma_unmap_sgtable, INTEL_IPU7); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-dma.h000066400000000000000000000033211503071307200261440ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* Copyright (C) 2013--2025 Intel Corporation */ #ifndef IPU7_DMA_H #define IPU7_DMA_H #include #include #include #include #include #include "ipu7-bus.h" #define DMA_ATTR_RESERVE_REGION BIT(31) struct ipu7_mmu_info; struct ipu7_dma_mapping { struct ipu7_mmu_info *mmu_info; struct iova_domain iovad; }; void ipu7_dma_sync_single(struct ipu7_bus_device *sys, dma_addr_t dma_handle, size_t size); void ipu7_dma_sync_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist, unsigned int nents); void ipu7_dma_sync_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt); void *ipu7_dma_alloc(struct ipu7_bus_device *sys, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void ipu7_dma_free(struct ipu7_bus_device *sys, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs); int ipu7_dma_mmap(struct ipu7_bus_device *sys, struct vm_area_struct *vma, void *addr, dma_addr_t iova, size_t size, unsigned long attrs); int ipu7_dma_map_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist, int nents, enum dma_data_direction dir, unsigned 
long attrs); void ipu7_dma_unmap_sg(struct ipu7_bus_device *sys, struct scatterlist *sglist, int nents, enum dma_data_direction dir, unsigned long attrs); int ipu7_dma_map_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs); void ipu7_dma_unmap_sgtable(struct ipu7_bus_device *sys, struct sg_table *sgt, enum dma_data_direction dir, unsigned long attrs); #endif /* IPU7_DMA_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-fw-isys.c000066400000000000000000000300261503071307200270010ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include "abi/ipu7_fw_insys_config_abi.h" #include "abi/ipu7_fw_isys_abi.h" #include "ipu7.h" #include "ipu7-boot.h" #include "ipu7-bus.h" #include "ipu7-dma.h" #include "ipu7-fw-isys.h" #include "ipu7-isys.h" #include "ipu7-platform-regs.h" #include "ipu7-syscom.h" static const char * const send_msg_types[N_IPU_INSYS_SEND_TYPE] = { "STREAM_OPEN", "STREAM_START_AND_CAPTURE", "STREAM_CAPTURE", "STREAM_ABORT", "STREAM_FLUSH", "STREAM_CLOSE" }; int ipu7_fw_isys_complex_cmd(struct ipu7_isys *isys, const unsigned int stream_handle, void *cpu_mapped_buf, dma_addr_t dma_mapped_buf, size_t size, u16 send_type) { struct ipu7_syscom_context *ctx = isys->adev->syscom; struct device *dev = &isys->adev->auxdev.dev; struct ipu7_insys_send_queue_token *token; if (send_type >= N_IPU_INSYS_SEND_TYPE) return -EINVAL; dev_dbg(dev, "send_token: %s\n", send_msg_types[send_type]); /* * Time to flush cache in case we have some payload. 
Not all messages * have that */ if (cpu_mapped_buf) clflush_cache_range(cpu_mapped_buf, size); token = ipu7_syscom_get_token(ctx, stream_handle + IPU_INSYS_INPUT_MSG_QUEUE); if (!token) return -EBUSY; token->addr = dma_mapped_buf; token->buf_handle = (unsigned long)cpu_mapped_buf; token->send_type = send_type; token->stream_id = stream_handle; token->flag = IPU_INSYS_SEND_QUEUE_TOKEN_FLAG_NONE; ipu7_syscom_put_token(ctx, stream_handle + IPU_INSYS_INPUT_MSG_QUEUE); /* now wakeup FW */ ipu_buttress_wakeup_is_uc(isys->adev->isp); return 0; } int ipu7_fw_isys_simple_cmd(struct ipu7_isys *isys, const unsigned int stream_handle, u16 send_type) { return ipu7_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0, send_type); } int ipu7_fw_isys_init(struct ipu7_isys *isys) { struct syscom_queue_config *queue_configs; struct ipu7_bus_device *adev = isys->adev; struct device *dev = &adev->auxdev.dev; struct ipu7_insys_config *isys_config; struct ipu7_syscom_context *syscom; dma_addr_t isys_config_dma_addr; unsigned int i, num_queues; u32 freq; u8 major; int ret; /* Allocate and init syscom context. 
*/ syscom = devm_kzalloc(dev, sizeof(struct ipu7_syscom_context), GFP_KERNEL); if (!syscom) return -ENOMEM; adev->syscom = syscom; syscom->num_input_queues = IPU_INSYS_MAX_INPUT_QUEUES; syscom->num_output_queues = IPU_INSYS_MAX_OUTPUT_QUEUES; num_queues = syscom->num_input_queues + syscom->num_output_queues; queue_configs = devm_kzalloc(dev, FW_QUEUE_CONFIG_SIZE(num_queues), GFP_KERNEL); if (!queue_configs) { ipu7_fw_isys_release(isys); return -ENOMEM; } syscom->queue_configs = queue_configs; queue_configs[IPU_INSYS_OUTPUT_MSG_QUEUE].max_capacity = IPU_ISYS_SIZE_RECV_QUEUE; queue_configs[IPU_INSYS_OUTPUT_MSG_QUEUE].token_size_in_bytes = sizeof(struct ipu7_insys_resp); queue_configs[IPU_INSYS_OUTPUT_LOG_QUEUE].max_capacity = IPU_ISYS_SIZE_LOG_QUEUE; queue_configs[IPU_INSYS_OUTPUT_LOG_QUEUE].token_size_in_bytes = sizeof(struct ipu7_insys_resp); queue_configs[IPU_INSYS_OUTPUT_RESERVED_QUEUE].max_capacity = 0; queue_configs[IPU_INSYS_OUTPUT_RESERVED_QUEUE].token_size_in_bytes = 0; queue_configs[IPU_INSYS_INPUT_DEV_QUEUE].max_capacity = IPU_ISYS_MAX_STREAMS; queue_configs[IPU_INSYS_INPUT_DEV_QUEUE].token_size_in_bytes = sizeof(struct ipu7_insys_send_queue_token); for (i = IPU_INSYS_INPUT_MSG_QUEUE; i < num_queues; i++) { queue_configs[i].max_capacity = IPU_ISYS_SIZE_SEND_QUEUE; queue_configs[i].token_size_in_bytes = sizeof(struct ipu7_insys_send_queue_token); } /* Allocate ISYS subsys config. 
*/ isys_config = ipu7_dma_alloc(adev, sizeof(struct ipu7_insys_config), &isys_config_dma_addr, GFP_KERNEL, 0); if (!isys_config) { dev_err(dev, "Failed to allocate isys subsys config.\n"); ipu7_fw_isys_release(isys); return -ENOMEM; } isys->subsys_config = isys_config; isys->subsys_config_dma_addr = isys_config_dma_addr; memset(isys_config, 0, sizeof(struct ipu7_insys_config)); isys_config->logger_config.use_source_severity = 0; isys_config->logger_config.use_channels_enable_bitmask = 1; isys_config->logger_config.channels_enable_bitmask = LOGGER_CONFIG_CHANNEL_ENABLE_SYSCOM_BITMASK; isys_config->logger_config.hw_printf_buffer_base_addr = 0U; isys_config->logger_config.hw_printf_buffer_size_bytes = 0U; isys_config->wdt_config.wdt_timer1_us = 0; isys_config->wdt_config.wdt_timer2_us = 0; ret = ipu_buttress_get_isys_freq(adev->isp, &freq); if (ret) { dev_err(dev, "Failed to get ISYS frequency.\n"); ipu7_fw_isys_release(isys); return ret; } ipu7_dma_sync_single(adev, isys_config_dma_addr, sizeof(struct ipu7_insys_config)); major = is_ipu8(adev->isp->hw_ver) ? 
2U : 1U; ret = ipu7_boot_init_boot_config(adev, queue_configs, num_queues, freq, isys_config_dma_addr, major); if (ret) ipu7_fw_isys_release(isys); return ret; } void ipu7_fw_isys_release(struct ipu7_isys *isys) { struct ipu7_bus_device *adev = isys->adev; ipu7_boot_release_boot_config(adev); if (isys->subsys_config) { ipu7_dma_free(adev, sizeof(struct ipu7_insys_config), isys->subsys_config, isys->subsys_config_dma_addr, 0); isys->subsys_config = NULL; isys->subsys_config_dma_addr = 0; } } int ipu7_fw_isys_open(struct ipu7_isys *isys) { return ipu7_boot_start_fw(isys->adev); } int ipu7_fw_isys_close(struct ipu7_isys *isys) { return ipu7_boot_stop_fw(isys->adev); } struct ipu7_insys_resp *ipu7_fw_isys_get_resp(struct ipu7_isys *isys) { return (struct ipu7_insys_resp *) ipu7_syscom_get_token(isys->adev->syscom, IPU_INSYS_OUTPUT_MSG_QUEUE); } void ipu7_fw_isys_put_resp(struct ipu7_isys *isys) { ipu7_syscom_put_token(isys->adev->syscom, IPU_INSYS_OUTPUT_MSG_QUEUE); } #ifdef ENABLE_FW_OFFLINE_LOGGER int ipu7_fw_isys_get_log(struct ipu7_isys *isys) { u32 log_size = sizeof(struct ia_gofo_msg_log_info_ts); struct device *dev = &isys->adev->auxdev.dev; struct isys_fw_log *fw_log = isys->fw_log; struct ia_gofo_msg_log *log_msg; u8 msg_type, msg_len; u32 count, fmt_id; void *token; token = ipu7_syscom_get_token(isys->adev->syscom, IPU_INSYS_OUTPUT_LOG_QUEUE); if (!token) return -ENODATA; while (token) { log_msg = (struct ia_gofo_msg_log *)token; msg_type = log_msg->header.tlv_header.tlv_type; msg_len = log_msg->header.tlv_header.tlv_len32; if (msg_type != IPU_MSG_TYPE_DEV_LOG || !msg_len) dev_warn(dev, "Invalid msg data from Log queue!\n"); count = log_msg->log_info_ts.log_info.log_counter; fmt_id = log_msg->log_info_ts.log_info.fmt_id; if (count > fw_log->count + 1) dev_warn(dev, "log msg lost, count %u+1 != %u!\n", count, fw_log->count); if (fmt_id == IA_GOFO_MSG_LOG_FMT_ID_INVALID) { dev_err(dev, "invalid log msg fmt_id 0x%x!\n", fmt_id); 
ipu7_syscom_put_token(isys->adev->syscom, IPU_INSYS_OUTPUT_LOG_QUEUE); return -EIO; } if (log_size + fw_log->head - fw_log->addr > FW_LOG_BUF_SIZE) fw_log->head = fw_log->addr; memcpy(fw_log->head, (void *)&log_msg->log_info_ts, sizeof(struct ia_gofo_msg_log_info_ts)); fw_log->count = count; fw_log->head += log_size; fw_log->size += log_size; ipu7_syscom_put_token(isys->adev->syscom, IPU_INSYS_OUTPUT_LOG_QUEUE); token = ipu7_syscom_get_token(isys->adev->syscom, IPU_INSYS_OUTPUT_LOG_QUEUE); }; return 0; } #endif void ipu7_fw_isys_dump_stream_cfg(struct device *dev, struct ipu7_insys_stream_cfg *cfg) { unsigned int i; dev_dbg(dev, "---------------------------\n"); dev_dbg(dev, "IPU_FW_ISYS_STREAM_CFG_DATA\n"); dev_dbg(dev, ".port id %d\n", cfg->port_id); dev_dbg(dev, ".vc %d\n", cfg->vc); dev_dbg(dev, ".nof_input_pins = %d\n", cfg->nof_input_pins); dev_dbg(dev, ".nof_output_pins = %d\n", cfg->nof_output_pins); dev_dbg(dev, ".stream_msg_map = 0x%x\n", cfg->stream_msg_map); for (i = 0; i < cfg->nof_input_pins; i++) { dev_dbg(dev, ".input_pin[%d]:\n", i); dev_dbg(dev, "\t.dt = 0x%0x\n", cfg->input_pins[i].dt); dev_dbg(dev, "\t.disable_mipi_unpacking = %d\n", cfg->input_pins[i].disable_mipi_unpacking); dev_dbg(dev, "\t.dt_rename_mode = %d\n", cfg->input_pins[i].dt_rename_mode); dev_dbg(dev, "\t.mapped_dt = 0x%0x\n", cfg->input_pins[i].mapped_dt); dev_dbg(dev, "\t.input_res = %d x %d\n", cfg->input_pins[i].input_res.width, cfg->input_pins[i].input_res.height); dev_dbg(dev, "\t.sync_msg_map = 0x%x\n", cfg->input_pins[i].sync_msg_map); } for (i = 0; i < cfg->nof_output_pins; i++) { dev_dbg(dev, ".output_pin[%d]:\n", i); dev_dbg(dev, "\t.input_pin_id = %d\n", cfg->output_pins[i].input_pin_id); dev_dbg(dev, "\t.stride = %d\n", cfg->output_pins[i].stride); dev_dbg(dev, "\t.send_irq = %d\n", cfg->output_pins[i].send_irq); dev_dbg(dev, "\t.ft = %d\n", cfg->output_pins[i].ft); dev_dbg(dev, "\t.link.buffer_lines = %d\n", cfg->output_pins[i].link.buffer_lines); dev_dbg(dev, 
"\t.link.foreign_key = %d\n", cfg->output_pins[i].link.foreign_key); dev_dbg(dev, "\t.link.granularity_pointer_update = %d\n", cfg->output_pins[i].link.granularity_pointer_update); dev_dbg(dev, "\t.link.msg_link_streaming_mode = %d\n", cfg->output_pins[i].link.msg_link_streaming_mode); dev_dbg(dev, "\t.link.pbk_id = %d\n", cfg->output_pins[i].link.pbk_id); dev_dbg(dev, "\t.link.pbk_slot_id = %d\n", cfg->output_pins[i].link.pbk_slot_id); dev_dbg(dev, "\t.link.dest = %d\n", cfg->output_pins[i].link.dest); dev_dbg(dev, "\t.link.use_sw_managed = %d\n", cfg->output_pins[i].link.use_sw_managed); dev_dbg(dev, "\t.link.is_snoop = %d\n", cfg->output_pins[i].link.is_snoop); dev_dbg(dev, "\t.crop.line_top = %d\n", cfg->output_pins[i].crop.line_top); dev_dbg(dev, "\t.crop.line_bottom = %d\n", cfg->output_pins[i].crop.line_bottom); #ifdef IPU8_INSYS_NEW_ABI dev_dbg(dev, "\t.crop.column_left = %d\n", cfg->output_pins[i].crop.column_left); dev_dbg(dev, "\t.crop.colunm_right = %d\n", cfg->output_pins[i].crop.column_right); #endif dev_dbg(dev, "\t.dpcm_enable = %d\n", cfg->output_pins[i].dpcm.enable); dev_dbg(dev, "\t.dpcm.type = %d\n", cfg->output_pins[i].dpcm.type); dev_dbg(dev, "\t.dpcm.predictor = %d\n", cfg->output_pins[i].dpcm.predictor); #ifdef IPU8_INSYS_NEW_ABI dev_dbg(dev, "\t.upipe_enable = %d\n", cfg->output_pins[i].upipe_enable); dev_dbg(dev, "\t.upipe_pin_cfg.opaque_pin_cfg = %d\n", cfg->output_pins[i].upipe_pin_cfg.opaque_pin_cfg); dev_dbg(dev, "\t.upipe_pin_cfg.plane_offset_1 = %d\n", cfg->output_pins[i].upipe_pin_cfg.plane_offset_1); dev_dbg(dev, "\t.upipe_pin_cfg.plane_offset_2 = %d\n", cfg->output_pins[i].upipe_pin_cfg.plane_offset_2); dev_dbg(dev, "\t.upipe_pin_cfg.singel_uob_fifo = %d\n", cfg->output_pins[i].upipe_pin_cfg.single_uob_fifo); dev_dbg(dev, "\t.upipe_pin_cfg.shared_uob_fifo = %d\n", cfg->output_pins[i].upipe_pin_cfg.shared_uob_fifo); #endif } dev_dbg(dev, "---------------------------\n"); } void ipu7_fw_isys_dump_frame_buff_set(struct device *dev, 
struct ipu7_insys_buffset *buf, unsigned int outputs) { unsigned int i; dev_dbg(dev, "--------------------------\n"); dev_dbg(dev, "IPU_ISYS_BUFF_SET\n"); dev_dbg(dev, ".capture_msg_map = %d\n", buf->capture_msg_map); dev_dbg(dev, ".frame_id = %d\n", buf->frame_id); dev_dbg(dev, ".skip_frame = %d\n", buf->skip_frame); for (i = 0; i < outputs; i++) { dev_dbg(dev, ".output_pin[%d]:\n", i); #ifndef IPU8_INSYS_NEW_ABI dev_dbg(dev, "\t.user_token = %llx\n", buf->output_pins[i].user_token); dev_dbg(dev, "\t.addr = 0x%x\n", buf->output_pins[i].addr); #else dev_dbg(dev, "\t.pin_payload.user_token = %llx\n", buf->output_pins[i].pin_payload.user_token); dev_dbg(dev, "\t.pin_payload.addr = 0x%x\n", buf->output_pins[i].pin_payload.addr); dev_dbg(dev, "\t.pin_payload.upipe_capture_cfg = 0x%x\n", buf->output_pins[i].upipe_capture_cfg); #endif } dev_dbg(dev, "---------------------------\n"); } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-fw-isys.h000066400000000000000000000024451503071307200270120ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_FW_ISYS_H #define IPU7_FW_ISYS_H #include #include "abi/ipu7_fw_isys_abi.h" struct device; struct ipu7_insys_buffset; struct ipu7_insys_stream_cfg; struct ipu7_isys; /* From here on type defines not coming from the ISYSAPI interface */ int ipu7_fw_isys_init(struct ipu7_isys *isys); void ipu7_fw_isys_release(struct ipu7_isys *isys); int ipu7_fw_isys_open(struct ipu7_isys *isys); int ipu7_fw_isys_close(struct ipu7_isys *isys); void ipu7_fw_isys_dump_stream_cfg(struct device *dev, struct ipu7_insys_stream_cfg *cfg); void ipu7_fw_isys_dump_frame_buff_set(struct device *dev, struct ipu7_insys_buffset *buf, unsigned int outputs); int ipu7_fw_isys_simple_cmd(struct ipu7_isys *isys, const unsigned int stream_handle, u16 send_type); int ipu7_fw_isys_complex_cmd(struct ipu7_isys *isys, const unsigned int stream_handle, void 
*cpu_mapped_buf, dma_addr_t dma_mapped_buf, size_t size, u16 send_type); struct ipu7_insys_resp *ipu7_fw_isys_get_resp(struct ipu7_isys *isys); void ipu7_fw_isys_put_resp(struct ipu7_isys *isys); #ifdef ENABLE_FW_OFFLINE_LOGGER int ipu7_fw_isys_get_log(struct ipu7_isys *isys); #endif #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-csi-phy.c000066400000000000000000000731511503071307200277470ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress.h" #include "ipu7-isys.h" #include "ipu7-isys-csi2.h" #include "ipu7-isys-csi2-regs.h" #include "ipu7-platform-regs.h" #include "ipu7-isys-csi-phy.h" #define PORT_A 0U #define PORT_B 1U #define PORT_C 2U #define PORT_D 3U struct ddlcal_counter_ref_s { u16 min_mbps; u16 max_mbps; u16 ddlcal_counter_ref; }; struct ddlcal_params { u16 min_mbps; u16 max_mbps; u16 oa_lanex_hsrx_cdphy_sel_fast; u16 ddlcal_max_phase; u16 phase_bound; u16 ddlcal_dll_fbk; u16 ddlcal_ddl_coarse_bank; u16 fjump_deskew; u16 min_eye_opening_deskew; }; struct i_thssettle_params { u16 min_mbps; u16 max_mbps; u16 i_thssettle; }; /* lane2 for 4l3t, lane1 for 2l2t */ struct oa_lane_clk_div_params { u16 min_mbps; u16 max_mbps; u16 oa_lane_hsrx_hs_clk_div; }; struct cdr_fbk_cap_prog_params { u16 min_mbps; u16 max_mbps; u16 val; }; static const struct ddlcal_counter_ref_s table0[] = { {1500, 1999, 118}, {2000, 2499, 157}, {2500, 3499, 196}, {3500, 4499, 274}, {4500, 4500, 352}, {} }; static const struct ddlcal_params table1[] = { {1500, 1587, 0, 143, 167, 17, 3, 4, 29}, {1588, 1687, 0, 135, 167, 15, 3, 4, 27}, {1688, 1799, 0, 127, 135, 15, 2, 4, 26}, {1800, 1928, 0, 119, 135, 13, 2, 3, 24}, {1929, 2076, 0, 111, 135, 13, 2, 3, 23}, {2077, 2249, 0, 103, 135, 11, 2, 3, 21}, {2250, 2454, 0, 95, 103, 11, 1, 3, 19}, {2455, 
2699, 0, 87, 103, 9, 1, 3, 18}, {2700, 2999, 0, 79, 103, 9, 1, 2, 16}, {3000, 3229, 0, 71, 71, 7, 1, 2, 15}, {3230, 3599, 1, 87, 103, 9, 1, 3, 18}, {3600, 3999, 1, 79, 103, 9, 1, 2, 16}, {4000, 4499, 1, 71, 103, 7, 1, 2, 15}, {4500, 4500, 1, 63, 71, 7, 0, 2, 13}, {} }; static const struct i_thssettle_params table2[] = { {80, 124, 24}, {125, 249, 20}, {250, 499, 16}, {500, 749, 14}, {750, 1499, 13}, {1500, 4500, 12}, {} }; static const struct oa_lane_clk_div_params table6[] = { {80, 159, 0x1}, {160, 319, 0x2}, {320, 639, 0x3}, {640, 1279, 0x4}, {1280, 2560, 0x5}, {2561, 4500, 0x6}, {} }; static const struct cdr_fbk_cap_prog_params table7[] = { {80, 919, 0}, {920, 1029, 1}, {1030, 1169, 2}, {1170, 1349, 3}, {1350, 1589, 4}, {1590, 1949, 5}, {1950, 2499, 6}, {} }; static void dwc_phy_write(struct ipu7_isys *isys, u32 id, u32 addr, u16 data) { void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IS_IO_CDPHY_BASE(id); dev_dbg(&isys->adev->auxdev.dev, "phy write: reg 0x%zx = data 0x%04x", base + addr - isys_base, data); writew(data, base + addr); } static u16 dwc_phy_read(struct ipu7_isys *isys, u32 id, u32 addr) { void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IS_IO_CDPHY_BASE(id); u16 data; data = readw(base + addr); dev_dbg(&isys->adev->auxdev.dev, "phy read: reg 0x%zx = data 0x%04x", base + addr - isys_base, data); return data; } static void dwc_csi_write(struct ipu7_isys *isys, u32 id, u32 addr, u32 data) { void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IS_IO_CSI2_HOST_BASE(id); struct device *dev = &isys->adev->auxdev.dev; dev_dbg(dev, "csi write: reg 0x%zx = data 0x%08x", base + addr - isys_base, data); writel(data, base + addr); dev_dbg(dev, "csi read: reg 0x%zx = data 0x%08x", base + addr - isys_base, readl(base + addr)); } static void gpreg_write(struct ipu7_isys *isys, u32 id, u32 addr, u32 data) { void __iomem *isys_base = isys->pdata->base; u32 gpreg = 
isys->pdata->ipdata->csi2.gpreg; void __iomem *base = isys_base + gpreg + 0x1000 * id; struct device *dev = &isys->adev->auxdev.dev; dev_dbg(dev, "gpreg write: reg 0x%zx = data 0x%08x", base + addr - isys_base, data); writel(data, base + addr); dev_dbg(dev, "gpreg read: reg 0x%zx = data 0x%08x", base + addr - isys_base, readl(base + addr)); } static u32 dwc_csi_read(struct ipu7_isys *isys, u32 id, u32 addr) { void __iomem *isys_base = isys->pdata->base; void __iomem *base = isys_base + IS_IO_CSI2_HOST_BASE(id); u32 data; data = readl(base + addr); dev_dbg(&isys->adev->auxdev.dev, "csi read: reg 0x%zx = data 0x%x", base + addr - isys_base, data); return data; } static void dwc_phy_write_mask(struct ipu7_isys *isys, u32 id, u32 addr, u16 val, u8 lo, u8 hi) { u32 temp, mask; WARN_ON(lo > hi); WARN_ON(hi > 15); mask = ((~0U - (1U << lo) + 1U)) & (~0U >> (31 - hi)); temp = dwc_phy_read(isys, id, addr); temp &= ~mask; temp |= (val << lo) & mask; dwc_phy_write(isys, id, addr, temp); } static void dwc_csi_write_mask(struct ipu7_isys *isys, u32 id, u32 addr, u32 val, u8 hi, u8 lo) { u32 temp, mask; WARN_ON(lo > hi); mask = ((~0U - (1U << lo) + 1U)) & (~0U >> (31 - hi)); temp = dwc_csi_read(isys, id, addr); temp &= ~mask; temp |= (val << lo) & mask; dwc_csi_write(isys, id, addr, temp); } static void ipu7_isys_csi_ctrl_cfg(struct ipu7_isys_csi2 *csi2) { struct ipu7_isys *isys = csi2->isys; struct device *dev = &isys->adev->auxdev.dev; u32 id, lanes, phy_mode; u32 val; id = csi2->port; lanes = csi2->nlanes; phy_mode = csi2->phy_mode; dev_dbg(dev, "csi-%d controller init with %u lanes, phy mode %u", id, lanes, phy_mode); val = dwc_csi_read(isys, id, VERSION); dev_dbg(dev, "csi-%d controller version = 0x%x", id, val); /* num of active data lanes */ dwc_csi_write(isys, id, N_LANES, lanes - 1); dwc_csi_write(isys, id, CDPHY_MODE, phy_mode); dwc_csi_write(isys, id, VC_EXTENSION, 0); /* only mask PHY_FATAL and PKT_FATAL interrupts */ dwc_csi_write(isys, id, INT_MSK_PHY_FATAL, 0xff); 
dwc_csi_write(isys, id, INT_MSK_PKT_FATAL, 0x3); dwc_csi_write(isys, id, INT_MSK_PHY, 0x0); dwc_csi_write(isys, id, INT_MSK_LINE, 0x0); dwc_csi_write(isys, id, INT_MSK_BNDRY_FRAME_FATAL, 0x0); dwc_csi_write(isys, id, INT_MSK_SEQ_FRAME_FATAL, 0x0); dwc_csi_write(isys, id, INT_MSK_CRC_FRAME_FATAL, 0x0); dwc_csi_write(isys, id, INT_MSK_PLD_CRC_FATAL, 0x0); dwc_csi_write(isys, id, INT_MSK_DATA_ID, 0x0); dwc_csi_write(isys, id, INT_MSK_ECC_CORRECTED, 0x0); } static void ipu7_isys_csi_phy_reset(struct ipu7_isys *isys, u32 id) { dwc_csi_write(isys, id, PHY_SHUTDOWNZ, 0); dwc_csi_write(isys, id, DPHY_RSTZ, 0); dwc_csi_write(isys, id, CSI2_RESETN, 0); gpreg_write(isys, id, PHY_RESET, 0); gpreg_write(isys, id, PHY_SHUTDOWN, 0); } #define N_DATA_IDS 8 static DECLARE_BITMAP(data_ids, N_DATA_IDS); /* 8 Data ID monitors, each Data ID is composed by pair of VC and data type */ static int __dids_config(struct ipu7_isys_csi2 *csi2, u32 id, u8 vc, u8 dt) { struct ipu7_isys *isys = csi2->isys; u32 reg, n; u8 lo, hi; int ret; dev_dbg(&isys->adev->auxdev.dev, "config CSI-%u with vc:%u dt:0x%02x\n", id, vc, dt); dwc_csi_write(isys, id, VC_EXTENSION, 0x0); n = find_first_zero_bit(data_ids, N_DATA_IDS); if (n == N_DATA_IDS) return -ENOSPC; ret = test_and_set_bit(n, data_ids); if (ret) return -EBUSY; reg = n < 4 ? DATA_IDS_VC_1 : DATA_IDS_VC_2; lo = (n % 4) * 8; hi = lo + 4; dwc_csi_write_mask(isys, id, reg, vc & GENMASK(4, 0), hi, lo); reg = n < 4 ? 
DATA_IDS_1 : DATA_IDS_2; lo = (n % 4) * 8; hi = lo + 5; dwc_csi_write_mask(isys, id, reg, dt & GENMASK(5, 0), hi, lo); return 0; } static int ipu7_isys_csi_ctrl_dids_config(struct ipu7_isys_csi2 *csi2, u32 id) { struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; struct device *dev = &csi2->isys->adev->auxdev.dev; struct v4l2_mbus_frame_desc desc; struct v4l2_subdev *ext_sd; struct media_pad *pad; unsigned int i; int ret; pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity); if (IS_ERR(pad)) { dev_warn(dev, "can't get remote source pad of %s (%ld)\n", csi2->asd.sd.name, PTR_ERR(pad)); return PTR_ERR(pad); } ext_sd = media_entity_to_v4l2_subdev(pad->entity); if (WARN(!ext_sd, "Failed to get subdev for entity %s\n", pad->entity->name)) return -ENODEV; ret = v4l2_subdev_call(ext_sd, pad, get_frame_desc, pad->index, &desc); if (ret) return ret; if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { dev_warn(dev, "Unsupported frame descriptor type\n"); return -EINVAL; } for (i = 0; i < desc.num_entries; i++) { desc_entry = &desc.entry[i]; if (desc_entry->bus.csi2.vc < NR_OF_CSI2_VC) { ret = __dids_config(csi2, id, desc_entry->bus.csi2.vc, desc_entry->bus.csi2.dt); if (ret) return ret; } } return 0; } #define CDPHY_TIMEOUT (5000000) static int ipu7_isys_phy_ready(struct ipu7_isys *isys, u32 id) { void __iomem *isys_base = isys->pdata->base; u32 gpreg_offset = isys->pdata->ipdata->csi2.gpreg; void __iomem *gpreg = isys_base + gpreg_offset + 0x1000 * id; struct device *dev = &isys->adev->auxdev.dev; unsigned int i; u32 phy_ready; u32 reg, rext; int ret; dev_dbg(dev, "waiting phy ready...\n"); ret = readl_poll_timeout(gpreg + PHY_READY, phy_ready, phy_ready & BIT(0) && phy_ready != ~0U, 100, CDPHY_TIMEOUT); dev_dbg(dev, "phy %u ready = 0x%08x\n", id, readl(gpreg + PHY_READY)); dev_dbg(dev, "csi %u PHY_RX = 0x%08x\n", id, dwc_csi_read(isys, id, PHY_RX)); dev_dbg(dev, "csi %u PHY_STOPSTATE = 0x%08x\n", id, dwc_csi_read(isys, id, PHY_STOPSTATE)); dev_dbg(dev, "csi 
%u PHY_CAL = 0x%08x\n", id, dwc_csi_read(isys, id, PHY_CAL)); for (i = 0; i < 4U; i++) { reg = CORE_DIG_DLANE_0_R_HS_RX_0 + (i * 0x400U); dev_dbg(dev, "phy %u DLANE%u skewcal = 0x%04x\n", id, i, dwc_phy_read(isys, id, reg)); } dev_dbg(dev, "phy %u DDLCAL = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_5)); dev_dbg(dev, "phy %u TERMCAL = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_R_TERMCAL_DEBUG_0)); dev_dbg(dev, "phy %u LPDCOCAL = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_R_LPDCOCAL_DEBUG_RB)); dev_dbg(dev, "phy %u HSDCOCAL = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_R_HSDCOCAL_DEBUG_RB)); dev_dbg(dev, "phy %u LPDCOCAL_VT = 0x%04x\n", id, dwc_phy_read(isys, id, PPI_R_LPDCOCAL_DEBUG_VT)); if (!ret) { if (id) { dev_dbg(dev, "ignore phy %u rext\n", id); return 0; } rext = dwc_phy_read(isys, id, CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_15) & 0xfU; dev_dbg(dev, "phy %u rext value = %u\n", id, rext); isys->phy_rext_cal = (rext ? rext : 5); return 0; } dev_err(dev, "wait phy ready timeout!\n"); return ret; } static int lookup_table1(u16 mbps) { int i; for (i = 0; i < ARRAY_SIZE(table1); i++) { if (mbps >= table1[i].min_mbps && mbps <= table1[i].max_mbps) return i; } return -ENXIO; } static const u16 deskew_fine_mem[] = { 0x0404, 0x040c, 0x0414, 0x041c, 0x0423, 0x0429, 0x0430, 0x043a, 0x0445, 0x044a, 0x0450, 0x045a, 0x0465, 0x0469, 0x0472, 0x047a, 0x0485, 0x0489, 0x0490, 0x049a, 0x04a4, 0x04ac, 0x04b4, 0x04bc, 0x04c4, 0x04cc, 0x04d4, 0x04dc, 0x04e4, 0x04ec, 0x04f4, 0x04fc, 0x0504, 0x050c, 0x0514, 0x051c, 0x0523, 0x0529, 0x0530, 0x053a, 0x0545, 0x054a, 0x0550, 0x055a, 0x0565, 0x0569, 0x0572, 0x057a, 0x0585, 0x0589, 0x0590, 0x059a, 0x05a4, 0x05ac, 0x05b4, 0x05bc, 0x05c4, 0x05cc, 0x05d4, 0x05dc, 0x05e4, 0x05ec, 0x05f4, 0x05fc, 0x0604, 0x060c, 0x0614, 0x061c, 0x0623, 0x0629, 0x0632, 0x063a, 0x0645, 0x064a, 0x0650, 0x065a, 0x0665, 0x0669, 0x0672, 0x067a, 0x0685, 0x0689, 0x0690, 0x069a, 0x06a4, 0x06ac, 0x06b4, 0x06bc, 0x06c4, 0x06cc, 0x06d4, 0x06dc, 0x06e4, 
0x06ec, 0x06f4, 0x06fc, 0x0704, 0x070c, 0x0714, 0x071c, 0x0723, 0x072a, 0x0730, 0x073a, 0x0745, 0x074a, 0x0750, 0x075a, 0x0765, 0x0769, 0x0772, 0x077a, 0x0785, 0x0789, 0x0790, 0x079a, 0x07a4, 0x07ac, 0x07b4, 0x07bc, 0x07c4, 0x07cc, 0x07d4, 0x07dc, 0x07e4, 0x07ec, 0x07f4, 0x07fc, }; static void ipu7_isys_dphy_config(struct ipu7_isys *isys, u8 id, u8 lanes, bool aggregation, u64 mbps) { u16 hsrxval0 = 0; u16 hsrxval1 = 0; u16 hsrxval2 = 0; int index; u16 reg; u16 val; u32 i; dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_7, 0, 0, 9); if (mbps > 1500) dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_7, 40, 0, 7); else dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_7, 104, 0, 7); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_8, 80, 0, 7); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_0, 191, 0, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_7, 34, 7, 12); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_1, 38, 8, 15); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 4, 12, 15); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 2, 10, 11); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 1, 8, 8); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 38, 0, 7); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_2, 1, 9, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_4, 10, 0, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_6, 20, 0, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_7, 19, 0, 6); for (i = 0; i < ARRAY_SIZE(table0); i++) { if (mbps >= table0[i].min_mbps && mbps <= table0[i].max_mbps) { dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_3, table0[i].ddlcal_counter_ref, 0, 9); break; } } index = lookup_table1(mbps); if (index >= 0) { dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_1, table1[index].phase_bound, 0, 7); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_5, table1[index].ddlcal_dll_fbk, 4, 9); dwc_phy_write_mask(isys, id, PPI_RW_DDLCAL_CFG_5, table1[index].ddlcal_ddl_coarse_bank, 0, 3); reg = 
CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_8; val = table1[index].oa_lanex_hsrx_cdphy_sel_fast; for (i = 0; i < lanes + 1; i++) dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 12, 12); } reg = CORE_DIG_DLANE_0_RW_LP_0; for (i = 0; i < lanes; i++) dwc_phy_write_mask(isys, id, reg + (i * 0x400), 6, 8, 11); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_2, 0, 0, 0); if (!is_ipu7(isys->adev->isp->hw_ver) || id == PORT_B || id == PORT_C) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2, 1, 0, 0); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2, 0, 0, 0); } else { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2, 0, 0, 0); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2, 1, 0, 0); } if (lanes == 4 && is_ipu7(isys->adev->isp->hw_ver)) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_2, 0, 0, 0); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_2, 0, 0, 0); } dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_6, 1, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_6, 1, 3, 5); reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_12; val = (mbps > 1500) ? 0 : 1; for (i = 0; i < lanes + 1; i++) { dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 1, 1); dwc_phy_write_mask(isys, id, reg + (i * 0x400), !val, 3, 3); } reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_13; val = (mbps > 1500) ? 
0 : 1; for (i = 0; i < lanes + 1; i++) { dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 1, 1); dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 3, 3); } if (!is_ipu7(isys->adev->isp->hw_ver) || id == PORT_B || id == PORT_C) reg = CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9; else reg = CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9; for (i = 0; i < ARRAY_SIZE(table6); i++) { if (mbps >= table6[i].min_mbps && mbps <= table6[i].max_mbps) { dwc_phy_write_mask(isys, id, reg, table6[i].oa_lane_hsrx_hs_clk_div, 5, 7); break; } } if (aggregation) { dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_0, 1, 1, 1); reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_15; dwc_phy_write_mask(isys, id, reg, 3, 3, 4); val = (id == PORT_A) ? 3 : 0; reg = CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_15; dwc_phy_write_mask(isys, id, reg, val, 3, 4); reg = CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_15; dwc_phy_write_mask(isys, id, reg, 3, 3, 4); } dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_HS_RX_0, 28, 0, 7); dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_HS_RX_7, 6, 0, 7); reg = CORE_DIG_DLANE_0_RW_HS_RX_0; for (i = 0; i < ARRAY_SIZE(table2); i++) { if (mbps >= table2[i].min_mbps && mbps <= table2[i].max_mbps) { u8 j; for (j = 0; j < lanes; j++) dwc_phy_write_mask(isys, id, reg + (j * 0x400), table2[i].i_thssettle, 8, 15); break; } } /* deskew */ for (i = 0; i < lanes; i++) { reg = CORE_DIG_DLANE_0_RW_CFG_1; dwc_phy_write_mask(isys, id, reg + (i * 0x400), ((mbps > 1500) ? 0x1 : 0x2), 2, 3); reg = CORE_DIG_DLANE_0_RW_HS_RX_2; dwc_phy_write_mask(isys, id, reg + (i * 0x400), ((mbps > 2500) ? 
0 : 1), 15, 15); dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 13, 13); dwc_phy_write_mask(isys, id, reg + (i * 0x400), 7, 9, 12); reg = CORE_DIG_DLANE_0_RW_LP_0; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 12, 15); reg = CORE_DIG_DLANE_0_RW_LP_2; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 0, 0); reg = CORE_DIG_DLANE_0_RW_HS_RX_1; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 16, 0, 7); reg = CORE_DIG_DLANE_0_RW_HS_RX_3; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 2, 0, 2); index = lookup_table1(mbps); if (index >= 0) { val = table1[index].fjump_deskew; dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 3, 8); } reg = CORE_DIG_DLANE_0_RW_HS_RX_4; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 150, 0, 15); reg = CORE_DIG_DLANE_0_RW_HS_RX_5; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 0, 7); dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 8, 15); reg = CORE_DIG_DLANE_0_RW_HS_RX_6; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 2, 0, 7); index = lookup_table1(mbps); if (index >= 0) { val = table1[index].min_eye_opening_deskew; dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 8, 15); } reg = CORE_DIG_DLANE_0_RW_HS_RX_7; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 13, 13); dwc_phy_write_mask(isys, id, reg + (i * 0x400), 0, 15, 15); reg = CORE_DIG_DLANE_0_RW_HS_RX_9; index = lookup_table1(mbps); if (index >= 0) { val = table1[index].ddlcal_max_phase; dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 0, 7); } } dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_LP_0, 1, 12, 15); dwc_phy_write_mask(isys, id, CORE_DIG_DLANE_CLK_RW_LP_2, 0, 0, 0); for (i = 0; i < ARRAY_SIZE(deskew_fine_mem); i++) dwc_phy_write_mask(isys, id, CORE_DIG_COMMON_RW_DESKEW_FINE_MEM, deskew_fine_mem[i], 0, 15); if (mbps > 1500) { hsrxval0 = 4; hsrxval2 = 3; } if (mbps > 2500) hsrxval1 = 2; dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9, hsrxval0, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9, 
hsrxval0, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9, hsrxval0, 0, 2); if (lanes == 4 && is_ipu7(isys->adev->isp->hw_ver)) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_9, hsrxval0, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_9, hsrxval0, 0, 2); } dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9, hsrxval1, 3, 4); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9, hsrxval1, 3, 4); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9, hsrxval1, 3, 4); if (lanes == 4 && is_ipu7(isys->adev->isp->hw_ver)) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_9, hsrxval1, 3, 4); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_9, hsrxval1, 3, 4); } dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_15, hsrxval2, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_15, hsrxval2, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_15, hsrxval2, 0, 2); if (lanes == 4 && is_ipu7(isys->adev->isp->hw_ver)) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_15, hsrxval2, 0, 2); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_15, hsrxval2, 0, 2); } /* force and override rext */ if (isys->phy_rext_cal && id) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_8, isys->phy_rext_cal, 0, 3); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_7, 1, 11, 11); } } static void ipu7_isys_cphy_config(struct ipu7_isys *isys, u8 id, u8 lanes, bool aggregation, u64 mbps) { u8 trios = 2; u16 coarse_target; u16 deass_thresh; u16 delay_thresh; u16 reset_thresh; u16 cap_prog = 6U; u16 reg; u16 val; u32 i; u32 r; if (is_ipu7p5(isys->adev->isp->hw_ver)) val = 0x15; else val = 0x155; if (is_ipu7(isys->adev->isp->hw_ver)) trios = 3; dwc_phy_write_mask(isys, id, CORE_DIG_RW_COMMON_7, val, 0, 9); dwc_phy_write_mask(isys, id, 
PPI_STARTUP_RW_COMMON_DPHY_7, 104, 0, 7); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_8, 16, 0, 7); reg = CORE_DIG_CLANE_0_RW_LP_0; for (i = 0; i < trios; i++) dwc_phy_write_mask(isys, id, reg + (i * 0x400), 6, 8, 11); val = (mbps > 900U) ? 1U : 0U; for (i = 0; i < trios; i++) { reg = CORE_DIG_CLANE_0_RW_HS_RX_0; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 1, 0, 0); dwc_phy_write_mask(isys, id, reg + (i * 0x400), val, 1, 1); reg = CORE_DIG_CLANE_0_RW_HS_RX_1; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 38, 0, 15); reg = CORE_DIG_CLANE_0_RW_HS_RX_5; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 38, 0, 15); reg = CORE_DIG_CLANE_0_RW_HS_RX_6; dwc_phy_write_mask(isys, id, reg + (i * 0x400), 10, 0, 15); } /* * Below 900Msps, always use the same value. * The formula is suitable for data rate 80-3500Msps. * Timebase (us) = 1, DIV = 32, TDDL (UI) = 0.5 */ if (mbps >= 80U) { coarse_target = (u16)div_u64_rem(mbps, 16, &r); if (!r) coarse_target--; } else { coarse_target = 56; } for (i = 0; i < trios; i++) { reg = CORE_DIG_CLANE_0_RW_HS_RX_2 + i * 0x400; dwc_phy_write_mask(isys, id, reg, coarse_target, 0, 15); } dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_2, 1, 0, 0); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2, 0, 0, 0); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2, 1, 0, 0); if (!is_ipu7p5(isys->adev->isp->hw_ver) && lanes == 4) { dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_2, 1, 0, 0); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_2, 0, 0, 0); } for (i = 0; i < trios; i++) { reg = CORE_DIG_RW_TRIO0_0 + i * 0x400; dwc_phy_write_mask(isys, id, reg, 1, 6, 8); dwc_phy_write_mask(isys, id, reg, 1, 3, 5); dwc_phy_write_mask(isys, id, reg, 2, 0, 2); } deass_thresh = (u16)div_u64_rem(7 * 1000 * 6, mbps * 5U, &r) + 1; if (r != 0) deass_thresh++; reg = CORE_DIG_RW_TRIO0_2; for (i = 0; i < trios; i++) dwc_phy_write_mask(isys, id, reg + 0x400 * i, 
deass_thresh, 0, 7); delay_thresh = ((224U - (9U * 7U)) * 1000U) / (5U * mbps) - 7U; if (delay_thresh < 1) delay_thresh = 1; reg = CORE_DIG_RW_TRIO0_1; for (i = 0; i < trios; i++) dwc_phy_write_mask(isys, id, reg + 0x400 * i, delay_thresh, 0, 15); reset_thresh = (u16)div_u64_rem(2U * 5U * mbps, 7U * 1000U, &r); if (!r) reset_thresh--; if (reset_thresh < 1) reset_thresh = 1; reg = CORE_DIG_RW_TRIO0_0; for (i = 0; i < trios; i++) dwc_phy_write_mask(isys, id, reg + 0x400 * i, reset_thresh, 9, 11); reg = CORE_DIG_CLANE_0_RW_LP_0; for (i = 0; i < trios; i++) dwc_phy_write_mask(isys, id, reg + 0x400 * i, 1, 12, 15); reg = CORE_DIG_CLANE_0_RW_LP_2; for (i = 0; i < trios; i++) dwc_phy_write_mask(isys, id, reg + 0x400 * i, 0, 0, 0); reg = CORE_DIG_CLANE_0_RW_HS_RX_0; for (i = 0; i < trios; i++) dwc_phy_write_mask(isys, id, reg + 0x400 * i, 12, 2, 6); for (i = 0; i < ARRAY_SIZE(table7); i++) { if (mbps >= table7[i].min_mbps && mbps <= table7[i].max_mbps) { cap_prog = table7[i].val; break; } } for (i = 0; i < (lanes + 1); i++) { reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9 + 0x400 * i; dwc_phy_write_mask(isys, id, reg, 4U, 0, 2); dwc_phy_write_mask(isys, id, reg, 0U, 3, 4); reg = CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_7 + 0x400 * i; dwc_phy_write_mask(isys, id, reg, cap_prog, 10, 12); } } static void ipu7_isys_phy_config(struct ipu7_isys *isys, u8 id, u8 lanes, bool aggregation) { struct device *dev = &isys->adev->auxdev.dev; u32 phy_mode; s64 link_freq; u64 mbps; if (aggregation) link_freq = ipu7_isys_csi2_get_link_freq(&isys->csi2[0]); else link_freq = ipu7_isys_csi2_get_link_freq(&isys->csi2[id]); if (link_freq < 0) { dev_warn(dev, "get link freq failed, use default mbps\n"); link_freq = 560000000; } mbps = div_u64(link_freq, 500000); dev_dbg(dev, "config phy %u with lanes %u aggregation %d mbps %lld\n", id, lanes, aggregation, mbps); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_10, 48, 0, 7); dwc_phy_write_mask(isys, id, CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_2, 1, 12, 
13); dwc_phy_write_mask(isys, id, CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_0, 63, 2, 7); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_STARTUP_1_1, 563, 0, 11); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_2, 5, 0, 7); /* bypass the RCAL state (bit6) */ if (aggregation && id != PORT_A) dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_2, 0x45, 0, 7); dwc_phy_write_mask(isys, id, PPI_STARTUP_RW_COMMON_DPHY_6, 39, 0, 7); dwc_phy_write_mask(isys, id, PPI_CALIBCTRL_RW_COMMON_BG_0, 500, 0, 8); dwc_phy_write_mask(isys, id, PPI_RW_TERMCAL_CFG_0, 38, 0, 6); dwc_phy_write_mask(isys, id, PPI_RW_OFFSETCAL_CFG_0, 7, 0, 4); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_TIMEBASE, 153, 0, 9); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_NREF, 800, 0, 10); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_NREF_RANGE, 27, 0, 4); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_TWAIT_CONFIG, 47, 0, 8); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_TWAIT_CONFIG, 127, 9, 15); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_VT_CONFIG, 47, 7, 15); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_VT_CONFIG, 27, 2, 6); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_VT_CONFIG, 3, 0, 1); dwc_phy_write_mask(isys, id, PPI_RW_LPDCOCAL_COARSE_CFG, 1, 0, 1); dwc_phy_write_mask(isys, id, PPI_RW_COMMON_CFG, 3, 0, 1); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_0, 0, 10, 10); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_1, 1, 10, 10); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_1, 0, 15, 15); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_3, 3, 8, 9); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_0, 0, 15, 15); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_6, 7, 12, 14); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_7, 0, 8, 10); dwc_phy_write_mask(isys, id, CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_5, 0, 8, 8); if (aggregation) phy_mode = isys->csi2[0].phy_mode; else phy_mode = 
isys->csi2[id].phy_mode; if (phy_mode == PHY_MODE_DPHY) { ipu7_isys_dphy_config(isys, id, lanes, aggregation, mbps); } else if (phy_mode == PHY_MODE_CPHY) { ipu7_isys_cphy_config(isys, id, lanes, aggregation, mbps); } else { dev_err(dev, "unsupported phy mode %d!\n", isys->csi2[id].phy_mode); } } int ipu7_isys_csi_phy_powerup(struct ipu7_isys_csi2 *csi2) { struct ipu7_isys *isys = csi2->isys; u32 lanes = csi2->nlanes; bool aggregation = false; u32 id = csi2->port; int ret; /* lanes remapping for aggregation (port AB) mode */ if (!is_ipu7(isys->adev->isp->hw_ver) && lanes > 2 && id == PORT_A) { aggregation = true; lanes = 2; } ipu7_isys_csi_phy_reset(isys, id); gpreg_write(isys, id, PHY_CLK_LANE_CONTROL, 0x1); gpreg_write(isys, id, PHY_CLK_LANE_FORCE_CONTROL, 0x2); gpreg_write(isys, id, PHY_LANE_CONTROL_EN, (1U << lanes) - 1U); gpreg_write(isys, id, PHY_LANE_FORCE_CONTROL, 0xf); gpreg_write(isys, id, PHY_MODE, csi2->phy_mode); /* config PORT_B if aggregation mode */ if (aggregation) { ipu7_isys_csi_phy_reset(isys, PORT_B); gpreg_write(isys, PORT_B, PHY_CLK_LANE_CONTROL, 0x0); gpreg_write(isys, PORT_B, PHY_LANE_CONTROL_EN, 0x3); gpreg_write(isys, PORT_B, PHY_CLK_LANE_FORCE_CONTROL, 0x2); gpreg_write(isys, PORT_B, PHY_LANE_FORCE_CONTROL, 0xf); gpreg_write(isys, PORT_B, PHY_MODE, csi2->phy_mode); } ipu7_isys_csi_ctrl_cfg(csi2); ipu7_isys_csi_ctrl_dids_config(csi2, id); ipu7_isys_phy_config(isys, id, lanes, aggregation); gpreg_write(isys, id, PHY_RESET, 1); gpreg_write(isys, id, PHY_SHUTDOWN, 1); dwc_csi_write(isys, id, DPHY_RSTZ, 1); dwc_csi_write(isys, id, PHY_SHUTDOWNZ, 1); dwc_csi_write(isys, id, CSI2_RESETN, 1); ret = ipu7_isys_phy_ready(isys, id); if (ret < 0) return ret; gpreg_write(isys, id, PHY_LANE_FORCE_CONTROL, 0); gpreg_write(isys, id, PHY_CLK_LANE_FORCE_CONTROL, 0); /* config PORT_B if aggregation mode */ if (aggregation) { ipu7_isys_phy_config(isys, PORT_B, 2, aggregation); gpreg_write(isys, PORT_B, PHY_RESET, 1); gpreg_write(isys, PORT_B, PHY_SHUTDOWN, 
1); dwc_csi_write(isys, PORT_B, DPHY_RSTZ, 1); dwc_csi_write(isys, PORT_B, PHY_SHUTDOWNZ, 1); dwc_csi_write(isys, PORT_B, CSI2_RESETN, 1); ret = ipu7_isys_phy_ready(isys, PORT_B); if (ret < 0) return ret; gpreg_write(isys, PORT_B, PHY_LANE_FORCE_CONTROL, 0); gpreg_write(isys, PORT_B, PHY_CLK_LANE_FORCE_CONTROL, 0); } return 0; } void ipu7_isys_csi_phy_powerdown(struct ipu7_isys_csi2 *csi2) { struct ipu7_isys *isys = csi2->isys; ipu7_isys_csi_phy_reset(isys, csi2->port); if (!is_ipu7(isys->adev->isp->hw_ver) && csi2->nlanes > 2U && csi2->port == PORT_A) ipu7_isys_csi_phy_reset(isys, PORT_B); } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-csi-phy.h000066400000000000000000000005461503071307200277520ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_ISYS_CSI_PHY_H #define IPU7_ISYS_CSI_PHY_H struct ipu7_isys; #define PHY_MODE_DPHY 0U #define PHY_MODE_CPHY 1U int ipu7_isys_csi_phy_powerup(struct ipu7_isys_csi2 *csi2); void ipu7_isys_csi_phy_powerdown(struct ipu7_isys_csi2 *csi2); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-csi2-regs.h000066400000000000000000001474371503071307200302070ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2020 - 2025 Intel Corporation */ #ifndef IPU7_ISYS_CSI2_REG_H #define IPU7_ISYS_CSI2_REG_H /* IS main regs base */ #define IS_MAIN_BASE (0x240000U) #define IS_MAIN_S2B_BASE (IS_MAIN_BASE + 0x22000U) #define IS_MAIN_B2O_BASE (IS_MAIN_BASE + 0x26000U) #define IS_MAIN_ISD_M0_BASE (IS_MAIN_BASE + 0x2b000U) #define IS_MAIN_ISD_M1_BASE (IS_MAIN_BASE + 0x2b100U) #define IS_MAIN_ISD_INT_BASE (IS_MAIN_BASE + 0x2b200U) #define IS_MAIN_GDA_BASE (IS_MAIN_BASE + 0x32000U) #define IS_MAIN_GPREGS_MAIN_BASE (IS_MAIN_BASE + 0x32500U) #define IS_MAIN_IRQ_CTRL_BASE (IS_MAIN_BASE + 0x32700U) #define IS_MAIN_PWM_CTRL_BASE (IS_MAIN_BASE + 0x32b00U) #define 
S2B_IRQ_COMMON_0_CTL_STATUS (IS_MAIN_S2B_BASE + 0x1cU) #define S2B_IRQ_COMMON_0_CTL_CLEAR (IS_MAIN_S2B_BASE + 0x20U) #define S2B_IRQ_COMMON_0_CTL_ENABLE (IS_MAIN_S2B_BASE + 0x24U) #define S2B_IID_IRQ_CTL_STATUS(iid) (IS_MAIN_S2B_BASE + 0x94U + \ 0x100U * (iid)) #define B2O_IRQ_COMMON_0_CTL_STATUS (IS_MAIN_B2O_BASE + 0x30U) #define B2O_IRQ_COMMON_0_CTL_CLEAR (IS_MAIN_B2O_BASE + 0x34U) #define B2O_IRQ_COMMON_0_CTL_ENABLE (IS_MAIN_B2O_BASE + 0x38U) #define B2O_IID_IRQ_CTL_STATUS(oid) (IS_MAIN_B2O_BASE + 0x3dcU + \ 0x200U * (oid)) #define ISD_M0_IRQ_CTL_STATUS (IS_MAIN_ISD_M0_BASE + 0x1cU) #define ISD_M0_IRQ_CTL_CLEAR (IS_MAIN_ISD_M0_BASE + 0x20U) #define ISD_M0_IRQ_CTL_ENABLE (IS_MAIN_ISD_M0_BASE + 0x24U) #define ISD_M1_IRQ_CTL_STATUS (IS_MAIN_ISD_M1_BASE + 0x1cU) #define ISD_M1_IRQ_CTL_CLEAR (IS_MAIN_ISD_M1_BASE + 0x20U) #define ISD_M1_IRQ_CTL_ENABLE (IS_MAIN_ISD_M1_BASE + 0x24U) #define ISD_INT_IRQ_CTL_STATUS (IS_MAIN_ISD_INT_BASE + 0x1cU) #define ISD_INT_IRQ_CTL_CLEAR (IS_MAIN_ISD_INT_BASE + 0x20U) #define ISD_INT_IRQ_CTL_ENABLE (IS_MAIN_ISD_INT_BASE + 0x24U) #define GDA_IRQ_CTL_STATUS (IS_MAIN_GDA_BASE + 0x1cU) #define GDA_IRQ_CTL_CLEAR (IS_MAIN_GDA_BASE + 0x20U) #define GDA_IRQ_CTL_ENABLE (IS_MAIN_GDA_BASE + 0x24U) #define IS_MAIN_IRQ_CTL_EDGE IS_MAIN_IRQ_CTRL_BASE #define IS_MAIN_IRQ_CTL_MASK (IS_MAIN_IRQ_CTRL_BASE + 0x4U) #define IS_MAIN_IRQ_CTL_STATUS (IS_MAIN_IRQ_CTRL_BASE + 0x8U) #define IS_MAIN_IRQ_CTL_CLEAR (IS_MAIN_IRQ_CTRL_BASE + 0xcU) #define IS_MAIN_IRQ_CTL_ENABLE (IS_MAIN_IRQ_CTRL_BASE + 0x10U) #define IS_MAIN_IRQ_CTL_LEVEL_NOT_PULSE (IS_MAIN_IRQ_CTRL_BASE + 0x14U) /* IS IO regs base */ #define IS_PHY_NUM (4U) #define IS_IO_BASE (0x280000U) /* dwc csi cdphy registers */ #define IS_IO_CDPHY_BASE(i) (IS_IO_BASE + 0x10000U * (i)) #define PPI_STARTUP_RW_COMMON_DPHY_0 0x1800U #define PPI_STARTUP_RW_COMMON_DPHY_1 0x1802U #define PPI_STARTUP_RW_COMMON_DPHY_2 0x1804U #define PPI_STARTUP_RW_COMMON_DPHY_3 0x1806U #define PPI_STARTUP_RW_COMMON_DPHY_4 0x1808U 
#define PPI_STARTUP_RW_COMMON_DPHY_5 0x180aU #define PPI_STARTUP_RW_COMMON_DPHY_6 0x180cU #define PPI_STARTUP_RW_COMMON_DPHY_7 0x180eU #define PPI_STARTUP_RW_COMMON_DPHY_8 0x1810U #define PPI_STARTUP_RW_COMMON_DPHY_9 0x1812U #define PPI_STARTUP_RW_COMMON_DPHY_A 0x1814U #define PPI_STARTUP_RW_COMMON_DPHY_10 0x1820U #define PPI_STARTUP_RW_COMMON_STARTUP_1_1 0x1822U #define PPI_STARTUP_RW_COMMON_STARTUP_1_2 0x1824U #define PPI_CALIBCTRL_RW_COMMON_CALIBCTRL_2_0 0x1840U #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_1 0x1842U #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_2 0x1844U #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_3 0x1846U #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_4 0x1848U #define PPI_CALIBCTRL_R_COMMON_CALIBCTRL_2_5 0x184aU #define PPI_CALIBCTRL_RW_COMMON_BG_0 0x184cU #define PPI_CALIBCTRL_RW_COMMON_CALIBCTRL_2_7 0x184eU #define PPI_CALIBCTRL_RW_ADC_CFG_0 0x1850U #define PPI_CALIBCTRL_RW_ADC_CFG_1 0x1852U #define PPI_CALIBCTRL_R_ADC_DEBUG 0x1854U #define PPI_RW_LPDCOCAL_TOP_OVERRIDE 0x1c00U #define PPI_RW_LPDCOCAL_TIMEBASE 0x1c02U #define PPI_RW_LPDCOCAL_NREF 0x1c04U #define PPI_RW_LPDCOCAL_NREF_RANGE 0x1c06U #define PPI_RW_LPDCOCAL_NREF_TRIGGER_MAN 0x1c08U #define PPI_RW_LPDCOCAL_TWAIT_CONFIG 0x1c0aU #define PPI_RW_LPDCOCAL_VT_CONFIG 0x1c0cU #define PPI_R_LPDCOCAL_DEBUG_RB 0x1c0eU #define PPI_RW_LPDCOCAL_COARSE_CFG 0x1c10U #define PPI_R_LPDCOCAL_DEBUG_COARSE_RB 0x1c12U #define PPI_R_LPDCOCAL_DEBUG_COARSE_MEAS_0_RB 0x1c14U #define PPI_R_LPDCOCAL_DEBUG_COARSE_MEAS_1_RB 0x1c16U #define PPI_R_LPDCOCAL_DEBUG_COARSE_FWORD_RB 0x1c18U #define PPI_R_LPDCOCAL_DEBUG_MEASURE_CURR_ERROR 0x1c1aU #define PPI_R_LPDCOCAL_DEBUG_MEASURE_LAST_ERROR 0x1c1cU #define PPI_R_LPDCOCAL_DEBUG_VT 0x1c1eU #define PPI_RW_LB_TIMEBASE_CONFIG 0x1c20U #define PPI_RW_LB_STARTCMU_CONFIG 0x1c22U #define PPI_R_LBPULSE_COUNTER_RB 0x1c24U #define PPI_R_LB_START_CMU_RB 0x1c26U #define PPI_RW_LB_DPHY_BURST_START 0x1c28U #define PPI_RW_LB_CPHY_BURST_START 0x1c2aU #define PPI_RW_DDLCAL_CFG_0 0x1c40U #define 
PPI_RW_DDLCAL_CFG_1 0x1c42U #define PPI_RW_DDLCAL_CFG_2 0x1c44U #define PPI_RW_DDLCAL_CFG_3 0x1c46U #define PPI_RW_DDLCAL_CFG_4 0x1c48U #define PPI_RW_DDLCAL_CFG_5 0x1c4aU #define PPI_RW_DDLCAL_CFG_6 0x1c4cU #define PPI_RW_DDLCAL_CFG_7 0x1c4eU #define PPI_R_DDLCAL_DEBUG_0 0x1c50U #define PPI_R_DDLCAL_DEBUG_1 0x1c52U #define PPI_RW_PARITY_TEST 0x1c60U #define PPI_RW_STARTUP_OVR_0 0x1c62U #define PPI_RW_STARTUP_STATE_OVR_1 0x1c64U #define PPI_RW_DTB_SELECTOR 0x1c66U #define PPI_RW_DPHY_CLK_SPARE 0x1c6aU #define PPI_RW_COMMON_CFG 0x1c6cU #define PPI_RW_TERMCAL_CFG_0 0x1c80U #define PPI_R_TERMCAL_DEBUG_0 0x1c82U #define PPI_RW_TERMCAL_CTRL_0 0x1c84U #define PPI_RW_OFFSETCAL_CFG_0 0x1ca0U #define PPI_R_OFFSETCAL_DEBUG_LANE0 0x1ca2U #define PPI_R_OFFSETCAL_DEBUG_LANE1 0x1ca4U #define PPI_R_OFFSETCAL_DEBUG_LANE2 0x1ca6U #define PPI_R_OFFSETCAL_DEBUG_LANE3 0x1ca8U #define PPI_R_OFFSETCAL_DEBUG_LANE4 0x1caaU #define PPI_RW_HSDCOCAL_CFG_O 0x1d00U #define PPI_RW_HSDCOCAL_CFG_1 0x1d02U #define PPI_RW_HSDCOCAL_CFG_2 0x1d04U #define PPI_RW_HSDCOCAL_CFG_3 0x1d06U #define PPI_RW_HSDCOCAL_CFG_4 0x1d08U #define PPI_RW_HSDCOCAL_CFG_5 0x1d0aU #define PPI_RW_HSDCOCAL_CFG_6 0x1d0cU #define PPI_RW_HSDCOCAL_CFG_7 0x1d0eU #define PPI_RW_HSDCOCAL_CFG_8 0x1d10U #define PPI_R_HSDCOCAL_DEBUG_RB 0x1d12U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_0 0x2000U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_1 0x2002U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_2 0x2004U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_3 0x2006U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_4 0x2008U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_5 0x200aU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_6 0x200cU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_7 0x200eU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE0_OVR_0_8 0x2010U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_9 0x2012U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_10 0x2014U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_11 0x2016U #define 
CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_12 0x2018U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_13 0x201aU #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_14 0x201cU #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE0_OVR_0_15 0x201eU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_0 0x2020U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_1 0x2022U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_2 0x2024U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_3 0x2026U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_4 0x2028U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_5 0x202aU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_6 0x202cU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_7 0x202eU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_8 0x2030U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_1_9 0x2032U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_10 0x2034U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_11 0x2036U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_12 0x2038U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_13 0x203aU #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_14 0x203cU #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_1_15 0x203eU #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_0 0x2040U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_1 0x2042U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_2 0x2044U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_3 0x2046U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_4 0x2048U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_5 0x204aU #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_6 0x204cU #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_7 0x204eU #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_8 0x2050U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_9 0x2052U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_10 0x2054U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_11 0x2056U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_12 0x2058U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_13 0x205aU #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_14 0x205cU #define 
CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_15 0x205eU #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_0 0x2060U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_1 0x2062U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_2 0x2064U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_3 0x2066U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_4 0x2068U #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_5 0x206aU #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_6 0x206cU #define CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_3_7 0x206eU #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_8 0x2070U #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_9 0x2072U #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_10 0x2074U #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_11 0x2076U #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_12 0x2078U #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_13 0x207aU #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_14 0x207cU #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_3_15 0x207eU #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_0 0x2080U #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_1 0x2082U #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_2 0x2084U #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_3 0x2086U #define CORE_DIG_IOCTRL_R_AFE_LANE0_CTRL_4_4 0x2088U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_5_0 0x20a0U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_5_1 0x20a2U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_5_2 0x20a4U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE0_OVR_5_3 0x20a6U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE0_OVR_5_4 0x20a8U #define CORE_DIG_RW_TRIO0_0 0x2100U #define CORE_DIG_RW_TRIO0_1 0x2102U #define CORE_DIG_RW_TRIO0_2 0x2104U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_0 0x2400U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_1 0x2402U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_2 0x2404U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_3 0x2406U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_4 0x2408U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_5 0x240aU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_6 0x240cU #define 
CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_7 0x240eU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE1_OVR_0_8 0x2410U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_9 0x2412U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_10 0x2414U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_11 0x2416U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_12 0x2418U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_13 0x241aU #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_14 0x241cU #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE1_OVR_0_15 0x241eU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_0 0x2420U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_1 0x2422U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_2 0x2424U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_3 0x2426U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_4 0x2428U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_5 0x242aU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_6 0x242cU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_7 0x242eU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_8 0x2430U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_1_9 0x2432U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_10 0x2434U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_11 0x2436U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_12 0x2438U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_13 0x243aU #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_14 0x243cU #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_1_15 0x243eU #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_0 0x2440U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_1 0x2442U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_2 0x2444U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_3 0x2446U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_4 0x2448U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_5 0x244aU #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_6 0x244cU #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_7 0x244eU #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_8 0x2450U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_9 0x2452U #define 
CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_10 0x2454U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_11 0x2456U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_12 0x2458U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_13 0x245aU #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_14 0x245cU #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_15 0x245eU #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_0 0x2460U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_1 0x2462U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_2 0x2464U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_3 0x2466U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_4 0x2468U #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_5 0x246aU #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_6 0x246cU #define CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_3_7 0x246eU #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_8 0x2470U #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_9 0x2472U #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_10 0x2474U #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_11 0x2476U #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_12 0x2478U #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_13 0x247aU #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_14 0x247cU #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_3_15 0x247eU #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_0 0x2480U #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_1 0x2482U #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_2 0x2484U #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_3 0x2486U #define CORE_DIG_IOCTRL_R_AFE_LANE1_CTRL_4_4 0x2488U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_5_0 0x24a0U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_5_1 0x24a2U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_5_2 0x24a4U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE1_OVR_5_3 0x24a6U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE1_OVR_5_4 0x24a8U #define CORE_DIG_RW_TRIO1_0 0x2500U #define CORE_DIG_RW_TRIO1_1 0x2502U #define CORE_DIG_RW_TRIO1_2 0x2504U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_0 0x2800U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_1 0x2802U #define 
CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_2 0x2804U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_3 0x2806U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_4 0x2808U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_5 0x280aU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_6 0x280cU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_7 0x280eU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE2_OVR_0_8 0x2810U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_9 0x2812U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_10 0x2814U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_11 0x2816U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_12 0x2818U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_13 0x281aU #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_14 0x281cU #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE2_OVR_0_15 0x281eU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_0 0x2820U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_1 0x2822U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_2 0x2824U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_3 0x2826U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_4 0x2828U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_5 0x282aU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_6 0x282cU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_7 0x282eU #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_8 0x2830U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_1_9 0x2832U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_10 0x2834U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_11 0x2836U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_12 0x2838U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_13 0x283aU #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_14 0x283cU #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_1_15 0x283eU #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_0 0x2840U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_1 0x2842U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_2 0x2844U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_3 0x2846U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_4 
0x2848U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_5 0x284aU #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_6 0x284cU #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_7 0x284eU #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_8 0x2850U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_9 0x2852U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_10 0x2854U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_11 0x2856U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_12 0x2858U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_13 0x285aU #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_14 0x285cU #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_15 0x285eU #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_0 0x2860U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_1 0x2862U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_2 0x2864U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_3 0x2866U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_4 0x2868U #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_5 0x286aU #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_6 0x286cU #define CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_3_7 0x286eU #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_8 0x2870U #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_9 0x2872U #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_10 0x2874U #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_11 0x2876U #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_12 0x2878U #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_13 0x287aU #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_14 0x287cU #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_3_15 0x287eU #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_0 0x2880U #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_1 0x2882U #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_2 0x2884U #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_3 0x2886U #define CORE_DIG_IOCTRL_R_AFE_LANE2_CTRL_4_4 0x2888U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_5_0 0x28a0U #define CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_5_1 0x28a2U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_5_2 0x28a4U #define CORE_DIG_IOCTRL_RW_CPHY_PPI_LANE2_OVR_5_3 0x28a6U #define 
CORE_DIG_IOCTRL_R_CPHY_PPI_LANE2_OVR_5_4 0x28a8U #define CORE_DIG_RW_TRIO2_0 0x2900U #define CORE_DIG_RW_TRIO2_1 0x2902U #define CORE_DIG_RW_TRIO2_2 0x2904U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_0 0x2c00U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_1 0x2c02U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_2 0x2c04U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_3 0x2c06U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_4 0x2c08U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_5 0x2c0aU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_6 0x2c0cU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_7 0x2c0eU #define CORE_DIG_IOCTRL_RW_DPHY_PPI_LANE3_OVR_0_8 0x2c10U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_9 0x2c12U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_10 0x2c14U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_11 0x2c16U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_12 0x2c18U #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_13 0x2c1aU #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_14 0x2c1cU #define CORE_DIG_IOCTRL_R_DPHY_PPI_LANE3_OVR_0_15 0x2c1eU #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_0 0x2c40U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_1 0x2c42U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_2 0x2c44U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_3 0x2c46U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_4 0x2c48U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_5 0x2c4aU #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_6 0x2c4cU #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_7 0x2c4eU #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_8 0x2c50U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_9 0x2c52U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_10 0x2c54U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_11 0x2c56U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_12 0x2c58U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_13 0x2c5aU #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_14 0x2c5cU #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_15 0x2c5eU #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_0 
0x2c60U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_1 0x2c62U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_2 0x2c64U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_3 0x2c66U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_4 0x2c68U #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_5 0x2c6aU #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_6 0x2c6cU #define CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_3_7 0x2c6eU #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_8 0x2c70U #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_9 0x2c72U #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_10 0x2c74U #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_11 0x2c76U #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_12 0x2c78U #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_13 0x2c7aU #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_14 0x2c7cU #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_3_15 0x2c7eU #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_0 0x2c80U #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_1 0x2c82U #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_2 0x2c84U #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_3 0x2c86U #define CORE_DIG_IOCTRL_R_AFE_LANE3_CTRL_4_4 0x2c88U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_0 0x3040U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_1 0x3042U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_2 0x3044U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_3 0x3046U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_4 0x3048U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_5 0x304aU #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_6 0x304cU #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_7 0x304eU #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_8 0x3050U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_9 0x3052U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_10 0x3054U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_11 0x3056U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_12 0x3058U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_13 0x305aU #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_14 0x305cU #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_15 0x305eU #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_0 
0x3060U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_1 0x3062U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_2 0x3064U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_3 0x3066U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_4 0x3068U #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_5 0x306aU #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_6 0x306cU #define CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_3_7 0x306eU #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_8 0x3070U #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_9 0x3072U #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_10 0x3074U #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_11 0x3076U #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_12 0x3078U #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_13 0x307aU #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_14 0x307cU #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_3_15 0x307eU #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_0 0x3080U #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_1 0x3082U #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_2 0x3084U #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_3 0x3086U #define CORE_DIG_IOCTRL_R_AFE_LANE4_CTRL_4_4 0x3088U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_CLK_OVR_0_0 0x3400U #define CORE_DIG_IOCTRL_RW_DPHY_PPI_CLK_OVR_0_1 0x3402U #define CORE_DIG_IOCTRL_R_DPHY_PPI_CLK_OVR_0_2 0x3404U #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_0 0x3800U #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_1 0x3802U #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_2 0x3804U #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_3 0x3806U #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_4 0x3808U #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_5 0x380aU #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_6 0x380cU #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_7 0x380eU #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_8 0x3810U #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_9 0x3812U #define CORE_DIG_IOCTRL_RW_COMMON_PPI_OVR_0_10 0x3814U #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_11 0x3816U #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_12 0x3818U #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_13 
0x381aU #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_14 0x381cU #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_0_15 0x381eU #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_0 0x3820U #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_1 0x3822U #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_2 0x3824U #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_3 0x3826U #define CORE_DIG_IOCTRL_R_COMMON_PPI_OVR_1_4 0x3828U #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_0 0x3840U #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_1 0x3842U #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_2 0x3844U #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_3 0x3846U #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_4 0x3848U #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_5 0x384aU #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_6 0x384cU #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_7 0x384eU #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_8 0x3850U #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_9 0x3852U #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_10 0x3854U #define CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_11 0x3856U #define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_12 0x3858U #define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_13 0x385aU #define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_14 0x385cU #define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_2_15 0x385eU #define CORE_DIG_IOCTRL_R_AFE_CB_CTRL_3_0 0x3860U #define CORE_DIG_RW_COMMON_0 0x3880U #define CORE_DIG_RW_COMMON_1 0x3882U #define CORE_DIG_RW_COMMON_2 0x3884U #define CORE_DIG_RW_COMMON_3 0x3886U #define CORE_DIG_RW_COMMON_4 0x3888U #define CORE_DIG_RW_COMMON_5 0x388aU #define CORE_DIG_RW_COMMON_6 0x388cU #define CORE_DIG_RW_COMMON_7 0x388eU #define CORE_DIG_RW_COMMON_8 0x3890U #define CORE_DIG_RW_COMMON_9 0x3892U #define CORE_DIG_RW_COMMON_10 0x3894U #define CORE_DIG_RW_COMMON_11 0x3896U #define CORE_DIG_RW_COMMON_12 0x3898U #define CORE_DIG_RW_COMMON_13 0x389aU #define CORE_DIG_RW_COMMON_14 0x389cU #define CORE_DIG_RW_COMMON_15 0x389eU #define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_0 0x39e0U #define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_1 0x39e2U #define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_2 
0x39e4U #define CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_3 0x39e6U #define CORE_DIG_COMMON_RW_DESKEW_FINE_MEM 0x3fe0U #define CORE_DIG_COMMON_R_DESKEW_FINE_MEM 0x3fe2U #define PPI_RW_DPHY_LANE0_LBERT_0 0x4000U #define PPI_RW_DPHY_LANE0_LBERT_1 0x4002U #define PPI_R_DPHY_LANE0_LBERT_0 0x4004U #define PPI_R_DPHY_LANE0_LBERT_1 0x4006U #define PPI_RW_DPHY_LANE0_SPARE 0x4008U #define PPI_RW_DPHY_LANE1_LBERT_0 0x4400U #define PPI_RW_DPHY_LANE1_LBERT_1 0x4402U #define PPI_R_DPHY_LANE1_LBERT_0 0x4404U #define PPI_R_DPHY_LANE1_LBERT_1 0x4406U #define PPI_RW_DPHY_LANE1_SPARE 0x4408U #define PPI_RW_DPHY_LANE2_LBERT_0 0x4800U #define PPI_RW_DPHY_LANE2_LBERT_1 0x4802U #define PPI_R_DPHY_LANE2_LBERT_0 0x4804U #define PPI_R_DPHY_LANE2_LBERT_1 0x4806U #define PPI_RW_DPHY_LANE2_SPARE 0x4808U #define PPI_RW_DPHY_LANE3_LBERT_0 0x4c00U #define PPI_RW_DPHY_LANE3_LBERT_1 0x4c02U #define PPI_R_DPHY_LANE3_LBERT_0 0x4c04U #define PPI_R_DPHY_LANE3_LBERT_1 0x4c06U #define PPI_RW_DPHY_LANE3_SPARE 0x4c08U #define CORE_DIG_DLANE_0_RW_CFG_0 0x6000U #define CORE_DIG_DLANE_0_RW_CFG_1 0x6002U #define CORE_DIG_DLANE_0_RW_CFG_2 0x6004U #define CORE_DIG_DLANE_0_RW_LP_0 0x6080U #define CORE_DIG_DLANE_0_RW_LP_1 0x6082U #define CORE_DIG_DLANE_0_RW_LP_2 0x6084U #define CORE_DIG_DLANE_0_R_LP_0 0x60a0U #define CORE_DIG_DLANE_0_R_LP_1 0x60a2U #define CORE_DIG_DLANE_0_R_HS_TX_0 0x60e0U #define CORE_DIG_DLANE_0_RW_HS_RX_0 0x6100U #define CORE_DIG_DLANE_0_RW_HS_RX_1 0x6102U #define CORE_DIG_DLANE_0_RW_HS_RX_2 0x6104U #define CORE_DIG_DLANE_0_RW_HS_RX_3 0x6106U #define CORE_DIG_DLANE_0_RW_HS_RX_4 0x6108U #define CORE_DIG_DLANE_0_RW_HS_RX_5 0x610aU #define CORE_DIG_DLANE_0_RW_HS_RX_6 0x610cU #define CORE_DIG_DLANE_0_RW_HS_RX_7 0x610eU #define CORE_DIG_DLANE_0_RW_HS_RX_8 0x6110U #define CORE_DIG_DLANE_0_RW_HS_RX_9 0x6112U #define CORE_DIG_DLANE_0_R_HS_RX_0 0x6120U #define CORE_DIG_DLANE_0_R_HS_RX_1 0x6122U #define CORE_DIG_DLANE_0_R_HS_RX_2 0x6124U #define CORE_DIG_DLANE_0_R_HS_RX_3 0x6126U #define 
CORE_DIG_DLANE_0_R_HS_RX_4 0x6128U #define CORE_DIG_DLANE_0_RW_HS_TX_0 0x6200U #define CORE_DIG_DLANE_0_RW_HS_TX_1 0x6202U #define CORE_DIG_DLANE_0_RW_HS_TX_2 0x6204U #define CORE_DIG_DLANE_0_RW_HS_TX_3 0x6206U #define CORE_DIG_DLANE_0_RW_HS_TX_4 0x6208U #define CORE_DIG_DLANE_0_RW_HS_TX_5 0x620aU #define CORE_DIG_DLANE_0_RW_HS_TX_6 0x620cU #define CORE_DIG_DLANE_0_RW_HS_TX_7 0x620eU #define CORE_DIG_DLANE_0_RW_HS_TX_8 0x6210U #define CORE_DIG_DLANE_0_RW_HS_TX_9 0x6212U #define CORE_DIG_DLANE_0_RW_HS_TX_10 0x6214U #define CORE_DIG_DLANE_0_RW_HS_TX_11 0x6216U #define CORE_DIG_DLANE_0_RW_HS_TX_12 0x6218U #define CORE_DIG_DLANE_1_RW_CFG_0 0x6400U #define CORE_DIG_DLANE_1_RW_CFG_1 0x6402U #define CORE_DIG_DLANE_1_RW_CFG_2 0x6404U #define CORE_DIG_DLANE_1_RW_LP_0 0x6480U #define CORE_DIG_DLANE_1_RW_LP_1 0x6482U #define CORE_DIG_DLANE_1_RW_LP_2 0x6484U #define CORE_DIG_DLANE_1_R_LP_0 0x64a0U #define CORE_DIG_DLANE_1_R_LP_1 0x64a2U #define CORE_DIG_DLANE_1_R_HS_TX_0 0x64e0U #define CORE_DIG_DLANE_1_RW_HS_RX_0 0x6500U #define CORE_DIG_DLANE_1_RW_HS_RX_1 0x6502U #define CORE_DIG_DLANE_1_RW_HS_RX_2 0x6504U #define CORE_DIG_DLANE_1_RW_HS_RX_3 0x6506U #define CORE_DIG_DLANE_1_RW_HS_RX_4 0x6508U #define CORE_DIG_DLANE_1_RW_HS_RX_5 0x650aU #define CORE_DIG_DLANE_1_RW_HS_RX_6 0x650cU #define CORE_DIG_DLANE_1_RW_HS_RX_7 0x650eU #define CORE_DIG_DLANE_1_RW_HS_RX_8 0x6510U #define CORE_DIG_DLANE_1_RW_HS_RX_9 0x6512U #define CORE_DIG_DLANE_1_R_HS_RX_0 0x6520U #define CORE_DIG_DLANE_1_R_HS_RX_1 0x6522U #define CORE_DIG_DLANE_1_R_HS_RX_2 0x6524U #define CORE_DIG_DLANE_1_R_HS_RX_3 0x6526U #define CORE_DIG_DLANE_1_R_HS_RX_4 0x6528U #define CORE_DIG_DLANE_1_RW_HS_TX_0 0x6600U #define CORE_DIG_DLANE_1_RW_HS_TX_1 0x6602U #define CORE_DIG_DLANE_1_RW_HS_TX_2 0x6604U #define CORE_DIG_DLANE_1_RW_HS_TX_3 0x6606U #define CORE_DIG_DLANE_1_RW_HS_TX_4 0x6608U #define CORE_DIG_DLANE_1_RW_HS_TX_5 0x660aU #define CORE_DIG_DLANE_1_RW_HS_TX_6 0x660cU #define CORE_DIG_DLANE_1_RW_HS_TX_7 0x660eU #define 
CORE_DIG_DLANE_1_RW_HS_TX_8 0x6610U #define CORE_DIG_DLANE_1_RW_HS_TX_9 0x6612U #define CORE_DIG_DLANE_1_RW_HS_TX_10 0x6614U #define CORE_DIG_DLANE_1_RW_HS_TX_11 0x6616U #define CORE_DIG_DLANE_1_RW_HS_TX_12 0x6618U #define CORE_DIG_DLANE_2_RW_CFG_0 0x6800U #define CORE_DIG_DLANE_2_RW_CFG_1 0x6802U #define CORE_DIG_DLANE_2_RW_CFG_2 0x6804U #define CORE_DIG_DLANE_2_RW_LP_0 0x6880U #define CORE_DIG_DLANE_2_RW_LP_1 0x6882U #define CORE_DIG_DLANE_2_RW_LP_2 0x6884U #define CORE_DIG_DLANE_2_R_LP_0 0x68a0U #define CORE_DIG_DLANE_2_R_LP_1 0x68a2U #define CORE_DIG_DLANE_2_R_HS_TX_0 0x68e0U #define CORE_DIG_DLANE_2_RW_HS_RX_0 0x6900U #define CORE_DIG_DLANE_2_RW_HS_RX_1 0x6902U #define CORE_DIG_DLANE_2_RW_HS_RX_2 0x6904U #define CORE_DIG_DLANE_2_RW_HS_RX_3 0x6906U #define CORE_DIG_DLANE_2_RW_HS_RX_4 0x6908U #define CORE_DIG_DLANE_2_RW_HS_RX_5 0x690aU #define CORE_DIG_DLANE_2_RW_HS_RX_6 0x690cU #define CORE_DIG_DLANE_2_RW_HS_RX_7 0x690eU #define CORE_DIG_DLANE_2_RW_HS_RX_8 0x6910U #define CORE_DIG_DLANE_2_RW_HS_RX_9 0x6912U #define CORE_DIG_DLANE_2_R_HS_RX_0 0x6920U #define CORE_DIG_DLANE_2_R_HS_RX_1 0x6922U #define CORE_DIG_DLANE_2_R_HS_RX_2 0x6924U #define CORE_DIG_DLANE_2_R_HS_RX_3 0x6926U #define CORE_DIG_DLANE_2_R_HS_RX_4 0x6928U #define CORE_DIG_DLANE_2_RW_HS_TX_0 0x6a00U #define CORE_DIG_DLANE_2_RW_HS_TX_1 0x6a02U #define CORE_DIG_DLANE_2_RW_HS_TX_2 0x6a04U #define CORE_DIG_DLANE_2_RW_HS_TX_3 0x6a06U #define CORE_DIG_DLANE_2_RW_HS_TX_4 0x6a08U #define CORE_DIG_DLANE_2_RW_HS_TX_5 0x6a0aU #define CORE_DIG_DLANE_2_RW_HS_TX_6 0x6a0cU #define CORE_DIG_DLANE_2_RW_HS_TX_7 0x6a0eU #define CORE_DIG_DLANE_2_RW_HS_TX_8 0x6a10U #define CORE_DIG_DLANE_2_RW_HS_TX_9 0x6a12U #define CORE_DIG_DLANE_2_RW_HS_TX_10 0x6a14U #define CORE_DIG_DLANE_2_RW_HS_TX_11 0x6a16U #define CORE_DIG_DLANE_2_RW_HS_TX_12 0x6a18U #define CORE_DIG_DLANE_3_RW_CFG_0 0x6c00U #define CORE_DIG_DLANE_3_RW_CFG_1 0x6c02U #define CORE_DIG_DLANE_3_RW_CFG_2 0x6c04U #define CORE_DIG_DLANE_3_RW_LP_0 0x6c80U #define 
CORE_DIG_DLANE_3_RW_LP_1 0x6c82U #define CORE_DIG_DLANE_3_RW_LP_2 0x6c84U #define CORE_DIG_DLANE_3_R_LP_0 0x6ca0U #define CORE_DIG_DLANE_3_R_LP_1 0x6ca2U #define CORE_DIG_DLANE_3_R_HS_TX_0 0x6ce0U #define CORE_DIG_DLANE_3_RW_HS_RX_0 0x6d00U #define CORE_DIG_DLANE_3_RW_HS_RX_1 0x6d02U #define CORE_DIG_DLANE_3_RW_HS_RX_2 0x6d04U #define CORE_DIG_DLANE_3_RW_HS_RX_3 0x6d06U #define CORE_DIG_DLANE_3_RW_HS_RX_4 0x6d08U #define CORE_DIG_DLANE_3_RW_HS_RX_5 0x6d0aU #define CORE_DIG_DLANE_3_RW_HS_RX_6 0x6d0cU #define CORE_DIG_DLANE_3_RW_HS_RX_7 0x6d0eU #define CORE_DIG_DLANE_3_RW_HS_RX_8 0x6d10U #define CORE_DIG_DLANE_3_RW_HS_RX_9 0x6d12U #define CORE_DIG_DLANE_3_R_HS_RX_0 0x6d20U #define CORE_DIG_DLANE_3_R_HS_RX_1 0x6d22U #define CORE_DIG_DLANE_3_R_HS_RX_2 0x6d24U #define CORE_DIG_DLANE_3_R_HS_RX_3 0x6d26U #define CORE_DIG_DLANE_3_R_HS_RX_4 0x6d28U #define CORE_DIG_DLANE_3_RW_HS_TX_0 0x6e00U #define CORE_DIG_DLANE_3_RW_HS_TX_1 0x6e02U #define CORE_DIG_DLANE_3_RW_HS_TX_2 0x6e04U #define CORE_DIG_DLANE_3_RW_HS_TX_3 0x6e06U #define CORE_DIG_DLANE_3_RW_HS_TX_4 0x6e08U #define CORE_DIG_DLANE_3_RW_HS_TX_5 0x6e0aU #define CORE_DIG_DLANE_3_RW_HS_TX_6 0x6e0cU #define CORE_DIG_DLANE_3_RW_HS_TX_7 0x6e0eU #define CORE_DIG_DLANE_3_RW_HS_TX_8 0x6e10U #define CORE_DIG_DLANE_3_RW_HS_TX_9 0x6e12U #define CORE_DIG_DLANE_3_RW_HS_TX_10 0x6e14U #define CORE_DIG_DLANE_3_RW_HS_TX_11 0x6e16U #define CORE_DIG_DLANE_3_RW_HS_TX_12 0x6e18U #define CORE_DIG_DLANE_CLK_RW_CFG_0 0x7000U #define CORE_DIG_DLANE_CLK_RW_CFG_1 0x7002U #define CORE_DIG_DLANE_CLK_RW_CFG_2 0x7004U #define CORE_DIG_DLANE_CLK_RW_LP_0 0x7080U #define CORE_DIG_DLANE_CLK_RW_LP_1 0x7082U #define CORE_DIG_DLANE_CLK_RW_LP_2 0x7084U #define CORE_DIG_DLANE_CLK_R_LP_0 0x70a0U #define CORE_DIG_DLANE_CLK_R_LP_1 0x70a2U #define CORE_DIG_DLANE_CLK_R_HS_TX_0 0x70e0U #define CORE_DIG_DLANE_CLK_RW_HS_RX_0 0x7100U #define CORE_DIG_DLANE_CLK_RW_HS_RX_1 0x7102U #define CORE_DIG_DLANE_CLK_RW_HS_RX_2 0x7104U #define CORE_DIG_DLANE_CLK_RW_HS_RX_3 
0x7106U #define CORE_DIG_DLANE_CLK_RW_HS_RX_4 0x7108U #define CORE_DIG_DLANE_CLK_RW_HS_RX_5 0x710aU #define CORE_DIG_DLANE_CLK_RW_HS_RX_6 0x710cU #define CORE_DIG_DLANE_CLK_RW_HS_RX_7 0x710eU #define CORE_DIG_DLANE_CLK_RW_HS_RX_8 0x7110U #define CORE_DIG_DLANE_CLK_RW_HS_RX_9 0x7112U #define CORE_DIG_DLANE_CLK_R_HS_RX_0 0x7120U #define CORE_DIG_DLANE_CLK_R_HS_RX_1 0x7122U #define CORE_DIG_DLANE_CLK_R_HS_RX_2 0x7124U #define CORE_DIG_DLANE_CLK_R_HS_RX_3 0x7126U #define CORE_DIG_DLANE_CLK_R_HS_RX_4 0x7128U #define CORE_DIG_DLANE_CLK_RW_HS_TX_0 0x7200U #define CORE_DIG_DLANE_CLK_RW_HS_TX_1 0x7202U #define CORE_DIG_DLANE_CLK_RW_HS_TX_2 0x7204U #define CORE_DIG_DLANE_CLK_RW_HS_TX_3 0x7206U #define CORE_DIG_DLANE_CLK_RW_HS_TX_4 0x7208U #define CORE_DIG_DLANE_CLK_RW_HS_TX_5 0x720aU #define CORE_DIG_DLANE_CLK_RW_HS_TX_6 0x720cU #define CORE_DIG_DLANE_CLK_RW_HS_TX_7 0x720eU #define CORE_DIG_DLANE_CLK_RW_HS_TX_8 0x7210U #define CORE_DIG_DLANE_CLK_RW_HS_TX_9 0x7212U #define CORE_DIG_DLANE_CLK_RW_HS_TX_10 0x7214U #define CORE_DIG_DLANE_CLK_RW_HS_TX_11 0x7216U #define CORE_DIG_DLANE_CLK_RW_HS_TX_12 0x7218U #define PPI_RW_CPHY_TRIO0_LBERT_0 0x8000U #define PPI_RW_CPHY_TRIO0_LBERT_1 0x8002U #define PPI_R_CPHY_TRIO0_LBERT_0 0x8004U #define PPI_R_CPHY_TRIO0_LBERT_1 0x8006U #define PPI_RW_CPHY_TRIO0_SPARE 0x8008U #define PPI_RW_CPHY_TRIO1_LBERT_0 0x8400U #define PPI_RW_CPHY_TRIO1_LBERT_1 0x8402U #define PPI_R_CPHY_TRIO1_LBERT_0 0x8404U #define PPI_R_CPHY_TRIO1_LBERT_1 0x8406U #define PPI_RW_CPHY_TRIO1_SPARE 0x8408U #define PPI_RW_CPHY_TRIO2_LBERT_0 0x8800U #define PPI_RW_CPHY_TRIO2_LBERT_1 0x8802U #define PPI_R_CPHY_TRIO2_LBERT_0 0x8804U #define PPI_R_CPHY_TRIO2_LBERT_1 0x8806U #define PPI_RW_CPHY_TRIO2_SPARE 0x8808U #define CORE_DIG_CLANE_0_RW_CFG_0 0xa000U #define CORE_DIG_CLANE_0_RW_CFG_2 0xa004U #define CORE_DIG_CLANE_0_RW_LP_0 0xa080U #define CORE_DIG_CLANE_0_RW_LP_1 0xa082U #define CORE_DIG_CLANE_0_RW_LP_2 0xa084U #define CORE_DIG_CLANE_0_R_LP_0 0xa0a0U #define 
CORE_DIG_CLANE_0_R_LP_1 0xa0a2U #define CORE_DIG_CLANE_0_RW_HS_RX_0 0xa100U #define CORE_DIG_CLANE_0_RW_HS_RX_1 0xa102U #define CORE_DIG_CLANE_0_RW_HS_RX_2 0xa104U #define CORE_DIG_CLANE_0_RW_HS_RX_3 0xa106U #define CORE_DIG_CLANE_0_RW_HS_RX_4 0xa108U #define CORE_DIG_CLANE_0_RW_HS_RX_5 0xa10aU #define CORE_DIG_CLANE_0_RW_HS_RX_6 0xa10cU #define CORE_DIG_CLANE_0_R_RX_0 0xa120U #define CORE_DIG_CLANE_0_R_RX_1 0xa122U #define CORE_DIG_CLANE_0_R_TX_0 0xa124U #define CORE_DIG_CLANE_0_R_RX_2 0xa126U #define CORE_DIG_CLANE_0_R_RX_3 0xa128U #define CORE_DIG_CLANE_0_RW_HS_TX_0 0xa200U #define CORE_DIG_CLANE_0_RW_HS_TX_1 0xa202U #define CORE_DIG_CLANE_0_RW_HS_TX_2 0xa204U #define CORE_DIG_CLANE_0_RW_HS_TX_3 0xa206U #define CORE_DIG_CLANE_0_RW_HS_TX_4 0xa208U #define CORE_DIG_CLANE_0_RW_HS_TX_5 0xa20aU #define CORE_DIG_CLANE_0_RW_HS_TX_6 0xa20cU #define CORE_DIG_CLANE_0_RW_HS_TX_7 0xa20eU #define CORE_DIG_CLANE_0_RW_HS_TX_8 0xa210U #define CORE_DIG_CLANE_0_RW_HS_TX_9 0xa212U #define CORE_DIG_CLANE_0_RW_HS_TX_10 0xa214U #define CORE_DIG_CLANE_0_RW_HS_TX_11 0xa216U #define CORE_DIG_CLANE_0_RW_HS_TX_12 0xa218U #define CORE_DIG_CLANE_0_RW_HS_TX_13 0xa21aU #define CORE_DIG_CLANE_1_RW_CFG_0 0xa400U #define CORE_DIG_CLANE_1_RW_CFG_2 0xa404U #define CORE_DIG_CLANE_1_RW_LP_0 0xa480U #define CORE_DIG_CLANE_1_RW_LP_1 0xa482U #define CORE_DIG_CLANE_1_RW_LP_2 0xa484U #define CORE_DIG_CLANE_1_R_LP_0 0xa4a0U #define CORE_DIG_CLANE_1_R_LP_1 0xa4a2U #define CORE_DIG_CLANE_1_RW_HS_RX_0 0xa500U #define CORE_DIG_CLANE_1_RW_HS_RX_1 0xa502U #define CORE_DIG_CLANE_1_RW_HS_RX_2 0xa504U #define CORE_DIG_CLANE_1_RW_HS_RX_3 0xa506U #define CORE_DIG_CLANE_1_RW_HS_RX_4 0xa508U #define CORE_DIG_CLANE_1_RW_HS_RX_5 0xa50aU #define CORE_DIG_CLANE_1_RW_HS_RX_6 0xa50cU #define CORE_DIG_CLANE_1_R_RX_0 0xa520U #define CORE_DIG_CLANE_1_R_RX_1 0xa522U #define CORE_DIG_CLANE_1_R_TX_0 0xa524U #define CORE_DIG_CLANE_1_R_RX_2 0xa526U #define CORE_DIG_CLANE_1_R_RX_3 0xa528U #define CORE_DIG_CLANE_1_RW_HS_TX_0 0xa600U 
#define CORE_DIG_CLANE_1_RW_HS_TX_1 0xa602U #define CORE_DIG_CLANE_1_RW_HS_TX_2 0xa604U #define CORE_DIG_CLANE_1_RW_HS_TX_3 0xa606U #define CORE_DIG_CLANE_1_RW_HS_TX_4 0xa608U #define CORE_DIG_CLANE_1_RW_HS_TX_5 0xa60aU #define CORE_DIG_CLANE_1_RW_HS_TX_6 0xa60cU #define CORE_DIG_CLANE_1_RW_HS_TX_7 0xa60eU #define CORE_DIG_CLANE_1_RW_HS_TX_8 0xa610U #define CORE_DIG_CLANE_1_RW_HS_TX_9 0xa612U #define CORE_DIG_CLANE_1_RW_HS_TX_10 0xa614U #define CORE_DIG_CLANE_1_RW_HS_TX_11 0xa616U #define CORE_DIG_CLANE_1_RW_HS_TX_12 0xa618U #define CORE_DIG_CLANE_1_RW_HS_TX_13 0xa61aU #define CORE_DIG_CLANE_2_RW_CFG_0 0xa800U #define CORE_DIG_CLANE_2_RW_CFG_2 0xa804U #define CORE_DIG_CLANE_2_RW_LP_0 0xa880U #define CORE_DIG_CLANE_2_RW_LP_1 0xa882U #define CORE_DIG_CLANE_2_RW_LP_2 0xa884U #define CORE_DIG_CLANE_2_R_LP_0 0xa8a0U #define CORE_DIG_CLANE_2_R_LP_1 0xa8a2U #define CORE_DIG_CLANE_2_RW_HS_RX_0 0xa900U #define CORE_DIG_CLANE_2_RW_HS_RX_1 0xa902U #define CORE_DIG_CLANE_2_RW_HS_RX_2 0xa904U #define CORE_DIG_CLANE_2_RW_HS_RX_3 0xa906U #define CORE_DIG_CLANE_2_RW_HS_RX_4 0xa908U #define CORE_DIG_CLANE_2_RW_HS_RX_5 0xa90aU #define CORE_DIG_CLANE_2_RW_HS_RX_6 0xa90cU #define CORE_DIG_CLANE_2_R_RX_0 0xa920U #define CORE_DIG_CLANE_2_R_RX_1 0xa922U #define CORE_DIG_CLANE_2_R_TX_0 0xa924U #define CORE_DIG_CLANE_2_R_RX_2 0xa926U #define CORE_DIG_CLANE_2_R_RX_3 0xa928U #define CORE_DIG_CLANE_2_RW_HS_TX_0 0xaa00U #define CORE_DIG_CLANE_2_RW_HS_TX_1 0xaa02U #define CORE_DIG_CLANE_2_RW_HS_TX_2 0xaa04U #define CORE_DIG_CLANE_2_RW_HS_TX_3 0xaa06U #define CORE_DIG_CLANE_2_RW_HS_TX_4 0xaa08U #define CORE_DIG_CLANE_2_RW_HS_TX_5 0xaa0aU #define CORE_DIG_CLANE_2_RW_HS_TX_6 0xaa0cU #define CORE_DIG_CLANE_2_RW_HS_TX_7 0xaa0eU #define CORE_DIG_CLANE_2_RW_HS_TX_8 0xaa10U #define CORE_DIG_CLANE_2_RW_HS_TX_9 0xaa12U #define CORE_DIG_CLANE_2_RW_HS_TX_10 0xaa14U #define CORE_DIG_CLANE_2_RW_HS_TX_11 0xaa16U #define CORE_DIG_CLANE_2_RW_HS_TX_12 0xaa18U #define CORE_DIG_CLANE_2_RW_HS_TX_13 0xaa1aU /* dwc 
csi host controller registers */ #define IS_IO_CSI2_HOST_BASE(i) (IS_IO_BASE + 0x40000U + \ 0x2000U * (i)) #define VERSION 0 #define N_LANES 0x4U #define CSI2_RESETN 0x8U #define INT_ST_MAIN 0xcU #define DATA_IDS_1 0x10U #define DATA_IDS_2 0x14U #define CDPHY_MODE 0x1cU #define DATA_IDS_VC_1 0x30U #define DATA_IDS_VC_2 0x34U #define PHY_SHUTDOWNZ 0x40U #define DPHY_RSTZ 0x44U #define PHY_RX 0x48U #define PHY_STOPSTATE 0x4cU #define PHY_TEST_CTRL0 0x50U #define PHY_TEST_CTRL1 0x54U #define PPI_PG_PATTERN_VRES 0x60U #define PPI_PG_PATTERN_HRES 0x64U #define PPI_PG_CONFIG 0x68U #define PPI_PG_ENABLE 0x6cU #define PPI_PG_STATUS 0x70U #define VC_EXTENSION 0xc8U #define PHY_CAL 0xccU #define INT_ST_PHY_FATAL 0xe0U #define INT_MSK_PHY_FATAL 0xe4U #define INT_FORCE_PHY_FATAL 0xe8U #define INT_ST_PKT_FATAL 0xf0U #define INT_MSK_PKT_FATAL 0xf4U #define INT_FORCE_PKT_FATAL 0xf8U #define INT_ST_PHY 0x110U #define INT_MSK_PHY 0x114U #define INT_FORCE_PHY 0x118U #define INT_ST_LINE 0x130U #define INT_MSK_LINE 0x134U #define INT_FORCE_LINE 0x138U #define INT_ST_BNDRY_FRAME_FATAL 0x280U #define INT_MSK_BNDRY_FRAME_FATAL 0x284U #define INT_FORCE_BNDRY_FRAME_FATAL 0x288U #define INT_ST_SEQ_FRAME_FATAL 0x290U #define INT_MSK_SEQ_FRAME_FATAL 0x294U #define INT_FORCE_SEQ_FRAME_FATAL 0x298U #define INT_ST_CRC_FRAME_FATAL 0x2a0U #define INT_MSK_CRC_FRAME_FATAL 0x2a4U #define INT_FORCE_CRC_FRAME_FATAL 0x2a8U #define INT_ST_PLD_CRC_FATAL 0x2b0U #define INT_MSK_PLD_CRC_FATAL 0x2b4U #define INT_FORCE_PLD_CRC_FATAL 0x2b8U #define INT_ST_DATA_ID 0x2c0U #define INT_MSK_DATA_ID 0x2c4U #define INT_FORCE_DATA_ID 0x2c8U #define INT_ST_ECC_CORRECTED 0x2d0U #define INT_MSK_ECC_CORRECTED 0x2d4U #define INT_FORCE_ECC_CORRECTED 0x2d8U #define SCRAMBLING 0x300U #define SCRAMBLING_SEED1 0x304U #define SCRAMBLING_SEED2 0x308U #define SCRAMBLING_SEED3 0x30cU #define SCRAMBLING_SEED4 0x310U #define SCRAMBLING 0x300U #define IS_IO_CSI2_ADPL_PORT_BASE(i) (IS_IO_BASE + 0x40800U + \ 0x2000U * (i)) #define 
CSI2_ADPL_INPUT_MODE 0
#define CSI2_ADPL_CSI_RX_ERR_IRQ_CLEAR_EN	0x4U
#define CSI2_ADPL_CSI_RX_ERR_IRQ_CLEAR_ADDR	0x8U
#define CSI2_ADPL_CSI_RX_ERR_IRQ_STATUS		0xcU
#define CSI2_ADPL_IRQ_CTL_COMMON_STATUS		0xa4U
#define CSI2_ADPL_IRQ_CTL_COMMON_CLEAR		0xa8U
#define CSI2_ADPL_IRQ_CTL_COMMON_ENABLE		0xacU
#define CSI2_ADPL_IRQ_CTL_FS_STATUS		0xbcU
#define CSI2_ADPL_IRQ_CTL_FS_CLEAR		0xc0U
#define CSI2_ADPL_IRQ_CTL_FS_ENABLE		0xc4U
#define CSI2_ADPL_IRQ_CTL_FE_STATUS		0xc8U
#define CSI2_ADPL_IRQ_CTL_FE_CLEAR		0xccU
#define CSI2_ADPL_IRQ_CTL_FE_ENABLE		0xd0U
/* software control of the legacy CSI IRQs */
#define IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(i)	(IS_IO_BASE + 0x40c00U + \
						 0x2000U * (i))
#define IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(i)	(IS_IO_BASE + 0x40d00U + \
						 0x2000U * (i))
#define IS_IO_CSI2_LEGACY_IRQ_CTRL_BASE	(IS_IO_BASE + 0x49000U)
/* U suffix added for consistency with every other offset in this header */
#define IS_IO_CSI2_IRQ_CTRL_BASE	(IS_IO_BASE + 0x4e100U)
#define IRQ_CTL_EDGE			0
#define IRQ_CTL_MASK			0x4U
#define IRQ_CTL_STATUS			0x8U
#define IRQ_CTL_CLEAR			0xcU
#define IRQ_CTL_ENABLE			0x10U
/* FE irq for PTL */
#define IRQ1_CTL_MASK			0x14U
#define IRQ1_CTL_STATUS			0x18U
#define IRQ1_CTL_CLEAR			0x1cU
#define IRQ1_CTL_ENABLE			0x20U
/* software sets the clock gate to use the port or MGC */
#define IS_IO_GPREGS_BASE		(IS_IO_BASE + 0x49200U)
#define SRST_PORT_ARB			0
#define SRST_MGC			0x4U
#define SRST_WIDTH_CONV			0x8U
#define SRST_CSI_IRQ			0xcU
#define SRST_CSI_LEGACY_IRQ		0x10U
#define CLK_EN_TXCLKESC			0x14U
#define CLK_DIV_FACTOR_IS_CLK		0x18U
#define CLK_DIV_FACTOR_APB_CLK		0x1cU
#define CSI_PORT_CLK_GATE		0x20U
#define CSI_PORTAB_AGGREGATION		0x24U
#define MGC_CLK_GATE			0x2cU
#define CG_CTRL_BITS			0x3cU
#define SPARE_RW			0xf8U
#define SPARE_RO			0xfcU
#define IS_IO_CSI2_MPF_PORT_BASE(i)	(IS_IO_BASE + 0x53000U + \
					 0x1000U * (i))
#define MPF_16_IRQ_CNTRL_STATUS		0x238U
#define MPF_16_IRQ_CNTRL_CLEAR		0x23cU
#define MPF_16_IRQ_CNTRL_ENABLE		0x240U
/* software config of the PHY */
#define IS_IO_CSI2_GPREGS_BASE		(IS_IO_BASE + 0x53400U)
#define IPU8_IS_IO_CSI2_GPREGS_BASE
(IS_IO_BASE + 0x40e00U) #define CSI_ADAPT_LAYER_SRST 0 #define MPF_SRST_RST 0x4U #define CSI_ERR_IRQ_CTRL_SRST 0x8U #define CSI_SYNC_RC_SRST 0xcU #define CSI_CG_CTRL_BITS 0x10U #define SOC_CSI2HOST_SELECT 0x14U #define PHY_RESET 0x18U #define PHY_SHUTDOWN 0x1cU #define PHY_MODE 0x20U #define PHY_READY 0x24U #define PHY_CLK_LANE_FORCE_CONTROL 0x28U #define PHY_CLK_LANE_CONTROL 0x2cU #define PHY_CLK_LANE_STATUS 0x30U #define PHY_LANE_RX_ESC_REQ 0x34U #define PHY_LANE_RX_ESC_DATA 0x38U #define PHY_LANE_TURNDISABLE 0x3cU #define PHY_LANE_DIRECTION 0x40U #define PHY_LANE_FORCE_CONTROL 0x44U #define PHY_LANE_CONTROL_EN 0x48U #define PHY_LANE_CONTROL_DATAWIDTH 0x4cU #define PHY_LANE_STATUS 0x50U #define PHY_LANE_ERR 0x54U #define PHY_LANE_RXALP 0x58U #define PHY_LANE_RXALP_NIBBLE 0x5cU #define PHY_PARITY_ERROR 0x60U #define PHY_DEBUG_REGS_CLK_GATE_EN 0x64U #define SPARE_RW 0xf8U #define SPARE_RO 0xfcU /* software not touch */ #define PORT_ARB_BASE (IS_IO_BASE + 0x4e000) #define PORT_ARB_IRQ_CTL_STATUS 0x4U #define PORT_ARB_IRQ_CTL_CLEAR 0x8U #define PORT_ARB_IRQ_CTL_ENABLE 0xcU #define MGC_PPC 4U #define MGC_DTYPE_RAW(i) (((i) - 8) / 2) #define IS_IO_MGC_BASE (IS_IO_BASE + 0x48000U) #define MGC_KICK 0 #define MGC_ASYNC_STOP 0x4U #define MGC_PORT_OFFSET 0x100U #define MGC_CSI_PORT_MAP(i) (0x8 + (i) * 0x4) #define MGC_MG_PORT(i) (IS_IO_MGC_BASE + \ (i) * MGC_PORT_OFFSET) /* per mgc instance */ #define MGC_MG_CSI_ADAPT_LAYER_TYPE 0x28U #define MGC_MG_MODE 0x2cU #define MGC_MG_INIT_COUNTER 0x30U #define MGC_MG_MIPI_VC 0x34U #define MGC_MG_MIPI_DTYPES 0x38U #define MGC_MG_MULTI_DTYPES_MODE 0x3cU #define MGC_MG_NOF_FRAMES 0x40U #define MGC_MG_FRAME_DIM 0x44U #define MGC_MG_HBLANK 0x48U #define MGC_MG_VBLANK 0x4cU #define MGC_MG_TPG_MODE 0x50U #define MGC_MG_TPG_R0 0x54U #define MGC_MG_TPG_G0 0x58U #define MGC_MG_TPG_B0 0x5cU #define MGC_MG_TPG_R1 0x60U #define MGC_MG_TPG_G1 0x64U #define MGC_MG_TPG_B1 0x68U #define MGC_MG_TPG_FACTORS 0x6cU #define MGC_MG_TPG_MASKS 0x70U #define 
MGC_MG_TPG_XY_MASK 0x74U #define MGC_MG_TPG_TILE_DIM 0x78U #define MGC_MG_PRBS_LFSR_INIT_0 0x7cU #define MGC_MG_PRBS_LFSR_INIT_1 0x80U #define MGC_MG_SYNC_STOP_POINT 0x84U #define MGC_MG_SYNC_STOP_POINT_LOC 0x88U #define MGC_MG_ERR_INJECT 0x8cU #define MGC_MG_ERR_LOCATION 0x90U #define MGC_MG_DTO_SPEED_CTRL_EN 0x94U #define MGC_MG_DTO_SPEED_CTRL_INCR_VAL 0x98U #define MGC_MG_HOR_LOC_STTS 0x9cU #define MGC_MG_VER_LOC_STTS 0xa0U #define MGC_MG_FRAME_NUM_STTS 0xa4U #define MGC_MG_BUSY_STTS 0xa8U #define MGC_MG_STOPPED_STTS 0xacU /* tile width and height in pixels for Chess board and Color palette */ #define MGC_TPG_TILE_WIDTH 64U #define MGC_TPG_TILE_HEIGHT 64U #define IPU_CSI_PORT_A_ADDR_OFFSET 0 #define IPU_CSI_PORT_B_ADDR_OFFSET 0 #define IPU_CSI_PORT_C_ADDR_OFFSET 0 #define IPU_CSI_PORT_D_ADDR_OFFSET 0 /* * 0 - CSI RX Port 0 interrupt; * 1 - MPF Port 0 interrupt; * 2 - CSI RX Port 1 interrupt; * 3 - MPF Port 1 interrupt; * 4 - CSI RX Port 2 interrupt; * 5 - MPF Port 2 interrupt; * 6 - CSI RX Port 3 interrupt; * 7 - MPF Port 3 interrupt; * 8 - Port ARB FIFO 0 overflow; * 9 - Port ARB FIFO 1 overflow; * 10 - Port ARB FIFO 2 overflow; * 11 - Port ARB FIFO 3 overflow; * 12 - isys_cfgnoc_err_probe_intl; * 13-15 - reserved */ #define IPU7_CSI_IS_IO_IRQ_MASK 0xffffU /* Adapter layer irq */ #define IPU7_CSI_ADPL_IRQ_MASK 0xffffU /* sw irq from legacy irq control * legacy irq status * IPU7 * 0 - CSI Port 0 error interrupt * 1 - CSI Port 0 sync interrupt * 2 - CSI Port 1 error interrupt * 3 - CSI Port 1 sync interrupt * 4 - CSI Port 2 error interrupt * 5 - CSI Port 2 sync interrupt * 6 - CSI Port 3 error interrupt * 7 - CSI Port 3 sync interrupt * IPU7P5 * 0 - CSI Port 0 error interrupt * 1 - CSI Port 0 fs interrupt * 2 - CSI Port 0 fe interrupt * 3 - CSI Port 1 error interrupt * 4 - CSI Port 1 fs interrupt * 5 - CSI Port 1 fe interrupt * 6 - CSI Port 2 error interrupt * 7 - CSI Port 2 fs interrupt * 8 - CSI Port 2 fe interrupt */ #define IPU7_CSI_RX_LEGACY_IRQ_MASK 0x1ffU 
/* legacy error status per port * 0 - Error handler FIFO full; * 1 - Reserved Short Packet encoding detected; * 2 - Reserved Long Packet encoding detected; * 3 - Received packet is too short (fewer data words than specified in header); * 4 - Received packet is too long (more data words than specified in header); * 5 - Short packet discarded due to errors; * 6 - Long packet discarded due to errors; * 7 - CSI Combo Rx interrupt; * 8 - IDI CDC FIFO overflow; remaining bits are reserved and tied to 0; */ #define IPU7_CSI_RX_ERROR_IRQ_MASK 0xfffU /* * 0 - VC0 frame start received * 1 - VC0 frame end received * 2 - VC1 frame start received * 3 - VC1 frame end received * 4 - VC2 frame start received * 5 - VC2 frame end received * 6 - VC3 frame start received * 7 - VC3 frame end received * 8 - VC4 frame start received * 9 - VC4 frame end received * 10 - VC5 frame start received * 11 - VC5 frame end received * 12 - VC6 frame start received * 13 - VC6 frame end received * 14 - VC7 frame start received * 15 - VC7 frame end received * 16 - VC8 frame start received * 17 - VC8 frame end received * 18 - VC9 frame start received * 19 - VC9 frame end received * 20 - VC10 frame start received * 21 - VC10 frame end received * 22 - VC11 frame start received * 23 - VC11 frame end received * 24 - VC12 frame start received * 25 - VC12 frame end received * 26 - VC13 frame start received * 27 - VC13 frame end received * 28 - VC14 frame start received * 29 - VC14 frame end received * 30 - VC15 frame start received * 31 - VC15 frame end received */ #define IPU7_CSI_RX_SYNC_IRQ_MASK 0 /* 0xffffffff */ #define IPU7P5_CSI_RX_SYNC_FE_IRQ_MASK 0 /* 0xffffffff */ #define CSI_RX_NUM_ERRORS_IN_IRQ 12U #define CSI_RX_NUM_SYNC_IN_IRQ 32U enum CSI_FE_MODE_TYPE { CSI_FE_DPHY_MODE = 0, CSI_FE_CPHY_MODE = 1, }; enum CSI_FE_INPUT_MODE { CSI_SENSOR_INPUT = 0, CSI_MIPIGEN_INPUT = 1, }; enum MGC_CSI_ADPL_TYPE { MGC_MAPPED_2_LANES = 0, MGC_MAPPED_4_LANES = 1, }; enum CSI2HOST_SELECTION { CSI2HOST_SEL_SOC = 0, 
CSI2HOST_SEL_CSI2HOST = 1, }; #define IPU7_ISYS_LEGACY_IRQ_CSI2(port) (0x3U << (port)) #define IPU7P5_ISYS_LEGACY_IRQ_CSI2(port) (0x7U << (port)) /* ---------------------------------------------------------------- */ #define CSI_REG_BASE 0x220000U #define CSI_REG_BASE_PORT(id) ((id) * 0x1000) /* CSI Port General Purpose Registers */ #define CSI_REG_PORT_GPREG_SRST 0 #define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST 0x4U #define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL 0x8U #define CSI_RX_NUM_IRQ 32U #define IPU7_CSI_RX_SYNC_FS_VC 0x55555555U #define IPU7_CSI_RX_SYNC_FE_VC 0xaaaaaaaaU #define IPU7P5_CSI_RX_SYNC_FS_VC 0xffffU #define IPU7P5_CSI_RX_SYNC_FE_VC 0xffffU #endif /* IPU7_ISYS_CSI2_REG_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.c000066400000000000000000000362421503071307200272330ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-isys.h" #include "ipu7-isys-csi2.h" #include "ipu7-isys-csi2-regs.h" #include "ipu7-isys-csi-phy.h" static const u32 csi2_supported_codes[] = { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_RGB565_1X16, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV10_1X20, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, 0, }; #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 15, 0) s64 ipu7_isys_csi2_get_link_freq(struct ipu7_isys_csi2 *csi2) { struct media_pad *src_pad; if (!csi2) return -EINVAL; src_pad = 
media_entity_remote_source_pad_unique(&csi2->asd.sd.entity); if (IS_ERR(src_pad)) { dev_err(&csi2->isys->adev->auxdev.dev, "can't get source pad of %s (%ld)\n", csi2->asd.sd.name, PTR_ERR(src_pad)); return PTR_ERR(src_pad); } return v4l2_get_link_freq(src_pad, 0, 0); } #else s64 ipu7_isys_csi2_get_link_freq(struct ipu7_isys_csi2 *csi2) { struct media_pad *src_pad; struct v4l2_subdev *ext_sd; struct device *dev; if (!csi2) return -EINVAL; dev = &csi2->isys->adev->auxdev.dev; src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity); if (IS_ERR(src_pad)) { dev_err(dev, "can't get source pad of %s (%ld)\n", csi2->asd.sd.name, PTR_ERR(src_pad)); return PTR_ERR(src_pad); } ext_sd = media_entity_to_v4l2_subdev(src_pad->entity); if (WARN(!ext_sd, "Failed to get subdev for %s\n", csi2->asd.sd.name)) return -ENODEV; return v4l2_get_link_freq(ext_sd->ctrl_handler, 0, 0); } #endif static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); struct ipu7_isys_csi2 *csi2 = to_ipu7_isys_csi2(asd); struct device *dev = &csi2->isys->adev->auxdev.dev; dev_dbg(dev, "csi2 subscribe event(type %u id %u)\n", sub->type, sub->id); switch (sub->type) { case V4L2_EVENT_FRAME_SYNC: return v4l2_event_subscribe(fh, sub, 10, NULL); case V4L2_EVENT_CTRL: return v4l2_ctrl_subscribe_event(fh, sub); default: return -EINVAL; } } static const struct v4l2_subdev_core_ops csi2_sd_core_ops = { .subscribe_event = csi2_subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; static void csi2_irq_en(struct ipu7_isys_csi2 *csi2, bool enable) { struct ipu7_device *isp = csi2->isys->adev->isp; unsigned int offset, mask; if (!enable) { /* disable CSI2 legacy error irq */ offset = IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(csi2->port); mask = IPU7_CSI_RX_ERROR_IRQ_MASK; writel(mask, csi2->base + offset + IRQ_CTL_CLEAR); writel(0, csi2->base + offset + IRQ_CTL_MASK); writel(0, 
csi2->base + offset + IRQ_CTL_ENABLE); /* disable CSI2 legacy sync irq */ offset = IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(csi2->port); mask = IPU7_CSI_RX_SYNC_IRQ_MASK; writel(mask, csi2->base + offset + IRQ_CTL_CLEAR); writel(0, csi2->base + offset + IRQ_CTL_MASK); writel(0, csi2->base + offset + IRQ_CTL_ENABLE); if (!is_ipu7(isp->hw_ver)) { writel(mask, csi2->base + offset + IRQ1_CTL_CLEAR); writel(0, csi2->base + offset + IRQ1_CTL_MASK); writel(0, csi2->base + offset + IRQ1_CTL_ENABLE); } return; } /* enable CSI2 legacy error irq */ offset = IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(csi2->port); mask = IPU7_CSI_RX_ERROR_IRQ_MASK; writel(mask, csi2->base + offset + IRQ_CTL_CLEAR); writel(mask, csi2->base + offset + IRQ_CTL_MASK); writel(mask, csi2->base + offset + IRQ_CTL_ENABLE); /* enable CSI2 legacy sync irq */ offset = IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(csi2->port); mask = IPU7_CSI_RX_SYNC_IRQ_MASK; writel(mask, csi2->base + offset + IRQ_CTL_CLEAR); writel(mask, csi2->base + offset + IRQ_CTL_MASK); writel(mask, csi2->base + offset + IRQ_CTL_ENABLE); mask = IPU7P5_CSI_RX_SYNC_FE_IRQ_MASK; if (!is_ipu7(isp->hw_ver)) { writel(mask, csi2->base + offset + IRQ1_CTL_CLEAR); writel(mask, csi2->base + offset + IRQ1_CTL_MASK); writel(mask, csi2->base + offset + IRQ1_CTL_ENABLE); } } static void ipu7_isys_csi2_disable_stream(struct ipu7_isys_csi2 *csi2) { struct ipu7_isys *isys = csi2->isys; void __iomem *isys_base = isys->pdata->base; ipu7_isys_csi_phy_powerdown(csi2); writel(0x4, isys_base + IS_IO_GPREGS_BASE + CLK_DIV_FACTOR_APB_CLK); csi2_irq_en(csi2, 0); } static int ipu7_isys_csi2_enable_stream(struct ipu7_isys_csi2 *csi2) { struct ipu7_isys *isys = csi2->isys; struct device *dev = &isys->adev->auxdev.dev; void __iomem *isys_base = isys->pdata->base; unsigned int port, nlanes, offset; int ret; port = csi2->port; nlanes = csi2->nlanes; offset = IS_IO_GPREGS_BASE; writel(0x2, isys_base + offset + CLK_DIV_FACTOR_APB_CLK); dev_dbg(dev, "port %u CLK_GATE = 0x%04x 
DIV_FACTOR_APB_CLK=0x%04x\n", port, readl(isys_base + offset + CSI_PORT_CLK_GATE), readl(isys_base + offset + CLK_DIV_FACTOR_APB_CLK)); if (port == 0U && nlanes == 4U && !is_ipu7(isys->adev->isp->hw_ver)) { dev_dbg(dev, "CSI port %u in aggregation mode\n", port); writel(0x1, isys_base + offset + CSI_PORTAB_AGGREGATION); } /* input is coming from CSI receiver (sensor) */ offset = IS_IO_CSI2_ADPL_PORT_BASE(port); writel(CSI_SENSOR_INPUT, isys_base + offset + CSI2_ADPL_INPUT_MODE); writel(1, isys_base + offset + CSI2_ADPL_CSI_RX_ERR_IRQ_CLEAR_EN); ret = ipu7_isys_csi_phy_powerup(csi2); if (ret) { dev_err(dev, "CSI-%d PHY power up failed %d\n", port, ret); return ret; } csi2_irq_en(csi2, 1); return 0; } static int ipu7_isys_csi2_set_sel(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); struct device *dev = &asd->isys->adev->auxdev.dev; struct v4l2_mbus_framefmt *sink_ffmt; struct v4l2_mbus_framefmt *src_ffmt; struct v4l2_rect *crop; if (sel->pad == CSI2_PAD_SINK || sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, sel->pad, sel->stream); if (!sink_ffmt) return -EINVAL; src_ffmt = v4l2_subdev_state_get_format(state, sel->pad, sel->stream); if (!src_ffmt) return -EINVAL; crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); if (!crop) return -EINVAL; /* Only vertical cropping is supported */ sel->r.left = 0; sel->r.width = sink_ffmt->width; /* Non-bayer formats can't be single line cropped */ if (!ipu7_isys_is_bayer_format(sink_ffmt->code)) sel->r.top &= ~1U; sel->r.height = clamp(sel->r.height & ~1U, IPU_ISYS_MIN_HEIGHT, sink_ffmt->height - sel->r.top); *crop = sel->r; /* update source pad format */ src_ffmt->width = sel->r.width; src_ffmt->height = sel->r.height; if (ipu7_isys_is_bayer_format(sink_ffmt->code)) src_ffmt->code = ipu7_isys_convert_bayer_order(sink_ffmt->code, sel->r.left, 
sel->r.top); dev_dbg(dev, "set crop for %s sel: %d,%d,%d,%d code: 0x%x\n", sd->name, sel->r.left, sel->r.top, sel->r.width, sel->r.height, src_ffmt->code); return 0; } static int ipu7_isys_csi2_get_sel(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_selection *sel) { struct v4l2_mbus_framefmt *sink_ffmt; struct v4l2_rect *crop; int ret = 0; if (sd->entity.pads[sel->pad].flags & MEDIA_PAD_FL_SINK) return -EINVAL; sink_ffmt = v4l2_subdev_state_get_opposite_stream_format(state, sel->pad, sel->stream); if (!sink_ffmt) return -EINVAL; crop = v4l2_subdev_state_get_crop(state, sel->pad, sel->stream); if (!crop) return -EINVAL; switch (sel->target) { case V4L2_SEL_TGT_CROP_DEFAULT: case V4L2_SEL_TGT_CROP_BOUNDS: sel->r.left = 0; sel->r.top = 0; sel->r.width = sink_ffmt->width; sel->r.height = sink_ffmt->height; break; case V4L2_SEL_TGT_CROP: sel->r = *crop; break; default: ret = -EINVAL; } return ret; } /* * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set * of streams. 
*/ #define CSI2_SUBDEV_MAX_STREAM_ID 63 static int ipu7_isys_csi2_enable_streams(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, u32 pad, u64 streams_mask) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); struct ipu7_isys_csi2 *csi2 = to_ipu7_isys_csi2(asd); struct v4l2_subdev *r_sd; struct media_pad *r_pad; u32 sink_pad, sink_stream; int ret, i; if (!csi2->stream_count) { dev_dbg(&csi2->isys->adev->auxdev.dev, "stream on CSI2-%u with %u lanes\n", csi2->port, csi2->nlanes); ret = ipu7_isys_csi2_enable_stream(csi2); if (ret) return ret; } for (i = 0; i <= CSI2_SUBDEV_MAX_STREAM_ID; i++) { if (streams_mask & BIT_ULL(i)) break; } ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, &sink_pad, &sink_stream); if (ret) return ret; r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); r_sd = media_entity_to_v4l2_subdev(r_pad->entity); ret = v4l2_subdev_enable_streams(r_sd, r_pad->index, BIT_ULL(sink_stream)); if (!ret) { csi2->stream_count++; return 0; } if (!csi2->stream_count) ipu7_isys_csi2_disable_stream(csi2); return ret; } static int ipu7_isys_csi2_disable_streams(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, u32 pad, u64 streams_mask) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); struct ipu7_isys_csi2 *csi2 = to_ipu7_isys_csi2(asd); struct v4l2_subdev *r_sd; struct media_pad *r_pad; u32 sink_pad, sink_stream; int ret, i; for (i = 0; i <= CSI2_SUBDEV_MAX_STREAM_ID; i++) { if (streams_mask & BIT_ULL(i)) break; } ret = v4l2_subdev_routing_find_opposite_end(&state->routing, pad, i, &sink_pad, &sink_stream); if (ret) return ret; r_pad = media_pad_remote_pad_first(&sd->entity.pads[CSI2_PAD_SINK]); r_sd = media_entity_to_v4l2_subdev(r_pad->entity); v4l2_subdev_disable_streams(r_sd, r_pad->index, BIT_ULL(sink_stream)); if (--csi2->stream_count) return 0; dev_dbg(&csi2->isys->adev->auxdev.dev, "stream off CSI2-%u with %u lanes\n", csi2->port, csi2->nlanes); ipu7_isys_csi2_disable_stream(csi2); return 0; 
} static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = { .get_fmt = v4l2_subdev_get_fmt, .set_fmt = ipu7_isys_subdev_set_fmt, .get_selection = ipu7_isys_csi2_get_sel, .set_selection = ipu7_isys_csi2_set_sel, .enum_mbus_code = ipu7_isys_subdev_enum_mbus_code, .enable_streams = ipu7_isys_csi2_enable_streams, .disable_streams = ipu7_isys_csi2_disable_streams, .set_routing = ipu7_isys_subdev_set_routing, }; static const struct v4l2_subdev_ops csi2_sd_ops = { .core = &csi2_sd_core_ops, .pad = &csi2_sd_pad_ops, }; static const struct media_entity_operations csi2_entity_ops = { .link_validate = v4l2_subdev_link_validate, .has_pad_interdep = v4l2_subdev_has_pad_interdep, }; void ipu7_isys_csi2_cleanup(struct ipu7_isys_csi2 *csi2) { if (!csi2->isys) return; v4l2_device_unregister_subdev(&csi2->asd.sd); v4l2_subdev_cleanup(&csi2->asd.sd); ipu7_isys_subdev_cleanup(&csi2->asd); csi2->isys = NULL; } int ipu7_isys_csi2_init(struct ipu7_isys_csi2 *csi2, struct ipu7_isys *isys, void __iomem *base, unsigned int index) { struct device *dev = &isys->adev->auxdev.dev; int ret; csi2->isys = isys; csi2->base = base; csi2->port = index; if (!is_ipu7(isys->adev->isp->hw_ver)) csi2->legacy_irq_mask = 0x7U << (index * 3U); else csi2->legacy_irq_mask = 0x3U << (index * 2U); dev_dbg(dev, "csi-%d legacy irq mask = 0x%x\n", index, csi2->legacy_irq_mask); csi2->asd.sd.entity.ops = &csi2_entity_ops; csi2->asd.isys = isys; ret = ipu7_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0, NR_OF_CSI2_SINK_PADS, NR_OF_CSI2_SRC_PADS); if (ret) return ret; csi2->asd.source = (int)index; csi2->asd.supported_codes = csi2_supported_codes; snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name), IPU_ISYS_ENTITY_PREFIX " CSI2 %u", index); v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd); ret = v4l2_subdev_init_finalize(&csi2->asd.sd); if (ret) { dev_err(dev, "failed to init v4l2 subdev (%d)\n", ret); goto isys_subdev_cleanup; } ret = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd); if (ret) { 
dev_err(dev, "failed to register v4l2 subdev (%d)\n", ret); goto v4l2_subdev_cleanup; } return 0; v4l2_subdev_cleanup: v4l2_subdev_cleanup(&csi2->asd.sd); isys_subdev_cleanup: ipu7_isys_subdev_cleanup(&csi2->asd); return ret; } void ipu7_isys_csi2_sof_event_by_stream(struct ipu7_isys_stream *stream) { struct ipu7_isys_csi2 *csi2 = ipu7_isys_subdev_to_csi2(stream->asd); struct device *dev = &stream->isys->adev->auxdev.dev; struct video_device *vdev = csi2->asd.sd.devnode; struct v4l2_event ev = { .type = V4L2_EVENT_FRAME_SYNC, }; ev.id = stream->vc; ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); v4l2_event_queue(vdev, &ev); dev_dbg(dev, "sof_event::csi2-%i sequence: %i, vc: %d\n", csi2->port, ev.u.frame_sync.frame_sequence, stream->vc); } void ipu7_isys_csi2_eof_event_by_stream(struct ipu7_isys_stream *stream) { struct ipu7_isys_csi2 *csi2 = ipu7_isys_subdev_to_csi2(stream->asd); struct device *dev = &stream->isys->adev->auxdev.dev; u32 frame_sequence = atomic_read(&stream->sequence); dev_dbg(dev, "eof_event::csi2-%i sequence: %i\n", csi2->port, frame_sequence); } int ipu7_isys_csi2_get_remote_desc(u32 source_stream, struct ipu7_isys_csi2 *csi2, struct media_entity *source_entity, struct v4l2_mbus_frame_desc_entry *entry, int *nr_queues) { struct v4l2_mbus_frame_desc_entry *desc_entry = NULL; struct device *dev = &csi2->isys->adev->auxdev.dev; struct v4l2_mbus_frame_desc desc; struct v4l2_subdev *source; struct media_pad *pad; unsigned int i; int ret; source = media_entity_to_v4l2_subdev(source_entity); if (!source) return -EPIPE; pad = media_pad_remote_pad_first(&csi2->asd.pad[CSI2_PAD_SINK]); if (!pad) return -EPIPE; ret = v4l2_subdev_call(source, pad, get_frame_desc, pad->index, &desc); if (ret) return ret; if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { dev_err(dev, "Unsupported frame descriptor type\n"); return -EINVAL; } for (i = 0; i < desc.num_entries; i++) { if (source_stream == desc.entry[i].stream) { desc_entry = &desc.entry[i]; 
break; } } if (!desc_entry) { dev_err(dev, "Failed to find stream %u from remote subdev\n", source_stream); return -EINVAL; } if (desc_entry->bus.csi2.vc >= NR_OF_CSI2_VC) { dev_err(dev, "invalid vc %d\n", desc_entry->bus.csi2.vc); return -EINVAL; } *entry = *desc_entry; for (i = 0; i < desc.num_entries; i++) { if (desc_entry->bus.csi2.vc == desc.entry[i].bus.csi2.vc) (*nr_queues)++; } return 0; } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-csi2.h000066400000000000000000000041611503071307200272330ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_ISYS_CSI2_H #define IPU7_ISYS_CSI2_H #include #include #include "ipu7-isys-subdev.h" #include "ipu7-isys-video.h" struct ipu7_isys; struct ipu7_isys_csi2_pdata; struct ipu7_isys_stream; #define NR_OF_CSI2_VC 16U #define INVALID_VC_ID -1 #define NR_OF_CSI2_SINK_PADS 1U #define CSI2_PAD_SINK 0U #define NR_OF_CSI2_SRC_PADS 8U #define CSI2_PAD_SRC 1U #define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SRC_PADS) #define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0 #define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0 #define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95 #define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8 #define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0 #define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0 #define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85 #define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2 /* * struct ipu7_isys_csi2 * * @nlanes: number of lanes in the receiver */ struct ipu7_isys_csi2 { struct ipu7_isys_subdev asd; struct ipu7_isys_csi2_pdata *pdata; struct ipu7_isys *isys; struct ipu7_isys_video av[NR_OF_CSI2_SRC_PADS]; void __iomem *base; u32 receiver_errors; u32 legacy_irq_mask; unsigned int nlanes; unsigned int port; unsigned int phy_mode; unsigned int stream_count; }; #define ipu7_isys_subdev_to_csi2(__sd) \ container_of(__sd, struct ipu7_isys_csi2, asd) #define to_ipu7_isys_csi2(__asd) container_of(__asd, struct 
ipu7_isys_csi2, asd) s64 ipu7_isys_csi2_get_link_freq(struct ipu7_isys_csi2 *csi2); int ipu7_isys_csi2_init(struct ipu7_isys_csi2 *csi2, struct ipu7_isys *isys, void __iomem *base, unsigned int index); void ipu7_isys_csi2_cleanup(struct ipu7_isys_csi2 *csi2); void ipu7_isys_csi2_sof_event_by_stream(struct ipu7_isys_stream *stream); void ipu7_isys_csi2_eof_event_by_stream(struct ipu7_isys_stream *stream); int ipu7_isys_csi2_get_remote_desc(u32 source_stream, struct ipu7_isys_csi2 *csi2, struct media_entity *source_entity, struct v4l2_mbus_frame_desc_entry *entry, int *nr_queues); #endif /* IPU7_ISYS_CSI2_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-queue.c000066400000000000000000000700341503071307200275140ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET #include #endif #include #include #include #include #include "abi/ipu7_fw_isys_abi.h" #include "ipu7-bus.h" #include "ipu7-dma.h" #include "ipu7-fw-isys.h" #include "ipu7-isys.h" #include "ipu7-isys-csi2-regs.h" #include "ipu7-isys-video.h" #include "ipu7-platform-regs.h" #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET #include "ipu7-cpd.h" #endif #define IPU_MAX_FRAME_COUNTER (U8_MAX + 1) static int ipu7_isys_buf_init(struct vb2_buffer *vb) { struct ipu7_isys *isys = vb2_get_drv_priv(vb->vb2_queue); struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); struct ipu7_isys_video_buffer *ivb = vb2_buffer_to_ipu7_isys_video_buffer(vvb); int ret; ret = ipu7_dma_map_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); if (ret) return ret; ivb->dma_addr = sg_dma_address(sg->sgl); return 0; } static void ipu7_isys_buf_cleanup(struct vb2_buffer *vb) { struct ipu7_isys *isys = vb2_get_drv_priv(vb->vb2_queue); struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0); 
struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); struct ipu7_isys_video_buffer *ivb = vb2_buffer_to_ipu7_isys_video_buffer(vvb); ivb->dma_addr = 0; ipu7_dma_unmap_sgtable(isys->adev, sg, DMA_TO_DEVICE, 0); } static int ipu7_isys_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], struct device *alloc_devs[]) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; u32 size = av->pix_fmt.sizeimage; /* num_planes == 0: we're being called through VIDIOC_REQBUFS */ if (!*num_planes) { sizes[0] = size; } else if (sizes[0] < size) { dev_dbg(dev, "%s: queue setup: size %u < %u\n", av->vdev.name, sizes[0], size); return -EINVAL; } *num_planes = 1; return 0; } static int ipu7_isys_buf_prepare(struct vb2_buffer *vb) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; u32 bytesperline = av->pix_fmt.bytesperline; u32 height = av->pix_fmt.height; dev_dbg(dev, "buffer: %s: configured size %u, buffer size %lu\n", av->vdev.name, av->pix_fmt.sizeimage, vb2_plane_size(vb, 0)); if (av->pix_fmt.sizeimage > vb2_plane_size(vb, 0)) return -EINVAL; dev_dbg(dev, "buffer: %s: bytesperline %u, height %u\n", av->vdev.name, bytesperline, height); vb2_set_plane_payload(vb, 0, bytesperline * height); return 0; } /* * Queue a buffer list back to incoming or active queues. The buffers * are removed from the buffer list. 
*/ void ipu7_isys_buffer_list_queue(struct ipu7_isys_buffer_list *bl, unsigned long op_flags, enum vb2_buffer_state state) { struct ipu7_isys_buffer *ib, *ib_safe; unsigned long flags; bool first = true; if (!bl) return; WARN_ON_ONCE(!bl->nbufs); WARN_ON_ONCE(op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE && op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING); list_for_each_entry_safe(ib, ib_safe, &bl->head, head) { struct ipu7_isys_video *av; struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib); struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); av = ipu7_isys_queue_to_video(aq); spin_lock_irqsave(&aq->lock, flags); list_del(&ib->head); if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE) list_add(&ib->head, &aq->active); else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING) list_add_tail(&ib->head, &aq->incoming); spin_unlock_irqrestore(&aq->lock, flags); if (op_flags & IPU_ISYS_BUFFER_LIST_FL_SET_STATE) vb2_buffer_done(vb, state); if (first) { dev_dbg(&av->isys->adev->auxdev.dev, "queue buf list %p flags %lx, s %d, %d bufs\n", bl, op_flags, state, bl->nbufs); first = false; } bl->nbufs--; } WARN_ON(bl->nbufs); } /* * flush_firmware_streamon_fail() - Flush in cases where requests may * have been queued to firmware and the *firmware streamon fails for a * reason or another. */ static void flush_firmware_streamon_fail(struct ipu7_isys_stream *stream) { struct ipu7_isys_queue *aq; unsigned long flags; lockdep_assert_held(&stream->mutex); list_for_each_entry(aq, &stream->queues, node) { struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_buffer *ib, *ib_safe; spin_lock_irqsave(&aq->lock, flags); list_for_each_entry_safe(ib, ib_safe, &aq->active, head) { struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib); list_del(&ib->head); if (av->streaming) { dev_dbg(dev, "%s: queue buffer %u back to incoming\n", av->vdev.name, vb->index); /* Queue already streaming, return to driver. 
*/ list_add(&ib->head, &aq->incoming); continue; } /* Queue not yet streaming, return to user. */ dev_dbg(dev, "%s: return %u back to videobuf2\n", av->vdev.name, vb->index); vb2_buffer_done(ipu7_isys_buffer_to_vb2_buffer(ib), VB2_BUF_STATE_QUEUED); } spin_unlock_irqrestore(&aq->lock, flags); } } /* * Attempt obtaining a buffer list from the incoming queues, a list of buffers * that contains one entry from each video buffer queue. If a buffer can't be * obtained from every queue, the buffers are returned back to the queue. */ static int buffer_list_get(struct ipu7_isys_stream *stream, struct ipu7_isys_buffer_list *bl) { unsigned long buf_flag = IPU_ISYS_BUFFER_LIST_FL_INCOMING; struct device *dev = &stream->isys->adev->auxdev.dev; struct ipu7_isys_queue *aq; unsigned long flags; bl->nbufs = 0; INIT_LIST_HEAD(&bl->head); list_for_each_entry(aq, &stream->queues, node) { struct ipu7_isys_buffer *ib; spin_lock_irqsave(&aq->lock, flags); if (list_empty(&aq->incoming)) { spin_unlock_irqrestore(&aq->lock, flags); if (!list_empty(&bl->head)) ipu7_isys_buffer_list_queue(bl, buf_flag, 0); return -ENODATA; } ib = list_last_entry(&aq->incoming, struct ipu7_isys_buffer, head); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); if (av->skipframe) { atomic_set(&ib->skipframe_flag, 1); av->skipframe--; } else { atomic_set(&ib->skipframe_flag, 0); } #endif dev_dbg(dev, "buffer: %s: buffer %u\n", ipu7_isys_queue_to_video(aq)->vdev.name, ipu7_isys_buffer_to_vb2_buffer(ib)->index); list_del(&ib->head); list_add(&ib->head, &bl->head); spin_unlock_irqrestore(&aq->lock, flags); bl->nbufs++; } dev_dbg(dev, "get buffer list %p, %u buffers\n", bl, bl->nbufs); return 0; } static void ipu7_isys_buf_to_fw_frame_buf_pin(struct vb2_buffer *vb, struct ipu7_insys_buffset *set) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); struct ipu7_isys_video_buffer *ivb = 
vb2_buffer_to_ipu7_isys_video_buffer(vvb); #ifndef IPU8_INSYS_NEW_ABI set->output_pins[aq->fw_output].addr = ivb->dma_addr; set->output_pins[aq->fw_output].user_token = (uintptr_t)set; #else set->output_pins[aq->fw_output].pin_payload.addr = ivb->dma_addr; set->output_pins[aq->fw_output].pin_payload.user_token = (uintptr_t)set; set->output_pins[aq->fw_output].upipe_capture_cfg = 0; #endif } /* * Convert a buffer list to a isys fw ABI framebuffer set. The * buffer list is not modified. */ #define IPU_ISYS_FRAME_NUM_THRESHOLD (30) void ipu7_isys_buffer_to_fw_frame_buff(struct ipu7_insys_buffset *set, struct ipu7_isys_stream *stream, struct ipu7_isys_buffer_list *bl) { struct ipu7_isys_buffer *ib; u32 buf_id; WARN_ON(!bl->nbufs); set->skip_frame = 0; set->capture_msg_map = IPU_INSYS_FRAME_ENABLE_MSG_SEND_RESP | IPU_INSYS_FRAME_ENABLE_MSG_SEND_IRQ; buf_id = atomic_fetch_inc(&stream->buf_id); set->frame_id = buf_id % IPU_MAX_FRAME_COUNTER; list_for_each_entry(ib, &bl->head, head) { struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib); ipu7_isys_buf_to_fw_frame_buf_pin(vb, set); } } /* Start streaming for real. The buffer list must be available. 
*/ static int ipu7_isys_stream_start(struct ipu7_isys_video *av, struct ipu7_isys_buffer_list *bl, bool error) { struct ipu7_isys_stream *stream = av->stream; struct device *dev = &stream->isys->adev->auxdev.dev; struct ipu7_isys_buffer_list __bl; int ret; mutex_lock(&stream->isys->stream_mutex); ret = ipu7_isys_video_set_streaming(av, 1, bl); mutex_unlock(&stream->isys->stream_mutex); if (ret) goto out_requeue; stream->streaming = 1; bl = &__bl; do { struct ipu7_insys_buffset *buf = NULL; struct isys_fw_msgs *msg; enum ipu7_insys_send_type send_type = IPU_INSYS_SEND_TYPE_STREAM_CAPTURE; ret = buffer_list_get(stream, bl); if (ret < 0) break; msg = ipu7_get_fw_msg_buf(stream); if (!msg) return -ENOMEM; buf = &msg->fw_msg.frame; ipu7_isys_buffer_to_fw_frame_buff(buf, stream, bl); ipu7_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins); ipu7_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); ret = ipu7_fw_isys_complex_cmd(stream->isys, stream->stream_handle, buf, msg->dma_addr, sizeof(*buf), send_type); } while (!WARN_ON(ret)); return 0; out_requeue: if (bl && bl->nbufs) ipu7_isys_buffer_list_queue(bl, IPU_ISYS_BUFFER_LIST_FL_INCOMING | (error ? IPU_ISYS_BUFFER_LIST_FL_SET_STATE : 0), error ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_QUEUED); flush_firmware_streamon_fail(stream); return ret; } static void buf_queue(struct vb2_buffer *vb) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct vb2_v4l2_buffer *vvb = to_vb2_v4l2_buffer(vb); struct ipu7_isys_video_buffer *ivb = vb2_buffer_to_ipu7_isys_video_buffer(vvb); struct media_pipeline *media_pipe = media_entity_pipeline(&av->vdev.entity); struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_stream *stream = av->stream; struct ipu7_isys_buffer *ib = &ivb->ib; struct ipu7_insys_buffset *buf = NULL; struct ipu7_isys_buffer_list bl; struct isys_fw_msgs *msg; unsigned long flags; dma_addr_t dma; int ret; #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET while (av->isys->in_reset_stop_streaming) { dev_dbg(dev, "buffer: %s: wait for reset stop\n", av->vdev.name); usleep_range(10000, 11000); } /* ip may be cleared in ipu reset */ stream = av->stream; #endif dev_dbg(dev, "queue buffer %u for %s\n", vb->index, av->vdev.name); dma = ivb->dma_addr; dev_dbg(dev, "iova: iova %pad\n", &dma); spin_lock_irqsave(&aq->lock, flags); list_add(&ib->head, &aq->incoming); spin_unlock_irqrestore(&aq->lock, flags); if (!media_pipe || !vb->vb2_queue->start_streaming_called) { dev_dbg(dev, "media pipeline is not ready for %s\n", av->vdev.name); return; } mutex_lock(&stream->mutex); if (stream->nr_streaming != stream->nr_queues) { dev_dbg(dev, "not streaming yet, adding to incoming\n"); goto out; } /* * We just put one buffer to the incoming list of this queue * (above). Let's see whether all queues in the pipeline would * have a buffer. 
*/ ret = buffer_list_get(stream, &bl); if (ret < 0) { dev_dbg(dev, "No buffers available\n"); goto out; } msg = ipu7_get_fw_msg_buf(stream); if (!msg) { ret = -ENOMEM; goto out; } buf = &msg->fw_msg.frame; ipu7_isys_buffer_to_fw_frame_buff(buf, stream, &bl); ipu7_fw_isys_dump_frame_buff_set(dev, buf, stream->nr_output_pins); if (!stream->streaming) { ret = ipu7_isys_stream_start(av, &bl, true); if (ret) dev_err(dev, "stream start failed.\n"); goto out; } /* * We must queue the buffers in the buffer list to the * appropriate video buffer queues BEFORE passing them to the * firmware since we could get a buffer event back before we * have queued them ourselves to the active queue. */ ipu7_isys_buffer_list_queue(&bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); ret = ipu7_fw_isys_complex_cmd(stream->isys, stream->stream_handle, buf, msg->dma_addr, sizeof(*buf), IPU_INSYS_SEND_TYPE_STREAM_CAPTURE); if (ret < 0) dev_err(dev, "send stream capture failed\n"); out: mutex_unlock(&stream->mutex); } static int ipu7_isys_link_fmt_validate(struct ipu7_isys_queue *aq) { struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; struct media_pad *remote_pad = media_pad_remote_pad_first(av->vdev.entity.pads); struct v4l2_mbus_framefmt format; struct v4l2_subdev *sd; u32 r_stream, code; int ret; if (!remote_pad) return -ENOTCONN; sd = media_entity_to_v4l2_subdev(remote_pad->entity); r_stream = ipu7_isys_get_src_stream_by_src_pad(sd, remote_pad->index); ret = ipu7_isys_get_stream_pad_fmt(sd, remote_pad->index, r_stream, &format); if (ret) { dev_dbg(dev, "failed to get %s: pad %d, stream:%d format\n", sd->entity.name, remote_pad->index, r_stream); return ret; } if (format.width != av->pix_fmt.width || format.height != av->pix_fmt.height) { dev_dbg(dev, "wrong width or height %ux%u (%ux%u expected)\n", av->pix_fmt.width, av->pix_fmt.height, format.width, format.height); return -EINVAL; } code = 
ipu7_isys_get_isys_format(av->pix_fmt.pixelformat)->code; if (format.code != code) { dev_dbg(dev, "wrong mbus code 0x%8.8x (0x%8.8x expected)\n", code, format.code); return -EINVAL; } return 0; } static void return_buffers(struct ipu7_isys_queue *aq, enum vb2_buffer_state state) { #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET bool need_reset = false; struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); #endif struct ipu7_isys_buffer *ib; struct vb2_buffer *vb; unsigned long flags; spin_lock_irqsave(&aq->lock, flags); /* * Something went wrong (FW crash / HW hang / not all buffers * returned from isys) if there are still buffers queued in active * queue. We have to clean up places a bit. */ while (!list_empty(&aq->active)) { ib = list_last_entry(&aq->active, struct ipu7_isys_buffer, head); vb = ipu7_isys_buffer_to_vb2_buffer(ib); list_del(&ib->head); spin_unlock_irqrestore(&aq->lock, flags); vb2_buffer_done(vb, state); spin_lock_irqsave(&aq->lock, flags); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET need_reset = true; #endif } while (!list_empty(&aq->incoming)) { ib = list_last_entry(&aq->incoming, struct ipu7_isys_buffer, head); vb = ipu7_isys_buffer_to_vb2_buffer(ib); list_del(&ib->head); spin_unlock_irqrestore(&aq->lock, flags); vb2_buffer_done(vb, state); spin_lock_irqsave(&aq->lock, flags); } spin_unlock_irqrestore(&aq->lock, flags); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET if (need_reset) { mutex_lock(&av->isys->reset_mutex); av->isys->need_reset = true; mutex_unlock(&av->isys->reset_mutex); } #endif } static void ipu7_isys_stream_cleanup(struct ipu7_isys_video *av) { video_device_pipeline_stop(&av->vdev); ipu7_isys_put_stream(av->stream); av->stream = NULL; } static int start_streaming(struct vb2_queue *q, unsigned int count) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; const struct ipu7_isys_pixelformat *pfmt = 
ipu7_isys_get_isys_format(av->pix_fmt.pixelformat); struct ipu7_isys_buffer_list __bl, *bl = NULL; struct ipu7_isys_stream *stream; struct media_entity *source_entity = NULL; int nr_queues, ret; dev_dbg(dev, "stream: %s: width %u, height %u, css pixelformat %u\n", av->vdev.name, av->pix_fmt.width, av->pix_fmt.height, pfmt->css_pixelformat); ret = ipu7_isys_setup_video(av, &source_entity, &nr_queues); if (ret < 0) { dev_dbg(dev, "failed to setup video\n"); goto out_return_buffers; } ret = ipu7_isys_link_fmt_validate(aq); if (ret) { dev_dbg(dev, "%s: link format validation failed (%d)\n", av->vdev.name, ret); goto out_pipeline_stop; } stream = av->stream; mutex_lock(&stream->mutex); if (!stream->nr_streaming) { ret = ipu7_isys_video_prepare_stream(av, source_entity, nr_queues); if (ret) { mutex_unlock(&stream->mutex); goto out_pipeline_stop; } } stream->nr_streaming++; dev_dbg(dev, "queue %u of %u\n", stream->nr_streaming, stream->nr_queues); list_add(&aq->node, &stream->queues); if (stream->nr_streaming != stream->nr_queues) goto out; bl = &__bl; ret = buffer_list_get(stream, bl); if (ret < 0) { dev_warn(dev, "no buffer available, DRIVER BUG?\n"); goto out; } ret = ipu7_isys_fw_open(av->isys); if (ret) goto out_stream_start; ipu7_isys_setup_hw(av->isys); ret = ipu7_isys_stream_start(av, bl, false); if (ret) goto out_isys_fw_close; out: mutex_unlock(&stream->mutex); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET av->start_streaming = 1; #endif return 0; out_isys_fw_close: ipu7_isys_fw_close(av->isys); out_stream_start: list_del(&aq->node); stream->nr_streaming--; mutex_unlock(&stream->mutex); out_pipeline_stop: ipu7_isys_stream_cleanup(av); out_return_buffers: return_buffers(aq, VB2_BUF_STATE_QUEUED); return ret; } #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET static void reset_stop_streaming(struct ipu7_isys_video *av) { struct ipu7_isys_queue *aq = &av->aq; struct ipu7_isys_stream *stream = av->stream; if (stream->nr_streaming == stream->nr_queues && stream->streaming) 
ipu7_isys_video_set_streaming(av, 0, NULL); mutex_unlock(&av->isys->stream_mutex); stream->nr_streaming--; list_del(&aq->node); stream->streaming = 0; mutex_unlock(&stream->mutex); ipu7_isys_stream_cleanup(av); av->isys->in_reset_stop_streaming = true; return_buffers(aq, VB2_BUF_STATE_ERROR); av->isys->in_reset_stop_streaming = false; ipu7_isys_fw_close(av->isys); } static int reset_start_streaming(struct ipu7_isys_video *av) { struct ipu7_isys_queue *aq = &av->aq; struct device *dev = &av->isys->adev->auxdev.dev; unsigned long flags; int ret; dev_dbg(dev, "%s: start streaming\n", av->vdev.name); spin_lock_irqsave(&aq->lock, flags); while (!list_empty(&aq->active)) { struct ipu7_isys_buffer *ib = list_last_entry(&aq->active, struct ipu7_isys_buffer, head); list_del(&ib->head); list_add_tail(&ib->head, &aq->incoming); } spin_unlock_irqrestore(&aq->lock, flags); av->skipframe = 1; ret = start_streaming(&aq->vbq, 0); if (ret) { dev_dbg(dev, "%s: start streaming failed in reset ! set av->start_streaming = 0.\n", av->vdev.name); av->start_streaming = 0; } else av->start_streaming = 1; return ret; } static int ipu_isys_reset(struct ipu7_isys_video *self_av, struct ipu7_isys_stream *self_stream) { struct ipu7_isys *isys = self_av->isys; struct ipu7_bus_device *adev = isys->adev; struct ipu7_isys_video *av = NULL; struct ipu7_isys_stream *stream = NULL; struct device *dev = &adev->auxdev.dev; int i, j; int has_streaming = 0; const struct ipu7_isys_internal_csi2_pdata *csi2_pdata = &isys->pdata->ipdata->csi2; dev_dbg(dev, "%s\n", __func__); mutex_lock(&isys->reset_mutex); if (isys->in_reset) { mutex_unlock(&isys->reset_mutex); return 0; } isys->in_reset = true; while (isys->in_stop_streaming) { dev_dbg(dev, "isys reset: %s: wait for stop\n", self_av->vdev.name); mutex_unlock(&isys->reset_mutex); usleep_range(10000, 11000); mutex_lock(&isys->reset_mutex); } mutex_unlock(&isys->reset_mutex); dev_dbg(dev, "reset stop streams\n"); for (i = 0; i < csi2_pdata->nports; i++) { for 
(j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { av = &isys->csi2[i].av[j]; if (av == self_av) continue; stream = av->stream; if (!stream || stream == self_stream) continue; if (!stream->streaming && !stream->nr_streaming) continue; av->reset = true; has_streaming = true; reset_stop_streaming(av); } } if (!has_streaming) goto end_of_reset; ipu7_cleanup_fw_msg_bufs(isys); dev_dbg(dev, "reset start streams\n"); for (j = 0; j < csi2_pdata->nports; j++) { for (i = 0; i < NR_OF_CSI2_SRC_PADS; i++) { av = &isys->csi2[j].av[i]; if (!av->reset) continue; av->reset = false; reset_start_streaming(av); } } end_of_reset: mutex_lock(&isys->reset_mutex); isys->in_reset = false; mutex_unlock(&isys->reset_mutex); dev_dbg(dev, "reset done\n"); return 0; } #endif static void stop_streaming(struct vb2_queue *q) { struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(q); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct ipu7_isys_stream *stream = av->stream; #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET struct device *dev = &av->isys->adev->auxdev.dev; bool need_reset; dev_dbg(dev, "stop: %s: enter\n", av->vdev.name); mutex_lock(&av->isys->reset_mutex); while (av->isys->in_reset || av->isys->in_stop_streaming) { mutex_unlock(&av->isys->reset_mutex); dev_dbg(dev, "stop: %s: wait for in_reset = %d\n", av->vdev.name, av->isys->in_reset); dev_dbg(dev, "stop: %s: wait for in_stop = %d\n", av->vdev.name, av->isys->in_stop_streaming); usleep_range(10000, 11000); mutex_lock(&av->isys->reset_mutex); } if (!av->start_streaming) { mutex_unlock(&av->isys->reset_mutex); return; } av->isys->in_stop_streaming = true; mutex_unlock(&av->isys->reset_mutex); stream = av->stream; if (!stream) { dev_err(dev, "stop: %s: ip cleard!\n", av->vdev.name); return_buffers(aq, VB2_BUF_STATE_ERROR); mutex_lock(&av->isys->reset_mutex); av->isys->in_stop_streaming = false; mutex_unlock(&av->isys->reset_mutex); return; } #endif mutex_lock(&stream->mutex); mutex_lock(&av->isys->stream_mutex); if (stream->nr_streaming 
== stream->nr_queues && stream->streaming) ipu7_isys_video_set_streaming(av, 0, NULL); mutex_unlock(&av->isys->stream_mutex); stream->nr_streaming--; list_del(&aq->node); stream->streaming = 0; mutex_unlock(&stream->mutex); ipu7_isys_stream_cleanup(av); return_buffers(aq, VB2_BUF_STATE_ERROR); ipu7_isys_fw_close(av->isys); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET av->start_streaming = 0; mutex_lock(&av->isys->reset_mutex); av->isys->in_stop_streaming = false; need_reset = av->isys->need_reset; mutex_unlock(&av->isys->reset_mutex); if (need_reset) { if (!stream->nr_streaming) { ipu_isys_reset(av, stream); } else { mutex_lock(&av->isys->reset_mutex); av->isys->need_reset = false; mutex_unlock(&av->isys->reset_mutex); } } dev_dbg(dev, "stop: %s: exit\n", av->vdev.name); #endif } static unsigned int get_sof_sequence_by_timestamp(struct ipu7_isys_stream *stream, u64 time) { struct ipu7_isys *isys = stream->isys; struct device *dev = &isys->adev->auxdev.dev; unsigned int i; /* * The timestamp is invalid as no TSC in some FPGA platform, * so get the sequence from pipeline directly in this case. 
*/ if (time == 0) return atomic_read(&stream->sequence) - 1; for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++) if (time == stream->seq[i].timestamp) { dev_dbg(dev, "SOF: using seq nr %u for ts %llu\n", stream->seq[i].sequence, time); return stream->seq[i].sequence; } dev_dbg(dev, "SOF: looking for %llu\n", time); for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++) dev_dbg(dev, "SOF: sequence %u, timestamp value %llu\n", stream->seq[i].sequence, stream->seq[i].timestamp); dev_dbg(dev, "SOF sequence number not found\n"); return atomic_read(&stream->sequence) - 1; } static u64 get_sof_ns_delta(struct ipu7_isys_video *av, u64 time) { struct ipu7_bus_device *adev = av->isys->adev; struct ipu7_device *isp = adev->isp; u64 delta, tsc_now; ipu_buttress_tsc_read(isp, &tsc_now); if (!tsc_now) return 0; delta = tsc_now - time; return ipu_buttress_tsc_ticks_to_ns(delta, isp); } static void ipu7_isys_buf_calc_sequence_time(struct ipu7_isys_buffer *ib, u64 time) { struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib); struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct ipu7_isys_queue *aq = vb2_queue_to_isys_queue(vb->vb2_queue); struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_stream *stream = av->stream; u64 ns; u32 sequence; ns = ktime_get_ns() - get_sof_ns_delta(av, time); sequence = get_sof_sequence_by_timestamp(stream, time); vbuf->vb2_buf.timestamp = ns; vbuf->sequence = sequence; dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n", av->vdev.name, ktime_get_ns(), sequence); dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n", vb->index, vbuf->vb2_buf.timestamp); } static void ipu7_isys_queue_buf_done(struct ipu7_isys_buffer *ib) { struct vb2_buffer *vb = ipu7_isys_buffer_to_vb2_buffer(ib); if (atomic_read(&ib->str2mmio_flag)) { vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); /* * Operation on buffer is ended with error and will be reported * to the userspace when it is de-queued 
*/ atomic_set(&ib->str2mmio_flag, 0); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET } else if (atomic_read(&ib->skipframe_flag)) { vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); atomic_set(&ib->skipframe_flag, 0); #endif } else { vb2_buffer_done(vb, VB2_BUF_STATE_DONE); } } void ipu7_isys_queue_buf_ready(struct ipu7_isys_stream *stream, struct ipu7_insys_resp *info) { struct ipu7_isys_queue *aq = stream->output_pins[info->pin_id].aq; u64 time = ((u64)info->timestamp[1] << 32 | info->timestamp[0]); struct ipu7_isys *isys = stream->isys; struct device *dev = &isys->adev->auxdev.dev; struct ipu7_isys_buffer *ib; struct vb2_buffer *vb; unsigned long flags; bool first = true; struct vb2_v4l2_buffer *buf; dev_dbg(dev, "buffer: %s: received buffer %8.8x %d\n", ipu7_isys_queue_to_video(aq)->vdev.name, info->pin.addr, info->frame_id); spin_lock_irqsave(&aq->lock, flags); if (list_empty(&aq->active)) { spin_unlock_irqrestore(&aq->lock, flags); dev_err(dev, "active queue empty\n"); return; } list_for_each_entry_reverse(ib, &aq->active, head) { struct ipu7_isys_video_buffer *ivb; struct vb2_v4l2_buffer *vvb; dma_addr_t addr; vb = ipu7_isys_buffer_to_vb2_buffer(ib); vvb = to_vb2_v4l2_buffer(vb); ivb = vb2_buffer_to_ipu7_isys_video_buffer(vvb); addr = ivb->dma_addr; if (info->pin.addr != addr) { if (first) dev_err(dev, "Unexpected buffer address %pad\n", &addr); first = false; continue; } dev_dbg(dev, "buffer: found buffer %pad\n", &addr); buf = to_vb2_v4l2_buffer(vb); buf->field = V4L2_FIELD_NONE; list_del(&ib->head); spin_unlock_irqrestore(&aq->lock, flags); ipu7_isys_buf_calc_sequence_time(ib, time); ipu7_isys_queue_buf_done(ib); return; } dev_err(dev, "Failed to find a matching video buffer\n"); spin_unlock_irqrestore(&aq->lock, flags); } static const struct vb2_ops ipu7_isys_queue_ops = { .queue_setup = ipu7_isys_queue_setup, #if LINUX_VERSION_CODE <= KERNEL_VERSION(6, 12, 255) .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, #endif .buf_init = 
ipu7_isys_buf_init, .buf_prepare = ipu7_isys_buf_prepare, .buf_cleanup = ipu7_isys_buf_cleanup, .start_streaming = start_streaming, .stop_streaming = stop_streaming, .buf_queue = buf_queue, }; int ipu7_isys_queue_init(struct ipu7_isys_queue *aq) { struct ipu7_isys *isys = ipu7_isys_queue_to_video(aq)->isys; struct ipu7_isys_video *av = ipu7_isys_queue_to_video(aq); struct ipu7_bus_device *adev = isys->adev; int ret; if (!aq->vbq.io_modes) aq->vbq.io_modes = VB2_MMAP | VB2_DMABUF; aq->vbq.drv_priv = isys; aq->vbq.ops = &ipu7_isys_queue_ops; aq->vbq.lock = &av->mutex; aq->vbq.mem_ops = &vb2_dma_sg_memops; aq->vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; aq->vbq.min_queued_buffers = 1; aq->vbq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; ret = vb2_queue_init(&aq->vbq); if (ret) return ret; aq->dev = &adev->auxdev.dev; aq->vbq.dev = &adev->isp->pdev->dev; spin_lock_init(&aq->lock); INIT_LIST_HEAD(&aq->active); INIT_LIST_HEAD(&aq->incoming); return 0; } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-queue.h000066400000000000000000000037151503071307200275230ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_ISYS_QUEUE_H #define IPU7_ISYS_QUEUE_H #include #include #include #include #include struct device; struct ipu7_isys_stream; struct ipu7_insys_resp; struct ipu7_insys_buffset; struct ipu7_isys_queue { struct vb2_queue vbq; struct list_head node; struct device *dev; spinlock_t lock; struct list_head active; struct list_head incoming; unsigned int fw_output; }; struct ipu7_isys_buffer { struct list_head head; atomic_t str2mmio_flag; #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET atomic_t skipframe_flag; #endif }; struct ipu7_isys_video_buffer { struct vb2_v4l2_buffer vb_v4l2; struct ipu7_isys_buffer ib; dma_addr_t dma_addr; }; #define IPU_ISYS_BUFFER_LIST_FL_INCOMING BIT(0) #define IPU_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1) #define 
IPU_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2) struct ipu7_isys_buffer_list { struct list_head head; unsigned int nbufs; }; #define vb2_queue_to_isys_queue(__vb2) \ container_of(__vb2, struct ipu7_isys_queue, vbq) #define ipu7_isys_to_isys_video_buffer(__ib) \ container_of(__ib, struct ipu7_isys_video_buffer, ib) #define vb2_buffer_to_ipu7_isys_video_buffer(__vvb) \ container_of(__vvb, struct ipu7_isys_video_buffer, vb_v4l2) #define ipu7_isys_buffer_to_vb2_buffer(__ib) \ (&ipu7_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf) void ipu7_isys_buffer_list_queue(struct ipu7_isys_buffer_list *bl, unsigned long op_flags, enum vb2_buffer_state state); void ipu7_isys_buffer_to_fw_frame_buff(struct ipu7_insys_buffset *set, struct ipu7_isys_stream *stream, struct ipu7_isys_buffer_list *bl); void ipu7_isys_queue_buf_ready(struct ipu7_isys_stream *stream, struct ipu7_insys_resp *info); int ipu7_isys_queue_init(struct ipu7_isys_queue *aq); #endif /* IPU7_ISYS_QUEUE_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-subdev.c000066400000000000000000000222521503071307200276570ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include "ipu7-bus.h" #include "ipu7-isys.h" #include "ipu7-isys-subdev.h" unsigned int ipu7_isys_mbus_code_to_bpp(u32 code) { switch (code) { case MEDIA_BUS_FMT_RGB888_1X24: return 24; case MEDIA_BUS_FMT_YUYV10_1X20: return 20; case MEDIA_BUS_FMT_Y10_1X10: case MEDIA_BUS_FMT_RGB565_1X16: case MEDIA_BUS_FMT_UYVY8_1X16: case MEDIA_BUS_FMT_YUYV8_1X16: return 16; case MEDIA_BUS_FMT_SBGGR12_1X12: case MEDIA_BUS_FMT_SGBRG12_1X12: case MEDIA_BUS_FMT_SGRBG12_1X12: case MEDIA_BUS_FMT_SRGGB12_1X12: return 12; case MEDIA_BUS_FMT_SBGGR10_1X10: case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SRGGB10_1X10: return 10; case MEDIA_BUS_FMT_SBGGR8_1X8: case 
MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SGRBG8_1X8: case MEDIA_BUS_FMT_SRGGB8_1X8: return 8; default: WARN_ON(1); return -EINVAL; } } unsigned int ipu7_isys_mbus_code_to_mipi(u32 code) { switch (code) { case MEDIA_BUS_FMT_RGB565_1X16: return MIPI_CSI2_DT_RGB565; case MEDIA_BUS_FMT_RGB888_1X24: return MIPI_CSI2_DT_RGB888; case MEDIA_BUS_FMT_YUYV10_1X20: return MIPI_CSI2_DT_YUV422_10B; case MEDIA_BUS_FMT_UYVY8_1X16: case MEDIA_BUS_FMT_YUYV8_1X16: return MIPI_CSI2_DT_YUV422_8B; case MEDIA_BUS_FMT_SBGGR12_1X12: case MEDIA_BUS_FMT_SGBRG12_1X12: case MEDIA_BUS_FMT_SGRBG12_1X12: case MEDIA_BUS_FMT_SRGGB12_1X12: return MIPI_CSI2_DT_RAW12; case MEDIA_BUS_FMT_Y10_1X10: case MEDIA_BUS_FMT_SBGGR10_1X10: case MEDIA_BUS_FMT_SGBRG10_1X10: case MEDIA_BUS_FMT_SGRBG10_1X10: case MEDIA_BUS_FMT_SRGGB10_1X10: return MIPI_CSI2_DT_RAW10; case MEDIA_BUS_FMT_SBGGR8_1X8: case MEDIA_BUS_FMT_SGBRG8_1X8: case MEDIA_BUS_FMT_SGRBG8_1X8: case MEDIA_BUS_FMT_SRGGB8_1X8: return MIPI_CSI2_DT_RAW8; default: WARN_ON(1); return -EINVAL; } } bool ipu7_isys_is_bayer_format(u32 code) { switch (ipu7_isys_mbus_code_to_mipi(code)) { case MIPI_CSI2_DT_RAW8: case MIPI_CSI2_DT_RAW10: case MIPI_CSI2_DT_RAW12: case MIPI_CSI2_DT_RAW14: case MIPI_CSI2_DT_RAW16: case MIPI_CSI2_DT_RAW20: case MIPI_CSI2_DT_RAW24: case MIPI_CSI2_DT_RAW28: return true; default: return false; } } u32 ipu7_isys_convert_bayer_order(u32 code, int x, int y) { static const u32 code_map[] = { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SBGGR12_1X12, }; unsigned int i; for (i = 0; i < ARRAY_SIZE(code_map); i++) if (code_map[i] == code) break; if (WARN_ON(i == ARRAY_SIZE(code_map))) return code; return code_map[i ^ ((((u32)y & 1U) << 1U) | ((u32)x & 1U))]; } int 
ipu7_isys_subdev_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); u32 code = asd->supported_codes[0]; struct v4l2_mbus_framefmt *fmt; u32 other_pad, other_stream; struct v4l2_rect *crop; unsigned int i; int ret; /* No transcoding, source and sink formats must match. */ if ((sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SOURCE) && sd->entity.num_pads > 1) return v4l2_subdev_get_fmt(sd, state, format); format->format.width = clamp(format->format.width, IPU_ISYS_MIN_WIDTH, IPU_ISYS_MAX_WIDTH); format->format.height = clamp(format->format.height, IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT); for (i = 0; asd->supported_codes[i]; i++) { if (asd->supported_codes[i] == format->format.code) { code = asd->supported_codes[i]; break; } } format->format.code = code; format->format.field = V4L2_FIELD_NONE; /* Store the format and propagate it to the source pad. */ fmt = v4l2_subdev_state_get_format(state, format->pad, format->stream); if (!fmt) return -EINVAL; *fmt = format->format; if (!(sd->entity.pads[format->pad].flags & MEDIA_PAD_FL_SINK)) return 0; /* propagate format to following source pad */ fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad, format->stream); if (!fmt) return -EINVAL; *fmt = format->format; ret = v4l2_subdev_routing_find_opposite_end(&state->routing, format->pad, format->stream, &other_pad, &other_stream); if (ret) return -EINVAL; crop = v4l2_subdev_state_get_crop(state, other_pad, other_stream); /* reset crop */ crop->left = 0; crop->top = 0; crop->width = fmt->width; crop->height = fmt->height; return 0; } int ipu7_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code) { struct ipu7_isys_subdev *asd = to_ipu7_isys_subdev(sd); const u32 *supported_codes = asd->supported_codes; u32 index; for (index = 0; supported_codes[index]; index++) { if (index == 
code->index) { code->code = supported_codes[index]; return 0; } } return -EINVAL; } static int subdev_set_routing(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_krouting *routing) { static const struct v4l2_mbus_framefmt fmt = { .width = 4096, .height = 3072, .code = MEDIA_BUS_FMT_SGRBG10_1X10, .field = V4L2_FIELD_NONE, }; int ret; ret = v4l2_subdev_routing_validate(sd, routing, V4L2_SUBDEV_ROUTING_ONLY_1_TO_1); if (ret) return ret; return v4l2_subdev_set_routing_with_fmt(sd, state, routing, &fmt); } int ipu7_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, struct v4l2_mbus_framefmt *format) { struct v4l2_subdev_state *state; struct v4l2_mbus_framefmt *fmt; if (!sd || !format) return -EINVAL; state = v4l2_subdev_lock_and_get_active_state(sd); fmt = v4l2_subdev_state_get_format(state, pad, stream); if (fmt) *format = *fmt; v4l2_subdev_unlock_state(state); return fmt ? 0 : -EINVAL; } u32 ipu7_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad) { struct v4l2_subdev_state *state; struct v4l2_subdev_route *routes; u32 source_stream = 0; unsigned int i; state = v4l2_subdev_lock_and_get_active_state(sd); if (!state) return 0; routes = state->routing.routes; for (i = 0; i < state->routing.num_routes; i++) { if (routes[i].source_pad == pad) { source_stream = routes[i].source_stream; break; } } v4l2_subdev_unlock_state(state); return source_stream; } static int ipu7_isys_subdev_init_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state) { struct v4l2_subdev_route route = { .sink_pad = 0, .sink_stream = 0, .source_pad = 1, .source_stream = 0, .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, }; struct v4l2_subdev_krouting routing = { .num_routes = 1, .routes = &route, }; return subdev_set_routing(sd, state, &routing); } int ipu7_isys_subdev_set_routing(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, enum v4l2_subdev_format_whence which, struct v4l2_subdev_krouting *routing) { return subdev_set_routing(sd, 
state, routing); } static const struct v4l2_subdev_internal_ops ipu7_isys_subdev_internal_ops = { .init_state = ipu7_isys_subdev_init_state, }; int ipu7_isys_subdev_init(struct ipu7_isys_subdev *asd, const struct v4l2_subdev_ops *ops, unsigned int nr_ctrls, unsigned int num_sink_pads, unsigned int num_source_pads) { unsigned int num_pads = num_sink_pads + num_source_pads; unsigned int i; int ret; v4l2_subdev_init(&asd->sd, ops); asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_STREAMS; asd->sd.owner = THIS_MODULE; asd->sd.dev = &asd->isys->adev->auxdev.dev; asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; asd->sd.internal_ops = &ipu7_isys_subdev_internal_ops; asd->pad = devm_kcalloc(&asd->isys->adev->auxdev.dev, num_pads, sizeof(*asd->pad), GFP_KERNEL); if (!asd->pad) return -ENOMEM; for (i = 0; i < num_sink_pads; i++) asd->pad[i].flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; for (i = num_sink_pads; i < num_pads; i++) asd->pad[i].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad); if (ret) { pr_err("isys subdev init failed %d.\n", ret); return ret; } if (asd->ctrl_init) { ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls); if (ret) goto out_media_entity_cleanup; asd->ctrl_init(&asd->sd); if (asd->ctrl_handler.error) { ret = asd->ctrl_handler.error; goto out_v4l2_ctrl_handler_free; } asd->sd.ctrl_handler = &asd->ctrl_handler; } asd->source = -1; return 0; out_v4l2_ctrl_handler_free: v4l2_ctrl_handler_free(&asd->ctrl_handler); out_media_entity_cleanup: media_entity_cleanup(&asd->sd.entity); return ret; } void ipu7_isys_subdev_cleanup(struct ipu7_isys_subdev *asd) { media_entity_cleanup(&asd->sd.entity); v4l2_ctrl_handler_free(&asd->ctrl_handler); } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-subdev.h000066400000000000000000000035211503071307200276620ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * 
Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_ISYS_SUBDEV_H #define IPU7_ISYS_SUBDEV_H #include #include #include #include struct ipu7_isys; struct ipu7_isys_subdev { struct v4l2_subdev sd; struct ipu7_isys *isys; u32 const *supported_codes; struct media_pad *pad; struct v4l2_ctrl_handler ctrl_handler; void (*ctrl_init)(struct v4l2_subdev *sd); int source; /* SSI stream source; -1 if unset */ #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC bool is_tpg; #endif }; #define to_ipu7_isys_subdev(__sd) \ container_of(__sd, struct ipu7_isys_subdev, sd) unsigned int ipu7_isys_mbus_code_to_bpp(u32 code); unsigned int ipu7_isys_mbus_code_to_mipi(u32 code); bool ipu7_isys_is_bayer_format(u32 code); u32 ipu7_isys_convert_bayer_order(u32 code, int x, int y); int ipu7_isys_subdev_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format); int ipu7_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code); u32 ipu7_isys_get_src_stream_by_src_pad(struct v4l2_subdev *sd, u32 pad); int ipu7_isys_get_stream_pad_fmt(struct v4l2_subdev *sd, u32 pad, u32 stream, struct v4l2_mbus_framefmt *format); int ipu7_isys_subdev_set_routing(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, enum v4l2_subdev_format_whence which, struct v4l2_subdev_krouting *routing); int ipu7_isys_subdev_init(struct ipu7_isys_subdev *asd, const struct v4l2_subdev_ops *ops, unsigned int nr_ctrls, unsigned int num_sink_pads, unsigned int num_source_pads); void ipu7_isys_subdev_cleanup(struct ipu7_isys_subdev *asd); #endif /* IPU7_ISYS_SUBDEV_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-tpg.c000066400000000000000000000432421503071307200271630ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include 
"ipu7-buttress-regs.h" #include "ipu7-isys.h" #include "ipu7-isys-subdev.h" #include "ipu7-isys-tpg.h" #include "ipu7-isys-video.h" #include "ipu7-isys-csi2-regs.h" #include "ipu7-platform-regs.h" static const u32 tpg_supported_codes[] = { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SRGGB12_1X12, 0, }; #define IPU_ISYS_FREQ 533000000UL static const struct v4l2_subdev_video_ops tpg_sd_video_ops = { .s_stream = tpg_set_stream, }; static int ipu7_isys_tpg_s_ctrl(struct v4l2_ctrl *ctrl) { struct ipu7_isys_tpg *tpg = container_of(container_of(ctrl->handler, struct ipu7_isys_subdev, ctrl_handler), struct ipu7_isys_tpg, asd); switch (ctrl->id) { case V4L2_CID_HBLANK: writel(ctrl->val, tpg->base + MGC_MG_HBLANK); break; case V4L2_CID_VBLANK: writel(ctrl->val, tpg->base + MGC_MG_VBLANK); break; case V4L2_CID_TEST_PATTERN: writel(ctrl->val, tpg->base + MGC_MG_TPG_MODE); break; } return 0; } static const struct v4l2_ctrl_ops ipu7_isys_tpg_ctrl_ops = { .s_ctrl = ipu7_isys_tpg_s_ctrl, }; static u64 ipu7_isys_tpg_rate(struct ipu7_isys_tpg *tpg, unsigned int bpp) { return MGC_PPC * IPU_ISYS_FREQ / bpp; } static const char *const tpg_mode_items[] = { "Ramp", "Checkerboard", "Monochrome per frame", "Color palette", }; static struct v4l2_ctrl_config tpg_mode = { .ops = &ipu7_isys_tpg_ctrl_ops, .id = V4L2_CID_TEST_PATTERN, .name = "Test Pattern", .type = V4L2_CTRL_TYPE_MENU, .min = TPG_MODE_RAMP, .max = ARRAY_SIZE(tpg_mode_items) - 1, .def = TPG_MODE_COLOR_PALETTE, .menu_skip_mask = 0x2, .qmenu = tpg_mode_items, }; static void ipu7_isys_tpg_init_controls(struct v4l2_subdev *sd) { struct ipu7_isys_tpg *tpg = to_ipu7_isys_tpg(sd); int hblank; u64 default_pixel_rate; hblank = 1024; tpg->hblank = 
v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, &ipu7_isys_tpg_ctrl_ops, V4L2_CID_HBLANK, 8, 65535, 1, hblank); tpg->vblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, &ipu7_isys_tpg_ctrl_ops, V4L2_CID_VBLANK, 8, 65535, 1, 1024); default_pixel_rate = ipu7_isys_tpg_rate(tpg, 8); tpg->pixel_rate = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler, &ipu7_isys_tpg_ctrl_ops, V4L2_CID_PIXEL_RATE, default_pixel_rate, default_pixel_rate, 1, default_pixel_rate); if (tpg->pixel_rate) { tpg->pixel_rate->cur.val = default_pixel_rate; tpg->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY; } v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &tpg_mode, NULL); } static int tpg_sd_init_cfg(struct v4l2_subdev *sd, struct v4l2_subdev_state *state) { struct v4l2_subdev_route routes[] = { { .source_pad = 0, .source_stream = 0, .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE, } }; struct v4l2_subdev_krouting routing = { .num_routes = 1, .routes = routes, }; static const struct v4l2_mbus_framefmt format = { .width = 1920, .height = 1080, .code = MEDIA_BUS_FMT_SBGGR10_1X10, .field = V4L2_FIELD_NONE, }; return v4l2_subdev_set_routing_with_fmt(sd, state, &routing, &format); } static const struct v4l2_subdev_internal_ops ipu7_isys_tpg_internal_ops = { .init_state = tpg_sd_init_cfg, }; static const struct v4l2_subdev_pad_ops tpg_sd_pad_ops = { .get_fmt = v4l2_subdev_get_fmt, .set_fmt = ipu7_isys_subdev_set_fmt, .enum_mbus_code = ipu7_isys_subdev_enum_mbus_code, }; static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_FRAME_SYNC: return v4l2_event_subscribe(fh, sub, 10, NULL); case V4L2_EVENT_CTRL: return v4l2_ctrl_subscribe_event(fh, sub); default: return -EINVAL; } }; /* V4L2 subdev core operations */ static const struct v4l2_subdev_core_ops tpg_sd_core_ops = { .subscribe_event = subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; static const struct v4l2_subdev_ops tpg_sd_ops = { .core = &tpg_sd_core_ops, .video 
= &tpg_sd_video_ops, .pad = &tpg_sd_pad_ops, }; static struct media_entity_operations tpg_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; void ipu7_isys_tpg_sof_event_by_stream(struct ipu7_isys_stream *stream) { struct ipu7_isys_tpg *tpg = ipu7_isys_subdev_to_tpg(stream->asd); struct video_device *vdev = tpg->asd.sd.devnode; struct v4l2_event ev = { .type = V4L2_EVENT_FRAME_SYNC, }; ev.u.frame_sync.frame_sequence = atomic_fetch_inc(&stream->sequence); v4l2_event_queue(vdev, &ev); dev_dbg(&stream->isys->adev->auxdev.dev, "sof_event::tpg-%i sequence: %i\n", tpg->index, ev.u.frame_sync.frame_sequence); } void ipu7_isys_tpg_eof_event_by_stream(struct ipu7_isys_stream *stream) { struct ipu7_isys_tpg *tpg = ipu7_isys_subdev_to_tpg(stream->asd); u32 frame_sequence = atomic_read(&stream->sequence); dev_dbg(&stream->isys->adev->auxdev.dev, "eof_event::tpg-%i sequence: %i\n", tpg->index, frame_sequence); } #define DEFAULT_VC_ID 0 static bool is_metadata_enabled(const struct ipu7_isys_tpg *tpg) { return false; } static void ipu7_mipigen_regdump(const struct ipu7_isys_tpg *tpg, void __iomem *mg_base) { struct device *dev = &tpg->isys->adev->auxdev.dev; dev_dbg(dev, "---------MGC REG DUMP START----------"); dev_dbg(dev, "MGC RX_TYPE_REG 0x%x = 0x%x", MGC_MG_CSI_ADAPT_LAYER_TYPE, readl(mg_base + MGC_MG_CSI_ADAPT_LAYER_TYPE)); dev_dbg(dev, "MGC MG_MODE_REG 0x%x = 0x%x", MGC_MG_MODE, readl(mg_base + MGC_MG_MODE)); dev_dbg(dev, "MGC MIPI_VC_REG 0x%x = 0x%x", MGC_MG_MIPI_VC, readl(mg_base + MGC_MG_MIPI_VC)); dev_dbg(dev, "MGC MIPI_DTYPES_REG 0x%x = 0x%x", MGC_MG_MIPI_DTYPES, readl(mg_base + MGC_MG_MIPI_DTYPES)); dev_dbg(dev, "MGC MULTI_DTYPES_REG 0x%x = 0x%x", MGC_MG_MULTI_DTYPES_MODE, readl(mg_base + MGC_MG_MULTI_DTYPES_MODE)); dev_dbg(dev, "MGC NOF_FRAMES_REG 0x%x = 0x%x", MGC_MG_NOF_FRAMES, readl(mg_base + MGC_MG_NOF_FRAMES)); dev_dbg(dev, "MGC FRAME_DIM_REG 0x%x = 0x%x", MGC_MG_FRAME_DIM, readl(mg_base + MGC_MG_FRAME_DIM)); dev_dbg(dev, "MGC HBLANK_REG 0x%x = 
0x%x", MGC_MG_HBLANK, readl(mg_base + MGC_MG_HBLANK)); dev_dbg(dev, "MGC VBLANK_REG 0x%x = 0x%x", MGC_MG_VBLANK, readl(mg_base + MGC_MG_VBLANK)); dev_dbg(dev, "MGC TPG_MODE_REG 0x%x = 0x%x", MGC_MG_TPG_MODE, readl(mg_base + MGC_MG_TPG_MODE)); dev_dbg(dev, "MGC R0=0x%x G0=0x%x B0=0x%x", readl(mg_base + MGC_MG_TPG_R0), readl(mg_base + MGC_MG_TPG_G0), readl(mg_base + MGC_MG_TPG_B0)); dev_dbg(dev, "MGC R1=0x%x G1=0x%x B1=0x%x", readl(mg_base + MGC_MG_TPG_R1), readl(mg_base + MGC_MG_TPG_G1), readl(mg_base + MGC_MG_TPG_B1)); dev_dbg(dev, "MGC TPG_MASKS_REG 0x%x = 0x%x", MGC_MG_TPG_MASKS, readl(mg_base + MGC_MG_TPG_MASKS)); dev_dbg(dev, "MGC TPG_XY_MASK_REG 0x%x = 0x%x", MGC_MG_TPG_XY_MASK, readl(mg_base + MGC_MG_TPG_XY_MASK)); dev_dbg(dev, "MGC TPG_TILE_DIM_REG 0x%x = 0x%x", MGC_MG_TPG_TILE_DIM, readl(mg_base + MGC_MG_TPG_TILE_DIM)); dev_dbg(dev, "MGC DTO_SPEED_CTRL_EN_REG 0x%x = 0x%x", MGC_MG_DTO_SPEED_CTRL_EN, readl(mg_base + MGC_MG_DTO_SPEED_CTRL_EN)); dev_dbg(dev, "MGC DTO_SPEED_CTRL_INCR_VAL_REG 0x%x = 0x%x", MGC_MG_DTO_SPEED_CTRL_INCR_VAL, readl(mg_base + MGC_MG_DTO_SPEED_CTRL_INCR_VAL)); dev_dbg(dev, "MGC MG_FRAME_NUM_STTS 0x%x = 0x%x", MGC_MG_FRAME_NUM_STTS, readl(mg_base + MGC_MG_FRAME_NUM_STTS)); dev_dbg(dev, "---------MGC REG DUMP END----------"); } #define TPG_STOP_TIMEOUT 500000 static int tpg_stop_stream(const struct ipu7_isys_tpg *tpg) { struct device *dev = &tpg->isys->adev->auxdev.dev; int ret; unsigned int port; u32 status; void __iomem *reg; void __iomem *mgc_base = tpg->isys->pdata->base + IS_IO_MGC_BASE; void __iomem *mg_base = tpg->base; port = 1 << tpg->index; dev_dbg(dev, "MG%d generated %u frames", tpg->index, readl(mgc_base + MGC_MG_FRAME_NUM_STTS)); writel(port, mgc_base + MGC_ASYNC_STOP); dev_dbg(dev, "wait for MG%d stop", tpg->index); reg = mg_base + MGC_MG_STOPPED_STTS; ret = readl_poll_timeout(reg, status, status & 0x1, 200, TPG_STOP_TIMEOUT); if (ret < 0) { dev_err(dev, "mgc stop timeout"); return ret; } dev_dbg(dev, "MG%d STOPPED", 
tpg->index); return 0; } #define IS_IO_CLK (IPU7_IS_FREQ_CTL_DEFAULT_RATIO * 100 / 6) #define TPG_FRAME_RATE 30 #define TPG_BLANK_RATIO (4 / 3) static void tpg_get_timing(const struct ipu7_isys_tpg *tpg, u32 *dto, u32 *hblank_cycles, u32 *vblank_cycles) { struct v4l2_mbus_framefmt format; u32 width, height; u32 code; u32 bpp; u32 bits_per_line; u64 line_time_ns, frame_time_us, cycles, ns_per_cycle, rate; u64 vblank_us, hblank_us; u32 ref_clk; struct device *dev = &tpg->isys->adev->auxdev.dev; u32 dto_incr_val = 0x100; int ret; ret = ipu7_isys_get_stream_pad_fmt((struct v4l2_subdev *)&tpg->asd.sd, 0, 0, &format); if (ret) return; width = format.width; height = format.height; code = format.code; bpp = ipu7_isys_mbus_code_to_bpp(code); dev_dbg(dev, "MG%d code = 0x%x bpp = %u\n", tpg->index, code, bpp); bits_per_line = width * bpp * TPG_BLANK_RATIO; cycles = div_u64(bits_per_line, 64); dev_dbg(dev, "MG%d bits_per_line = %u cycles = %llu\n", tpg->index, bits_per_line, cycles); do { dev_dbg(dev, "MG%d try dto_incr_val 0x%x\n", tpg->index, dto_incr_val); rate = div_u64(1 << 16, dto_incr_val); ns_per_cycle = div_u64(rate * 1000, IS_IO_CLK); dev_dbg(dev, "MG%d ns_per_cycles = %llu\n", tpg->index, ns_per_cycle); line_time_ns = cycles * ns_per_cycle; frame_time_us = line_time_ns * height / 1000; dev_dbg(dev, "MG%d line_time_ns = %llu frame_time_us = %llu\n", tpg->index, line_time_ns, frame_time_us); if (frame_time_us * TPG_FRAME_RATE < USEC_PER_SEC) break; /* dto incr val step 0x100 */ dto_incr_val += 0x100; } while (dto_incr_val < (1 << 16)); if (dto_incr_val >= (1 << 16)) { dev_warn(dev, "No DTO_INCR_VAL found\n"); hblank_us = 10; /* 10us */ vblank_us = 10000; /* 10ms */ dto_incr_val = 0x1000; } else { hblank_us = line_time_ns * (TPG_BLANK_RATIO - 1) / 1000; vblank_us = div_u64(1000000, TPG_FRAME_RATE) - frame_time_us; } dev_dbg(dev, "hblank_us = %llu, vblank_us = %llu dto_incr_val = %u\n", hblank_us, vblank_us, dto_incr_val); ref_clk = 
tpg->isys->adev->isp->buttress.ref_clk; *dto = dto_incr_val; *hblank_cycles = hblank_us * ref_clk / 10; *vblank_cycles = vblank_us * ref_clk / 10; dev_dbg(dev, "hblank_cycles = %u, vblank_cycles = %u\n", *hblank_cycles, *vblank_cycles); } static int tpg_start_stream(const struct ipu7_isys_tpg *tpg) { struct v4l2_mbus_framefmt format; u32 port_map; u32 csi_port; u32 code, bpp; u32 width, height; u32 dto, hblank, vblank; struct device *dev = &tpg->isys->adev->auxdev.dev; void __iomem *mgc_base = tpg->isys->pdata->base + IS_IO_MGC_BASE; void __iomem *mg_base = tpg->base; int ret; ret = ipu7_isys_get_stream_pad_fmt((struct v4l2_subdev *)&tpg->asd.sd, 0, 0, &format); if (ret) return ret; width = format.width; height = format.height; code = format.code; dev_dbg(dev, "MG%d code: 0x%x resolution: %ux%u\n", tpg->index, code, width, height); bpp = ipu7_isys_mbus_code_to_bpp(code); csi_port = tpg->index; if (csi_port >= 4) dev_err(dev, "invalid tpg index %u\n", tpg->index); dev_dbg(dev, "INSYS MG%d was mapped to CSI%d\n", DEFAULT_VC_ID, csi_port); /* config port map * TODO: add VC support and TPG with multiple * source pads. Currently, for simplicity, only map 1 mg to 1 csi port */ port_map = 1 << tpg->index; writel(port_map, mgc_base + MGC_CSI_PORT_MAP(csi_port)); /* configure adapt layer type */ writel(1, mg_base + MGC_MG_CSI_ADAPT_LAYER_TYPE); /* configure MGC mode * 0 - Disable MGC * 1 - Enable PRBS * 2 - Enable TPG * 3 - Reserved [Write phase: SW/FW debug] */ writel(2, mg_base + MGC_MG_MODE); /* config mg init counter */ writel(0, mg_base + MGC_MG_INIT_COUNTER); /* * configure virtual channel * TODO: VC support if need * currently each MGC just uses 1 virtual channel */ writel(DEFAULT_VC_ID, mg_base + MGC_MG_MIPI_VC); /* * configure data type and multi dtypes mode * TODO: it needs to add the metedata flow. 
*/ if (is_metadata_enabled(tpg)) { writel(MGC_DTYPE_RAW(bpp) << 4 | MGC_DTYPE_RAW(bpp), mg_base + MGC_MG_MIPI_DTYPES); writel(2, mg_base + MGC_MG_MULTI_DTYPES_MODE); } else { writel(MGC_DTYPE_RAW(bpp) << 4 | MGC_DTYPE_RAW(bpp), mg_base + MGC_MG_MIPI_DTYPES); writel(0, mg_base + MGC_MG_MULTI_DTYPES_MODE); } /* * configure frame information */ writel(0, mg_base + MGC_MG_NOF_FRAMES); writel(width | height << 16, mg_base + MGC_MG_FRAME_DIM); tpg_get_timing(tpg, &dto, &hblank, &vblank); writel(hblank, mg_base + MGC_MG_HBLANK); writel(vblank, mg_base + MGC_MG_VBLANK); /* * configure tpg mode, colors, mask, tile dimension * Mode was set by user configuration * 0 - Ramp mode * 1 - Checkerboard * 2 - Monochrome per frame * 3 - Color palette */ writel(TPG_MODE_COLOR_PALETTE, mg_base + MGC_MG_TPG_MODE); /* red and green for checkerboard, n/a for other modes */ writel(58, mg_base + MGC_MG_TPG_R0); writel(122, mg_base + MGC_MG_TPG_G0); writel(46, mg_base + MGC_MG_TPG_B0); writel(123, mg_base + MGC_MG_TPG_R1); writel(85, mg_base + MGC_MG_TPG_G1); writel(67, mg_base + MGC_MG_TPG_B1); writel(0x0, mg_base + MGC_MG_TPG_FACTORS); /* hor_mask [15:0] ver_mask [31:16] */ writel(0xffffffff, mg_base + MGC_MG_TPG_MASKS); /* xy_mask [11:0] */ writel(0xfff, mg_base + MGC_MG_TPG_XY_MASK); writel(((MGC_TPG_TILE_WIDTH << 16) | MGC_TPG_TILE_HEIGHT), mg_base + MGC_MG_TPG_TILE_DIM); writel(dto, mg_base + MGC_MG_DTO_SPEED_CTRL_INCR_VAL); writel(1, mg_base + MGC_MG_DTO_SPEED_CTRL_EN); /* disable err_injection */ writel(0, mg_base + MGC_MG_ERR_INJECT); writel(0, mg_base + MGC_MG_ERR_LOCATION); ipu7_mipigen_regdump(tpg, mg_base); dev_dbg(dev, "starting MG%d streaming...\n", csi_port); /* kick and start */ writel(port_map, mgc_base + MGC_KICK); return 0; } static void ipu7_isys_ungate_mgc(struct ipu7_isys_tpg *tpg, int enable) { struct ipu7_isys_csi2 *csi2; u32 offset; struct ipu7_isys *isys = tpg->isys; csi2 = &isys->csi2[tpg->index]; offset = IS_IO_GPREGS_BASE; /* MGC is in use by SW or not */ if 
(enable) writel(1, csi2->base + offset + MGC_CLK_GATE); else writel(0, csi2->base + offset + MGC_CLK_GATE); } static void ipu7_isys_mgc_csi2_s_stream(struct ipu7_isys_tpg *tpg, int enable) { struct device *dev = &tpg->isys->adev->auxdev.dev; struct ipu7_isys *isys = tpg->isys; struct ipu7_isys_csi2 *csi2; u32 port, offset, val; void __iomem *isys_base = isys->pdata->base; port = tpg->index; csi2 = &isys->csi2[port]; offset = IS_IO_GPREGS_BASE; val = readl(isys_base + offset + CSI_PORT_CLK_GATE); dev_dbg(dev, "current CSI port %u clk gate 0x%x\n", port, val); if (!enable) { writel(~(1 << port) & val, isys_base + offset + CSI_PORT_CLK_GATE); return; } /* set csi port is using by SW */ writel(1 << port | val, isys_base + offset + CSI_PORT_CLK_GATE); /* input is coming from MGC */ offset = IS_IO_CSI2_ADPL_PORT_BASE(port); writel(CSI_MIPIGEN_INPUT, csi2->base + offset + CSI2_ADPL_INPUT_MODE); } /* TODO: add the processing of vc */ int tpg_set_stream(struct v4l2_subdev *sd, int enable) { struct ipu7_isys_tpg *tpg = to_ipu7_isys_tpg(sd); struct ipu7_isys_stream *stream = tpg->av->stream; struct device *dev = &tpg->isys->adev->auxdev.dev; int ret; if (tpg->index >= IPU7_ISYS_CSI_PORT_NUM) { dev_err(dev, "invalid MGC index %d\n", tpg->index); return -EINVAL; } if (!enable) { /* Stop MGC */ stream->asd->is_tpg = false; stream->asd = NULL; ipu7_isys_mgc_csi2_s_stream(tpg, enable); ret = tpg_stop_stream(tpg); ipu7_isys_ungate_mgc(tpg, enable); return ret; } stream->asd = &tpg->asd; /* ungate the MGC clock to program */ ipu7_isys_ungate_mgc(tpg, enable); /* Start MGC */ ret = tpg_start_stream(tpg); v4l2_ctrl_handler_setup(&tpg->asd.ctrl_handler); ipu7_isys_mgc_csi2_s_stream(tpg, enable); return ret; } void ipu7_isys_tpg_cleanup(struct ipu7_isys_tpg *tpg) { v4l2_device_unregister_subdev(&tpg->asd.sd); ipu7_isys_subdev_cleanup(&tpg->asd); } int ipu7_isys_tpg_init(struct ipu7_isys_tpg *tpg, struct ipu7_isys *isys, void __iomem *base, void __iomem *sel, unsigned int index) { struct 
device *dev = &isys->adev->auxdev.dev; int ret; tpg->isys = isys; tpg->base = base; tpg->sel = sel; tpg->index = index; tpg->asd.sd.entity.ops = &tpg_entity_ops; tpg->asd.ctrl_init = ipu7_isys_tpg_init_controls; tpg->asd.isys = isys; ret = ipu7_isys_subdev_init(&tpg->asd, &tpg_sd_ops, 5, NR_OF_TPG_SINK_PADS, NR_OF_TPG_SOURCE_PADS); if (ret) return ret; tpg->asd.sd.flags &= ~V4L2_SUBDEV_FL_STREAMS; tpg->asd.sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; tpg->asd.pad[TPG_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; tpg->asd.source = IPU_INSYS_MIPI_PORT_0 + index; tpg->asd.supported_codes = tpg_supported_codes; tpg->asd.sd.internal_ops = &ipu7_isys_tpg_internal_ops; snprintf(tpg->asd.sd.name, sizeof(tpg->asd.sd.name), IPU_ISYS_ENTITY_PREFIX " TPG %u", index); v4l2_set_subdevdata(&tpg->asd.sd, &tpg->asd); ret = v4l2_subdev_init_finalize(&tpg->asd.sd); if (ret) { dev_err(dev, "failed to finalize subdev (%d)\n", ret); goto fail; } ret = v4l2_device_register_subdev(&isys->v4l2_dev, &tpg->asd.sd); if (ret) { dev_info(dev, "can't register v4l2 subdev\n"); goto fail; } return 0; fail: ipu7_isys_tpg_cleanup(tpg); return ret; } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-tpg.h000066400000000000000000000032341503071307200271650ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_ISYS_TPG_H #define IPU7_ISYS_TPG_H #include #include #include #include "ipu7-isys-subdev.h" #include "ipu7-isys-video.h" #include "ipu7-isys-queue.h" struct ipu7_isys_tpg_pdata; struct ipu7_isys; #define TPG_PAD_SOURCE 0 #define NR_OF_TPG_PADS 1 #define NR_OF_TPG_SOURCE_PADS 1 #define NR_OF_TPG_SINK_PADS 0 #define NR_OF_TPG_STREAMS 1 enum isys_tpg_mode { TPG_MODE_RAMP = 0, TPG_MODE_CHECKERBOARD = 1, TPG_MODE_MONO = 2, TPG_MODE_COLOR_PALETTE = 3, }; /* * struct ipu7_isys_tpg * * @nlanes: number of lanes in the receiver */ struct ipu7_isys_tpg { struct ipu7_isys_subdev asd; struct 
ipu7_isys_tpg_pdata *pdata; struct ipu7_isys *isys; struct ipu7_isys_video *av; /* MG base not MGC */ void __iomem *base; void __iomem *sel; unsigned int index; struct v4l2_ctrl *hblank; struct v4l2_ctrl *vblank; struct v4l2_ctrl *pixel_rate; }; #define ipu7_isys_subdev_to_tpg(__sd) \ container_of(__sd, struct ipu7_isys_tpg, asd) #define to_ipu7_isys_tpg(sd) \ container_of(to_ipu7_isys_subdev(sd), \ struct ipu7_isys_tpg, asd) void ipu7_isys_tpg_sof_event_by_stream(struct ipu7_isys_stream *stream); void ipu7_isys_tpg_eof_event_by_stream(struct ipu7_isys_stream *stream); int ipu7_isys_tpg_init(struct ipu7_isys_tpg *tpg, struct ipu7_isys *isys, void __iomem *base, void __iomem *sel, unsigned int index); void ipu7_isys_tpg_cleanup(struct ipu7_isys_tpg *tpg); int tpg_set_stream(struct v4l2_subdev *sd, int enable); #endif /* IPU7_ISYS_TPG_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-video.c000066400000000000000000001041731503071307200275000ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET #include #endif #include #include #include #include #include #include #include #include "abi/ipu7_fw_isys_abi.h" #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress-regs.h" #include "ipu7-fw-isys.h" #include "ipu7-isys.h" #include "ipu7-isys-video.h" #include "ipu7-platform-regs.h" const struct ipu7_isys_pixelformat ipu7_isys_pfmts[] = { {V4L2_PIX_FMT_SBGGR12, 16, 12, MEDIA_BUS_FMT_SBGGR12_1X12, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGBRG12, 16, 12, MEDIA_BUS_FMT_SGBRG12_1X12, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGRBG12, 16, 12, MEDIA_BUS_FMT_SGRBG12_1X12, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SRGGB12, 16, 12, MEDIA_BUS_FMT_SRGGB12_1X12, IPU_INSYS_FRAME_FORMAT_RAW16}, 
{V4L2_PIX_FMT_SBGGR10, 16, 10, MEDIA_BUS_FMT_SBGGR10_1X10, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGBRG10, 16, 10, MEDIA_BUS_FMT_SGBRG10_1X10, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SGRBG10, 16, 10, MEDIA_BUS_FMT_SGRBG10_1X10, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SRGGB10, 16, 10, MEDIA_BUS_FMT_SRGGB10_1X10, IPU_INSYS_FRAME_FORMAT_RAW16}, {V4L2_PIX_FMT_SBGGR8, 8, 8, MEDIA_BUS_FMT_SBGGR8_1X8, IPU_INSYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGBRG8, 8, 8, MEDIA_BUS_FMT_SGBRG8_1X8, IPU_INSYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SGRBG8, 8, 8, MEDIA_BUS_FMT_SGRBG8_1X8, IPU_INSYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SRGGB8, 8, 8, MEDIA_BUS_FMT_SRGGB8_1X8, IPU_INSYS_FRAME_FORMAT_RAW8}, {V4L2_PIX_FMT_SBGGR12P, 12, 12, MEDIA_BUS_FMT_SBGGR12_1X12, IPU_INSYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SGBRG12P, 12, 12, MEDIA_BUS_FMT_SGBRG12_1X12, IPU_INSYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SGRBG12P, 12, 12, MEDIA_BUS_FMT_SGRBG12_1X12, IPU_INSYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SRGGB12P, 12, 12, MEDIA_BUS_FMT_SRGGB12_1X12, IPU_INSYS_FRAME_FORMAT_RAW12}, {V4L2_PIX_FMT_SBGGR10P, 10, 10, MEDIA_BUS_FMT_SBGGR10_1X10, IPU_INSYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_SGBRG10P, 10, 10, MEDIA_BUS_FMT_SGBRG10_1X10, IPU_INSYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_SGRBG10P, 10, 10, MEDIA_BUS_FMT_SGRBG10_1X10, IPU_INSYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10, IPU_INSYS_FRAME_FORMAT_RAW10}, {V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16, IPU_INSYS_FRAME_FORMAT_UYVY}, {V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16, IPU_INSYS_FRAME_FORMAT_YUYV}, {V4L2_PIX_FMT_RGB565, 16, 16, MEDIA_BUS_FMT_RGB565_1X16, IPU_INSYS_FRAME_FORMAT_RGB565}, {V4L2_PIX_FMT_BGR24, 24, 24, MEDIA_BUS_FMT_RGB888_1X24, IPU_INSYS_FRAME_FORMAT_RGBA888}, }; static int video_open(struct file *file) { #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET struct ipu7_isys_video *av = video_drvdata(file); struct ipu7_isys *isys = av->isys; struct ipu7_bus_device *adev = isys->adev; 
mutex_lock(&isys->reset_mutex); if (isys->need_reset) { mutex_unlock(&isys->reset_mutex); dev_warn(&adev->auxdev.dev, "isys power cycle required\n"); return -EIO; } mutex_unlock(&isys->reset_mutex); #endif return v4l2_fh_open(file); } #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET static int video_release(struct file *file) { struct ipu7_isys_video *av = video_drvdata(file); dev_dbg(&av->isys->adev->auxdev.dev, "release: %s: enter\n", av->vdev.name); mutex_lock(&av->isys->reset_mutex); while (av->isys->in_reset) { mutex_unlock(&av->isys->reset_mutex); dev_dbg(&av->isys->adev->auxdev.dev, "release: %s: wait for reset\n", av->vdev.name); usleep_range(10000, 11000); mutex_lock(&av->isys->reset_mutex); } mutex_unlock(&av->isys->reset_mutex); return vb2_fop_release(file); } #endif const struct ipu7_isys_pixelformat *ipu7_isys_get_isys_format(u32 pixelformat) { unsigned int i; for (i = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) { const struct ipu7_isys_pixelformat *pfmt = &ipu7_isys_pfmts[i]; if (pfmt->pixelformat == pixelformat) return pfmt; } return &ipu7_isys_pfmts[0]; } static int ipu7_isys_vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct ipu7_isys_video *av = video_drvdata(file); strscpy(cap->driver, IPU_ISYS_NAME, sizeof(cap->driver)); strscpy(cap->card, av->isys->media_dev.model, sizeof(cap->card)); return 0; } static int ipu7_isys_vidioc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) { unsigned int i, num_found; for (i = 0, num_found = 0; i < ARRAY_SIZE(ipu7_isys_pfmts); i++) { if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) continue; if (f->mbus_code && f->mbus_code != ipu7_isys_pfmts[i].code) continue; if (num_found < f->index) { num_found++; continue; } f->flags = 0; f->pixelformat = ipu7_isys_pfmts[i].pixelformat; return 0; } return -EINVAL; } static int ipu7_isys_vidioc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize) { unsigned int i; if (fsize->index > 0) return -EINVAL; for (i = 0; i < 
ARRAY_SIZE(ipu7_isys_pfmts); i++) { if (fsize->pixel_format != ipu7_isys_pfmts[i].pixelformat) continue; fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = IPU_ISYS_MIN_WIDTH; fsize->stepwise.max_width = IPU_ISYS_MAX_WIDTH; fsize->stepwise.min_height = IPU_ISYS_MIN_HEIGHT; fsize->stepwise.max_height = IPU_ISYS_MAX_HEIGHT; fsize->stepwise.step_width = 2; fsize->stepwise.step_height = 2; return 0; } return -EINVAL; } static int ipu7_isys_vidioc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct ipu7_isys_video *av = video_drvdata(file); f->fmt.pix = av->pix_fmt; return 0; } static void ipu7_isys_try_fmt_cap(struct ipu7_isys_video *av, u32 type, u32 *format, u32 *width, u32 *height, u32 *bytesperline, u32 *sizeimage) { const struct ipu7_isys_pixelformat *pfmt = ipu7_isys_get_isys_format(*format); *format = pfmt->pixelformat; *width = clamp(*width, IPU_ISYS_MIN_WIDTH, IPU_ISYS_MAX_WIDTH); *height = clamp(*height, IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT); if (pfmt->bpp != pfmt->bpp_packed) *bytesperline = *width * DIV_ROUND_UP(pfmt->bpp, BITS_PER_BYTE); else *bytesperline = DIV_ROUND_UP(*width * pfmt->bpp, BITS_PER_BYTE); *bytesperline = ALIGN(*bytesperline, 64U); /* * (height + 1) * bytesperline due to a hardware issue: the DMA unit * is a power of two, and a line should be transferred as few units * as possible. The result is that up to line length more data than * the image size may be transferred to memory after the image. * Another limitation is the GDA allocation unit size. For low * resolution it gives a bigger number. Use larger one to avoid * memory corruption. 
*/ *sizeimage = *bytesperline * *height + max(*bytesperline, av->isys->pdata->ipdata->isys_dma_overshoot); } static void __ipu_isys_vidioc_try_fmt_vid_cap(struct ipu7_isys_video *av, struct v4l2_format *f) { ipu7_isys_try_fmt_cap(av, f->type, &f->fmt.pix.pixelformat, &f->fmt.pix.width, &f->fmt.pix.height, &f->fmt.pix.bytesperline, &f->fmt.pix.sizeimage); f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW; f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT; f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT; } static int ipu7_isys_vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct ipu7_isys_video *av = video_drvdata(file); if (vb2_is_busy(&av->aq.vbq)) return -EBUSY; __ipu_isys_vidioc_try_fmt_vid_cap(av, f); return 0; } static int ipu7_isys_vidioc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) { struct ipu7_isys_video *av = video_drvdata(file); ipu7_isys_vidioc_try_fmt_vid_cap(file, fh, f); av->pix_fmt = f->fmt.pix; return 0; } static int ipu7_isys_vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct ipu7_isys_video *av = video_drvdata(file); int ret; av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->type); av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->type); ret = vb2_queue_change_type(&av->aq.vbq, p->type); if (ret) return ret; return vb2_ioctl_reqbufs(file, priv, p); } static int ipu7_isys_vidioc_create_bufs(struct file *file, void *priv, struct v4l2_create_buffers *p) { struct ipu7_isys_video *av = video_drvdata(file); int ret; av->aq.vbq.is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(p->format.type); av->aq.vbq.is_output = V4L2_TYPE_IS_OUTPUT(p->format.type); ret = vb2_queue_change_type(&av->aq.vbq, p->format.type); if (ret) return ret; return vb2_ioctl_create_bufs(file, priv, p); } static int link_validate(struct media_link *link) { struct ipu7_isys_video *av = container_of(link->sink, struct 
ipu7_isys_video, pad); struct device *dev = &av->isys->adev->auxdev.dev; struct v4l2_subdev_state *s_state; struct v4l2_mbus_framefmt *s_fmt; struct v4l2_subdev *s_sd; struct media_pad *s_pad; u32 s_stream, code; int ret = -EPIPE; if (!link->source->entity) return ret; s_sd = media_entity_to_v4l2_subdev(link->source->entity); s_state = v4l2_subdev_get_unlocked_active_state(s_sd); if (!s_state) return ret; dev_dbg(dev, "validating link \"%s\":%u -> \"%s\"\n", link->source->entity->name, link->source->index, link->sink->entity->name); s_pad = media_pad_remote_pad_first(&av->pad); s_stream = ipu7_isys_get_src_stream_by_src_pad(s_sd, s_pad->index); v4l2_subdev_lock_state(s_state); s_fmt = v4l2_subdev_state_get_format(s_state, s_pad->index, s_stream); if (!s_fmt) { dev_err(dev, "failed to get source pad format\n"); goto unlock; } code = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat)->code; if (s_fmt->width != av->pix_fmt.width || s_fmt->height != av->pix_fmt.height || s_fmt->code != code) { dev_dbg(dev, "format mismatch %dx%d,%x != %dx%d,%x\n", s_fmt->width, s_fmt->height, s_fmt->code, av->pix_fmt.width, av->pix_fmt.height, code); goto unlock; } v4l2_subdev_unlock_state(s_state); return 0; unlock: v4l2_subdev_unlock_state(s_state); return ret; } static void get_stream_opened(struct ipu7_isys_video *av) { unsigned long flags; spin_lock_irqsave(&av->isys->streams_lock, flags); av->isys->stream_opened++; spin_unlock_irqrestore(&av->isys->streams_lock, flags); } static void put_stream_opened(struct ipu7_isys_video *av) { unsigned long flags; spin_lock_irqsave(&av->isys->streams_lock, flags); av->isys->stream_opened--; spin_unlock_irqrestore(&av->isys->streams_lock, flags); } static int ipu7_isys_fw_pin_cfg(struct ipu7_isys_video *av, struct ipu7_insys_stream_cfg *cfg) { struct media_pad *src_pad = media_pad_remote_pad_first(&av->pad); struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(src_pad->entity); struct ipu7_isys_stream *stream = av->stream; const struct 
ipu7_isys_pixelformat *pfmt = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat); struct ipu7_insys_output_pin *output_pin; struct ipu7_insys_input_pin *input_pin; int input_pins = cfg->nof_input_pins++; struct ipu7_isys_queue *aq = &av->aq; struct ipu7_isys *isys = av->isys; struct device *dev = &isys->adev->auxdev.dev; struct v4l2_mbus_framefmt fmt; int output_pins; u32 src_stream; int ret; src_stream = ipu7_isys_get_src_stream_by_src_pad(sd, src_pad->index); ret = ipu7_isys_get_stream_pad_fmt(sd, src_pad->index, src_stream, &fmt); if (ret < 0) { dev_err(dev, "can't get stream format (%d)\n", ret); return ret; } input_pin = &cfg->input_pins[input_pins]; input_pin->input_res.width = fmt.width; input_pin->input_res.height = fmt.height; input_pin->dt = av->dt; input_pin->disable_mipi_unpacking = 0; pfmt = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat); if (pfmt->bpp == pfmt->bpp_packed && pfmt->bpp % BITS_PER_BYTE) input_pin->disable_mipi_unpacking = 1; input_pin->mapped_dt = N_IPU_INSYS_MIPI_DATA_TYPE; input_pin->dt_rename_mode = IPU_INSYS_MIPI_DT_NO_RENAME; /* if enable polling isys interrupt, the follow values maybe set */ input_pin->sync_msg_map = IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF | IPU_INSYS_STREAM_SYNC_MSG_SEND_RESP_SOF_DISCARDED | IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF | IPU_INSYS_STREAM_SYNC_MSG_SEND_IRQ_SOF_DISCARDED; output_pins = cfg->nof_output_pins++; aq->fw_output = output_pins; stream->output_pins[output_pins].pin_ready = ipu7_isys_queue_buf_ready; stream->output_pins[output_pins].aq = aq; output_pin = &cfg->output_pins[output_pins]; /* output pin msg link */ output_pin->link.buffer_lines = 0; output_pin->link.foreign_key = IPU_MSG_LINK_FOREIGN_KEY_NONE; output_pin->link.granularity_pointer_update = 0; output_pin->link.msg_link_streaming_mode = IA_GOFO_MSG_LINK_STREAMING_MODE_SOFF; output_pin->link.pbk_id = IPU_MSG_LINK_PBK_ID_DONT_CARE; output_pin->link.pbk_slot_id = IPU_MSG_LINK_PBK_SLOT_ID_DONT_CARE; output_pin->link.dest = 
IPU_INSYS_OUTPUT_LINK_DEST_MEM; output_pin->link.use_sw_managed = 1; /* TODO: set the snoop bit for metadata capture */ output_pin->link.is_snoop = 0; /* output pin crop */ output_pin->crop.line_top = 0; output_pin->crop.line_bottom = 0; #ifdef IPU8_INSYS_NEW_ABI output_pin->crop.column_left = 0; output_pin->crop.column_right = 0; #endif /* output de-compression */ output_pin->dpcm.enable = 0; #ifdef IPU8_INSYS_NEW_ABI /* upipe_cfg */ output_pin->upipe_pin_cfg.opaque_pin_cfg = 0; output_pin->upipe_pin_cfg.plane_offset_1 = 0; output_pin->upipe_pin_cfg.plane_offset_2 = 0; output_pin->upipe_pin_cfg.single_uob_fifo = 0; output_pin->upipe_pin_cfg.shared_uob_fifo = 0; output_pin->upipe_enable = 0; output_pin->binning_factor = 0; /* stupid setting, even unused, SW still need to set a valid value */ output_pin->cfa_dim = IPU_INSYS_CFA_DIM_2x2; #endif /* frame format type */ pfmt = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat); output_pin->ft = (u16)pfmt->css_pixelformat; /* stride in bytes */ output_pin->stride = av->pix_fmt.bytesperline; output_pin->send_irq = 1; output_pin->early_ack_en = 0; /* input pin id */ output_pin->input_pin_id = input_pins; return 0; } /* Create stream and start it using the CSS FW ABI. 
*/ static int start_stream_firmware(struct ipu7_isys_video *av, struct ipu7_isys_buffer_list *bl) { struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_stream *stream = av->stream; struct ipu7_insys_stream_cfg *stream_cfg; struct ipu7_insys_buffset *buf = NULL; struct isys_fw_msgs *msg = NULL; struct ipu7_isys_queue *aq; int ret, retout, tout; u16 send_type; if (WARN_ON(!bl)) return -EIO; msg = ipu7_get_fw_msg_buf(stream); if (!msg) return -ENOMEM; stream_cfg = &msg->fw_msg.stream; stream_cfg->port_id = stream->stream_source; stream_cfg->vc = stream->vc; stream_cfg->stream_msg_map = IPU_INSYS_STREAM_ENABLE_MSG_SEND_RESP | IPU_INSYS_STREAM_ENABLE_MSG_SEND_IRQ; list_for_each_entry(aq, &stream->queues, node) { struct ipu7_isys_video *__av = ipu7_isys_queue_to_video(aq); ret = ipu7_isys_fw_pin_cfg(__av, stream_cfg); if (ret < 0) { ipu7_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); return ret; } } ipu7_fw_isys_dump_stream_cfg(dev, stream_cfg); stream->nr_output_pins = stream_cfg->nof_output_pins; reinit_completion(&stream->stream_open_completion); ret = ipu7_fw_isys_complex_cmd(av->isys, stream->stream_handle, stream_cfg, msg->dma_addr, sizeof(*stream_cfg), IPU_INSYS_SEND_TYPE_STREAM_OPEN); if (ret < 0) { dev_err(dev, "can't open stream (%d)\n", ret); ipu7_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); return ret; } get_stream_opened(av); tout = wait_for_completion_timeout(&stream->stream_open_completion, FW_CALL_TIMEOUT_JIFFIES); ipu7_put_fw_msg_buf(av->isys, (uintptr_t)stream_cfg); if (!tout) { dev_err(dev, "stream open time out\n"); ret = -ETIMEDOUT; goto out_put_stream_opened; } if (stream->error) { dev_err(dev, "stream open error: %d\n", stream->error); ret = -EIO; goto out_put_stream_opened; } dev_dbg(dev, "start stream: open complete\n"); msg = ipu7_get_fw_msg_buf(stream); if (!msg) { ret = -ENOMEM; goto out_put_stream_opened; } buf = &msg->fw_msg.frame; ipu7_isys_buffer_to_fw_frame_buff(buf, stream, bl); ipu7_isys_buffer_list_queue(bl, 
IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0); reinit_completion(&stream->stream_start_completion); send_type = IPU_INSYS_SEND_TYPE_STREAM_START_AND_CAPTURE; ipu7_fw_isys_dump_frame_buff_set(dev, buf, stream_cfg->nof_output_pins); ret = ipu7_fw_isys_complex_cmd(av->isys, stream->stream_handle, buf, msg->dma_addr, sizeof(*buf), send_type); if (ret < 0) { dev_err(dev, "can't start streaming (%d)\n", ret); goto out_stream_close; } tout = wait_for_completion_timeout(&stream->stream_start_completion, FW_CALL_TIMEOUT_JIFFIES); if (!tout) { dev_err(dev, "stream start time out\n"); ret = -ETIMEDOUT; goto out_stream_close; } if (stream->error) { dev_err(dev, "stream start error: %d\n", stream->error); ret = -EIO; goto out_stream_close; } dev_dbg(dev, "start stream: complete\n"); return 0; out_stream_close: reinit_completion(&stream->stream_close_completion); retout = ipu7_fw_isys_simple_cmd(av->isys, stream->stream_handle, IPU_INSYS_SEND_TYPE_STREAM_CLOSE); if (retout < 0) { dev_dbg(dev, "can't close stream (%d)\n", retout); goto out_put_stream_opened; } tout = wait_for_completion_timeout(&stream->stream_close_completion, FW_CALL_TIMEOUT_JIFFIES); if (!tout) dev_err(dev, "stream close time out with error %d\n", stream->error); else dev_dbg(dev, "stream close complete\n"); out_put_stream_opened: put_stream_opened(av); return ret; } static void stop_streaming_firmware(struct ipu7_isys_video *av) { struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_stream *stream = av->stream; int ret, tout; reinit_completion(&stream->stream_stop_completion); ret = ipu7_fw_isys_simple_cmd(av->isys, stream->stream_handle, IPU_INSYS_SEND_TYPE_STREAM_FLUSH); if (ret < 0) { dev_err(dev, "can't stop stream (%d)\n", ret); return; } tout = wait_for_completion_timeout(&stream->stream_stop_completion, #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET FW_CALL_TIMEOUT_JIFFIES_RESET); #else FW_CALL_TIMEOUT_JIFFIES); #endif if (!tout) dev_warn(dev, "stream stop time out\n"); else if (stream->error) 
dev_warn(dev, "stream stop error: %d\n", stream->error); else dev_dbg(dev, "stop stream: complete\n"); } static void close_streaming_firmware(struct ipu7_isys_video *av) { struct device *dev = &av->isys->adev->auxdev.dev; struct ipu7_isys_stream *stream = av->stream; int ret, tout; reinit_completion(&stream->stream_close_completion); ret = ipu7_fw_isys_simple_cmd(av->isys, stream->stream_handle, IPU_INSYS_SEND_TYPE_STREAM_CLOSE); if (ret < 0) { dev_err(dev, "can't close stream (%d)\n", ret); return; } tout = wait_for_completion_timeout(&stream->stream_close_completion, #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET FW_CALL_TIMEOUT_JIFFIES_RESET); #else FW_CALL_TIMEOUT_JIFFIES); #endif if (!tout) dev_warn(dev, "stream close time out\n"); else if (stream->error) dev_warn(dev, "stream close error: %d\n", stream->error); else dev_dbg(dev, "close stream: complete\n"); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET stream->last_sequence = atomic_read(&stream->sequence); dev_dbg(dev, "ip->last_sequence = %d\n", stream->last_sequence); #endif put_stream_opened(av); } int ipu7_isys_video_prepare_stream(struct ipu7_isys_video *av, struct media_entity *source_entity, int nr_queues) { struct ipu7_isys_stream *stream = av->stream; struct ipu7_isys_csi2 *csi2; if (WARN_ON(stream->nr_streaming)) return -EINVAL; stream->nr_queues = nr_queues; #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET if (av->isys->in_reset) { atomic_set(&stream->sequence, stream->last_sequence); dev_dbg(&av->isys->adev->auxdev.dev, "atomic_set : stream->last_sequence = %d\n", stream->last_sequence); } else { atomic_set(&stream->sequence, 0); } #else atomic_set(&stream->sequence, 0); #endif atomic_set(&stream->buf_id, 0); stream->seq_index = 0; memset(stream->seq, 0, sizeof(stream->seq)); if (WARN_ON(!list_empty(&stream->queues))) return -EINVAL; stream->stream_source = stream->asd->source; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC if (!stream->asd->is_tpg) { csi2 = ipu7_isys_subdev_to_csi2(stream->asd); csi2->receiver_errors = 0; 
} #else csi2 = ipu7_isys_subdev_to_csi2(stream->asd); csi2->receiver_errors = 0; #endif stream->source_entity = source_entity; dev_dbg(&av->isys->adev->auxdev.dev, "prepare stream: external entity %s\n", stream->source_entity->name); return 0; } void ipu7_isys_put_stream(struct ipu7_isys_stream *stream) { unsigned long flags; struct device *dev; unsigned int i; if (!stream) { pr_err("ipu7-isys: no available stream\n"); return; } dev = &stream->isys->adev->auxdev.dev; spin_lock_irqsave(&stream->isys->streams_lock, flags); for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { if (&stream->isys->streams[i] == stream) { if (stream->isys->streams_ref_count[i] > 0) stream->isys->streams_ref_count[i]--; else dev_warn(dev, "invalid stream %d\n", i); break; } } spin_unlock_irqrestore(&stream->isys->streams_lock, flags); } static struct ipu7_isys_stream * ipu7_isys_get_stream(struct ipu7_isys_video *av, struct ipu7_isys_subdev *asd) { struct ipu7_isys_stream *stream = NULL; struct ipu7_isys *isys = av->isys; unsigned long flags; unsigned int i; u8 vc = av->vc; if (!isys) return NULL; spin_lock_irqsave(&isys->streams_lock, flags); for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { if (isys->streams_ref_count[i] && isys->streams[i].vc == vc && isys->streams[i].asd == asd) { isys->streams_ref_count[i]++; stream = &isys->streams[i]; break; } } if (!stream) { for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { if (!isys->streams_ref_count[i]) { isys->streams_ref_count[i]++; stream = &isys->streams[i]; stream->vc = vc; stream->asd = asd; break; } } } spin_unlock_irqrestore(&isys->streams_lock, flags); return stream; } struct ipu7_isys_stream * ipu7_isys_query_stream_by_handle(struct ipu7_isys *isys, u8 stream_handle) { unsigned long flags; struct ipu7_isys_stream *stream = NULL; if (!isys) return NULL; if (stream_handle >= IPU_ISYS_MAX_STREAMS) { dev_err(&isys->adev->auxdev.dev, "stream_handle %d is invalid\n", stream_handle); return NULL; } spin_lock_irqsave(&isys->streams_lock, flags); if 
(isys->streams_ref_count[stream_handle] > 0) { isys->streams_ref_count[stream_handle]++; stream = &isys->streams[stream_handle]; } spin_unlock_irqrestore(&isys->streams_lock, flags); return stream; } struct ipu7_isys_stream * ipu7_isys_query_stream_by_source(struct ipu7_isys *isys, int source, u8 vc) { struct ipu7_isys_stream *stream = NULL; unsigned long flags; unsigned int i; if (!isys) return NULL; if (source < 0) { dev_err(&isys->adev->auxdev.dev, "query stream with invalid port number\n"); return NULL; } spin_lock_irqsave(&isys->streams_lock, flags); for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { if (!isys->streams_ref_count[i]) continue; if (isys->streams[i].stream_source == source && isys->streams[i].vc == vc) { stream = &isys->streams[i]; isys->streams_ref_count[i]++; break; } } spin_unlock_irqrestore(&isys->streams_lock, flags); return stream; } static u32 get_remote_pad_stream(struct media_pad *r_pad) { struct v4l2_subdev_state *state; struct v4l2_subdev *sd; u32 stream_id = 0; unsigned int i; sd = media_entity_to_v4l2_subdev(r_pad->entity); state = v4l2_subdev_lock_and_get_active_state(sd); if (!state) return 0; for (i = 0; i < state->stream_configs.num_configs; i++) { struct v4l2_subdev_stream_config *cfg = &state->stream_configs.configs[i]; if (cfg->pad == r_pad->index) { stream_id = cfg->stream; break; } } v4l2_subdev_unlock_state(state); return stream_id; } int ipu7_isys_video_set_streaming(struct ipu7_isys_video *av, int state, struct ipu7_isys_buffer_list *bl) { struct ipu7_isys_stream *stream = av->stream; struct device *dev = &av->isys->adev->auxdev.dev; struct media_pad *r_pad; struct v4l2_subdev *sd; u32 r_stream; int ret = 0; dev_dbg(dev, "set stream: %d\n", state); if (WARN(!stream->source_entity, "No source entity for stream\n")) return -ENODEV; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC if (stream->asd->is_tpg) { sd = &stream->asd->sd; r_pad = media_pad_remote_pad_first(&av->pad); r_stream = ipu7_isys_get_src_stream_by_src_pad(sd, r_pad->index); if 
(!state) { stop_streaming_firmware(av); dev_dbg(dev, "disable streams 0x%lx of %s\n", BIT(r_stream), sd->name); ret = v4l2_subdev_disable_streams(sd, r_pad->index, BIT(r_stream)); if (ret) dev_err(dev, "disable streams of %s failed\n", sd->name); close_streaming_firmware(av); } else { ret = start_stream_firmware(av, bl); if (ret) { dev_err(dev, "start FW stream failed\n"); return ret; } dev_dbg(dev, "set stream: source %d, handle %d\n", stream->stream_source, stream->stream_handle); dev_dbg(dev, "enable streams 0x%lx of %s\n", BIT(r_stream), sd->name); /* start sub-device which connects with video */ ret = v4l2_subdev_enable_streams(sd, r_pad->index, BIT(r_stream)); if (ret) { dev_err(dev, "enable streams of %s failed\n", sd->name); goto out_media_entity_stop_streaming_firmware; } } av->streaming = state; return 0; } #endif sd = &stream->asd->sd; r_pad = media_pad_remote_pad_first(&av->pad); r_stream = get_remote_pad_stream(r_pad); if (!state) { stop_streaming_firmware(av); /* stop sub-device which connects with video */ dev_dbg(dev, "disable streams %s pad:%d mask:0x%llx\n", sd->name, r_pad->index, BIT_ULL(r_stream)); ret = v4l2_subdev_disable_streams(sd, r_pad->index, BIT_ULL(r_stream)); if (ret) { dev_err(dev, "disable streams %s failed with %d\n", sd->name, ret); return ret; } close_streaming_firmware(av); } else { ret = start_stream_firmware(av, bl); if (ret) { dev_err(dev, "start stream of firmware failed\n"); return ret; } /* start sub-device which connects with video */ dev_dbg(dev, "enable streams %s pad: %d mask:0x%llx\n", sd->name, r_pad->index, BIT_ULL(r_stream)); ret = v4l2_subdev_enable_streams(sd, r_pad->index, BIT_ULL(r_stream)); if (ret) { dev_err(dev, "enable streams %s failed with %d\n", sd->name, ret); goto out_media_entity_stop_streaming_firmware; } } av->streaming = state; return 0; out_media_entity_stop_streaming_firmware: stop_streaming_firmware(av); return ret; } static const struct v4l2_ioctl_ops ipu7_v4l2_ioctl_ops = { .vidioc_querycap = 
ipu7_isys_vidioc_querycap, .vidioc_enum_fmt_vid_cap = ipu7_isys_vidioc_enum_fmt, .vidioc_enum_framesizes = ipu7_isys_vidioc_enum_framesizes, .vidioc_g_fmt_vid_cap = ipu7_isys_vidioc_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = ipu7_isys_vidioc_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = ipu7_isys_vidioc_try_fmt_vid_cap, .vidioc_reqbufs = ipu7_isys_vidioc_reqbufs, .vidioc_create_bufs = ipu7_isys_vidioc_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_expbuf = vb2_ioctl_expbuf, }; static const struct media_entity_operations entity_ops = { .link_validate = link_validate, }; static const struct v4l2_file_operations isys_fops = { .owner = THIS_MODULE, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, .open = video_open, #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET .release = video_release, #else .release = vb2_fop_release, #endif }; int ipu7_isys_fw_open(struct ipu7_isys *isys) { struct ipu7_bus_device *adev = isys->adev; int ret; ret = pm_runtime_resume_and_get(&adev->auxdev.dev); if (ret < 0) return ret; mutex_lock(&isys->mutex); if (isys->ref_count++) goto unlock; /* * Buffers could have been left to wrong queue at last closure. * Move them now back to empty buffer queue. 
*/ ipu7_cleanup_fw_msg_bufs(isys); ret = ipu7_fw_isys_open(isys); if (ret < 0) goto out; unlock: mutex_unlock(&isys->mutex); return 0; out: isys->ref_count--; mutex_unlock(&isys->mutex); pm_runtime_put(&adev->auxdev.dev); return ret; } void ipu7_isys_fw_close(struct ipu7_isys *isys) { #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET bool need_reset; #endif mutex_lock(&isys->mutex); isys->ref_count--; if (!isys->ref_count) ipu7_fw_isys_close(isys); mutex_unlock(&isys->mutex); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET mutex_lock(&isys->reset_mutex); need_reset = isys->need_reset; mutex_unlock(&isys->reset_mutex); if (need_reset) pm_runtime_put_sync(&isys->adev->auxdev.dev); else pm_runtime_put(&isys->adev->auxdev.dev); #endif } int ipu7_isys_setup_video(struct ipu7_isys_video *av, struct media_entity **source_entity, int *nr_queues) { const struct ipu7_isys_pixelformat *pfmt = ipu7_isys_get_isys_format(av->pix_fmt.pixelformat); struct device *dev = &av->isys->adev->auxdev.dev; struct media_pad *source_pad, *remote_pad; struct v4l2_mbus_frame_desc_entry entry; struct v4l2_subdev_route *route = NULL; struct v4l2_subdev_route *r; struct v4l2_subdev_state *state; struct ipu7_isys_subdev *asd; struct v4l2_subdev *remote_sd; struct media_pipeline *pipeline; int ret = -EINVAL; *nr_queues = 0; remote_pad = media_pad_remote_pad_unique(&av->pad); if (IS_ERR(remote_pad)) { dev_dbg(dev, "failed to get remote pad\n"); return PTR_ERR(remote_pad); } remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity); asd = to_ipu7_isys_subdev(remote_sd); #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC if (strncmp(remote_pad->entity->name, "Intel IPU7 TPG", strlen("Intel IPU7 TPG")) == 0) { dev_dbg(dev, "Find TPG:%s stream\n", remote_sd->name); av->vc = 0; av->dt = ipu7_isys_mbus_code_to_mipi(pfmt->code); ret = video_device_pipeline_alloc_start(&av->vdev); if (ret < 0) { dev_dbg(dev, "media pipeline start failed\n"); return ret; } *source_entity = remote_pad->entity; av->stream = ipu7_isys_get_stream(av, asd); 
if (!av->stream) { video_device_pipeline_stop(&av->vdev); dev_err(dev, "no available stream for firmware\n"); return -EINVAL; } av->stream->asd->is_tpg = true; *nr_queues = 1; return 0; } #endif source_pad = media_pad_remote_pad_first(&remote_pad->entity->pads[0]); if (!source_pad) { dev_dbg(dev, "No external source entity\n"); return -ENODEV; } *source_entity = source_pad->entity; state = v4l2_subdev_lock_and_get_active_state(remote_sd); for_each_active_route(&state->routing, r) { if (r->source_pad == remote_pad->index) route = r; } if (!route) { v4l2_subdev_unlock_state(state); dev_dbg(dev, "Failed to find route\n"); return -ENODEV; } v4l2_subdev_unlock_state(state); ret = ipu7_isys_csi2_get_remote_desc(route->sink_stream, to_ipu7_isys_csi2(asd), *source_entity, &entry, nr_queues); if (ret == -ENOIOCTLCMD) { av->vc = 0; av->dt = ipu7_isys_mbus_code_to_mipi(pfmt->code); *nr_queues = 1; } else if (*nr_queues && !ret) { dev_dbg(dev, "Framedesc: stream %u, len %u, vc %u, dt %#x\n", entry.stream, entry.length, entry.bus.csi2.vc, entry.bus.csi2.dt); av->vc = entry.bus.csi2.vc; av->dt = entry.bus.csi2.dt; } else { dev_err(dev, "failed to get remote frame desc\n"); return ret; } pipeline = media_entity_pipeline(&av->vdev.entity); if (!pipeline) ret = video_device_pipeline_alloc_start(&av->vdev); else ret = video_device_pipeline_start(&av->vdev, pipeline); if (ret < 0) { dev_dbg(dev, "media pipeline start failed\n"); return ret; } av->stream = ipu7_isys_get_stream(av, asd); if (!av->stream) { video_device_pipeline_stop(&av->vdev); dev_err(dev, "no available stream for firmware\n"); return -EINVAL; } return 0; } /* * Do everything that's needed to initialise things related to video * buffer queue, video node, and the related media entity. The caller * is expected to assign isys field and set the name of the video * device. 
*/ int ipu7_isys_video_init(struct ipu7_isys_video *av) { struct v4l2_format format = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { .width = 1920, .height = 1080, }, }; int ret; mutex_init(&av->mutex); av->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_IO_MC | V4L2_CAP_VIDEO_CAPTURE; av->vdev.vfl_dir = VFL_DIR_RX; ret = ipu7_isys_queue_init(&av->aq); if (ret) goto out_mutex_destroy; av->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; ret = media_entity_pads_init(&av->vdev.entity, 1, &av->pad); if (ret) goto out_vb2_queue_cleanup; av->vdev.entity.ops = &entity_ops; av->vdev.release = video_device_release_empty; av->vdev.fops = &isys_fops; av->vdev.v4l2_dev = &av->isys->v4l2_dev; av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev; av->vdev.ioctl_ops = &ipu7_v4l2_ioctl_ops; av->vdev.queue = &av->aq.vbq; av->vdev.lock = &av->mutex; __ipu_isys_vidioc_try_fmt_vid_cap(av, &format); av->pix_fmt = format.fmt.pix; #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET av->reset = false; av->skipframe = 0; av->start_streaming = 0; #endif set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags); video_set_drvdata(&av->vdev, av); ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1); if (ret) goto out_media_entity_cleanup; return ret; out_media_entity_cleanup: vb2_video_unregister_device(&av->vdev); media_entity_cleanup(&av->vdev.entity); out_vb2_queue_cleanup: vb2_queue_release(&av->aq.vbq); out_mutex_destroy: mutex_destroy(&av->mutex); return ret; } void ipu7_isys_video_cleanup(struct ipu7_isys_video *av) { vb2_video_unregister_device(&av->vdev); media_entity_cleanup(&av->vdev.entity); mutex_destroy(&av->mutex); } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys-video.h000066400000000000000000000064231503071307200275040ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_ISYS_VIDEO_H #define IPU7_ISYS_VIDEO_H #include #include #include #include #include 
#include #include #include "ipu7-isys-queue.h" #define IPU_INSYS_OUTPUT_PINS 11U #define IPU_ISYS_MAX_PARALLEL_SOF 2U struct file; struct ipu7_isys; struct ipu7_isys_csi2; struct ipu7_insys_stream_cfg; struct ipu7_isys_subdev; struct ipu7_isys_pixelformat { u32 pixelformat; u32 bpp; u32 bpp_packed; u32 code; u32 css_pixelformat; }; struct sequence_info { unsigned int sequence; u64 timestamp; }; struct output_pin_data { void (*pin_ready)(struct ipu7_isys_stream *stream, struct ipu7_insys_resp *info); struct ipu7_isys_queue *aq; }; /* * Align with firmware stream. Each stream represents a CSI virtual channel. * May map to multiple video devices */ struct ipu7_isys_stream { struct mutex mutex; struct media_entity *source_entity; atomic_t sequence; #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET int last_sequence; #endif atomic_t buf_id; unsigned int seq_index; struct sequence_info seq[IPU_ISYS_MAX_PARALLEL_SOF]; int stream_source; int stream_handle; unsigned int nr_output_pins; struct ipu7_isys_subdev *asd; int nr_queues; /* Number of capture queues */ int nr_streaming; int streaming; struct list_head queues; struct completion stream_open_completion; struct completion stream_close_completion; struct completion stream_start_completion; struct completion stream_stop_completion; struct ipu7_isys *isys; struct output_pin_data output_pins[IPU_INSYS_OUTPUT_PINS]; int error; u8 vc; }; struct ipu7_isys_video { struct ipu7_isys_queue aq; /* Serialise access to other fields in the struct. 
*/ struct mutex mutex; struct media_pad pad; struct video_device vdev; struct v4l2_pix_format pix_fmt; struct ipu7_isys *isys; struct ipu7_isys_csi2 *csi2; struct ipu7_isys_stream *stream; unsigned int streaming; u8 vc; u8 dt; #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET unsigned int reset; unsigned int skipframe; unsigned int start_streaming; #endif }; #define ipu7_isys_queue_to_video(__aq) \ container_of(__aq, struct ipu7_isys_video, aq) extern const struct ipu7_isys_pixelformat ipu7_isys_pfmts[]; const struct ipu7_isys_pixelformat *ipu7_isys_get_isys_format(u32 pixelformat); int ipu7_isys_video_prepare_stream(struct ipu7_isys_video *av, struct media_entity *source_entity, int nr_queues); int ipu7_isys_video_set_streaming(struct ipu7_isys_video *av, int state, struct ipu7_isys_buffer_list *bl); int ipu7_isys_fw_open(struct ipu7_isys *isys); void ipu7_isys_fw_close(struct ipu7_isys *isys); int ipu7_isys_setup_video(struct ipu7_isys_video *av, struct media_entity **source_entity, int *nr_queues); int ipu7_isys_video_init(struct ipu7_isys_video *av); void ipu7_isys_video_cleanup(struct ipu7_isys_video *av); void ipu7_isys_put_stream(struct ipu7_isys_stream *stream); struct ipu7_isys_stream * ipu7_isys_query_stream_by_handle(struct ipu7_isys *isys, u8 stream_handle); struct ipu7_isys_stream * ipu7_isys_query_stream_by_source(struct ipu7_isys *isys, int source, u8 vc); #endif /* IPU7_ISYS_VIDEO_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys.c000066400000000000000000001055641503071307200264010ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #ifdef CONFIG_DEBUG_FS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "abi/ipu7_fw_isys_abi.h" 
#include "ipu7-bus.h" #include "ipu7-buttress-regs.h" #include "ipu7-cpd.h" #include "ipu7-dma.h" #include "ipu7-fw-isys.h" #include "ipu7-mmu.h" #include "ipu7-isys.h" #include "ipu7-isys-csi2.h" #include "ipu7-isys-csi-phy.h" #include "ipu7-isys-csi2-regs.h" #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC #include "ipu7-isys-tpg.h" #endif #include "ipu7-isys-video.h" #include "ipu7-platform-regs.h" #define ISYS_PM_QOS_VALUE 300 static int isys_complete_ext_device_registration(struct ipu7_isys *isys, struct v4l2_subdev *sd, struct ipu7_isys_csi2_config *csi2) { struct device *dev = &isys->adev->auxdev.dev; unsigned int i; int ret; v4l2_set_subdev_hostdata(sd, csi2); for (i = 0; i < sd->entity.num_pads; i++) { if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE) break; } if (i == sd->entity.num_pads) { dev_warn(dev, "no source pad in external entity\n"); ret = -ENOENT; goto skip_unregister_subdev; } ret = media_create_pad_link(&sd->entity, i, &isys->csi2[csi2->port].asd.sd.entity, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret) { dev_warn(dev, "can't create link\n"); goto skip_unregister_subdev; } isys->csi2[csi2->port].nlanes = csi2->nlanes; if (csi2->bus_type == V4L2_MBUS_CSI2_DPHY) isys->csi2[csi2->port].phy_mode = PHY_MODE_DPHY; else isys->csi2[csi2->port].phy_mode = PHY_MODE_CPHY; return 0; skip_unregister_subdev: v4l2_device_unregister_subdev(sd); return ret; } static void isys_stream_init(struct ipu7_isys *isys) { unsigned int i; for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) { mutex_init(&isys->streams[i].mutex); init_completion(&isys->streams[i].stream_open_completion); init_completion(&isys->streams[i].stream_close_completion); init_completion(&isys->streams[i].stream_start_completion); init_completion(&isys->streams[i].stream_stop_completion); INIT_LIST_HEAD(&isys->streams[i].queues); isys->streams[i].isys = isys; isys->streams[i].stream_handle = i; isys->streams[i].vc = INVALID_VC_ID; } } static int isys_fw_log_init(struct ipu7_isys *isys) { struct device 
*dev = &isys->adev->auxdev.dev; struct isys_fw_log *fw_log; void *log_buf; if (isys->fw_log) return 0; fw_log = devm_kzalloc(dev, sizeof(*fw_log), GFP_KERNEL); if (!fw_log) return -ENOMEM; mutex_init(&fw_log->mutex); log_buf = devm_kzalloc(dev, FW_LOG_BUF_SIZE, GFP_KERNEL); if (!log_buf) return -ENOMEM; fw_log->head = log_buf; fw_log->addr = log_buf; fw_log->count = 0; fw_log->size = 0; isys->fw_log = fw_log; return 0; } /* The .bound() notifier callback when a match is found */ static int isys_notifier_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_connection *asc) { struct ipu7_isys *isys = container_of(notifier, struct ipu7_isys, notifier); struct sensor_async_sd *s_asd = container_of(asc, struct sensor_async_sd, asc); struct device *dev = &isys->adev->auxdev.dev; int ret; ret = ipu_bridge_instantiate_vcm(sd->dev); if (ret) { dev_err(dev, "instantiate vcm failed\n"); return ret; } dev_info(dev, "bind %s nlanes is %d port is %d\n", sd->name, s_asd->csi2.nlanes, s_asd->csi2.port); isys_complete_ext_device_registration(isys, sd, &s_asd->csi2); return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); } static int isys_notifier_complete(struct v4l2_async_notifier *notifier) { struct ipu7_isys *isys = container_of(notifier, struct ipu7_isys, notifier); dev_info(&isys->adev->auxdev.dev, "All sensor registration completed.\n"); return v4l2_device_register_subdev_nodes(&isys->v4l2_dev); } static const struct v4l2_async_notifier_operations isys_async_ops = { .bound = isys_notifier_bound, .complete = isys_notifier_complete, }; static int isys_notifier_init(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2 = &isys->pdata->ipdata->csi2; struct ipu7_device *isp = isys->adev->isp; struct device *dev = &isp->pdev->dev; unsigned int i; int ret; v4l2_async_nf_init(&isys->notifier, &isys->v4l2_dev); for (i = 0; i < csi2->nports; i++) { struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_UNKNOWN }; struct 
sensor_async_sd *s_asd; struct fwnode_handle *ep; ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0, FWNODE_GRAPH_ENDPOINT_NEXT); if (!ep) continue; ret = v4l2_fwnode_endpoint_parse(ep, &vep); if (ret) goto err_parse; if (vep.bus_type != V4L2_MBUS_CSI2_DPHY && vep.bus_type != V4L2_MBUS_CSI2_CPHY) { ret = -EINVAL; dev_err(dev, "unsupported bus type %d!\n", vep.bus_type); goto err_parse; } s_asd = v4l2_async_nf_add_fwnode_remote(&isys->notifier, ep, struct sensor_async_sd); if (IS_ERR(s_asd)) { ret = PTR_ERR(s_asd); goto err_parse; } s_asd->csi2.port = vep.base.port; s_asd->csi2.nlanes = vep.bus.mipi_csi2.num_data_lanes; s_asd->csi2.bus_type = vep.bus_type; fwnode_handle_put(ep); continue; err_parse: fwnode_handle_put(ep); return ret; } if (list_empty(&isys->notifier.waiting_list)) { /* isys probe could continue with async subdevs missing */ dev_warn(dev, "no subdev found in graph\n"); return 0; } isys->notifier.ops = &isys_async_ops; ret = v4l2_async_nf_register(&isys->notifier); if (ret) { dev_err(dev, "failed to register async notifier(%d)\n", ret); v4l2_async_nf_cleanup(&isys->notifier); } return ret; } static void isys_notifier_cleanup(struct ipu7_isys *isys) { v4l2_async_nf_unregister(&isys->notifier); v4l2_async_nf_cleanup(&isys->notifier); } static void isys_unregister_video_devices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata = &isys->pdata->ipdata->csi2; unsigned int i, j; for (i = 0; i < csi2_pdata->nports; i++) for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) ipu7_isys_video_cleanup(&isys->csi2[i].av[j]); } static int isys_register_video_devices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata = &isys->pdata->ipdata->csi2; unsigned int i, j; int ret; for (i = 0; i < csi2_pdata->nports; i++) { for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { struct ipu7_isys_video *av = &isys->csi2[i].av[j]; snprintf(av->vdev.name, sizeof(av->vdev.name), IPU_ISYS_ENTITY_PREFIX " ISYS Capture %u", i * 
NR_OF_CSI2_SRC_PADS + j); av->isys = isys; av->aq.vbq.buf_struct_size = sizeof(struct ipu7_isys_video_buffer); ret = ipu7_isys_video_init(av); if (ret) goto fail; } } return 0; fail: i = i + 1U; while (i--) { while (j--) ipu7_isys_video_cleanup(&isys->csi2[i].av[j]); j = NR_OF_CSI2_SRC_PADS; } return ret; } static void isys_csi2_unregister_subdevices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2 = &isys->pdata->ipdata->csi2; unsigned int i; for (i = 0; i < csi2->nports; i++) ipu7_isys_csi2_cleanup(&isys->csi2[i]); } static int isys_csi2_register_subdevices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata = &isys->pdata->ipdata->csi2; unsigned int i; int ret; for (i = 0; i < csi2_pdata->nports; i++) { ret = ipu7_isys_csi2_init(&isys->csi2[i], isys, isys->pdata->base + csi2_pdata->offsets[i], i); if (ret) goto fail; } isys->isr_csi2_mask = IPU7_CSI_RX_LEGACY_IRQ_MASK; return 0; fail: while (i--) ipu7_isys_csi2_cleanup(&isys->csi2[i]); return ret; } static int isys_csi2_create_media_links(struct ipu7_isys *isys) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata = &isys->pdata->ipdata->csi2; struct device *dev = &isys->adev->auxdev.dev; struct media_entity *sd; unsigned int i, j; int ret; for (i = 0; i < csi2_pdata->nports; i++) { sd = &isys->csi2[i].asd.sd.entity; for (j = 0; j < NR_OF_CSI2_SRC_PADS; j++) { struct ipu7_isys_video *av = &isys->csi2[i].av[j]; ret = media_create_pad_link(sd, CSI2_PAD_SRC + j, &av->vdev.entity, 0, 0); if (ret) { dev_err(dev, "CSI2 can't create link\n"); return ret; } av->csi2 = &isys->csi2[i]; } } return 0; } #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC static void isys_tpg_unregister_subdevices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_tpg_pdata *tpg_pdata = &isys->pdata->ipdata->tpg; unsigned int i; if (!isys->tpg) return; for (i = 0; i < tpg_pdata->ntpgs; i++) ipu7_isys_tpg_cleanup(&isys->tpg[i]); kfree(isys->tpg); isys->tpg = NULL; } static int 
isys_tpg_register_subdevices(struct ipu7_isys *isys) { const struct ipu7_isys_internal_tpg_pdata *tpg_pdata = &isys->pdata->ipdata->tpg; unsigned int i; int ret; isys->tpg = kcalloc(tpg_pdata->ntpgs, sizeof(*isys->tpg), GFP_KERNEL); if (!isys->tpg) return -ENOMEM; for (i = 0; i < tpg_pdata->ntpgs; i++) { ret = ipu7_isys_tpg_init(&isys->tpg[i], isys, isys->pdata->base + tpg_pdata->offsets[i], tpg_pdata->sels ? (isys->pdata->base + tpg_pdata->sels[i]) : NULL, i); if (ret) goto fail; } return 0; fail: while (i--) ipu7_isys_tpg_cleanup(&isys->tpg[i]); kfree(isys->tpg); isys->tpg = NULL; return ret; } static int isys_tpg_create_media_links(struct ipu7_isys *isys) { const struct ipu7_isys_internal_tpg_pdata *tpg_pdata = &isys->pdata->ipdata->tpg; struct device *dev = &isys->adev->auxdev.dev; struct ipu7_isys_tpg *tpg; struct media_entity *sd; unsigned int i; int ret; for (i = 0; i < tpg_pdata->ntpgs; i++) { tpg = &isys->tpg[i]; sd = &tpg->asd.sd.entity; tpg->av = &isys->csi2[tpg->index].av[0]; ret = media_create_pad_link(sd, TPG_PAD_SOURCE, &tpg->av->vdev.entity, TPG_PAD_SOURCE, 0); if (ret) { dev_err(dev, "TPG can't create link\n"); return ret; } } return 0; } #endif static int isys_register_devices(struct ipu7_isys *isys) { struct device *dev = &isys->adev->auxdev.dev; struct pci_dev *pdev = isys->adev->isp->pdev; int ret; media_device_pci_init(&isys->media_dev, pdev, IPU_MEDIA_DEV_MODEL_NAME); strscpy(isys->v4l2_dev.name, isys->media_dev.model, sizeof(isys->v4l2_dev.name)); ret = media_device_register(&isys->media_dev); if (ret < 0) goto out_media_device_unregister; isys->v4l2_dev.mdev = &isys->media_dev; isys->v4l2_dev.ctrl_handler = NULL; ret = v4l2_device_register(dev, &isys->v4l2_dev); if (ret < 0) goto out_media_device_unregister; ret = isys_register_video_devices(isys); if (ret) goto out_v4l2_device_unregister; ret = isys_csi2_register_subdevices(isys); if (ret) goto out_video_unregister_device; ret = isys_csi2_create_media_links(isys); if (ret) goto 
out_csi2_unregister_subdevices; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC ret = isys_tpg_register_subdevices(isys); if (!ret) ret = isys_tpg_create_media_links(isys); if (ret) goto out_tpg_unregister_subdevices; #endif ret = isys_notifier_init(isys); if (ret) #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC goto out_tpg_unregister_subdevices; #else goto out_csi2_unregister_subdevices; #endif #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC ret = v4l2_device_register_subdev_nodes(&isys->v4l2_dev); if (ret) goto out_isys_notifier_cleanup; #endif return 0; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC out_isys_notifier_cleanup: isys_notifier_cleanup(isys); out_tpg_unregister_subdevices: isys_tpg_unregister_subdevices(isys); #endif out_csi2_unregister_subdevices: isys_csi2_unregister_subdevices(isys); out_video_unregister_device: isys_unregister_video_devices(isys); out_v4l2_device_unregister: v4l2_device_unregister(&isys->v4l2_dev); out_media_device_unregister: media_device_unregister(&isys->media_dev); media_device_cleanup(&isys->media_dev); dev_err(dev, "failed to register isys devices\n"); return ret; } static void isys_unregister_devices(struct ipu7_isys *isys) { isys_unregister_video_devices(isys); isys_csi2_unregister_subdevices(isys); #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC isys_tpg_unregister_subdevices(isys); #endif v4l2_device_unregister(&isys->v4l2_dev); media_device_unregister(&isys->media_dev); media_device_cleanup(&isys->media_dev); } static void enable_csi2_legacy_irq(struct ipu7_isys *isys, bool enable) { u32 offset = IS_IO_CSI2_LEGACY_IRQ_CTRL_BASE; void __iomem *base = isys->pdata->base; u32 mask = isys->isr_csi2_mask; if (!enable) { writel(mask, base + offset + IRQ_CTL_CLEAR); writel(0, base + offset + IRQ_CTL_ENABLE); return; } writel(mask, base + offset + IRQ_CTL_EDGE); writel(mask, base + offset + IRQ_CTL_CLEAR); writel(mask, base + offset + IRQ_CTL_MASK); writel(mask, base + offset + IRQ_CTL_ENABLE); } static void enable_to_sw_irq(struct ipu7_isys *isys, bool enable) { void __iomem *base = 
isys->pdata->base; u32 mask = IS_UC_TO_SW_IRQ_MASK; u32 offset = IS_UC_CTRL_BASE; if (!enable) { writel(0, base + offset + TO_SW_IRQ_CNTL_ENABLE); return; } writel(mask, base + offset + TO_SW_IRQ_CNTL_CLEAR); writel(mask, base + offset + TO_SW_IRQ_CNTL_MASK_N); writel(mask, base + offset + TO_SW_IRQ_CNTL_ENABLE); } void ipu7_isys_setup_hw(struct ipu7_isys *isys) { u32 offset; void __iomem *base = isys->pdata->base; /* soft reset */ offset = IS_IO_GPREGS_BASE; writel(0x0, base + offset + CLK_EN_TXCLKESC); /* Update if ISYS freq updated (0: 400/1, 1:400/2, 63:400/64) */ writel(0x0, base + offset + CLK_DIV_FACTOR_IS_CLK); /* correct the initial printf configuration */ writel(0x200, base + IS_UC_CTRL_BASE + PRINTF_AXI_CNTL); enable_to_sw_irq(isys, 1); enable_csi2_legacy_irq(isys, 1); } static void isys_cleanup_hw(struct ipu7_isys *isys) { enable_csi2_legacy_irq(isys, 0); enable_to_sw_irq(isys, 0); } static int isys_runtime_pm_resume(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev); struct ipu7_device *isp = adev->isp; unsigned long flags; int ret; if (!isys) return 0; ret = ipu7_mmu_hw_init(adev->mmu); if (ret) return ret; cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE); ret = ipu_buttress_start_tsc_sync(isp); if (ret) return ret; spin_lock_irqsave(&isys->power_lock, flags); isys->power = 1; spin_unlock_irqrestore(&isys->power_lock, flags); return 0; } static int isys_runtime_pm_suspend(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev); unsigned long flags; if (!isys) return 0; isys_cleanup_hw(isys); spin_lock_irqsave(&isys->power_lock, flags); isys->power = 0; spin_unlock_irqrestore(&isys->power_lock, flags); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET mutex_lock(&isys->reset_mutex); isys->need_reset = false; mutex_unlock(&isys->reset_mutex); #endif cpu_latency_qos_update_request(&isys->pm_qos, 
PM_QOS_DEFAULT_VALUE); ipu7_mmu_hw_cleanup(adev->mmu); return 0; } static int isys_suspend(struct device *dev) { struct ipu7_isys *isys = dev_get_drvdata(dev); /* If stream is open, refuse to suspend */ if (isys->stream_opened) return -EBUSY; return 0; } static int isys_resume(struct device *dev) { return 0; } static const struct dev_pm_ops isys_pm_ops = { .runtime_suspend = isys_runtime_pm_suspend, .runtime_resume = isys_runtime_pm_resume, .suspend = isys_suspend, .resume = isys_resume, }; static void isys_remove(struct auxiliary_device *auxdev) { struct ipu7_isys *isys = dev_get_drvdata(&auxdev->dev); struct isys_fw_msgs *fwmsg, *safe; struct ipu7_bus_device *adev = auxdev_to_adev(auxdev); #ifdef CONFIG_DEBUG_FS if (adev->isp->ipu7_dir) debugfs_remove_recursive(isys->debugfsdir); #endif for (int i = 0; i < IPU_ISYS_MAX_STREAMS; i++) mutex_destroy(&isys->streams[i].mutex); list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) ipu7_dma_free(adev, sizeof(struct isys_fw_msgs), fwmsg, fwmsg->dma_addr, 0); list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) ipu7_dma_free(adev, sizeof(struct isys_fw_msgs), fwmsg, fwmsg->dma_addr, 0); isys_notifier_cleanup(isys); isys_unregister_devices(isys); cpu_latency_qos_remove_request(&isys->pm_qos); mutex_destroy(&isys->stream_mutex); mutex_destroy(&isys->mutex); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET mutex_destroy(&isys->reset_mutex); #endif } #ifdef CONFIG_DEBUG_FS static ssize_t fwlog_read(struct file *file, char __user *userbuf, size_t size, loff_t *pos) { struct ipu7_isys *isys = file->private_data; struct isys_fw_log *fw_log = isys->fw_log; struct device *dev = &isys->adev->auxdev.dev; u32 log_size; int ret = 0; void *buf; if (!fw_log) return 0; buf = kvzalloc(FW_LOG_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&fw_log->mutex); if (!fw_log->size) { dev_warn(dev, "no available fw log\n"); mutex_unlock(&fw_log->mutex); goto free_and_return; } if (fw_log->size > FW_LOG_BUF_SIZE) 
log_size = FW_LOG_BUF_SIZE; else log_size = fw_log->size; memcpy(buf, fw_log->addr, log_size); dev_info(dev, "copy %d bytes fw log to user...\n", log_size); mutex_unlock(&fw_log->mutex); ret = simple_read_from_buffer(userbuf, size, pos, buf, log_size); free_and_return: kvfree(buf); return ret; } static const struct file_operations isys_fw_log_fops = { .open = simple_open, .owner = THIS_MODULE, .read = fwlog_read, .llseek = default_llseek, }; static int ipu7_isys_init_debugfs(struct ipu7_isys *isys) { struct dentry *file; struct dentry *dir; dir = debugfs_create_dir("isys", isys->adev->isp->ipu7_dir); if (IS_ERR(dir)) return -ENOMEM; file = debugfs_create_file("fwlog", 0400, dir, isys, &isys_fw_log_fops); if (IS_ERR(file)) goto err; isys->debugfsdir = dir; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } #endif static int alloc_fw_msg_bufs(struct ipu7_isys *isys, int amount) { struct ipu7_bus_device *adev = isys->adev; struct isys_fw_msgs *addr; dma_addr_t dma_addr; unsigned long flags; unsigned int i; for (i = 0; i < amount; i++) { addr = ipu7_dma_alloc(adev, sizeof(struct isys_fw_msgs), &dma_addr, GFP_KERNEL, 0); if (!addr) break; addr->dma_addr = dma_addr; spin_lock_irqsave(&isys->listlock, flags); list_add(&addr->head, &isys->framebuflist); spin_unlock_irqrestore(&isys->listlock, flags); } if (i == amount) return 0; spin_lock_irqsave(&isys->listlock, flags); while (!list_empty(&isys->framebuflist)) { addr = list_first_entry(&isys->framebuflist, struct isys_fw_msgs, head); list_del(&addr->head); spin_unlock_irqrestore(&isys->listlock, flags); ipu7_dma_free(adev, sizeof(struct isys_fw_msgs), addr, addr->dma_addr, 0); spin_lock_irqsave(&isys->listlock, flags); } spin_unlock_irqrestore(&isys->listlock, flags); return -ENOMEM; } struct isys_fw_msgs *ipu7_get_fw_msg_buf(struct ipu7_isys_stream *stream) { struct device *dev = &stream->isys->adev->auxdev.dev; struct ipu7_isys *isys = stream->isys; struct isys_fw_msgs *msg; unsigned long flags; int ret; 
spin_lock_irqsave(&isys->listlock, flags); if (list_empty(&isys->framebuflist)) { spin_unlock_irqrestore(&isys->listlock, flags); dev_dbg(dev, "Frame buffer list empty\n"); ret = alloc_fw_msg_bufs(isys, 5); if (ret < 0) return NULL; spin_lock_irqsave(&isys->listlock, flags); if (list_empty(&isys->framebuflist)) { spin_unlock_irqrestore(&isys->listlock, flags); dev_err(dev, "Frame list empty\n"); return NULL; } } msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head); list_move(&msg->head, &isys->framebuflist_fw); spin_unlock_irqrestore(&isys->listlock, flags); memset(&msg->fw_msg, 0, sizeof(msg->fw_msg)); return msg; } void ipu7_cleanup_fw_msg_bufs(struct ipu7_isys *isys) { struct isys_fw_msgs *fwmsg, *fwmsg0; unsigned long flags; spin_lock_irqsave(&isys->listlock, flags); list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head) list_move(&fwmsg->head, &isys->framebuflist); spin_unlock_irqrestore(&isys->listlock, flags); } void ipu7_put_fw_msg_buf(struct ipu7_isys *isys, uintptr_t data) { struct isys_fw_msgs *msg; void *ptr = (void *)data; unsigned long flags; if (WARN_ON_ONCE(!ptr)) return; spin_lock_irqsave(&isys->listlock, flags); msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy); list_move(&msg->head, &isys->framebuflist); spin_unlock_irqrestore(&isys->listlock, flags); } static int isys_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *auxdev_id) { const struct ipu7_isys_internal_csi2_pdata *csi2_pdata; struct ipu7_bus_device *adev = auxdev_to_adev(auxdev); struct ipu7_device *isp = adev->isp; struct ipu7_isys *isys; int ret = 0; if (!isp->ipu7_bus_ready_to_probe) return -EPROBE_DEFER; isys = devm_kzalloc(&auxdev->dev, sizeof(*isys), GFP_KERNEL); if (!isys) return -ENOMEM; ret = pm_runtime_resume_and_get(&auxdev->dev); if (ret < 0) return ret; adev->auxdrv_data = (const struct ipu7_auxdrv_data *)auxdev_id->driver_data; adev->auxdrv = to_auxiliary_drv(auxdev->dev.driver); isys->adev = adev; isys->pdata 
= adev->pdata; INIT_LIST_HEAD(&isys->requests); csi2_pdata = &isys->pdata->ipdata->csi2; isys->csi2 = devm_kcalloc(&auxdev->dev, csi2_pdata->nports, sizeof(*isys->csi2), GFP_KERNEL); if (!isys->csi2) { ret = -ENOMEM; goto out_runtime_put; } ret = ipu7_mmu_hw_init(adev->mmu); if (ret) goto out_runtime_put; spin_lock_init(&isys->streams_lock); spin_lock_init(&isys->power_lock); isys->power = 0; mutex_init(&isys->mutex); mutex_init(&isys->stream_mutex); #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET mutex_init(&isys->reset_mutex); isys->in_reset = false; #endif spin_lock_init(&isys->listlock); INIT_LIST_HEAD(&isys->framebuflist); INIT_LIST_HEAD(&isys->framebuflist_fw); dev_set_drvdata(&auxdev->dev, isys); isys->icache_prefetch = 0; isys->phy_rext_cal = 0; isys_stream_init(isys); #ifdef CONFIG_DEBUG_FS /* Debug fs failure is not fatal. */ ipu7_isys_init_debugfs(isys); #endif cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE); ret = alloc_fw_msg_bufs(isys, 20); if (ret < 0) goto out_cleanup_isys; ret = ipu7_fw_isys_init(isys); if (ret) goto out_cleanup_isys; ret = isys_register_devices(isys); if (ret) goto out_cleanup_fw; ret = isys_fw_log_init(isys); if (ret) goto out_cleanup; #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET mutex_destroy(&isys->reset_mutex); #endif ipu7_mmu_hw_cleanup(adev->mmu); pm_runtime_put(&auxdev->dev); return 0; out_cleanup: isys_unregister_devices(isys); out_cleanup_fw: ipu7_fw_isys_release(isys); out_cleanup_isys: cpu_latency_qos_remove_request(&isys->pm_qos); for (unsigned int i = 0; i < IPU_ISYS_MAX_STREAMS; i++) mutex_destroy(&isys->streams[i].mutex); mutex_destroy(&isys->mutex); mutex_destroy(&isys->stream_mutex); ipu7_mmu_hw_cleanup(adev->mmu); out_runtime_put: pm_runtime_put(&auxdev->dev); return ret; } struct ipu7_csi2_error { const char *error_string; bool is_info_only; }; /* * Strings corresponding to CSI-2 receiver errors are here. * Corresponding macros are defined in the header file. 
*/ static const struct ipu7_csi2_error dphy_rx_errors[] = { { "Error handler FIFO full", false }, { "Reserved Short Packet encoding detected", true }, { "Reserved Long Packet encoding detected", true }, { "Received packet is too short", false}, { "Received packet is too long", false}, { "Short packet discarded due to errors", false }, { "Long packet discarded due to errors", false }, { "CSI Combo Rx interrupt", false }, { "IDI CDC FIFO overflow(remaining bits are reserved as 0)", false }, { "Received NULL packet", true }, { "Received blanking packet", true }, { "Tie to 0", true }, { } }; static void ipu7_isys_register_errors(struct ipu7_isys_csi2 *csi2) { u32 offset = IS_IO_CSI2_ERR_LEGACY_IRQ_CTL_BASE(csi2->port); u32 status = readl(csi2->base + offset + IRQ_CTL_STATUS); u32 mask = IPU7_CSI_RX_ERROR_IRQ_MASK; if (!status) return; dev_dbg(&csi2->isys->adev->auxdev.dev, "csi2-%u error status 0x%08x\n", csi2->port, status); writel(status & mask, csi2->base + offset + IRQ_CTL_CLEAR); csi2->receiver_errors |= status & mask; } static void ipu7_isys_csi2_error(struct ipu7_isys_csi2 *csi2) { struct ipu7_csi2_error const *errors; unsigned int i; u32 status; /* Register errors once more in case of error interrupts are disabled */ ipu7_isys_register_errors(csi2); status = csi2->receiver_errors; csi2->receiver_errors = 0; errors = dphy_rx_errors; for (i = 0; i < CSI_RX_NUM_ERRORS_IN_IRQ; i++) { if (status & BIT(i)) dev_err_ratelimited(&csi2->isys->adev->auxdev.dev, "csi2-%i error: %s\n", csi2->port, errors[i].error_string); } } struct resp_to_msg { enum ipu7_insys_resp_type type; const char *msg; }; static const struct resp_to_msg is_fw_msg[] = { {IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE, "IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE"}, {IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK, "IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK"}, {IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK, "IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK"}, {IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK, 
"IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK"}, {IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK, "IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK"}, {IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK, "IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK"}, {IPU_INSYS_RESP_TYPE_PIN_DATA_READY, "IPU_INSYS_RESP_TYPE_PIN_DATA_READY"}, {IPU_INSYS_RESP_TYPE_FRAME_SOF, "IPU_INSYS_RESP_TYPE_FRAME_SOF"}, {IPU_INSYS_RESP_TYPE_FRAME_EOF, "IPU_INSYS_RESP_TYPE_FRAME_EOF"}, {IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE, "IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE"}, {IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE, "IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE"}, {N_IPU_INSYS_RESP_TYPE, "N_IPU_INSYS_RESP_TYPE"}, }; int isys_isr_one(struct ipu7_bus_device *adev) { struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev); struct ipu7_isys_stream *stream = NULL; struct device *dev = &adev->auxdev.dev; struct ipu7_isys_csi2 *csi2 = NULL; struct ia_gofo_msg_err err_info; struct ipu7_insys_resp *resp; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC struct ipu7_isys_tpg *tpg = NULL; #endif u64 ts; if (!isys->adev->syscom) return 1; #ifdef ENABLE_FW_OFFLINE_LOGGER ipu7_fw_isys_get_log(isys); #endif resp = ipu7_fw_isys_get_resp(isys); if (!resp) return 1; if (resp->type >= N_IPU_INSYS_RESP_TYPE) { dev_err(dev, "Unknown response type %u stream %u\n", resp->type, resp->stream_id); ipu7_fw_isys_put_resp(isys); return 1; } err_info = resp->error_info; ts = ((u64)resp->timestamp[1] << 32) | resp->timestamp[0]; if (err_info.err_group == INSYS_MSG_ERR_GROUP_CAPTURE && err_info.err_code == INSYS_MSG_ERR_CAPTURE_SYNC_FRAME_DROP) { /* receive a sp w/o command, firmware drop it */ dev_dbg(dev, "FRAME DROP: %02u %s stream %u\n", resp->type, is_fw_msg[resp->type].msg, resp->stream_id); dev_dbg(dev, "\tpin %u buf_id %llx frame %u\n", resp->pin_id, resp->buf_id, resp->frame_id); dev_dbg(dev, "\terror group %u code %u details [%u %u]\n", err_info.err_group, err_info.err_code, err_info.err_detail[0], err_info.err_detail[1]); } else if (!IA_GOFO_MSG_ERR_IS_OK(err_info)) { 
dev_err(dev, "%02u %s stream %u pin %u buf_id %llx frame %u\n", resp->type, is_fw_msg[resp->type].msg, resp->stream_id, resp->pin_id, resp->buf_id, resp->frame_id); dev_err(dev, "\terror group %u code %u details [%u %u]\n", err_info.err_group, err_info.err_code, err_info.err_detail[0], err_info.err_detail[1]); } else { dev_dbg(dev, "%02u %s stream %u pin %u buf_id %llx frame %u\n", resp->type, is_fw_msg[resp->type].msg, resp->stream_id, resp->pin_id, resp->buf_id, resp->frame_id); dev_dbg(dev, "\tts %llu\n", ts); } if (resp->stream_id >= IPU_ISYS_MAX_STREAMS) { dev_err(dev, "bad stream handle %u\n", resp->stream_id); goto leave; } stream = ipu7_isys_query_stream_by_handle(isys, resp->stream_id); if (!stream) { dev_err(dev, "stream of stream_handle %u is unused\n", resp->stream_id); goto leave; } stream->error = err_info.err_code; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC if (stream->asd) { if (stream->asd->is_tpg) tpg = ipu7_isys_subdev_to_tpg(stream->asd); else csi2 = ipu7_isys_subdev_to_csi2(stream->asd); } #else if (stream->asd) csi2 = ipu7_isys_subdev_to_csi2(stream->asd); #endif switch (resp->type) { case IPU_INSYS_RESP_TYPE_STREAM_OPEN_DONE: complete(&stream->stream_open_completion); break; case IPU_INSYS_RESP_TYPE_STREAM_CLOSE_ACK: complete(&stream->stream_close_completion); break; case IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK: complete(&stream->stream_start_completion); break; case IPU_INSYS_RESP_TYPE_STREAM_ABORT_ACK: complete(&stream->stream_stop_completion); break; case IPU_INSYS_RESP_TYPE_STREAM_FLUSH_ACK: complete(&stream->stream_stop_completion); break; case IPU_INSYS_RESP_TYPE_PIN_DATA_READY: /* * firmware only release the capture msg until software * get pin_data_ready event */ ipu7_put_fw_msg_buf(ipu7_bus_get_drvdata(adev), resp->buf_id); if (resp->pin_id < IPU_INSYS_OUTPUT_PINS && stream->output_pins[resp->pin_id].pin_ready) stream->output_pins[resp->pin_id].pin_ready(stream, resp); else dev_err(dev, "No handler for pin %u ready\n", 
resp->pin_id); if (csi2) ipu7_isys_csi2_error(csi2); break; case IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_ACK: break; case IPU_INSYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE: case IPU_INSYS_RESP_TYPE_STREAM_CAPTURE_DONE: break; case IPU_INSYS_RESP_TYPE_FRAME_SOF: if (csi2) ipu7_isys_csi2_sof_event_by_stream(stream); #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC if (tpg) ipu7_isys_tpg_sof_event_by_stream(stream); #endif stream->seq[stream->seq_index].sequence = atomic_read(&stream->sequence) - 1U; stream->seq[stream->seq_index].timestamp = ts; dev_dbg(dev, "SOF: stream %u frame %u (index %u), ts 0x%16.16llx\n", resp->stream_id, resp->frame_id, stream->seq[stream->seq_index].sequence, ts); stream->seq_index = (stream->seq_index + 1U) % IPU_ISYS_MAX_PARALLEL_SOF; break; case IPU_INSYS_RESP_TYPE_FRAME_EOF: if (csi2) ipu7_isys_csi2_eof_event_by_stream(stream); #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC if (tpg) ipu7_isys_tpg_eof_event_by_stream(stream); #endif dev_dbg(dev, "eof: stream %d(index %u) ts 0x%16.16llx\n", resp->stream_id, stream->seq[stream->seq_index].sequence, ts); break; default: dev_err(dev, "Unknown response type %u stream %u\n", resp->type, resp->stream_id); break; } ipu7_isys_put_stream(stream); leave: ipu7_fw_isys_put_resp(isys); return 0; } static void ipu7_isys_csi2_isr(struct ipu7_isys_csi2 *csi2) { struct device *dev = &csi2->isys->adev->auxdev.dev; struct ipu7_device *isp = csi2->isys->adev->isp; struct ipu7_isys_stream *s; u32 sync, offset; u32 fe = 0; u8 vc; ipu7_isys_register_errors(csi2); offset = IS_IO_CSI2_SYNC_LEGACY_IRQ_CTL_BASE(csi2->port); sync = readl(csi2->base + offset + IRQ_CTL_STATUS); writel(sync, csi2->base + offset + IRQ_CTL_CLEAR); dev_dbg(dev, "csi2-%u sync status 0x%08x\n", csi2->port, sync); if (!is_ipu7(isp->hw_ver)) { fe = readl(csi2->base + offset + IRQ1_CTL_STATUS); writel(fe, csi2->base + offset + IRQ1_CTL_CLEAR); dev_dbg(dev, "csi2-%u FE status 0x%08x\n", csi2->port, fe); } for (vc = 0; vc < NR_OF_CSI2_VC && (sync || fe); vc++) { s = 
ipu7_isys_query_stream_by_source(csi2->isys, csi2->asd.source, vc); if (!s) continue; if (!is_ipu7(isp->hw_ver)) { if (sync & IPU7P5_CSI_RX_SYNC_FS_VC & (1U << vc)) ipu7_isys_csi2_sof_event_by_stream(s); if (fe & IPU7P5_CSI_RX_SYNC_FE_VC & (1U << vc)) ipu7_isys_csi2_eof_event_by_stream(s); } else { if (sync & IPU7_CSI_RX_SYNC_FS_VC & (1U << (vc * 2))) ipu7_isys_csi2_sof_event_by_stream(s); if (sync & IPU7_CSI_RX_SYNC_FE_VC & (2U << (vc * 2))) ipu7_isys_csi2_eof_event_by_stream(s); } } } static irqreturn_t isys_isr(struct ipu7_bus_device *adev) { struct ipu7_isys *isys = ipu7_bus_get_drvdata(adev); u32 status_csi, status_sw, csi_offset, sw_offset; struct device *dev = &isys->adev->auxdev.dev; void __iomem *base = isys->pdata->base; spin_lock(&isys->power_lock); if (!isys->power) { spin_unlock(&isys->power_lock); return IRQ_NONE; } csi_offset = IS_IO_CSI2_LEGACY_IRQ_CTRL_BASE; sw_offset = IS_BASE; status_csi = readl(base + csi_offset + IRQ_CTL_STATUS); status_sw = readl(base + sw_offset + TO_SW_IRQ_CNTL_STATUS); if (!status_csi && !status_sw) { spin_unlock(&isys->power_lock); return IRQ_NONE; } if (status_csi) dev_dbg(dev, "status csi 0x%08x\n", status_csi); if (status_sw) dev_dbg(dev, "status to_sw 0x%08x\n", status_sw); do { writel(status_sw, base + sw_offset + TO_SW_IRQ_CNTL_CLEAR); writel(status_csi, base + csi_offset + IRQ_CTL_CLEAR); if (isys->isr_csi2_mask & status_csi) { unsigned int i; for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) { /* irq from not enabled port */ if (!isys->csi2[i].base) continue; if (status_csi & isys->csi2[i].legacy_irq_mask) ipu7_isys_csi2_isr(&isys->csi2[i]); } } if (!isys_isr_one(adev)) status_sw = TO_SW_IRQ_FW; else status_sw = 0; status_csi = readl(base + csi_offset + IRQ_CTL_STATUS); status_sw |= readl(base + sw_offset + TO_SW_IRQ_CNTL_STATUS); } while ((status_csi & isys->isr_csi2_mask) || (status_sw & TO_SW_IRQ_FW)); writel(TO_SW_IRQ_MASK, base + sw_offset + TO_SW_IRQ_CNTL_MASK_N); spin_unlock(&isys->power_lock); return 
IRQ_HANDLED; } static const struct ipu7_auxdrv_data ipu7_isys_auxdrv_data = { .isr = isys_isr, .isr_threaded = NULL, .wake_isr_thread = false, }; static const struct auxiliary_device_id ipu7_isys_id_table[] = { { .name = "intel_ipu7.isys", .driver_data = (kernel_ulong_t)&ipu7_isys_auxdrv_data, }, { } }; MODULE_DEVICE_TABLE(auxiliary, ipu7_isys_id_table); static struct auxiliary_driver isys_driver = { .name = IPU_ISYS_NAME, .probe = isys_probe, .remove = isys_remove, .id_table = ipu7_isys_id_table, .driver = { .pm = &isys_pm_ops, }, }; module_auxiliary_driver(isys_driver); MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Tianshu Qiu "); MODULE_AUTHOR("Qingwu Zhang "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu7 input system driver"); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) MODULE_IMPORT_NS("INTEL_IPU7"); MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); #else MODULE_IMPORT_NS(INTEL_IPU7); MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-isys.h000066400000000000000000000106351503071307200264000ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_ISYS_H #define IPU7_ISYS_H #include #include #include #include #include #include #include #include #include #include #include "abi/ipu7_fw_msg_abi.h" #include "abi/ipu7_fw_isys_abi.h" #include "ipu7.h" #include "ipu7-isys-csi2.h" #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC #include "ipu7-isys-tpg.h" #endif #include "ipu7-isys-video.h" #ifdef CONFIG_DEBUG_FS struct dentry; #endif #define IPU_ISYS_ENTITY_PREFIX "Intel IPU7" /* FW support max 16 streams */ #define IPU_ISYS_MAX_STREAMS 16U /* * Current message queue configuration. These must be big enough * so that they never gets full. 
Queues are located in system memory */ #define IPU_ISYS_SIZE_RECV_QUEUE 40U #define IPU_ISYS_SIZE_LOG_QUEUE 256U #define IPU_ISYS_SIZE_SEND_QUEUE 40U #define IPU_ISYS_NUM_RECV_QUEUE 1U #define IPU_ISYS_MIN_WIDTH 2U #define IPU_ISYS_MIN_HEIGHT 2U #define IPU_ISYS_MAX_WIDTH 8160U #define IPU_ISYS_MAX_HEIGHT 8190U #define FW_CALL_TIMEOUT_JIFFIES \ msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS) #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET #define FW_CALL_TIMEOUT_JIFFIES_RESET msecs_to_jiffies(200) #endif struct isys_fw_log { struct mutex mutex; /* protect whole struct */ void *head; void *addr; u32 count; /* running counter of log */ u32 size; /* actual size of log content, in bits */ }; /* * struct ipu7_isys * * @media_dev: Media device * @v4l2_dev: V4L2 device * @adev: ISYS bus device * @power: Is ISYS powered on or not? * @isr_bits: Which bits does the ISR handle? * @power_lock: Serialise access to power (power state in general) * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers * @streams_lock: serialise access to streams * @streams: streams per firmware stream ID * @syscom: fw communication layer context #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET * @need_reset: Isys requires d0i0->i3 transition #endif * @ref_count: total number of callers fw open * @mutex: serialise access isys video open/release related operations * @stream_mutex: serialise stream start and stop, queueing requests * @pdata: platform data pointer * @csi2: CSI-2 receivers #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC * @tpg: test pattern generators #endif */ struct ipu7_isys { struct media_device media_dev; struct v4l2_device v4l2_dev; struct ipu7_bus_device *adev; int power; spinlock_t power_lock; /* Serialise access to power */ u32 isr_csi2_mask; u32 csi2_rx_ctrl_cached; spinlock_t streams_lock; struct ipu7_isys_stream streams[IPU_ISYS_MAX_STREAMS]; int streams_ref_count[IPU_ISYS_MAX_STREAMS]; u32 phy_rext_cal; bool icache_prefetch; bool csi2_cse_ipc_not_supported; unsigned int ref_count; 
unsigned int stream_opened; #ifdef CONFIG_DEBUG_FS struct dentry *debugfsdir; #endif struct mutex mutex; /* Serialise isys video open/release related */ struct mutex stream_mutex; /* Stream start, stop, queueing reqs */ struct ipu7_isys_pdata *pdata; struct ipu7_isys_csi2 *csi2; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC struct ipu7_isys_tpg *tpg; #endif struct isys_fw_log *fw_log; struct list_head requests; struct pm_qos_request pm_qos; spinlock_t listlock; /* Protect framebuflist */ struct list_head framebuflist; struct list_head framebuflist_fw; struct v4l2_async_notifier notifier; struct ipu7_insys_config *subsys_config; dma_addr_t subsys_config_dma_addr; #ifdef CONFIG_VIDEO_INTEL_IPU7_ISYS_RESET struct mutex reset_mutex; bool need_reset; bool in_reset; bool in_reset_stop_streaming; bool in_stop_streaming; #endif }; struct isys_fw_msgs { union { u64 dummy; struct ipu7_insys_buffset frame; struct ipu7_insys_stream_cfg stream; } fw_msg; struct list_head head; dma_addr_t dma_addr; }; struct ipu7_isys_csi2_config { unsigned int nlanes; unsigned int port; enum v4l2_mbus_type bus_type; }; struct sensor_async_sd { struct v4l2_async_connection asc; struct ipu7_isys_csi2_config csi2; }; struct isys_fw_msgs *ipu7_get_fw_msg_buf(struct ipu7_isys_stream *stream); void ipu7_put_fw_msg_buf(struct ipu7_isys *isys, uintptr_t data); void ipu7_cleanup_fw_msg_bufs(struct ipu7_isys *isys); int isys_isr_one(struct ipu7_bus_device *adev); void ipu7_isys_setup_hw(struct ipu7_isys *isys); #endif /* IPU7_ISYS_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-mmu.c000066400000000000000000000517531503071307200262100ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include 
"ipu7-dma.h" #include "ipu7-mmu.h" #include "ipu7-platform-regs.h" #define ISP_PAGE_SHIFT 12 #define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT) #define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1U)) #define ISP_L1PT_SHIFT 22 #define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1)) #define ISP_L2PT_SHIFT 12 #define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK)))) #define ISP_L1PT_PTES 1024U #define ISP_L2PT_PTES 1024U #define ISP_PADDR_SHIFT 12 #define REG_L1_PHYS 0x0004 /* 27-bit pfn */ #define REG_INFO 0x0008 #define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT) #define MMU_TLB_INVALIDATE_TIMEOUT 2000 static __maybe_unused void mmu_irq_handler(struct ipu7_mmu *mmu) { unsigned int i; u32 irq_cause; for (i = 0; i < mmu->nr_mmus; i++) { irq_cause = readl(mmu->mmu_hw[i].base + MMU_REG_IRQ_CAUSE); pr_info("mmu %s irq_cause = 0x%x", mmu->mmu_hw[i].name, irq_cause); writel(0x1ffff, mmu->mmu_hw[i].base + MMU_REG_IRQ_CLEAR); } } static void tlb_invalidate(struct ipu7_mmu *mmu) { unsigned long flags; unsigned int i; int ret; u32 val; spin_lock_irqsave(&mmu->ready_lock, flags); if (!mmu->ready) { spin_unlock_irqrestore(&mmu->ready_lock, flags); return; } for (i = 0; i < mmu->nr_mmus; i++) { writel(0xffffffffU, mmu->mmu_hw[i].base + MMU_REG_INVALIDATE_0); /* Need check with HW, use l1streams or l2streams */ if (mmu->mmu_hw[i].nr_l2streams > 32) writel(0xffffffffU, mmu->mmu_hw[i].base + MMU_REG_INVALIDATE_1); /* * The TLB invalidation is a "single cycle" (IOMMU clock cycles) * When the actual MMIO write reaches the IPU TLB Invalidate * register, wmb() will force the TLB invalidate out if the CPU * attempts to update the IOMMU page table (or sooner). 
*/ wmb(); /* wait invalidation done */ ret = readl_poll_timeout_atomic(mmu->mmu_hw[i].base + MMU_REG_INVALIDATION_STATUS, val, !(val & 0x1U), 500, MMU_TLB_INVALIDATE_TIMEOUT); if (ret) dev_err(mmu->dev, "MMU[%u] TLB invalidate failed\n", i); } spin_unlock_irqrestore(&mmu->ready_lock, flags); } static dma_addr_t map_single(struct ipu7_mmu_info *mmu_info, void *ptr) { dma_addr_t dma; dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(mmu_info->dev, dma)) return 0; return dma; } static int get_dummy_page(struct ipu7_mmu_info *mmu_info) { void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); dma_addr_t dma; if (!pt) return -ENOMEM; dev_dbg(mmu_info->dev, "dummy_page: get_zeroed_page() == %p\n", pt); dma = map_single(mmu_info, pt); if (!dma) { dev_err(mmu_info->dev, "Failed to map dummy page\n"); goto err_free_page; } mmu_info->dummy_page = pt; mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT; return 0; err_free_page: free_page((unsigned long)pt); return -ENOMEM; } static void free_dummy_page(struct ipu7_mmu_info *mmu_info) { dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->dummy_page_pteval), PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->dummy_page); } static int alloc_dummy_l2_pt(struct ipu7_mmu_info *mmu_info) { u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); dma_addr_t dma; unsigned int i; if (!pt) return -ENOMEM; dev_dbg(mmu_info->dev, "dummy_l2: get_zeroed_page() = %p\n", pt); dma = map_single(mmu_info, pt); if (!dma) { dev_err(mmu_info->dev, "Failed to map l2pt page\n"); goto err_free_page; } for (i = 0; i < ISP_L2PT_PTES; i++) pt[i] = mmu_info->dummy_page_pteval; mmu_info->dummy_l2_pt = pt; mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT; return 0; err_free_page: free_page((unsigned long)pt); return -ENOMEM; } static void free_dummy_l2_pt(struct ipu7_mmu_info *mmu_info) { dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval), PAGE_SIZE, 
DMA_BIDIRECTIONAL); free_page((unsigned long)mmu_info->dummy_l2_pt); } static u32 *alloc_l1_pt(struct ipu7_mmu_info *mmu_info) { u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); dma_addr_t dma; unsigned int i; if (!pt) return NULL; dev_dbg(mmu_info->dev, "alloc_l1: get_zeroed_page() = %p\n", pt); for (i = 0; i < ISP_L1PT_PTES; i++) pt[i] = mmu_info->dummy_l2_pteval; dma = map_single(mmu_info, pt); if (!dma) { dev_err(mmu_info->dev, "Failed to map l1pt page\n"); goto err_free_page; } mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT; dev_dbg(mmu_info->dev, "l1 pt %p mapped at %pad\n", pt, &dma); return pt; err_free_page: free_page((unsigned long)pt); return NULL; } static u32 *alloc_l2_pt(struct ipu7_mmu_info *mmu_info) { u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); unsigned int i; if (!pt) return NULL; dev_dbg(mmu_info->dev, "alloc_l2: get_zeroed_page() = %p\n", pt); for (i = 0; i < ISP_L1PT_PTES; i++) pt[i] = mmu_info->dummy_page_pteval; return pt; } static void l2_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova, phys_addr_t dummy, size_t size) { unsigned int l2_entries; unsigned int l2_idx; unsigned long flags; u32 l1_idx; u32 *l2_pt; spin_lock_irqsave(&mmu_info->lock, flags); for (l1_idx = iova >> ISP_L1PT_SHIFT; size > 0U && l1_idx < ISP_L1PT_PTES; l1_idx++) { dev_dbg(mmu_info->dev, "unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n", l1_idx, iova); if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) { dev_err(mmu_info->dev, "unmap not mapped iova 0x%8.8lx l1 index %u\n", iova, l1_idx); continue; } l2_pt = mmu_info->l2_pts[l1_idx]; l2_entries = 0; for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT; size > 0U && l2_idx < ISP_L2PT_PTES; l2_idx++) { phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]); dev_dbg(mmu_info->dev, "unmap l2 index %u with pteval 0x%p\n", l2_idx, &pteval); l2_pt[l2_idx] = mmu_info->dummy_page_pteval; iova += ISP_PAGE_SIZE; size -= ISP_PAGE_SIZE; l2_entries++; } WARN_ON_ONCE(!l2_entries); 
/* Tail of l2_unmap(): flush the final run of cleared L2 PTEs, then release
 * the page-table lock.  (The function head is above this chunk.)
 */
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	/* A non-zero remainder here means the iova range was not fully
	 * covered by the walked L1/L2 entries.
	 */
	WARN_ON_ONCE(size);
	spin_unlock_irqrestore(&mmu_info->lock, flags);
}

/*
 * l2_map() - populate L2 page-table entries for an iova->paddr mapping.
 * @mmu_info:	per-domain page-table state
 * @iova:	IPU virtual address to map (walked per ISP page)
 * @paddr:	physical address to map to (aligned up to ISP_PAGE_SIZE)
 * @size:	number of bytes to map
 *
 * Walks the L1 table; for each L1 slot still pointing at the dummy L2
 * table, allocates and DMA-maps a real L2 page, then fills consecutive
 * L2 PTEs.  Every CPU write to the page tables is made visible to the
 * (non-snooping, presumably - TODO confirm) IPU MMU via
 * clflush_cache_range().
 *
 * Returns 0 on success or a negative errno; on failure any partially
 * established mapping is rolled back via l2_unmap().
 */
static int l2_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
		  phys_addr_t paddr, size_t size)
{
	struct device *dev = mmu_info->dev;
	unsigned int l2_entries;
	u32 *l2_pt, *l2_virt;
	unsigned int l2_idx;
	unsigned long flags;
	size_t mapped = 0;	/* bytes successfully mapped, for unroll */
	dma_addr_t dma;
	u32 l1_entry;
	u32 l1_idx;
	int err = 0;

	spin_lock_irqsave(&mmu_info->lock, flags);

	paddr = ALIGN(paddr, ISP_PAGE_SIZE);
	for (l1_idx = iova >> ISP_L1PT_SHIFT;
	     size && l1_idx < ISP_L1PT_PTES; l1_idx++) {
		dev_dbg(dev,
			"mapping l2 page table for l1 index %u (iova %8.8x)\n",
			l1_idx, (u32)iova);

		l1_entry = mmu_info->l1_pt[l1_idx];
		if (l1_entry == mmu_info->dummy_l2_pteval) {
			/* No real L2 table yet behind this L1 slot. */
			l2_virt = mmu_info->l2_pts[l1_idx];
			if (likely(!l2_virt)) {
				l2_virt = alloc_l2_pt(mmu_info);
				if (!l2_virt) {
					err = -ENOMEM;
					goto error;
				}
			}

			dma = map_single(mmu_info, l2_virt);
			if (!dma) {
				dev_err(dev, "Failed to map l2pt page\n");
				free_page((unsigned long)l2_virt);
				err = -EINVAL;
				goto error;
			}

			/* L1 entries store the L2 table's DMA address
			 * shifted down by ISP_PADDR_SHIFT.
			 */
			l1_entry = dma >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "page for l1_idx %u %p allocated\n",
				l1_idx, l2_virt);
			mmu_info->l1_pt[l1_idx] = l1_entry;
			mmu_info->l2_pts[l1_idx] = l2_virt;

			/* Publish the new L1 entry to the device. */
			clflush_cache_range(&mmu_info->l1_pt[l1_idx],
					    sizeof(mmu_info->l1_pt[l1_idx]));
		}

		l2_pt = mmu_info->l2_pts[l1_idx];
		l2_entries = 0;
		/* Fill consecutive L2 PTEs within this L1 slot. */
		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
		     size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
			l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;

			dev_dbg(dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
				l2_pt[l2_idx]);

			iova += ISP_PAGE_SIZE;
			paddr += ISP_PAGE_SIZE;
			mapped += ISP_PAGE_SIZE;
			size -= ISP_PAGE_SIZE;

			l2_entries++;
		}

		WARN_ON_ONCE(!l2_entries);
		/* Flush exactly the run of PTEs written above. */
		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
				    sizeof(l2_pt[0]) * l2_entries);
	}

	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return 0;

error:
	spin_unlock_irqrestore(&mmu_info->lock, flags);
	/* unroll mapping in case something went wrong */
	if (mapped)
		l2_unmap(mmu_info, iova - mapped, paddr - mapped, mapped);

	return err;
}

/*
 * __ipu7_mmu_map() - page-align the request and delegate to l2_map().
 * Note the start is rounded *down* while the end is aligned *up*, so the
 * mapping covers the whole requested byte range.
 */
static int __ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
			  phys_addr_t paddr, size_t size)
{
	u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
	u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);

	dev_dbg(mmu_info->dev,
		"mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr %pap\n",
		iova_start, iova_end, size, &paddr);

	return l2_map(mmu_info, iova_start, paddr, size);
}

/* Thin wrapper: tear down PTEs for [iova, iova + size). */
static void __ipu7_mmu_unmap(struct ipu7_mmu_info *mmu_info,
			     unsigned long iova, size_t size)
{
	l2_unmap(mmu_info, iova, 0, size);
}

/*
 * allocate_trash_buffer() - reserve an iova range whose every page maps to
 * the single pre-allocated trash page (mmu->trash_page).
 *
 * The range acts as a sink for stray device writes/reads after buffers are
 * torn down.  On success mmu->pci_trash_page and mmu->iova_trash_page are
 * set; on failure everything done so far is undone.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int allocate_trash_buffer(struct ipu7_mmu *mmu)
{
	unsigned int n_pages = PFN_UP(IPU_MMUV2_TRASH_RANGE);
	unsigned long iova_addr;
	struct iova *iova;
	unsigned int i;
	dma_addr_t dma;
	int ret;

	/* Allocate 8MB in iova range */
	iova = alloc_iova(&mmu->dmap->iovad, n_pages,
			  PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
	if (!iova) {
		dev_err(mmu->dev, "cannot allocate iova range for trash\n");
		return -ENOMEM;
	}

	dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
			   PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
		dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
		ret = -ENOMEM;
		goto out_free_iova;
	}

	mmu->pci_trash_page = dma;

	/*
	 * Map the 8MB iova address range to the same physical trash page
	 * mmu->trash_page which is already reserved at the probe
	 */
	iova_addr = iova->pfn_lo;
	for (i = 0; i < n_pages; i++) {
		ret = ipu7_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
				   mmu->pci_trash_page, PAGE_SIZE);
		if (ret) {
			dev_err(mmu->dev,
				"mapping trash buffer range failed\n");
			goto out_unmap;
		}

		iova_addr++;
	}

	mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
	dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
		mmu->mmid, (unsigned int)mmu->iova_trash_page);
	return 0;

out_unmap:
	/* Unmap whatever part of the range was mapped before the failure. */
	ipu7_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
		       PFN_PHYS(iova_size(iova)));
	dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_iova:
	__free_iova(&mmu->dmap->iovad, iova);
	return ret;
}

/*
 * __mmu_at_init() - program the address-translation block of every HW MMU
 * instance: page-table base, info bits, AXI refill id, collapse bitmap,
 * optional arbiter config, IRQ mask/enable, L1/L2 TLB stream block sizes
 * and UAO plane-to-TLB routing.  Values come from the per-SoC
 * ipu7_mmu_hw tables.
 */
static void __mmu_at_init(struct ipu7_mmu *mmu)
{
	struct ipu7_mmu_info *mmu_info;
	unsigned int i;

	mmu_info = mmu->dmap->mmu_info;
	for (i = 0; i < mmu->nr_mmus; i++) {
		struct ipu7_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
		unsigned int j;

		/* Write page table address per MMU */
		writel((phys_addr_t)mmu_info->l1_pt_dma,
		       mmu_hw->base + MMU_REG_PAGE_TABLE_BASE_ADDR);
		dev_dbg(mmu->dev, "mmu %s base was set as %x\n", mmu_hw->name,
			readl(mmu_hw->base + MMU_REG_PAGE_TABLE_BASE_ADDR));

		/* Set info bits and axi_refill per MMU */
		writel(mmu_hw->info_bits,
		       mmu_hw->base + MMU_REG_USER_INFO_BITS);
		writel(mmu_hw->refill, mmu_hw->base + MMU_REG_AXI_REFILL_IF_ID);
		writel(mmu_hw->collapse_en_bitmap,
		       mmu_hw->base + MMU_REG_COLLAPSE_ENABLE_BITMAP);

		dev_dbg(mmu->dev, "mmu %s info_bits was set as %x\n",
			mmu_hw->name,
			readl(mmu_hw->base + MMU_REG_USER_INFO_BITS));

		if (mmu_hw->at_sp_arb_cfg)
			writel(mmu_hw->at_sp_arb_cfg,
			       mmu_hw->base + MMU_REG_AT_SP_ARB_CFG);

		/* default irq configuration */
		writel(0x3ff, mmu_hw->base + MMU_REG_IRQ_MASK);
		writel(0x3ff, mmu_hw->base + MMU_REG_IRQ_ENABLE);

		/* Configure MMU TLB stream configuration for L1/L2 */
		for (j = 0; j < mmu_hw->nr_l1streams; j++) {
			writel(mmu_hw->l1_block_sz[j], mmu_hw->base +
			       mmu_hw->l1_block + 4U * j);
		}

		for (j = 0; j < mmu_hw->nr_l2streams; j++) {
			writel(mmu_hw->l2_block_sz[j], mmu_hw->base +
			       mmu_hw->l2_block + 4U * j);
		}

		/* UAO plane routing; zero table entries are skipped. */
		for (j = 0; j < mmu_hw->uao_p_num; j++) {
			if (!mmu_hw->uao_p2tlb[j])
				continue;
			writel(mmu_hw->uao_p2tlb[j], mmu_hw->uao_base + 4U * j);
		}
	}
}

/*
 * __mmu_zlx_init() - program the ZLX block of every HW MMU instance:
 * AXI pool registers, then per-stream configuration, then per-stream
 * enable.  Zero table entries are skipped (register left at its
 * reset/default value).
 */
static void __mmu_zlx_init(struct ipu7_mmu *mmu)
{
	unsigned int i;

	dev_dbg(mmu->dev, "mmu zlx init\n");

	for (i = 0; i < mmu->nr_mmus; i++) {
		struct ipu7_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
		unsigned int j;

		dev_dbg(mmu->dev, "mmu %s zlx init\n", mmu_hw->name);
		for (j = 0; j < IPU_ZLX_POOL_NUM; j++) {
			if (!mmu_hw->zlx_axi_pool[j])
				continue;
			writel(mmu_hw->zlx_axi_pool[j],
			       mmu_hw->zlx_base + ZLX_REG_AXI_POOL + j * 0x4U);
		}
		for (j = 0; j < mmu_hw->zlx_nr; j++) {
			if (!mmu_hw->zlx_conf[j])
				continue;
			writel(mmu_hw->zlx_conf[j],
			       mmu_hw->zlx_base + ZLX_REG_CONF + j * 0x8U);
		}

		/* Enable only after configuration is in place. */
		for (j = 0; j < mmu_hw->zlx_nr; j++) {
			if (!mmu_hw->zlx_en[j])
				continue;
			writel(mmu_hw->zlx_en[j],
			       mmu_hw->zlx_base + ZLX_REG_EN + j * 0x8U);
		}
	}
}

/*
 * ipu7_mmu_hw_init() - (re)initialise the MMU hardware after power-up.
 *
 * Programs the AT and ZLX blocks, lazily allocates the trash page plus
 * its iova range on first call, then marks the MMU ready under
 * ready_lock.  Return: 0 on success, negative errno otherwise.
 */
int ipu7_mmu_hw_init(struct ipu7_mmu *mmu)
{
	unsigned long flags;

	dev_dbg(mmu->dev, "IPU mmu hardware init\n");

	/* Initialise the each MMU and ZLX */
	__mmu_at_init(mmu);
	__mmu_zlx_init(mmu);

	if (!mmu->trash_page) {
		int ret;

		mmu->trash_page = alloc_page(GFP_KERNEL);
		if (!mmu->trash_page) {
			dev_err(mmu->dev,
				"insufficient memory for trash buffer\n");
			return -ENOMEM;
		}

		ret = allocate_trash_buffer(mmu);
		if (ret) {
			__free_page(mmu->trash_page);
			mmu->trash_page = NULL;
			dev_err(mmu->dev, "trash buffer allocation failed\n");
			return ret;
		}
	}

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = true;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);

	return 0;
}
/* Namespaced export; the symbol namespace became a string in v6.13. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)
EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_init, "INTEL_IPU7");
#else
EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_init, INTEL_IPU7);
#endif

/*
 * ipu7_mmu_alloc() - allocate and initialise a translation domain.
 *
 * Picks the iova aperture based on secure mode (the FW code region is
 * excluded from the non-secure aperture's start / secure aperture's
 * start accordingly), sets the supported page size to 4K, and allocates
 * the dummy page, dummy L2 table, L2 pointer array and L1 page table.
 * Return: the new domain or NULL on allocation failure.
 */
static struct ipu7_mmu_info *ipu7_mmu_alloc(struct ipu7_device *isp)
{
	struct ipu7_mmu_info *mmu_info;
	int ret;

	mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
	if (!mmu_info)
		return NULL;

	if (isp->secure_mode) {
		mmu_info->aperture_start = IPU_FW_CODE_REGION_END;
		mmu_info->aperture_end =
			(dma_addr_t)DMA_BIT_MASK(IPU_MMU_ADDR_BITS);
	} else {
		mmu_info->aperture_start = IPU_FW_CODE_REGION_START;
		mmu_info->aperture_end =
			(dma_addr_t)DMA_BIT_MASK(IPU_MMU_ADDR_BITS_NON_SECURE);
	}

	mmu_info->pgsize_bitmap = SZ_4K;
	mmu_info->dev = &isp->pdev->dev;

	ret = get_dummy_page(mmu_info);
	if (ret)
		goto err_free_info;

	ret = alloc_dummy_l2_pt(mmu_info);
	if (ret)
		goto err_free_dummy_page;

	mmu_info->l2_pts = vzalloc(ISP_L2PT_PTES * sizeof(*mmu_info->l2_pts));
	if (!mmu_info->l2_pts)
		goto err_free_dummy_l2_pt;

	/*
	 * We always map the L1 page table (a single page as well as
	 * the L2 page tables).
	 */
	mmu_info->l1_pt = alloc_l1_pt(mmu_info);
	if (!mmu_info->l1_pt)
		goto err_free_l2_pts;

	spin_lock_init(&mmu_info->lock);

	dev_dbg(mmu_info->dev, "domain initialised\n");

	return mmu_info;

err_free_l2_pts:
	vfree(mmu_info->l2_pts);
err_free_dummy_l2_pt:
	free_dummy_l2_pt(mmu_info);
err_free_dummy_page:
	free_dummy_page(mmu_info);
err_free_info:
	kfree(mmu_info);

	return NULL;
}

/*
 * ipu7_mmu_hw_cleanup() - mark the MMU not ready (e.g. before power-down).
 * Only flips the ready flag under ready_lock; no hardware access here.
 */
void ipu7_mmu_hw_cleanup(struct ipu7_mmu *mmu)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->ready_lock, flags);
	mmu->ready = false;
	spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)
EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_cleanup, "INTEL_IPU7")
#else
EXPORT_SYMBOL_NS_GPL(ipu7_mmu_hw_cleanup, INTEL_IPU7);
#endif

/*
 * alloc_dma_mapping() - create the ipu7_dma_mapping (domain + iova
 * allocator) for this device.  Takes an iova_cache reference that
 * ipu7_mmu_cleanup() drops.  Return: the mapping or NULL on failure.
 */
static struct ipu7_dma_mapping *alloc_dma_mapping(struct ipu7_device *isp)
{
	struct ipu7_dma_mapping *dmap;
	unsigned long base_pfn;

	dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
	if (!dmap)
		return NULL;

	dmap->mmu_info = ipu7_mmu_alloc(isp);
	if (!dmap->mmu_info) {
		kfree(dmap);
		return NULL;
	}

	/* 0~64M is forbidden for uctile controller */
	base_pfn = max_t(unsigned long, 1,
			 PFN_DOWN(dmap->mmu_info->aperture_start));
	init_iova_domain(&dmap->iovad, SZ_4K, base_pfn);
	dmap->mmu_info->dmap = dmap;

	dev_dbg(&isp->pdev->dev, "alloc mapping\n");

	iova_cache_get();

	return dmap;
}

/*
 * ipu7_mmu_iova_to_phys() - translate an iova via the software page
 * tables.  NOTE(review): assumes the iova is currently mapped (the L2
 * table pointer is dereferenced unconditionally) — confirm callers
 * guarantee this.
 */
phys_addr_t ipu7_mmu_iova_to_phys(struct ipu7_mmu_info *mmu_info,
				  dma_addr_t iova)
{
	phys_addr_t phy_addr;
	unsigned long flags;
	u32 *l2_pt;

	spin_lock_irqsave(&mmu_info->lock, flags);
	l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
	phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
	phy_addr <<= ISP_PAGE_SHIFT;
	spin_unlock_irqrestore(&mmu_info->lock, flags);

	return phy_addr;
}

/*
 * ipu7_mmu_unmap() - public unmap entry point.  Validates that iova and
 * size are aligned to the smallest supported page before delegating to
 * __ipu7_mmu_unmap(); misaligned requests are logged and dropped.
 */
void ipu7_mmu_unmap(struct ipu7_mmu_info *mmu_info, unsigned long iova,
		    size_t size)
{
	unsigned int min_pagesz;

	dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size);

	/* find out the minimum page size supported */
	min_pagesz = 1U << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * The virtual address and the size of the mapping must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
			iova, size, min_pagesz);
		return;
	}

	__ipu7_mmu_unmap(mmu_info, iova, size);
}

/*
 * ipu7_mmu_map() - public map entry point.  Checks that iova, paddr and
 * size are all aligned to the smallest supported page, then delegates to
 * __ipu7_mmu_map().  Return: 0 on success or a negative errno.
 */
int ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	unsigned int min_pagesz;

	if (mmu_info->pgsize_bitmap == 0UL)
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1U << __ffs(mmu_info->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		dev_err(mmu_info->dev,
			"unaligned: iova %lx pa %pa size %zx min_pagesz %x\n",
			iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	return __ipu7_mmu_map(mmu_info, iova, paddr, size);
}

/*
 * ipu7_mmu_destroy() - tear down a domain: release the trash-buffer
 * mapping, DMA-unmap and free every real L2 table, then the dummy
 * page/table, the L1 table and the mmu_info itself.
 */
static void ipu7_mmu_destroy(struct ipu7_mmu *mmu)
{
	struct ipu7_dma_mapping *dmap = mmu->dmap;
	struct ipu7_mmu_info *mmu_info = dmap->mmu_info;
	struct iova *iova;
	u32 l1_idx;

	if (mmu->iova_trash_page) {
		iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
		if (iova) {
			/* unmap and free the trash buffer iova */
			ipu7_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
				       PFN_PHYS(iova_size(iova)));
			__free_iova(&dmap->iovad, iova);
		} else {
			dev_err(mmu->dev, "trash buffer iova not found.\n");
		}

		mmu->iova_trash_page = 0;
		dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		mmu->pci_trash_page = 0;
		__free_page(mmu->trash_page);
	}

	/* Any L1 slot not pointing at the dummy L2 table owns a real one. */
	for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
		if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
			dma_unmap_single(mmu_info->dev,
					 TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
					 PAGE_SIZE, DMA_BIDIRECTIONAL);
			free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
		}
	}

	vfree(mmu_info->l2_pts);
	free_dummy_page(mmu_info);
	/* l1_pt_dma stores the shifted address; TBL_PHYS_ADDR restores it. */
	dma_unmap_single(mmu_info->dev, TBL_PHYS_ADDR(mmu_info->l1_pt_dma),
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)mmu_info->dummy_l2_pt);
	free_page((unsigned long)mmu_info->l1_pt);
	kfree(mmu_info);
}

/*
 * ipu7_mmu_init() - build the ipu7_mmu object for one subsystem (IS/PS).
 * @dev:	PCI device (used for devm allocations and drvdata lookup)
 * @base:	ioremapped MMIO base the per-MMU offsets are relative to
 * @mmid:	MMU id (ISYS_MMID / PSYS_MMID)
 * @hw:		per-SoC hardware description table
 *
 * Copies the hw MMU descriptors into devm-allocated pdata, resolving the
 * AT/ZLX/UAO register bases from @base, then creates the DMA mapping
 * (domain + iova allocator).  Return: the new mmu or an ERR_PTR.
 */
struct ipu7_mmu *ipu7_mmu_init(struct device *dev,
			       void __iomem *base, int mmid,
			       const struct ipu7_hw_variants *hw)
{
	struct ipu7_device *isp = pci_get_drvdata(to_pci_dev(dev));
	struct ipu7_mmu_pdata *pdata;
	struct ipu7_mmu *mmu;
	unsigned int i;

	if (hw->nr_mmus > IPU_MMU_MAX_NUM)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < hw->nr_mmus; i++) {
		struct ipu7_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
		const struct ipu7_mmu_hw *src_mmu = &hw->mmu_hw[i];

		if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS ||
		    src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS)
			return ERR_PTR(-EINVAL);

		*pdata_mmu = *src_mmu;
		pdata_mmu->base = base + src_mmu->offset;
		pdata_mmu->zlx_base = base + src_mmu->zlx_offset;
		pdata_mmu->uao_base = base + src_mmu->uao_offset;
	}

	mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->mmid = mmid;
	mmu->mmu_hw = pdata->mmu_hw;
	mmu->nr_mmus = hw->nr_mmus;
	mmu->tlb_invalidate = tlb_invalidate;
	mmu->ready = false;
	INIT_LIST_HEAD(&mmu->vma_list);
	spin_lock_init(&mmu->ready_lock);

	mmu->dmap = alloc_dma_mapping(isp);
	if (!mmu->dmap) {
		dev_err(dev, "can't alloc dma mapping\n");
		return ERR_PTR(-ENOMEM);
	}

	return mmu;
}

/*
 * ipu7_mmu_cleanup() - counterpart of ipu7_mmu_init(): destroys the
 * domain, drops the iova_cache reference taken in alloc_dma_mapping()
 * and frees the mapping.  The mmu struct itself is devm-managed.
 */
void ipu7_mmu_cleanup(struct ipu7_mmu *mmu)
{
	struct ipu7_dma_mapping *dmap = mmu->dmap;

	ipu7_mmu_destroy(mmu);
	mmu->dmap = NULL;
	iova_cache_put();
	put_iova_domain(&dmap->iovad);
	kfree(dmap);
}
ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-mmu.h000066400000000000000000000301441503071307200262040ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_MMU_H #define IPU7_MMU_H #include #include #include #include struct device; struct page; struct ipu7_hw_variants; struct ipu7_mmu; struct ipu7_mmu_info; #define ISYS_MMID 1 #define PSYS_MMID 0 /* IPU7 for LNL */ /* IS MMU Cmd RD */ #define IPU7_IS_MMU_FW_RD_OFFSET 0x274000 #define IPU7_IS_MMU_FW_RD_STREAM_NUM 3 #define IPU7_IS_MMU_FW_RD_L1_BLOCKNR_REG 0x54 #define IPU7_IS_MMU_FW_RD_L2_BLOCKNR_REG 0x60 /* IS MMU Cmd WR */ #define IPU7_IS_MMU_FW_WR_OFFSET 0x275000 #define IPU7_IS_MMU_FW_WR_STREAM_NUM 3 #define IPU7_IS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU7_IS_MMU_FW_WR_L2_BLOCKNR_REG 0x60 /* IS MMU Data WR Snoop */ #define IPU7_IS_MMU_M0_OFFSET 0x276000 #define IPU7_IS_MMU_M0_STREAM_NUM 8 #define IPU7_IS_MMU_M0_L1_BLOCKNR_REG 0x54 #define IPU7_IS_MMU_M0_L2_BLOCKNR_REG 0x74 /* IS MMU Data WR ISOC */ #define IPU7_IS_MMU_M1_OFFSET 0x277000 #define IPU7_IS_MMU_M1_STREAM_NUM 16 #define IPU7_IS_MMU_M1_L1_BLOCKNR_REG 0x54 #define IPU7_IS_MMU_M1_L2_BLOCKNR_REG 0x94 /* PS MMU FW RD */ #define IPU7_PS_MMU_FW_RD_OFFSET 0x148000 #define IPU7_PS_MMU_FW_RD_STREAM_NUM 20 #define IPU7_PS_MMU_FW_RD_L1_BLOCKNR_REG 0x54 #define IPU7_PS_MMU_FW_RD_L2_BLOCKNR_REG 0xa4 /* PS MMU FW WR */ #define IPU7_PS_MMU_FW_WR_OFFSET 0x149000 #define IPU7_PS_MMU_FW_WR_STREAM_NUM 10 #define IPU7_PS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU7_PS_MMU_FW_WR_L2_BLOCKNR_REG 0x7c /* PS MMU FW Data RD VC0 */ #define IPU7_PS_MMU_SRT_RD_OFFSET 0x14a000 #define IPU7_PS_MMU_SRT_RD_STREAM_NUM 40 #define IPU7_PS_MMU_SRT_RD_L1_BLOCKNR_REG 0x54 #define IPU7_PS_MMU_SRT_RD_L2_BLOCKNR_REG 0xf4 /* PS MMU FW Data WR VC0 */ #define IPU7_PS_MMU_SRT_WR_OFFSET 0x14b000 #define IPU7_PS_MMU_SRT_WR_STREAM_NUM 40 #define IPU7_PS_MMU_SRT_WR_L1_BLOCKNR_REG 0x54 
#define IPU7_PS_MMU_SRT_WR_L2_BLOCKNR_REG 0xf4 /* IS UAO UC RD */ #define IPU7_IS_UAO_UC_RD_OFFSET 0x27c000 #define IPU7_IS_UAO_UC_RD_PLANENUM 4 /* IS UAO UC WR */ #define IPU7_IS_UAO_UC_WR_OFFSET 0x27d000 #define IPU7_IS_UAO_UC_WR_PLANENUM 4 /* IS UAO M0 WR */ #define IPU7_IS_UAO_M0_WR_OFFSET 0x27e000 #define IPU7_IS_UAO_M0_WR_PLANENUM 8 /* IS UAO M1 WR */ #define IPU7_IS_UAO_M1_WR_OFFSET 0x27f000 #define IPU7_IS_UAO_M1_WR_PLANENUM 16 /* PS UAO FW RD */ #define IPU7_PS_UAO_FW_RD_OFFSET 0x156000 #define IPU7_PS_UAO_FW_RD_PLANENUM 20 /* PS UAO FW WR */ #define IPU7_PS_UAO_FW_WR_OFFSET 0x157000 #define IPU7_PS_UAO_FW_WR_PLANENUM 16 /* PS UAO SRT RD */ #define IPU7_PS_UAO_SRT_RD_OFFSET 0x154000 #define IPU7_PS_UAO_SRT_RD_PLANENUM 40 /* PS UAO SRT WR */ #define IPU7_PS_UAO_SRT_WR_OFFSET 0x155000 #define IPU7_PS_UAO_SRT_WR_PLANENUM 40 #define IPU7_IS_ZLX_UC_RD_OFFSET 0x278000 #define IPU7_IS_ZLX_UC_WR_OFFSET 0x279000 #define IPU7_IS_ZLX_M0_OFFSET 0x27a000 #define IPU7_IS_ZLX_M1_OFFSET 0x27b000 #define IPU7_IS_ZLX_UC_RD_NUM 4 #define IPU7_IS_ZLX_UC_WR_NUM 4 #define IPU7_IS_ZLX_M0_NUM 8 #define IPU7_IS_ZLX_M1_NUM 16 #define IPU7_PS_ZLX_DATA_RD_OFFSET 0x14e000 #define IPU7_PS_ZLX_DATA_WR_OFFSET 0x14f000 #define IPU7_PS_ZLX_FW_RD_OFFSET 0x150000 #define IPU7_PS_ZLX_FW_WR_OFFSET 0x151000 #define IPU7_PS_ZLX_DATA_RD_NUM 32 #define IPU7_PS_ZLX_DATA_WR_NUM 32 #define IPU7_PS_ZLX_FW_RD_NUM 16 #define IPU7_PS_ZLX_FW_WR_NUM 10 /* IPU7P5 for PTL */ /* IS MMU Cmd RD */ #define IPU7P5_IS_MMU_FW_RD_OFFSET 0x274000 #define IPU7P5_IS_MMU_FW_RD_STREAM_NUM 3 #define IPU7P5_IS_MMU_FW_RD_L1_BLOCKNR_REG 0x54 #define IPU7P5_IS_MMU_FW_RD_L2_BLOCKNR_REG 0x60 /* IS MMU Cmd WR */ #define IPU7P5_IS_MMU_FW_WR_OFFSET 0x275000 #define IPU7P5_IS_MMU_FW_WR_STREAM_NUM 3 #define IPU7P5_IS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU7P5_IS_MMU_FW_WR_L2_BLOCKNR_REG 0x60 /* IS MMU Data WR Snoop */ #define IPU7P5_IS_MMU_M0_OFFSET 0x276000 #define IPU7P5_IS_MMU_M0_STREAM_NUM 16 #define 
IPU7P5_IS_MMU_M0_L1_BLOCKNR_REG 0x54 #define IPU7P5_IS_MMU_M0_L2_BLOCKNR_REG 0x94 /* IS MMU Data WR ISOC */ #define IPU7P5_IS_MMU_M1_OFFSET 0x277000 #define IPU7P5_IS_MMU_M1_STREAM_NUM 16 #define IPU7P5_IS_MMU_M1_L1_BLOCKNR_REG 0x54 #define IPU7P5_IS_MMU_M1_L2_BLOCKNR_REG 0x94 /* PS MMU FW RD */ #define IPU7P5_PS_MMU_FW_RD_OFFSET 0x148000 #define IPU7P5_PS_MMU_FW_RD_STREAM_NUM 16 #define IPU7P5_PS_MMU_FW_RD_L1_BLOCKNR_REG 0x54 #define IPU7P5_PS_MMU_FW_RD_L2_BLOCKNR_REG 0x94 /* PS MMU FW WR */ #define IPU7P5_PS_MMU_FW_WR_OFFSET 0x149000 #define IPU7P5_PS_MMU_FW_WR_STREAM_NUM 10 #define IPU7P5_PS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU7P5_PS_MMU_FW_WR_L2_BLOCKNR_REG 0x7c /* PS MMU FW Data RD VC0 */ #define IPU7P5_PS_MMU_SRT_RD_OFFSET 0x14a000 #define IPU7P5_PS_MMU_SRT_RD_STREAM_NUM 22 #define IPU7P5_PS_MMU_SRT_RD_L1_BLOCKNR_REG 0x54 #define IPU7P5_PS_MMU_SRT_RD_L2_BLOCKNR_REG 0xac /* PS MMU FW Data WR VC0 */ #define IPU7P5_PS_MMU_SRT_WR_OFFSET 0x14b000 #define IPU7P5_PS_MMU_SRT_WR_STREAM_NUM 32 #define IPU7P5_PS_MMU_SRT_WR_L1_BLOCKNR_REG 0x54 #define IPU7P5_PS_MMU_SRT_WR_L2_BLOCKNR_REG 0xd4 /* IS UAO UC RD */ #define IPU7P5_IS_UAO_UC_RD_OFFSET 0x27c000 #define IPU7P5_IS_UAO_UC_RD_PLANENUM 4 /* IS UAO UC WR */ #define IPU7P5_IS_UAO_UC_WR_OFFSET 0x27d000 #define IPU7P5_IS_UAO_UC_WR_PLANENUM 4 /* IS UAO M0 WR */ #define IPU7P5_IS_UAO_M0_WR_OFFSET 0x27e000 #define IPU7P5_IS_UAO_M0_WR_PLANENUM 16 /* IS UAO M1 WR */ #define IPU7P5_IS_UAO_M1_WR_OFFSET 0x27f000 #define IPU7P5_IS_UAO_M1_WR_PLANENUM 16 /* PS UAO FW RD */ #define IPU7P5_PS_UAO_FW_RD_OFFSET 0x156000 #define IPU7P5_PS_UAO_FW_RD_PLANENUM 16 /* PS UAO FW WR */ #define IPU7P5_PS_UAO_FW_WR_OFFSET 0x157000 #define IPU7P5_PS_UAO_FW_WR_PLANENUM 10 /* PS UAO SRT RD */ #define IPU7P5_PS_UAO_SRT_RD_OFFSET 0x154000 #define IPU7P5_PS_UAO_SRT_RD_PLANENUM 22 /* PS UAO SRT WR */ #define IPU7P5_PS_UAO_SRT_WR_OFFSET 0x155000 #define IPU7P5_PS_UAO_SRT_WR_PLANENUM 32 #define IPU7P5_IS_ZLX_UC_RD_OFFSET 0x278000 #define 
IPU7P5_IS_ZLX_UC_WR_OFFSET 0x279000 #define IPU7P5_IS_ZLX_M0_OFFSET 0x27a000 #define IPU7P5_IS_ZLX_M1_OFFSET 0x27b000 #define IPU7P5_IS_ZLX_UC_RD_NUM 4 #define IPU7P5_IS_ZLX_UC_WR_NUM 4 #define IPU7P5_IS_ZLX_M0_NUM 16 #define IPU7P5_IS_ZLX_M1_NUM 16 #define IPU7P5_PS_ZLX_DATA_RD_OFFSET 0x14e000 #define IPU7P5_PS_ZLX_DATA_WR_OFFSET 0x14f000 #define IPU7P5_PS_ZLX_FW_RD_OFFSET 0x150000 #define IPU7P5_PS_ZLX_FW_WR_OFFSET 0x151000 #define IPU7P5_PS_ZLX_DATA_RD_NUM 22 #define IPU7P5_PS_ZLX_DATA_WR_NUM 32 #define IPU7P5_PS_ZLX_FW_RD_NUM 16 #define IPU7P5_PS_ZLX_FW_WR_NUM 10 /* IS MMU Cmd RD */ #define IPU8_IS_MMU_FW_RD_OFFSET 0x270000 #define IPU8_IS_MMU_FW_RD_STREAM_NUM 3 #define IPU8_IS_MMU_FW_RD_L1_BLOCKNR_REG 0x54 #define IPU8_IS_MMU_FW_RD_L2_BLOCKNR_REG 0x60 /* IS MMU Cmd WR */ #define IPU8_IS_MMU_FW_WR_OFFSET 0x271000 #define IPU8_IS_MMU_FW_WR_STREAM_NUM 3 #define IPU8_IS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU8_IS_MMU_FW_WR_L2_BLOCKNR_REG 0x60 /* IS MMU Data WR Snoop */ #define IPU8_IS_MMU_M0_OFFSET 0x272000 #define IPU8_IS_MMU_M0_STREAM_NUM 16 #define IPU8_IS_MMU_M0_L1_BLOCKNR_REG 0x54 #define IPU8_IS_MMU_M0_L2_BLOCKNR_REG 0x94 /* IS MMU Data WR ISOC */ #define IPU8_IS_MMU_M1_OFFSET 0x273000 #define IPU8_IS_MMU_M1_STREAM_NUM 16 #define IPU8_IS_MMU_M1_L1_BLOCKNR_REG 0x54 #define IPU8_IS_MMU_M1_L2_BLOCKNR_REG 0x94 /* IS MMU UPIPE ISOC */ #define IPU8_IS_MMU_UPIPE_OFFSET 0x274000 #define IPU8_IS_MMU_UPIPE_STREAM_NUM 6 #define IPU8_IS_MMU_UPIPE_L1_BLOCKNR_REG 0x54 #define IPU8_IS_MMU_UPIPE_L2_BLOCKNR_REG 0x6c /* PS MMU FW RD */ #define IPU8_PS_MMU_FW_RD_OFFSET 0x148000 #define IPU8_PS_MMU_FW_RD_STREAM_NUM 12 #define IPU8_PS_MMU_FW_RD_L1_BLOCKNR_REG 0x54 #define IPU8_PS_MMU_FW_RD_L2_BLOCKNR_REG 0x84 /* PS MMU FW WR */ #define IPU8_PS_MMU_FW_WR_OFFSET 0x149000 #define IPU8_PS_MMU_FW_WR_STREAM_NUM 8 #define IPU8_PS_MMU_FW_WR_L1_BLOCKNR_REG 0x54 #define IPU8_PS_MMU_FW_WR_L2_BLOCKNR_REG 0x74 /* PS MMU FW Data RD VC0 */ #define IPU8_PS_MMU_SRT_RD_OFFSET 0x14a000 #define 
IPU8_PS_MMU_SRT_RD_STREAM_NUM 26 #define IPU8_PS_MMU_SRT_RD_L1_BLOCKNR_REG 0x54 #define IPU8_PS_MMU_SRT_RD_L2_BLOCKNR_REG 0xbc /* PS MMU FW Data WR VC0 */ #define IPU8_PS_MMU_SRT_WR_OFFSET 0x14b000 #define IPU8_PS_MMU_SRT_WR_STREAM_NUM 26 #define IPU8_PS_MMU_SRT_WR_L1_BLOCKNR_REG 0x54 #define IPU8_PS_MMU_SRT_WR_L2_BLOCKNR_REG 0xbc /* IS UAO UC RD */ #define IPU8_IS_UAO_UC_RD_OFFSET 0x27a000 #define IPU8_IS_UAO_UC_RD_PLANENUM 4 /* IS UAO UC WR */ #define IPU8_IS_UAO_UC_WR_OFFSET 0x27b000 #define IPU8_IS_UAO_UC_WR_PLANENUM 4 /* IS UAO M0 WR */ #define IPU8_IS_UAO_M0_WR_OFFSET 0x27c000 #define IPU8_IS_UAO_M0_WR_PLANENUM 16 /* IS UAO M1 WR */ #define IPU8_IS_UAO_M1_WR_OFFSET 0x27d000 #define IPU8_IS_UAO_M1_WR_PLANENUM 16 /* IS UAO UPIPE */ #define IPU8_IS_UAO_UPIPE_OFFSET 0x27e000 #define IPU8_IS_UAO_UPIPE_PLANENUM 6 /* PS UAO FW RD */ #define IPU8_PS_UAO_FW_RD_OFFSET 0x156000 #define IPU8_PS_UAO_FW_RD_PLANENUM 12 /* PS UAO FW WR */ #define IPU8_PS_UAO_FW_WR_OFFSET 0x157000 #define IPU8_PS_UAO_FW_WR_PLANENUM 8 /* PS UAO SRT RD */ #define IPU8_PS_UAO_SRT_RD_OFFSET 0x154000 #define IPU8_PS_UAO_SRT_RD_PLANENUM 26 /* PS UAO SRT WR */ #define IPU8_PS_UAO_SRT_WR_OFFSET 0x155000 #define IPU8_PS_UAO_SRT_WR_PLANENUM 26 #define IPU8_IS_ZLX_UC_RD_OFFSET 0x275000 #define IPU8_IS_ZLX_UC_WR_OFFSET 0x276000 #define IPU8_IS_ZLX_M0_OFFSET 0x277000 #define IPU8_IS_ZLX_M1_OFFSET 0x278000 #define IPU8_IS_ZLX_UPIPE_OFFSET 0x279000 #define IPU8_IS_ZLX_UC_RD_NUM 4 #define IPU8_IS_ZLX_UC_WR_NUM 4 #define IPU8_IS_ZLX_M0_NUM 16 #define IPU8_IS_ZLX_M1_NUM 16 #define IPU8_IS_ZLX_UPIPE_NUM 6 #define IPU8_PS_ZLX_DATA_RD_OFFSET 0x14e000 #define IPU8_PS_ZLX_DATA_WR_OFFSET 0x14f000 #define IPU8_PS_ZLX_FW_RD_OFFSET 0x150000 #define IPU8_PS_ZLX_FW_WR_OFFSET 0x151000 #define IPU8_PS_ZLX_DATA_RD_NUM 26 #define IPU8_PS_ZLX_DATA_WR_NUM 26 #define IPU8_PS_ZLX_FW_RD_NUM 12 #define IPU8_PS_ZLX_FW_WR_NUM 8 #define MMU_REG_INVALIDATE_0 0x00 #define MMU_REG_INVALIDATE_1 0x04 #define MMU_REG_PAGE_TABLE_BASE_ADDR 
0x08 #define MMU_REG_USER_INFO_BITS 0x0c #define MMU_REG_AXI_REFILL_IF_ID 0x10 #define MMU_REG_PW_EN_BITMAP 0x14 #define MMU_REG_COLLAPSE_ENABLE_BITMAP 0x18 #define MMU_REG_GENERAL_REG 0x1c #define MMU_REG_AT_SP_ARB_CFG 0x20 #define MMU_REG_INVALIDATION_STATUS 0x24 #define MMU_REG_IRQ_LEVEL_NO_PULSE 0x28 #define MMU_REG_IRQ_MASK 0x2c #define MMU_REG_IRQ_ENABLE 0x30 #define MMU_REG_IRQ_EDGE 0x34 #define MMU_REG_IRQ_CLEAR 0x38 #define MMU_REG_IRQ_CAUSE 0x3c #define MMU_REG_CG_CTRL_BITS 0x40 #define MMU_REG_RD_FIFOS_STATUS 0x44 #define MMU_REG_WR_FIFOS_STATUS 0x48 #define MMU_REG_COMMON_FIFOS_STATUS 0x4c #define MMU_REG_FSM_STATUS 0x50 #define ZLX_REG_AXI_POOL 0x0 #define ZLX_REG_EN 0x20 #define ZLX_REG_CONF 0x24 #define ZLX_REG_CG_CTRL 0x900 #define ZLX_REG_FORCE_BYPASS 0x904 struct ipu7_mmu_info { struct device *dev; u32 *l1_pt; u32 l1_pt_dma; u32 **l2_pts; u32 *dummy_l2_pt; u32 dummy_l2_pteval; void *dummy_page; u32 dummy_page_pteval; dma_addr_t aperture_start; dma_addr_t aperture_end; unsigned long pgsize_bitmap; spinlock_t lock; /* Serialize access to users */ struct ipu7_dma_mapping *dmap; }; struct ipu7_mmu { struct list_head node; struct ipu7_mmu_hw *mmu_hw; unsigned int nr_mmus; unsigned int mmid; phys_addr_t pgtbl; struct device *dev; struct ipu7_dma_mapping *dmap; struct list_head vma_list; struct page *trash_page; dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */ dma_addr_t iova_trash_page; /* IOVA for IPU child nodes to use */ bool ready; spinlock_t ready_lock; /* Serialize access to bool ready */ void (*tlb_invalidate)(struct ipu7_mmu *mmu); }; struct ipu7_mmu *ipu7_mmu_init(struct device *dev, void __iomem *base, int mmid, const struct ipu7_hw_variants *hw); void ipu7_mmu_cleanup(struct ipu7_mmu *mmu); int ipu7_mmu_hw_init(struct ipu7_mmu *mmu); void ipu7_mmu_hw_cleanup(struct ipu7_mmu *mmu); int ipu7_mmu_map(struct ipu7_mmu_info *mmu_info, unsigned long iova, phys_addr_t paddr, size_t size); void ipu7_mmu_unmap(struct ipu7_mmu_info 
*mmu_info, unsigned long iova, size_t size); phys_addr_t ipu7_mmu_iova_to_phys(struct ipu7_mmu_info *mmu_info, dma_addr_t iova); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-platform-regs.h000066400000000000000000000106071503071307200301720ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2018 - 2025 Intel Corporation */ #ifndef IPU7_PLATFORM_REGS_H #define IPU7_PLATFORM_REGS_H #define IS_BASE 0x230000 #define IS_UC_CTRL_BASE (IS_BASE + 0x0) #define PS_BASE 0x130000 #define PS_UC_CTRL_BASE (PS_BASE + 0x0) /* * bit 0: IRQ from FW, * bit 1, 2 and 3: IRQ from HW */ #define TO_SW_IRQ_MASK 0xf #define TO_SW_IRQ_FW BIT(0) #define FW_CODE_BASE 0x0 #define FW_DATA_BASE 0x4 #define CPU_AXI_CNTL 0x8 #define CPU_QOS_CNTL 0xc #define IDMA_AXI_CNTL 0x10 #define IDMA_QOS_CNTL 0x14 #define MEF_SPLIT_SIZE 0x18 #define FW_MSG_CONTROL 0x1c #define FW_MSG_CREDITS_STATUS 0x20 #define FW_MSG_CREDIT_TAKEN 0x24 #define FW_MSG_CREDIT_RETURNED 0x28 #define TRIG_IDMA_IN 0x2c #define IDMA_DONE 0x30 #define IDMA_DONE_CLEAR 0x34 #define DMEM_CAPACITY 0x38 #define NON_SECURE_CODE_OFFSET 0x3c #define UC_CG_CTRL_BITS 0x40 #define ALT_RESET_VEC 0x44 #define WDT_NMI_DURATION 0x104 #define WDT_RST_REQ_DURATION 0x108 #define WDT_CNTL 0x10c #define WDT_NMI_CURRENT_COUNT 0x110 #define WDT_RST_CURRENT_COUNT 0x114 #define WDT_HALT 0x118 #define WDT_STATUS 0x11c #define SPARE_REG_RW 0x120 #define SPARE_REG_RO 0x124 #define FW_TO_FW_IRQ_CNTL_EDGE 0x200 #define FW_TO_FW_IRQ_CNTL_MASK_N 0x204 #define FW_TO_FW_IRQ_CNTL_STATUS 0x208 #define FW_TO_FW_IRQ_CNTL_CLEAR 0x20c #define FW_TO_FW_IRQ_CNTL_ENABLE 0x210 #define FW_TO_FW_IRQ_CNTL_LEVEL_NOT_PULSE 0x214 #define CLK_GATE_DIS 0x218 #define DEBUG_STATUS 0x1000 #define DEBUG_EXCPETION 0x1004 #define TIE_GENERAL_INPUT 0x1008 #define ERR_STATUS 0x100c #define UC_ERR_INFO 0x1010 #define SPARE_CNTL 0x1014 #define MEF_TRC_CNTL 0x1100 #define DBG_MEF_LAST_PUSH 0x1104 #define DBG_MEF_LAST_POP 
0x1108 #define DBG_MEF_COUNT_CNTL 0x110c #define DBG_MEF_COUNT1 0x1110 #define DBG_MEF_COUNT2 0x1114 #define DBG_MEF_ACC_OCCUPANCY 0x1118 #define DBG_MEF_MAX_IRQ_TO_POP 0x111c #define DBG_IRQ_CNTL 0x1120 #define DBG_IRQ_COUNT 0x1124 #define DBG_CYC_COUNT 0x1128 #define DBG_CNTL 0x1130 #define DBG_RST_REG 0x1134 #define DBG_MEF_STATUS0 0x1138 #define DBG_MEF_STATUS1 0x113c #define PDEBUG_CTL 0x1140 #define PDEBUG_DATA 0x1144 #define PDEBUG_INST 0x1148 #define PDEBUG_LS0ADDR 0x114c #define PDEBUG_LS0DATA 0x1150 #define PDEBUG_LS0STAT 0x1154 #define PDEBUG_PC 0x1158 #define PDEBUG_MISC 0x115c #define PDEBUG_PREF_STS 0x1160 #define MEF0_ADDR 0x2000 #define MEF1_ADDR 0x2020 #define PRINTF_EN_THROUGH_TRACE 0x3004 #define PRINTF_EN_DIRECTLY_TO_DDR 0x3008 #define PRINTF_DDR_BASE_ADDR 0x300c #define PRINTF_DDR_SIZE 0x3010 #define PRINTF_DDR_NEXT_ADDR 0x3014 #define PRINTF_STATUS 0x3018 #define PRINTF_AXI_CNTL 0x301c #define PRINTF_MSG_LENGTH 0x3020 #define TO_SW_IRQ_CNTL_EDGE 0x4000 #define TO_SW_IRQ_CNTL_MASK_N 0x4004 #define TO_SW_IRQ_CNTL_STATUS 0x4008 #define TO_SW_IRQ_CNTL_CLEAR 0x400c #define TO_SW_IRQ_CNTL_ENABLE 0x4010 #define TO_SW_IRQ_CNTL_LEVEL_NOT_PULSE 0x4014 #define ERR_IRQ_CNTL_EDGE 0x4018 #define ERR_IRQ_CNTL_MASK_N 0x401c #define ERR_IRQ_CNTL_STATUS 0x4020 #define ERR_IRQ_CNTL_CLEAR 0x4024 #define ERR_IRQ_CNTL_ENABLE 0x4028 #define ERR_IRQ_CNTL_LEVEL_NOT_PULSE 0x402c #define LOCAL_DMEM_BASE_ADDR 0x1300000 /* * IS_UC_TO_SW irqs * bit 0: IRQ from local FW * bit 1~3: IRQ from HW */ #define IS_UC_TO_SW_IRQ_MASK 0xf #define IPU_ISYS_SPC_OFFSET 0x210000 #define IPU7_PSYS_SPC_OFFSET 0x118000 #define IPU_ISYS_DMEM_OFFSET 0x200000 #define IPU_PSYS_DMEM_OFFSET 0x100000 #define IPU7_ISYS_CSI_PORT_NUM 4 /* IRQ-related registers in PSYS */ #define IPU_REG_PSYS_TO_SW_IRQ_CNTL_EDGE 0x134000 #define IPU_REG_PSYS_TO_SW_IRQ_CNTL_MASK 0x134004 #define IPU_REG_PSYS_TO_SW_IRQ_CNTL_STATUS 0x134008 #define IPU_REG_PSYS_TO_SW_IRQ_CNTL_CLEAR 0x13400c #define 
IPU_REG_PSYS_TO_SW_IRQ_CNTL_ENABLE 0x134010 #define IPU_REG_PSYS_TO_SW_IRQ_CNTL_LEVEL_NOT_PULSE 0x134014 #define IRQ_FROM_LOCAL_FW BIT(0) /* * psys subdomains power request regs */ enum ipu7_device_buttress_psys_domain_pos { IPU_PSYS_SUBDOMAIN_LB = 0, IPU_PSYS_SUBDOMAIN_BB = 1, }; #define IPU7_PSYS_DOMAIN_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_LB) | \ BIT(IPU_PSYS_SUBDOMAIN_BB)) #define IPU8_PSYS_DOMAIN_POWER_MASK BIT(IPU_PSYS_SUBDOMAIN_LB) #define IPU_PSYS_DOMAIN_POWER_IN_PROGRESS BIT(31) #endif /* IPU7_PLATFORM_REGS_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-syscom.c000066400000000000000000000050621503071307200267170ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include "abi/ipu7_fw_syscom_abi.h" #include "ipu7.h" #include "ipu7-syscom.h" static void __iomem *ipu7_syscom_get_indices(struct ipu7_syscom_context *ctx, u32 q) { return ctx->queue_indices + (q * sizeof(struct syscom_queue_indices_s)); } void *ipu7_syscom_get_token(struct ipu7_syscom_context *ctx, int q) { struct syscom_queue_config *queue_params = &ctx->queue_configs[q]; void __iomem *queue_indices = ipu7_syscom_get_indices(ctx, q); u32 write_index = readl(queue_indices + offsetof(struct syscom_queue_indices_s, write_index)); u32 read_index = readl(queue_indices + offsetof(struct syscom_queue_indices_s, read_index)); void *token = NULL; if (q < ctx->num_output_queues) { /* Output queue */ bool empty = (write_index == read_index); if (!empty) token = queue_params->token_array_mem + read_index * queue_params->token_size_in_bytes; } else { /* Input queue */ bool full = (read_index == ((write_index + 1U) % (u32)queue_params->max_capacity)); if (!full) token = queue_params->token_array_mem + write_index * queue_params->token_size_in_bytes; } return token; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_syscom_get_token, "INTEL_IPU7"); #else 
EXPORT_SYMBOL_NS_GPL(ipu7_syscom_get_token, INTEL_IPU7); #endif void ipu7_syscom_put_token(struct ipu7_syscom_context *ctx, int q) { struct syscom_queue_config *queue_params = &ctx->queue_configs[q]; void __iomem *queue_indices = ipu7_syscom_get_indices(ctx, q); u32 offset, index; if (q < ctx->num_output_queues) /* Output queue */ offset = offsetof(struct syscom_queue_indices_s, read_index); else /* Input queue */ offset = offsetof(struct syscom_queue_indices_s, write_index); index = readl(queue_indices + offset); writel((index + 1U) % queue_params->max_capacity, queue_indices + offset); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_syscom_put_token, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_syscom_put_token, INTEL_IPU7); #endif struct syscom_queue_params_config * ipu7_syscom_get_queue_config(struct syscom_config_s *config) { return (struct syscom_queue_params_config *)(&config[1]); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_syscom_get_queue_config, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_syscom_get_queue_config, INTEL_IPU7); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7-syscom.h000066400000000000000000000014601503071307200267220ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_SYSCOM_H #define IPU7_SYSCOM_H #include struct syscom_config_s; struct syscom_queue_params_config; struct syscom_queue_config { void *token_array_mem; u32 queue_size; u16 token_size_in_bytes; u16 max_capacity; }; struct ipu7_syscom_context { u16 num_input_queues; u16 num_output_queues; struct syscom_queue_config *queue_configs; void __iomem *queue_indices; dma_addr_t queue_mem_dma_addr; void *queue_mem; u32 queue_mem_size; }; void ipu7_syscom_put_token(struct ipu7_syscom_context *ctx, int q); void *ipu7_syscom_get_token(struct ipu7_syscom_context *ctx, int q); struct 
syscom_queue_params_config * ipu7_syscom_get_queue_config(struct syscom_config_s *config); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7.c000066400000000000000000001762061503071307200254150ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 - 2025 Intel Corporation */ #include #include #include #include #ifdef CONFIG_DEBUG_FS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "abi/ipu7_fw_common_abi.h" #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress.h" #include "ipu7-buttress-regs.h" #include "ipu7-cpd.h" #include "ipu7-dma.h" #include "ipu7-isys-csi2-regs.h" #include "ipu7-mmu.h" #include "ipu7-platform-regs.h" #define IPU_PCI_BAR 0 #define IPU_PCI_PBBAR 4 #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC static const unsigned int ipu7_tpg_offsets[] = { MGC_MG_PORT(0), MGC_MG_PORT(1), MGC_MG_PORT(2), }; #endif static const unsigned int ipu7_csi_offsets[] = { IPU_CSI_PORT_A_ADDR_OFFSET, IPU_CSI_PORT_B_ADDR_OFFSET, IPU_CSI_PORT_C_ADDR_OFFSET, IPU_CSI_PORT_D_ADDR_OFFSET, }; static struct ipu_isys_internal_pdata ipu7p5_isys_ipdata = { .csi2 = { .gpreg = IS_IO_CSI2_GPREGS_BASE, }, .hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = IPU7P5_IS_MMU_NUM, .mmu_hw = { { .name = "IS_FW_RD", .offset = IPU7P5_IS_MMU_FW_RD_OFFSET, .zlx_offset = IPU7P5_IS_ZLX_UC_RD_OFFSET, .uao_offset = IPU7P5_IS_UAO_UC_RD_OFFSET, .info_bits = 0x20005101, .refill = 0x00002726, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_IS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU7P5_IS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_IS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU7P5_IS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU7P5_IS_ZLX_UC_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 1, 0, 0 }, .zlx_conf = { 0x0, }, 
.uao_p_num = IPU7P5_IS_UAO_UC_RD_PLANENUM, .uao_p2tlb = { 0x00000049, 0x0000004c, 0x0000004d, 0x00000000, }, }, { .name = "IS_FW_WR", .offset = IPU7P5_IS_MMU_FW_WR_OFFSET, .zlx_offset = IPU7P5_IS_ZLX_UC_WR_OFFSET, .uao_offset = IPU7P5_IS_UAO_UC_WR_OFFSET, .info_bits = 0x20005001, .refill = 0x00002524, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_IS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = IPU7P5_IS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_IS_MMU_FW_WR_STREAM_NUM, .nr_l2streams = IPU7P5_IS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU7P5_IS_ZLX_UC_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, }, .zlx_conf = { 0x0, 0x00010101, 0x00010101, 0x0, }, .uao_p_num = IPU7P5_IS_UAO_UC_WR_PLANENUM, .uao_p2tlb = { 0x00000049, 0x0000004a, 0x0000004b, 0x00000000, }, }, { .name = "IS_DATA_WR_ISOC", .offset = IPU7P5_IS_MMU_M0_OFFSET, .zlx_offset = IPU7P5_IS_ZLX_M0_OFFSET, .uao_offset = IPU7P5_IS_UAO_M0_WR_OFFSET, .info_bits = 0x20004e01, .refill = 0x00002120, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_IS_MMU_M0_L1_BLOCKNR_REG, .l2_block = IPU7P5_IS_MMU_M0_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_IS_MMU_M0_STREAM_NUM, .nr_l2streams = IPU7P5_IS_MMU_M0_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .zlx_nr = IPU7P5_IS_ZLX_M0_NUM, .zlx_axi_pool = { 0x00000f10, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 
0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, }, .uao_p_num = IPU7P5_IS_UAO_M0_WR_PLANENUM, .uao_p2tlb = { 0x00000041, 0x00000042, 0x00000043, 0x00000044, 0x00000041, 0x00000042, 0x00000043, 0x00000044, 0x00000041, 0x00000042, 0x00000043, 0x00000044, 0x00000041, 0x00000042, 0x00000043, 0x00000044, }, }, { .name = "IS_DATA_WR_SNOOP", .offset = IPU7P5_IS_MMU_M1_OFFSET, .zlx_offset = IPU7P5_IS_ZLX_M1_OFFSET, .uao_offset = IPU7P5_IS_UAO_M1_WR_OFFSET, .info_bits = 0x20004f01, .refill = 0x00002322, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_IS_MMU_M1_L1_BLOCKNR_REG, .l2_block = IPU7P5_IS_MMU_M1_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_IS_MMU_M1_STREAM_NUM, .nr_l2streams = IPU7P5_IS_MMU_M1_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .zlx_nr = IPU7P5_IS_ZLX_M1_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, }, .uao_p_num = IPU7P5_IS_UAO_M1_WR_PLANENUM, .uao_p2tlb = { 0x00000045, 0x00000046, 0x00000047, 0x00000048, 0x00000045, 0x00000046, 0x00000047, 0x00000048, 0x00000045, 0x00000046, 0x00000047, 0x00000048, 0x00000045, 0x00000046, 0x00000047, 0x00000048, }, }, }, .cdc_fifos = 3, .cdc_fifo_threshold = {6, 8, 2}, .dmem_offset = IPU_ISYS_DMEM_OFFSET, .spc_offset = IPU_ISYS_SPC_OFFSET, }, .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, }; static struct ipu_psys_internal_pdata ipu7p5_psys_ipdata = { 
.hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = IPU7P5_PS_MMU_NUM, .mmu_hw = { { .name = "PS_FW_RD", .offset = IPU7P5_PS_MMU_FW_RD_OFFSET, .zlx_offset = IPU7P5_PS_ZLX_FW_RD_OFFSET, .uao_offset = IPU7P5_PS_UAO_FW_RD_OFFSET, .info_bits = 0x20004001, .refill = 0x00002726, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_PS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU7P5_PS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_PS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU7P5_PS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000d, 0x0000000f, 0x00000011, 0x00000012, 0x00000013, 0x00000014, 0x00000016, 0x00000018, 0x00000019, 0x0000001a, 0x0000001a, 0x0000001a, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .zlx_nr = IPU7P5_PS_ZLX_FW_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x00000000, 0x00010101, 0x00000000, 0x00000000, 0x00010101, 0x00010101, 0x00000000, 0x00000000, 0x00000000, 0x00010101, 0x00010101, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, .uao_p_num = IPU7P5_PS_UAO_FW_RD_PLANENUM, .uao_p2tlb = { 0x0000002e, 0x00000035, 0x00000036, 0x00000031, 0x00000037, 0x00000038, 0x00000039, 0x00000032, 0x00000033, 0x0000003a, 0x0000003b, 0x0000003c, 0x00000034, 0x0, 0x0, 0x0, }, }, { .name = "PS_FW_WR", .offset = IPU7P5_PS_MMU_FW_WR_OFFSET, .zlx_offset = IPU7P5_PS_ZLX_FW_WR_OFFSET, .uao_offset = IPU7P5_PS_UAO_FW_WR_OFFSET, .info_bits = 0x20003e01, .refill = 0x00002322, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_PS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = IPU7P5_PS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_PS_MMU_FW_WR_STREAM_NUM, .nr_l2streams = IPU7P5_PS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000008, 
0x0000000a, 0x0000000c, 0x0000000d, 0x0000000e, 0x0000000f, 0x00000010, 0x00000010, 0x00000010, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, }, .zlx_nr = IPU7P5_PS_ZLX_FW_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x00000000, 0x00010101, 0x00010101, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, .uao_p_num = IPU7P5_PS_UAO_FW_WR_PLANENUM, .uao_p2tlb = { 0x0000002e, 0x0000002f, 0x00000030, 0x00000031, 0x00000032, 0x00000033, 0x00000034, 0x0, 0x0, 0x0, }, }, { .name = "PS_DATA_RD", .offset = IPU7P5_PS_MMU_SRT_RD_OFFSET, .zlx_offset = IPU7P5_PS_ZLX_DATA_RD_OFFSET, .uao_offset = IPU7P5_PS_UAO_SRT_RD_OFFSET, .info_bits = 0x20003f01, .refill = 0x00002524, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_PS_MMU_SRT_RD_L1_BLOCKNR_REG, .l2_block = IPU7P5_PS_MMU_SRT_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_PS_MMU_SRT_RD_STREAM_NUM, .nr_l2streams = IPU7P5_PS_MMU_SRT_RD_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000004, 0x00000006, 0x00000008, 0x0000000b, 0x0000000d, 0x0000000f, 0x00000013, 0x00000017, 0x00000019, 0x0000001b, 0x0000001d, 0x0000001f, 0x0000002b, 0x00000033, 0x0000003f, 0x00000047, 0x00000049, 0x0000004b, 0x0000004c, 0x0000004d, 0x0000004e, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, 0x00000020, 0x00000022, 0x00000024, 0x00000026, 0x00000028, 0x0000002a, }, .zlx_nr = IPU7P5_PS_ZLX_DATA_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, }, .zlx_conf = { 0x00030303, 0x00010101, 0x00010101, 0x00030202, 0x00010101, 0x00010101, 0x00030303, 0x00030303, 0x00010101, 0x00030800, 0x00030500, 0x00020101, 0x00042000, 0x00031000, 
0x00042000, 0x00031000, 0x00020400, 0x00010101, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, .uao_p_num = IPU7P5_PS_UAO_SRT_RD_PLANENUM, .uao_p2tlb = { 0x0000001c, 0x0000001d, 0x0000001e, 0x0000001f, 0x00000020, 0x00000021, 0x00000022, 0x00000023, 0x00000024, 0x00000025, 0x00000026, 0x00000027, 0x00000028, 0x00000029, 0x0000002a, 0x0000002b, 0x0000002c, 0x0000002d, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, }, { .name = "PS_DATA_WR", .offset = IPU7P5_PS_MMU_SRT_WR_OFFSET, .zlx_offset = IPU7P5_PS_ZLX_DATA_WR_OFFSET, .uao_offset = IPU7P5_PS_UAO_SRT_WR_OFFSET, .info_bits = 0x20003d01, .refill = 0x00002120, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU7P5_PS_MMU_SRT_WR_L1_BLOCKNR_REG, .l2_block = IPU7P5_PS_MMU_SRT_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7P5_PS_MMU_SRT_WR_STREAM_NUM, .nr_l2streams = IPU7P5_PS_MMU_SRT_WR_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000006, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, 0x00000020, 0x00000022, 0x00000024, 0x00000028, 0x0000002a, 0x00000036, 0x0000003e, 0x00000040, 0x00000042, 0x0000004e, 0x00000056, 0x0000005c, 0x00000068, 0x00000070, 0x00000076, 0x00000077, 0x00000078, 0x00000079, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000006, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, 0x00000020, 0x00000022, 0x00000024, 0x00000028, 0x0000002a, 0x00000036, 0x0000003e, 0x00000040, 0x00000042, 0x0000004e, 0x00000056, 0x0000005c, 0x00000068, 0x00000070, 0x00000076, 0x00000077, 0x00000078, 0x00000079, }, .zlx_nr = IPU7P5_PS_ZLX_DATA_WR_NUM, .zlx_axi_pool = { 0x00000f50, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, }, .zlx_conf = { 0x00010102, 0x00030103, 0x00030103, 0x00010101, 0x00010101, 0x00030101, 0x00010101, 0x38010101, 0x00000000, 0x00000000, 
0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x00030303, 0x00010101, 0x00042000, 0x00031000, 0x00010101, 0x00010101, 0x00042000, 0x00031000, 0x00031000, 0x00042000, 0x00031000, 0x00031000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, .uao_p_num = IPU7P5_PS_UAO_SRT_WR_PLANENUM, .uao_p2tlb = { 0x00000000, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000006, 0x00000007, 0x00000008, 0x00000009, 0x0000000a, 0x0000000b, 0x0000000c, 0x0000000d, 0x0000000e, 0x0000000f, 0x00000010, 0x00000011, 0x00000012, 0x00000013, 0x00000014, 0x00000015, 0x00000016, 0x00000017, 0x00000018, 0x00000019, 0x0000001a, 0x0000001b, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }, }, }, .dmem_offset = IPU_PSYS_DMEM_OFFSET, }, }; static struct ipu_isys_internal_pdata ipu7_isys_ipdata = { .csi2 = { .gpreg = IS_IO_CSI2_GPREGS_BASE, }, .hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = IPU7_IS_MMU_NUM, .mmu_hw = { { .name = "IS_FW_RD", .offset = IPU7_IS_MMU_FW_RD_OFFSET, .zlx_offset = IPU7_IS_ZLX_UC_RD_OFFSET, .uao_offset = IPU7_IS_UAO_UC_RD_OFFSET, .info_bits = 0x20006701, .refill = 0x00002726, .collapse_en_bitmap = 0x0, .l1_block = IPU7_IS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU7_IS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7_IS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU7_IS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU7_IS_ZLX_UC_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 0, 0, 0 }, .zlx_conf = { 0x0, 0x0, 0x0, 0x0, }, .uao_p_num = IPU7_IS_UAO_UC_RD_PLANENUM, .uao_p2tlb = { 0x00000061, 0x00000064, 0x00000065, }, }, { .name = "IS_FW_WR", .offset = IPU7_IS_MMU_FW_WR_OFFSET, .zlx_offset = IPU7_IS_ZLX_UC_WR_OFFSET, .uao_offset = IPU7_IS_UAO_UC_WR_OFFSET, .info_bits = 0x20006801, .refill = 0x00002524, .collapse_en_bitmap = 0x0, .l1_block = IPU7_IS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = IPU7_IS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7_IS_MMU_FW_WR_STREAM_NUM, 
.nr_l2streams = IPU7_IS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU7_IS_ZLX_UC_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, }, .zlx_conf = { 0x0, 0x00010101, 0x00010101, }, .uao_p_num = IPU7_IS_UAO_UC_WR_PLANENUM, .uao_p2tlb = { 0x00000061, 0x00000062, 0x00000063, }, }, { .name = "IS_DATA_WR_ISOC", .offset = IPU7_IS_MMU_M0_OFFSET, .zlx_offset = IPU7_IS_ZLX_M0_OFFSET, .uao_offset = IPU7_IS_UAO_M0_WR_OFFSET, .info_bits = 0x20006601, .refill = 0x00002120, .collapse_en_bitmap = 0x0, .l1_block = IPU7_IS_MMU_M0_L1_BLOCKNR_REG, .l2_block = IPU7_IS_MMU_M0_L2_BLOCKNR_REG, .nr_l1streams = IPU7_IS_MMU_M0_STREAM_NUM, .nr_l2streams = IPU7_IS_MMU_M0_STREAM_NUM, .l1_block_sz = { 0x0, 0x3, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, }, .zlx_nr = IPU7_IS_ZLX_M0_NUM, .zlx_axi_pool = { 0x00000f10, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x00010103, 0x00010103, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, }, .uao_p_num = IPU7_IS_UAO_M0_WR_PLANENUM, .uao_p2tlb = { 0x00000049, 0x0000004a, 0x0000004b, 0x0000004c, 0x0000004d, 0x0000004e, 0x0000004f, 0x00000050, }, }, { .name = "IS_DATA_WR_SNOOP", .offset = IPU7_IS_MMU_M1_OFFSET, .zlx_offset = IPU7_IS_ZLX_M1_OFFSET, .uao_offset = IPU7_IS_UAO_M1_WR_OFFSET, .info_bits = 0x20006901, .refill = 0x00002322, .collapse_en_bitmap = 0x0, .l1_block = IPU7_IS_MMU_M1_L1_BLOCKNR_REG, .l2_block = IPU7_IS_MMU_M1_L2_BLOCKNR_REG, .nr_l1streams = IPU7_IS_MMU_M1_STREAM_NUM, .nr_l2streams = IPU7_IS_MMU_M1_STREAM_NUM, .l1_block_sz = { 0x0, 0x3, 0x6, 0x9, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, }, .zlx_nr = IPU7_IS_ZLX_M1_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x00010103, 0x00010103, 
0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010103, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, 0x00010101, }, .uao_p_num = IPU7_IS_UAO_M1_WR_PLANENUM, .uao_p2tlb = { 0x00000051, 0x00000052, 0x00000053, 0x00000054, 0x00000055, 0x00000056, 0x00000057, 0x00000058, 0x00000059, 0x0000005a, 0x0000005b, 0x0000005c, 0x0000005d, 0x0000005e, 0x0000005f, 0x00000060, }, }, }, .cdc_fifos = 3, .cdc_fifo_threshold = {6, 8, 2}, .dmem_offset = IPU_ISYS_DMEM_OFFSET, .spc_offset = IPU_ISYS_SPC_OFFSET, }, .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, }; static struct ipu_psys_internal_pdata ipu7_psys_ipdata = { .hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = IPU7_PS_MMU_NUM, .mmu_hw = { { .name = "PS_FW_RD", .offset = IPU7_PS_MMU_FW_RD_OFFSET, .zlx_offset = IPU7_PS_ZLX_FW_RD_OFFSET, .uao_offset = IPU7_PS_UAO_FW_RD_OFFSET, .info_bits = 0x20004801, .refill = 0x00002726, .collapse_en_bitmap = 0x0, .l1_block = IPU7_PS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU7_PS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7_PS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU7_PS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0, 0x8, 0xa, 0xc, 0xd, 0xf, 0x11, 0x12, 0x13, 0x14, 0x16, 0x18, 0x19, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, }, .zlx_nr = IPU7_PS_ZLX_FW_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x0, }, .uao_p_num = IPU7_PS_UAO_FW_RD_PLANENUM, .uao_p2tlb = { 0x00000036, 0x0000003d, 0x0000003e, 0x00000039, 0x0000003f, 0x00000040, 0x00000041, 0x0000003a, 0x0000003b, 0x00000042, 0x00000043, 0x00000044, 0x0000003c, }, }, { .name = "PS_FW_WR", .offset = IPU7_PS_MMU_FW_WR_OFFSET, .zlx_offset = IPU7_PS_ZLX_FW_WR_OFFSET, .uao_offset = IPU7_PS_UAO_FW_WR_OFFSET, .info_bits = 0x20004601, .refill = 0x00002322, .collapse_en_bitmap = 0x0, .l1_block = 
IPU7_PS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = IPU7_PS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7_PS_MMU_FW_WR_STREAM_NUM, .nr_l2streams = IPU7_PS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0, 0x8, 0xa, 0xc, 0xd, 0xe, 0xf, 0x10, 0x10, 0x10, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, }, .zlx_nr = IPU7_PS_ZLX_FW_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x0, 0x00010101, 0x00010101, }, .uao_p_num = IPU7_PS_UAO_FW_WR_PLANENUM, .uao_p2tlb = { 0x00000036, 0x00000037, 0x00000038, 0x00000039, 0x0000003a, 0x0000003b, 0x0000003c, }, }, { .name = "PS_DATA_RD", .offset = IPU7_PS_MMU_SRT_RD_OFFSET, .zlx_offset = IPU7_PS_ZLX_DATA_RD_OFFSET, .uao_offset = IPU7_PS_UAO_SRT_RD_OFFSET, .info_bits = 0x20004701, .refill = 0x00002120, .collapse_en_bitmap = 0x0, .l1_block = IPU7_PS_MMU_SRT_RD_L1_BLOCKNR_REG, .l2_block = IPU7_PS_MMU_SRT_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU7_PS_MMU_SRT_RD_STREAM_NUM, .nr_l2streams = IPU7_PS_MMU_SRT_RD_STREAM_NUM, .l1_block_sz = { 0x0, 0x4, 0x6, 0x8, 0xb, 0xd, 0xf, 0x11, 0x13, 0x15, 0x17, 0x23, 0x2b, 0x37, 0x3f, 0x41, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x55, 0x57, 0x59, 0x5b, 0x5d, 0x5f, 0x61, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, }, .zlx_nr = IPU7_PS_ZLX_DATA_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x00030303, 0x00010101, 0x00010101, 0x00030202, 0x00010101, 0x00010101, 0x00010101, 0x00030800, 0x00030500, 0x00020101, 0x00042000, 0x00031000, 0x00042000, 0x00031000, 0x00020400, 0x00010101, }, .uao_p_num = IPU7_PS_UAO_SRT_RD_PLANENUM, .uao_p2tlb = { 0x00000022, 0x00000023, 
0x00000024, 0x00000025, 0x00000026, 0x00000027, 0x00000028, 0x00000029, 0x0000002a, 0x0000002b, 0x0000002c, 0x0000002d, 0x0000002e, 0x0000002f, 0x00000030, 0x00000031, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0000001e, 0x0000001f, 0x00000020, 0x00000021, 0x00000032, 0x00000033, 0x00000034, 0x00000035, }, }, { .name = "PS_DATA_WR", .offset = IPU7_PS_MMU_SRT_WR_OFFSET, .zlx_offset = IPU7_PS_ZLX_DATA_WR_OFFSET, .uao_offset = IPU7_PS_UAO_SRT_WR_OFFSET, .info_bits = 0x20004501, .refill = 0x00002120, .collapse_en_bitmap = 0x0, .l1_block = IPU7_PS_MMU_SRT_WR_L1_BLOCKNR_REG, .l2_block = IPU7_PS_MMU_SRT_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU7_PS_MMU_SRT_WR_STREAM_NUM, .nr_l2streams = IPU7_PS_MMU_SRT_WR_STREAM_NUM, .l1_block_sz = { 0x0, 0x2, 0x6, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x32, 0x3a, 0x3c, 0x3e, 0x4a, 0x52, 0x58, 0x64, 0x6c, 0x72, 0x7e, 0x86, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x94, 0x96, 0x98, }, .l2_block_sz = { 0x0, 0x2, 0x4, 0x6, 0x8, 0xa, 0xc, 0xe, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, }, .zlx_nr = IPU7_PS_ZLX_DATA_WR_NUM, .zlx_axi_pool = { 0x00000f50, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, }, .zlx_conf = { 0x00010102, 0x00030103, 0x00030103, 0x00010101, 0x00010101, 0x00030101, 0x00010101, 0x38010101, 0x0, 0x0, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x38010101, 0x00010101, 0x00042000, 0x00031000, 0x00010101, 0x00010101, 0x00042000, 0x00031000, 0x00031000, 0x00042000, 0x00031000, 0x00031000, 0x00042000, 0x00031000, 0x00031000, 0x0, 0x0, }, .uao_p_num = IPU7_PS_UAO_SRT_WR_PLANENUM, .uao_p2tlb = { 0x00000000, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000006, 0x00000007, 0x00000008, 0x00000009, 
0x0000000a, 0x0000000b, 0x0000000c, 0x0000000d, 0x0000000e, 0x0000000f, 0x00000010, 0x00000011, 0x00000012, 0x00000013, 0x00000014, 0x00000015, 0x00000016, 0x00000017, 0x00000018, 0x00000019, 0x0000001a, 0x0000001b, 0x0000001c, 0x0000001d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0000001e, 0x0000001f, 0x00000020, 0x00000021, }, }, }, .dmem_offset = IPU_PSYS_DMEM_OFFSET, }, }; static struct ipu_isys_internal_pdata ipu8_isys_ipdata = { .csi2 = { .gpreg = IPU8_IS_IO_CSI2_GPREGS_BASE, }, .hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = IPU8_IS_MMU_NUM, .mmu_hw = { { .name = "IS_FW_RD", .offset = IPU8_IS_MMU_FW_RD_OFFSET, .zlx_offset = IPU8_IS_ZLX_UC_RD_OFFSET, .uao_offset = IPU8_IS_UAO_UC_RD_OFFSET, .info_bits = 0x20005101, .refill = 0x00002726, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU8_IS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU8_IS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU8_IS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU8_IS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU8_IS_ZLX_UC_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 1, 0, 0 }, .zlx_conf = { 0, 2, 0, 0 }, .uao_p_num = IPU8_IS_UAO_UC_RD_PLANENUM, .uao_p2tlb = { 0x00000049, 0x0000004c, 0x0000004d, 0x00000000, }, }, { .name = "IS_FW_WR", .offset = IPU8_IS_MMU_FW_WR_OFFSET, .zlx_offset = IPU8_IS_ZLX_UC_WR_OFFSET, .uao_offset = IPU8_IS_UAO_UC_WR_OFFSET, .info_bits = 0x20005001, .refill = 0x00002524, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU8_IS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = IPU8_IS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU8_IS_MMU_FW_WR_STREAM_NUM, .nr_l2streams = IPU8_IS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0x0, 0x8, 0xa, }, .l2_block_sz = { 0x0, 0x2, 0x4, }, .zlx_nr = IPU8_IS_ZLX_UC_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, }, .zlx_conf = { 0x0, 0x2, 0x2, 0x0, }, .uao_p_num = IPU8_IS_UAO_UC_WR_PLANENUM, .uao_p2tlb = { 0x00000049, 0x0000004a, 0x0000004b, 
0x00000000, }, }, { .name = "IS_DATA_WR_ISOC", .offset = IPU8_IS_MMU_M0_OFFSET, .zlx_offset = IPU8_IS_ZLX_M0_OFFSET, .uao_offset = IPU8_IS_UAO_M0_WR_OFFSET, .info_bits = 0x20004e01, .refill = 0x00002120, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU8_IS_MMU_M0_L1_BLOCKNR_REG, .l2_block = IPU8_IS_MMU_M0_L2_BLOCKNR_REG, .nr_l1streams = IPU8_IS_MMU_M0_STREAM_NUM, .nr_l2streams = IPU8_IS_MMU_M0_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .zlx_nr = IPU8_IS_ZLX_M0_NUM, .zlx_axi_pool = { 0x00000f10, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, }, .uao_p_num = IPU8_IS_UAO_M0_WR_PLANENUM, .uao_p2tlb = { 0x0000003b, 0x0000003c, 0x0000003d, 0x0000003e, 0x0000003b, 0x0000003c, 0x0000003d, 0x0000003e, 0x0000003b, 0x0000003c, 0x0000003d, 0x0000003e, 0x0000003b, 0x0000003c, 0x0000003d, 0x0000003e, }, }, { .name = "IS_DATA_WR_SNOOP", .offset = IPU8_IS_MMU_M1_OFFSET, .zlx_offset = IPU8_IS_ZLX_M1_OFFSET, .uao_offset = IPU8_IS_UAO_M1_WR_OFFSET, .info_bits = 0x20004f01, .refill = 0x00002322, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU8_IS_MMU_M1_L1_BLOCKNR_REG, .l2_block = IPU8_IS_MMU_M1_L2_BLOCKNR_REG, .nr_l1streams = IPU8_IS_MMU_M1_STREAM_NUM, .nr_l2streams = IPU8_IS_MMU_M1_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 
0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, }, .zlx_nr = IPU8_IS_ZLX_M1_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, }, .uao_p_num = IPU8_IS_UAO_M1_WR_PLANENUM, .uao_p2tlb = { 0x0000003f, 0x00000040, 0x00000041, 0x00000042, 0x0000003f, 0x00000040, 0x00000041, 0x00000042, 0x0000003f, 0x00000040, 0x00000041, 0x00000042, 0x0000003f, 0x00000040, 0x00000041, 0x00000042, }, }, { .name = "IS_UPIPE", .offset = IPU8_IS_MMU_UPIPE_OFFSET, .zlx_offset = IPU8_IS_ZLX_UPIPE_OFFSET, .uao_offset = IPU8_IS_UAO_UPIPE_OFFSET, .info_bits = 0x20005201, .refill = 0x00002928, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU8_IS_MMU_UPIPE_L1_BLOCKNR_REG, .l2_block = IPU8_IS_MMU_UPIPE_L2_BLOCKNR_REG, .nr_l1streams = IPU8_IS_MMU_UPIPE_STREAM_NUM, .nr_l2streams = IPU8_IS_MMU_UPIPE_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, }, .zlx_nr = IPU8_IS_ZLX_UPIPE_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 1, 1, 1, 1, 1, 1, }, .zlx_conf = { 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, }, .uao_p_num = IPU8_IS_UAO_UPIPE_PLANENUM, .uao_p2tlb = { 0x00000043, 0x00000044, 0x00000045, 0x00000046, 0x00000047, 0x00000048, }, }, }, .cdc_fifos = 3, .cdc_fifo_threshold = {6, 8, 2}, .dmem_offset = IPU_ISYS_DMEM_OFFSET, .spc_offset = IPU_ISYS_SPC_OFFSET, }, .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN, }; static struct ipu_psys_internal_pdata ipu8_psys_ipdata = { .hw_variant = { .offset = IPU_UNIFIED_OFFSET, .nr_mmus = IPU8_PS_MMU_NUM, .mmu_hw = { { .name = "PS_FW_RD", .offset = IPU8_PS_MMU_FW_RD_OFFSET, .zlx_offset = IPU8_PS_ZLX_FW_RD_OFFSET, .uao_offset = IPU8_PS_UAO_FW_RD_OFFSET, .info_bits = 
0x20003a01, .refill = 0x00002726, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU8_PS_MMU_FW_RD_L1_BLOCKNR_REG, .l2_block = IPU8_PS_MMU_FW_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU8_PS_MMU_FW_RD_STREAM_NUM, .nr_l2streams = IPU8_PS_MMU_FW_RD_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000008, 0x0000000a, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x00000018, 0x00000018, 0x00000018, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, }, .zlx_nr = IPU8_PS_ZLX_FW_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x0, 0x2, 0x0, 0x0, 0x2, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, }, .uao_p_num = IPU8_PS_UAO_FW_RD_PLANENUM, .uao_p2tlb = { 0x0000002d, 0x00000032, 0x00000033, 0x00000030, 0x00000034, 0x00000035, 0x00000036, 0x00000031, 0x0, 0x0, 0x0, 0x0, }, }, { .name = "PS_FW_WR", .offset = IPU8_PS_MMU_FW_WR_OFFSET, .zlx_offset = IPU8_PS_ZLX_FW_WR_OFFSET, .uao_offset = IPU8_PS_UAO_FW_WR_OFFSET, .info_bits = 0x20003901, .refill = 0x00002524, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU8_PS_MMU_FW_WR_L1_BLOCKNR_REG, .l2_block = IPU8_PS_MMU_FW_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU8_PS_MMU_FW_WR_STREAM_NUM, .nr_l2streams = IPU8_PS_MMU_FW_WR_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000010, 0x00000010, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, }, .zlx_nr = IPU8_PS_ZLX_FW_WR_NUM, .zlx_axi_pool = { 0x00000f20, }, .zlx_en = { 0, 1, 1, 0, 0, 0, 0, 0, }, .zlx_conf = { 0x0, 0x2, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, }, .uao_p_num = IPU8_PS_UAO_FW_WR_PLANENUM, .uao_p2tlb = { 0x0000002d, 0x0000002e, 0x0000002f, 0x00000030, 0x00000031, 0x0, 0x0, 0x0, }, }, { .name = "PS_DATA_RD", .offset = IPU8_PS_MMU_SRT_RD_OFFSET, .zlx_offset = 
IPU8_PS_ZLX_DATA_RD_OFFSET, .uao_offset = IPU8_PS_UAO_SRT_RD_OFFSET, .info_bits = 0x20003801, .refill = 0x00002322, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU8_PS_MMU_SRT_RD_L1_BLOCKNR_REG, .l2_block = IPU8_PS_MMU_SRT_RD_L2_BLOCKNR_REG, .nr_l1streams = IPU8_PS_MMU_SRT_RD_STREAM_NUM, .nr_l2streams = IPU8_PS_MMU_SRT_RD_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000004, 0x00000006, 0x00000008, 0x0000000c, 0x0000000e, 0x00000010, 0x00000014, 0x00000018, 0x0000001c, 0x0000001e, 0x00000022, 0x00000024, 0x00000026, 0x00000028, 0x0000002a, 0x0000002c, 0x0000002e, 0x00000030, 0x00000032, 0x00000036, 0x0000003a, 0x0000003c, 0x0000003c, 0x0000003c, 0x0000003c, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, 0x00000020, 0x00000022, 0x00000024, 0x00000026, 0x00000028, 0x0000002a, 0x0000002c, 0x0000002e, 0x00000030, 0x00000032, }, .zlx_nr = IPU8_PS_ZLX_DATA_RD_NUM, .zlx_axi_pool = { 0x00000f30, }, .zlx_en = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, }, .zlx_conf = { 0x6, 0x3, 0x3, 0x6, 0x2, 0x2, 0x6, 0x6, 0x6, 0x3, 0x6, 0x3, 0x3, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x6, 0x6, 0x3, 0x0, 0x0, 0x0, 0x0, }, .uao_p_num = IPU8_PS_UAO_SRT_RD_PLANENUM, .uao_p2tlb = { 0x00000017, 0x00000018, 0x00000019, 0x0000001a, 0x0000001b, 0x0000001c, 0x0000001d, 0x0000001e, 0x0000001f, 0x00000020, 0x00000021, 0x00000022, 0x00000023, 0x00000024, 0x00000025, 0x00000026, 0x00000027, 0x00000028, 0x00000029, 0x0000002a, 0x0000002b, 0x0000002c, 0x0, 0x0, 0x0, 0x0, }, }, { .name = "PS_DATA_WR", .offset = IPU8_PS_MMU_SRT_WR_OFFSET, .zlx_offset = IPU8_PS_ZLX_DATA_WR_OFFSET, .uao_offset = IPU8_PS_UAO_SRT_WR_OFFSET, .info_bits = 0x20003701, .refill = 0x00002120, .collapse_en_bitmap = 0x1, .at_sp_arb_cfg = 0x1, .l1_block = IPU8_PS_MMU_SRT_WR_L1_BLOCKNR_REG, .l2_block = 
IPU8_PS_MMU_SRT_WR_L2_BLOCKNR_REG, .nr_l1streams = IPU8_PS_MMU_SRT_WR_STREAM_NUM, .nr_l2streams = IPU8_PS_MMU_SRT_WR_STREAM_NUM, .l1_block_sz = { 0x00000000, 0x00000002, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001c, 0x0000001e, 0x00000022, 0x00000024, 0x00000028, 0x0000002a, 0x0000002e, 0x00000030, 0x00000032, 0x00000036, 0x00000038, 0x0000003a, 0x0000003a, 0x0000003a, }, .l2_block_sz = { 0x00000000, 0x00000002, 0x00000004, 0x00000006, 0x00000008, 0x0000000a, 0x0000000c, 0x0000000e, 0x00000010, 0x00000012, 0x00000014, 0x00000016, 0x00000018, 0x0000001a, 0x0000001c, 0x0000001e, 0x00000020, 0x00000022, 0x00000024, 0x00000026, 0x00000028, 0x0000002a, 0x0000002c, 0x0000002e, 0x00000030, 0x00000032, }, .zlx_nr = IPU8_PS_ZLX_DATA_WR_NUM, .zlx_axi_pool = { 0x00000f50, }, .zlx_en = { 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, }, .zlx_conf = { 0x3, 0x6, 0x38000002, 0x38000000, 0x3, 0x38000002, 0x38000002, 0x38000002, 0x38000002, 0x38000002, 0x38000002, 0x6, 0x3, 0x6, 0x3, 0x6, 0x3, 0x6, 0x3, 0x3, 0x6, 0x3, 0x3, 0x0, 0x0, 0x0, }, .uao_p_num = IPU8_PS_UAO_SRT_WR_PLANENUM, .uao_p2tlb = { 0x00000000, 0x00000001, 0x00000002, 0x00000003, 0x00000004, 0x00000005, 0x00000006, 0x00000007, 0x00000008, 0x00000009, 0x0000000a, 0x0000000b, 0x0000000c, 0x0000000d, 0x0000000e, 0x0000000f, 0x00000010, 0x00000011, 0x00000012, 0x00000013, 0x00000014, 0x00000015, 0x00000016, 0x00000000, 0x00000000, 0x00000000, }, }, }, .dmem_offset = IPU_PSYS_DMEM_OFFSET, }, }; static const struct ipu_buttress_ctrl ipu7_isys_buttress_ctrl = { .subsys_id = IPU_IS, .ratio = IPU7_IS_FREQ_CTL_DEFAULT_RATIO, .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT, .cdyn = IPU_FREQ_CTL_CDYN, .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT, .freq_ctl = BUTTRESS_REG_IS_WORKPOINT_REQ, .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT, .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK, .pwr_sts_on = 
IPU_BUTTRESS_PWR_STATE_UP_DONE, .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE, .ovrd_clk = BUTTRESS_OVERRIDE_IS_CLK, .own_clk_ack = BUTTRESS_OWN_ACK_IS_CLK, }; static const struct ipu_buttress_ctrl ipu7_psys_buttress_ctrl = { .subsys_id = IPU_PS, .ratio = IPU7_PS_FREQ_CTL_DEFAULT_RATIO, .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT, .cdyn = IPU_FREQ_CTL_CDYN, .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT, .freq_ctl = BUTTRESS_REG_PS_WORKPOINT_REQ, .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT, .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK, .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE, .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE, .ovrd_clk = BUTTRESS_OVERRIDE_PS_CLK, .own_clk_ack = BUTTRESS_OWN_ACK_PS_CLK, }; static const struct ipu_buttress_ctrl ipu8_isys_buttress_ctrl = { .subsys_id = IPU_IS, .ratio = IPU8_IS_FREQ_CTL_DEFAULT_RATIO, .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT, .cdyn = IPU_FREQ_CTL_CDYN, .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT, .freq_ctl = BUTTRESS_REG_IS_WORKPOINT_REQ, .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT, .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK, .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE, .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE, }; static const struct ipu_buttress_ctrl ipu8_psys_buttress_ctrl = { .subsys_id = IPU_PS, .ratio = IPU8_PS_FREQ_CTL_DEFAULT_RATIO, .ratio_shift = IPU_FREQ_CTL_RATIO_SHIFT, .cdyn = IPU_FREQ_CTL_CDYN, .cdyn_shift = IPU_FREQ_CTL_CDYN_SHIFT, .freq_ctl = BUTTRESS_REG_PS_WORKPOINT_REQ, .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT, .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK, .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE, .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE, .own_clk_ack = BUTTRESS_OWN_ACK_PS_PLL, }; void ipu_internal_pdata_init(struct ipu_isys_internal_pdata *isys_ipdata, struct ipu_psys_internal_pdata *psys_ipdata) { isys_ipdata->csi2.nports = ARRAY_SIZE(ipu7_csi_offsets); isys_ipdata->csi2.offsets = ipu7_csi_offsets; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC isys_ipdata->tpg.ntpgs 
= ARRAY_SIZE(ipu7_tpg_offsets); isys_ipdata->tpg.offsets = ipu7_tpg_offsets; isys_ipdata->tpg.sels = NULL; #endif isys_ipdata->num_parallel_streams = IPU7_ISYS_NUM_STREAMS; psys_ipdata->hw_variant.spc_offset = IPU7_PSYS_SPC_OFFSET; } static int ipu7_isys_check_fwnode_graph(struct fwnode_handle *fwnode) { struct fwnode_handle *endpoint; if (IS_ERR_OR_NULL(fwnode)) return -EINVAL; endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL); if (endpoint) { fwnode_handle_put(endpoint); return 0; } return ipu7_isys_check_fwnode_graph(fwnode->secondary); } static struct ipu7_bus_device * ipu7_isys_init(struct pci_dev *pdev, struct device *parent, const struct ipu_buttress_ctrl *ctrl, void __iomem *base, const struct ipu_isys_internal_pdata *ipdata, unsigned int nr) { struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); struct ipu7_bus_device *isys_adev; struct device *dev = &pdev->dev; struct ipu7_isys_pdata *pdata; int ret; ret = ipu7_isys_check_fwnode_graph(fwnode); if (ret) { if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) { dev_err(dev, "fwnode graph has no endpoints connection\n"); return ERR_PTR(-EINVAL); } ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb); if (ret) { dev_err_probe(dev, ret, "IPU bridge init failed\n"); return ERR_PTR(ret); } } pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->base = base; pdata->ipdata = ipdata; isys_adev = ipu7_bus_initialize_device(pdev, parent, pdata, ctrl, IPU_ISYS_NAME); if (IS_ERR(isys_adev)) { dev_err_probe(dev, PTR_ERR(isys_adev), "ipu7_bus_initialize_device isys failed\n"); kfree(pdata); return ERR_CAST(isys_adev); } isys_adev->mmu = ipu7_mmu_init(dev, base, ISYS_MMID, &ipdata->hw_variant); if (IS_ERR(isys_adev->mmu)) { dev_err_probe(dev, PTR_ERR(isys_adev), "ipu7_mmu_init(isys_adev->mmu) failed\n"); put_device(&isys_adev->auxdev.dev); kfree(pdata); return ERR_CAST(isys_adev->mmu); } isys_adev->mmu->dev = &isys_adev->auxdev.dev; isys_adev->subsys = IPU_IS; ret = 
ipu7_bus_add_device(isys_adev); if (ret) { kfree(pdata); return ERR_PTR(ret); } return isys_adev; } static struct ipu7_bus_device * ipu7_psys_init(struct pci_dev *pdev, struct device *parent, const struct ipu_buttress_ctrl *ctrl, void __iomem *base, const struct ipu_psys_internal_pdata *ipdata, unsigned int nr) { struct ipu7_bus_device *psys_adev; struct ipu7_psys_pdata *pdata; int ret; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->base = base; pdata->ipdata = ipdata; psys_adev = ipu7_bus_initialize_device(pdev, parent, pdata, ctrl, IPU_PSYS_NAME); if (IS_ERR(psys_adev)) { dev_err_probe(&pdev->dev, PTR_ERR(psys_adev), "ipu7_bus_initialize_device psys failed\n"); kfree(pdata); return ERR_CAST(psys_adev); } psys_adev->mmu = ipu7_mmu_init(&pdev->dev, base, PSYS_MMID, &ipdata->hw_variant); if (IS_ERR(psys_adev->mmu)) { dev_err_probe(&pdev->dev, PTR_ERR(psys_adev), "ipu7_mmu_init(psys_adev->mmu) failed\n"); put_device(&psys_adev->auxdev.dev); kfree(pdata); return ERR_CAST(psys_adev->mmu); } psys_adev->mmu->dev = &psys_adev->auxdev.dev; psys_adev->subsys = IPU_PS; ret = ipu7_bus_add_device(psys_adev); if (ret) { kfree(pdata); return ERR_PTR(ret); } return psys_adev; } static struct ia_gofo_msg_log_info_ts fw_error_log[IPU_SUBSYS_NUM]; void ipu7_dump_fw_error_log(const struct ipu7_bus_device *adev) { void __iomem *reg = adev->isp->base + ((adev->subsys == IPU_IS) ? 
BUTTRESS_REG_FW_GP24 : BUTTRESS_REG_FW_GP8); memcpy_fromio(&fw_error_log[adev->subsys], reg, sizeof(fw_error_log[adev->subsys])); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) EXPORT_SYMBOL_NS_GPL(ipu7_dump_fw_error_log, "INTEL_IPU7"); #else EXPORT_SYMBOL_NS_GPL(ipu7_dump_fw_error_log, INTEL_IPU7); #endif #ifdef CONFIG_DEBUG_FS static struct debugfs_blob_wrapper isys_fw_error; static struct debugfs_blob_wrapper psys_fw_error; static int ipu7_init_debugfs(struct ipu7_device *isp) { struct dentry *file; struct dentry *dir; dir = debugfs_create_dir(pci_name(isp->pdev), NULL); if (!dir) return -ENOMEM; isys_fw_error.data = &fw_error_log[IPU_IS]; isys_fw_error.size = sizeof(fw_error_log[IPU_IS]); file = debugfs_create_blob("is_fw_error", 0400, dir, &isys_fw_error); if (!file) goto err; psys_fw_error.data = &fw_error_log[IPU_PS]; psys_fw_error.size = sizeof(fw_error_log[IPU_PS]); file = debugfs_create_blob("ps_fw_error", 0400, dir, &psys_fw_error); if (!file) goto err; isp->ipu7_dir = dir; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } static void ipu7_remove_debugfs(struct ipu7_device *isp) { /* * Since isys and psys debugfs dir will be created under ipu root dir, * mark its dentry to NULL to avoid duplicate removal. 
*/ debugfs_remove_recursive(isp->ipu7_dir); isp->ipu7_dir = NULL; } #endif /* CONFIG_DEBUG_FS */ static int ipu7_pci_config_setup(struct pci_dev *dev) { u16 pci_command; int ret; pci_read_config_word(dev, PCI_COMMAND, &pci_command); pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; pci_write_config_word(dev, PCI_COMMAND, pci_command); ret = pci_enable_msi(dev); if (ret) dev_err(&dev->dev, "Failed to enable msi (%d)\n", ret); return ret; } static int ipu7_map_fw_code_region(struct ipu7_bus_device *sys, void *data, size_t size) { struct device *dev = &sys->auxdev.dev; struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); struct sg_table *sgt = &sys->fw_sgt; struct ipu7_device *isp = adev->isp; struct pci_dev *pdev = isp->pdev; unsigned long n_pages, i; unsigned long attr = 0; struct page **pages; int ret; n_pages = PFN_UP(size); pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); if (!pages) return -ENOMEM; for (i = 0; i < n_pages; i++) { struct page *p = vmalloc_to_page(data); if (!p) { ret = -ENODEV; goto out; } pages[i] = p; data += PAGE_SIZE; } ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size, GFP_KERNEL); if (ret) { ret = -ENOMEM; goto out; } if (!isp->secure_mode) attr |= DMA_ATTR_RESERVE_REGION; ret = dma_map_sgtable(&pdev->dev, sgt, DMA_BIDIRECTIONAL, 0); if (ret < 0) { dev_err(dev, "map fw code[%lu pages %u nents] failed\n", n_pages, sgt->nents); ret = -ENOMEM; sg_free_table(sgt); goto out; } ret = ipu7_dma_map_sgtable(sys, sgt, DMA_BIDIRECTIONAL, attr); if (ret) { dma_unmap_sgtable(&pdev->dev, sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(sgt); goto out; } ipu7_dma_sync_sgtable(sys, sgt); dev_dbg(dev, "fw code region mapped at 0x%llx entries %d\n", sgt->sgl->dma_address, sgt->nents); out: kfree(pages); return ret; } static void ipu7_unmap_fw_code_region(struct ipu7_bus_device *sys) { struct pci_dev *pdev = sys->isp->pdev; struct sg_table *sgt = &sys->fw_sgt; ipu7_dma_unmap_sgtable(sys, sgt, DMA_BIDIRECTIONAL, 0); 
dma_unmap_sgtable(&pdev->dev, sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(sgt); } static int ipu7_init_fw_code_region_by_sys(struct ipu7_bus_device *sys, const char *sys_name) { struct device *dev = &sys->auxdev.dev; struct ipu7_device *isp = sys->isp; int ret; /* Copy FW binaries to specific location. */ ret = ipu7_cpd_copy_binary(isp->cpd_fw->data, sys_name, isp->fw_code_region, &sys->fw_entry); if (ret) { dev_err(dev, "%s binary not found.\n", sys_name); return ret; } ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "Failed to get runtime PM\n"); return ret; } ret = ipu7_mmu_hw_init(sys->mmu); if (ret) { dev_err(dev, "Failed to set mmu hw\n"); pm_runtime_put(dev); return ret; } /* Map code region. */ ret = ipu7_map_fw_code_region(sys, isp->fw_code_region, IPU_FW_CODE_REGION_SIZE); if (ret) dev_err(dev, "Failed to map fw code region for %s.\n", sys_name); ipu7_mmu_hw_cleanup(sys->mmu); pm_runtime_put(dev); return ret; } static int ipu7_init_fw_code_region(struct ipu7_device *isp) { int ret; /* * Allocate and map memory for FW execution. * Not required in secure mode, in which FW runs in IMR. 
*/ isp->fw_code_region = vmalloc(IPU_FW_CODE_REGION_SIZE); if (!isp->fw_code_region) return -ENOMEM; ret = ipu7_init_fw_code_region_by_sys(isp->isys, "isys"); if (ret) goto fail_init; ret = ipu7_init_fw_code_region_by_sys(isp->psys, "psys"); if (ret) goto fail_init; return 0; fail_init: vfree(isp->fw_code_region); return ret; } static int ipu7_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ipu_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL; struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); const struct ipu_buttress_ctrl *isys_buttress_ctrl; const struct ipu_buttress_ctrl *psys_buttress_ctrl; struct ipu_isys_internal_pdata *isys_ipdata; struct ipu_psys_internal_pdata *psys_ipdata; unsigned int dma_mask = IPU_DMA_MASK; struct device *dev = &pdev->dev; void __iomem *isys_base = NULL; void __iomem *psys_base = NULL; #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) void __iomem *const *iomap; #endif phys_addr_t phys, pb_phys; struct ipu7_device *isp; u32 is_es; int ret; if (!fwnode || fwnode_property_read_u32(fwnode, "is_es", &is_es)) is_es = 0; isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL); if (!isp) return -ENOMEM; dev_set_name(dev, "intel-ipu7"); isp->pdev = pdev; INIT_LIST_HEAD(&isp->devices); ret = pcim_enable_device(pdev); if (ret) return dev_err_probe(dev, ret, "Enable PCI device failed\n"); dev_info(dev, "Device 0x%x (rev: 0x%x)\n", pdev->device, pdev->revision); phys = pci_resource_start(pdev, IPU_PCI_BAR); pb_phys = pci_resource_start(pdev, IPU_PCI_PBBAR); dev_info(dev, "IPU7 PCI BAR0 base %llx BAR2 base %llx\n", phys, pb_phys); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) ret = pcim_iomap_regions(pdev, BIT(IPU_PCI_BAR) | BIT(IPU_PCI_PBBAR), pci_name(pdev)); if (ret) return dev_err_probe(dev, ret, "Failed to I/O memory remapping (%d)\n", ret); iomap = pcim_iomap_table(pdev); if (!iomap) return dev_err_probe(dev, -ENODEV, "Failed to iomap table\n"); isp->base = iomap[IPU_PCI_BAR]; isp->pb_base = iomap[IPU_PCI_PBBAR]; 
dev_info(dev, "IPU7 PCI BAR0 mapped at %p\n BAR2 mapped at %p\n", isp->base, isp->pb_base); #else isp->base = pcim_iomap_region(pdev, IPU_PCI_BAR, IPU_NAME); if (IS_ERR(isp->base)) return dev_err_probe(dev, PTR_ERR(isp->base), "Failed to I/O memory remapping bar %u\n", IPU_PCI_BAR); isp->pb_base = pcim_iomap_region(pdev, IPU_PCI_PBBAR, IPU_NAME); if (IS_ERR(isp->pb_base)) return dev_err_probe(dev, PTR_ERR(isp->pb_base), "Failed to I/O memory remapping bar %u\n", IPU_PCI_PBBAR); dev_info(dev, "IPU7 PCI BAR0 mapped at %p\n BAR2 mapped at %p\n", isp->base, isp->pb_base); #endif pci_set_drvdata(pdev, isp); pci_set_master(pdev); switch (id->device) { case IPU7_PCI_ID: isp->hw_ver = IPU_VER_7; isp->cpd_fw_name = IPU7_FIRMWARE_NAME; isys_ipdata = &ipu7_isys_ipdata; psys_ipdata = &ipu7_psys_ipdata; isys_buttress_ctrl = &ipu7_isys_buttress_ctrl; psys_buttress_ctrl = &ipu7_psys_buttress_ctrl; break; case IPU7P5_PCI_ID: isp->hw_ver = IPU_VER_7P5; isp->cpd_fw_name = IPU7P5_FIRMWARE_NAME; isys_ipdata = &ipu7p5_isys_ipdata; psys_ipdata = &ipu7p5_psys_ipdata; isys_buttress_ctrl = &ipu7_isys_buttress_ctrl; psys_buttress_ctrl = &ipu7_psys_buttress_ctrl; break; case IPU8_PCI_ID: isp->hw_ver = IPU_VER_8; isp->cpd_fw_name = IPU8_FIRMWARE_NAME; isys_ipdata = &ipu8_isys_ipdata; psys_ipdata = &ipu8_psys_ipdata; isys_buttress_ctrl = &ipu8_isys_buttress_ctrl; psys_buttress_ctrl = &ipu8_psys_buttress_ctrl; break; default: WARN(1, "Unsupported IPU device"); return -ENODEV; } ipu_internal_pdata_init(isys_ipdata, psys_ipdata); isys_base = isp->base + isys_ipdata->hw_variant.offset; psys_base = isp->base + psys_ipdata->hw_variant.offset; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask)); if (ret) return dev_err_probe(dev, ret, "Failed to set DMA mask\n"); dma_set_max_seg_size(dev, UINT_MAX); ret = ipu7_pci_config_setup(pdev); if (ret) return ret; ret = ipu_buttress_init(isp); if (ret) return ret; dev_info(dev, "firmware cpd file: %s\n", isp->cpd_fw_name); ret = 
request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev); if (ret) { dev_err_probe(dev, ret, "Requesting signed firmware %s failed\n", isp->cpd_fw_name); goto buttress_exit; } ret = ipu7_cpd_validate_cpd_file(isp, isp->cpd_fw->data, isp->cpd_fw->size); if (ret) { dev_err_probe(dev, ret, "Failed to validate cpd\n"); goto out_ipu_bus_del_devices; } isys_ctrl = devm_kmemdup(dev, isys_buttress_ctrl, sizeof(*isys_buttress_ctrl), GFP_KERNEL); if (!isys_ctrl) { ret = -ENOMEM; goto out_ipu_bus_del_devices; } isp->isys = ipu7_isys_init(pdev, dev, isys_ctrl, isys_base, isys_ipdata, 0); if (IS_ERR(isp->isys)) { ret = PTR_ERR(isp->isys); goto out_ipu_bus_del_devices; } psys_ctrl = devm_kmemdup(dev, psys_buttress_ctrl, sizeof(*psys_buttress_ctrl), GFP_KERNEL); if (!psys_ctrl) { ret = -ENOMEM; goto out_ipu_bus_del_devices; } isp->psys = ipu7_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl, psys_base, psys_ipdata, 0); if (IS_ERR(isp->psys)) { ret = PTR_ERR(isp->psys); goto out_ipu_bus_del_devices; } ret = devm_request_threaded_irq(dev, pdev->irq, ipu_buttress_isr, ipu_buttress_isr_threaded, IRQF_SHARED, IPU_NAME, isp); if (ret) goto out_ipu_bus_del_devices; if (!isp->secure_mode) { ret = ipu7_init_fw_code_region(isp); if (ret) goto out_ipu_bus_del_devices; } else { ret = pm_runtime_get_sync(&isp->psys->auxdev.dev); if (ret < 0) { dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n"); goto out_ipu_bus_del_devices; } ret = ipu7_mmu_hw_init(isp->psys->mmu); if (ret) { dev_err_probe(&isp->pdev->dev, ret, "Failed to init MMU hardware\n"); goto out_ipu_bus_del_devices; } ret = ipu7_map_fw_code_region(isp->psys, (void *)isp->cpd_fw->data, isp->cpd_fw->size); if (ret) { dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n"); goto out_ipu_bus_del_devices; } ret = ipu_buttress_authenticate(isp); if (ret) { dev_err_probe(&isp->pdev->dev, ret, "FW authentication failed\n"); goto out_ipu_bus_del_devices; } ipu7_mmu_hw_cleanup(isp->psys->mmu); 
pm_runtime_put(&isp->psys->auxdev.dev); } #ifdef CONFIG_DEBUG_FS ret = ipu7_init_debugfs(isp); if (ret) { dev_err_probe(dev, ret, "Failed to initialize debugfs\n"); goto out_ipu_bus_del_devices; } #endif pm_runtime_put_noidle(dev); pm_runtime_allow(dev); isp->ipu7_bus_ready_to_probe = true; return 0; out_ipu_bus_del_devices: if (!IS_ERR_OR_NULL(isp->isys) && isp->isys->fw_sgt.nents) ipu7_unmap_fw_code_region(isp->isys); if (!IS_ERR_OR_NULL(isp->psys) && isp->psys->fw_sgt.nents) ipu7_unmap_fw_code_region(isp->psys); #ifdef CONFIG_DEBUG_FS if (!IS_ERR_OR_NULL(isp->fw_code_region)) vfree(isp->fw_code_region); #endif if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu)) ipu7_mmu_cleanup(isp->psys->mmu); if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu)) ipu7_mmu_cleanup(isp->isys->mmu); if (!IS_ERR_OR_NULL(isp->psys)) pm_runtime_put(&isp->psys->auxdev.dev); ipu7_bus_del_devices(pdev); release_firmware(isp->cpd_fw); buttress_exit: ipu_buttress_exit(isp); return ret; } static void ipu7_pci_remove(struct pci_dev *pdev) { struct ipu7_device *isp = pci_get_drvdata(pdev); #ifdef CONFIG_DEBUG_FS ipu7_remove_debugfs(isp); #endif if (!IS_ERR_OR_NULL(isp->isys) && isp->isys->fw_sgt.nents) ipu7_unmap_fw_code_region(isp->isys); if (!IS_ERR_OR_NULL(isp->psys) && isp->psys->fw_sgt.nents) ipu7_unmap_fw_code_region(isp->psys); if (!IS_ERR_OR_NULL(isp->fw_code_region)) vfree(isp->fw_code_region); ipu7_bus_del_devices(pdev); pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 12, 0) pci_release_regions(pdev); pci_disable_device(pdev); #endif ipu_buttress_exit(isp); release_firmware(isp->cpd_fw); ipu7_mmu_cleanup(isp->psys->mmu); ipu7_mmu_cleanup(isp->isys->mmu); } static void ipu7_pci_reset_prepare(struct pci_dev *pdev) { struct ipu7_device *isp = pci_get_drvdata(pdev); dev_warn(&pdev->dev, "FLR prepare\n"); pm_runtime_forbid(&isp->pdev->dev); } static void ipu7_pci_reset_done(struct pci_dev *pdev) { 
struct ipu7_device *isp = pci_get_drvdata(pdev); ipu_buttress_restore(isp); if (isp->secure_mode) ipu_buttress_reset_authentication(isp); isp->ipc_reinit = true; pm_runtime_allow(&isp->pdev->dev); dev_warn(&pdev->dev, "FLR completed\n"); } /* * PCI base driver code requires driver to provide these to enable * PCI device level PM state transitions (D0<->D3) */ static int ipu7_suspend(struct device *dev) { return 0; } static int ipu7_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ipu7_device *isp = pci_get_drvdata(pdev); struct ipu_buttress *b = &isp->buttress; int ret; isp->secure_mode = ipu_buttress_get_secure_mode(isp); dev_info(dev, "IPU7 in %s mode\n", isp->secure_mode ? "secure" : "non-secure"); ipu_buttress_restore(isp); ret = ipu_buttress_ipc_reset(isp, &b->cse); if (ret) dev_err(dev, "IPC reset protocol failed!\n"); ret = pm_runtime_get_sync(&isp->psys->auxdev.dev); if (ret < 0) { dev_err(dev, "Failed to get runtime PM\n"); return 0; } ret = ipu_buttress_authenticate(isp); if (ret) dev_err(dev, "FW authentication failed(%d)\n", ret); pm_runtime_put(&isp->psys->auxdev.dev); return 0; } static int ipu7_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ipu7_device *isp = pci_get_drvdata(pdev); int ret; ipu_buttress_restore(isp); if (isp->ipc_reinit) { struct ipu_buttress *b = &isp->buttress; isp->ipc_reinit = false; ret = ipu_buttress_ipc_reset(isp, &b->cse); if (ret) dev_err(dev, "IPC reset protocol failed!\n"); } return 0; } static const struct dev_pm_ops ipu7_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(&ipu7_suspend, &ipu7_resume) SET_RUNTIME_PM_OPS(&ipu7_suspend, /* Same as in suspend flow */ &ipu7_runtime_resume, NULL) }; static const struct pci_device_id ipu7_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU7_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU7P5_PCI_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU8_PCI_ID)}, {0,} }; MODULE_DEVICE_TABLE(pci, ipu7_pci_tbl); static const struct pci_error_handlers 
pci_err_handlers = { .reset_prepare = ipu7_pci_reset_prepare, .reset_done = ipu7_pci_reset_done, }; static struct pci_driver ipu7_pci_driver = { .name = IPU_NAME, .id_table = ipu7_pci_tbl, .probe = ipu7_pci_probe, .remove = ipu7_pci_remove, .driver = { .pm = &ipu7_pm_ops, }, .err_handler = &pci_err_handlers, }; module_pci_driver(ipu7_pci_driver); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) MODULE_IMPORT_NS("INTEL_IPU_BRIDGE"); #else MODULE_IMPORT_NS(INTEL_IPU_BRIDGE); #endif MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Tianshu Qiu "); MODULE_AUTHOR("Qingwu Zhang "); MODULE_AUTHOR("Intel"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu7 pci driver"); ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/ipu7.h000066400000000000000000000144721503071307200254160ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 - 2025 Intel Corporation */ #ifndef IPU7_H #define IPU7_H #include #include #include #include "ipu7-buttress.h" struct ipu7_bus_device; struct pci_dev; struct firmware; #define IPU_NAME "intel-ipu7" #define IPU_MEDIA_DEV_MODEL_NAME "ipu7" #define IPU7_FIRMWARE_NAME "intel/ipu/ipu7_fw.bin" #define IPU7P5_FIRMWARE_NAME "intel/ipu/ipu7ptl_fw.bin" #define IPU8_FIRMWARE_NAME "intel/ipu/ipu8_fw.bin" #define IPU7_ISYS_NUM_STREAMS 12 #define IPU7_PCI_ID 0x645d #define IPU7P5_PCI_ID 0xb05d #define IPU8_PCI_ID 0xd719 #define FW_LOG_BUF_SIZE (2 * 1024 * 1024) enum ipu_version { IPU_VER_INVALID = 0, IPU_VER_7 = 1, IPU_VER_7P5 = 2, IPU_VER_8 = 3, }; static inline bool is_ipu7p5(u8 hw_ver) { return hw_ver == IPU_VER_7P5; } static inline bool is_ipu7(u8 hw_ver) { return hw_ver == IPU_VER_7; } static inline bool is_ipu8(u8 hw_ver) { return hw_ver == IPU_VER_8; } #define IPU_UNIFIED_OFFSET 0 /* * ISYS DMA can overshoot. For higher resolutions over allocation is one line * but it must be at minimum 1024 bytes. 
Value could be different in * different versions / generations thus provide it via platform data. */ #define IPU_ISYS_OVERALLOC_MIN 1024 #define IPU_FW_CODE_REGION_SIZE 0x1000000 /* 16MB */ #define IPU_FW_CODE_REGION_START 0x4000000 /* 64MB */ #define IPU_FW_CODE_REGION_END (IPU_FW_CODE_REGION_START + \ IPU_FW_CODE_REGION_SIZE) /* 80MB */ struct ipu7_device { struct pci_dev *pdev; struct list_head devices; struct ipu7_bus_device *isys; struct ipu7_bus_device *psys; struct ipu_buttress buttress; const struct firmware *cpd_fw; const char *cpd_fw_name; /* Only for non-secure mode. */ void *fw_code_region; void __iomem *base; void __iomem *pb_base; #ifdef CONFIG_DEBUG_FS struct dentry *ipu7_dir; #endif u8 hw_ver; bool ipc_reinit; bool secure_mode; bool ipu7_bus_ready_to_probe; }; #define IPU_DMA_MASK 39 #define IPU_LIB_CALL_TIMEOUT_MS 2000 #define IPU_PSYS_CMD_TIMEOUT_MS 2000 #define IPU_PSYS_OPEN_CLOSE_TIMEOUT_US 50 #define IPU_PSYS_OPEN_CLOSE_RETRY (10000 / IPU_PSYS_OPEN_CLOSE_TIMEOUT_US) #define IPU_ISYS_NAME "isys" #define IPU_PSYS_NAME "psys" #define IPU_MMU_ADDR_BITS 32 /* FW is accessible within the first 2 GiB only in non-secure mode. */ #define IPU_MMU_ADDR_BITS_NON_SECURE 31 #define IPU7_IS_MMU_NUM 4U #define IPU7_PS_MMU_NUM 4U #define IPU7P5_IS_MMU_NUM 4U #define IPU7P5_PS_MMU_NUM 4U #define IPU8_IS_MMU_NUM 5U #define IPU8_PS_MMU_NUM 4U #define IPU_MMU_MAX_NUM 5U /* max(IS, PS) */ #define IPU_MMU_MAX_TLB_L1_STREAMS 40U #define IPU_MMU_MAX_TLB_L2_STREAMS 40U #define IPU_ZLX_MAX_NUM 32U #define IPU_ZLX_POOL_NUM 8U #define IPU_UAO_PLANE_MAX_NUM 64U /* * To maximize the IOSF utlization, IPU need to send requests in bursts. * At the DMA interface with the buttress, there are CDC FIFOs with burst * collection capability. CDC FIFO burst collectors have a configurable * threshold and is configured based on the outcome of performance measurements. 
* * isys has 3 ports with IOSF interface for VC0, VC1 and VC2 * psys has 4 ports with IOSF interface for VC0, VC1w, VC1r and VC2 * * Threshold values are pre-defined and are arrived at after performance * evaluations on a type of IPU */ #define IPU_MAX_VC_IOSF_PORTS 4 /* * IPU must configure correct arbitration mechanism related to the IOSF VC * requests. There are two options per VC0 and VC1 - > 0 means rearbitrate on * stall and 1 means stall until the request is completed. */ #define IPU_BTRS_ARB_MODE_TYPE_REARB 0 #define IPU_BTRS_ARB_MODE_TYPE_STALL 1 /* Currently chosen arbitration mechanism for VC0 */ #define IPU_BTRS_ARB_STALL_MODE_VC0 IPU_BTRS_ARB_MODE_TYPE_REARB /* Currently chosen arbitration mechanism for VC1 */ #define IPU_BTRS_ARB_STALL_MODE_VC1 IPU_BTRS_ARB_MODE_TYPE_REARB /* One L2 entry maps 1024 L1 entries and one L1 entry per page */ #define IPU_MMUV2_L2_RANGE (1024 * PAGE_SIZE) /* Max L2 blocks per stream */ #define IPU_MMUV2_MAX_L2_BLOCKS 2 /* Max L1 blocks per stream */ #define IPU_MMUV2_MAX_L1_BLOCKS 16 #define IPU_MMUV2_TRASH_RANGE (IPU_MMUV2_L2_RANGE * \ IPU_MMUV2_MAX_L2_BLOCKS) /* Entries per L1 block */ #define MMUV2_ENTRIES_PER_L1_BLOCK 16 #define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * PAGE_SIZE) #define MMUV2_TRASH_L2_BLOCK_OFFSET IPU_MMUV2_L2_RANGE struct ipu7_mmu_hw { char name[32]; void __iomem *base; void __iomem *zlx_base; void __iomem *uao_base; u32 offset; u32 zlx_offset; u32 uao_offset; u32 info_bits; u32 refill; u32 collapse_en_bitmap; u32 at_sp_arb_cfg; u32 l1_block; u32 l2_block; u8 nr_l1streams; u8 nr_l2streams; u32 l1_block_sz[IPU_MMU_MAX_TLB_L1_STREAMS]; u32 l2_block_sz[IPU_MMU_MAX_TLB_L2_STREAMS]; u8 zlx_nr; u32 zlx_axi_pool[IPU_ZLX_POOL_NUM]; u32 zlx_en[IPU_ZLX_MAX_NUM]; u32 zlx_conf[IPU_ZLX_MAX_NUM]; u32 uao_p_num; u32 uao_p2tlb[IPU_UAO_PLANE_MAX_NUM]; }; struct ipu7_mmu_pdata { u32 nr_mmus; struct ipu7_mmu_hw mmu_hw[IPU_MMU_MAX_NUM]; int mmid; }; struct ipu7_isys_csi2_pdata { void __iomem *base; }; 
struct ipu7_isys_internal_csi2_pdata { u32 nports; u32 const *offsets; u32 gpreg; }; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC struct ipu7_isys_internal_tpg_pdata { u32 ntpgs; u32 const *offsets; u32 *sels; }; #endif struct ipu7_hw_variants { unsigned long offset; u32 nr_mmus; struct ipu7_mmu_hw mmu_hw[IPU_MMU_MAX_NUM]; u8 cdc_fifos; u8 cdc_fifo_threshold[IPU_MAX_VC_IOSF_PORTS]; u32 dmem_offset; u32 spc_offset; /* SPC offset from psys base */ }; struct ipu_isys_internal_pdata { struct ipu7_isys_internal_csi2_pdata csi2; #ifdef CONFIG_VIDEO_INTEL_IPU7_MGC struct ipu7_isys_internal_tpg_pdata tpg; #endif struct ipu7_hw_variants hw_variant; u32 num_parallel_streams; u32 isys_dma_overshoot; }; struct ipu7_isys_pdata { void __iomem *base; const struct ipu_isys_internal_pdata *ipdata; }; struct ipu_psys_internal_pdata { struct ipu7_hw_variants hw_variant; }; struct ipu7_psys_pdata { void __iomem *base; const struct ipu_psys_internal_pdata *ipdata; }; int request_cpd_fw(const struct firmware **firmware_p, const char *name, struct device *device); void ipu_internal_pdata_init(struct ipu_isys_internal_pdata *isys_ipdata, struct ipu_psys_internal_pdata *psys_ipdata); void ipu7_dump_fw_error_log(const struct ipu7_bus_device *adev); #endif /* IPU7_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/psys/000077500000000000000000000000001503071307200253475ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/psys/Makefile000066400000000000000000000007371503071307200270160ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2017 - 2024 Intel Corporation. 
is_kernel_lt_6_10 = $(shell if [ $$(printf "6.10\n$(KERNELVERSION)" | sort -V | head -n1) != "6.10" ]; then echo 1; fi) ifeq ($(is_kernel_lt_6_10), 1) ifneq ($(EXTERNAL_BUILD), 1) src := $(srctree)/$(src) endif endif intel-ipu7-psys-objs += ipu-psys.o \ ipu7-psys.o \ ipu7-fw-psys.o obj-$(CONFIG_VIDEO_INTEL_IPU7) += intel-ipu7-psys.o ccflags-y += -I$(src)/ ccflags-y += -I$(src)/../ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/psys/ipu-psys.c000066400000000000000000001040221503071307200273030ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2013 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-mmu.h" #include "ipu7-bus.h" #include "ipu7-buttress.h" #include "ipu7-cpd.h" #include "ipu7-dma.h" #include "ipu7-fw-psys.h" #include "ipu7-psys.h" #include "ipu7-platform-regs.h" #include "ipu7-syscom.h" #include "ipu7-boot.h" #define IPU_PSYS_NUM_DEVICES 4U static int psys_runtime_pm_resume(struct device *dev); static int psys_runtime_pm_suspend(struct device *dev); #define IPU_FW_CALL_TIMEOUT_JIFFIES \ msecs_to_jiffies(IPU_PSYS_CMD_TIMEOUT_MS) static dev_t ipu7_psys_dev_t; static DECLARE_BITMAP(ipu7_psys_devices, IPU_PSYS_NUM_DEVICES); static DEFINE_MUTEX(ipu7_psys_mutex); static int ipu7_psys_get_userpages(struct ipu7_dma_buf_attach *attach) { struct vm_area_struct *vma; unsigned long start, end; int npages, array_size; struct page **pages; struct sg_table *sgt; int ret = -ENOMEM; int nr = 0; u32 flags; start = (unsigned long)attach->userptr; end = PAGE_ALIGN(start + attach->len); npages = PHYS_PFN(end - (start & PAGE_MASK)); array_size = npages * sizeof(struct page *); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return -ENOMEM; WARN_ON_ONCE(attach->npages); pages = kvzalloc(array_size, GFP_KERNEL); if (!pages) goto free_sgt; 
mmap_read_lock(current->mm); vma = vma_lookup(current->mm, start); if (unlikely(!vma)) { ret = -EFAULT; goto error_up_read; } mmap_read_unlock(current->mm); flags = FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM; nr = pin_user_pages_fast(start & PAGE_MASK, npages, flags, pages); if (nr < npages) goto error; attach->pages = pages; attach->npages = npages; ret = sg_alloc_table_from_pages(sgt, pages, npages, start & ~PAGE_MASK, attach->len, GFP_KERNEL); if (ret < 0) goto error; attach->sgt = sgt; return 0; error_up_read: mmap_read_unlock(current->mm); error: if (nr) unpin_user_pages(pages, nr); kvfree(pages); free_sgt: kfree(sgt); pr_err("failed to get userpages:%d\n", ret); return ret; } static void ipu7_psys_put_userpages(struct ipu7_dma_buf_attach *attach) { if (!attach || !attach->userptr || !attach->sgt) return; unpin_user_pages(attach->pages, attach->npages); kvfree(attach->pages); sg_free_table(attach->sgt); kfree(attach->sgt); attach->sgt = NULL; } static int ipu7_dma_buf_attach(struct dma_buf *dbuf, struct dma_buf_attachment *attach) { struct ipu7_psys_kbuffer *kbuf = dbuf->priv; struct ipu7_dma_buf_attach *ipu7_attach; int ret; ipu7_attach = kzalloc(sizeof(*ipu7_attach), GFP_KERNEL); if (!ipu7_attach) return -ENOMEM; ipu7_attach->len = kbuf->len; ipu7_attach->userptr = kbuf->userptr; attach->priv = ipu7_attach; ret = ipu7_psys_get_userpages(ipu7_attach); if (ret) { kfree(ipu7_attach); return ret; } return 0; } static void ipu7_dma_buf_detach(struct dma_buf *dbuf, struct dma_buf_attachment *attach) { struct ipu7_dma_buf_attach *ipu7_attach = attach->priv; ipu7_psys_put_userpages(ipu7_attach); kfree(ipu7_attach); attach->priv = NULL; } static struct sg_table *ipu7_dma_buf_map(struct dma_buf_attachment *attach, enum dma_data_direction dir) { struct ipu7_dma_buf_attach *ipu7_attach = attach->priv; struct pci_dev *pdev = to_pci_dev(attach->dev); struct ipu7_device *isp = pci_get_drvdata(pdev); struct ipu7_bus_device *adev = isp->psys; unsigned long attrs; int ret; attrs 
= DMA_ATTR_SKIP_CPU_SYNC; ret = dma_map_sgtable(&pdev->dev, ipu7_attach->sgt, DMA_BIDIRECTIONAL, attrs); if (ret) { dev_err(attach->dev, "pci buf map failed\n"); return ERR_PTR(-EIO); } dma_sync_sgtable_for_device(&pdev->dev, ipu7_attach->sgt, DMA_BIDIRECTIONAL); ret = ipu7_dma_map_sgtable(adev, ipu7_attach->sgt, dir, 0); if (ret) { dev_err(attach->dev, "ipu7 buf map failed\n"); return ERR_PTR(-EIO); } ipu7_dma_sync_sgtable(adev, ipu7_attach->sgt); return ipu7_attach->sgt; } static void ipu7_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { struct pci_dev *pdev = to_pci_dev(attach->dev); struct ipu7_device *isp = pci_get_drvdata(pdev); struct ipu7_bus_device *adev = isp->psys; ipu7_dma_unmap_sgtable(adev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); dma_unmap_sgtable(&pdev->dev, sgt, DMA_BIDIRECTIONAL, 0); } static int ipu7_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) { return -ENOTTY; } static void ipu7_dma_buf_release(struct dma_buf *buf) { struct ipu7_psys_kbuffer *kbuf = buf->priv; if (!kbuf) return; if (kbuf->db_attach) ipu7_psys_put_userpages(kbuf->db_attach->priv); kfree(kbuf); } static int ipu7_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction dir) { return -ENOTTY; } static int ipu7_dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct dma_buf_attachment *attach; struct ipu7_dma_buf_attach *ipu7_attach; if (list_empty(&dmabuf->attachments)) return -EINVAL; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu7_attach = attach->priv; if (!ipu7_attach || !ipu7_attach->pages || !ipu7_attach->npages) return -EINVAL; map->vaddr = vm_map_ram(ipu7_attach->pages, ipu7_attach->npages, 0); map->is_iomem = false; if (!map->vaddr) return -EINVAL; return 0; } static void ipu7_dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) { struct dma_buf_attachment *attach; struct ipu7_dma_buf_attach *ipu7_attach; if 
(WARN_ON(list_empty(&dmabuf->attachments))) return; attach = list_last_entry(&dmabuf->attachments, struct dma_buf_attachment, node); ipu7_attach = attach->priv; if (WARN_ON(!ipu7_attach || !ipu7_attach->pages || !ipu7_attach->npages)) return; vm_unmap_ram(map->vaddr, ipu7_attach->npages); } static struct dma_buf_ops ipu7_dma_buf_ops = { .attach = ipu7_dma_buf_attach, .detach = ipu7_dma_buf_detach, .map_dma_buf = ipu7_dma_buf_map, .unmap_dma_buf = ipu7_dma_buf_unmap, .release = ipu7_dma_buf_release, .begin_cpu_access = ipu7_dma_buf_begin_cpu_access, .mmap = ipu7_dma_buf_mmap, .vmap = ipu7_dma_buf_vmap, .vunmap = ipu7_dma_buf_vunmap, }; static int ipu7_psys_get_graph_id(struct ipu7_psys_fh *fh) { u8 graph_id = 0; for (graph_id = 0; graph_id < IPU_PSYS_NUM_STREAMS; graph_id++) { if (fh->psys->graph_id[graph_id] == INVALID_STREAM_ID) break; } if (graph_id == IPU_PSYS_NUM_STREAMS) return -EBUSY; fh->psys->graph_id[graph_id] = graph_id; return graph_id; } static void ipu7_psys_put_graph_id(struct ipu7_psys_fh *fh) { fh->psys->graph_id[fh->ip->graph_id] = INVALID_STREAM_ID; } static void ipu7_psys_free_msg_task(struct ipu_psys_task_queue *tq, struct ipu7_bus_device *adev) { if (tq->msg_task) ipu7_dma_free(adev, sizeof(struct ipu7_msg_task), tq->msg_task, tq->task_dma_addr, 0); list_del(&tq->list); kfree(tq); } static void ipu7_psys_stream_deinit(struct ipu7_psys_stream *ip, struct ipu7_bus_device *adev) { struct ipu_psys_task_ack *ack; struct ipu_psys_task_ack *event; struct ipu_psys_task_ack *tmp; struct ipu_psys_task_queue *tq; struct ipu_psys_task_queue *tq_tmp; mutex_destroy(&ip->event_mutex); mutex_destroy(&ip->task_mutex); list_for_each_entry_safe(tq, tq_tmp, &ip->tq_list, list) { ipu7_psys_free_msg_task(tq, adev); } list_for_each_entry_safe(tq, tq_tmp, &ip->tq_running_list, list) { ipu7_psys_free_msg_task(tq, adev); } list_for_each_entry_safe(event, tmp, &ip->event_list, list) { list_del(&event->list); kfree(event); } list_for_each_entry_safe(ack, tmp, 
&ip->ack_list, list) { list_del(&ack->list); kfree(ack); } } static int ipu7_psys_stream_init(struct ipu7_psys_stream *ip, struct ipu7_bus_device *adev) { struct device *dev = &adev->auxdev.dev; struct ipu_psys_task_ack *event; struct ipu_psys_task_ack *tmp; struct ipu_psys_task_queue *tq; struct ipu_psys_task_queue *tq_tmp; u8 i; INIT_LIST_HEAD(&ip->event_list); INIT_LIST_HEAD(&ip->ack_list); INIT_LIST_HEAD(&ip->tq_running_list); INIT_LIST_HEAD(&ip->tq_list); for (i = 0; i < TASK_EVENT_QUEUE_SIZE; i++) { event = kzalloc(sizeof(*event), GFP_KERNEL); if (!event) goto event_cleanup; list_add(&event->list, &ip->event_list); } for (i = 0; i < TASK_REQUEST_QUEUE_SIZE; i++) { tq = kzalloc(sizeof(*tq), GFP_KERNEL); if (!tq) goto tq_cleanup; list_add(&tq->list, &ip->tq_list); tq->msg_task = ipu7_dma_alloc(adev, sizeof(struct ipu7_msg_task), &tq->task_dma_addr, GFP_KERNEL, 0); if (!tq->msg_task) { dev_err(dev, "Failed to allocate msg task.\n"); goto tq_cleanup; } } init_completion(&ip->graph_open); init_completion(&ip->graph_close); return 0; tq_cleanup: list_for_each_entry_safe(tq, tq_tmp, &ip->tq_list, list) { ipu7_psys_free_msg_task(tq, adev); } event_cleanup: list_for_each_entry_safe(event, tmp, &ip->event_list, list) { list_del(&event->list); kfree(event); } return -ENOMEM; } static int ipu7_psys_open(struct inode *inode, struct file *file) { struct ipu7_psys *psys = inode_to_ipu_psys(inode); struct device *dev = &psys->adev->auxdev.dev; struct ipu7_psys_fh *fh; struct ipu7_psys_stream *ip; int ret; fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (!fh) return -ENOMEM; ip = kzalloc(sizeof(*ip), GFP_KERNEL); if (!ip) { ret = -ENOMEM; goto alloc_failed; } ret = ipu7_psys_stream_init(ip, psys->adev); if (ret) goto stream_init_failed; fh->ip = ip; ip->fh = fh; fh->psys = psys; file->private_data = fh; mutex_init(&fh->mutex); INIT_LIST_HEAD(&fh->bufmap); init_waitqueue_head(&fh->wait); mutex_init(&ip->task_mutex); mutex_init(&ip->event_mutex); mutex_lock(&psys->mutex); ret = 
ipu7_psys_get_graph_id(fh); if (ret < 0) goto open_failed; fh->ip->graph_id = ret; ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "Runtime PM failed (%d)\n", ret); goto rpm_put; } list_add_tail(&fh->list, &psys->fhs); mutex_unlock(&psys->mutex); return 0; rpm_put: pm_runtime_put(dev); ipu7_psys_put_graph_id(fh); open_failed: ipu7_psys_stream_deinit(ip, psys->adev); mutex_destroy(&fh->mutex); mutex_unlock(&psys->mutex); stream_init_failed: kfree(ip); alloc_failed: kfree(fh); return ret; } static inline void ipu7_psys_kbuf_unmap(struct ipu7_psys_fh *fh, struct ipu7_psys_kbuffer *kbuf) { if (!kbuf) return; kbuf->valid = false; if (kbuf->kaddr) { struct iosys_map dmap; iosys_map_set_vaddr(&dmap, kbuf->kaddr); dma_buf_vunmap_unlocked(kbuf->dbuf, &dmap); } if (!kbuf->userptr) ipu7_dma_unmap_sgtable(fh->psys->adev, kbuf->sgt, DMA_BIDIRECTIONAL, 0); if (kbuf->sgt) dma_buf_unmap_attachment_unlocked(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); if (kbuf->db_attach) dma_buf_detach(kbuf->dbuf, kbuf->db_attach); dma_buf_put(kbuf->dbuf); kbuf->db_attach = NULL; kbuf->dbuf = NULL; kbuf->sgt = NULL; } static int ipu7_psys_release(struct inode *inode, struct file *file) { struct ipu7_psys *psys = inode_to_ipu_psys(inode); struct ipu7_psys_fh *fh = file->private_data; struct ipu7_psys_kbuffer *kbuf, *kbuf0; struct dma_buf_attachment *dba; mutex_lock(&fh->mutex); /* clean up buffers */ if (!list_empty(&fh->bufmap)) { list_for_each_entry_safe(kbuf, kbuf0, &fh->bufmap, list) { list_del(&kbuf->list); dba = kbuf->db_attach; /* Unmap and release buffers */ if (kbuf->dbuf && dba) { ipu7_psys_kbuf_unmap(fh, kbuf); } else { if (dba) ipu7_psys_put_userpages(dba->priv); kfree(kbuf); } } } mutex_unlock(&fh->mutex); ipu7_psys_stream_deinit(fh->ip, psys->adev); mutex_lock(&psys->mutex); list_del(&fh->list); ipu7_psys_put_graph_id(fh); kfree(fh->ip); mutex_unlock(&psys->mutex); mutex_destroy(&fh->mutex); kfree(fh); pm_runtime_put_sync(&psys->adev->auxdev.dev); return 0; } static int 
ipu7_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu7_psys_fh *fh) { struct device *dev = &fh->psys->adev->auxdev.dev; struct ipu7_psys_kbuffer *kbuf; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct dma_buf *dbuf; int ret; if (!buf->base.userptr) { dev_err(dev, "Buffer allocation not supported\n"); return -EINVAL; } if (!PAGE_ALIGNED(buf->base.userptr)) { dev_err(dev, "Not page-aligned userptr is not supported\n"); return -EINVAL; } kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); if (!kbuf) return -ENOMEM; kbuf->len = buf->len; kbuf->userptr = buf->base.userptr; kbuf->flags = buf->flags; exp_info.ops = &ipu7_dma_buf_ops; exp_info.size = kbuf->len; exp_info.flags = O_RDWR; exp_info.priv = kbuf; dbuf = dma_buf_export(&exp_info); if (IS_ERR(dbuf)) { kfree(kbuf); return PTR_ERR(dbuf); } ret = dma_buf_fd(dbuf, 0); if (ret < 0) { dma_buf_put(dbuf); return ret; } kbuf->fd = ret; buf->base.fd = ret; buf->flags &= ~IPU_BUFFER_FLAG_USERPTR; buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE; kbuf->flags = buf->flags; mutex_lock(&fh->mutex); list_add(&kbuf->list, &fh->bufmap); mutex_unlock(&fh->mutex); dev_dbg(dev, "IOC_GETBUF: userptr %p size %llu to fd %d", buf->base.userptr, buf->len, buf->base.fd); return 0; } static int ipu7_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu7_psys_fh *fh) { return 0; } static struct ipu7_psys_kbuffer * ipu7_psys_lookup_kbuffer(struct ipu7_psys_fh *fh, int fd) { struct ipu7_psys_kbuffer *kbuf; list_for_each_entry(kbuf, &fh->bufmap, list) { if (kbuf->fd == fd) return kbuf; } return NULL; } static int ipu7_psys_unmapbuf_locked(int fd, struct ipu7_psys_fh *fh, struct ipu7_psys_kbuffer *kbuf) { struct device *dev = &fh->psys->adev->auxdev.dev; if (!kbuf || fd != kbuf->fd) { dev_err(dev, "invalid kbuffer\n"); return -EINVAL; } /* From now on it is not safe to use this kbuffer */ ipu7_psys_kbuf_unmap(fh, kbuf); list_del(&kbuf->list); if (!kbuf->userptr) kfree(kbuf); dev_dbg(dev, "%s fd %d unmapped\n", __func__, fd); return 0; } static int 
ipu7_psys_mapbuf_locked(int fd, struct ipu7_psys_fh *fh, struct ipu7_psys_kbuffer *kbuf) { struct ipu7_psys *psys = fh->psys; struct device *dev = &psys->adev->isp->pdev->dev; struct dma_buf *dbuf; struct iosys_map dmap; int ret; dbuf = dma_buf_get(fd); if (IS_ERR(dbuf)) return -EINVAL; if (!kbuf) { /* This fd isn't generated by ipu7_psys_getbuf, it * is a new fd. Create a new kbuf item for this fd, and * add this kbuf to bufmap list. */ kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); if (!kbuf) { ret = -ENOMEM; goto mapbuf_fail; } list_add(&kbuf->list, &fh->bufmap); } /* fd valid and found, need remap */ if (kbuf->dbuf && (kbuf->dbuf != dbuf || kbuf->len != dbuf->size)) { dev_dbg(dev, "dmabuf fd %d with kbuf %p changed, need remap.\n", fd, kbuf); ret = ipu7_psys_unmapbuf_locked(fd, fh, kbuf); if (ret) goto mapbuf_fail; kbuf = ipu7_psys_lookup_kbuffer(fh, fd); /* changed external dmabuf */ if (!kbuf) { kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL); if (!kbuf) { ret = -ENOMEM; goto mapbuf_fail; } list_add(&kbuf->list, &fh->bufmap); } } if (kbuf->sgt) { dev_dbg(dev, "fd %d has been mapped!\n", fd); dma_buf_put(dbuf); goto mapbuf_end; } kbuf->dbuf = dbuf; if (kbuf->len == 0) kbuf->len = kbuf->dbuf->size; kbuf->fd = fd; kbuf->db_attach = dma_buf_attach(kbuf->dbuf, dev); if (IS_ERR(kbuf->db_attach)) { ret = PTR_ERR(kbuf->db_attach); dev_err(dev, "dma buf attach failed\n"); goto attach_fail; } kbuf->sgt = dma_buf_map_attachment_unlocked(kbuf->db_attach, DMA_BIDIRECTIONAL); if (IS_ERR_OR_NULL(kbuf->sgt)) { ret = -EINVAL; kbuf->sgt = NULL; dev_err(dev, "dma buf map attachment failed\n"); goto kbuf_map_fail; } if (!kbuf->userptr) { ret = ipu7_dma_map_sgtable(psys->adev, kbuf->sgt, DMA_BIDIRECTIONAL, 0); if (ret) { dev_dbg(dev, "ipu7 buf map failed\n"); goto kbuf_map_fail; } } kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl); /* no need vmap for imported dmabufs */ if (!kbuf->userptr) goto mapbuf_end; dmap.is_iomem = false; ret = dma_buf_vmap_unlocked(kbuf->dbuf, &dmap); if (ret) { 
dev_err(dev, "dma buf vmap failed\n"); goto kbuf_map_fail; } kbuf->kaddr = dmap.vaddr; mapbuf_end: dev_dbg(dev, "%s %s kbuf %p fd %d with len %llu mapped\n", __func__, kbuf->kaddr ? "private" : "imported", kbuf, fd, kbuf->len); kbuf->valid = true; return 0; kbuf_map_fail: if (!IS_ERR_OR_NULL(kbuf->sgt)) { if (!kbuf->userptr) ipu7_dma_unmap_sgtable(psys->adev, kbuf->sgt, DMA_BIDIRECTIONAL, 0); dma_buf_unmap_attachment_unlocked(kbuf->db_attach, kbuf->sgt, DMA_BIDIRECTIONAL); } dma_buf_detach(kbuf->dbuf, kbuf->db_attach); attach_fail: list_del(&kbuf->list); if (!kbuf->userptr) kfree(kbuf); mapbuf_fail: dma_buf_put(dbuf); dev_err(dev, "%s failed for fd %d\n", __func__, fd); return ret; } static long ipu7_psys_mapbuf(int fd, struct ipu7_psys_fh *fh) { long ret; struct ipu7_psys_kbuffer *kbuf; dev_dbg(&fh->psys->adev->auxdev.dev, "IOC_MAPBUF\n"); mutex_lock(&fh->mutex); kbuf = ipu7_psys_lookup_kbuffer(fh, fd); ret = ipu7_psys_mapbuf_locked(fd, fh, kbuf); mutex_unlock(&fh->mutex); return ret; } static long ipu7_psys_unmapbuf(int fd, struct ipu7_psys_fh *fh) { struct device *dev = &fh->psys->adev->auxdev.dev; struct ipu7_psys_kbuffer *kbuf; long ret; dev_dbg(dev, "IOC_UNMAPBUF\n"); mutex_lock(&fh->mutex); kbuf = ipu7_psys_lookup_kbuffer(fh, fd); if (!kbuf) { dev_err(dev, "buffer with fd %d not found\n", fd); mutex_unlock(&fh->mutex); return -EINVAL; } ret = ipu7_psys_unmapbuf_locked(fd, fh, kbuf); mutex_unlock(&fh->mutex); return ret; } static long ipu_psys_graph_open(struct ipu_psys_graph_info *graph, struct ipu7_psys_fh *fh) { struct ipu7_psys *psys = fh->psys; int ret = 0; if (fh->ip->graph_state != IPU_MSG_GRAPH_STATE_CLOSED) { dev_err(&psys->dev, "Wrong state %d to open graph %d\n", fh->ip->graph_state, fh->ip->graph_id); return -EINVAL; } if (!graph->nodes || graph->num_nodes > MAX_GRAPH_NODES) { dev_err(&psys->dev, "nodes is wrong\n"); return -EINVAL; } if (copy_from_user(fh->ip->nodes, graph->nodes, graph->num_nodes * sizeof(*graph->nodes))) { dev_err(&psys->dev, 
"Failed to copy nodes\n"); return -EINVAL; } reinit_completion(&fh->ip->graph_open); ret = ipu7_fw_psys_graph_open(graph, psys, fh->ip); if (ret) { dev_err(&psys->dev, "Failed to open graph %d\n", fh->ip->graph_id); return ret; } fh->ip->graph_state = IPU_MSG_GRAPH_STATE_OPEN_WAIT; ret = wait_for_completion_timeout(&fh->ip->graph_open, IPU_FW_CALL_TIMEOUT_JIFFIES); if (!ret) { dev_err(&psys->dev, "Open graph %d timeout\n", fh->ip->graph_id); fh->ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSED; return -ETIMEDOUT; } if (fh->ip->graph_state != IPU_MSG_GRAPH_STATE_OPEN) { dev_err(&psys->dev, "Failed to set graph\n"); fh->ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSED; return -EINVAL; } graph->graph_id = fh->ip->graph_id; return 0; } static long ipu_psys_graph_close(int graph_id, struct ipu7_psys_fh *fh) { struct ipu7_psys *psys = fh->psys; int ret = 0; if (fh->ip->graph_state != IPU_MSG_GRAPH_STATE_OPEN) { dev_err(&psys->dev, "Wrong state %d to open graph %d\n", fh->ip->graph_state, fh->ip->graph_id); return -EINVAL; } reinit_completion(&fh->ip->graph_close); ret = ipu7_fw_psys_graph_close(fh->ip->graph_id, fh->psys); if (ret) { dev_err(&psys->dev, "Failed to close graph %d\n", fh->ip->graph_id); return ret; } fh->ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSE_WAIT; ret = wait_for_completion_timeout(&fh->ip->graph_close, IPU_FW_CALL_TIMEOUT_JIFFIES); if (!ret) { dev_err(&psys->dev, "Close graph %d timeout\n", fh->ip->graph_id); return -ETIMEDOUT; } if (fh->ip->graph_state != IPU_MSG_GRAPH_STATE_CLOSED) { dev_err(&psys->dev, "Failed to close graph\n"); fh->ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSED; return -EINVAL; } return 0; } static struct ipu_psys_task_queue * ipu7_psys_get_task_queue(struct ipu7_psys_stream *ip, struct ipu_psys_task_request *task) { struct device *dev = &ip->fh->psys->dev; struct ipu7_psys_kbuffer *kbuf = NULL; struct ipu_psys_task_queue *tq; int fd, prevfd = -1; u32 i; if (task->term_buf_count > MAX_GRAPH_TERMINALS) { dev_err(dev, "num_teminal_buffer is 
too large\n"); return NULL; } mutex_lock(&ip->task_mutex); if (list_empty(&ip->tq_list)) { dev_err(dev, "No available take queue for stream %p\n", ip); goto unlock; } tq = list_first_entry(&ip->tq_list, struct ipu_psys_task_queue, list); if (copy_from_user(tq->task_buffers, task->task_buffers, task->term_buf_count * sizeof(*task->task_buffers))) { dev_err(dev, "failed to copy task buffers\n"); goto unlock; } for (i = 0; i < task->term_buf_count; i++) { fd = tq->task_buffers[i].term_buf.base.fd; kbuf = ipu7_psys_lookup_kbuffer(ip->fh, fd); if (!kbuf) { dev_err(dev, "fd %d not found\n", fd); goto unlock; } tq->ipu7_addr[i] = kbuf->dma_addr + tq->task_buffers[i].term_buf.data_offset; if (prevfd == fd || (tq->task_buffers[i].term_buf.flags & IPU_BUFFER_FLAG_NO_FLUSH)) continue; prevfd = fd; if (kbuf->kaddr) clflush_cache_range(kbuf->kaddr, kbuf->len); } dev_dbg(dev, "frame %d to task queue %p\n", task->frame_id, tq); list_move_tail(&tq->list, &ip->tq_running_list); mutex_unlock(&ip->task_mutex); return tq; unlock: mutex_unlock(&ip->task_mutex); return NULL; } static long ipu_psys_task_request(struct ipu_psys_task_request *task, struct ipu7_psys_fh *fh) { struct ipu7_psys *psys = fh->psys; struct ipu_psys_task_queue *tq; int ret = 0; if (task->term_buf_count == 0 || !task->task_buffers) { dev_err(&psys->dev, "task_buffer is NULL\n"); return -EINVAL; } tq = ipu7_psys_get_task_queue(fh->ip, task); if (!tq) { dev_err(&psys->dev, "Failed to get task queue\n"); return -EINVAL; } ret = ipu7_fw_psys_task_request(task, fh->ip, tq, psys); if (ret) { dev_err(&psys->dev, "Failed to request task %d\n", fh->ip->graph_id); mutex_lock(&fh->ip->task_mutex); list_move_tail(&tq->list, &fh->ip->tq_list); mutex_unlock(&fh->ip->task_mutex); return ret; } tq->task_state = IPU_MSG_TASK_STATE_WAIT_DONE; return 0; } static unsigned int ipu7_psys_poll(struct file *file, struct poll_table_struct *wait) { struct ipu7_psys_fh *fh = file->private_data; struct device *dev = 
&fh->psys->adev->auxdev.dev; struct ipu7_psys_stream *ip = fh->ip; unsigned int res = 0; dev_dbg(dev, "ipu psys poll\n"); poll_wait(file, &fh->wait, wait); mutex_lock(&ip->event_mutex); if (!list_empty(&ip->ack_list)) res = POLLIN; mutex_unlock(&ip->event_mutex); dev_dbg(dev, "ipu psys poll res %u\n", res); return res; } static long ipu7_psys_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { union { struct ipu_psys_graph_info graph; struct ipu_psys_task_request task; struct ipu_psys_buffer buf; struct ipu_psys_event ev; } karg; struct ipu7_psys_fh *fh = file->private_data; long err = 0; void __user *up = (void __user *)arg; bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF && cmd != IPU_IOC_GRAPH_CLOSE); if (copy) { if (_IOC_SIZE(cmd) > sizeof(karg)) return -ENOTTY; if (_IOC_DIR(cmd) & _IOC_WRITE) { err = copy_from_user(&karg, up, _IOC_SIZE(cmd)); if (err) return -EFAULT; } } switch (cmd) { case IPU_IOC_MAPBUF: err = ipu7_psys_mapbuf(arg, fh); break; case IPU_IOC_UNMAPBUF: err = ipu7_psys_unmapbuf(arg, fh); break; case IPU_IOC_GETBUF: err = ipu7_psys_getbuf(&karg.buf, fh); break; case IPU_IOC_PUTBUF: err = ipu7_psys_putbuf(&karg.buf, fh); break; case IPU_IOC_GRAPH_OPEN: err = ipu_psys_graph_open(&karg.graph, fh); break; case IPU_IOC_GRAPH_CLOSE: err = ipu_psys_graph_close(arg, fh); break; case IPU_IOC_TASK_REQUEST: err = ipu_psys_task_request(&karg.task, fh); break; case IPU_IOC_DQEVENT: err = ipu7_ioctl_dqevent(&karg.ev, fh, file->f_flags); break; default: err = -ENOTTY; break; } if (err) return err; if (copy && _IOC_DIR(cmd) & _IOC_READ) if (copy_to_user(up, &karg, _IOC_SIZE(cmd))) return -EFAULT; return 0; } static const struct file_operations ipu7_psys_fops = { .open = ipu7_psys_open, .release = ipu7_psys_release, .unlocked_ioctl = ipu7_psys_ioctl, .poll = ipu7_psys_poll, .owner = THIS_MODULE, }; static void ipu7_psys_dev_release(struct device *dev) { } static int psys_runtime_pm_resume(struct device *dev) { struct ipu7_bus_device 
*adev = to_ipu7_bus_device(dev); struct ipu7_psys *psys = ipu7_bus_get_drvdata(adev); unsigned long flags; int ret; if (!psys) return 0; spin_lock_irqsave(&psys->ready_lock, flags); if (psys->ready) { spin_unlock_irqrestore(&psys->ready_lock, flags); return 0; } spin_unlock_irqrestore(&psys->ready_lock, flags); ret = ipu7_mmu_hw_init(adev->mmu); if (ret) return ret; if (!ipu_buttress_auth_done(adev->isp)) { dev_dbg(dev, "%s: not yet authenticated, skipping\n", __func__); return 0; } ipu7_psys_setup_hw(psys); ipu7_psys_subdomains_power(psys, 1); ret = ipu7_boot_start_fw(psys->adev); if (ret) { dev_err(&psys->dev, "failed to start psys fw. ret: %d\n", ret); return ret; } ret = ipu7_fw_psys_open(psys); if (ret) { dev_err(&psys->adev->auxdev.dev, "Failed to open abi.\n"); return ret; } spin_lock_irqsave(&psys->ready_lock, flags); psys->ready = 1; spin_unlock_irqrestore(&psys->ready_lock, flags); return 0; } static int psys_runtime_pm_suspend(struct device *dev) { struct ipu7_bus_device *adev = to_ipu7_bus_device(dev); struct ipu7_psys *psys = ipu7_bus_get_drvdata(adev); unsigned long flags; if (!psys) return 0; spin_lock_irqsave(&psys->ready_lock, flags); if (!psys->ready) { spin_unlock_irqrestore(&psys->ready_lock, flags); return 0; } psys->ready = 0; spin_unlock_irqrestore(&psys->ready_lock, flags); ipu7_fw_psys_close(psys); ipu7_boot_stop_fw(psys->adev); ipu7_psys_subdomains_power(psys, 0); ipu7_mmu_hw_cleanup(adev->mmu); return 0; } /* The following PM callbacks are needed to enable runtime PM in IPU PCI * device resume, otherwise, runtime PM can't work in PCI resume from * S3 state. 
*/ static int psys_resume(struct device *dev) { return 0; } static int psys_suspend(struct device *dev) { struct ipu7_psys *psys = dev_get_drvdata(dev); unsigned long flags; int ret = 0; spin_lock_irqsave(&psys->ready_lock, flags); if (psys->ready) ret = -EBUSY; spin_unlock_irqrestore(&psys->ready_lock, flags); return ret; } static const struct dev_pm_ops psys_pm_ops = { .runtime_suspend = psys_runtime_pm_suspend, .runtime_resume = psys_runtime_pm_resume, .suspend = psys_suspend, .resume = psys_resume, }; #ifdef CONFIG_DEBUG_FS static int psys_fw_log_init(struct ipu7_psys *psys) { struct device *dev = &psys->adev->auxdev.dev; struct psys_fw_log *fw_log; void *log_buf; if (psys->fw_log) return 0; fw_log = devm_kzalloc(dev, sizeof(*fw_log), GFP_KERNEL); if (!fw_log) return -ENOMEM; mutex_init(&fw_log->mutex); log_buf = devm_kzalloc(dev, FW_LOG_BUF_SIZE, GFP_KERNEL); if (!log_buf) return -ENOMEM; fw_log->head = log_buf; fw_log->addr = log_buf; fw_log->count = 0; fw_log->size = 0; psys->fw_log = fw_log; return 0; } static ssize_t fwlog_read(struct file *file, char __user *userbuf, size_t size, loff_t *pos) { struct ipu7_psys *psys = file->private_data; struct psys_fw_log *fw_log = psys->fw_log; struct device *dev = &psys->adev->auxdev.dev; u32 log_size; void *buf; int ret = 0; if (!fw_log) return 0; buf = kvzalloc(FW_LOG_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&fw_log->mutex); if (!fw_log->size) { dev_warn(dev, "no available fw log\n"); mutex_unlock(&fw_log->mutex); goto free_and_return; } if (fw_log->size > FW_LOG_BUF_SIZE) log_size = FW_LOG_BUF_SIZE; else log_size = fw_log->size; memcpy(buf, fw_log->addr, log_size); dev_dbg(dev, "copy %d bytes fw log to user\n", log_size); mutex_unlock(&fw_log->mutex); ret = simple_read_from_buffer(userbuf, size, pos, buf, log_size); free_and_return: kvfree(buf); return ret; } static const struct file_operations psys_fw_log_fops = { .open = simple_open, .owner = THIS_MODULE, .read = fwlog_read, .llseek = 
default_llseek, }; static int ipu7_psys_init_debugfs(struct ipu7_psys *psys) { struct dentry *file; struct dentry *dir; dir = debugfs_create_dir("psys", psys->adev->isp->ipu7_dir); if (IS_ERR(dir)) return -ENOMEM; file = debugfs_create_file("fwlog", 0400, dir, psys, &psys_fw_log_fops); if (IS_ERR(file)) goto err; psys->debugfsdir = dir; return 0; err: debugfs_remove_recursive(dir); return -ENOMEM; } #endif static const struct bus_type ipu7_psys_bus = { .name = "intel-ipu7-psys", }; static int ipu7_psys_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *auxdev_id) { struct ipu7_bus_device *adev = auxdev_to_adev(auxdev); struct device *dev = &auxdev->dev; struct ipu7_psys *psys; unsigned int minor; unsigned int i; int ret; if (!adev->isp->ipu7_bus_ready_to_probe) return -EPROBE_DEFER; ret = alloc_chrdev_region(&ipu7_psys_dev_t, 0, IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME); if (ret) { dev_err(dev, "can't alloc psys chrdev region (%d)\n", ret); return ret; } ret = pm_runtime_resume_and_get(&auxdev->dev); if (ret < 0) return ret; ret = ipu7_mmu_hw_init(adev->mmu); if (ret) goto out_unregister_chr_region; mutex_lock(&ipu7_psys_mutex); minor = find_next_zero_bit(ipu7_psys_devices, IPU_PSYS_NUM_DEVICES, 0); if (minor == IPU_PSYS_NUM_DEVICES) { dev_err(dev, "too many devices\n"); goto out_unlock; } psys = devm_kzalloc(dev, sizeof(*psys), GFP_KERNEL); if (!psys) { ret = -ENOMEM; goto out_unlock; } for (i = 0 ; i < IPU_PSYS_NUM_STREAMS; i++) psys->graph_id[i] = INVALID_STREAM_ID; adev->auxdrv_data = (const struct ipu7_auxdrv_data *)auxdev_id->driver_data; adev->auxdrv = to_auxiliary_drv(dev->driver); psys->adev = adev; psys->pdata = adev->pdata; cdev_init(&psys->cdev, &ipu7_psys_fops); psys->cdev.owner = ipu7_psys_fops.owner; ret = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu7_psys_dev_t), minor), 1); if (ret) { dev_err(dev, "cdev_add failed (%d)\n", ret); goto out_unlock; } set_bit(minor, ipu7_psys_devices); spin_lock_init(&psys->ready_lock); psys->ready = 0; 
psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS; mutex_init(&psys->mutex); INIT_LIST_HEAD(&psys->fhs); ret = ipu7_fw_psys_init(psys); if (ret) { dev_err(dev, "FW init failed(%d)\n", ret); goto out_mutex_destroy; } psys->dev.bus = &ipu7_psys_bus; psys->dev.parent = dev; psys->dev.devt = MKDEV(MAJOR(ipu7_psys_dev_t), minor); psys->dev.release = ipu7_psys_dev_release; dev_set_name(&psys->dev, "ipu7-psys%d", minor); ret = device_register(&psys->dev); if (ret < 0) { dev_err(&psys->dev, "psys device_register failed\n"); goto out_fw_release; } dev_set_drvdata(dev, psys); #ifdef CONFIG_DEBUG_FS psys_fw_log_init(psys); ipu7_psys_init_debugfs(psys); #endif dev_info(dev, "IPU psys probe done.\n"); ipu7_mmu_hw_cleanup(adev->mmu); pm_runtime_put(&auxdev->dev); return 0; out_fw_release: ipu7_fw_psys_release(psys); out_mutex_destroy: mutex_destroy(&psys->mutex); cdev_del(&psys->cdev); out_unlock: /* Safe to call even if the init is not called */ mutex_unlock(&ipu7_psys_mutex); ipu7_mmu_hw_cleanup(adev->mmu); out_unregister_chr_region: unregister_chrdev_region(ipu7_psys_dev_t, IPU_PSYS_NUM_DEVICES); pm_runtime_put(&auxdev->dev); return ret; } static void ipu7_psys_remove(struct auxiliary_device *auxdev) { struct ipu7_psys *psys = dev_get_drvdata(&auxdev->dev); struct device *dev = &auxdev->dev; #ifdef CONFIG_DEBUG_FS struct ipu7_device *isp = psys->adev->isp; if (isp->ipu7_dir) debugfs_remove_recursive(psys->debugfsdir); #endif mutex_lock(&ipu7_psys_mutex); ipu7_fw_psys_release(psys); device_unregister(&psys->dev); clear_bit(MINOR(psys->cdev.dev), ipu7_psys_devices); cdev_del(&psys->cdev); mutex_unlock(&ipu7_psys_mutex); mutex_destroy(&psys->mutex); unregister_chrdev_region(ipu7_psys_dev_t, IPU_PSYS_NUM_DEVICES); dev_info(dev, "removed\n"); } static irqreturn_t psys_isr_threaded(struct ipu7_bus_device *adev) { struct ipu7_psys *psys = ipu7_bus_get_drvdata(adev); struct device *dev = &psys->adev->auxdev.dev; void __iomem *base = psys->pdata->base; u32 status, state; int r; 
mutex_lock(&psys->mutex); r = pm_runtime_get_if_in_use(dev); if (!r || WARN_ON_ONCE(r < 0)) { mutex_unlock(&psys->mutex); return IRQ_NONE; } state = ipu7_boot_get_boot_state(adev); if (IA_GOFO_FW_BOOT_STATE_IS_CRITICAL(state)) { dev_warn(&psys->dev, "error state %u\n", state); } else { status = readl(base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_STATUS); writel(status, base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_CLEAR); if (status & IRQ_FROM_LOCAL_FW) ipu7_psys_handle_events(psys); } pm_runtime_put(dev); mutex_unlock(&psys->mutex); return IRQ_HANDLED; } static const struct ipu7_auxdrv_data ipu7_psys_auxdrv_data = { .isr_threaded = psys_isr_threaded, .wake_isr_thread = true, }; static const struct auxiliary_device_id ipu7_psys_id_table[] = { { .name = "intel_ipu7.psys", .driver_data = (kernel_ulong_t)&ipu7_psys_auxdrv_data, }, { } }; MODULE_DEVICE_TABLE(auxiliary, ipu7_psys_id_table); static struct auxiliary_driver ipu7_psys_driver = { .name = IPU_PSYS_NAME, .probe = ipu7_psys_probe, .remove = ipu7_psys_remove, .id_table = ipu7_psys_id_table, .driver = { .pm = &psys_pm_ops, }, }; module_auxiliary_driver(ipu7_psys_driver); MODULE_AUTHOR("Bingbu Cao "); MODULE_AUTHOR("Qingwu Zhang "); MODULE_AUTHOR("Tianshu Qiu "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Intel ipu7 processing system driver"); #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0) MODULE_IMPORT_NS("INTEL_IPU7"); MODULE_IMPORT_NS("DMA_BUF"); #else MODULE_IMPORT_NS(INTEL_IPU7); MODULE_IMPORT_NS(DMA_BUF); #endif ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/psys/ipu7-fw-psys.c000066400000000000000000000423561503071307200300170ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2016 - 2024 Intel Corporation #include #include #include #include "abi/ipu7_fw_common_abi.h" #include "abi/ipu7_fw_msg_abi.h" #include "abi/ipu7_fw_psys_config_abi.h" #include "ipu7-boot.h" #include "ipu7-bus.h" #include "ipu7-dma.h" #include "ipu7-fw-psys.h" #include "ipu7-syscom.h" #include 
"ipu7-psys.h" #define TLV_TYPE(type) ((u32)(type) & 0x3FU) #define TLV_SIZE(buf_size) (((buf_size) / TLV_ITEM_ALIGNMENT) & 0xFFFFU) /* * Node resource ID of INSYS, required when there is a link from INSYS to PSYS. */ #define IPU_PSYS_NODE_RSRC_ID_IS (0xFEU) /* * Special node resource ID to identify a generic external node. * Required when there is a link to/from IPU and that node. */ #define IPU_PSYS_NODE_RSRC_ID_EXT_IP (0xFFU) int ipu7_fw_psys_init(struct ipu7_psys *psys) { struct ipu7_bus_device *adev = psys->adev; struct device *dev = &adev->auxdev.dev; struct ipu7_syscom_context *syscom; struct ipu7_psys_config *psys_config; struct syscom_queue_config *queue_configs; dma_addr_t psys_config_dma_addr; u32 freq; int i, num_queues, ret; /* Allocate and init syscom context. */ syscom = devm_kzalloc(dev, sizeof(struct ipu7_syscom_context), GFP_KERNEL); if (!syscom) return -ENOMEM; adev->syscom = syscom; syscom->num_input_queues = FWPS_MSG_ABI_MAX_INPUT_QUEUES; syscom->num_output_queues = FWPS_MSG_ABI_MAX_OUTPUT_QUEUES; num_queues = syscom->num_input_queues + syscom->num_output_queues; queue_configs = devm_kzalloc(dev, FW_QUEUE_CONFIG_SIZE(num_queues), GFP_KERNEL); if (!queue_configs) { ipu7_fw_psys_release(psys); return -ENOMEM; } syscom->queue_configs = queue_configs; queue_configs[FWPS_MSG_ABI_OUT_ACK_QUEUE_ID].max_capacity = IPU_PSYS_ACK_QUEUE_SIZE; queue_configs[FWPS_MSG_ABI_OUT_ACK_QUEUE_ID].token_size_in_bytes = IPU_PSYS_OUT_MSG_SIZE; queue_configs[FWPS_MSG_ABI_OUT_LOG_QUEUE_ID].max_capacity = IPU_PSYS_LOG_QUEUE_SIZE; queue_configs[FWPS_MSG_ABI_OUT_LOG_QUEUE_ID].token_size_in_bytes = IPU_PSYS_OUT_MSG_SIZE; queue_configs[FWPS_MSG_ABI_IN_DEV_QUEUE_ID].max_capacity = IPU_PSYS_CMD_QUEUE_SIZE; queue_configs[FWPS_MSG_ABI_IN_DEV_QUEUE_ID].token_size_in_bytes = FWPS_MSG_HOST2FW_MAX_SIZE; queue_configs[FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID].max_capacity = 0; queue_configs[FWPS_MSG_ABI_IN_RESERVED_QUEUE_ID].token_size_in_bytes = 0; for (i = 
FWPS_MSG_ABI_IN_FIRST_TASK_QUEUE_ID; i < num_queues; i++) { queue_configs[i].max_capacity = IPU_PSYS_TASK_QUEUE_SIZE; queue_configs[i].token_size_in_bytes = sizeof(struct ia_gofo_msg_indirect); } /* Allocate PSYS subsys config. */ psys_config = ipu7_dma_alloc(adev, sizeof(struct ipu7_psys_config), &psys_config_dma_addr, GFP_KERNEL, 0); if (!psys_config) { dev_err(dev, "Failed to allocate psys subsys config.\n"); ipu7_fw_psys_release(psys); return -ENOMEM; } psys->subsys_config = psys_config; psys->subsys_config_dma_addr = psys_config_dma_addr; memset(psys_config, 0, sizeof(struct ipu7_psys_config)); ret = ipu_buttress_get_psys_freq(adev->isp, &freq); if (ret) { dev_err(dev, "Failed to get PSYS frequency.\n"); ipu7_fw_psys_release(psys); return ret; } ret = ipu7_boot_init_boot_config(adev, queue_configs, num_queues, freq, psys_config_dma_addr, 1U); if (ret) ipu7_fw_psys_release(psys); return ret; } void ipu7_fw_psys_release(struct ipu7_psys *psys) { struct ipu7_bus_device *adev = psys->adev; ipu7_boot_release_boot_config(adev); if (psys->subsys_config) { ipu7_dma_free(adev, sizeof(struct ipu7_psys_config), psys->subsys_config, psys->subsys_config_dma_addr, 0); psys->subsys_config = NULL; psys->subsys_config_dma_addr = 0; } } static int ipu7_fw_dev_ready(struct ipu7_psys *psys, u16 type) { const struct ia_gofo_msg_header_ack *ack_header; u8 buffer[FWPS_MSG_FW2HOST_MAX_SIZE]; int ret; ret = ipu7_fw_psys_event_handle(psys, buffer); if (ret) return ret; ack_header = (const struct ia_gofo_msg_header_ack *)buffer; if (ack_header->header.tlv_header.tlv_type == type) return 0; return -EAGAIN; } static int ipu7_fw_dev_open(struct ipu7_psys *psys) { struct ipu7_syscom_context *ctx = psys->adev->syscom; struct ipu7_msg_dev_open *token; dev_dbg(&psys->dev, "send_token: fw psys open\n"); token = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); if (!token) return -ENODATA; token->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_DEV_OPEN); 
token->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*token)); token->header.user_token = 0; token->max_graphs = IPU_PSYS_MAX_GRAPH_NUMS; token->dev_msg_map = (u8)(IPU_MSG_DEVICE_OPEN_SEND_RESP | IPU_MSG_DEVICE_OPEN_SEND_IRQ); token->enable_power_gating = 0; ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); ipu_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } int ipu7_fw_psys_open(struct ipu7_psys *psys) { u32 retry = IPU_PSYS_OPEN_CLOSE_RETRY; int ret; ret = ipu7_fw_dev_open(psys); if (ret) { dev_err(&psys->dev, "failed to open PSYS dev.\n"); return ret; } psys->dev_state = IPU_MSG_DEV_STATE_OPEN_WAIT; do { usleep_range(IPU_PSYS_OPEN_CLOSE_TIMEOUT_US, IPU_PSYS_OPEN_CLOSE_TIMEOUT_US + 10); ret = ipu7_fw_dev_ready(psys, IPU_MSG_TYPE_DEV_OPEN_ACK); if (!ret) { dev_dbg(&psys->dev, "dev open done.\n"); psys->dev_state = IPU_MSG_DEV_STATE_OPEN; return 0; } } while (retry--); if (!retry) dev_err(&psys->dev, "wait dev open timeout!\n"); return ret; } static int ipu7_fw_dev_close(struct ipu7_psys *psys) { struct ipu7_syscom_context *ctx = psys->adev->syscom; struct ipu7_msg_dev_close *token; dev_dbg(&psys->dev, "send_token: fw psys close\n"); token = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); if (!token) return -ENODATA; token->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_DEV_CLOSE); token->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*token)); token->header.user_token = 0; token->dev_msg_map = (u8)(IPU_MSG_DEVICE_CLOSE_SEND_RESP | IPU_MSG_DEVICE_CLOSE_SEND_IRQ); ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); ipu_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } void ipu7_fw_psys_close(struct ipu7_psys *psys) { u32 retry = IPU_PSYS_OPEN_CLOSE_RETRY; int ret; ret = ipu7_fw_dev_close(psys); if (ret) { dev_err(&psys->dev, "failed to close PSYS dev.\n"); return; } psys->dev_state = IPU_MSG_DEV_STATE_CLOSE_WAIT; do { usleep_range(IPU_PSYS_OPEN_CLOSE_TIMEOUT_US, IPU_PSYS_OPEN_CLOSE_TIMEOUT_US + 10); ret = ipu7_fw_dev_ready(psys, 
IPU_MSG_TYPE_DEV_CLOSE_ACK); if (!ret) { dev_dbg(&psys->dev, "dev close done.\n"); psys->dev_state = IPU_MSG_DEV_STATE_CLOSED; return; } } while (retry--); if (!retry) dev_err(&psys->dev, "wait dev close timeout!\n"); } static void ipu7_fw_psys_build_node_profile(const struct node_profile *profile, void **buf_ptr_ptr) { struct ipu7_msg_cb_profile *cb_profile = (struct ipu7_msg_cb_profile *)*buf_ptr_ptr; u16 buf_size = sizeof(*cb_profile); memcpy(cb_profile->profile_base.teb, profile->teb, sizeof(cb_profile->profile_base.teb)); memcpy(cb_profile->rbm, profile->rbm, sizeof(cb_profile->rbm)); memcpy(cb_profile->deb, profile->deb, sizeof(cb_profile->deb)); memcpy(cb_profile->reb, profile->reb, sizeof(cb_profile->reb)); cb_profile->profile_base.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_NODE_PROFILE_TYPE_CB); cb_profile->profile_base.tlv_header.tlv_len32 = TLV_SIZE(buf_size); *buf_ptr_ptr += buf_size; } /* skip term, return false */ static bool ipu7_fw_psys_build_node_term(const struct node_ternimal *term, void **buf_ptr_ptr) { struct ipu7_msg_term *msg_term = (struct ipu7_msg_term *)*buf_ptr_ptr; u16 buf_size = sizeof(*msg_term); if (!term->term_id && !term->buf_size) return false; memset(msg_term, 0, sizeof(*msg_term)); msg_term->term_id = term->term_id; /* Disable progress message on connect terminals */ msg_term->event_req_bm = 0U; msg_term->payload_size = term->buf_size; msg_term->tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TERM_TYPE_BASE); msg_term->tlv_header.tlv_len32 = TLV_SIZE(buf_size); *buf_ptr_ptr += buf_size; return true; } /* When skip processing node, just return false */ static bool ipu7_fw_psys_build_node(const struct graph_node *node, void **buf_ptr_ptr) { struct ipu7_msg_node *msg_node = (struct ipu7_msg_node *)*buf_ptr_ptr; u16 buf_size = sizeof(*msg_node); bool ret = false; u8 i = 0; u8 max_terms = 0; memset(msg_node, 0, sizeof(*msg_node)); /** * Pass node info to FW, do not check for external IP and ISYS * As FW expects a external node */ if 
(node->node_rsrc_id != IPU_PSYS_NODE_RSRC_ID_IS && node->node_rsrc_id != IPU_PSYS_NODE_RSRC_ID_EXT_IP) { if (node->profiles[0].teb[0] == 0U) return false; } /** * Sanity check for dummy node, TEB should set to required one */ if (node->node_rsrc_id == IPU_PSYS_NODE_RSRC_ID_IS || node->node_rsrc_id == IPU_PSYS_NODE_RSRC_ID_EXT_IP) { if (node->profiles[0].teb[0] != IPU_MSG_NODE_DONT_CARE_TEB_LO || node->profiles[0].teb[1] != IPU_MSG_NODE_DONT_CARE_TEB_HI) return false; } msg_node->node_rsrc_id = node->node_rsrc_id; msg_node->node_ctx_id = node->node_ctx_id; msg_node->num_frags = 1; *buf_ptr_ptr += buf_size; msg_node->profiles_list.head_offset = (u16)((uintptr_t)*buf_ptr_ptr - (uintptr_t)&msg_node->profiles_list); for (i = 0; i < ARRAY_SIZE(node->profiles); i++) { ipu7_fw_psys_build_node_profile(&node->profiles[i], buf_ptr_ptr); msg_node->profiles_list.num_elems++; } msg_node->terms_list.head_offset = (u16)((uintptr_t)*buf_ptr_ptr - (uintptr_t)&msg_node->terms_list); max_terms = ARRAY_SIZE(node->terminals); for (i = 0; i < max_terms && i < node->num_terms; i++) { ret = ipu7_fw_psys_build_node_term(&node->terminals[i], buf_ptr_ptr); if (ret) msg_node->terms_list.num_elems++; } buf_size = (u32)(uintptr_t)*buf_ptr_ptr - (uintptr_t)msg_node; msg_node->tlv_header.tlv_type = TLV_TYPE(IPU_MSG_NODE_TYPE_BASE); msg_node->tlv_header.tlv_len32 = TLV_SIZE(buf_size); return true; } static bool ipu7_fw_psys_build_link(const struct graph_link *link, void **buf_ptr_ptr) { struct ipu7_msg_link *msg_link = (struct ipu7_msg_link *)*buf_ptr_ptr; if (!link->ep_src.node_ctx_id && !link->ep_dst.node_ctx_id && !link->ep_src.term_id && !link->ep_dst.term_id) return false; msg_link->endpoints.ep_src.node_ctx_id = link->ep_src.node_ctx_id; msg_link->endpoints.ep_src.term_id = link->ep_src.term_id; msg_link->endpoints.ep_dst.node_ctx_id = link->ep_dst.node_ctx_id; msg_link->endpoints.ep_dst.term_id = link->ep_dst.term_id; msg_link->foreign_key = link->foreign_key; msg_link->streaming_mode = 
link->streaming_mode; msg_link->pbk_id = link->pbk_id; msg_link->pbk_slot_id = link->pbk_slot_id; msg_link->delayed_link = link->delayed_link; *buf_ptr_ptr += sizeof(*msg_link); msg_link->link_options.num_elems = 0; msg_link->link_options.head_offset = (u16)((uintptr_t)*buf_ptr_ptr - (uintptr_t)&msg_link->link_options); msg_link->tlv_header.tlv_type = TLV_TYPE(IPU_MSG_LINK_TYPE_GENERIC); msg_link->tlv_header.tlv_len32 = TLV_SIZE(sizeof(*msg_link)); return true; } int ipu7_fw_psys_graph_open(const struct ipu_psys_graph_info *graph, struct ipu7_psys *psys, struct ipu7_psys_stream *ip) { struct ipu7_syscom_context *ctx = psys->adev->syscom; void *buf_ptr; struct ipu7_msg_graph_open *graph_open; u32 buf_size = 0; bool ret = false; u8 i = 0; dev_dbg(&psys->dev, "send_token: fw psys graph open\n"); buf_ptr = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); if (!buf_ptr) return -ENODATA; graph_open = (struct ipu7_msg_graph_open *)buf_ptr; memset(graph_open, 0, sizeof(*graph_open)); graph_open->graph_id = ip->graph_id; graph_open->graph_msg_map = (u8)(IPU_MSG_GRAPH_OPEN_SEND_RESP | IPU_MSG_GRAPH_OPEN_SEND_IRQ); buf_ptr += sizeof(*graph_open); graph_open->nodes.head_offset = (u16)((uintptr_t)buf_ptr - (uintptr_t)&graph_open->nodes); for (i = 0; i < ARRAY_SIZE(ip->nodes); i++) { ret = ipu7_fw_psys_build_node(&ip->nodes[i], &buf_ptr); if (ret) graph_open->nodes.num_elems++; } graph_open->links.head_offset = (u16)((uintptr_t)buf_ptr - (uintptr_t)&graph_open->links); for (i = 0; i < ARRAY_SIZE(graph->links); i++) { ret = ipu7_fw_psys_build_link(&graph->links[i], &buf_ptr); if (ret) graph_open->links.num_elems++; } buf_size = (u32)((uintptr_t)buf_ptr - (uintptr_t)graph_open); graph_open->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_GRAPH_OPEN); graph_open->header.tlv_header.tlv_len32 = TLV_SIZE(buf_size); graph_open->header.user_token = 0; ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); ipu_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } int 
ipu7_fw_psys_graph_close(u8 graph_id, struct ipu7_psys *psys) { struct ipu7_syscom_context *ctx = psys->adev->syscom; struct ipu7_msg_graph_close *token; dev_dbg(&psys->dev, "send_token: fw psys graph close\n"); token = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); if (!token) return -ENODATA; token->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_GRAPH_CLOSE); token->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*token)); token->header.user_token = 0; token->graph_id = graph_id; token->graph_msg_map = (u8)(IPU_MSG_DEVICE_CLOSE_SEND_RESP | IPU_MSG_DEVICE_CLOSE_SEND_IRQ); ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_IN_DEV_QUEUE_ID); ipu_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } int ipu7_fw_psys_task_request(const struct ipu_psys_task_request *task, struct ipu7_psys_stream *ip, struct ipu_psys_task_queue *tq, struct ipu7_psys *psys) { struct ipu7_syscom_context *ctx = psys->adev->syscom; struct ipu7_msg_task *msg = tq->msg_task; struct ia_gofo_msg_indirect *ind; u32 node_q_id = ip->q_id[task->node_ctx_id]; u32 teb_hi, teb_lo; u64 teb; u8 i, term_id; u8 num_terms; ind = ipu7_syscom_get_token(ctx, node_q_id); if (!ind) return -ENODATA; memset(msg, 0, sizeof(*msg)); msg->graph_id = task->graph_id; msg->node_ctx_id = task->node_ctx_id; msg->profile_idx = 0U; /* Only one profile on HKR */ msg->frame_id = task->frame_id; msg->frag_id = 0U; /* No frag, set to 0 */ /* * Each task has a flag indicating if ack needed, it may be used to * reduce interrupts if multiple CBs supported. 
*/ msg->req_done_msg = 1; msg->req_done_irq = 1; memcpy(msg->payload_reuse_bm, task->payload_reuse_bm, sizeof(task->payload_reuse_bm)); teb_hi = ip->nodes[msg->node_ctx_id].profiles[0].teb[1]; teb_lo = ip->nodes[msg->node_ctx_id].profiles[0].teb[0]; teb = (teb_lo | (((u64)teb_hi) << 32)); num_terms = ip->nodes[msg->node_ctx_id].num_terms; for (i = 0U; i < num_terms; i++) { term_id = tq->task_buffers[i].term_id; if ((1U << term_id) & teb) msg->term_buffers[term_id] = tq->ipu7_addr[i]; } msg->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_TASK_REQ); msg->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*msg)); msg->header.user_token = (uintptr_t)tq; ind->header.tlv_header.tlv_type = TLV_TYPE(IPU_MSG_TYPE_INDIRECT); ind->header.tlv_header.tlv_len32 = TLV_SIZE(sizeof(*ind)); ind->header.msg_options.num_elems = 0; ind->header.msg_options.head_offset = 0; ind->ref_header = msg->header.tlv_header; ind->ref_msg_ptr = tq->task_dma_addr; ipu7_syscom_put_token(ctx, node_q_id); ipu_buttress_wakeup_ps_uc(psys->adev->isp); return 0; } int ipu7_fw_psys_event_handle(struct ipu7_psys *psys, u8 *buf_ptr) { struct ipu7_syscom_context *ctx = psys->adev->syscom; void *token; token = ipu7_syscom_get_token(ctx, FWPS_MSG_ABI_OUT_ACK_QUEUE_ID); if (!token) return -ENODATA; memcpy(buf_ptr, token, sizeof(u8) * FWPS_MSG_FW2HOST_MAX_SIZE); ipu7_syscom_put_token(ctx, FWPS_MSG_ABI_OUT_ACK_QUEUE_ID); return 0; } int ipu7_fw_psys_get_log(struct ipu7_psys *psys) { void *token; struct ia_gofo_msg_log *log_msg; u8 msg_type, msg_len; u32 count, fmt_id; struct device *dev = &psys->adev->auxdev.dev; struct psys_fw_log *fw_log = psys->fw_log; u32 log_size = sizeof(struct ia_gofo_msg_log_info_ts); token = ipu7_syscom_get_token(psys->adev->syscom, FWPS_MSG_ABI_OUT_LOG_QUEUE_ID); if (!token) return -ENODATA; while (token) { log_msg = (struct ia_gofo_msg_log *)token; msg_type = log_msg->header.tlv_header.tlv_type; msg_len = log_msg->header.tlv_header.tlv_len32; if (msg_type != IPU_MSG_TYPE_DEV_LOG || 
!msg_len) dev_warn(dev, "Invalid msg data from Log queue!\n"); count = log_msg->log_info_ts.log_info.log_counter; fmt_id = log_msg->log_info_ts.log_info.fmt_id; if (count > fw_log->count + 1) dev_warn(dev, "log msg lost, count %u+1 != %u!\n", count, fw_log->count); if (fmt_id == IA_GOFO_MSG_LOG_FMT_ID_INVALID) { dev_err(dev, "invalid log msg fmt_id 0x%x!\n", fmt_id); ipu7_syscom_put_token(psys->adev->syscom, FWPS_MSG_ABI_OUT_LOG_QUEUE_ID); return -EIO; } if (log_size + fw_log->head - fw_log->addr > FW_LOG_BUF_SIZE) fw_log->head = fw_log->addr; memcpy(fw_log->head, (void *)&log_msg->log_info_ts, sizeof(struct ia_gofo_msg_log_info_ts)); fw_log->count = count; fw_log->head += log_size; fw_log->size += log_size; ipu7_syscom_put_token(psys->adev->syscom, FWPS_MSG_ABI_OUT_LOG_QUEUE_ID); token = ipu7_syscom_get_token(psys->adev->syscom, FWPS_MSG_ABI_OUT_LOG_QUEUE_ID); }; return 0; } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/psys/ipu7-fw-psys.h000066400000000000000000000023401503071307200300110ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2016 - 2024 Intel Corporation */ #ifndef IPU7_FW_PSYS_H #define IPU7_FW_PSYS_H #include "abi/ipu7_fw_common_abi.h" #include "abi/ipu7_fw_msg_abi.h" #include "ipu7-syscom.h" #include #define IPU_PSYS_MAX_GRAPH_NUMS (8U) #define IPU_PSYS_NUM_STREAMS IPU_PSYS_MAX_GRAPH_NUMS struct ipu7_msg_to_str { const enum ipu7_msg_type type; const char *msg; }; struct ipu7_psys; struct ipu7_psys_stream; struct ipu_psys_task_queue; int ipu7_fw_psys_init(struct ipu7_psys *psys); void ipu7_fw_psys_release(struct ipu7_psys *psys); int ipu7_fw_psys_open(struct ipu7_psys *psys); void ipu7_fw_psys_close(struct ipu7_psys *psys); int ipu7_fw_psys_graph_open(const struct ipu_psys_graph_info *graph, struct ipu7_psys *psys, struct ipu7_psys_stream *ip); int ipu7_fw_psys_graph_close(u8 graph_id, struct ipu7_psys *psys); int ipu7_fw_psys_task_request(const struct ipu_psys_task_request *task, 
struct ipu7_psys_stream *ip, struct ipu_psys_task_queue *tq, struct ipu7_psys *psys); int ipu7_fw_psys_event_handle(struct ipu7_psys *psys, u8 *buf_ptr); int ipu7_fw_psys_get_log(struct ipu7_psys *psys); #endif /* IPU7_FW_PSYS_H */ ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/psys/ipu7-psys.c000066400000000000000000000255761503071307200274120ustar00rootroot00000000000000// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2020 - 2024 Intel Corporation #include #include #include #include #include #include #include #include #include #include #include #include #include "ipu7.h" #include "ipu7-bus.h" #include "ipu7-buttress-regs.h" #include "ipu7-psys.h" #include "ipu7-platform-regs.h" #include "ipu7-fw-psys.h" #define DOMAIN_POWE_TIMEOUT_US (200 * USEC_PER_MSEC) static const struct ipu7_msg_to_str ps_fw_msg[] = { {IPU_MSG_TYPE_RESERVED, "IPU_MSG_TYPE_RESERVED"}, {IPU_MSG_TYPE_INDIRECT, "IPU_MSG_TYPE_INDIRECT"}, {IPU_MSG_TYPE_DEV_LOG, "IPU_MSG_TYPE_DEV_LOG"}, {IPU_MSG_TYPE_GENERAL_ERR, "IPU_MSG_TYPE_GENERAL_ERR"}, {IPU_MSG_TYPE_DEV_OPEN, "IPU_MSG_TYPE_DEV_OPEN"}, {IPU_MSG_TYPE_DEV_OPEN_ACK, "IPU_MSG_TYPE_DEV_OPEN_ACK"}, {IPU_MSG_TYPE_GRAPH_OPEN, "IPU_MSG_TYPE_GRAPH_OPEN"}, {IPU_MSG_TYPE_GRAPH_OPEN_ACK, "IPU_MSG_TYPE_GRAPH_OPEN_ACK"}, {IPU_MSG_TYPE_TASK_REQ, "IPU_MSG_TYPE_TASK_REQ"}, {IPU_MSG_TYPE_TASK_DONE, "IPU_MSG_TYPE_TASK_DONE"}, {IPU_MSG_TYPE_GRAPH_CLOSE, "IPU_MSG_TYPE_GRAPH_CLOSE"}, {IPU_MSG_TYPE_GRAPH_CLOSE_ACK, "IPU_MSG_TYPE_GRAPH_CLOSE_ACK"}, {IPU_MSG_TYPE_DEV_CLOSE, "IPU_MSG_TYPE_DEV_CLOSE"}, {IPU_MSG_TYPE_DEV_CLOSE_ACK, "IPU_MSG_TYPE_DEV_CLOSE_ACK"}, {IPU_MSG_TYPE_N, "IPU_MSG_TYPE_N"}, }; void ipu7_psys_subdomains_power(struct ipu7_psys *psys, bool on) { struct device *dev = &psys->adev->auxdev.dev; struct ipu7_device *isp = psys->adev->isp; void __iomem *base = isp->base; u32 mask; u32 val; u32 req; int ret; /* power domain req */ mask = is_ipu8(isp->hw_ver) ? IPU8_PSYS_DOMAIN_POWER_MASK : IPU7_PSYS_DOMAIN_POWER_MASK; req = on ? 
mask : 0; val = readl(base + BUTTRESS_REG_PS_DOMAINS_STATUS); dev_dbg(dev, "power %s psys sub-domains. status: 0x%x\n", on ? "UP" : "DOWN", val); if ((val & mask) == req) { dev_warn(dev, "psys sub-domains power already in request state.\n"); return; } writel(req, base + BUTTRESS_REG_PS_WORKPOINT_DOMAIN_REQ); ret = readl_poll_timeout(base + BUTTRESS_REG_PS_DOMAINS_STATUS, val, !(val & IPU_PSYS_DOMAIN_POWER_IN_PROGRESS) && ((val & mask) == req), 100, DOMAIN_POWE_TIMEOUT_US); if (ret) dev_err(dev, "Psys sub-domains power %s timeout! status: 0x%x\n", on ? "UP" : "DOWN", val); } void ipu7_psys_setup_hw(struct ipu7_psys *psys) { void __iomem *base = psys->pdata->base; u32 val = IRQ_FROM_LOCAL_FW; /* Enable TO_SW IRQ from FW */ writel(val, base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_CLEAR); writel(val, base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_MASK); writel(val, base + IPU_REG_PSYS_TO_SW_IRQ_CNTL_ENABLE); /* correct the initial printf configuration */ writel(0x2, base + PS_UC_CTRL_BASE + PRINTF_AXI_CNTL); } static struct ipu7_psys_stream* ipu7_psys_get_ip_from_fh(struct ipu7_psys *psys, u8 graph_id) { struct ipu7_psys_fh *fh; list_for_each_entry(fh, &psys->fhs, list) { if (fh->ip->graph_id == graph_id) return fh->ip; } return NULL; } static void ipu7_psys_handle_graph_open_ack(struct ipu7_psys *psys, const void *buffer) { const struct ipu7_msg_graph_open_ack *ack_msg = (const struct ipu7_msg_graph_open_ack *)buffer; const struct ia_gofo_msg_header_ack *ack_header = &ack_msg->header; struct ipu7_psys_stream *ip; struct device *dev = &psys->dev; const struct ia_gofo_tlv_header *msg_tlv_item; u16 num_items; u16 head_offset; u32 i; dev_dbg(dev, "[ACK]%s: graph_id: %d\n", __func__, ack_msg->graph_id); if (ack_msg->graph_id > (u8)IPU_PSYS_MAX_GRAPH_NUMS) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return; } ip = ipu7_psys_get_ip_from_fh(psys, ack_msg->graph_id); if (!ip) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return; } if (ip->graph_state != 
IPU_MSG_GRAPH_STATE_OPEN_WAIT) { dev_err(dev, "%s state %d\n", __func__, ip->graph_state); goto open_graph_exit; } num_items = ack_header->header.msg_options.num_elems; if (!num_items) { dev_err(dev, "%s, num_items is 0\n", __func__); goto open_graph_exit; } head_offset = ack_header->header.msg_options.head_offset; msg_tlv_item = (const struct ia_gofo_tlv_header *) ((u8 *)&ack_header->header.msg_options + head_offset); if (!msg_tlv_item) { dev_err(dev, "%s: failed to get tlv item\n", __func__); goto open_graph_exit; } for (i = 0U; i < num_items; i++) { u32 option_type = msg_tlv_item->tlv_type; u32 option_bytes = msg_tlv_item->tlv_len32 * TLV_ITEM_ALIGNMENT; struct ipu7_msg_graph_open_ack_task_q_info *msg_option = (void *)msg_tlv_item; switch (option_type) { case IPU_MSG_GRAPH_ACK_TASK_Q_INFO: /* * Should do check that: * - Each managed node has a queue ID * - Queue ID's are sane */ dev_dbg(dev, "[ACK]set node_ctx_id %d q_id %d\n", msg_option->node_ctx_id, msg_option->q_id); ip->q_id[msg_option->node_ctx_id] = msg_option->q_id; break; default: /* * Only one option supported */ dev_err(dev, "not supported %u\n", option_type); break; } msg_tlv_item = (struct ia_gofo_tlv_header *) (((u8 *)msg_tlv_item) + option_bytes); } ip->graph_state = IPU_MSG_GRAPH_STATE_OPEN; open_graph_exit: complete(&ip->graph_open); } static int ipu7_psys_handle_task_done(struct ipu7_psys *psys, void *buffer, u32 error) { const struct ipu7_msg_task_done *ack_msg = (const struct ipu7_msg_task_done *)buffer; const struct ia_gofo_msg_header_ack *ack_header = &ack_msg->header; const struct ia_gofo_msg_header *msg_header = &ack_header->header; struct ipu_psys_task_queue *tq; struct device *dev = &psys->dev; struct ipu7_psys_stream *ip; struct ipu_psys_task_ack *event; if (ack_msg->graph_id > (u8)IPU_PSYS_MAX_GRAPH_NUMS) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return -EINVAL; } ip = ipu7_psys_get_ip_from_fh(psys, ack_msg->graph_id); if (!ip) { dev_err(dev, "%s: graph id 
%d\n", __func__, ack_msg->graph_id); return -EINVAL; } tq = (void *)(uintptr_t)msg_header->user_token; if (!tq) { dev_err(dev, "%s: task_token is NULL\n", __func__); return -EINVAL; } mutex_lock(&ip->task_mutex); if (tq->task_state != IPU_MSG_TASK_STATE_WAIT_DONE) { dev_err(dev, "%s: graph %d node %d error %d\n", __func__, ack_msg->graph_id, ack_msg->node_ctx_id, tq->task_state); list_move_tail(&tq->list, &ip->tq_list); mutex_unlock(&ip->task_mutex); return -ENOENT; } tq->task_state = IPU_MSG_TASK_STATE_DONE; dev_dbg(dev, "%s: task_token(%p)\n", __func__, tq); list_move_tail(&tq->list, &ip->tq_list); mutex_unlock(&ip->task_mutex); mutex_lock(&ip->event_mutex); if (!list_empty(&ip->event_list)) { event = list_first_entry(&ip->event_list, struct ipu_psys_task_ack, list); event->graph_id = ack_msg->graph_id; event->node_ctx_id = ack_msg->node_ctx_id; event->frame_id = ack_msg->frame_id; event->err_code = error; list_move_tail(&event->list, &ip->ack_list); } else { dev_dbg(dev, "event queue is full, add new one\n"); event = kzalloc(sizeof(*event), GFP_KERNEL); if (event) { event->graph_id = ack_msg->graph_id; event->node_ctx_id = ack_msg->node_ctx_id; event->frame_id = ack_msg->frame_id; event->err_code = error; list_add_tail(&event->list, &ip->ack_list); } else { dev_err(dev, "failed to alloc event buf\n"); } } mutex_unlock(&ip->event_mutex); wake_up_interruptible(&ip->fh->wait); return 0; } static void ipu7_psys_handle_graph_close_ack(struct ipu7_psys *psys, void *buffer) { struct ipu7_msg_graph_close_ack *ack_msg = (struct ipu7_msg_graph_close_ack *)buffer; struct device *dev = &psys->dev; struct ipu7_psys_stream *ip; dev_dbg(dev, "[ACK]%s:graph_id: %d\n", __func__, ack_msg->graph_id); if (ack_msg->graph_id > (u8)IPU_PSYS_MAX_GRAPH_NUMS) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return; } ip = ipu7_psys_get_ip_from_fh(psys, ack_msg->graph_id); if (!ip) { dev_err(dev, "%s: graph id %d\n", __func__, ack_msg->graph_id); return; } if 
(ip->graph_state != IPU_MSG_GRAPH_STATE_CLOSE_WAIT) { dev_err(dev, "%s state %d\n", __func__, ip->graph_state); goto graph_close_exit; } ip->graph_state = IPU_MSG_GRAPH_STATE_CLOSED; graph_close_exit: complete(&ip->graph_close); } void ipu7_psys_handle_events(struct ipu7_psys *psys) { const struct ia_gofo_msg_header_ack *ack_header; u8 buffer[FWPS_MSG_FW2HOST_MAX_SIZE]; struct device *dev = &psys->dev; u32 error = 0; int ret = 0; do { #ifdef ENABLE_FW_OFFLINE_LOGGER ipu7_fw_psys_get_log(psys); #endif ret = ipu7_fw_psys_event_handle(psys, buffer); if (ret) break; ack_header = (const struct ia_gofo_msg_header_ack *)buffer; dev_dbg(dev, "[ACK]%s: ack msg %s received\n", __func__, ps_fw_msg[ack_header->header.tlv_header.tlv_type].msg); if (!IA_GOFO_MSG_ERR_IS_OK(ack_header->err)) { dev_err(dev, "group %d, code %d, detail: %d, %d\n", ack_header->err.err_group, ack_header->err.err_code, ack_header->err.err_detail[0], ack_header->err.err_detail[1]); error = (ack_header->err.err_group == IPU_MSG_ERR_GROUP_TASK) ? 
IPU_PSYS_EVT_ERROR_FRAME : IPU_PSYS_EVT_ERROR_INTERNAL; } switch (ack_header->header.tlv_header.tlv_type) { case IPU_MSG_TYPE_GRAPH_OPEN_ACK: ipu7_psys_handle_graph_open_ack(psys, buffer); break; case IPU_MSG_TYPE_TASK_DONE: ipu7_psys_handle_task_done(psys, buffer, error); break; case IPU_MSG_TYPE_GRAPH_CLOSE_ACK: ipu7_psys_handle_graph_close_ack(psys, buffer); break; case IPU_MSG_TYPE_GENERAL_ERR: /* already printed the log above for general error */ break; default: dev_err(&psys->dev, "Unknown type %d\n", ack_header->header.tlv_header.tlv_type); } } while (1); } static int ipu7_psys_get_event(struct ipu7_psys_stream *ip, struct ipu_psys_event *event) { struct ipu_psys_task_ack *ack; mutex_lock(&ip->event_mutex); /* Check if there is already an event in the list */ if (list_empty(&ip->ack_list)) { mutex_unlock(&ip->event_mutex); return -EAGAIN; } ack = list_first_entry(&ip->ack_list, struct ipu_psys_task_ack, list); event->graph_id = ack->graph_id; event->node_ctx_id = ack->node_ctx_id; event->frame_id = ack->frame_id; event->error = ack->err_code; list_move_tail(&ack->list, &ip->event_list); mutex_unlock(&ip->event_mutex); dev_dbg(&ip->fh->psys->dev, "event graph %d cb %d frame %d dequeued", event->graph_id, event->node_ctx_id, event->frame_id); return 0; } long ipu7_ioctl_dqevent(struct ipu_psys_event *event, struct ipu7_psys_fh *fh, unsigned int f_flags) { struct ipu7_psys *psys = fh->psys; int ret = 0; dev_dbg(&psys->adev->auxdev.dev, "IOC_DQEVENT\n"); if (!(f_flags & O_NONBLOCK)) { ret = wait_event_interruptible(fh->wait, !ipu7_psys_get_event(fh->ip, event)); if (ret == -ERESTARTSYS) return ret; } else { ret = ipu7_psys_get_event(fh->ip, event); } return ret; } ipu7-drivers-0~git202507010803.e6f86fa7/drivers/media/pci/intel/ipu7/psys/ipu7-psys.h000066400000000000000000000107641503071307200274100ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) 2013 - 2024 Intel Corporation */ #ifndef IPU7_PSYS_H #define IPU7_PSYS_H #include 
#include #include #include "ipu7.h" #include "ipu7-fw-psys.h" #define IPU_PSYS_WORK_QUEUE system_power_efficient_wq #define IPU_PSYS_CMD_QUEUE_SIZE 0x20 #define IPU_PSYS_TASK_QUEUE_SIZE 0x20 #define IPU_PSYS_ACK_QUEUE_SIZE 0x40 #define IPU_PSYS_LOG_QUEUE_SIZE 256 #define IPU_PSYS_OUT_MSG_SIZE 256 /** * Each event from FW will be first queued into a * event queue, define the queue depth here */ #define TASK_EVENT_QUEUE_SIZE 3U /** * Each task queue from user will be first queued into * a task queue, define the queue depth here */ #define TASK_REQUEST_QUEUE_SIZE 8U #define INVALID_STREAM_ID 0xFF /** * Task request queues per stream * * Each task will first assigned a task queue buffer here, * all the nodes will share the same task queue, maximum * queue will be full there. */ struct ipu_psys_task_queue { struct ipu_psys_term_buffers task_buffers[MAX_GRAPH_TERMINALS]; dma_addr_t ipu7_addr[MAX_GRAPH_TERMINALS]; struct ipu7_msg_task *msg_task; dma_addr_t task_dma_addr; struct list_head list; /* task state of each task input, represent ipu7_msg_task_state */ enum ipu7_msg_task_state task_state; }; struct psys_fw_log { struct mutex mutex; /* protect whole struct */ void *head; void *addr; u32 count; /* running counter of log */ u32 size; /* actual size of log content, in bits */ }; /** * Task quest event context * * Each task request should get its event ack from FW and save * to this structure and for user dequeue purpose. */ struct ipu_psys_task_ack { u8 graph_id; /* graph id of the task request */ u8 node_ctx_id; /* logical node id */ u8 frame_id; /* frame id of the original task request */ struct list_head list; u32 err_code; /* error indication to user */ }; /** * stream here is equal to pipe, each stream has * its dedicated graph_id, and task request queue. * * For multiple stream supported design. 
*/ struct ipu7_psys_stream { struct ipu7_psys_fh *fh; u8 graph_id; /* graph_id on this stream */ /* Handle events from FW */ struct mutex event_mutex; struct list_head event_list; /* Reserved event list */ struct list_head ack_list; /* Received ack from FW */ /* Serialize task queue */ struct mutex task_mutex; struct list_head tq_list; /* Reserved task queue list */ struct list_head tq_running_list; /* Running task sent to FW */ u8 num_nodes; /* Number of enabled nodes */ struct graph_node nodes[MAX_GRAPH_NODES]; u8 q_id[MAX_GRAPH_NODES]; /* syscom input queue id assigned by fw */ struct completion graph_open; struct completion graph_close; /* Graph state, represent enum ipu7_msg_graph_state */ enum ipu7_msg_graph_state graph_state; }; struct task_struct; struct ipu7_psys { struct cdev cdev; struct device dev; struct mutex mutex; /* Psys various */ int ready; /* psys fw status */ spinlock_t ready_lock; /* protect psys firmware state */ struct list_head fhs; struct ipu7_psys_pdata *pdata; struct ipu7_bus_device *adev; #ifdef CONFIG_DEBUG_FS struct dentry *debugfsdir; #endif unsigned long timeout; struct psys_fw_log *fw_log; /* available graph_id range is 0 ~ IPU_PSYS_NUM_STREAMS - 1 */ u8 graph_id[IPU_PSYS_NUM_STREAMS]; /* Device state, represent enum ipu7_msg_dev_state */ enum ipu7_msg_dev_state dev_state; struct ipu7_psys_config *subsys_config; dma_addr_t subsys_config_dma_addr; }; struct ipu7_psys_fh { struct ipu7_psys *psys; struct mutex mutex; /* Protects bufmap & kcmds fields */ struct list_head list; struct list_head bufmap; wait_queue_head_t wait; struct ipu7_psys_stream *ip; }; struct ipu7_dma_buf_attach { struct device *dev; u64 len; uintptr_t *userptr; struct sg_table *sgt; struct page **pages; size_t npages; }; struct ipu7_psys_kbuffer { u64 len; uintptr_t *userptr; u32 flags; int fd; void *kaddr; struct list_head list; dma_addr_t dma_addr; struct sg_table *sgt; struct dma_buf_attachment *db_attach; struct dma_buf *dbuf; bool valid; /* True when buffer 
is usable */ }; #define inode_to_ipu_psys(inode) \ container_of((inode)->i_cdev, struct ipu7_psys, cdev) void ipu7_psys_setup_hw(struct ipu7_psys *psys); void ipu7_psys_subdomains_power(struct ipu7_psys *psys, bool on); void ipu7_psys_handle_events(struct ipu7_psys *psys); long ipu7_ioctl_dqevent(struct ipu_psys_event *event, struct ipu7_psys_fh *fh, unsigned int f_flags); #endif /* IPU7_PSYS_H */ ipu7-drivers-0~git202507010803.e6f86fa7/include/000077500000000000000000000000001503071307200204655ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/include/uapi/000077500000000000000000000000001503071307200214235ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/include/uapi/linux/000077500000000000000000000000001503071307200225625ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/include/uapi/linux/ipu7-psys.h000066400000000000000000000157121503071307200246210ustar00rootroot00000000000000/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (C) 2013 - 2023 Intel Corporation */ #ifndef _UAPI_IPU_PSYS_H #define _UAPI_IPU_PSYS_H #ifdef __KERNEL__ #include #else #include #endif struct ipu_psys_capability { uint32_t version; uint8_t driver[20]; uint8_t dev_model[32]; uint32_t reserved[17]; } __attribute__ ((packed)); /** * PSYS event error to user */ enum ipu_psys_event_error { IPU_PSYS_EVT_ERROR_NONE = 0U, IPU_PSYS_EVT_ERROR_INTERNAL = 1U, IPU_PSYS_EVT_ERROR_FRAME = 2U, IPU_PSYS_EVT_ERROR_FORCE_CLOSED = 3U, IPU_PSYS_EVT_ERROR_MAX } __attribute__ ((packed)); /** * struct ipu_psys_event - event back from driver to user for requested tasks * @graph_id: unique id per graph * @node_ctx_id: unique logical context id per cb * @frame_id: unique id per frame, originally assigned by user * @error: error code of ipu_psys_event_error type */ struct ipu_psys_event { uint8_t graph_id; uint8_t node_ctx_id; uint8_t frame_id; uint32_t error; int32_t reserved[2]; } __attribute__ ((packed)); /** * struct 
ipu_psys_buffer - for input/output terminals * @len: total allocated size @ base address * @userptr: user pointer * @fd: DMA-BUF handle * @data_offset:offset to valid data * @bytes_used: amount of valid data including offset * @flags: flags */ struct ipu_psys_buffer { uint64_t len; union { int fd; void __user *userptr; uint64_t reserved; } base; uint32_t data_offset; uint32_t bytes_used; uint32_t flags; uint32_t reserved[2]; } __attribute__ ((packed)); /**< Max number of logical node */ #define MAX_GRAPH_NODES 5U /**< Max number of profile */ #define MAX_GRAPH_NODE_PROFILES 1U #define MAX_GRAPH_LINKS 10U #define MAX_GRAPH_TERMINALS 32U /** * Settings per node on the bitmap * @teb: Terminal Enable bitmap * @deb: Device Enable bitmap * @rbm: Routing bitmap * @reb: Routing Enable bitmap */ struct node_profile { uint32_t teb[2]; uint32_t deb[4]; uint32_t rbm[4]; uint32_t reb[4]; } __attribute__ ((packed)); /** * struct node_ternimal - terminal description on the node * * Terminal is the logical connection entity that is in the node, * it can be different types, one node could have multiple terminal. * * @term_id: id of the terminal * @buf_size: payload(PAC or SDI) size of the certain terminal */ struct node_ternimal { uint8_t term_id; uint32_t buf_size; } __attribute__ ((packed)); /** * struct graph_node - Description of graph that will be used for device * and graph open purpose * * Node is the logical entity of a graph, one graph could have multiple * nodes and it could have connection between each node with terminal. 
* * @node_rsrc_id: Physical node id * @node_ctx_id: Logical node id, unique per graph * @num_terms: Number of enabled terms in the node * @profiles: bitmap settings on the node * @terminals: terminal info on the node * @num_frags: Number of fragments */ struct graph_node { uint8_t node_rsrc_id; uint8_t node_ctx_id; uint8_t num_terms; struct node_profile profiles[MAX_GRAPH_NODE_PROFILES]; struct node_ternimal terminals[MAX_GRAPH_TERMINALS]; } __attribute__ ((packed)); /** * struct graph_link_ep - link endpoint description * * Link endpoint is used to describe the connection between different nodes. * * @node_ctx_id: Node ID as described in the list of nodes in the fgraph * @term_id: Term ID as described in the list of terms in the fgraph */ struct graph_link_ep { uint8_t node_ctx_id; uint8_t term_id; } __attribute__ ((packed)); /** * All local links (links between nodes within a subsystem) require this * value to be set. */ #define IPU_PSYS_FOREIGN_KEY_NONE UINT16_MAX /** None value of TOP pbk id if not used */ #define IPU_PSYS_LINK_PBK_ID_NONE UINT8_MAX /** None value of TOP pbk slot id if not used */ #define IPU_PSYS_LINK_PBK_SLOT_ID_NONE UINT8_MAX /** Static Offline */ #define IPU_PSYS_LINK_STREAMING_MODE_SOFF 0U /** * struct graph_link - graph link to connect between cbs * * The sink and source links are defined with terminal information. 
* * @ep_src: Source side of the link * @ep_dst: Destination side of the link * @foreign_key: MUST set to IPU_PSYS_FOREIGN_KEY_NONE * @streaming_mode: Value should be set from IPU_PSYS_LINK_STREAMING_MODE_X * @pbk_id: TOP PBK id that used to connected to external IP * @pbk_slot_id: TOP PBK slot id that used to connected to external IP * @delayed_link: A delay link between producer N and consumer N+1 frame */ struct graph_link { struct graph_link_ep ep_src; struct graph_link_ep ep_dst; uint16_t foreign_key; uint8_t streaming_mode; uint8_t pbk_id; uint8_t pbk_slot_id; uint8_t delayed_link; } __attribute__ ((packed)); /** * struct ipu_psys_graph_info * * Topology that describes an IPU internal connection includes CB and terminal * information. * * @graph_id: id of graph, set initial to 0xFF by user, returned by driver * @num_nodes: number of nodes in graph * @nodes: node entity * @links: link entity */ struct ipu_psys_graph_info { uint8_t graph_id; uint8_t num_nodes; struct graph_node __user *nodes; struct graph_link links[MAX_GRAPH_LINKS]; } __attribute__ ((packed)); /** * struct ipu_psys_term_buffers * * Descprion of each terminal payload buffer * * @term_id: terminal id * @term_buf: terminal buffer */ struct ipu_psys_term_buffers { uint8_t term_id; struct ipu_psys_buffer term_buf; } __attribute__ ((packed)); /** * struct ipu_psys_task_request * * Task request is for user to send a request associated with terminal * payload and expect IPU to process, each task request would expect * an event, @see ipu_psys_event * * @graph_id: graph id returned from graph open * @node_ctx_id: unique logical context id per cb * @frame_id: frame id * @payload_reuse_bm: Any terminal marked here must be enabled * @term_buf_count: the number of terminal buffers * @task_buffers: terminal buffers on the task request * @num_frags: the number of fragments * @frag_buffers: the buffer information of fragments */ struct ipu_psys_task_request { uint8_t graph_id; uint8_t node_ctx_id; uint8_t 
frame_id; uint32_t payload_reuse_bm[2]; uint8_t term_buf_count; struct ipu_psys_term_buffers __user *task_buffers; } __attribute__ ((packed)); #define IPU_BUFFER_FLAG_INPUT (1 << 0) #define IPU_BUFFER_FLAG_OUTPUT (1 << 1) #define IPU_BUFFER_FLAG_MAPPED (1 << 2) #define IPU_BUFFER_FLAG_NO_FLUSH (1 << 3) #define IPU_BUFFER_FLAG_DMA_HANDLE (1 << 4) #define IPU_BUFFER_FLAG_USERPTR (1 << 5) #define IPU_IOC_QUERYCAP _IOR('A', 1, struct ipu_psys_capability) #define IPU_IOC_MAPBUF _IOWR('A', 2, int) #define IPU_IOC_UNMAPBUF _IOWR('A', 3, int) #define IPU_IOC_GETBUF _IOWR('A', 4, struct ipu_psys_buffer) #define IPU_IOC_PUTBUF _IOWR('A', 5, struct ipu_psys_buffer) #define IPU_IOC_DQEVENT _IOWR('A', 6, struct ipu_psys_event) #define IPU_IOC_GRAPH_OPEN _IOWR('A', 7, struct ipu_psys_graph_info) #define IPU_IOC_TASK_REQUEST _IOWR('A', 8, struct ipu_psys_task_request) #define IPU_IOC_GRAPH_CLOSE _IOWR('A', 9, int) #endif /* _UAPI_IPU_PSYS_H */ ipu7-drivers-0~git202507010803.e6f86fa7/patch/000077500000000000000000000000001503071307200201415ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.10-v6.12/000077500000000000000000000000001503071307200214655ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.10-v6.12/in-tree-build/000077500000000000000000000000001503071307200241255ustar00rootroot000000000000000001-media-pci-intel-Add-IPU7-Kconfig-Makefile.patch000066400000000000000000000021521503071307200347070ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.10-v6.12/in-tree-buildFrom 60309890288bd0cc6b179eacdcbffce83f50cd1e Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Tue, 2 Jul 2024 17:26:08 +0800 Subject: [PATCH] media: pci: intel: Add IPU7 Kconfig & Makefile --- drivers/media/pci/intel/Kconfig | 1 + drivers/media/pci/intel/Makefile | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig index d9fcddce028b..948cda08fff5 100644 --- 
a/drivers/media/pci/intel/Kconfig +++ b/drivers/media/pci/intel/Kconfig @@ -2,6 +2,7 @@ source "drivers/media/pci/intel/ipu3/Kconfig" source "drivers/media/pci/intel/ipu6/Kconfig" +source "drivers/media/pci/intel/ipu7/Kconfig" source "drivers/media/pci/intel/ivsc/Kconfig" config IPU_BRIDGE diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile index 3a2cc6567159..ff0fea13422d 100644 --- a/drivers/media/pci/intel/Makefile +++ b/drivers/media/pci/intel/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o obj-y += ipu3/ obj-y += ivsc/ obj-$(CONFIG_VIDEO_INTEL_IPU6) += ipu6/ +obj-$(CONFIG_VIDEO_INTEL_IPU7) += ipu7/ -- 2.43.0 ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iot/000077500000000000000000000000001503071307200215725ustar00rootroot000000000000000001-media-pci-set-snoop-1-to-fix-cache-line-issue.patch000066400000000000000000000020141503071307200333420ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iotFrom 3e017d911c8ef8b12dc523c3fc73367d9f9d20b9 Mon Sep 17 00:00:00 2001 From: linya14x Date: Thu, 20 Mar 2025 15:39:16 +0800 Subject: [PATCH] media: pci: set snoop = 1 to fix cache line issue Signed-off-by: linya14x Signed-off-by: zouxiaoh --- drivers/media/pci/intel/ipu7/ipu7-isys-video.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/pci/intel/ipu7/ipu7-isys-video.c b/drivers/media/pci/intel/ipu7/ipu7-isys-video.c index e7507f533c1e..541a55bcb8f3 100644 --- a/drivers/media/pci/intel/ipu7/ipu7-isys-video.c +++ b/drivers/media/pci/intel/ipu7/ipu7-isys-video.c @@ -452,7 +452,7 @@ static int ipu7_isys_fw_pin_cfg(struct ipu7_isys_video *av, output_pin->link.dest = IPU_INSYS_OUTPUT_LINK_DEST_MEM; output_pin->link.use_sw_managed = 1; /* TODO: set the snoop bit for metadata capture */ - output_pin->link.is_snoop = 0; + output_pin->link.is_snoop = 1; /* output pin crop */ output_pin->crop.line_top = 0; -- 2.34.1 
0002-media-i2c-enable-lt6911gxd-in-ipu-bridge.patch000066400000000000000000000016771503071307200321760ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iotFrom 79e1b644221c92bc08d415725f0d23ba1a3d77f2 Mon Sep 17 00:00:00 2001 From: linya14x Date: Wed, 11 Dec 2024 17:04:17 +0800 Subject: [PATCH] media: i2c: enable lt6911gxd in ipu-bridge Signed-off-by: linya14x Signed-off-by: zouxiaoh --- drivers/media/pci/intel/ipu-bridge.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index 1cb745855600..da6683efbbd8 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -84,6 +84,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("OVTI2680", 1, 331200000), /* Omnivision OV8856 */ IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000), + /* Lontium lt6911gxd */ + IPU_SENSOR_CONFIG("INTC10C5", 0), }; static const struct ipu_property_names prop_names = { -- 2.34.1 0003-media-i2c-add-support-for-lt6911gxd.patch000066400000000000000000000030371503071307200313360ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iotFrom b44192f0e7db68a65553dfe9a326edb8e180a461 Mon Sep 17 00:00:00 2001 From: linya14x Date: Wed, 11 Dec 2024 17:11:18 +0800 Subject: [PATCH] media: i2c: add support for lt6911gxd Signed-off-by: linya14x Signed-off-by: zouxiaoh --- drivers/media/i2c/Kconfig | 12 ++++++++++++ drivers/media/i2c/Makefile | 1 + 2 files changed, 13 insertions(+) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 8ba096b8ebca..2a32ea1d5571 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -264,6 +264,18 @@ config VIDEO_IMX415 To compile this driver as a module, choose M here: the module will be called imx415. 
+config VIDEO_LT6911GXD + tristate "Lontium LT6911GXD decoder" + depends on ACPI || COMPILE_TEST + select V4L2_CCI_I2C + help + This is a Video4Linux2 sensor-level driver for the Lontium + LT6911GXD HDMI to MIPI CSI-2 bridge. + + To compile this driver as a module, choose M here: the + module will be called lt6911gxd. + + config VIDEO_MAX9271_LIB tristate diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index fbb988bd067a..acf75bc8c3b1 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -157,3 +157,4 @@ obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o obj-$(CONFIG_VIDEO_WM8739) += wm8739.o obj-$(CONFIG_VIDEO_WM8775) += wm8775.o +obj-$(CONFIG_VIDEO_LT6911GXD) += lt6911gxd.o -- 2.34.1 ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iot/0004-INT3472-Support-LT6911GXD.patch000066400000000000000000000053561503071307200270320ustar00rootroot00000000000000From 59e8640ed50ed3fce6eb98aacf26b487912969a2 Mon Sep 17 00:00:00 2001 From: linya14x Date: Wed, 11 Dec 2024 16:14:42 +0800 Subject: [PATCH] INT3472: Support LT6911GXD Signed-off-by: linya14x Signed-off-by: zouxiaoh Signed-off-by: hepengpx --- drivers/platform/x86/intel/int3472/common.h | 2 ++ drivers/platform/x86/intel/int3472/discrete.c | 18 ++++++++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h index 145dec66df64..1d667e1187d2 100644 --- a/drivers/platform/x86/intel/int3472/common.h +++ b/drivers/platform/x86/intel/int3472/common.h @@ -22,6 +22,8 @@ #define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b #define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c #define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d +#define INT3472_GPIO_TYPE_READY_STAT 0x13 +#define INT3472_GPIO_TYPE_HDMI_DETECT 0x14 #define INT3472_PDEV_MAX_NAME_LEN 23 #define INT3472_MAX_SENSOR_GPIOS 3 diff --git a/drivers/platform/x86/intel/int3472/discrete.c 
b/drivers/platform/x86/intel/int3472/discrete.c index 092252eb95a8..4e2fbf43c2cf 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -190,6 +190,14 @@ static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type, *func = "power-enable"; *gpio_flags = GPIO_ACTIVE_HIGH; break; + case INT3472_GPIO_TYPE_READY_STAT: + *func = "readystat"; + *gpio_flags = GPIO_LOOKUP_FLAGS_DEFAULT; + break; + case INT3472_GPIO_TYPE_HDMI_DETECT: + *func = "hdmidetect"; + *gpio_flags = GPIO_LOOKUP_FLAGS_DEFAULT; + break; default: *func = "unknown"; *gpio_flags = GPIO_ACTIVE_HIGH; @@ -281,9 +289,15 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, switch (type) { case INT3472_GPIO_TYPE_RESET: case INT3472_GPIO_TYPE_POWERDOWN: + case INT3472_GPIO_TYPE_READY_STAT: + case INT3472_GPIO_TYPE_HDMI_DETECT: ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, gpio_flags); - if (ret) + if (ret) { err_msg = "Failed to map GPIO pin to sensor\n"; + dev_warn(int3472->dev, + "Failed to map GPIO pin to sensor, type %02x, func %s, gpio_flags %u\n", + type, func, gpio_flags); + } break; case INT3472_GPIO_TYPE_CLK_ENABLE: @@ -391,7 +405,7 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev) return ret; } - if (cldb.control_logic_type != 1) { + if (cldb.control_logic_type != 1 && cldb.control_logic_type != 5) { dev_err(&pdev->dev, "Unsupported control logic type %u\n", cldb.control_logic_type); return -EINVAL; -- 2.34.1 ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iot/0005-ipu-bridge-add-CPHY-support.patch000066400000000000000000000071311503071307200302270ustar00rootroot00000000000000From 9ce464a2bea2260869bc18f66b60887b930cd16b Mon Sep 17 00:00:00 2001 From: Chen Meng J Date: Wed, 20 Nov 2024 17:56:07 +0800 Subject: [PATCH] ipu-bridge: add CPHY support get DPHY or CPHY mode when parse ssdb Signed-off-by: Chen Meng J Signed-off-by: zouxiaoh --- 
drivers/media/pci/intel/ipu-bridge.c | 23 ++++++++++++++++++++++- include/media/ipu-bridge.h | 7 +++++-- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index da6683efbbd8..fe5a838adea7 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -35,6 +35,9 @@ */ #define IVSC_DEV_NAME "intel_vsc" +#define PHY_MODE_DPHY 0 +#define PHY_MODE_CPHY 1 + /* * Extend this array with ACPI Hardware IDs of devices known to be working * plus the number of link-frequencies expected by their drivers, along with @@ -306,6 +309,7 @@ int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor) sensor->link = ssdb.link; sensor->lanes = ssdb.lanes; + sensor->phyconfig = ssdb.phyconfig; sensor->mclkspeed = ssdb.mclkspeed; sensor->rotation = ipu_bridge_parse_rotation(adev, &ssdb); sensor->orientation = ipu_bridge_parse_orientation(adev); @@ -324,6 +328,7 @@ static void ipu_bridge_create_fwnode_properties( { struct ipu_property_names *names = &sensor->prop_names; struct software_node *nodes = sensor->swnodes; + u8 bus_type; sensor->prop_names = prop_names; @@ -381,9 +386,16 @@ static void ipu_bridge_create_fwnode_properties( PROPERTY_ENTRY_REF_ARRAY("lens-focus", sensor->vcm_ref); } + if (sensor->phyconfig == PHY_MODE_DPHY) + bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_DPHY; + else if (sensor->phyconfig == PHY_MODE_CPHY) + bus_type = V4L2_FWNODE_BUS_TYPE_CSI2_CPHY; + else + bus_type = V4L2_FWNODE_BUS_TYPE_GUESS; + sensor->ep_properties[0] = PROPERTY_ENTRY_U32( sensor->prop_names.bus_type, - V4L2_FWNODE_BUS_TYPE_CSI2_DPHY); + bus_type); sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN( sensor->prop_names.data_lanes, bridge->data_lanes, sensor->lanes); @@ -403,6 +415,15 @@ static void ipu_bridge_create_fwnode_properties( sensor->ipu_properties[1] = PROPERTY_ENTRY_REF_ARRAY( sensor->prop_names.remote_endpoint, sensor->remote_ref); + + /* + * TODO: 
Remove the bus_type property for IPU + * 1. keep fwnode property list no change. + * 2. IPU driver needs to get bus_type from remote sensor ep. + */ + sensor->ipu_properties[2] = PROPERTY_ENTRY_U32 + (sensor->prop_names.bus_type, + bus_type); } static void ipu_bridge_init_swnode_names(struct ipu_sensor *sensor) diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h index 16fac765456e..f8642d09968d 100644 --- a/include/media/ipu-bridge.h +++ b/include/media/ipu-bridge.h @@ -91,7 +91,9 @@ struct ipu_sensor_ssdb { u8 controllogicid; u8 reserved1[3]; u8 mclkport; - u8 reserved2[13]; + u8 reserved2[5]; + u8 phyconfig; + u8 reserved3[7]; } __packed; struct ipu_property_names { @@ -139,11 +141,12 @@ struct ipu_sensor { u32 rotation; enum v4l2_fwnode_orientation orientation; const char *vcm_type; + u8 phyconfig; struct ipu_property_names prop_names; struct property_entry ep_properties[5]; struct property_entry dev_properties[5]; - struct property_entry ipu_properties[3]; + struct property_entry ipu_properties[4]; struct property_entry ivsc_properties[1]; struct property_entry ivsc_sensor_ep_properties[4]; struct property_entry ivsc_ipu_ep_properties[4]; -- 2.34.1 0006-media-ipu-bridge-change-LT6911GXD-ACPI-ID-to-INTC112.patch000066400000000000000000000020341503071307200332510ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iotFrom 2181ccbfec15e6ba9c0111439fe562790d1d5c5f Mon Sep 17 00:00:00 2001 From: linya14x Date: Fri, 21 Feb 2025 15:34:42 +0800 Subject: [PATCH] media: ipu-bridge: change LT6911GXD ACPI ID to INTC1124 Signed-off-by: linya14x Signed-off-by: zouxiaoh --- drivers/media/pci/intel/ipu-bridge.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index fe5a838adea7..92a397f69290 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -87,8 +87,8 @@ static const struct ipu_sensor_config 
ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("OVTI2680", 1, 331200000), /* Omnivision OV8856 */ IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000), - /* Lontium lt6911gxd */ - IPU_SENSOR_CONFIG("INTC10C5", 0), + /* Lontium LT6911GXD */ + IPU_SENSOR_CONFIG("INTC1124", 0), }; static const struct ipu_property_names prop_names = { -- 2.34.1 0007-platform-x86-fix-ipu7-allyes-build-error-for-int3472.patch000066400000000000000000000021201503071307200344370ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iotFrom 3040527f97fde27419d2f24679212bd2b967f5b5 Mon Sep 17 00:00:00 2001 From: linya14x Date: Fri, 7 Mar 2025 10:52:08 +0800 Subject: [PATCH] platform: x86: fix ipu7 allyes build error for int3472 Change print gpio_flags from %u to %lu. Signed-off-by: linya14x Signed-off-by: zouxiaoh --- drivers/platform/x86/intel/int3472/discrete.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index 4e2fbf43c2cf..2dba2ee66e1f 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -295,7 +295,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares, if (ret) { err_msg = "Failed to map GPIO pin to sensor\n"; dev_warn(int3472->dev, - "Failed to map GPIO pin to sensor, type %02x, func %s, gpio_flags %u\n", + "Failed to map GPIO pin to sensor, type %02x, func %s, gpio_flags %lu\n", type, func, gpio_flags); } -- 2.34.1 0008-media-i2c-add-driver-for-LT6911GXD.patch000066400000000000000000000462151503071307200306670ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iotFrom cdd2762665019f68c660d9e21f11a10d78d4148c Mon Sep 17 00:00:00 2001 From: zouxiaoh Date: Thu, 3 Apr 2025 10:49:57 +0800 Subject: [PATCH] media: i2c: add driver for LT6911GXD Signed-off-by: zouxiaoh --- drivers/media/i2c/lt6911gxd.c | 688 
++++++++++++++++++++++++++++++++++ 1 file changed, 688 insertions(+) create mode 100644 drivers/media/i2c/lt6911gxd.c diff --git a/drivers/media/i2c/lt6911gxd.c b/drivers/media/i2c/lt6911gxd.c new file mode 100644 index 000000000000..5bea5faa46a5 --- /dev/null +++ b/drivers/media/i2c/lt6911gxd.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2023 - 2025 Intel Corporation. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define POLLING_MODE + +#define LT6911GXD_CHIP_ID 0x2204 +#define REG_CHIP_ID CCI_REG16(0xe100) + +#define REG_ENABLE_I2C CCI_REG8(0xe0ee) + +#define REG_PIX_CLK CCI_REG24(0xe085) +#define REG_BYTE_CLK CCI_REG24(0xe092) +#define REG_H_TOTAL CCI_REG16(0xe088) +#define REG_V_TOTAL CCI_REG16(0xe08a) +#define REG_HALF_H_ACTIVE CCI_REG16(0xe08c) +#define REG_V_ACTIVE CCI_REG16(0xe08e) +#define REG_MIPI_FORMAT CCI_REG8(0xf988) +#define REG_MIPI_TX_CTRL CCI_REG8(0xe0b0) + +/* Interrupts */ +#define REG_INT_HDMI CCI_REG8(0xe084) +#define INT_VIDEO_DISAPPEAR 0x0 +#define INT_VIDEO_READY 0x1 + +#define LT6911GXD_DEFAULT_WIDTH 3840 +#define LT6911GXD_DEFAULT_HEIGHT 2160 +#define LT6911GXD_DEFAULT_LANES 2 +#define LT6911GXD_DEFAULT_FPS 30 +#define LT6911GXD_MAX_LINK_FREQ 297000000 +#define LT6911GXD_PAGE_CONTROL 0xff +#define BPP_MODE_BIT 4 +#define YUV16_BIT 2 + +static const struct regmap_range_cfg lt6911gxd_ranges[] = { + { + .name = "register_range", + .range_min = 0, + .range_max = 0xffff, + .selector_reg = LT6911GXD_PAGE_CONTROL, + .selector_mask = 0xff, + .selector_shift = 0, + .window_start = 0, + .window_len = 0x100, + }, +}; + +static const struct regmap_config lt6911gxd_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xffff, + .ranges = lt6911gxd_ranges, + .num_ranges = ARRAY_SIZE(lt6911gxd_ranges), +}; + +struct lt6911gxd_mode { + u32 width; + u32 height; + u32 code; + u32 fps; + u32 lanes; + s64 link_freq; +}; + 
+struct lt6911gxd { + struct v4l2_subdev sd; + struct media_pad pad; + struct v4l2_ctrl_handler ctrl_handler; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *link_freq; + + struct lt6911gxd_mode cur_mode; + struct regmap *regmap; + struct gpio_desc *reset_gpio; + struct gpio_desc *irq_gpio; +#ifdef POLLING_MODE +/* polling mode add start*/ + struct task_struct *poll_task; + u32 thread_run; +/* polling mode add start*/ +#endif +}; + +static const struct v4l2_event lt6911gxd_ev_source_change = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, +}; + +static const struct v4l2_event lt6911gxd_ev_stream_end = { + .type = V4L2_EVENT_EOS, +}; + +static inline struct lt6911gxd *to_lt6911gxd(struct v4l2_subdev *sd) +{ + return container_of(sd, struct lt6911gxd, sd); +} + +static const struct lt6911gxd_mode default_mode = { + .width = LT6911GXD_DEFAULT_WIDTH, + .height = LT6911GXD_DEFAULT_HEIGHT, + .code = MEDIA_BUS_FMT_UYVY8_1X16, + .fps = LT6911GXD_DEFAULT_FPS, + .lanes = LT6911GXD_DEFAULT_LANES, + .link_freq = LT6911GXD_MAX_LINK_FREQ, +}; + +static s64 get_pixel_rate(struct lt6911gxd *lt6911gxd) +{ + s64 pixel_rate; + + pixel_rate = (s64)lt6911gxd->cur_mode.width * + lt6911gxd->cur_mode.height * + lt6911gxd->cur_mode.fps * 16; + do_div(pixel_rate, lt6911gxd->cur_mode.lanes); + + return pixel_rate; +} + +static int lt6911gxd_status_update(struct lt6911gxd *lt6911gxd) +{ + struct i2c_client *client = v4l2_get_subdevdata(<6911gxd->sd); + u64 int_event; + u64 byte_clk, pixel_clk, fps; + u64 htotal, vtotal, width, height; + int timeout_cnt = 3; + int ret = 0; + + /* Read interrupt event */ + cci_read(lt6911gxd->regmap, REG_INT_HDMI, &int_event, &ret); + while (ret && timeout_cnt--) { + ret = cci_read(lt6911gxd->regmap, REG_INT_HDMI, + &int_event, NULL); + } + + if (ret) + return dev_err_probe(&client->dev, ret, + "failed to read interrupt event\n"); + + /* TODO: add audio ready and disappear type */ + switch (int_event) { + case 
INT_VIDEO_READY: + cci_read(lt6911gxd->regmap, REG_BYTE_CLK, &byte_clk, &ret); + byte_clk *= 1000; + cci_read(lt6911gxd->regmap, REG_PIX_CLK, &pixel_clk, &ret); + pixel_clk *= 1000; + + if (ret || byte_clk == 0 || pixel_clk == 0) { + dev_err(&client->dev, + "invalid ByteClock or PixelClock\n"); + return -EINVAL; + } + + cci_read(lt6911gxd->regmap, REG_H_TOTAL, &htotal, &ret); + cci_read(lt6911gxd->regmap, REG_V_TOTAL, &vtotal, &ret); + if (ret || htotal == 0 || vtotal == 0) { + dev_err(&client->dev, "invalid htotal or vtotal\n"); + return -EINVAL; + } + + fps = div_u64(pixel_clk, htotal * vtotal); + if (fps > 60) { + dev_err(&client->dev, + "max fps is 60, current fps: %llu\n", fps); + return -EINVAL; + } + + cci_read(lt6911gxd->regmap, REG_HALF_H_ACTIVE, + &width, &ret); + cci_read(lt6911gxd->regmap, REG_V_ACTIVE, &height, &ret); + if (ret || width == 0 || width > 3840 || + height == 0 || height > 2160) { + dev_err(&client->dev, "invalid width or height\n"); + return -EINVAL; + } + + lt6911gxd->cur_mode.height = height; + lt6911gxd->cur_mode.width = width; + lt6911gxd->cur_mode.fps = fps; + /* MIPI Clock Rate = ByteClock × 4, defined in lt6911gxd spec */ + lt6911gxd->cur_mode.link_freq = byte_clk * 4; + v4l2_subdev_notify_event(<6911gxd->sd, + <6911gxd_ev_source_change); + break; + + case INT_VIDEO_DISAPPEAR: + cci_write(lt6911gxd->regmap, REG_MIPI_TX_CTRL, 0x0, NULL); + lt6911gxd->cur_mode.height = 0; + lt6911gxd->cur_mode.width = 0; + lt6911gxd->cur_mode.fps = 0; + lt6911gxd->cur_mode.link_freq = 0; + v4l2_subdev_notify_event(<6911gxd->sd, + <6911gxd_ev_stream_end); + break; + + default: + return -ENOLINK; + } + + return ret; +} + +/* TODO: add audio sampling rate and present control */ +static int lt6911gxd_init_controls(struct lt6911gxd *lt6911gxd) +{ + struct v4l2_ctrl_handler *ctrl_hdlr; + s64 pixel_rate; + int ret; + + ctrl_hdlr = <6911gxd->ctrl_handler; + ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); + if (ret) + return ret; + + lt6911gxd->link_freq = + 
v4l2_ctrl_new_int_menu(ctrl_hdlr, NULL, V4L2_CID_LINK_FREQ, + sizeof(lt6911gxd->cur_mode.link_freq), + 0, <6911gxd->cur_mode.link_freq); + + pixel_rate = get_pixel_rate(lt6911gxd); + lt6911gxd->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, NULL, + V4L2_CID_PIXEL_RATE, + pixel_rate, pixel_rate, 1, + pixel_rate); + + if (ctrl_hdlr->error) { + ret = ctrl_hdlr->error; + goto hdlr_free; + } + lt6911gxd->sd.ctrl_handler = ctrl_hdlr; + + return 0; + +hdlr_free: + v4l2_ctrl_handler_free(ctrl_hdlr); + return ret; +} + +static void lt6911gxd_update_pad_format(const struct lt6911gxd_mode *mode, + struct v4l2_mbus_framefmt *fmt) +{ + fmt->width = mode->width; + fmt->height = mode->height; + fmt->code = mode->code; + fmt->field = V4L2_FIELD_NONE; +} + +static int lt6911gxd_enable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct lt6911gxd *lt6911gxd = to_lt6911gxd(sd); + int ret; + + ret = pm_runtime_resume_and_get(&client->dev); + if (ret < 0) + return ret; + + cci_write(lt6911gxd->regmap, REG_MIPI_TX_CTRL, 0x1, &ret); + if (ret) { + dev_err(&client->dev, "failed to start stream: %d\n", ret); + goto err_rpm_put; + } + + return 0; + +err_rpm_put: + pm_runtime_put(&client->dev); + return ret; +} + +static int lt6911gxd_disable_streams(struct v4l2_subdev *sd, + struct v4l2_subdev_state *state, + u32 pad, u64 streams_mask) +{ + struct lt6911gxd *lt6911gxd = to_lt6911gxd(sd); + struct i2c_client *client = v4l2_get_subdevdata(<6911gxd->sd); + int ret; + + ret = cci_write(lt6911gxd->regmap, REG_MIPI_TX_CTRL, 0x0, NULL); + if (ret) + dev_err(&client->dev, "failed to stop stream: %d\n", ret); + + pm_runtime_put(&client->dev); + return 0; +} + +static int lt6911gxd_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct lt6911gxd *lt6911gxd = to_lt6911gxd(sd); + u64 pixel_rate, link_freq; + + 
lt6911gxd_update_pad_format(<6911gxd->cur_mode, &fmt->format); + *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format; + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) + return 0; + + pixel_rate = get_pixel_rate(lt6911gxd); + __v4l2_ctrl_modify_range(lt6911gxd->pixel_rate, pixel_rate, + pixel_rate, 1, pixel_rate); + + link_freq = lt6911gxd->cur_mode.link_freq; + __v4l2_ctrl_modify_range(lt6911gxd->link_freq, link_freq, + link_freq, 1, link_freq); + + return 0; +} + +static int lt6911gxd_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_format *fmt) +{ + struct lt6911gxd *lt6911gxd = to_lt6911gxd(sd); + + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) + fmt->format = *v4l2_subdev_state_get_format(sd_state, fmt->pad); + else + lt6911gxd_update_pad_format(<6911gxd->cur_mode, &fmt->format); + + return 0; +} + +static int lt6911gxd_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct lt6911gxd *lt6911gxd = to_lt6911gxd(sd); + + if (code->index) + return -EINVAL; + + code->code = lt6911gxd->cur_mode.code; + + return 0; +} + +static int lt6911gxd_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct lt6911gxd *lt6911gxd = to_lt6911gxd(sd); + + if (fse->index != 0) + return -EINVAL; + + if (fse->code != MEDIA_BUS_FMT_UYVY8_1X16) + return -EINVAL; + + fse->min_width = lt6911gxd->cur_mode.width; + fse->max_width = fse->min_width; + fse->min_height = lt6911gxd->cur_mode.height; + fse->max_height = fse->min_height; + + return 0; +} + +static int lt6911gxd_enum_frame_interval(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state, + struct v4l2_subdev_frame_interval_enum *fie) +{ + struct lt6911gxd *lt6911gxd = to_lt6911gxd(sd); + + if (fie->index != 0) + return -EINVAL; + + fie->interval.numerator = 1; + fie->interval.denominator = lt6911gxd->cur_mode.fps; + + return 0; 
+} + +static int lt6911gxd_init_state(struct v4l2_subdev *sd, + struct v4l2_subdev_state *sd_state) +{ + struct v4l2_subdev_format fmt = { + .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY + : V4L2_SUBDEV_FORMAT_ACTIVE, + }; + + return lt6911gxd_set_format(sd, sd_state, &fmt); +} + +static const struct v4l2_subdev_video_ops lt6911gxd_video_ops = { + .s_stream = v4l2_subdev_s_stream_helper, +}; + +static const struct v4l2_subdev_pad_ops lt6911gxd_pad_ops = { + .set_fmt = lt6911gxd_set_format, + .get_fmt = lt6911gxd_get_format, + .enable_streams = lt6911gxd_enable_streams, + .disable_streams = lt6911gxd_disable_streams, + .enum_mbus_code = lt6911gxd_enum_mbus_code, + .enum_frame_size = lt6911gxd_enum_frame_size, + .enum_frame_interval = lt6911gxd_enum_frame_interval, + .get_frame_interval = v4l2_subdev_get_frame_interval, +}; + +static const struct v4l2_subdev_core_ops lt6911gxd_subdev_core_ops = { + .subscribe_event = v4l2_ctrl_subdev_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static const struct v4l2_subdev_ops lt6911gxd_subdev_ops = { + .core = <6911gxd_subdev_core_ops, + .video = <6911gxd_video_ops, + .pad = <6911gxd_pad_ops, +}; + +static const struct media_entity_operations lt6911gxd_subdev_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_subdev_internal_ops lt6911gxd_internal_ops = { + .init_state = lt6911gxd_init_state, +}; + +static int lt6911gxd_fwnode_parse(struct lt6911gxd *lt6911gxd, + struct device *dev) +{ + struct fwnode_handle *endpoint; + struct v4l2_fwnode_endpoint bus_cfg = { + .bus_type = V4L2_MBUS_CSI2_CPHY, + }; + int ret; + + endpoint = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, + FWNODE_GRAPH_ENDPOINT_NEXT); + if (!endpoint) + return dev_err_probe(dev, -EPROBE_DEFER, + "endpoint node not found\n"); + + ret = v4l2_fwnode_endpoint_parse(endpoint, &bus_cfg); + fwnode_handle_put(endpoint); + if (ret) { + dev_err(dev, "failed to parse endpoint node: %d\n", ret); 
+ goto out_err; + } + + /* + * Check the number of MIPI CSI2 data lanes, + * lt6911gxd only support 4 lanes. + */ + if (bus_cfg.bus.mipi_csi2.num_data_lanes != LT6911GXD_DEFAULT_LANES) { + dev_err(dev, "only 2 data lanes are currently supported\n"); + goto out_err; + } + + return 0; + +out_err: + v4l2_fwnode_endpoint_free(&bus_cfg); + return ret; +} + +static int lt6911gxd_identify_module(struct lt6911gxd *lt6911gxd, + struct device *dev) +{ + u64 val; + int ret = 0; + + /* Chip ID should be confirmed when the I2C slave is active */ + cci_write(lt6911gxd->regmap, REG_ENABLE_I2C, 0x1, &ret); + cci_read(lt6911gxd->regmap, REG_CHIP_ID, &val, &ret); + cci_write(lt6911gxd->regmap, REG_ENABLE_I2C, 0x0, &ret); + if (ret) + return dev_err_probe(dev, ret, "fail to read chip id\n"); + + if (val != LT6911GXD_CHIP_ID) { + return dev_err_probe(dev, -ENXIO, "chip id mismatch: %x!=%x\n", + LT6911GXD_CHIP_ID, (u16)val); + } + + return 0; +} + +// static irqreturn_t lt6911gxd_threaded_irq_fn(int irq, void *dev_id) +// { +// struct v4l2_subdev *sd = dev_id; +// struct lt6911gxd *lt6911gxd = to_lt6911gxd(sd); +// struct v4l2_subdev_state *state; + +// state = v4l2_subdev_lock_and_get_active_state(sd); +// lt6911gxd_status_update(lt6911gxd); +// v4l2_subdev_unlock_state(state); + +// return IRQ_HANDLED; +// } + +static void lt6911gxd_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct lt6911gxd *lt6911gxd = to_lt6911gxd(sd); + + free_irq(gpiod_to_irq(lt6911gxd->irq_gpio), lt6911gxd); + v4l2_async_unregister_subdev(sd); + v4l2_subdev_cleanup(sd); + media_entity_cleanup(&sd->entity); + v4l2_ctrl_handler_free(<6911gxd->ctrl_handler); + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); +} + +#ifdef POLLING_MODE +/* try polling mode start*/ + +static int lt6911gxd_detect_thread(void *data) +{ + struct lt6911gxd *lt6911gxd = (struct lt6911gxd *)data; + struct i2c_client *client = v4l2_get_subdevdata(<6911gxd->sd); + 
+ if (!lt6911gxd) { + dev_err(&client->dev, "Invalid argument\n"); + return -EINVAL; + } + + while (lt6911gxd->thread_run) { + lt6911gxd_status_update(lt6911gxd); + usleep_range(2000000, 2050000); + } + return 0; +} +/* try polling mode end*/ +#endif + +static int lt6911gxd_probe(struct i2c_client *client) +{ + struct lt6911gxd *lt6911gxd; + struct device *dev = &client->dev; + // u64 irq_pin_flags; + int ret; + + lt6911gxd = devm_kzalloc(dev, sizeof(*lt6911gxd), GFP_KERNEL); + if (!lt6911gxd) + return -ENOMEM; + + /* define default mode: 4k@60fps, changed when interrupt occurs. */ + lt6911gxd->cur_mode = default_mode; + + lt6911gxd->regmap = devm_regmap_init_i2c(client, + <6911gxd_regmap_config); + if (IS_ERR(lt6911gxd->regmap)) + return dev_err_probe(dev, PTR_ERR(lt6911gxd->regmap), + "failed to init CCI\n"); + + v4l2_i2c_subdev_init(<6911gxd->sd, client, <6911gxd_subdev_ops); + + lt6911gxd->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_IN); + if (IS_ERR(lt6911gxd->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(lt6911gxd->reset_gpio), + "failed to get reset gpio\n"); + + ret = lt6911gxd_fwnode_parse(lt6911gxd, dev); + if (ret) + return ret; + + usleep_range(10000, 10500); + + ret = lt6911gxd_identify_module(lt6911gxd, dev); + if (ret) + return dev_err_probe(dev, ret, "failed to find chip\n"); + + ret = lt6911gxd_init_controls(lt6911gxd); + if (ret) + return dev_err_probe(dev, ret, "failed to init control\n"); + + lt6911gxd->sd.dev = dev; + lt6911gxd->sd.internal_ops = <6911gxd_internal_ops; + lt6911gxd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_EVENTS; + lt6911gxd->sd.entity.ops = <6911gxd_subdev_entity_ops; + lt6911gxd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; + lt6911gxd->pad.flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(<6911gxd->sd.entity, 1, <6911gxd->pad); + if (ret) { + dev_err(dev, "failed to init entity pads: %d\n", ret); + goto err_v4l2_ctrl_handler_free; + } + + /* + * Device is already turned on by i2c-core 
with ACPI domain PM. + * Enable runtime PM and turn off the device. + */ + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_idle(dev); + + ret = v4l2_subdev_init_finalize(<6911gxd->sd); + if (ret) { + dev_err(dev, "failed to init v4l2 subdev: %d\n", ret); + goto err_media_entity_cleanup; + } + + lt6911gxd_status_update(lt6911gxd); + +#ifdef POLLING_MODE + lt6911gxd->poll_task = kthread_create(lt6911gxd_detect_thread, + lt6911gxd, "lt6911gxd polling thread"); + if (lt6911gxd->poll_task == NULL) { + dev_err(&client->dev, "create lt6911gxd polling thread failed.\n"); + goto err_media_entity_cleanup; + } else { + lt6911gxd->thread_run = true; + wake_up_process(lt6911gxd->poll_task); + dev_info(&client->dev, "Started lt6911gxd polling thread.\n"); + } +#else + //lt6911gxd->irq_gpio = 541; + + //lt6911gxd->irq_gpio = gpio_to_desc(541); //C5: GPP_C_6 ; DEV change to intc105D + lt6911gxd->irq_gpio = devm_gpiod_get(dev, "power-enable", GPIOD_IN); + if (IS_ERR(lt6911gxd->irq_gpio)) + return dev_err_probe(dev, PTR_ERR(lt6911gxd->irq_gpio), + "failed to get power-en gpio\n"); + dev_dbg(dev, "lt6911gxd_probe: use power-en irq_gpio!\n"); + + /* Setting irq */ + irq_pin_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | + IRQF_ONESHOT; + + ret = request_threaded_irq(gpiod_to_irq(lt6911gxd->irq_gpio), NULL, + lt6911gxd_threaded_irq_fn, + irq_pin_flags, NULL, lt6911gxd); + if (ret) { + dev_err(dev, "failed to request IRQ: %d\n", ret); + goto err_subdev_cleanup; + } +#endif + + ret = v4l2_async_register_subdev_sensor(<6911gxd->sd); + if (ret) { + dev_err(dev, "failed to register V4L2 subdev: %d\n", ret); + goto err_free_irq; + } + + return 0; + +err_free_irq: + free_irq(gpiod_to_irq(lt6911gxd->irq_gpio), lt6911gxd); + +// err_subdev_cleanup: +// v4l2_subdev_cleanup(<6911gxd->sd); + +err_media_entity_cleanup: + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + media_entity_cleanup(<6911gxd->sd.entity); + +err_v4l2_ctrl_handler_free: + 
v4l2_ctrl_handler_free(lt6911gxd->sd.ctrl_handler); + + return ret; +} + +static const struct acpi_device_id lt6911gxd_acpi_ids[] = { + { "INTC1124" }, + {} +}; +MODULE_DEVICE_TABLE(acpi, lt6911gxd_acpi_ids); + +static struct i2c_driver lt6911gxd_i2c_driver = { + .driver = { + .name = "lt6911gxd", + .acpi_match_table = ACPI_PTR(lt6911gxd_acpi_ids), + }, + .probe = lt6911gxd_probe, + .remove = lt6911gxd_remove, +}; + +module_i2c_driver(lt6911gxd_i2c_driver); + +MODULE_AUTHOR("Zou, Xiaohong xiaohong.zou@intel.com"); +MODULE_DESCRIPTION("Lontium lt6911gxd HDMI to MIPI Bridge Driver"); +MODULE_LICENSE("GPL"); -- 2.34.1 ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.14_iot/readme000066400000000000000000000012701503071307200227520ustar00rootroot000000000000001. checkout iot kernel 6.14 2. copy patches to kernel source tree and apply them a. enable IPU snoop for cache consistency issue git am 0001-media-pci-set-snoop-1-to-fix-cache-line-issue.patch b. enable lt6911gxd git am 0002-media-i2c-enable-lt6911gxd-in-ipu-bridge.patch git am 0003-media-i2c-add-support-for-lt6911gxd.patch git am 0004-INT3472-Support-LT6911GXD.patch git am 0006-media-ipu-bridge-change-LT6911GXD-ACPI-ID-to-INTC112.patch git am 0007-platform-x86-fix-ipu7-allyes-build-error-for-int3472.patch git am 0008-media-i2c-add-driver-for-LT6911GXD.patch c. 
enable CPHY git am 0005-ipu-bridge-add-CPHY-support.patch ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.4/000077500000000000000000000000001503071307200206365ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.4/0001-LNL-Enable-GPIO-pinctrl-driver.patch000066400000000000000000000230471503071307200275520ustar00rootroot00000000000000From c96cc019f5a9081eafc96ae1299f09cb079ccfc1 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 13 Nov 2023 14:28:47 +0200 Subject: [PATCH 1/3] LNL: Enable GPIO pinctrl driver It is still under review here: * https://lore.kernel.org/all/20231113123147.4075203-1- andriy.shevchenko@linux.intel.com/ Including changes: * pinctrl: intel: Revert "Unexport intel_pinctrl_probe()" * pinctrl: intel: Add a generic Intel pin control platform driver Signed-off-by: Andy Shevchenko --- drivers/pinctrl/intel/Kconfig | 10 + drivers/pinctrl/intel/Makefile | 1 + .../pinctrl/intel/pinctrl-intel-platform.c | 231 ++++++++++++++++++ drivers/pinctrl/intel/pinctrl-intel.c | 5 +- drivers/pinctrl/intel/pinctrl-intel.h | 3 + 5 files changed, 248 insertions(+), 2 deletions(-) create mode 100644 drivers/pinctrl/intel/pinctrl-intel-platform.c diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig index b3ec00624416..e14353d706cc 100644 --- a/drivers/pinctrl/intel/Kconfig +++ b/drivers/pinctrl/intel/Kconfig @@ -66,6 +66,16 @@ config PINCTRL_INTEL select GPIOLIB select GPIOLIB_IRQCHIP +config PINCTRL_INTEL_PLATFORM + tristate "Intel pinctrl and GPIO platform driver" + depends on ACPI + select PINCTRL_INTEL + help + This pinctrl driver provides an interface that allows configuring + of Intel PCH pins and using them as GPIOs. 
Currently the following + Intel SoCs / platforms require this to be functional: + - Lunar Lake + config PINCTRL_ALDERLAKE tristate "Intel Alder Lake pinctrl and GPIO driver" depends on ACPI diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile index 906dd6c8d837..bf14268de763 100644 --- a/drivers/pinctrl/intel/Makefile +++ b/drivers/pinctrl/intel/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_PINCTRL_LYNXPOINT) += pinctrl-lynxpoint.o obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o obj-$(CONFIG_PINCTRL_MOOREFIELD) += pinctrl-moorefield.o obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o +obj-$(CONFIG_PINCTRL_INTEL_PLATFORM) += pinctrl-intel-platform.o obj-$(CONFIG_PINCTRL_ALDERLAKE) += pinctrl-alderlake.o obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o diff --git a/drivers/pinctrl/intel/pinctrl-intel-platform.c b/drivers/pinctrl/intel/pinctrl-intel-platform.c new file mode 100644 index 000000000000..ff8a01fc9fa4 --- /dev/null +++ b/drivers/pinctrl/intel/pinctrl-intel-platform.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel PCH pinctrl/GPIO driver + * + * Copyright (C) 2021-2023, Intel Corporation + * Author: Andy Shevchenko + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "pinctrl-intel.h" + +struct intel_platform_pins { + struct pinctrl_pin_desc *pins; + size_t npins; +}; + +static int intel_platform_pinctrl_prepare_pins(struct device *dev, size_t base, + const char *name, u32 size, + struct intel_platform_pins *pins) +{ + struct pinctrl_pin_desc *descs; + char **pin_names; + unsigned int i; + size_t bytes; + + pin_names = devm_kasprintf_strarray(dev, name, size); + if (IS_ERR(pin_names)) + return PTR_ERR(pin_names); + + if (unlikely(check_mul_overflow(base + size, sizeof(*descs), &bytes))) + return -ENOMEM; + + descs = devm_krealloc(dev, pins->pins, bytes, GFP_KERNEL); + if (!descs) + return -ENOMEM; + + for (i = 0; i < 
size; i++) { + unsigned int pin_number = base + i; + char *pin_name = pin_names[i]; + struct pinctrl_pin_desc *desc; + + /* Unify delimiter for pin name */ + strreplace(pin_name, '-', '_'); + + desc = &descs[pin_number]; + desc->number = pin_number; + desc->name = pin_name; + } + + pins->pins = descs; + pins->npins = base + size; + + return 0; +} + +static int intel_platform_pinctrl_prepare_group(struct device *dev, + struct fwnode_handle *child, + struct intel_padgroup *gpp, + struct intel_platform_pins *pins) +{ + size_t base = pins->npins; + const char *name; + u32 size; + int ret; + + ret = fwnode_property_read_string(child, "intc-gpio-group-name", &name); + if (ret) + return ret; + + ret = fwnode_property_read_u32(child, "intc-gpio-pad-count", &size); + if (ret) + return ret; + + ret = intel_platform_pinctrl_prepare_pins(dev, base, name, size, pins); + if (ret) + return ret; + + gpp->base = base; + gpp->size = size; + gpp->gpio_base = INTEL_GPIO_BASE_MATCH; + + return 0; +} + +static int intel_platform_pinctrl_prepare_community(struct device *dev, + struct intel_community *community, + struct intel_platform_pins *pins) +{ + struct fwnode_handle *child; + struct intel_padgroup *gpps; + unsigned int group; + size_t ngpps; + u32 offset; + int ret; + + ret = device_property_read_u32(dev, "intc-gpio-pad-ownership-offset", &offset); + if (ret) + return ret; + community->padown_offset = offset; + + ret = device_property_read_u32(dev, "intc-gpio-pad-configuration-lock-offset", &offset); + if (ret) + return ret; + community->padcfglock_offset = offset; + + ret = device_property_read_u32(dev, "intc-gpio-host-software-pad-ownership-offset", &offset); + if (ret) + return ret; + community->hostown_offset = offset; + + ret = device_property_read_u32(dev, "intc-gpio-gpi-interrupt-status-offset", &offset); + if (ret) + return ret; + community->is_offset = offset; + + ret = device_property_read_u32(dev, "intc-gpio-gpi-interrupt-enable-offset", &offset); + if (ret) + return 
ret; + community->ie_offset = offset; + + ngpps = device_get_child_node_count(dev); + if (!ngpps) + return -ENODEV; + + gpps = devm_kcalloc(dev, ngpps, sizeof(*gpps), GFP_KERNEL); + if (!gpps) + return -ENOMEM; + + group = 0; + device_for_each_child_node(dev, child) { + struct intel_padgroup *gpp = &gpps[group]; + + gpp->reg_num = group; + + ret = intel_platform_pinctrl_prepare_group(dev, child, gpp, pins); + if (ret) + return ret; + + group++; + } + + community->ngpps = ngpps; + community->gpps = gpps; + + return 0; +} + +static int intel_platform_pinctrl_prepare_soc_data(struct device *dev, + struct intel_pinctrl_soc_data *data) +{ + struct intel_platform_pins pins = {}; + struct intel_community *communities; + size_t ncommunities; + unsigned int i; + int ret; + + /* Version 1.0 of the specification assumes only a single community per device node */ + ncommunities = 1, + communities = devm_kcalloc(dev, ncommunities, sizeof(*communities), GFP_KERNEL); + if (!communities) + return -ENOMEM; + + for (i = 0; i < ncommunities; i++) { + struct intel_community *community = &communities[i]; + + community->barno = i; + community->pin_base = pins.npins; + + ret = intel_platform_pinctrl_prepare_community(dev, community, &pins); + if (ret) + return ret; + + community->npins = pins.npins - community->pin_base; + } + + data->ncommunities = ncommunities; + data->communities = communities; + + data->npins = pins.npins; + data->pins = pins.pins; + + return 0; +} + +static int intel_platform_pinctrl_probe(struct platform_device *pdev) +{ + struct intel_pinctrl_soc_data *data; + struct device *dev = &pdev->dev; + int ret; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = intel_platform_pinctrl_prepare_soc_data(dev, data); + if (ret) + return ret; + + return intel_pinctrl_probe(pdev, data); +} + +static const struct acpi_device_id intel_platform_pinctrl_acpi_match[] = { + { "INTC105F" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, 
intel_platform_pinctrl_acpi_match); + +INTEL_PINCTRL_PM_OPS(intel_pinctrl_pm_ops); + +static struct platform_driver intel_platform_pinctrl_driver = { + .probe = intel_platform_pinctrl_probe, + .driver = { + .name = "intel-pinctrl", + .acpi_match_table = intel_platform_pinctrl_acpi_match, + .pm = pm_sleep_ptr(&intel_pinctrl_pm_ops), + }, +}; +module_platform_driver(intel_platform_pinctrl_driver); + +MODULE_AUTHOR("Andy Shevchenko "); +MODULE_DESCRIPTION("Intel PCH pinctrl/GPIO driver"); +MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS(PINCTRL_INTEL); diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index c7a71c49df0a..37f2bd8dea45 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -1532,8 +1532,8 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl, return PTR_ERR_OR_ZERO(pwm); } -static int intel_pinctrl_probe(struct platform_device *pdev, - const struct intel_pinctrl_soc_data *soc_data) +int intel_pinctrl_probe(struct platform_device *pdev, + const struct intel_pinctrl_soc_data *soc_data) { struct device *dev = &pdev->dev; struct intel_pinctrl *pctrl; @@ -1651,6 +1651,7 @@ static int intel_pinctrl_probe(struct platform_device *pdev, return 0; } +EXPORT_SYMBOL_NS_GPL(intel_pinctrl_probe, PINCTRL_INTEL); int intel_pinctrl_probe_by_hid(struct platform_device *pdev) { diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h index 1faf2ada480a..0278c9331de7 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.h +++ b/drivers/pinctrl/intel/pinctrl-intel.h @@ -252,6 +252,9 @@ struct intel_pinctrl { int irq; }; +int intel_pinctrl_probe(struct platform_device *pdev, + const struct intel_pinctrl_soc_data *soc_data); + int intel_pinctrl_probe_by_hid(struct platform_device *pdev); int intel_pinctrl_probe_by_uid(struct platform_device *pdev); -- 2.43.2 
0002-mfd-intel-lpss-Add-Intel-Lunar-Lake-M-PCI-IDs.patch000066400000000000000000000033531503071307200317710ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.4From d7e0360636dbc3cbd59b25eedd682dfb52aeef07 Mon Sep 17 00:00:00 2001 From: Jarkko Nikula Date: Mon, 2 Oct 2023 11:33:44 +0300 Subject: [PATCH 2/3] mfd: intel-lpss: Add Intel Lunar Lake-M PCI IDs Add Intel Lunar Lake-M SoC PCI IDs. Signed-off-by: Jarkko Nikula Signed-off-by: Lee Jones --- drivers/mfd/intel-lpss-pci.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 699f44ffff0e..ae5759200622 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -561,6 +561,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info }, + /* LNL-M */ + { PCI_VDEVICE(INTEL, 0xa825), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa826), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa827), (kernel_ulong_t)&tgl_info }, + { PCI_VDEVICE(INTEL, 0xa830), (kernel_ulong_t)&tgl_info }, + { PCI_VDEVICE(INTEL, 0xa846), (kernel_ulong_t)&tgl_info }, + { PCI_VDEVICE(INTEL, 0xa850), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa851), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa852), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa878), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa879), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa87a), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa87b), (kernel_ulong_t)&ehl_i2c_info }, { } }; MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); -- 2.43.2 
ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.4/0003-Add-IPU7-Kconfig-and-Makefile.patch000066400000000000000000000027301503071307200273040ustar00rootroot00000000000000From a00700ad5e7de77e03572b9a357a464071a7326a Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Fri, 26 Apr 2024 17:44:20 +0800 Subject: [PATCH 3/3] Add IPU7 Kconfig and Makefile --- drivers/media/pci/Kconfig | 2 +- drivers/media/pci/intel/Kconfig | 4 ++++ drivers/media/pci/intel/Makefile | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 drivers/media/pci/intel/Kconfig diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig index 480194543d05..ee095bde0b68 100644 --- a/drivers/media/pci/Kconfig +++ b/drivers/media/pci/Kconfig @@ -73,7 +73,7 @@ config VIDEO_PCI_SKELETON Enable build of the skeleton PCI driver, used as a reference when developing new drivers. -source "drivers/media/pci/intel/ipu3/Kconfig" +source "drivers/media/pci/intel/Kconfig" endif #MEDIA_PCI_SUPPORT endif #PCI diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig new file mode 100644 index 000000000000..1fc42fd3a733 --- /dev/null +++ b/drivers/media/pci/intel/Kconfig @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +source "drivers/media/pci/intel/ipu3/Kconfig" +source "drivers/media/pci/intel/ipu7/Kconfig" diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile index 0b4236c4db49..f4cad3411e22 100644 --- a/drivers/media/pci/intel/Makefile +++ b/drivers/media/pci/intel/Makefile @@ -4,3 +4,4 @@ # obj-y += ipu3/ +obj-$(CONFIG_VIDEO_INTEL_IPU7) += ipu7/ -- 2.43.2 ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.8/000077500000000000000000000000001503071307200206425ustar00rootroot000000000000000001-media-pci-ipu-bridge-Support-ov13b10-camera.patch000066400000000000000000000015031503071307200320270ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.8From 60ddc99ffcfd25c3c980a1b4036c081217f615e5 Mon Sep 17 00:00:00 2001 
From: Hao Yao Date: Tue, 2 Jul 2024 17:26:53 +0800 Subject: [PATCH 1/4] media: pci: ipu-bridge: Support ov13b10 camera --- drivers/media/pci/intel/ipu-bridge.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index f980e3125a7b..ea51537c6b4e 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -58,6 +58,7 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("INT3537", 1, 437000000), /* Omnivision ov13b10 */ IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000), + IPU_SENSOR_CONFIG("OVTI13B1", 1, 560000000), /* GalaxyCore GC0310 */ IPU_SENSOR_CONFIG("INT0310", 0), }; -- 2.45.2 0002-media-pci-ipu-bridge-Support-ov02c10-camera.patch000066400000000000000000000016201503071307200320270ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.8From 9cceb57c706b83b565a19abbb0327af58dd751f3 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Wed, 3 Jul 2024 16:06:52 +0800 Subject: [PATCH 2/4] media: pci: ipu-bridge: Support ov02c10 camera --- drivers/media/pci/intel/ipu-bridge.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c index ea51537c6b4e..1a32b91c2395 100644 --- a/drivers/media/pci/intel/ipu-bridge.c +++ b/drivers/media/pci/intel/ipu-bridge.c @@ -56,6 +56,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = { IPU_SENSOR_CONFIG("INT3474", 1, 180000000), /* Hynix hi556 */ IPU_SENSOR_CONFIG("INT3537", 1, 437000000), + /* Omnivision ov02c10 */ + IPU_SENSOR_CONFIG("OVTI02C1", 1, 400000000), /* Omnivision ov13b10 */ IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000), IPU_SENSOR_CONFIG("OVTI13B1", 1, 560000000), -- 2.45.2 
ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.8/in-tree-build/000077500000000000000000000000001503071307200233025ustar00rootroot000000000000000003-media-pci-intel-Add-IPU7-Kconfig-Makefile.patch000066400000000000000000000020521503071307200340650ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.8/in-tree-buildFrom 7704cde6485721b1d9a90c00a7681d344abdb54a Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Tue, 2 Jul 2024 17:26:08 +0800 Subject: [PATCH 3/4] media: pci: intel: Add IPU7 Kconfig & Makefile --- drivers/media/pci/intel/Kconfig | 1 + drivers/media/pci/intel/Makefile | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig index ee4684159d3d..3b325d2b095d 100644 --- a/drivers/media/pci/intel/Kconfig +++ b/drivers/media/pci/intel/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only source "drivers/media/pci/intel/ipu3/Kconfig" +source "drivers/media/pci/intel/ipu7/Kconfig" source "drivers/media/pci/intel/ivsc/Kconfig" config IPU_BRIDGE diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile index f199a97e1d78..213056608aca 100644 --- a/drivers/media/pci/intel/Makefile +++ b/drivers/media/pci/intel/Makefile @@ -4,4 +4,5 @@ # obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o obj-y += ipu3/ +obj-y += ipu7/ obj-y += ivsc/ -- 2.45.2 0004-media-i2c-Add-ov02c10-Kconfig-Makefile.patch000066400000000000000000000027541503071307200332360ustar00rootroot00000000000000ipu7-drivers-0~git202507010803.e6f86fa7/patch/v6.8/in-tree-buildFrom 710b52437ed6d109306fd35e886dedcd76d735a6 Mon Sep 17 00:00:00 2001 From: Hao Yao Date: Wed, 3 Jul 2024 16:04:10 +0800 Subject: [PATCH 4/4] media: i2c: Add ov02c10 Kconfig & Makefile --- drivers/media/i2c/Kconfig | 9 +++++++++ drivers/media/i2c/Makefile | 1 + 2 files changed, 10 insertions(+) diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 4c3435921f19..bf7dc2514666 100644 --- a/drivers/media/i2c/Kconfig +++ 
b/drivers/media/i2c/Kconfig @@ -322,6 +322,15 @@ config VIDEO_OV02A10 To compile this driver as a module, choose M here: the module will be called ov02a10. +config VIDEO_OV02C10 + tristate "OmniVision OV02C10 sensor support" + help + This is a Video4Linux2 sensor driver for the OmniVision + OV02C10 camera. + + To compile this driver as a module, choose M here: the + module will be called ov02c10. + config VIDEO_OV08D10 tristate "OmniVision OV08D10 sensor support" help diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index dfbe6448b549..9ffc67e7a812 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -77,6 +77,7 @@ obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o +obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o obj-$(CONFIG_VIDEO_OV08D10) += ov08d10.o obj-$(CONFIG_VIDEO_OV08X40) += ov08x40.o obj-$(CONFIG_VIDEO_OV13858) += ov13858.o -- 2.45.2