kvm-ioctls-0.2.0/.buildkite/pipeline.yml0100644€T¡ŽÔ€T 000000073151350324253600172410ustar0000000000000000steps: - label: "build-gnu-x86" commands: - cargo build --release retry: automatic: false agents: platform: x86_64.metal plugins: - docker#v3.0.1: image: "rustvmm/dev:v2" always-pull: true - label: "build-gnu-arm" commands: - cargo build --release retry: automatic: false agents: platform: arm.metal plugins: - docker#v3.0.1: image: "rustvmm/dev:v2" always-pull: true - label: "build-musl-x86" commands: - cargo build --release --target x86_64-unknown-linux-musl retry: automatic: false agents: platform: x86_64.metal plugins: - docker#v3.0.1: image: "rustvmm/dev:v2" always-pull: true - label: "build-musl-arm" commands: - cargo build --release --target aarch64-unknown-linux-musl retry: automatic: false agents: platform: arm.metal plugins: - docker#v3.0.1: image: "rustvmm/dev:v2" always-pull: true - label: "style" command: cargo fmt --all -- --check retry: automatic: false agents: platform: x86_64.metal plugins: - docker#v3.0.1: image: "rustvmm/dev:v2" always-pull: true - label: "unittests-gnu-x86" commands: - cargo test retry: automatic: false agents: platform: x86_64.metal plugins: - docker#v3.0.1: privileged: true image: "rustvmm/dev:v2" always-pull: true - label: "unittests-gnu-arm" commands: - cargo test retry: automatic: false agents: platform: arm.metal plugins: - docker#v3.0.1: privileged: true image: "rustvmm/dev:v2" always-pull: true - label: "unittests-musl-x86" command: - cargo test --target x86_64-unknown-linux-musl retry: automatic: false agents: platform: x86_64.metal plugins: - docker#v3.0.1: privileged: true image: "rustvmm/dev:v2" always-pull: true - label: "unittests-musl-arm" command: - cargo test --target aarch64-unknown-linux-musl retry: automatic: false agents: platform: arm.metal plugins: - docker#v3.0.1: privileged: true image: "rustvmm/dev:v2" always-pull: true - label: "clippy-x86" commands: - cargo clippy --all -- -D warnings retry: 
automatic: false agents: platform: x86_64.metal plugins: - docker#v3.0.1: image: "rustvmm/dev:v2" always-pull: true - label: "clippy-arm" commands: - cargo clippy --all -- -D warnings retry: automatic: false agents: platform: arm.metal plugins: - docker#v3.0.1: image: "rustvmm/dev:v2" always-pull: true - label: "coverage-x86" commands: - pytest tests/test_coverage.py retry: automatic: false agents: platform: x86_64.metal plugins: - docker#v3.0.1: privileged: true image: "rustvmm/dev:v2" always-pull: true - label: "check-warnings-x86" commands: - RUSTFLAGS="-D warnings" cargo check --all-targets retry: automatic: false agents: platform: x86_64.metal plugins: - docker#v3.0.1: privileged: true image: "rustvmm/dev:v2" always-pull: true - label: "check-warnings-arm" command: - RUSTFLAGS="-D warnings" cargo check --all-targets retry: automatic: false agents: platform: arm.metal plugins: - docker#v3.0.1: privileged: true image: "rustvmm/dev:v2" always-pull: truekvm-ioctls-0.2.0/.cargo/config0100644€T¡ŽÔ€T 000000001601350315155600152100ustar0000000000000000[target.aarch64-unknown-linux-musl] rustflags = [ "-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc" ] kvm-ioctls-0.2.0/.gitignore0100644€T¡ŽÔ€T 000000000661350315155600146440ustar0000000000000000/target **/*.rs.bk **/.pytest_cache/ **/__pycache__/* kvm-ioctls-0.2.0/CODEOWNERS0100644€T¡ŽÔ€T 000000001701350324253600142420ustar0000000000000000# These owners will be the default owners for everything in # the repo. 
* @acatangiu @aghecenco @andreeaflorescu @sameo kvm-ioctls-0.2.0/Cargo.toml.orig0100644€T¡ŽÔ€T 000000006121350435253400155400ustar0000000000000000[package] name = "kvm-ioctls" version = "0.2.0" authors = ["Amazon Firecracker Team "] description = "Safe wrappers over KVM ioctls" repository = "https://github.com/rust-vmm/kvm-ioctls" readme = "README.md" keywords = ["kvm"] license = "Apache-2.0 OR MIT" [dependencies] libc = ">=0.2.39" kvm-bindings = ">=0.1.0" [dev-dependencies] byteorder = ">=1.2.1" kvm-ioctls-0.2.0/Cargo.toml0000644000000016740000000000000111700ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g. crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "kvm-ioctls" version = "0.2.0" authors = ["Amazon Firecracker Team "] description = "Safe wrappers over KVM ioctls" readme = "README.md" keywords = ["kvm"] license = "Apache-2.0 OR MIT" repository = "https://github.com/rust-vmm/kvm-ioctls" [dependencies.kvm-bindings] version = ">=0.1.0" [dependencies.libc] version = ">=0.2.39" [dev-dependencies.byteorder] version = ">=1.2.1" kvm-ioctls-0.2.0/Cargo.toml.orig0000644000000016750000000000000121300ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. 
If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "kvm-ioctls" version = "0.2.0" authors = ["Amazon Firecracker Team "] description = "Safe wrappers over KVM ioctls" readme = "README.md" keywords = ["kvm"] license = "Apache-2.0 OR MIT" repository = "https://github.com/rust-vmm/kvm-ioctls" [dependencies.kvm-bindings] version = ">=0.1.0" [dependencies.libc] version = ">=0.2.39" [dev-dependencies.byteorder] version = ">=1.2.1" kvm-ioctls-0.2.0/LICENSE-APACHE0100644€T¡ŽÔ€T 000000261361350315155600146060ustar0000000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. kvm-ioctls-0.2.0/LICENSE-MIT0100644€T¡ŽÔ€T 000000020001350315155600142760ustar0000000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
kvm-ioctls-0.2.0/README.md0100644€T¡ŽÔ€T 000000070421350324253600141330ustar0000000000000000[![Build Status](https://badge.buildkite.com/9e0e6c88972a3248a0908506d6946624da84e4e18c0870c4d0.svg)](https://buildkite.com/rust-vmm/kvm-ioctls-ci) ![crates.io](https://img.shields.io/crates/v/kvm-ioctls.svg) # kvm-ioctls The kvm-ioctls crate provides safe wrappers over the [KVM API](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt), a set of ioctls used for creating and configuring Virtual Machines (VMs) on Linux. The ioctls are accessible through four structures: - `Kvm` - wrappers over system ioctls - `VmFd` - wrappers over VM ioctls - `VcpuFd` - wrappers over vCPU ioctls - `DeviceFd` - wrappers over device ioctls For further details check the [KVM API](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt) as well as the code documentation. ## Supported Platforms The kvm-ioctls can be used on x86_64 and aarch64. Right now the aarch64 support is considered experimental. For a production ready version, please check the progress in the corresponding [GitHub issue](https://github.com/rust-vmm/kvm-ioctls/issues/8). ## Running the tests Our Continuous Integration (CI) pipeline is implemented on top of [Buildkite](https://buildkite.com/). For the complete list of tests, check our [CI pipeline](https://buildkite.com/rust-vmm/kvm-ioctls-ci). Each individual test runs in a container. To reproduce a test locally, you can use the dev-container on both x86 and arm64. ```bash docker run --device=/dev/kvm \ -it \ --security-opt seccomp=unconfined \ --volume $(pwd)/kvm-ioctls:/kvm-ioctls \ rustvmm/dev:v2 cd kvm-ioctls/ cargo test ``` ### Test Profiles The integration tests support two test profiles: - **devel**: this is the recommended profile for running the integration tests on a local development machine. - **ci** (default option): this is the profile used when running the integration tests as part of the the Continuous Integration (CI). 
The test profiles are applicable to tests that run using pytest. Currently only the [coverage test](tests/test_coverage.py) follows this model as all the other integration tests are run using the [Buildkite pipeline](https://buildkite.com/rust-vmm/kvm-ioctls-ci). The difference between is declaring tests as passed or failed: - with the **devel** profile the coverage test passes if the current coverage is equal or higher than the upstream coverage value. In case the current coverage is higher, the coverage file is updated to the new coverage value. - with the **ci** profile the coverage test passes only if the current coverage is equal to the upstream coverage value. Further details about the coverage test can be found in the [Adaptive Coverage](#adaptive-coverage) section. ### Adaptive Coverage The line coverage is saved in [tests/coverage](tests/coverage). To update the coverage before submitting a PR, run the coverage test: ```bash docker run --device=/dev/kvm \ -it \ --security-opt seccomp=unconfined \ --volume $(pwd)/kvm-ioctls:/kvm-ioctls \ rustvmm/dev:v2 cd kvm-ioctls/ pytest --profile=devel tests/test_coverage.py ``` If the PR coverage is higher than the upstream coverage, the coverage file needs to be manually added to the commit before submitting the PR: ```bash git add tests/coverage ``` Failing to do so will generate a fail on the CI pipeline when publishing the PR. **NOTE:** The coverage file is only updated in the `devel` test profile. In the `ci` profile the coverage test will fail if the current coverage is higher than the coverage reported in [tests/coverage](tests/coverage). kvm-ioctls-0.2.0/THIRD-PARTY0100644€T¡ŽÔ€T 000000030321350315155600143420ustar0000000000000000// Copyright 2017 The Chromium OS Authors. All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. kvm-ioctls-0.2.0/src/cap.rs0100644€T¡ŽÔ€T 000000126171350324253600145600ustar0000000000000000// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. use kvm_bindings::*; /// Capabilities exposed by KVM. 
/// /// The capabilities list can be used in conjunction with /// [Kvm::check_extension()](struct.Kvm.html#method.check_extension) to check if a particular /// capability is available. /// /// The list of capabilities is based on the the KVM_CAP_* defines from the /// [Linux KVM header](https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/kvm.h). /// #[derive(Clone, Copy, Debug)] #[repr(u32)] // We are allowing docs to be missing here because this enum is a wrapper // over auto-generated code. #[allow(missing_docs)] pub enum Cap { Irqchip = KVM_CAP_IRQCHIP, Hlt = KVM_CAP_HLT, MmuShadowCacheControl = KVM_CAP_MMU_SHADOW_CACHE_CONTROL, UserMemory = KVM_CAP_USER_MEMORY, SetTssAddr = KVM_CAP_SET_TSS_ADDR, Vapic = KVM_CAP_VAPIC, ExtCpuid = KVM_CAP_EXT_CPUID, Clocksource = KVM_CAP_CLOCKSOURCE, NrVcpus = KVM_CAP_NR_VCPUS, NrMemslots = KVM_CAP_NR_MEMSLOTS, Pit = KVM_CAP_PIT, NopIoDelay = KVM_CAP_NOP_IO_DELAY, PvMmu = KVM_CAP_PV_MMU, MpState = KVM_CAP_MP_STATE, CoalescedMmio = KVM_CAP_COALESCED_MMIO, SyncMmu = KVM_CAP_SYNC_MMU, Iommu = KVM_CAP_IOMMU, DestroyMemoryRegionWorks = KVM_CAP_DESTROY_MEMORY_REGION_WORKS, UserNmi = KVM_CAP_USER_NMI, SetGuestDebug = KVM_CAP_SET_GUEST_DEBUG, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ReinjectControl = KVM_CAP_REINJECT_CONTROL, IrqRouting = KVM_CAP_IRQ_ROUTING, IrqInjectStatus = KVM_CAP_IRQ_INJECT_STATUS, AssignDevIrq = KVM_CAP_ASSIGN_DEV_IRQ, JoinMemoryRegionsWorks = KVM_CAP_JOIN_MEMORY_REGIONS_WORKS, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] Mce = KVM_CAP_MCE, Irqfd = KVM_CAP_IRQFD, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] Pit2 = KVM_CAP_PIT2, SetBootCpuId = KVM_CAP_SET_BOOT_CPU_ID, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] PitState2 = KVM_CAP_PIT_STATE2, Ioeventfd = KVM_CAP_IOEVENTFD, SetIdentityMapAddr = KVM_CAP_SET_IDENTITY_MAP_ADDR, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] XenHvm = KVM_CAP_XEN_HVM, AdjustClock = KVM_CAP_ADJUST_CLOCK, 
InternalErrorData = KVM_CAP_INTERNAL_ERROR_DATA, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] VcpuEvents = KVM_CAP_VCPU_EVENTS, S390Psw = KVM_CAP_S390_PSW, PpcSegstate = KVM_CAP_PPC_SEGSTATE, Hyperv = KVM_CAP_HYPERV, HypervVapic = KVM_CAP_HYPERV_VAPIC, HypervSpin = KVM_CAP_HYPERV_SPIN, PciSegment = KVM_CAP_PCI_SEGMENT, PpcPairedSingles = KVM_CAP_PPC_PAIRED_SINGLES, IntrShadow = KVM_CAP_INTR_SHADOW, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] Debugregs = KVM_CAP_DEBUGREGS, X86RobustSinglestep = KVM_CAP_X86_ROBUST_SINGLESTEP, PpcOsi = KVM_CAP_PPC_OSI, PpcUnsetIrq = KVM_CAP_PPC_UNSET_IRQ, EnableCap = KVM_CAP_ENABLE_CAP, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] Xsave = KVM_CAP_XSAVE, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] Xcrs = KVM_CAP_XCRS, PpcGetPvinfo = KVM_CAP_PPC_GET_PVINFO, PpcIrqLevel = KVM_CAP_PPC_IRQ_LEVEL, AsyncPf = KVM_CAP_ASYNC_PF, TscControl = KVM_CAP_TSC_CONTROL, GetTscKhz = KVM_CAP_GET_TSC_KHZ, PpcBookeSregs = KVM_CAP_PPC_BOOKE_SREGS, SpaprTce = KVM_CAP_SPAPR_TCE, PpcSmt = KVM_CAP_PPC_SMT, PpcRma = KVM_CAP_PPC_RMA, MaxVcpus = KVM_CAP_MAX_VCPUS, PpcHior = KVM_CAP_PPC_HIOR, PpcPapr = KVM_CAP_PPC_PAPR, SwTlb = KVM_CAP_SW_TLB, OneReg = KVM_CAP_ONE_REG, S390Gmap = KVM_CAP_S390_GMAP, TscDeadlineTimer = KVM_CAP_TSC_DEADLINE_TIMER, S390Ucontrol = KVM_CAP_S390_UCONTROL, SyncRegs = KVM_CAP_SYNC_REGS, Pci23 = KVM_CAP_PCI_2_3, KvmclockCtrl = KVM_CAP_KVMCLOCK_CTRL, SignalMsi = KVM_CAP_SIGNAL_MSI, PpcGetSmmuInfo = KVM_CAP_PPC_GET_SMMU_INFO, S390Cow = KVM_CAP_S390_COW, PpcAllocHtab = KVM_CAP_PPC_ALLOC_HTAB, ReadonlyMem = KVM_CAP_READONLY_MEM, IrqfdResample = KVM_CAP_IRQFD_RESAMPLE, PpcBookeWatchdog = KVM_CAP_PPC_BOOKE_WATCHDOG, PpcHtabFd = KVM_CAP_PPC_HTAB_FD, S390CssSupport = KVM_CAP_S390_CSS_SUPPORT, PpcEpr = KVM_CAP_PPC_EPR, ArmPsci = KVM_CAP_ARM_PSCI, ArmSetDeviceAddr = KVM_CAP_ARM_SET_DEVICE_ADDR, DeviceCtrl = KVM_CAP_DEVICE_CTRL, IrqMpic = KVM_CAP_IRQ_MPIC, PpcRtas = KVM_CAP_PPC_RTAS, IrqXics = 
KVM_CAP_IRQ_XICS, ArmEl132bit = KVM_CAP_ARM_EL1_32BIT, SpaprMultitce = KVM_CAP_SPAPR_MULTITCE, ExtEmulCpuid = KVM_CAP_EXT_EMUL_CPUID, HypervTime = KVM_CAP_HYPERV_TIME, IoapicPolarityIgnored = KVM_CAP_IOAPIC_POLARITY_IGNORED, EnableCapVm = KVM_CAP_ENABLE_CAP_VM, S390Irqchip = KVM_CAP_S390_IRQCHIP, IoeventfdNoLength = KVM_CAP_IOEVENTFD_NO_LENGTH, VmAttributes = KVM_CAP_VM_ATTRIBUTES, ArmPsci02 = KVM_CAP_ARM_PSCI_0_2, PpcFixupHcall = KVM_CAP_PPC_FIXUP_HCALL, PpcEnableHcall = KVM_CAP_PPC_ENABLE_HCALL, CheckExtensionVm = KVM_CAP_CHECK_EXTENSION_VM, S390UserSigp = KVM_CAP_S390_USER_SIGP, #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] SplitIrqchip = KVM_CAP_SPLIT_IRQCHIP, ImmediateExit = KVM_CAP_IMMEDIATE_EXIT, } kvm-ioctls-0.2.0/src/ioctls/device.rs0100644€T¡ŽÔ€T 000000063631350315155600165530ustar0000000000000000// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT use std::fs::File; use std::io; use std::os::unix::io::{AsRawFd, RawFd}; use kvm_bindings::kvm_device_attr; use ioctls::Result; use kvm_ioctls::KVM_SET_DEVICE_ATTR; use sys_ioctl::ioctl_with_ref; /// Wrapper over the file descriptor obtained when creating an emulated device in the kernel. pub struct DeviceFd { fd: File, } impl DeviceFd { /// Sets a specified piece of device configuration and/or state. /// /// See the documentation for `KVM_SET_DEVICE_ATTR`. /// # Arguments /// /// * `device_attr` - The device attribute to be set. /// pub fn set_device_attr(&self, device_attr: &kvm_device_attr) -> Result<()> { let ret = unsafe { ioctl_with_ref(self, KVM_SET_DEVICE_ATTR(), device_attr) }; if ret != 0 { return Err(io::Error::last_os_error()); } Ok(()) } } /// Helper function for creating a new device. 
pub fn new_device(dev_fd: File) -> DeviceFd { DeviceFd { fd: dev_fd } } impl AsRawFd for DeviceFd { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } } #[cfg(test)] mod tests { use super::*; use ioctls::system::Kvm; use kvm_bindings::{ kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3, kvm_device_type_KVM_DEV_TYPE_VFIO, KVM_CREATE_DEVICE_TEST, KVM_DEV_VFIO_GROUP, KVM_DEV_VFIO_GROUP_ADD, }; #[test] fn test_create_device() { #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] use kvm_bindings::kvm_device_type_KVM_DEV_TYPE_FSL_MPIC_20; let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let mut gic_device = kvm_bindings::kvm_create_device { #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] type_: kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3, #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] type_: kvm_device_type_KVM_DEV_TYPE_FSL_MPIC_20, fd: 0, flags: KVM_CREATE_DEVICE_TEST, }; // This fails on x86_64 because there is no VGIC there. // This fails on aarch64 as it does not use MPIC (MultiProcessor Interrupt Controller), it uses // the VGIC. assert!(vm.create_device(&mut gic_device).is_err()); if cfg!(any(target_arch = "x86", target_arch = "x86_64")) { gic_device.type_ = kvm_device_type_KVM_DEV_TYPE_VFIO; } else if cfg!(any(target_arch = "arm", target_arch = "aarch64")) { gic_device.type_ = kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3; } let device_fd = vm .create_device(&mut gic_device) .expect("Cannot create KVM device"); let dist_attr = kvm_bindings::kvm_device_attr { group: KVM_DEV_VFIO_GROUP, attr: KVM_DEV_VFIO_GROUP_ADD as u64, addr: 0x0, flags: 0, }; // We are just creating a test device. Creating a real device would make the CI dependent // on host configuration (like having /dev/vfio). We expect this to fail. 
assert!(device_fd.set_device_attr(&dist_attr).is_err()); assert_eq!(io::Error::last_os_error().raw_os_error().unwrap(), 25); } } kvm-ioctls-0.2.0/src/ioctls/mod.rs0100644€T¡ŽÔ€T 000000246301350416426200160670ustar0000000000000000// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. use std::io; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] use std::mem::size_of; use std::os::unix::io::AsRawFd; use std::ptr::null_mut; use std::result; use kvm_bindings::kvm_run; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] use kvm_bindings::{kvm_cpuid2, kvm_cpuid_entry2}; /// Wrappers over KVM device ioctls. pub mod device; /// Wrappers over KVM system ioctls. pub mod system; /// Wrappers over KVM VCPU ioctls. pub mod vcpu; /// Wrappers over KVM Virtual Machine ioctls. pub mod vm; /// A specialized `Result` type for KVM ioctls. /// /// This typedef is generally used to avoid writing out io::Error directly and /// is otherwise a direct mapping to Result. pub type Result = result::Result; // Returns a `Vec` with a size in bytes at least as large as `size_in_bytes`. #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn vec_with_size_in_bytes(size_in_bytes: usize) -> Vec { let rounded_size = (size_in_bytes + size_of::() - 1) / size_of::(); let mut v = Vec::with_capacity(rounded_size); for _ in 0..rounded_size { v.push(T::default()) } v } // The kvm API has many structs that resemble the following `Foo` structure: // // ``` // #[repr(C)] // struct Foo { // some_data: u32 // entries: __IncompleteArrayField<__u32>, // } // ``` // // In order to allocate such a structure, `size_of::()` would be too small because it would not // include any space for `entries`. 
To make the allocation large enough while still being aligned // for `Foo`, a `Vec` is created. Only the first element of `Vec` would actually be used // as a `Foo`. The remaining memory in the `Vec` is for `entries`, which must be contiguous // with `Foo`. This function is used to make the `Vec` with enough space for `count` entries. #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn vec_with_array_field(count: usize) -> Vec { let element_space = count * size_of::(); let vec_size_bytes = size_of::() + element_space; vec_with_size_in_bytes(vec_size_bytes) } /// Wrapper over the `kvm_cpuid2` structure. /// /// The structure has a zero length array at the end, hidden behind bounds check. #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub struct CpuId { // Wrapper over `kvm_cpuid2` from which we only use the first element. kvm_cpuid: Vec, // Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2. allocated_len: usize, } #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] impl Clone for CpuId { fn clone(&self) -> Self { let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len()); for _ in 0..self.kvm_cpuid.len() { kvm_cpuid.push(kvm_cpuid2::default()); } let num_bytes = self.kvm_cpuid.len() * size_of::(); let src_byte_slice = unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) }; let dst_byte_slice = unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) }; dst_byte_slice.copy_from_slice(src_byte_slice); CpuId { kvm_cpuid, allocated_len: self.allocated_len, } } } #[cfg(test)] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] impl PartialEq for CpuId { fn eq(&self, other: &CpuId) -> bool { let entries: &[kvm_cpuid_entry2] = unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) }; let other_entries: &[kvm_cpuid_entry2] = unsafe { self.kvm_cpuid[0].entries.as_slice(other.allocated_len) }; self.allocated_len == other.allocated_len && entries == other_entries } } 
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] impl CpuId { /// Creates a new `CpuId` structure that contains at most `array_len` KVM CPUID entries. /// /// # Arguments /// /// * `array_len` - Maximum number of CPUID entries. /// /// # Example /// /// ``` /// use kvm_ioctls::CpuId; /// let cpu_id = CpuId::new(32); /// ``` pub fn new(array_len: usize) -> CpuId { let mut kvm_cpuid = vec_with_array_field::(array_len); kvm_cpuid[0].nent = array_len as u32; CpuId { kvm_cpuid, allocated_len: array_len, } } /// Creates a new `CpuId` structure based on a supplied vector of `kvm_cpuid_entry2`. /// /// # Arguments /// /// * `entries` - The vector of `kvm_cpuid_entry2` entries. /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// extern crate kvm_bindings; /// /// use kvm_bindings::kvm_cpuid_entry2; /// use kvm_ioctls::CpuId; /// // Create a Cpuid to hold one entry. /// let mut cpuid = CpuId::new(1); /// let mut entries = cpuid.mut_entries_slice().to_vec(); /// let new_entry = kvm_cpuid_entry2 { /// function: 0x4, /// index: 0, /// flags: 1, /// eax: 0b1100000, /// ebx: 0, /// ecx: 0, /// edx: 0, /// padding: [0, 0, 0], /// }; /// entries.insert(0, new_entry); /// cpuid = CpuId::from_entries(&entries); /// ``` /// pub fn from_entries(entries: &[kvm_cpuid_entry2]) -> CpuId { let mut kvm_cpuid = vec_with_array_field::(entries.len()); kvm_cpuid[0].nent = entries.len() as u32; unsafe { kvm_cpuid[0] .entries .as_mut_slice(entries.len()) .copy_from_slice(entries); } CpuId { kvm_cpuid, allocated_len: entries.len(), } } /// Returns the mutable entries slice so they can be modified before passing to the VCPU. 
/// /// # Example /// ```rust /// use kvm_ioctls::{CpuId, Kvm, MAX_KVM_CPUID_ENTRIES}; /// let kvm = Kvm::new().unwrap(); /// let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); /// let cpuid_entries = cpuid.mut_entries_slice(); /// ``` /// pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] { // Mapping the unsized array to a slice is unsafe because the length isn't known. Using // the length we originally allocated with eliminates the possibility of overflow. if self.kvm_cpuid[0].nent as usize > self.allocated_len { self.kvm_cpuid[0].nent = self.allocated_len as u32; } let nent = self.kvm_cpuid[0].nent as usize; unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) } } /// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe. /// pub fn as_ptr(&self) -> *const kvm_cpuid2 { &self.kvm_cpuid[0] } /// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe. /// pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 { &mut self.kvm_cpuid[0] } } /// Safe wrapper over the `kvm_run` struct. /// /// The wrapper is needed for sending the pointer to `kvm_run` between /// threads as raw pointers do not implement `Send` and `Sync`. pub struct KvmRunWrapper { kvm_run_ptr: *mut u8, // This field is need so we can `munmap` the memory mapped to hold `kvm_run`. mmap_size: usize, } // Send and Sync aren't automatically inherited for the raw address pointer. // Accessing that pointer is only done through the stateless interface which // allows the object to be shared by multiple threads without a decrease in // safety. unsafe impl Send for KvmRunWrapper {} unsafe impl Sync for KvmRunWrapper {} impl KvmRunWrapper { /// Maps the first `size` bytes of the given `fd`. /// /// # Arguments /// * `fd` - File descriptor to mmap from. /// * `size` - Size of memory region in bytes. 
pub fn mmap_from_fd(fd: &AsRawFd, size: usize) -> Result { // This is safe because we are creating a mapping in a place not already used by any other // area in this process. let addr = unsafe { libc::mmap( null_mut(), size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_SHARED, fd.as_raw_fd(), 0, ) }; if addr == libc::MAP_FAILED { return Err(io::Error::last_os_error()); } Ok(KvmRunWrapper { kvm_run_ptr: addr as *mut u8, mmap_size: size, }) } /// Returns a mutable reference to `kvm_run`. /// #[allow(clippy::mut_from_ref)] pub fn as_mut_ref(&self) -> &mut kvm_run { // Safe because we know we mapped enough memory to hold the kvm_run struct because the // kernel told us how large it was. #[allow(clippy::cast_ptr_alignment)] unsafe { &mut *(self.kvm_run_ptr as *mut kvm_run) } } } impl Drop for KvmRunWrapper { fn drop(&mut self) { // This is safe because we mmap the area at kvm_run_ptr ourselves, // and nobody else is holding a reference to it. unsafe { libc::munmap(self.kvm_run_ptr as *mut libc::c_void, self.mmap_size); } } } #[cfg(test)] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] mod tests { use super::*; #[test] fn test_cpuid_from_entries() { let num_entries = 4; let mut cpuid = CpuId::new(num_entries); // add entry let mut entries = cpuid.mut_entries_slice().to_vec(); let new_entry = kvm_cpuid_entry2 { function: 0x4, index: 0, flags: 1, eax: 0b1100000, ebx: 0, ecx: 0, edx: 0, padding: [0, 0, 0], }; entries.insert(0, new_entry); cpuid = CpuId::from_entries(&entries); // check that the cpuid contains the new entry assert_eq!(cpuid.allocated_len, num_entries + 1); assert_eq!(cpuid.kvm_cpuid[0].nent, (num_entries + 1) as u32); assert_eq!(cpuid.mut_entries_slice().len(), num_entries + 1); assert_eq!(cpuid.mut_entries_slice()[0], new_entry); } } kvm-ioctls-0.2.0/src/ioctls/system.rs0100644€T¡ŽÔ€T 000000373161350315155600166420ustar0000000000000000// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 OR MIT // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] use kvm_bindings::kvm_msr_list; use libc::{open, O_CLOEXEC, O_RDWR}; use std::fs::File; use std::io; use std::os::raw::{c_char, c_ulong}; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use cap::Cap; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] use ioctls::vec_with_array_field; use ioctls::vm::{new_vmfd, VmFd}; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] use ioctls::CpuId; use ioctls::Result; use kvm_ioctls::*; use sys_ioctl::*; /// Wrapper over KVM system ioctls. pub struct Kvm { kvm: File, } impl Kvm { /// Opens `/dev/kvm` and returns a `Kvm` object on success. /// /// # Example /// /// ``` /// use kvm_ioctls::Kvm; /// let kvm = Kvm::new().unwrap(); /// ``` /// #[allow(clippy::new_ret_no_self)] pub fn new() -> Result { // Open `/dev/kvm` using `O_CLOEXEC` flag. let fd = Self::open_with_cloexec(true)?; // Safe because we verify that ret is valid and we own the fd. Ok(unsafe { Self::new_with_fd_number(fd) }) } /// Creates a new Kvm object assuming `fd` represents an existing open file descriptor /// associated with `/dev/kvm`. /// /// For usage examples check [open_with_cloexec()](struct.Kvm.html#method.open_with_cloexec). /// /// # Arguments /// /// * `fd` - File descriptor for `/dev/kvm`. /// pub unsafe fn new_with_fd_number(fd: RawFd) -> Self { Kvm { kvm: File::from_raw_fd(fd), } } /// Opens `/dev/kvm` and returns the fd number on success. /// /// One usecase for this method is opening `/dev/kvm` before exec-ing into a /// process with seccomp filters enabled that blacklist the `sys_open` syscall. /// For this usecase `open_with_cloexec` must be called with the `close_on_exec` /// parameter set to false. 
/// /// # Arguments /// /// * `close_on_exec`: If true opens `/dev/kvm` using the `O_CLOEXEC` flag. /// /// # Example /// /// ``` /// # use kvm_ioctls::Kvm; /// let kvm_fd = Kvm::open_with_cloexec(false).unwrap(); /// // The `kvm_fd` can now be passed to another process where we can use /// // `new_with_fd_number` for creating a `Kvm` object: /// let kvm = unsafe { Kvm::new_with_fd_number(kvm_fd) }; /// ``` /// pub fn open_with_cloexec(close_on_exec: bool) -> Result { let open_flags = O_RDWR | if close_on_exec { O_CLOEXEC } else { 0 }; // Safe because we give a constant nul-terminated string and verify the result. let ret = unsafe { open("/dev/kvm\0".as_ptr() as *const c_char, open_flags) }; if ret < 0 { Err(io::Error::last_os_error()) } else { Ok(ret) } } /// Returns the KVM API version. /// /// See the documentation for `KVM_GET_API_VERSION`. /// /// # Example /// /// ``` /// # use kvm_ioctls::Kvm; /// let kvm = Kvm::new().unwrap(); /// assert_eq!(kvm.get_api_version(), 12); /// ``` /// pub fn get_api_version(&self) -> i32 { // Safe because we know that our file is a KVM fd and that the request is one of the ones // defined by kernel. unsafe { ioctl(self, KVM_GET_API_VERSION()) } } /// Wrapper over `KVM_CHECK_EXTENSION`. /// /// Returns 0 if the capability is not available and a positive integer otherwise. fn check_extension_int(&self, c: Cap) -> i32 { // Safe because we know that our file is a KVM fd and that the extension is one of the ones // defined by kernel. unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), c as c_ulong) } } /// Checks if a particular `Cap` is available. /// /// Returns true if the capability is supported and false otherwise. /// See the documentation for `KVM_CHECK_EXTENSION`. /// /// # Arguments /// /// * `c` - KVM capability to check. /// /// # Example /// /// ``` /// # use kvm_ioctls::Kvm; /// use kvm_ioctls::Cap; /// /// let kvm = Kvm::new().unwrap(); /// // Check if `KVM_CAP_USER_MEMORY` is supported. 
/// assert!(kvm.check_extension(Cap::UserMemory)); /// ``` /// pub fn check_extension(&self, c: Cap) -> bool { self.check_extension_int(c) > 0 } /// Returns the size of the memory mapping required to use the vcpu's `kvm_run` structure. /// /// See the documentation for `KVM_GET_VCPU_MMAP_SIZE`. /// /// # Example /// /// ``` /// # use kvm_ioctls::Kvm; /// let kvm = Kvm::new().unwrap(); /// assert!(kvm.get_vcpu_mmap_size().unwrap() > 0); /// ``` /// pub fn get_vcpu_mmap_size(&self) -> Result { // Safe because we know that our file is a KVM fd and we verify the return result. let res = unsafe { ioctl(self, KVM_GET_VCPU_MMAP_SIZE()) }; if res > 0 { Ok(res as usize) } else { Err(io::Error::last_os_error()) } } /// Gets the recommended number of VCPUs per VM. /// /// See the documentation for `KVM_CAP_NR_VCPUS`. /// Default to 4 when `KVM_CAP_NR_VCPUS` is not implemented. /// /// # Example /// /// ``` /// # use kvm_ioctls::Kvm; /// let kvm = Kvm::new().unwrap(); /// // We expect the number of vCPUs to be > 0 as per KVM API documentation. /// assert!(kvm.get_nr_vcpus() > 0); /// ``` /// pub fn get_nr_vcpus(&self) -> usize { let x = self.check_extension_int(Cap::NrVcpus); if x > 0 { x as usize } else { 4 } } /// Returns the maximum allowed memory slots per VM. /// /// KVM reports the number of available memory slots (`KVM_CAP_NR_MEMSLOTS`) /// using the extension interface. Both x86 and s390 implement this, ARM /// and powerpc do not yet enable it. /// Default to 32 when `KVM_CAP_NR_MEMSLOTS` is not implemented. /// /// # Example /// /// ``` /// # use kvm_ioctls::Kvm; /// let kvm = Kvm::new().unwrap(); /// assert!(kvm.get_nr_memslots() > 0); /// ``` /// pub fn get_nr_memslots(&self) -> usize { let x = self.check_extension_int(Cap::NrMemslots); if x > 0 { x as usize } else { 32 } } /// Gets the recommended maximum number of VCPUs per VM. /// /// See the documentation for `KVM_CAP_MAX_VCPUS`. 
/// Returns [get_nr_vcpus()](struct.Kvm.html#method.get_nr_vcpus) when /// `KVM_CAP_MAX_VCPUS` is not implemented. /// /// # Example /// /// ``` /// # use kvm_ioctls::Kvm; /// let kvm = Kvm::new().unwrap(); /// assert!(kvm.get_max_vcpus() > 0); /// ``` /// pub fn get_max_vcpus(&self) -> usize { match self.check_extension_int(Cap::MaxVcpus) { 0 => self.get_nr_vcpus(), x => x as usize, } } #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn get_cpuid(&self, kind: u64, max_entries_count: usize) -> Result { let mut cpuid = CpuId::new(max_entries_count); let ret = unsafe { // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory // allocated for the struct. The limit is read from nent, which is set to the allocated // size(max_entries_count) above. ioctl_with_mut_ptr(self, kind, cpuid.as_mut_ptr()) }; if ret < 0 { return Err(io::Error::last_os_error()); } Ok(cpuid) } /// X86 specific call to get the system emulated CPUID values. /// /// See the documentation for `KVM_GET_EMULATED_CPUID`. /// /// # Arguments /// /// * `max_entries_count` - Maximum number of CPUID entries. This function can return less than /// this when the hardware does not support so many CPUID entries. /// /// # Example /// /// ``` /// use kvm_ioctls::{Kvm, MAX_KVM_CPUID_ENTRIES}; /// /// let kvm = Kvm::new().unwrap(); /// let mut cpuid = kvm.get_emulated_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); /// let cpuid_entries = cpuid.mut_entries_slice(); /// assert!(cpuid_entries.len() <= MAX_KVM_CPUID_ENTRIES); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn get_emulated_cpuid(&self, max_entries_count: usize) -> Result { self.get_cpuid(KVM_GET_EMULATED_CPUID(), max_entries_count) } /// X86 specific call to get the system supported CPUID values. /// /// See the documentation for `KVM_GET_SUPPORTED_CPUID`. /// /// # Arguments /// /// * `max_entries_count` - Maximum number of CPUID entries. 
This function can return less than /// this when the hardware does not support so many CPUID entries. /// /// # Example /// /// ``` /// use kvm_ioctls::{Kvm, MAX_KVM_CPUID_ENTRIES}; /// /// let kvm = Kvm::new().unwrap(); /// let mut cpuid = kvm.get_emulated_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); /// let cpuid_entries = cpuid.mut_entries_slice(); /// assert!(cpuid_entries.len() <= MAX_KVM_CPUID_ENTRIES); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn get_supported_cpuid(&self, max_entries_count: usize) -> Result { self.get_cpuid(KVM_GET_SUPPORTED_CPUID(), max_entries_count) } /// X86 specific call to get list of supported MSRS /// /// See the documentation for `KVM_GET_MSR_INDEX_LIST`. /// /// # Example /// /// ``` /// use kvm_ioctls::{Kvm, MAX_KVM_CPUID_ENTRIES}; /// /// let kvm = Kvm::new().unwrap(); /// let msr_index_list = kvm.get_msr_index_list().unwrap(); /// ``` #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn get_msr_index_list(&self) -> Result> { const MAX_KVM_MSR_ENTRIES: usize = 256; let mut msr_list = vec_with_array_field::(MAX_KVM_MSR_ENTRIES); msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32; let ret = unsafe { // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory // allocated for the struct. The limit is read from nmsrs, which is set to the allocated // size (MAX_KVM_MSR_ENTRIES) above. ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0]) }; if ret < 0 { return Err(io::Error::last_os_error()); } let mut nmsrs = msr_list[0].nmsrs; // Mapping the unsized array to a slice is unsafe because the length isn't known. Using // the length we originally allocated with eliminates the possibility of overflow. let indices: &[u32] = unsafe { if nmsrs > MAX_KVM_MSR_ENTRIES as u32 { nmsrs = MAX_KVM_MSR_ENTRIES as u32; } msr_list[0].indices.as_slice(nmsrs as usize) }; Ok(indices.to_vec()) } /// Creates a VM fd using the KVM fd. /// /// See the documentation for `KVM_CREATE_VM`. 
/// A call to this function will also initialize the size of the vcpu mmap area using the /// `KVM_GET_VCPU_MMAP_SIZE` ioctl. /// /// # Example /// /// ``` /// # use kvm_ioctls::Kvm; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// // Check that the VM mmap size is the same reported by `KVM_GET_VCPU_MMAP_SIZE`. /// assert!(vm.run_size() == kvm.get_vcpu_mmap_size().unwrap()); /// ``` /// pub fn create_vm(&self) -> Result { // Safe because we know `self.kvm` is a real KVM fd as this module is the only one that // create Kvm objects. let ret = unsafe { ioctl(&self.kvm, KVM_CREATE_VM()) }; if ret >= 0 { // Safe because we verify the value of ret and we are the owners of the fd. let vm_file = unsafe { File::from_raw_fd(ret) }; let run_mmap_size = self.get_vcpu_mmap_size()?; Ok(new_vmfd(vm_file, run_mmap_size)) } else { Err(io::Error::last_os_error()) } } } impl AsRawFd for Kvm { fn as_raw_fd(&self) -> RawFd { self.kvm.as_raw_fd() } } #[cfg(test)] mod tests { use super::*; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] use MAX_KVM_CPUID_ENTRIES; #[test] fn test_kvm_new() { Kvm::new().unwrap(); } #[test] fn test_kvm_api_version() { let kvm = Kvm::new().unwrap(); assert_eq!(kvm.get_api_version(), 12); assert!(kvm.check_extension(Cap::UserMemory)); } #[test] fn test_kvm_getters() { let kvm = Kvm::new().unwrap(); // vCPU related getters let nr_vcpus = kvm.get_nr_vcpus(); assert!(nr_vcpus >= 4); assert!(kvm.get_max_vcpus() >= nr_vcpus); // Memory related getters assert!(kvm.get_vcpu_mmap_size().unwrap() > 0); assert!(kvm.get_nr_memslots() >= 32); } #[test] fn test_create_vm() { let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); assert_eq!(vm.run_size(), kvm.get_vcpu_mmap_size().unwrap()); } #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] #[test] fn test_get_supported_cpuid() { let kvm = Kvm::new().unwrap(); let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); let cpuid_entries = 
cpuid.mut_entries_slice(); assert!(cpuid_entries.len() > 0); assert!(cpuid_entries.len() <= MAX_KVM_CPUID_ENTRIES); } #[test] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn test_get_emulated_cpuid() { let kvm = Kvm::new().unwrap(); let mut cpuid = kvm.get_emulated_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); let cpuid_entries = cpuid.mut_entries_slice(); assert!(cpuid_entries.len() > 0); assert!(cpuid_entries.len() <= MAX_KVM_CPUID_ENTRIES); } #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] #[test] fn test_cpuid_clone() { let kvm = Kvm::new().unwrap(); let cpuid_1 = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); let mut cpuid_2 = cpuid_1.clone(); assert!(cpuid_1 == cpuid_2); cpuid_2 = unsafe { std::mem::zeroed() }; assert!(cpuid_1 != cpuid_2); } #[test] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn get_msr_index_list() { let kvm = Kvm::new().unwrap(); let msr_list = kvm.get_msr_index_list().unwrap(); assert!(msr_list.len() >= 2); } fn get_raw_errno(result: super::Result) -> i32 { result.err().unwrap().raw_os_error().unwrap() } #[test] fn test_bad_kvm_fd() { let badf_errno = libc::EBADF; let faulty_kvm = Kvm { kvm: unsafe { File::from_raw_fd(-1) }, }; assert_eq!(get_raw_errno(faulty_kvm.get_vcpu_mmap_size()), badf_errno); assert_eq!(faulty_kvm.get_nr_vcpus(), 4); assert_eq!(faulty_kvm.get_nr_memslots(), 32); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { assert_eq!(get_raw_errno(faulty_kvm.get_emulated_cpuid(4)), badf_errno); assert_eq!(get_raw_errno(faulty_kvm.get_supported_cpuid(4)), badf_errno); assert_eq!(get_raw_errno(faulty_kvm.get_msr_index_list()), badf_errno); } assert_eq!(get_raw_errno(faulty_kvm.create_vm()), badf_errno); } } kvm-ioctls-0.2.0/src/ioctls/vcpu.rs0100644€T¡ŽÔ€T 000001232151350416426200162640ustar0000000000000000// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT // // Portions Copyright 2017 The Chromium OS Authors. 
All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. use kvm_bindings::*; use libc::EINVAL; use std::fs::File; use std::io; use std::os::unix::io::{AsRawFd, RawFd}; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] use ioctls::CpuId; use ioctls::{KvmRunWrapper, Result}; use kvm_ioctls::*; use sys_ioctl::*; /// Reasons for vCPU exits. /// /// The exit reasons are mapped to the `KVM_EXIT_*` defines in the /// [Linux KVM header](https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/kvm.h). /// #[derive(Debug)] pub enum VcpuExit<'a> { /// An out port instruction was run on the given port with the given data. IoOut(u16 /* port */, &'a [u8] /* data */), /// An in port instruction was run on the given port. /// /// The given slice should be filled in before [run()](struct.VcpuFd.html#method.run) /// is called again. IoIn(u16 /* port */, &'a mut [u8] /* data */), /// A read instruction was run against the given MMIO address. /// /// The given slice should be filled in before [run()](struct.VcpuFd.html#method.run) /// is called again. MmioRead(u64 /* address */, &'a mut [u8]), /// A write instruction was run against the given MMIO address with the given data. MmioWrite(u64 /* address */, &'a [u8]), /// Corresponds to KVM_EXIT_UNKNOWN. Unknown, /// Corresponds to KVM_EXIT_EXCEPTION. Exception, /// Corresponds to KVM_EXIT_HYPERCALL. Hypercall, /// Corresponds to KVM_EXIT_DEBUG. Debug, /// Corresponds to KVM_EXIT_HLT. Hlt, /// Corresponds to KVM_EXIT_IRQ_WINDOW_OPEN. IrqWindowOpen, /// Corresponds to KVM_EXIT_SHUTDOWN. Shutdown, /// Corresponds to KVM_EXIT_FAIL_ENTRY. FailEntry, /// Corresponds to KVM_EXIT_INTR. Intr, /// Corresponds to KVM_EXIT_SET_TPR. SetTpr, /// Corresponds to KVM_EXIT_TPR_ACCESS. TprAccess, /// Corresponds to KVM_EXIT_S390_SIEIC. S390Sieic, /// Corresponds to KVM_EXIT_S390_RESET. S390Reset, /// Corresponds to KVM_EXIT_DCR. Dcr, /// Corresponds to KVM_EXIT_NMI. 
Nmi, /// Corresponds to KVM_EXIT_INTERNAL_ERROR. InternalError, /// Corresponds to KVM_EXIT_OSI. Osi, /// Corresponds to KVM_EXIT_PAPR_HCALL. PaprHcall, /// Corresponds to KVM_EXIT_S390_UCONTROL. S390Ucontrol, /// Corresponds to KVM_EXIT_WATCHDOG. Watchdog, /// Corresponds to KVM_EXIT_S390_TSCH. S390Tsch, /// Corresponds to KVM_EXIT_EPR. Epr, /// Corresponds to KVM_EXIT_SYSTEM_EVENT. SystemEvent, /// Corresponds to KVM_EXIT_S390_STSI. S390Stsi, /// Corresponds to KVM_EXIT_IOAPIC_EOI. IoapicEoi(u8 /* vector */), /// Corresponds to KVM_EXIT_HYPERV. Hyperv, } /// Wrapper over KVM vCPU ioctls. pub struct VcpuFd { vcpu: File, kvm_run_ptr: KvmRunWrapper, } impl VcpuFd { /// Returns the vCPU general purpose registers. /// /// The registers are returned in a `kvm_regs` structure as defined in the /// [KVM API documentation](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// See documentation for `KVM_GET_REGS`. /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] /// let regs = vcpu.get_regs().unwrap(); /// ``` /// #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] pub fn get_regs(&self) -> Result { // Safe because we know that our file is a vCPU fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let mut regs = unsafe { std::mem::zeroed() }; let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS(), &mut regs) }; if ret != 0 { return Err(io::Error::last_os_error()); } Ok(regs) } /// Sets the vCPU general purpose registers using the `KVM_SET_REGS` ioctl. /// /// # Arguments /// /// * `regs` - general purpose registers. 
For details check the `kvm_regs` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// /// #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] { /// // Get the current vCPU registers. /// let mut regs = vcpu.get_regs().unwrap(); /// // Set a new value for the Instruction Pointer. /// regs.rip = 0x100; /// vcpu.set_regs(®s).unwrap(); /// } /// ``` /// #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] pub fn set_regs(&self, regs: &kvm_regs) -> Result<()> { // Safe because we know that our file is a vCPU fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_REGS(), regs) }; if ret != 0 { return Err(io::Error::last_os_error()); } Ok(()) } /// Returns the vCPU special registers. /// /// The registers are returned in a `kvm_sregs` structure as defined in the /// [KVM API documentation](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// See documentation for `KVM_GET_SREGS`. /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] /// let sregs = vcpu.get_sregs().unwrap(); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn get_sregs(&self) -> Result { // Safe because we know that our file is a vCPU fd, we know the kernel will only write the // correct amount of memory to our pointer, and we verify the return result. 
let mut regs = kvm_sregs::default(); let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) }; if ret != 0 { return Err(io::Error::last_os_error()); } Ok(regs) } /// Sets the vCPU special registers using the `KVM_SET_SREGS` ioctl. /// /// # Arguments /// /// * `sregs` - Special registers. For details check the `kvm_sregs` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] { /// let mut sregs = vcpu.get_sregs().unwrap(); /// // Update the code segment (cs). /// sregs.cs.base = 0; /// sregs.cs.selector = 0; /// vcpu.set_sregs(&sregs).unwrap(); /// } /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn set_sregs(&self, sregs: &kvm_sregs) -> Result<()> { // Safe because we know that our file is a vCPU fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS(), sregs) }; if ret != 0 { return Err(io::Error::last_os_error()); } Ok(()) } /// Returns the floating point state (FPU) from the vCPU. /// /// The state is returned in a `kvm_fpu` structure as defined in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// See the documentation for `KVM_GET_FPU`. 
/// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] /// let fpu = vcpu.get_fpu().unwrap(); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn get_fpu(&self) -> Result { let mut fpu = kvm_fpu::default(); let ret = unsafe { // Here we trust the kernel not to read past the end of the kvm_fpu struct. ioctl_with_mut_ref(self, KVM_GET_FPU(), &mut fpu) }; if ret != 0 { return Err(io::Error::last_os_error()); } Ok(fpu) } /// Set the floating point state (FPU) of a vCPU using the `KVM_SET_FPU` ioct. /// /// # Arguments /// /// * `fpu` - FPU configuration. For details check the `kvm_fpu` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # extern crate kvm_bindings; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// # use kvm_bindings::kvm_fpu; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] { /// let KVM_FPU_CWD: u16 = 0x37f; /// let fpu = kvm_fpu { /// fcw: KVM_FPU_CWD, /// ..Default::default() /// }; /// vcpu.set_fpu(&fpu).unwrap(); /// } /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn set_fpu(&self, fpu: &kvm_fpu) -> Result<()> { let ret = unsafe { // Here we trust the kernel not to read past the end of the kvm_fpu struct. ioctl_with_ref(self, KVM_SET_FPU(), fpu) }; if ret < 0 { return Err(io::Error::last_os_error()); } Ok(()) } /// X86 specific call to setup the CPUID registers. /// /// See the documentation for `KVM_SET_CPUID2`. /// /// # Arguments /// /// * `cpuid` - CPUID registers. 
/// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # extern crate kvm_bindings; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd, MAX_KVM_CPUID_ENTRIES}; /// # use kvm_bindings::kvm_fpu; /// let kvm = Kvm::new().unwrap(); /// let mut kvm_cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// /// // Update the CPUID entries to disable the EPB feature. /// const ECX_EPB_SHIFT: u32 = 3; /// { /// let entries = kvm_cpuid.mut_entries_slice(); /// for entry in entries.iter_mut() { /// match entry.function { /// 6 => entry.ecx &= !(1 << ECX_EPB_SHIFT), /// _ => (), /// } /// } /// } /// /// vcpu.set_cpuid2(&kvm_cpuid); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn set_cpuid2(&self, cpuid: &CpuId) -> Result<()> { let ret = unsafe { // Here we trust the kernel not to read past the end of the kvm_cpuid2 struct. ioctl_with_ptr(self, KVM_SET_CPUID2(), cpuid.as_ptr()) }; if ret < 0 { return Err(io::Error::last_os_error()); } Ok(()) } /// Returns the state of the LAPIC (Local Advanced Programmable Interrupt Controller). /// /// The state is returned in a `kvm_lapic_state` structure as defined in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// See the documentation for `KVM_GET_LAPIC`. /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// // For `get_lapic` to work, you first need to create a IRQ chip before creating the vCPU. 
/// vm.create_irq_chip().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// let lapic = vcpu.get_lapic().unwrap(); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn get_lapic(&self) -> Result { let mut klapic = kvm_lapic_state::default(); let ret = unsafe { // The ioctl is unsafe unless you trust the kernel not to write past the end of the // local_apic struct. ioctl_with_mut_ref(self, KVM_GET_LAPIC(), &mut klapic) }; if ret < 0 { return Err(io::Error::last_os_error()); } Ok(klapic) } /// Sets the state of the LAPIC (Local Advanced Programmable Interrupt Controller). /// /// See the documentation for `KVM_SET_LAPIC`. /// /// # Arguments /// /// * `klapic` - LAPIC state. For details check the `kvm_lapic_state` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// use std::io::Write; /// /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// // For `get_lapic` to work, you first need to create an IRQ chip before creating the vCPU. /// vm.create_irq_chip().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// let mut lapic = vcpu.get_lapic().unwrap(); /// /// // Write to APIC_ICR offset the value 2. /// let apic_icr_offset = 0x300; /// let write_value: &[u8] = &[2, 0, 0, 0]; /// let mut apic_icr_slice = /// unsafe { &mut *(&mut lapic.regs[apic_icr_offset..] as *mut [i8] as *mut [u8]) }; /// apic_icr_slice.write(write_value).unwrap(); /// /// // Update the value of LAPIC. /// vcpu.set_lapic(&lapic).unwrap(); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> { let ret = unsafe { // The ioctl is safe because the kernel will only read from the klapic struct. 
ioctl_with_ref(self, KVM_SET_LAPIC(), klapic) }; if ret < 0 { return Err(io::Error::last_os_error()); } Ok(()) } /// Returns the model-specific registers (MSR) for this vCPU. /// /// It emulates `KVM_GET_MSRS` ioctl's behavior by returning the number of MSRs /// successfully read upon success or the last error number in case of failure. /// The MSRs are returned in the `msrs` method argument. /// /// # Arguments /// /// * `msrs` - MSRs (input/output). For details check the `kvm_msrs` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # extern crate kvm_bindings; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// # use kvm_bindings::kvm_msrs; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// let mut msrs = kvm_msrs::default(); /// vcpu.get_msrs(&mut msrs).unwrap(); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn get_msrs(&self, msrs: &mut kvm_msrs) -> Result<i32> { let ret = unsafe { // Here we trust the kernel not to read past the end of the kvm_msrs struct. ioctl_with_mut_ref(self, KVM_GET_MSRS(), msrs) }; if ret < 0 { return Err(io::Error::last_os_error()); } Ok(ret) } /// Setup the model-specific registers (MSR) for this vCPU. /// /// See the documentation for `KVM_SET_MSRS`. /// /// # Arguments /// /// * `msrs` - MSRs. For details check the `kvm_msrs` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). 
/// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # extern crate kvm_bindings; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// # use kvm_bindings::{kvm_msrs, kvm_msr_entry}; /// # use std::mem; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// let mut msrs = kvm_msrs::default(); /// vcpu.get_msrs(&mut msrs).unwrap(); /// /// let msrs_entries = { /// kvm_msr_entry { /// index: 0x0000_0174, /// ..Default::default() /// } /// }; /// /// // Create a vector large enough to hold the MSR entry defined above in /// // a `kvm_msrs`structure. /// let msrs_vec: Vec = /// Vec::with_capacity(mem::size_of::() + mem::size_of::()); /// let mut msrs: &mut kvm_msrs = unsafe { /// &mut *(msrs_vec.as_ptr() as *mut kvm_msrs) /// }; /// msrs.nmsrs = 1; /// vcpu.set_msrs(msrs).unwrap(); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn set_msrs(&self, msrs: &kvm_msrs) -> Result<()> { let ret = unsafe { // Here we trust the kernel not to read past the end of the kvm_msrs struct. ioctl_with_ref(self, KVM_SET_MSRS(), msrs) }; if ret < 0 { // KVM_SET_MSRS actually returns the number of msr entries written. return Err(io::Error::last_os_error()); } Ok(()) } /// Sets the type of CPU to be exposed to the guest and optional features. /// /// This initializes an ARM vCPU to the specified type with the specified features /// and resets the values of all of its registers to defaults. See the documentation for /// `KVM_ARM_VCPU_INIT`. /// /// # Arguments /// /// * `kvi` - information about preferred CPU target type and recommended features for it. /// For details check the `kvm_vcpu_init` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). 
/// /// # Example /// ```rust /// # extern crate kvm_ioctls; /// # extern crate kvm_bindings; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// use kvm_bindings::kvm_vcpu_init; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let vcpu = vm.create_vcpu(0).unwrap(); /// /// let mut kvi = kvm_vcpu_init::default(); /// vm.get_preferred_target(&mut kvi).unwrap(); /// vcpu.vcpu_init(&kvi).unwrap(); /// ``` /// #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] pub fn vcpu_init(&self, kvi: &kvm_vcpu_init) -> Result<()> { // This is safe because we allocated the struct and we know the kernel will read // exactly the size of the struct. let ret = unsafe { ioctl_with_ref(self, KVM_ARM_VCPU_INIT(), kvi) }; if ret < 0 { return Err(io::Error::last_os_error()); } Ok(()) } /// Sets the value of one register for this vCPU. /// /// The id of the register is encoded as specified in the kernel documentation /// for `KVM_SET_ONE_REG`. /// /// # Arguments /// /// * `reg_id` - ID of the register for which we are setting the value. /// * `data` - value for the specified register. /// #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] pub fn set_one_reg(&self, reg_id: u64, data: u64) -> Result<()> { let data_ref = &data as *const u64; let onereg = kvm_one_reg { id: reg_id, addr: data_ref as u64, }; // This is safe because we allocated the struct and we know the kernel will read // exactly the size of the struct. let ret = unsafe { ioctl_with_ref(self, KVM_SET_ONE_REG(), &onereg) }; if ret < 0 { return Err(io::Error::last_os_error()); } Ok(()) } /// Triggers the running of the current virtual CPU returning an exit reason. /// /// See documentation for `KVM_RUN`. 
/// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # extern crate kvm_bindings; /// # use std::io::Write; /// # use std::ptr::null_mut; /// # use std::slice; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd, VcpuExit}; /// # use kvm_bindings::{kvm_userspace_memory_region, KVM_MEM_LOG_DIRTY_PAGES}; /// # let kvm = Kvm::new().unwrap(); /// # let vm = kvm.create_vm().unwrap(); /// // This is a dummy example for running on x86 based on https://lwn.net/Articles/658511/. /// #[cfg(target_arch = "x86_64")] { /// let mem_size = 0x4000; /// let guest_addr: u64 = 0x1000; /// let load_addr: *mut u8 = unsafe { /// libc::mmap( /// null_mut(), /// mem_size, /// libc::PROT_READ | libc::PROT_WRITE, /// libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE, /// -1, /// 0, /// ) as *mut u8 /// }; /// /// let mem_region = kvm_userspace_memory_region { /// slot: 0, /// guest_phys_addr: guest_addr, /// memory_size: mem_size as u64, /// userspace_addr: load_addr as u64, /// flags: 0, /// }; /// unsafe { vm.set_user_memory_region(mem_region).unwrap() }; /// /// // Dummy x86 code that just calls halt. /// let x86_code = [ /// 0xf4, /* hlt */ /// ]; /// /// // Write the code in the guest memory. This will generate a dirty page. /// unsafe { /// let mut slice = slice::from_raw_parts_mut(load_addr, mem_size); /// slice.write(&x86_code).unwrap(); /// } /// /// let vcpu_fd = vm.create_vcpu(0).unwrap(); /// /// let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap(); /// vcpu_sregs.cs.base = 0; /// vcpu_sregs.cs.selector = 0; /// vcpu_fd.set_sregs(&vcpu_sregs).unwrap(); /// /// let mut vcpu_regs = vcpu_fd.get_regs().unwrap(); /// // Set the Instruction Pointer to the guest address where we loaded the code. 
/// vcpu_regs.rip = guest_addr; /// vcpu_regs.rax = 2; /// vcpu_regs.rbx = 3; /// vcpu_regs.rflags = 2; /// vcpu_fd.set_regs(&vcpu_regs).unwrap(); /// /// loop { /// match vcpu_fd.run().expect("run failed") { /// VcpuExit::Hlt => { /// break; /// } /// exit_reason => panic!("unexpected exit reason: {:?}", exit_reason), /// } /// } /// } /// ``` /// pub fn run(&self) -> Result { // Safe because we know that our file is a vCPU fd and we verify the return result. let ret = unsafe { ioctl(self, KVM_RUN()) }; if ret == 0 { let run = self.kvm_run_ptr.as_mut_ref(); match run.exit_reason { // make sure you treat all possible exit reasons from include/uapi/linux/kvm.h corresponding // when upgrading to a different kernel version KVM_EXIT_UNKNOWN => Ok(VcpuExit::Unknown), KVM_EXIT_EXCEPTION => Ok(VcpuExit::Exception), KVM_EXIT_IO => { let run_start = run as *mut kvm_run as *mut u8; // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let io = unsafe { run.__bindgen_anon_1.io }; let port = io.port; let data_size = io.count as usize * io.size as usize; // The data_offset is defined by the kernel to be some number of bytes into the // kvm_run stucture, which we have fully mmap'd. let data_ptr = unsafe { run_start.offset(io.data_offset as isize) }; // The slice's lifetime is limited to the lifetime of this vCPU, which is equal // to the mmap of the `kvm_run` struct that this is slicing from. let data_slice = unsafe { std::slice::from_raw_parts_mut::(data_ptr as *mut u8, data_size) }; match u32::from(io.direction) { KVM_EXIT_IO_IN => Ok(VcpuExit::IoIn(port, data_slice)), KVM_EXIT_IO_OUT => Ok(VcpuExit::IoOut(port, data_slice)), _ => Err(io::Error::from_raw_os_error(EINVAL)), } } KVM_EXIT_HYPERCALL => Ok(VcpuExit::Hypercall), KVM_EXIT_DEBUG => Ok(VcpuExit::Debug), KVM_EXIT_HLT => Ok(VcpuExit::Hlt), KVM_EXIT_MMIO => { // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. 
let mmio = unsafe { &mut run.__bindgen_anon_1.mmio }; let addr = mmio.phys_addr; let len = mmio.len as usize; let data_slice = &mut mmio.data[..len]; if mmio.is_write != 0 { Ok(VcpuExit::MmioWrite(addr, data_slice)) } else { Ok(VcpuExit::MmioRead(addr, data_slice)) } } KVM_EXIT_IRQ_WINDOW_OPEN => Ok(VcpuExit::IrqWindowOpen), KVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown), KVM_EXIT_FAIL_ENTRY => Ok(VcpuExit::FailEntry), KVM_EXIT_INTR => Ok(VcpuExit::Intr), KVM_EXIT_SET_TPR => Ok(VcpuExit::SetTpr), KVM_EXIT_TPR_ACCESS => Ok(VcpuExit::TprAccess), KVM_EXIT_S390_SIEIC => Ok(VcpuExit::S390Sieic), KVM_EXIT_S390_RESET => Ok(VcpuExit::S390Reset), KVM_EXIT_DCR => Ok(VcpuExit::Dcr), KVM_EXIT_NMI => Ok(VcpuExit::Nmi), KVM_EXIT_INTERNAL_ERROR => Ok(VcpuExit::InternalError), KVM_EXIT_OSI => Ok(VcpuExit::Osi), KVM_EXIT_PAPR_HCALL => Ok(VcpuExit::PaprHcall), KVM_EXIT_S390_UCONTROL => Ok(VcpuExit::S390Ucontrol), KVM_EXIT_WATCHDOG => Ok(VcpuExit::Watchdog), KVM_EXIT_S390_TSCH => Ok(VcpuExit::S390Tsch), KVM_EXIT_EPR => Ok(VcpuExit::Epr), KVM_EXIT_SYSTEM_EVENT => Ok(VcpuExit::SystemEvent), KVM_EXIT_S390_STSI => Ok(VcpuExit::S390Stsi), KVM_EXIT_IOAPIC_EOI => { // Safe because the exit_reason (which comes from the kernel) told us which // union field to use. let eoi = unsafe { &mut run.__bindgen_anon_1.eoi }; Ok(VcpuExit::IoapicEoi(eoi.vector)) } KVM_EXIT_HYPERV => Ok(VcpuExit::Hyperv), r => panic!("unknown kvm exit reason: {}", r), } } else { Err(io::Error::last_os_error()) } } } /// Helper function to create a new `VcpuFd`. /// /// This should not be exported as a public function because the preferred way is to use /// `create_vcpu` from `VmFd`. The function cannot be part of the `VcpuFd` implementation because /// then it would be exported with the public `VcpuFd` interface. 
pub fn new_vcpu(vcpu: File, kvm_run_ptr: KvmRunWrapper) -> VcpuFd { VcpuFd { vcpu, kvm_run_ptr } } impl AsRawFd for VcpuFd { fn as_raw_fd(&self) -> RawFd { self.vcpu.as_raw_fd() } } #[cfg(test)] mod tests { extern crate byteorder; #[cfg(target_arch = "x86_64")] use super::*; use ioctls::system::Kvm; #[cfg(target_arch = "x86_64")] use {Cap, MAX_KVM_CPUID_ENTRIES}; // Helper function for memory mapping `size` bytes of anonymous memory. // Panics if the mmap fails. #[cfg(target_arch = "x86_64")] fn mmap_anonymous(size: usize) -> *mut u8 { use std::ptr::null_mut; let addr = unsafe { libc::mmap( null_mut(), size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE, -1, 0, ) }; if addr == libc::MAP_FAILED { panic!("mmap failed."); } return addr as *mut u8; } #[test] fn test_create_vcpu() { let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); assert!(vm.create_vcpu(0).is_ok()); } #[cfg(target_arch = "x86_64")] #[test] fn test_set_cpuid2() { let kvm = Kvm::new().unwrap(); if kvm.check_extension(Cap::ExtCpuid) { let vm = kvm.create_vm().unwrap(); let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap(); assert!(cpuid.mut_entries_slice().len() <= MAX_KVM_CPUID_ENTRIES); let nr_vcpus = kvm.get_nr_vcpus(); for cpu_id in 0..nr_vcpus { let vcpu = vm.create_vcpu(cpu_id as u8).unwrap(); vcpu.set_cpuid2(&cpuid).unwrap(); } } } #[cfg(target_arch = "x86_64")] #[allow(non_snake_case)] #[test] fn test_fpu() { // as per https://github.com/torvalds/linux/blob/master/arch/x86/include/asm/fpu/internal.h let KVM_FPU_CWD: usize = 0x37f; let KVM_FPU_MXCSR: usize = 0x1f80; let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let vcpu = vm.create_vcpu(0).unwrap(); let mut fpu: kvm_fpu = kvm_fpu { fcw: KVM_FPU_CWD as u16, mxcsr: KVM_FPU_MXCSR as u32, ..Default::default() }; fpu.fcw = KVM_FPU_CWD as u16; fpu.mxcsr = KVM_FPU_MXCSR as u32; vcpu.set_fpu(&fpu).unwrap(); assert_eq!(vcpu.get_fpu().unwrap().fcw, 
KVM_FPU_CWD as u16); } #[cfg(target_arch = "x86_64")] #[test] fn lapic_test() { use std::io::Cursor; // We might get rid of byteorder if we replace mem::transmute with something safer. use self::byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; // As per https://github.com/torvalds/linux/blob/master/arch/x86/kvm/lapic.c // Try to write and read the APIC_ICR (0x300) register which is non-read only and // one can simply write to it. let kvm = Kvm::new().unwrap(); assert!(kvm.check_extension(Cap::Irqchip)); let vm = kvm.create_vm().unwrap(); // The get_lapic ioctl will fail if there is no irqchip created beforehand. assert!(vm.create_irq_chip().is_ok()); let vcpu = vm.create_vcpu(0).unwrap(); let mut klapic: kvm_lapic_state = vcpu.get_lapic().unwrap(); let reg_offset = 0x300; let value = 2 as u32; // try to write and read the APIC_ICR 0x300 let write_slice = unsafe { &mut *(&mut klapic.regs[reg_offset..] as *mut [i8] as *mut [u8]) }; let mut writer = Cursor::new(write_slice); writer.write_u32::(value).unwrap(); vcpu.set_lapic(&klapic).unwrap(); klapic = vcpu.get_lapic().unwrap(); let read_slice = unsafe { &*(&klapic.regs[reg_offset..] 
as *const [i8] as *const [u8]) }; let mut reader = Cursor::new(read_slice); assert_eq!(reader.read_u32::().unwrap(), value); } #[cfg(target_arch = "x86_64")] #[test] fn msrs_test() { use std::mem; let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let vcpu = vm.create_vcpu(0).unwrap(); let mut configured_entry_vec = Vec::::new(); configured_entry_vec.push(kvm_msr_entry { index: 0x0000_0174, data: 0x0, ..Default::default() }); configured_entry_vec.push(kvm_msr_entry { index: 0x0000_0175, data: 0x1, ..Default::default() }); let vec_size_bytes = mem::size_of::() + (configured_entry_vec.len() * mem::size_of::()); let vec: Vec = Vec::with_capacity(vec_size_bytes); let msrs: &mut kvm_msrs = unsafe { &mut *(vec.as_ptr() as *mut kvm_msrs) }; unsafe { let entries: &mut [kvm_msr_entry] = msrs.entries.as_mut_slice(configured_entry_vec.len()); entries.copy_from_slice(&configured_entry_vec); } msrs.nmsrs = configured_entry_vec.len() as u32; vcpu.set_msrs(msrs).unwrap(); //now test that GET_MSRS returns the same let wanted_kvm_msrs_entries = [ kvm_msr_entry { index: 0x0000_0174, ..Default::default() }, kvm_msr_entry { index: 0x0000_0175, ..Default::default() }, ]; let vec2: Vec = Vec::with_capacity(vec_size_bytes); let mut msrs2: &mut kvm_msrs = unsafe { // Converting the vector's memory to a struct is unsafe. Carefully using the read-only // vector to size and set the members ensures no out-of-bounds errors below. 
&mut *(vec2.as_ptr() as *mut kvm_msrs) }; unsafe { let entries: &mut [kvm_msr_entry] = msrs2.entries.as_mut_slice(configured_entry_vec.len()); entries.copy_from_slice(&wanted_kvm_msrs_entries); } msrs2.nmsrs = configured_entry_vec.len() as u32; let read_msrs = vcpu.get_msrs(&mut msrs2).unwrap(); assert_eq!(read_msrs, configured_entry_vec.len() as i32); let returned_kvm_msr_entries: &mut [kvm_msr_entry] = unsafe { msrs2.entries.as_mut_slice(msrs2.nmsrs as usize) }; for (i, entry) in returned_kvm_msr_entries.iter_mut().enumerate() { assert_eq!(entry, &mut configured_entry_vec[i]); } } #[cfg(target_arch = "x86_64")] #[test] fn test_run_code() { use std::io::Write; let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); // This example is based on https://lwn.net/Articles/658511/ #[rustfmt::skip] let code = [ 0xba, 0xf8, 0x03, /* mov $0x3f8, %dx */ 0x00, 0xd8, /* add %bl, %al */ 0x04, b'0', /* add $'0', %al */ 0xee, /* out %al, %dx */ 0xec, /* in %dx, %al */ 0xc6, 0x06, 0x00, 0x80, 0x00, /* movl $0, (0x8000); This generates a MMIO Write.*/ 0x8a, 0x16, 0x00, 0x80, /* movl (0x8000), %dl; This generates a MMIO Read.*/ 0xc6, 0x06, 0x00, 0x20, 0x00, /* movl $0, (0x2000); Dirty one page in guest mem. */ 0xf4, /* hlt */ ]; let mem_size = 0x4000; let load_addr = mmap_anonymous(mem_size); let guest_addr: u64 = 0x1000; let slot: u32 = 0; let mem_region = kvm_userspace_memory_region { slot, guest_phys_addr: guest_addr, memory_size: mem_size as u64, userspace_addr: load_addr as u64, flags: KVM_MEM_LOG_DIRTY_PAGES, }; unsafe { vm.set_user_memory_region(mem_region).unwrap(); } unsafe { // Get a mutable slice of `mem_size` from `load_addr`. // This is safe because we mapped it before. 
let mut slice = std::slice::from_raw_parts_mut(load_addr, mem_size); slice.write(&code).unwrap(); } let vcpu_fd = vm.create_vcpu(0).unwrap(); let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap(); assert_ne!(vcpu_sregs.cs.base, 0); assert_ne!(vcpu_sregs.cs.selector, 0); vcpu_sregs.cs.base = 0; vcpu_sregs.cs.selector = 0; vcpu_fd.set_sregs(&vcpu_sregs).unwrap(); let mut vcpu_regs = vcpu_fd.get_regs().unwrap(); // Set the Instruction Pointer to the guest address where we loaded the code. vcpu_regs.rip = guest_addr; vcpu_regs.rax = 2; vcpu_regs.rbx = 3; vcpu_regs.rflags = 2; vcpu_fd.set_regs(&vcpu_regs).unwrap(); loop { match vcpu_fd.run().expect("run failed") { VcpuExit::IoIn(addr, data) => { assert_eq!(addr, 0x3f8); assert_eq!(data.len(), 1); } VcpuExit::IoOut(addr, data) => { assert_eq!(addr, 0x3f8); assert_eq!(data.len(), 1); assert_eq!(data[0], b'5'); } VcpuExit::MmioRead(addr, data) => { assert_eq!(addr, 0x8000); assert_eq!(data.len(), 1); } VcpuExit::MmioWrite(addr, data) => { assert_eq!(addr, 0x8000); assert_eq!(data.len(), 1); assert_eq!(data[0], 0); } VcpuExit::Hlt => { // The code snippet dirties 2 pages: // * one when the code itself is loaded in memory; // * and one more from the `movl` that writes to address 0x8000 let dirty_pages_bitmap = vm.get_dirty_log(slot, mem_size).unwrap(); let dirty_pages = dirty_pages_bitmap .into_iter() .map(|page| page.count_ones()) .fold(0, |dirty_page_count, i| dirty_page_count + i); assert_eq!(dirty_pages, 2); break; } r => panic!("unexpected exit reason: {:?}", r), } } } #[test] #[cfg(target_arch = "x86_64")] fn test_faulty_vcpu_fd() { use std::os::unix::io::FromRawFd; let badf_errno = libc::EBADF; let faulty_vcpu_fd = VcpuFd { vcpu: unsafe { File::from_raw_fd(-1) }, kvm_run_ptr: KvmRunWrapper { kvm_run_ptr: mmap_anonymous(10), mmap_size: 10, }, }; fn get_raw_errno(result: super::Result) -> i32 { result.err().unwrap().raw_os_error().unwrap() } assert_eq!(get_raw_errno(faulty_vcpu_fd.get_regs()), badf_errno); assert_eq!( 
get_raw_errno(faulty_vcpu_fd.set_regs(&unsafe { std::mem::zeroed() })), badf_errno ); assert_eq!(get_raw_errno(faulty_vcpu_fd.get_sregs()), badf_errno); assert_eq!( get_raw_errno(faulty_vcpu_fd.set_sregs(&unsafe { std::mem::zeroed() })), badf_errno ); assert_eq!(get_raw_errno(faulty_vcpu_fd.get_fpu()), badf_errno); assert_eq!( get_raw_errno(faulty_vcpu_fd.set_fpu(&unsafe { std::mem::zeroed() })), badf_errno ); assert_eq!( get_raw_errno( faulty_vcpu_fd.set_cpuid2( &Kvm::new() .unwrap() .get_supported_cpuid(MAX_KVM_CPUID_ENTRIES) .unwrap() ) ), badf_errno ); // `kvm_lapic_state` does not implement debug by default so we cannot // use unwrap_err here. assert!(faulty_vcpu_fd.get_lapic().is_err()); assert_eq!( get_raw_errno(faulty_vcpu_fd.set_lapic(&unsafe { std::mem::zeroed() })), badf_errno ); assert_eq!( get_raw_errno(faulty_vcpu_fd.get_msrs(&mut kvm_msrs::default())), badf_errno ); assert_eq!( get_raw_errno(faulty_vcpu_fd.set_msrs(&unsafe { std::mem::zeroed() })), badf_errno ); assert_eq!(get_raw_errno(faulty_vcpu_fd.run()), badf_errno); } #[test] #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] fn test_get_preferred_target() { let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let vcpu = vm.create_vcpu(0).unwrap(); let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default(); assert!(vcpu.vcpu_init(&kvi).is_err()); vm.get_preferred_target(&mut kvi) .expect("Cannot get preferred target"); assert!(vcpu.vcpu_init(&kvi).is_ok()); } #[test] #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] fn test_set_one_reg() { let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let vcpu = vm.create_vcpu(0).unwrap(); let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default(); vm.get_preferred_target(&mut kvi) .expect("Cannot get preferred target"); vcpu.vcpu_init(&kvi).expect("Cannot initialize vcpu"); let data: u64 = 0; let reg_id: u64 = 0; assert!(vcpu.set_one_reg(reg_id, data).is_err()); // 
Exercising KVM_SET_ONE_REG by trying to alter the data inside the PSTATE register (which is a // specific aarch64 register). const PSTATE_REG_ID: u64 = 0x6030_0000_0010_0042; vcpu.set_one_reg(PSTATE_REG_ID, data) .expect("Failed to set pstate register"); } } kvm-ioctls-0.2.0/src/ioctls/vm.rs0100644€T¡ŽÔ€T 000001000521350324253600157230ustar0000000000000000// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. use ioctls::Result; use kvm_bindings::*; use std::fs::File; use std::io; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] use std::os::raw::c_void; use std::os::raw::{c_int, c_ulong}; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use ioctls::device::new_device; use ioctls::device::DeviceFd; use ioctls::vcpu::new_vcpu; use ioctls::vcpu::VcpuFd; use ioctls::KvmRunWrapper; use kvm_ioctls::*; use sys_ioctl::*; /// An address either in programmable I/O space or in memory mapped I/O space. /// /// The `IoEventAddress` is used for specifying the type when registering an event /// in [register_ioevent](struct.VmFd.html#method.register_ioevent). /// pub enum IoEventAddress { /// Representation of an programmable I/O address. Pio(u64), /// Representation of an memory mapped I/O address. Mmio(u64), } /// Helper structure for disabling datamatch. /// /// The structure can be used as a parameter to /// [`register_ioevent`](struct.VmFd.html#method.register_ioevent) /// to disable filtering of events based on the datamatch flag. For details check the /// [KVM API documentation](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// pub struct NoDatamatch; impl Into for NoDatamatch { fn into(self) -> u64 { 0 } } /// Wrapper over KVM VM ioctls. 
pub struct VmFd { vm: File, run_size: usize, } impl VmFd { /// Creates/modifies a guest physical memory slot. /// /// See the documentation for `KVM_SET_USER_MEMORY_REGION`. /// /// # Arguments /// /// * `user_memory_region` - Guest physical memory slot. For details check the /// `kvm_userspace_memory_region` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// /// # Safety /// /// This function is unsafe because there is no guarantee `userspace_addr` points to a valid /// memory region, nor the memory region lives as long as the kernel needs it to. /// /// The caller of this method must make sure that: /// - the raw pointer (`userspace_addr`) points to valid memory /// - the regions provided to KVM are not overlapping other memory regions. /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// extern crate kvm_bindings; /// /// use kvm_ioctls::{Kvm, VmFd}; /// use kvm_bindings::kvm_userspace_memory_region; /// /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let mem_region = kvm_userspace_memory_region { /// slot: 0, /// guest_phys_addr: 0x1000 as u64, /// memory_size: 0x4000 as u64, /// userspace_addr: 0x0 as u64, /// flags: 0, /// }; /// unsafe { /// vm.set_user_memory_region(mem_region).unwrap(); /// }; /// ``` /// pub unsafe fn set_user_memory_region( &self, user_memory_region: kvm_userspace_memory_region, ) -> Result<()> { let ret = ioctl_with_ref(self, KVM_SET_USER_MEMORY_REGION(), &user_memory_region); if ret == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } /// Sets the address of the three-page region in the VM's address space. /// /// See the documentation for `KVM_SET_TSS_ADDR`. /// /// # Arguments /// /// * `offset` - Physical address of a three-page region in the guest's physical address space. 
/// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// vm.set_tss_address(0xfffb_d000).unwrap(); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn set_tss_address(&self, offset: usize) -> Result<()> { // Safe because we know that our file is a VM fd and we verify the return result. let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR(), offset as c_ulong) }; if ret == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } /// Creates an in-kernel interrupt controller. /// /// See the documentation for `KVM_CREATE_IRQCHIP`. /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] /// vm.create_irq_chip().unwrap(); /// ``` /// #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64" ))] pub fn create_irq_chip(&self) -> Result<()> { // Safe because we know that our file is a VM fd and we verify the return result. let ret = unsafe { ioctl(self, KVM_CREATE_IRQCHIP()) }; if ret == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } /// Creates a PIT as per the `KVM_CREATE_PIT2` ioctl. /// /// # Arguments /// /// * pit_config - PIT configuration. For details check the `kvm_pit_config` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). 
/// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// extern crate kvm_bindings; /// # use kvm_ioctls::{Kvm, VmFd}; /// use kvm_bindings::kvm_pit_config; /// /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let pit_config = kvm_pit_config::default(); /// vm.create_pit2(pit_config).unwrap(); /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub fn create_pit2(&self, pit_config: kvm_pit_config) -> Result<()> { // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2(), &pit_config) }; if ret == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } /// Directly injects a MSI message as per the `KVM_SIGNAL_MSI` ioctl. /// /// See the documentation for `KVM_SIGNAL_MSI`. /// /// This ioctl returns > 0 when the MSI is successfully delivered and 0 /// when the guest blocked the MSI. /// /// # Arguments /// /// * kvm_msi - MSI message configuration. For details check the `kvm_msi` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// # Example /// /// In this example, the important function signal_msi() calling into /// the actual ioctl is commented out. The reason is that MSI vectors are /// not chosen from the HW side (VMM). The guest OS (or anything that runs /// inside the VM) is supposed to allocate the MSI vectors, and usually /// communicate back through PCI configuration space. Sending a random MSI /// vector through this signal_msi() function will always result in a /// failure, which is why it needs to be commented out. 
/// /// ```rust /// # extern crate kvm_ioctls; /// extern crate kvm_bindings; /// # use kvm_ioctls::{Kvm, VmFd}; /// use kvm_bindings::kvm_msi; /// /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let msi = kvm_msi::default(); /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] /// vm.create_irq_chip().unwrap(); /// //vm.signal_msi(msi).unwrap(); /// ``` /// #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64" ))] pub fn signal_msi(&self, msi: kvm_msi) -> Result { // Safe because we allocated the structure and we know the kernel // will read exactly the size of the structure. let ret = unsafe { ioctl_with_ref(self, KVM_SIGNAL_MSI(), &msi) }; if ret >= 0 { Ok(ret) } else { Err(io::Error::last_os_error()) } } /// Registers an event to be signaled whenever a certain address is written to. /// /// See the documentation for `KVM_IOEVENTFD`. /// /// # Arguments /// /// * `fd` - FD which will be signaled. When signaling, the usual `vmexit` to userspace /// is prevented. /// * `addr` - Address being written to. /// * `datamatch` - Limits signaling `fd` to only the cases where the value being written is /// equal to this parameter. The size of `datamatch` is important and it must /// match the expected size of the guest's write. 
/// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// extern crate libc; /// # use kvm_ioctls::{IoEventAddress, Kvm, NoDatamatch, VmFd}; /// use libc::{eventfd, EFD_NONBLOCK}; /// /// let kvm = Kvm::new().unwrap(); /// let vm_fd = kvm.create_vm().unwrap(); /// let evtfd = unsafe { eventfd(0, EFD_NONBLOCK) }; /// vm_fd /// .register_ioevent(evtfd, &IoEventAddress::Pio(0xf4), NoDatamatch) /// .unwrap(); /// vm_fd /// .register_ioevent(evtfd, &IoEventAddress::Mmio(0x1000), NoDatamatch) /// .unwrap(); /// ``` /// pub fn register_ioevent>( &self, fd: RawFd, addr: &IoEventAddress, datamatch: T, ) -> Result<()> { let mut flags = 0; if std::mem::size_of::() > 0 { flags |= 1 << kvm_ioeventfd_flag_nr_datamatch } if let IoEventAddress::Pio(_) = *addr { flags |= 1 << kvm_ioeventfd_flag_nr_pio } let ioeventfd = kvm_ioeventfd { datamatch: datamatch.into(), len: std::mem::size_of::() as u32, addr: match addr { IoEventAddress::Pio(ref p) => *p as u64, IoEventAddress::Mmio(ref m) => *m, }, fd, flags, ..Default::default() }; // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_IOEVENTFD(), &ioeventfd) }; if ret == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } /// Gets the bitmap of pages dirtied since the last call of this function. /// /// Leverages the dirty page logging feature in KVM. As a side-effect, this also resets the /// bitmap inside the kernel. For the dirty log to be available, you have to set the flag /// `KVM_MEM_LOG_DIRTY_PAGES` when creating guest memory regions. /// /// Check the documentation for `KVM_GET_DIRTY_LOG`. /// /// # Arguments /// /// * `slot` - Guest memory slot identifier. /// * `memory_size` - Size of the memory region. 
///
/// # Example
///
/// ```rust
/// # extern crate kvm_ioctls;
/// # extern crate kvm_bindings;
/// # use std::io::Write;
/// # use std::ptr::null_mut;
/// # use std::slice;
/// # use kvm_ioctls::{Kvm, VmFd, VcpuFd, VcpuExit};
/// # use kvm_bindings::{kvm_userspace_memory_region, KVM_MEM_LOG_DIRTY_PAGES};
/// # let kvm = Kvm::new().unwrap();
/// # let vm = kvm.create_vm().unwrap();
/// // This examples is based on https://lwn.net/Articles/658511/.
/// let mem_size = 0x4000;
/// let guest_addr: u64 = 0x1000;
/// let load_addr: *mut u8 = unsafe {
///     libc::mmap(
///         null_mut(),
///         mem_size,
///         libc::PROT_READ | libc::PROT_WRITE,
///         libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
///         -1,
///         0,
///     ) as *mut u8
/// };
///
/// // Initialize a guest memory region using the flag `KVM_MEM_LOG_DIRTY_PAGES`.
/// let mem_region = kvm_userspace_memory_region {
///     slot: 0,
///     guest_phys_addr: guest_addr,
///     memory_size: mem_size as u64,
///     userspace_addr: load_addr as u64,
///     flags: KVM_MEM_LOG_DIRTY_PAGES,
/// };
/// unsafe { vm.set_user_memory_region(mem_region).unwrap() };
///
/// // Dummy x86 code that just calls halt.
/// let x86_code = [
///     0xf4, /* hlt */
/// ];
///
/// // Write the code in the guest memory. This will generate a dirty page.
/// unsafe {
///     let mut slice = slice::from_raw_parts_mut(load_addr, mem_size);
///     slice.write(&x86_code).unwrap();
/// }
///
/// let vcpu_fd = vm.create_vcpu(0).unwrap();
///
/// let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap();
/// vcpu_sregs.cs.base = 0;
/// vcpu_sregs.cs.selector = 0;
/// vcpu_fd.set_sregs(&vcpu_sregs).unwrap();
///
/// let mut vcpu_regs = vcpu_fd.get_regs().unwrap();
/// // Set the Instruction Pointer to the guest address where we loaded the code.
/// vcpu_regs.rip = guest_addr;
/// vcpu_regs.rax = 2;
/// vcpu_regs.rbx = 3;
/// vcpu_regs.rflags = 2;
/// vcpu_fd.set_regs(&vcpu_regs).unwrap();
///
/// loop {
///     match vcpu_fd.run().expect("run failed") {
///         VcpuExit::Hlt => {
///             // The code snippet dirties 1 page when loading the code in memory.
///             let dirty_pages_bitmap = vm.get_dirty_log(0, mem_size).unwrap();
///             let dirty_pages = dirty_pages_bitmap
///                 .into_iter()
///                 .map(|page| page.count_ones())
///                 .fold(0, |dirty_page_count, i| dirty_page_count + i);
///             assert_eq!(dirty_pages, 1);
///             break;
///         }
///         exit_reason => panic!("unexpected exit reason: {:?}", exit_reason),
///     }
/// }
/// ```
///
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
// NOTE(review): the `Result<Vec<u64>>` return type was reconstructed; the archived text
// had `<...>` stripped by HTML extraction (it read `Result>`). `Ok(bitmap)` below
// confirms the payload is the `Vec<u64>` bitmap.
pub fn get_dirty_log(&self, slot: u32, memory_size: usize) -> Result<Vec<u64>> {
    // Compute the length of the bitmap needed for all dirty pages in one memory slot.
    // One memory page is 4KiB (4096 bits) and `KVM_GET_DIRTY_LOG` returns one dirty bit for
    // each page.
    let page_size = 4 << 10;

    let div_round_up = |dividend, divisor| (dividend + divisor - 1) / divisor;
    // For ease of access we are saving the bitmap in a u64 vector. We are using ceil to
    // make sure we count all dirty pages even when `mem_size` is not a multiple of
    // page_size * 64.
    let bitmap_size = div_round_up(memory_size, page_size * 64);

    let mut bitmap = vec![0; bitmap_size];
    let b_data = bitmap.as_mut_ptr() as *mut c_void;
    let dirtylog = kvm_dirty_log {
        slot,
        padding1: 0,
        __bindgen_anon_1: kvm_dirty_log__bindgen_ty_1 {
            dirty_bitmap: b_data,
        },
    };
    // Safe because we know that our file is a VM fd, and we know that the amount of memory
    // we allocated for the bitmap is at least one bit per page.
    let ret = unsafe { ioctl_with_ref(self, KVM_GET_DIRTY_LOG(), &dirtylog) };
    if ret == 0 {
        Ok(bitmap)
    } else {
        Err(io::Error::last_os_error())
    }
}

/// Registers an event that will, when signaled, trigger the `gsi` IRQ.
///
/// # Arguments
///
/// * `fd` - Event to be signaled.
/// * `gsi` - IRQ to be triggered. /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # extern crate libc; /// # use kvm_ioctls::{Kvm, VmFd}; /// # use libc::{eventfd, EFD_NONBLOCK}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let evtfd = unsafe { eventfd(0, EFD_NONBLOCK) }; /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] /// vm.register_irqfd(evtfd, 0).unwrap(); /// ``` /// #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64" ))] pub fn register_irqfd(&self, fd: RawFd, gsi: u32) -> Result<()> { let irqfd = kvm_irqfd { fd: fd as u32, gsi, ..Default::default() }; // Safe because we know that our file is a VM fd, we know the kernel will only read the // correct amount of memory from our pointer, and we verify the return result. let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) }; if ret == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } /// Creates a new KVM vCPU file descriptor and maps the memory corresponding /// its `kvm_run` structure. /// /// See the documentation for `KVM_CREATE_VCPU`. /// /// # Arguments /// /// * `id` - The vCPU ID. /// /// # Errors /// /// Returns an io::Error when the VM fd is invalid or the vCPU memory cannot /// be mapped correctly. /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// // Create one vCPU with the ID=0. /// let vcpu = vm.create_vcpu(0); /// ``` /// pub fn create_vcpu(&self, id: u8) -> Result { // Safe because we know that vm is a VM fd and we verify the return result. #[allow(clippy::cast_lossless)] let vcpu_fd = unsafe { ioctl_with_val(&self.vm, KVM_CREATE_VCPU(), id as c_ulong) }; if vcpu_fd < 0 { return Err(io::Error::last_os_error()); } // Wrap the vCPU now in case the following ? returns early. 
This is safe because we verified // the value of the fd and we own the fd. let vcpu = unsafe { File::from_raw_fd(vcpu_fd) }; let kvm_run_ptr = KvmRunWrapper::mmap_from_fd(&vcpu, self.run_size)?; Ok(new_vcpu(vcpu, kvm_run_ptr)) } /// Creates an emulated device in the kernel. /// /// See the documentation for `KVM_CREATE_DEVICE`. /// /// # Arguments /// /// * `device`: device configuration. For details check the `kvm_create_device` structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # extern crate kvm_bindings; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// use kvm_bindings::{ /// kvm_device_type_KVM_DEV_TYPE_VFIO, /// kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3, /// KVM_CREATE_DEVICE_TEST, /// }; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// /// // Creating a device with the KVM_CREATE_DEVICE_TEST flag to check /// // whether the device type is supported. This will not create the device. /// // To create the device the flag needs to be removed. /// let mut device = kvm_bindings::kvm_create_device { /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] /// type_: kvm_device_type_KVM_DEV_TYPE_VFIO, /// #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] /// type_: kvm_device_type_KVM_DEV_TYPE_ARM_VGIC_V3, /// fd: 0, /// flags: KVM_CREATE_DEVICE_TEST, /// }; /// let device_fd = vm /// .create_device(&mut device).unwrap(); /// ``` /// pub fn create_device(&self, device: &mut kvm_create_device) -> Result { let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_DEVICE(), device) }; if ret == 0 { Ok(new_device(unsafe { File::from_raw_fd(device.fd as i32) })) } else { Err(io::Error::last_os_error()) } } /// Returns the preferred CPU target type which can be emulated by KVM on underlying host. /// /// The preferred CPU target is returned in the `kvi` parameter. /// See documentation for `KVM_ARM_PREFERRED_TARGET`. 
/// /// # Arguments /// * `kvi` - CPU target configuration (out). For details check the `kvm_vcpu_init` /// structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// # extern crate kvm_bindings; /// # use kvm_ioctls::{Kvm, VmFd, VcpuFd}; /// use kvm_bindings::kvm_vcpu_init; /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let mut kvi = kvm_vcpu_init::default(); /// vm.get_preferred_target(&mut kvi).unwrap(); /// ``` /// #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] pub fn get_preferred_target(&self, kvi: &mut kvm_vcpu_init) -> Result<()> { // The ioctl is safe because we allocated the struct and we know the // kernel will write exactly the size of the struct. let ret = unsafe { ioctl_with_mut_ref(self, KVM_ARM_PREFERRED_TARGET(), kvi) }; if ret != 0 { return Err(io::Error::last_os_error()); } Ok(()) } /// Enable the specified capability as per the `KVM_ENABLE_CAP` ioctl. /// /// See the documentation for `KVM_ENABLE_CAP`. /// /// Returns an io::Error when the capability could not be enabled. /// /// # Arguments /// /// * kvm_enable_cap - KVM capability structure. For details check the `kvm_enable_cap` /// structure in the /// [KVM API doc](https://www.kernel.org/doc/Documentation/virtual/kvm/api.txt). /// /// # Example /// /// ```rust /// # extern crate kvm_ioctls; /// extern crate kvm_bindings; /// /// # use kvm_ioctls::{Cap, Kvm, VmFd}; /// use kvm_bindings::{kvm_enable_cap, KVM_CAP_SPLIT_IRQCHIP}; /// /// let kvm = Kvm::new().unwrap(); /// let vm = kvm.create_vm().unwrap(); /// let mut cap: kvm_enable_cap = Default::default(); /// // This example cannot enable an arm/aarch64 capability since there /// // is no capability available for these architectures. 
/// if cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64") { /// cap.cap = KVM_CAP_SPLIT_IRQCHIP; /// // As per the KVM documentation, KVM_CAP_SPLIT_IRQCHIP only emulates /// // the local APIC in kernel, expecting that a userspace IOAPIC will /// // be implemented by the VMM. /// // Along with this capability, the user needs to specify the number /// // of pins reserved for the userspace IOAPIC. This number needs to be /// // provided through the first argument of the capability structure, as /// // specified in KVM documentation: /// // args[0] - number of routes reserved for userspace IOAPICs /// // /// // Because an IOAPIC supports 24 pins, that's the reason why this test /// // picked this number as reference. /// cap.args[0] = 24; /// vm.enable_cap(&cap).unwrap(); /// } /// ``` /// #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] pub fn enable_cap(&self, cap: &kvm_enable_cap) -> Result<()> { // The ioctl is safe because we allocated the struct and we know the // kernel will write exactly the size of the struct. let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP(), cap) }; if ret == 0 { Ok(()) } else { Err(io::Error::last_os_error()) } } /// Get the `kvm_run` size. pub fn run_size(&self) -> usize { self.run_size } } /// Helper function to create a new `VmFd`. /// /// This should not be exported as a public function because the preferred way is to use /// `create_vm` from `Kvm`. The function cannot be part of the `VmFd` implementation because /// then it would be exported with the public `VmFd` interface. 
pub fn new_vmfd(vm: File, run_size: usize) -> VmFd {
    VmFd { vm, run_size }
}

impl AsRawFd for VmFd {
    fn as_raw_fd(&self) -> RawFd {
        self.vm.as_raw_fd()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use {Cap, Kvm};

    use libc::{eventfd, EFD_NONBLOCK};

    #[test]
    fn test_set_invalid_memory() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        // An all-zero region (zero size, null userspace address) must be rejected.
        let invalid_mem_region = kvm_userspace_memory_region {
            slot: 0,
            guest_phys_addr: 0,
            memory_size: 0,
            userspace_addr: 0,
            flags: 0,
        };
        assert!(unsafe { vm.set_user_memory_region(invalid_mem_region) }.is_err());
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn test_set_tss_address() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        assert!(vm.set_tss_address(0xfffb_d000).is_ok());
    }

    #[test]
    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64"
    ))]
    fn test_create_irq_chip() {
        let kvm = Kvm::new().unwrap();
        assert!(kvm.check_extension(Cap::Irqchip));

        let vm = kvm.create_vm().unwrap();
        if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
            assert!(vm.create_irq_chip().is_ok());
        } else if cfg!(any(target_arch = "arm", target_arch = "aarch64")) {
            // On arm, we expect this to fail as the irq chip needs to be created after the vcpus.
assert!(vm.create_irq_chip().is_err()); } } #[test] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn test_create_pit2() { let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); assert!(vm.create_pit2(kvm_pit_config::default()).is_ok()); } #[test] fn test_register_ioevent() { assert_eq!(std::mem::size_of::(), 0); let kvm = Kvm::new().unwrap(); let vm_fd = kvm.create_vm().unwrap(); let evtfd = unsafe { eventfd(0, EFD_NONBLOCK) }; assert!(vm_fd .register_ioevent(evtfd, &IoEventAddress::Pio(0xf4), NoDatamatch) .is_ok()); assert!(vm_fd .register_ioevent(evtfd, &IoEventAddress::Mmio(0x1000), NoDatamatch) .is_ok()); assert!(vm_fd .register_ioevent(evtfd, &IoEventAddress::Pio(0xc1), 0x7fu8) .is_ok()); assert!(vm_fd .register_ioevent(evtfd, &IoEventAddress::Pio(0xc2), 0x1337u16) .is_ok()); assert!(vm_fd .register_ioevent(evtfd, &IoEventAddress::Pio(0xc4), 0xdead_beefu32) .is_ok()); assert!(vm_fd .register_ioevent(evtfd, &IoEventAddress::Pio(0xc8), 0xdead_beef_dead_beefu64) .is_ok()); } #[test] #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64" ))] fn test_register_irqfd() { let kvm = Kvm::new().unwrap(); let vm_fd = kvm.create_vm().unwrap(); let evtfd1 = unsafe { eventfd(0, EFD_NONBLOCK) }; let evtfd2 = unsafe { eventfd(0, EFD_NONBLOCK) }; let evtfd3 = unsafe { eventfd(0, EFD_NONBLOCK) }; if cfg!(any(target_arch = "x86", target_arch = "x86_64")) { assert!(vm_fd.register_irqfd(evtfd1, 4).is_ok()); assert!(vm_fd.register_irqfd(evtfd2, 8).is_ok()); assert!(vm_fd.register_irqfd(evtfd3, 4).is_ok()); } // On aarch64, this fails because setting up the interrupt controller is mandatory before // registering any IRQ. // On x86_64 this fails as the event fd was already matched with a GSI. 
assert!(vm_fd.register_irqfd(evtfd3, 4).is_err()); assert!(vm_fd.register_irqfd(evtfd3, 5).is_err()); } #[test] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn test_faulty_vm_fd() { let badf_errno = libc::EBADF; let faulty_vm_fd = VmFd { vm: unsafe { File::from_raw_fd(-1) }, run_size: 0, }; let invalid_mem_region = kvm_userspace_memory_region { slot: 0, guest_phys_addr: 0, memory_size: 0, userspace_addr: 0, flags: 0, }; fn get_raw_errno(result: super::Result) -> i32 { result.err().unwrap().raw_os_error().unwrap() } assert_eq!( get_raw_errno(unsafe { faulty_vm_fd.set_user_memory_region(invalid_mem_region) }), badf_errno ); assert_eq!(get_raw_errno(faulty_vm_fd.set_tss_address(0)), badf_errno); assert_eq!(get_raw_errno(faulty_vm_fd.create_irq_chip()), badf_errno); assert_eq!( get_raw_errno(faulty_vm_fd.create_pit2(kvm_pit_config::default())), badf_errno ); let event_fd = unsafe { eventfd(0, EFD_NONBLOCK) }; assert_eq!( get_raw_errno(faulty_vm_fd.register_ioevent(event_fd, &IoEventAddress::Pio(0), 0u64)), badf_errno ); assert_eq!( get_raw_errno(faulty_vm_fd.register_irqfd(event_fd, 0)), badf_errno ); assert_eq!(get_raw_errno(faulty_vm_fd.create_vcpu(0)), badf_errno); assert_eq!(get_raw_errno(faulty_vm_fd.get_dirty_log(0, 0)), badf_errno); } #[test] #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] fn test_get_preferred_target() { let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let mut kvi: kvm_bindings::kvm_vcpu_init = kvm_bindings::kvm_vcpu_init::default(); assert!(vm.get_preferred_target(&mut kvi).is_ok()); } /// As explained in the example code related to signal_msi(), sending /// a random MSI vector will always fail because no vector has been /// previously allocated from the guest itself. 
#[test] #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64" ))] fn test_signal_msi_failure() { let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let msi = kvm_msi::default(); assert!(vm.signal_msi(msi).is_err()); } #[test] #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] fn test_enable_cap_failure() { let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let cap: kvm_enable_cap = Default::default(); // Providing the `kvm_enable_cap` structure filled with default() should // always result in a failure as it is not a valid capability. assert!(vm.enable_cap(&cap).is_err()); } #[test] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] fn test_enable_split_irqchip_cap() { let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let mut cap: kvm_enable_cap = Default::default(); cap.cap = KVM_CAP_SPLIT_IRQCHIP; // As per the KVM documentation, KVM_CAP_SPLIT_IRQCHIP only emulates // the local APIC in kernel, expecting that a userspace IOAPIC will // be implemented by the VMM. // Along with this capability, the user needs to specify the number // of pins reserved for the userspace IOAPIC. This number needs to be // provided through the first argument of the capability structure, as // specified in KVM documentation: // args[0] - number of routes reserved for userspace IOAPICs // // Because an IOAPIC supports 24 pins, that's the reason why this test // picked this number as reference. cap.args[0] = 24; assert!(vm.enable_cap(&cap).is_ok()); } } kvm-ioctls-0.2.0/src/kvm_ioctls.rs0100644€T¡ŽÔ€T 000000121711350324253600161620ustar0000000000000000// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. 
#![allow(unused)] use super::sys_ioctl::*; use kvm_bindings::*; // Declares necessary ioctls specific to their platform. ioctl_io_nr!(KVM_GET_API_VERSION, KVMIO, 0x00); ioctl_io_nr!(KVM_CREATE_VM, KVMIO, 0x01); ioctl_io_nr!(KVM_CHECK_EXTENSION, KVMIO, 0x03); ioctl_io_nr!(KVM_GET_VCPU_MMAP_SIZE, KVMIO, 0x04); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_iowr_nr!(KVM_GET_SUPPORTED_CPUID, KVMIO, 0x05, kvm_cpuid2); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_iowr_nr!(KVM_GET_EMULATED_CPUID, KVMIO, 0x09, kvm_cpuid2); ioctl_io_nr!(KVM_CREATE_VCPU, KVMIO, 0x41); ioctl_iow_nr!(KVM_GET_DIRTY_LOG, KVMIO, 0x42, kvm_dirty_log); ioctl_iow_nr!( KVM_SET_USER_MEMORY_REGION, KVMIO, 0x46, kvm_userspace_memory_region ); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_io_nr!(KVM_SET_TSS_ADDR, KVMIO, 0x47); #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64", target_arch = "s390" ))] ioctl_io_nr!(KVM_CREATE_IRQCHIP, KVMIO, 0x60); #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64", target_arch = "s390" ))] ioctl_iow_nr!(KVM_IRQFD, KVMIO, 0x76, kvm_irqfd); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_iow_nr!(KVM_CREATE_PIT2, KVMIO, 0x77, kvm_pit_config); ioctl_iow_nr!(KVM_IOEVENTFD, KVMIO, 0x79, kvm_ioeventfd); ioctl_io_nr!(KVM_RUN, KVMIO, 0x80); #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] ioctl_ior_nr!(KVM_GET_REGS, KVMIO, 0x81, kvm_regs); #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] ioctl_iow_nr!(KVM_SET_REGS, KVMIO, 0x82, kvm_regs); #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "powerpc", target_arch = "powerpc64" ))] ioctl_ior_nr!(KVM_GET_SREGS, KVMIO, 0x83, kvm_sregs); #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "powerpc", target_arch = "powerpc64" ))] ioctl_iow_nr!(KVM_SET_SREGS, KVMIO, 0x84, kvm_sregs); #[cfg(any(target_arch = 
"x86", target_arch = "x86_64"))] ioctl_iowr_nr!(KVM_GET_MSR_INDEX_LIST, KVMIO, 0x02, kvm_msr_list); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_iowr_nr!(KVM_GET_MSRS, KVMIO, 0x88, kvm_msrs); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_iow_nr!(KVM_SET_MSRS, KVMIO, 0x89, kvm_msrs); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_iow_nr!(KVM_SET_CPUID2, KVMIO, 0x90, kvm_cpuid2); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_ior_nr!(KVM_GET_FPU, KVMIO, 0x8c, kvm_fpu); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_iow_nr!(KVM_SET_FPU, KVMIO, 0x8d, kvm_fpu); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_ior_nr!(KVM_GET_LAPIC, KVMIO, 0x8e, kvm_lapic_state); #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] ioctl_iow_nr!(KVM_SET_LAPIC, KVMIO, 0x8f, kvm_lapic_state); #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))] ioctl_iow_nr!(KVM_ENABLE_CAP, KVMIO, 0xa3, kvm_enable_cap); #[cfg(any( target_arch = "x86", target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64" ))] ioctl_iow_nr!(KVM_SIGNAL_MSI, KVMIO, 0xa5, kvm_msi); #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] ioctl_iow_nr!(KVM_SET_ONE_REG, KVMIO, 0xac, kvm_one_reg); #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] ioctl_iow_nr!(KVM_ARM_VCPU_INIT, KVMIO, 0xae, kvm_vcpu_init); #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] ioctl_ior_nr!(KVM_ARM_PREFERRED_TARGET, KVMIO, 0xaf, kvm_vcpu_init); ioctl_iowr_nr!(KVM_CREATE_DEVICE, KVMIO, 0xe0, kvm_create_device); ioctl_iow_nr!(KVM_SET_DEVICE_ATTR, KVMIO, 0xe1, kvm_device_attr); #[cfg(test)] mod tests { use std::fs::File; use std::os::unix::io::FromRawFd; use libc::{c_char, open, O_RDWR}; use super::*; const KVM_PATH: &'static str = "/dev/kvm\0"; #[test] fn get_version() { let sys_fd = unsafe { open(KVM_PATH.as_ptr() as *const c_char, O_RDWR) }; assert!(sys_fd >= 0); let ret = unsafe { 
ioctl(&File::from_raw_fd(sys_fd), KVM_GET_API_VERSION()) }; assert_eq!(ret as u32, KVM_API_VERSION); } #[test] fn create_vm_fd() { let sys_fd = unsafe { open(KVM_PATH.as_ptr() as *const c_char, O_RDWR) }; assert!(sys_fd >= 0); let vm_fd = unsafe { ioctl(&File::from_raw_fd(sys_fd), KVM_CREATE_VM()) }; assert!(vm_fd >= 0); } #[test] fn check_vm_extension() { let sys_fd = unsafe { open(KVM_PATH.as_ptr() as *const c_char, O_RDWR) }; assert!(sys_fd >= 0); let has_user_memory = unsafe { ioctl_with_val( &File::from_raw_fd(sys_fd), KVM_CHECK_EXTENSION(), KVM_CAP_USER_MEMORY.into(), ) }; assert_eq!(has_user_memory, 1); } } kvm-ioctls-0.2.0/src/lib.rs0100644€T¡ŽÔ€T 000000167631350315155600145720ustar0000000000000000// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. #![deny(missing_docs)] //! A safe wrapper around the kernel's KVM interface. //! //! This crate offers safe wrappers for: //! - [system ioctls](struct.Kvm.html) using the `Kvm` structure //! - [VM ioctls](struct.VmFd.html) using the `VmFd` structure //! - [vCPU ioctls](struct.VcpuFd.html) using the `VcpuFd` structure //! - [device ioctls](struct.DeviceFd.html) using the `DeviceFd` structure //! //! # Platform support //! //! - x86_64 //! - arm64 (experimental) //! //! **NOTE:** The list of available ioctls is not extensive. //! //! # Example - Running a VM on x86_64 //! //! In this example we are creating a Virtual Machine (VM) with one vCPU. //! On the vCPU we are running x86_64 specific code. This example is based on //! the [LWN article](https://lwn.net/Articles/658511/) on using the KVM API. //! //! To get code running on the vCPU we are going through the following steps: //! //! 1. Instantiate KVM. This is used for running //! 
[system specific ioctls](struct.Kvm.html). //! 2. Use the KVM object to create a VM. The VM is used for running //! [VM specific ioctls](struct.VmFd.html). //! 3. Initialize the guest memory for the created VM. In this dummy example we //! are adding only one memory region and write the code in one memory page. //! 4. Create a vCPU using the VM object. The vCPU is used for running //! [vCPU specific ioctls](struct.VcpuFd.html). //! 5. Setup x86 specific general purpose registers and special registers. For //! details about how and why these registers are set, please check the //! [LWN article](https://lwn.net/Articles/658511/) on which this example is //! built. //! 6. Run the vCPU code in a loop and check the //! [exit reasons](enum.VcpuExit.html). //! //! //! ```rust //! extern crate kvm_ioctls; //! extern crate kvm_bindings; //! //! use kvm_ioctls::{Kvm, VmFd, VcpuFd}; //! use kvm_ioctls::VcpuExit; //! //! #[cfg(target_arch = "x86_64")] //! fn main(){ //! use std::io::Write; //! use std::slice; //! use std::ptr::null_mut; //! //! use kvm_bindings::KVM_MEM_LOG_DIRTY_PAGES; //! use kvm_bindings::kvm_userspace_memory_region; //! //! // 1. Instantiate KVM. //! let kvm = Kvm::new().unwrap(); //! //! // 2. Create a VM. //! let vm = kvm.create_vm().unwrap(); //! //! // 3. Initialize Guest Memory. //! let mem_size = 0x4000; //! let guest_addr: u64 = 0x1000; //! let load_addr: *mut u8 = unsafe { //! libc::mmap( //! null_mut(), //! mem_size, //! libc::PROT_READ | libc::PROT_WRITE, //! libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE, //! -1, //! 0, //! ) as *mut u8 //! }; //! //! let slot = 0; //! // When initializing the guest memory slot specify the //! // `KVM_MEM_LOG_DIRTY_PAGES` to enable the dirty log. //! let mem_region = kvm_userspace_memory_region { //! slot, //! guest_phys_addr: guest_addr, //! memory_size: mem_size as u64, //! userspace_addr: load_addr as u64, //! flags: KVM_MEM_LOG_DIRTY_PAGES, //! }; //! 
unsafe { vm.set_user_memory_region(mem_region).unwrap() }; //! //! //! let x86_code = [ //! 0xba, 0xf8, 0x03, /* mov $0x3f8, %dx */ //! 0x00, 0xd8, /* add %bl, %al */ //! 0x04, b'0', /* add $'0', %al */ //! 0xee, /* out %al, %dx */ //! 0xec, /* in %dx, %al */ //! 0xc6, 0x06, 0x00, 0x80, 0x00, /* movl $0, (0x8000); This generates a MMIO Write.*/ //! 0x8a, 0x16, 0x00, 0x80, /* movl (0x8000), %dl; This generates a MMIO Read.*/ //! 0xf4, /* hlt */ //! ]; //! //! // Write the code in the guest memory. This will generate a dirty page. //! unsafe { //! let mut slice = slice::from_raw_parts_mut(load_addr, mem_size); //! slice.write(&x86_code).unwrap(); //! } //! //! // 4. Create one vCPU. //! let vcpu_fd = vm.create_vcpu(0).unwrap(); //! //! // 5. Initialize general purpose and special registers. //! let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap(); //! vcpu_sregs.cs.base = 0; //! vcpu_sregs.cs.selector = 0; //! vcpu_fd.set_sregs(&vcpu_sregs).unwrap(); //! //! let mut vcpu_regs = vcpu_fd.get_regs().unwrap(); //! vcpu_regs.rip = guest_addr; //! vcpu_regs.rax = 2; //! vcpu_regs.rbx = 3; //! vcpu_regs.rflags = 2; //! vcpu_fd.set_regs(&vcpu_regs).unwrap(); //! //! // 6. Run code on the vCPU. //! loop { //! match vcpu_fd.run().expect("run failed") { //! VcpuExit::IoIn(addr, data) => { //! println!( //! "Received an I/O in exit. Address: {:#x}. Data: {:#x}", //! addr, //! data[0], //! ); //! } //! VcpuExit::IoOut(addr, data) => { //! println!( //! "Received an I/O out exit. Address: {:#x}. Data: {:#x}", //! addr, //! data[0], //! ); //! } //! VcpuExit::MmioRead(addr, data) => { //! println!( //! "Received an MMIO Read Request for the address {:#x}.", //! addr, //! ); //! } //! VcpuExit::MmioWrite(addr, data) => { //! println!( //! "Received an MMIO Write Request to the address {:#x}.", //! addr, //! ); //! } //! VcpuExit::Hlt => { //! // The code snippet dirties 1 page when it is loaded in memory //! let dirty_pages_bitmap = vm.get_dirty_log(slot, mem_size).unwrap(); //! 
let dirty_pages = dirty_pages_bitmap //! .into_iter() //! .map(|page| page.count_ones()) //! .fold(0, |dirty_page_count, i| dirty_page_count + i); //! assert_eq!(dirty_pages, 1); //! break; //! } //! r => panic!("Unexpected exit reason: {:?}", r), //! } //! } //! } //! //! #[cfg(not(target_arch = "x86_64"))] //! fn main() { //! println!("This code example only works on x86_64."); //! } //! ``` extern crate kvm_bindings; extern crate libc; #[macro_use] mod sys_ioctl; #[macro_use] mod kvm_ioctls; mod cap; mod ioctls; pub use cap::Cap; pub use ioctls::device::DeviceFd; pub use ioctls::system::Kvm; pub use ioctls::vcpu::{VcpuExit, VcpuFd}; pub use ioctls::vm::{IoEventAddress, NoDatamatch, VmFd}; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] pub use ioctls::CpuId; // The following example is used to verify that our public // structures are exported properly. /// # Example /// /// ``` /// #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] /// use kvm_ioctls::{KvmRunWrapper, Result}; /// ``` pub use ioctls::{KvmRunWrapper, Result}; /// Maximum number of CPUID entries that can be returned by a call to KVM ioctls. /// /// This value is taken from Linux Kernel v4.14.13 (arch/x86/include/asm/kvm_host.h). /// It can be used for calls to [get_supported_cpuid](struct.Kvm.html#method.get_supported_cpuid) and /// [get_emulated_cpuid](struct.Kvm.html#method.get_emulated_cpuid). pub const MAX_KVM_CPUID_ENTRIES: usize = 80; kvm-ioctls-0.2.0/src/sys_ioctl.rs0100644€T¡ŽÔ€T 000000121611350315155600160200ustar0000000000000000#![allow(dead_code)] // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. //! Macros and wrapper functions for dealing with ioctls. 
use libc; use std::os::raw::{c_int, c_uint, c_ulong, c_void}; use std::os::unix::io::AsRawFd; /// Raw macro to declare a function that returns an ioctl number. #[macro_export] macro_rules! ioctl_ioc_nr { ($name:ident, $dir:expr, $ty:expr, $nr:expr, $size:expr) => { #[allow(non_snake_case)] pub fn $name() -> ::std::os::raw::c_ulong { u64::from( ($dir << $crate::sys_ioctl::_IOC_DIRSHIFT) | ($ty << $crate::sys_ioctl::_IOC_TYPESHIFT) | ($nr << $crate::sys_ioctl::_IOC_NRSHIFT) | ($size << $crate::sys_ioctl::_IOC_SIZESHIFT), ) } }; } /// Declare an ioctl that transfers no data. #[macro_export] macro_rules! ioctl_io_nr { ($name:ident, $ty:expr, $nr:expr) => { ioctl_ioc_nr!($name, $crate::sys_ioctl::_IOC_NONE, $ty, $nr, 0); }; } /// Declare an ioctl that reads data. #[macro_export] macro_rules! ioctl_ior_nr { ($name:ident, $ty:expr, $nr:expr, $size:ty) => { ioctl_ioc_nr!( $name, $crate::sys_ioctl::_IOC_READ, $ty, $nr, ::std::mem::size_of::<$size>() as u32 ); }; } /// Declare an ioctl that writes data. #[macro_export] macro_rules! ioctl_iow_nr { ($name:ident, $ty:expr, $nr:expr, $size:ty) => { ioctl_ioc_nr!( $name, $crate::sys_ioctl::_IOC_WRITE, $ty, $nr, ::std::mem::size_of::<$size>() as u32 ); }; } /// Declare an ioctl that reads and writes data. #[macro_export] macro_rules! 
ioctl_iowr_nr { ($name:ident, $ty:expr, $nr:expr, $size:ty) => { ioctl_ioc_nr!( $name, $crate::sys_ioctl::_IOC_READ | $crate::sys_ioctl::_IOC_WRITE, $ty, $nr, ::std::mem::size_of::<$size>() as u32 ); }; } pub const _IOC_NRBITS: c_uint = 8; pub const _IOC_TYPEBITS: c_uint = 8; pub const _IOC_SIZEBITS: c_uint = 14; pub const _IOC_DIRBITS: c_uint = 2; pub const _IOC_NRMASK: c_uint = 255; pub const _IOC_TYPEMASK: c_uint = 255; pub const _IOC_SIZEMASK: c_uint = 16383; pub const _IOC_DIRMASK: c_uint = 3; pub const _IOC_NRSHIFT: c_uint = 0; pub const _IOC_TYPESHIFT: c_uint = 8; pub const _IOC_SIZESHIFT: c_uint = 16; pub const _IOC_DIRSHIFT: c_uint = 30; pub const _IOC_NONE: c_uint = 0; pub const _IOC_WRITE: c_uint = 1; pub const _IOC_READ: c_uint = 2; pub const IOC_IN: c_uint = 1_073_741_824; pub const IOC_OUT: c_uint = 2_147_483_648; pub const IOC_INOUT: c_uint = 3_221_225_472; pub const IOCSIZE_MASK: c_uint = 1_073_676_288; pub const IOCSIZE_SHIFT: c_uint = 16; // The type of the `req` parameter is different for the `musl` library. This will enable // successful build for other non-musl libraries. #[cfg(target_env = "musl")] type IoctlRequest = c_int; #[cfg(not(target_env = "musl"))] type IoctlRequest = c_ulong; /// Run an ioctl with no arguments. pub unsafe fn ioctl(fd: &F, req: c_ulong) -> c_int { libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, 0) } /// Run an ioctl with a single value argument. pub unsafe fn ioctl_with_val(fd: &F, req: c_ulong, arg: c_ulong) -> c_int { libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg) } /// Run an ioctl with an immutable reference. pub unsafe fn ioctl_with_ref(fd: &F, req: c_ulong, arg: &T) -> c_int { libc::ioctl( fd.as_raw_fd(), req as IoctlRequest, arg as *const T as *const c_void, ) } /// Run an ioctl with a mutable reference. pub unsafe fn ioctl_with_mut_ref(fd: &F, req: c_ulong, arg: &mut T) -> c_int { libc::ioctl( fd.as_raw_fd(), req as IoctlRequest, arg as *mut T as *mut c_void, ) } /// Run an ioctl with a raw pointer. 
pub unsafe fn ioctl_with_ptr(fd: &F, req: c_ulong, arg: *const T) -> c_int { libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg as *const c_void) } /// Run an ioctl with a mutable raw pointer. pub unsafe fn ioctl_with_mut_ptr(fd: &F, req: c_ulong, arg: *mut T) -> c_int { libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg as *mut c_void) } #[cfg(test)] mod tests { const TUNTAP: ::std::os::raw::c_uint = 0x54; const KVMIO: ::std::os::raw::c_uint = 0xAE; ioctl_io_nr!(KVM_CREATE_VM, KVMIO, 0x01); ioctl_ior_nr!(TUNGETFEATURES, TUNTAP, 0xcf, ::std::os::raw::c_uint); ioctl_iow_nr!(TUNSETQUEUE, TUNTAP, 0xd9, ::std::os::raw::c_int); ioctl_iowr_nr!(KVM_GET_MSR_INDEX_LIST, KVMIO, 0x2, ::std::os::raw::c_int); #[test] fn ioctl_macros() { assert_eq!(0x0000AE01, KVM_CREATE_VM()); assert_eq!(0x800454CF, TUNGETFEATURES()); assert_eq!(0x400454D9, TUNSETQUEUE()); assert_eq!(0xC004AE02, KVM_GET_MSR_INDEX_LIST()); } } kvm-ioctls-0.2.0/tests/conftest.py0100644€T¡ŽÔ€T 000000011011350324253600162030ustar0000000000000000import pytest PROFILE_CI="ci" PROFILE_DEVEL="devel" def pytest_addoption(parser): parser.addoption( "--profile", default=PROFILE_CI, choices=[PROFILE_CI, PROFILE_DEVEL], help="Profile for running the test: {} or {}".format( PROFILE_CI, PROFILE_DEVEL ) ) @pytest.fixture def profile(request): return request.config.getoption("--profile") # This is used for defining global variables in pytest. def pytest_configure(): pytest.profile_ci = PROFILE_CI pytest.profile_devel = PROFILE_DEVEL kvm-ioctls-0.2.0/tests/coverage0100644€T¡ŽÔ€T 000000000031350416426200155220ustar000000000000000091 kvm-ioctls-0.2.0/tests/test_coverage.py0100644€T¡ŽÔ€T 000000073471350324253600172320ustar0000000000000000# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 OR MIT """Test the coverage and update the threshold when coverage is increased.""" import os, re, shutil, subprocess import pytest def _get_current_coverage(): """Helper function that returns the coverage computed with kcov.""" kcov_ouput_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), "kcov_output" ) # By default the build output for kcov and unit tests are both in the debug # directory. This causes some linker errors that I haven't investigated. # Error: error: linking with `cc` failed: exit code: 1 # An easy fix is to have separate build directories for kcov & unit tests. kcov_build_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), "kcov_build" ) # Remove kcov output and build directory to be sure we are always working # on a clean environment. shutil.rmtree(kcov_ouput_dir, ignore_errors=True) shutil.rmtree(kcov_build_dir, ignore_errors=True) exclude_pattern = ( '${CARGO_HOME:-$HOME/.cargo/},' 'usr/lib/,' 'lib/' ) exclude_region = "'mod tests {'" kcov_cmd = "CARGO_TARGET_DIR={} cargo kcov --all " \ "--output {} -- " \ "--exclude-region={} " \ "--exclude-pattern={} " \ "--verify".format( kcov_build_dir, kcov_ouput_dir, exclude_region, exclude_pattern ) subprocess.run(kcov_cmd, shell=True, check=True) # Read the coverage reported by kcov. coverage_file = os.path.join(kcov_ouput_dir, 'index.js') with open(coverage_file) as cov_output: coverage = float(re.findall( r'"covered":"(\d+\.\d)"', cov_output.read() )[0]) # Remove coverage related directories. shutil.rmtree(kcov_ouput_dir, ignore_errors=True) shutil.rmtree(kcov_build_dir, ignore_errors=True) return coverage def _get_previous_coverage(): """Helper function that returns the last reported coverage.""" coverage_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'coverage' ) # The first and only line of the file contains the coverage. 
with open(coverage_path) as f: coverage = f.readline() return float(coverage.strip()) def _update_coverage(cov_value): """Updates the coverage in the coverage file.""" coverage_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'coverage' ) with open(coverage_path, "w") as f: f.write(str(cov_value)) def test_coverage(profile): current_coverage = _get_current_coverage() previous_coverage = _get_previous_coverage() if previous_coverage < current_coverage: if profile == pytest.profile_ci: # In the CI Profile we expect the coverage to be manually updated. assert False, "Coverage is increased from {} to {}. " \ "Please update the coverage in " \ "tests/coverage.".format( previous_coverage, current_coverage ) elif profile == pytest.profile_devel: _update_coverage(current_coverage) else: # This should never happen because pytest should only accept # the valid test profiles specified with `choices` in # `pytest_addoption`. assert False, "Invalid test profile." elif previous_coverage > current_coverage: diff = float(previous_coverage - current_coverage) assert False, "Coverage drops by {:.2f}%. Please add unit tests for" \ "the uncovered lines.".format(diff) kvm-ioctls-0.2.0/.idea/misc.xml0100644€T¡ŽÔ€T 000000006271345061561200153130ustar0000000000000000 kvm-ioctls-0.2.0/.idea/modules.xml0100644€T¡ŽÔ€T 000000004041345061532300160200ustar0000000000000000 kvm-ioctls-0.2.0/.idea/vcs.xml0100644€T¡ŽÔ€T 000000002471345061532300151500ustar0000000000000000 kvm-ioctls-0.2.0/.idea/workspace.xml0100644€T¡ŽÔ€T 000001402461350415001600163500ustar0000000000000000 get_dirty get_dir bindings kvm_msr MAX_KVM_CPUID_ENTRIES c_void cap fromr mmiow get_ run_code Result<T clone test_clo fandree/rust-vmm-dev fandree set_user ptr impl Se impl clone fs_imp::File fs_imp: fs_imp create_irqchi spi v1 clipp clip coverage kvmrunwr //! rustvmm/dev:v1 v2