vm-memory-0.14.0/.buildkite/custom-tests.json000064400000000000000000000037751046102023000172510ustar 00000000000000{ "tests": [ { "test_name": "build-gnu-mmap", "command": "cargo build --release --features=xen", "platform": ["x86_64", "aarch64"] }, { "test_name": "build-gnu-mmap-no-xen", "command": "cargo build --release --features=backend-mmap", "platform": ["x86_64", "aarch64"] }, { "test_name": "build-musl-mmap", "command": "cargo build --release --features=xen --target {target_platform}-unknown-linux-musl", "platform": ["x86_64", "aarch64"] }, { "test_name": "build-musl-mmap-no-xen", "command": "cargo build --release --features=backend-mmap --target {target_platform}-unknown-linux-musl", "platform": ["x86_64", "aarch64"] }, { "test_name": "miri", "command": "RUST_BACKTRACE=1 MIRIFLAGS='-Zmiri-disable-isolation -Zmiri-panic-on-unsupported -Zmiri-backtrace=full' cargo +nightly miri test --features backend-mmap", "platform": ["x86_64", "aarch64"] }, { "test_name": "unittests-gnu-no-xen", "command": "cargo test --features 'backend-bitmap backend-mmap backend-atomic' --workspace", "platform": [ "x86_64", "aarch64" ] }, { "test_name": "unittests-musl-no-xen", "command": "cargo test --features 'backend-bitmap backend-mmap backend-atomic' --workspace --target {target_platform}-unknown-linux-musl", "platform": [ "x86_64", "aarch64" ] }, { "test_name": "clippy-no-xen", "command": "cargo clippy --workspace --bins --examples --benches --features 'backend-bitmap backend-mmap backend-atomic' --all-targets -- -D warnings -D clippy::undocumented_unsafe_blocks", "platform": [ "x86_64", "aarch64" ] }, { "test_name": "check-warnings-no-xen", "command": "RUSTFLAGS=\"-D warnings\" cargo check --all-targets --features 'backend-bitmap backend-mmap backend-atomic' --workspace", "platform": [ "x86_64", "aarch64" ] } ] } vm-memory-0.14.0/.buildkite/pipeline.windows.yml000064400000000000000000000032541046102023000177150ustar 00000000000000steps: - label: "build-msvc-x86" commands: - cargo build --release retry: automatic: true agents: platform: x86_64 os: windows plugins: - docker#v3.7.0: image: "lpetrut/rust_win_buildtools" always-pull: true - label: "build-msvc-x86-mmap" commands: - cargo build --release --features=backend-mmap retry: automatic: true agents: platform: x86_64 os: windows plugins: - docker#v3.7.0: image: "lpetrut/rust_win_buildtools" always-pull: true - label: "style" command: cargo fmt --all -- --check retry: automatic: true agents: platform: x86_64 os: windows plugins: - docker#v3.7.0: image: "lpetrut/rust_win_buildtools" always-pull: true - label: "unittests-msvc-x86" commands: - cargo test --all-features retry: automatic: true agents: platform: x86_64 os: windows plugins: - docker#v3.7.0: image: "lpetrut/rust_win_buildtools" always-pull: true - label: "clippy-x86" commands: - cargo clippy --all retry: automatic: true agents: platform: x86_64 os: windows plugins: - docker#v3.7.0: image: "lpetrut/rust_win_buildtools" always-pull: true - label: "check-warnings-x86" commands: - cargo check --all-targets retry: automatic: true agents: platform: x86_64 os: windows plugins: - docker#v3.7.0: image: "lpetrut/rust_win_buildtools" always-pull: true environment: - "RUSTFLAGS=-D warnings" vm-memory-0.14.0/.cargo/audit.toml000064400000000000000000000012041046102023000150070ustar 00000000000000[advisories] ignore = [ # serde_cbor is an unmaintained dependency introduced by criterion. # We are using criterion only for benchmarks, so we can ignore # this vulnerability until criterion is fixing this. 
# See https://github.com/bheisler/criterion.rs/issues/534. "RUSTSEC-2021-0127", # atty is unmaintained (the unsound problem doesn't seem to impact us). # We are ignoring this advisory because it's only used by criterion, # and we are using criterion for benchmarks. This is not a problem for # production use cases. Also, criterion did not update the dependency, # so there is not much else we can do. "RUSTSEC-2021-0145" ] vm-memory-0.14.0/.cargo/config000064400000000000000000000001561046102023000142010ustar 00000000000000[target.aarch64-unknown-linux-musl] rustflags = [ "-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"] vm-memory-0.14.0/.cargo_vcs_info.json0000644000000001360000000000100130460ustar { "git": { "sha1": "b8bf1dbcca5d85a158cf7a8ed6cb19774428819c" }, "path_in_vcs": "" }vm-memory-0.14.0/.github/dependabot.yml000064400000000000000000000002101046102023000160170ustar 00000000000000version: 2 updates: - package-ecosystem: gitsubmodule directory: "/" schedule: interval: monthly open-pull-requests-limit: 10 vm-memory-0.14.0/.gitignore000064400000000000000000000000361046102023000136250ustar 00000000000000/target **/*.rs.bk Cargo.lock vm-memory-0.14.0/.gitmodules000064400000000000000000000001411046102023000140070ustar 00000000000000[submodule "rust-vmm-ci"] path = rust-vmm-ci url = https://github.com/rust-vmm/rust-vmm-ci.git vm-memory-0.14.0/CHANGELOG.md000064400000000000000000000147701046102023000134600ustar 00000000000000# Changelog ## Upcoming version ### Added ### Changed ### Fixed ### Removed ### Deprecated ## [v0.14.0] ### Added - [[#266](https://github.com/rust-vmm/vm-memory/pull/266)] Derive `Debug` for several types that were missing it. ### Changed - [[#274](https://github.com/rust-vmm/vm-memory/pull/274)] Drop `Default` as requirement for `ByteValued`. ## [v0.13.1] ### Added - [[#256](https://github.com/rust-vmm/vm-memory/pull/256)] Implement `WriteVolatile` for `std::io::Stdout`. - [[#256](https://github.com/rust-vmm/vm-memory/pull/256)] Implement `WriteVolatile` for `std::vec::Vec`. - [[#256](https://github.com/rust-vmm/vm-memory/pull/256)] Implement `WriteVolatile` for `Cursor<&mut [u8]>`. - [[#256](https://github.com/rust-vmm/vm-memory/pull/256)] Implement `ReadVolatile` for `Cursor`. ## [v0.13.0] ### Added - [[#247]](https://github.com/rust-vmm/vm-memory/pull/247) Add `ReadVolatile` and `WriteVolatile` traits which are equivalents of `Read`/`Write` with volatile access semantics. ### Changed - [[#247]](https://github.com/rust-vmm/vm-memory/pull/247) Deprecate `Bytes::{read_from, read_exact_from, write_to, write_all_to}`. Instead use `ReadVolatile`/`WriteVolatile`, which do not incur the performance penalty of copying to hypervisor memory due to `Read`/`Write` being incompatible with volatile semantics (see also #217). ## [v0.12.2] ### Fixed - [[#251]](https://github.com/rust-vmm/vm-memory/pull/251): Inserted checks that verify that the value returned by `VolatileMemory::get_slice` is of the correct length. ### Deprecated - [[#244]](https://github.com/rust-vmm/vm-memory/pull/241) Deprecate volatile memory's `as_ptr()` interfaces. The new interfaces to be used instead are: `ptr_guard()` and `ptr_guard_mut()`. ## [v0.12.1] ### Fixed - [[#241]](https://github.com/rust-vmm/vm-memory/pull/245) mmap_xen: Don't drop the FileOffset while in use #245 ## [v0.12.0] ### Added - [[#241]](https://github.com/rust-vmm/vm-memory/pull/241) Add Xen memory mapping support: Foreign and Grant. 
Add new API for accessing pointers to volatile slices, as `as_ptr()` can't be used with Xen's Grant mapping. - [[#237]](https://github.com/rust-vmm/vm-memory/pull/237) Implement `ByteValued` for `i/u128`. ## [v0.11.0] ### Added - [[#216]](https://github.com/rust-vmm/vm-memory/pull/216) Add `GuestRegionMmap::from_region`. ### Fixed - [[#217]](https://github.com/rust-vmm/vm-memory/pull/217) Fix vm-memory internally taking rust-style slices to guest memory in ways that could potentially cause undefined behavior. Removes/deprecates various `as_slice`/`as_slice_mut` methods whose usage violated rust's aliasing rules, as well as an unsound `impl<'a> VolatileMemory for &'a mut [u8]`. ## [v0.10.0] ### Changed - [[#208]](https://github.com/rust-vmm/vm-memory/issues/208) Updated vmm-sys-util dependency to v0.11.0 - [[#203]](https://github.com/rust-vmm/vm-memory/pull/203) Switched to Rust edition 2021. ## [v0.9.0] ### Fixed - [[#195]](https://github.com/rust-vmm/vm-memory/issues/195): `mmap::check_file_offset` is doing the correct size validation for block and char devices as well. ### Changed - [[#198]](https://github.com/rust-vmm/vm-memory/pull/198): atomic: enable 64 bit atomics on ppc64le and s390x. - [[#200]](https://github.com/rust-vmm/vm-memory/pull/200): docs: enable all features in `docs.rs`. - [[#199]](https://github.com/rust-vmm/vm-memory/issues/199): Update the way the dependencies are pulled such that we don't end up with incompatible versions. ## [v0.8.0] ### Fixed - [[#190]](https://github.com/rust-vmm/vm-memory/pull/190): `VolatileSlice::read/write` when input slice is empty. ## [v0.7.0] ### Changed - [[#176]](https://github.com/rust-vmm/vm-memory/pull/176): Relax the trait bounds of `Bytes` auto impl for `T: GuestMemory` - [[#178]](https://github.com/rust-vmm/vm-memory/pull/178): `MmapRegion::build_raw` no longer requires that the length of the region is a multiple of the page size. ## [v0.6.0] ### Added - [[#160]](https://github.com/rust-vmm/vm-memory/pull/160): Add `ArcRef` and `AtomicBitmapArc` bitmap backend implementations. - [[#149]](https://github.com/rust-vmm/vm-memory/issues/149): Implement builder for MmapRegion. - [[#140]](https://github.com/rust-vmm/vm-memory/issues/140): Add dirty bitmap tracking abstractions. ### Deprecated - [[#133]](https://github.com/rust-vmm/vm-memory/issues/8): Deprecate `GuestMemory::with_regions()`, `GuestMemory::with_regions_mut()`, `GuestMemory::map_and_fold()`. ## [v0.5.0] ### Added - [[#8]](https://github.com/rust-vmm/vm-memory/issues/8): Add GuestMemory method to return an Iterator - [[#120]](https://github.com/rust-vmm/vm-memory/pull/120): Add is_hugetlbfs() to GuestMemoryRegion - [[#126]](https://github.com/rust-vmm/vm-memory/pull/126): Add VolatileSlice::split_at() - [[#128]](https://github.com/rust-vmm/vm-memory/pull/128): Add VolatileSlice::subslice() ## [v0.4.0] ### Fixed - [[#100]](https://github.com/rust-vmm/vm-memory/issues/100): Performance degradation after fixing [#95](https://github.com/rust-vmm/vm-memory/pull/95). - [[#122]](https://github.com/rust-vmm/vm-memory/pull/122): atomic, Cargo.toml: Update for arc-swap 1.0.0. ## [v0.3.0] ### Added - [[#109]](https://github.com/rust-vmm/vm-memory/pull/109): Added `build_raw` to `MmapRegion` which can be used to operate on externally created mappings. - [[#101]](https://github.com/rust-vmm/vm-memory/pull/101): Added `check_range` for GuestMemory which could be used to validate a range of guest memory. 
- [[#115]](https://github.com/rust-vmm/vm-memory/pull/115): Add methods for atomic access to `Bytes`. ### Fixed - [[#93]](https://github.com/rust-vmm/vm-memory/issues/93): DoS issue when using virtio with rust-vmm/vm-memory. - [[#106]](https://github.com/rust-vmm/vm-memory/issues/106): Asserts trigger on zero-length access. ### Removed - `integer-atomics` is no longer a distinct feature of the crate. ## [v0.2.0] ### Added - [[#76]](https://github.com/rust-vmm/vm-memory/issues/76): Added `get_slice` and `as_volatile_slice` to `GuestMemoryRegion`. - [[#82]](https://github.com/rust-vmm/vm-memory/issues/82): Added `Clone` bound for `GuestAddressSpace::T`, the return value of `GuestAddressSpace::memory()`. - [[#88]](https://github.com/rust-vmm/vm-memory/issues/88): Added `as_bytes` for `ByteValued` which can be used for reading into POD structures from raw bytes. ## [v0.1.0] ### Added - Added traits for working with VM memory. - Added a mmap based implemention for the Guest Memory. vm-memory-0.14.0/CODEOWNERS000064400000000000000000000000641046102023000132310ustar 00000000000000* @alexandruag @bonzini @jiangliu @tkreuzer @roypat vm-memory-0.14.0/Cargo.toml0000644000000032020000000000100110410ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "vm-memory" version = "0.14.0" authors = ["Liu Jiang "] autobenches = false description = "Safe abstractions for accessing the VM physical memory" readme = "README.md" keywords = ["memory"] categories = ["memory-management"] license = "Apache-2.0 OR BSD-3-Clause" repository = "https://github.com/rust-vmm/vm-memory" [package.metadata.docs.rs] all-features = true [profile.bench] lto = true codegen-units = 1 [[bench]] name = "main" harness = false [dependencies.arc-swap] version = "1.0.0" optional = true [dependencies.bitflags] version = "2.4.0" optional = true [dependencies.libc] version = "0.2.39" [dependencies.thiserror] version = "1.0.40" [dependencies.vmm-sys-util] version = "0.12.1" optional = true [dev-dependencies.criterion] version = "0.3.0" [dev-dependencies.matches] version = "0.1.0" [dev-dependencies.vmm-sys-util] version = "0.12.1" [features] backend-atomic = ["arc-swap"] backend-bitmap = [] backend-mmap = [] default = [] xen = [ "backend-mmap", "bitflags", "vmm-sys-util", ] [target."cfg(windows)".dependencies.winapi] version = "0.3" features = [ "errhandlingapi", "sysinfoapi", ] vm-memory-0.14.0/Cargo.toml.orig000064400000000000000000000020321046102023000145220ustar 00000000000000[package] name = "vm-memory" version = "0.14.0" description = "Safe abstractions for accessing the VM physical memory" keywords = ["memory"] categories = ["memory-management"] authors = ["Liu Jiang "] repository = "https://github.com/rust-vmm/vm-memory" readme = "README.md" license = "Apache-2.0 OR BSD-3-Clause" edition = "2021" autobenches = false [features] default = [] backend-bitmap = [] backend-mmap = [] backend-atomic = ["arc-swap"] xen = ["backend-mmap", "bitflags", "vmm-sys-util"] [dependencies] libc = "0.2.39" arc-swap = { version = "1.0.0", optional = true } bitflags = { version = "2.4.0", optional = true } 
thiserror = "1.0.40" vmm-sys-util = { version = "0.12.1", optional = true } [target.'cfg(windows)'.dependencies.winapi] version = "0.3" features = ["errhandlingapi", "sysinfoapi"] [dev-dependencies] criterion = "0.3.0" matches = "0.1.0" vmm-sys-util = "0.12.1" [[bench]] name = "main" harness = false [profile.bench] lto = true codegen-units = 1 [package.metadata.docs.rs] all-features = true vm-memory-0.14.0/DESIGN.md000064400000000000000000000150711046102023000131350ustar 00000000000000# Design ## Objectives - Provide a set of traits for accessing and configuring the physical memory of a virtual machine. - Provide a clean abstraction of the VM memory such that rust-vmm components can use it without depending on the implementation details specific to different VMMs. ## API Principles - Define consumer side interfaces to access VM's physical memory. - Do not define provider side interfaces to supply VM physical memory. The `vm-memory` crate focuses on defining consumer side interfaces to access the physical memory of the VM. It does not define how the underlying VM memory provider is implemented. Lightweight VMMs like [CrosVM](https://chromium.googlesource.com/chromiumos/platform/crosvm/) and [Firecracker](https://github.com/firecracker-microvm/firecracker) can make assumptions about the structure of VM's physical memory and implement a lightweight backend to access it. For VMMs like [Qemu](https://www.qemu.org/), a high performance and full functionality backend may be implemented with less assumptions. ## Architecture The `vm-memory` is derived from two upstream projects: - [CrosVM](https://chromium.googlesource.com/chromiumos/platform/crosvm/) commit 186eb8b0db644892e8ffba8344efe3492bb2b823 - [Firecracker](https://github.com/firecracker-microvm/firecracker) commit 80128ea61b305a27df1f751d70415b04b503eae7 The high level abstraction of the VM memory has been heavily refactored to provide a VMM agnostic interface. The `vm-memory` crate could be divided into four logic parts as: - [Abstraction of Address Space](#abstraction-of-address-space) - [Specialization for Virtual Machine Physical Address Space](#specialization-for-virtual-machine-physical-address-space) - [Backend Implementation Based on `mmap`](#backend-implementation-based-on-`mmap`) - [Utilities and helpers](#utilities-and-helpers) ### Address Space Abstraction The address space abstraction contains traits and implementations for working with addresses as follows: - `AddressValue`: stores the raw value of an address. Typically `u32`, `u64` or `usize` are used to store the raw value. Pointers such as `*u8`, can not be used as an implementation of `AddressValue` because the `Add` and `Sub` traits are not implemented for that type. - `Address`: implementation of `AddressValue`. - `Bytes`: trait for volatile access to memory. The `Bytes` trait can be parameterized with types that represent addresses, in order to enforce that addresses are used with the right "kind" of volatile memory. - `VolatileMemory`: basic implementation of volatile access to memory. Implements `Bytes`. To make the abstraction as generic as possible, all of above traits only define methods to access the address space, and they never define methods to manage (create, delete, insert, remove etc) address spaces. This way, the address space consumers may be decoupled from the address space provider (typically a VMM). 
### Specialization for Virtual Machine Physical Address Space

The generic address space abstractions are specialized to access the physical memory of the VM using the following traits:

- `GuestAddress`: represents a guest physical address (GPA). On ARM64, a 32-bit VMM/hypervisor can be used to support a 64-bit VM. For simplicity, `u64` is used to store the raw value, whether the virtual machine is 32-bit or 64-bit.
- `GuestMemoryRegion`: represents a contiguous region of the VM memory.
- `GuestMemory`: represents a collection of `GuestMemoryRegion` objects. The main responsibilities of the `GuestMemory` trait are:
  - hide the details of accessing physical addresses (for example complex hierarchical structures).
  - map an address request to a `GuestMemoryRegion` object and relay the request to it.
  - handle cases where an access request spans two or more `GuestMemoryRegion` objects.

The VM memory consumers should only rely on the traits and structs defined here to access the VM's physical memory, and not on the implementation details of those traits.

### Backend Implementation Based on `mmap`

Provides an implementation of the `GuestMemory` trait by mmapping the VM's physical memory into the current process.

- `MmapRegion`: wraps an mmap of a contiguous range of physical memory and provides methods for accessing the mapped memory.
- `GuestRegionMmap`: implementation of `GuestMemoryRegion`, providing a wrapper used to map a VM physical address into a `(mmap_region, offset)` tuple.
- `GuestMemoryMmap`: implementation of `GuestMemory` that manages a collection of `GuestRegionMmap` objects for a VM.

One of the main responsibilities of `GuestMemoryMmap` is to handle the use cases where an access request crosses a memory region boundary. This scenario may be triggered when memory hotplug is supported. There is a trade-off between simplicity and code complexity:

- The following pattern, currently used in both CrosVM and Firecracker, is simple but fails when the request crosses a region boundary.

```rust
let guest_memory_mmap: GuestMemoryMmap = ...
let addr: GuestAddress = ...
let buf = &mut [0u8; 5];
let result = guest_memory_mmap.find_region(addr).unwrap().write(buf, addr);
```

- To support requests crossing a region boundary, the following update is needed:

```rust
let guest_memory_mmap: GuestMemoryMmap = ...
let addr: GuestAddress = ...
let buf = &mut [0u8; 5];
let result = guest_memory_mmap.write(buf, addr);
```

### Utilities and Helpers

The following utilities and helper traits/macros are imported from the [crosvm project](https://chromium.googlesource.com/chromiumos/platform/crosvm/) with minor changes:

- `ByteValued` (originally `DataInit`): types which are safe to be initialized from raw data. A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a byte array. This is generally true for all plain-old-data structs. It is notably not true for any type that includes a reference.
- `{Le,Be}_{16,32,64}`: explicit endian types useful for embedding in structs or reinterpreting data.

## Relationships between Traits, Structs and Types

**Traits**:

- `Address` inherits `AddressValue`
- `GuestMemoryRegion` inherits `Bytes<MemoryRegionAddress>`. The `Bytes` trait must be implemented.
- `GuestMemory` has a generic implementation of `Bytes<GuestAddress>`.
**Types**: - `GuestAddress`: `Address` - `MemoryRegionAddress`: `Address` **Structs**: - `MmapRegion` implements `VolatileMemory` - `GuestRegionMmap` implements `Bytes + GuestMemoryRegion` - `GuestMemoryMmap` implements `GuestMemory` - `VolatileSlice` implements `Bytes + VolatileMemory` vm-memory-0.14.0/LICENSE-APACHE000064400000000000000000000261361046102023000135720ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. vm-memory-0.14.0/LICENSE-BSD-3-Clause000064400000000000000000000030321046102023000146210ustar 00000000000000// Copyright 2017 The Chromium OS Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vm-memory-0.14.0/README.md000064400000000000000000000063541046102023000131250ustar 00000000000000# vm-memory [![crates.io](https://img.shields.io/crates/v/vm-memory)](https://crates.io/crates/vm-memory) [![docs.rs](https://img.shields.io/docsrs/vm-memory)](https://docs.rs/vm-memory/) ## Design In a typical Virtual Machine Monitor (VMM) there are several components, such as boot loader, virtual device drivers, virtio backend drivers and vhost drivers, that need to access the VM physical memory. The `vm-memory` crate provides a set of traits to decouple VM memory consumers from VM memory providers. Based on these traits, VM memory consumers can access the physical memory of the VM without knowing the implementation details of the VM memory provider. Thus VMM components based on these traits can be shared and reused by multiple virtualization solutions. The detailed design of the `vm-memory` crate can be found [here](DESIGN.md). ### Platform Support - Arch: x86, AMD64, ARM64 - OS: Linux/Unix/Windows ### Xen support Supporting Xen requires special handling while mapping the guest memory and hence a separate feature is provided in the crate: `xen`. Mapping the guest memory for Xen requires an `ioctl()` to be issued along with `mmap()` for the memory area. The arguments for the `ioctl()` are received via the `vhost-user` protocol's memory region area. Xen allows two different mapping models: `Foreign` and `Grant`. 
In `Foreign` mapping model, the entire guest address space is mapped at once, in advance. In `Grant` mapping model, the memory for few regions, like those representing the virtqueues, is mapped in advance. The rest of the memory regions are mapped (partially) only while accessing the buffers and the same is immediately deallocated after the buffer is accessed. Hence, special handling for the same in `VolatileMemory.rs`. In order to still support standard Unix memory regions, for special regions and testing, the Xen specific implementation here allows a third mapping type: `MmapXenFlags::UNIX`. This performs standard Unix memory mapping and the same is used for all tests in this crate. It was decided by the `rust-vmm` maintainers to keep the interface simple and build the crate for either standard Unix memory mapping or Xen, and not both. Xen is only supported for Unix platforms. ## Usage Add `vm-memory` as a dependency in `Cargo.toml` ```toml [dependencies] vm-memory = "*" ``` Then add `extern crate vm-memory;` to your crate root. ## Examples - Creating a VM physical memory object in hypervisor specific ways using the `GuestMemoryMmap` implementation of the `GuestMemory` trait: ```rust fn provide_mem_to_virt_dev() { let gm = GuestMemoryMmap::from_ranges(&[ (GuestAddress(0), 0x1000), (GuestAddress(0x1000), 0x1000) ]).unwrap(); virt_device_io(&gm); } ``` - Consumers accessing the VM's physical memory: ```rust fn virt_device_io(mem: &T) { let sample_buf = &[1, 2, 3, 4, 5]; assert_eq!(mem.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5); let buf = &mut [0u8; 5]; assert_eq!(mem.read(buf, GuestAddress(0xffc)).unwrap(), 5); assert_eq!(buf, sample_buf); } ``` ## License This project is licensed under either of - [Apache License](http://www.apache.org/licenses/LICENSE-2.0), Version 2.0 - [BSD-3-Clause License](https://opensource.org/licenses/BSD-3-Clause) vm-memory-0.14.0/TODO.md000064400000000000000000000002261046102023000127250ustar 00000000000000### TODO List - Abstraction layer to seperate VM memory management from VM memory accessor. - Help needed to refine documentation and usage examples. vm-memory-0.14.0/benches/guest_memory.rs000064400000000000000000000016301046102023000163320ustar 00000000000000// Copyright (C) 2020 Alibaba Cloud Computing. All rights reserved. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause #![cfg(feature = "backend-mmap")] pub use criterion::{black_box, Criterion}; use vm_memory::bitmap::Bitmap; use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; const REGION_SIZE: usize = 0x10_0000; const REGIONS_COUNT: u64 = 256; pub fn benchmark_for_guest_memory(c: &mut Criterion) { benchmark_find_region(c); } fn find_region(mem: &GuestMemoryMmap) where B: Bitmap + 'static, { for i in 0..REGIONS_COUNT { let _ = mem .find_region(black_box(GuestAddress(i * REGION_SIZE as u64))) .unwrap(); } } fn benchmark_find_region(c: &mut Criterion) { let memory = super::create_guest_memory_mmap(REGION_SIZE, REGIONS_COUNT); c.bench_function("find_region", |b| { b.iter(|| find_region(black_box(&memory))) }); } vm-memory-0.14.0/benches/main.rs000064400000000000000000000025341046102023000145430ustar 00000000000000// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause extern crate criterion; pub use criterion::{black_box, criterion_group, criterion_main, Criterion}; #[cfg(feature = "backend-mmap")] use vm_memory::{GuestAddress, GuestMemoryMmap}; mod guest_memory; mod mmap; mod volatile; use volatile::benchmark_for_volatile; #[cfg(feature = "backend-mmap")] // Use this function with caution. It does not check against overflows // and `GuestMemoryMmap::from_ranges` errors. fn create_guest_memory_mmap(size: usize, count: u64) -> GuestMemoryMmap<()> { let mut regions: Vec<(GuestAddress, usize)> = Vec::new(); for i in 0..count { regions.push((GuestAddress(i * size as u64), size)); } GuestMemoryMmap::from_ranges(regions.as_slice()).unwrap() } pub fn criterion_benchmark(_c: &mut Criterion) { #[cfg(feature = "backend-mmap")] mmap::benchmark_for_mmap(_c); } pub fn benchmark_guest_memory(_c: &mut Criterion) { #[cfg(feature = "backend-mmap")] guest_memory::benchmark_for_guest_memory(_c) } criterion_group! { name = benches; config = Criterion::default().sample_size(200).measurement_time(std::time::Duration::from_secs(50)); targets = criterion_benchmark, benchmark_guest_memory, benchmark_for_volatile } criterion_main! { benches, } vm-memory-0.14.0/benches/mmap/mod.rs000064400000000000000000000150431046102023000153270ustar 00000000000000// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause #![cfg(feature = "backend-mmap")] #![allow(clippy::undocumented_unsafe_blocks)] extern crate criterion; extern crate vm_memory; use std::fs::{File, OpenOptions}; use std::mem::size_of; use std::path::Path; use criterion::{black_box, Criterion}; use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemory}; const REGION_SIZE: usize = 0x8000_0000; const REGIONS_COUNT: u64 = 8; const ACCESS_SIZE: usize = 0x200; #[repr(C)] #[derive(Copy, Clone, Default)] struct SmallDummy { a: u32, b: u32, } unsafe impl ByteValued for SmallDummy {} #[repr(C)] #[derive(Copy, Clone, Default)] struct BigDummy { elements: [u64; 12], } unsafe impl ByteValued for BigDummy {} fn make_image(size: usize) -> Vec { let mut image: Vec = Vec::with_capacity(size); for i in 0..size { // We just want some different numbers here, so the conversion is OK. image.push(i as u8); } image } enum AccessKind { // The parameter represents the index of the region where the access should happen. // Indices are 0-based. InRegion(u64), // The parameter represents the index of the first region (i.e. where the access starts). CrossRegion(u64), } impl AccessKind { fn make_offset(&self, access_size: usize) -> u64 { match *self { AccessKind::InRegion(idx) => REGION_SIZE as u64 * idx, AccessKind::CrossRegion(idx) => { REGION_SIZE as u64 * (idx + 1) - (access_size as u64 / 2) } } } } pub fn benchmark_for_mmap(c: &mut Criterion) { let memory = super::create_guest_memory_mmap(REGION_SIZE, REGIONS_COUNT); // Just a sanity check. 
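    // (`last_addr()` returns the last valid guest address, i.e. an inclusive bound, hence the `- 0x01`.)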
assert_eq!( memory.last_addr(), GuestAddress(REGION_SIZE as u64 * REGIONS_COUNT - 0x01) ); let some_small_dummy = SmallDummy { a: 0x1111_2222, b: 0x3333_4444, }; let some_big_dummy = BigDummy { elements: [0x1111_2222_3333_4444; 12], }; let mut image = make_image(ACCESS_SIZE); let buf = &mut [0u8; ACCESS_SIZE]; let mut file = File::open(Path::new("/dev/zero")).expect("Could not open /dev/zero"); let mut file_to_write = OpenOptions::new() .write(true) .open("/dev/null") .expect("Could not open /dev/null"); let accesses = &[ AccessKind::InRegion(0), AccessKind::CrossRegion(0), AccessKind::CrossRegion(REGIONS_COUNT - 2), AccessKind::InRegion(REGIONS_COUNT - 1), ]; for access in accesses { let offset = access.make_offset(ACCESS_SIZE); let address = GuestAddress(offset); // Check performance for read operations. c.bench_function(format!("read_from_{:#0X}", offset).as_str(), |b| { b.iter(|| { black_box(&memory) .read_volatile_from(address, &mut image.as_slice(), ACCESS_SIZE) .unwrap() }) }); c.bench_function(format!("read_from_file_{:#0X}", offset).as_str(), |b| { b.iter(|| { black_box(&memory) .read_volatile_from(address, &mut file, ACCESS_SIZE) .unwrap() }) }); c.bench_function(format!("read_exact_from_{:#0X}", offset).as_str(), |b| { b.iter(|| { black_box(&memory) .read_exact_volatile_from(address, &mut image.as_slice(), ACCESS_SIZE) .unwrap() }) }); c.bench_function( format!("read_entire_slice_from_{:#0X}", offset).as_str(), |b| b.iter(|| black_box(&memory).read_slice(buf, address).unwrap()), ); c.bench_function(format!("read_slice_from_{:#0X}", offset).as_str(), |b| { b.iter(|| black_box(&memory).read(buf, address).unwrap()) }); let obj_off = access.make_offset(size_of::()); let obj_addr = GuestAddress(obj_off); c.bench_function( format!("read_small_obj_from_{:#0X}", obj_off).as_str(), |b| b.iter(|| black_box(&memory).read_obj::(obj_addr).unwrap()), ); let obj_off = access.make_offset(size_of::()); let obj_addr = GuestAddress(obj_off); c.bench_function(format!("read_big_obj_from_{:#0X}", obj_off).as_str(), |b| { b.iter(|| black_box(&memory).read_obj::(obj_addr).unwrap()) }); // Check performance for write operations. 
c.bench_function(format!("write_to_{:#0X}", offset).as_str(), |b| { b.iter(|| { black_box(&memory) .write_volatile_to(address, &mut image.as_mut_slice(), ACCESS_SIZE) .unwrap() }) }); c.bench_function(format!("write_to_file_{:#0X}", offset).as_str(), |b| { b.iter(|| { black_box(&memory) .write_volatile_to(address, &mut file_to_write, ACCESS_SIZE) .unwrap() }) }); c.bench_function(format!("write_exact_to_{:#0X}", offset).as_str(), |b| { b.iter(|| { black_box(&memory) .write_all_volatile_to(address, &mut image.as_mut_slice(), ACCESS_SIZE) .unwrap() }) }); c.bench_function( format!("write_entire_slice_to_{:#0X}", offset).as_str(), |b| b.iter(|| black_box(&memory).write_slice(buf, address).unwrap()), ); c.bench_function(format!("write_slice_to_{:#0X}", offset).as_str(), |b| { b.iter(|| black_box(&memory).write(buf, address).unwrap()) }); let obj_off = access.make_offset(size_of::()); let obj_addr = GuestAddress(obj_off); c.bench_function( format!("write_small_obj_to_{:#0X}", obj_off).as_str(), |b| { b.iter(|| { black_box(&memory) .write_obj::(some_small_dummy, obj_addr) .unwrap() }) }, ); let obj_off = access.make_offset(size_of::()); let obj_addr = GuestAddress(obj_off); c.bench_function(format!("write_big_obj_to_{:#0X}", obj_off).as_str(), |b| { b.iter(|| { black_box(&memory) .write_obj::(some_big_dummy, obj_addr) .unwrap() }) }); } } vm-memory-0.14.0/benches/volatile.rs000064400000000000000000000032551046102023000154370ustar 00000000000000// Copyright (C) 2020 Alibaba Cloud. All rights reserved. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause pub use criterion::{black_box, Criterion}; use vm_memory::volatile_memory::VolatileMemory; use vm_memory::VolatileSlice; pub fn benchmark_for_volatile(c: &mut Criterion) { let mut a = [0xa5u8; 1024]; let vslice = VolatileSlice::from(&mut a[..]); let v_ref8 = vslice.get_slice(0, vslice.len()).unwrap(); let mut d8 = [0u8; 1024]; // Check performance for read operations. c.bench_function("VolatileSlice::copy_to_u8", |b| { b.iter(|| v_ref8.copy_to(black_box(&mut d8[..]))) }); let v_ref16 = vslice.get_slice(0, vslice.len() / 2).unwrap(); let mut d16 = [0u16; 512]; c.bench_function("VolatileSlice::copy_to_u16", |b| { b.iter(|| v_ref16.copy_to(black_box(&mut d16[..]))) }); benchmark_volatile_copy_to_volatile_slice(c); // Check performance for write operations. 
c.bench_function("VolatileSlice::copy_from_u8", |b| { b.iter(|| v_ref8.copy_from(black_box(&d8[..]))) }); c.bench_function("VolatileSlice::copy_from_u16", |b| { b.iter(|| v_ref16.copy_from(black_box(&d16[..]))) }); } fn benchmark_volatile_copy_to_volatile_slice(c: &mut Criterion) { let mut a = [0xa5u8; 10240]; let vslice = VolatileSlice::from(&mut a[..]); let a_slice = vslice.get_slice(0, vslice.len()).unwrap(); let mut d = [0u8; 10240]; let vslice2 = VolatileSlice::from(&mut d[..]); let d_slice = vslice2.get_slice(0, vslice2.len()).unwrap(); c.bench_function("VolatileSlice::copy_to_volatile_slice", |b| { b.iter(|| black_box(a_slice).copy_to_volatile_slice(d_slice)) }); } vm-memory-0.14.0/coverage_config_aarch64.json000064400000000000000000000002041046102023000171550ustar 00000000000000{ "coverage_score": 85.2, "exclude_path": "mmap_windows.rs", "crate_features": "backend-mmap,backend-atomic,backend-bitmap" } vm-memory-0.14.0/coverage_config_x86_64.json000064400000000000000000000002051046102023000166640ustar 00000000000000{ "coverage_score": 86.07, "exclude_path": "mmap_windows.rs", "crate_features": "backend-mmap,backend-atomic,backend-bitmap" } vm-memory-0.14.0/src/address.rs000064400000000000000000000310231046102023000144170ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-3-Clause file. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Traits to represent an address within an address space. //! //! Two traits are defined to represent an address within an address space: //! - [`AddressValue`](trait.AddressValue.html): stores the raw value of an address. Typically //! `u32`,`u64` or `usize` is used to store the raw value. But pointers, such as `*u8`, can't be used //! because they don't implement the [`Add`](https://doc.rust-lang.org/std/ops/trait.Add.html) and //! [`Sub`](https://doc.rust-lang.org/std/ops/trait.Sub.html) traits. //! - [Address](trait.Address.html): encapsulates an [`AddressValue`](trait.AddressValue.html) //! object and defines methods to access and manipulate it. use std::cmp::{Eq, Ord, PartialEq, PartialOrd}; use std::fmt::Debug; use std::ops::{Add, BitAnd, BitOr, Not, Sub}; /// Simple helper trait used to store a raw address value. pub trait AddressValue { /// Type of the raw address value. type V: Copy + PartialEq + Eq + PartialOrd + Ord + Not + Add + Sub + BitAnd + BitOr + Debug + From; /// Return the value zero, coerced into the value type `Self::V` fn zero() -> Self::V { 0u8.into() } /// Return the value zero, coerced into the value type `Self::V` fn one() -> Self::V { 1u8.into() } } /// Trait to represent an address within an address space. /// /// To simplify the design and implementation, assume the same raw data type `(AddressValue::V)` /// could be used to store address, size and offset for the address space. Thus the `Address` trait /// could be used to manage address, size and offset. On the other hand, type aliases may be /// defined to improve code readability. /// /// One design rule is applied to the `Address` trait, namely that operators (+, -, &, | etc) are /// not supported and it forces clients to explicitly invoke corresponding methods. But there are /// always exceptions: /// `Address` (BitAnd|BitOr) `AddressValue` are supported. 
pub trait Address: AddressValue + Sized + Default + Copy + Eq + PartialEq + Ord + PartialOrd + BitAnd<::V, Output = Self> + BitOr<::V, Output = Self> { /// Creates an address from a raw address value. fn new(addr: Self::V) -> Self; /// Returns the raw value of the address. fn raw_value(&self) -> Self::V; /// Returns the bitwise and of the address with the given mask. fn mask(&self, mask: Self::V) -> Self::V { self.raw_value() & mask } /// Computes the offset from this address to the given base address. /// /// Returns `None` if there is underflow. fn checked_offset_from(&self, base: Self) -> Option; /// Computes the offset from this address to the given base address. /// /// In the event of overflow, follows standard Rust behavior, i.e. panic in debug builds, /// silently wrap in release builds. /// /// Note that, unlike the `unchecked_*` methods in std, this method never invokes undefined /// behavior. /// # Examples /// /// ``` /// # use vm_memory::{Address, GuestAddress}; /// # /// let base = GuestAddress(0x100); /// let addr = GuestAddress(0x150); /// assert_eq!(addr.unchecked_offset_from(base), 0x50); /// ``` fn unchecked_offset_from(&self, base: Self) -> Self::V { self.raw_value() - base.raw_value() } /// Returns self, aligned to the given power of two. fn checked_align_up(&self, power_of_two: Self::V) -> Option { let mask = power_of_two - Self::one(); assert_ne!(power_of_two, Self::zero()); assert_eq!(power_of_two & mask, Self::zero()); self.checked_add(mask).map(|x| x & !mask) } /// Returns self, aligned to the given power of two. /// Only use this when the result is guaranteed not to overflow. fn unchecked_align_up(&self, power_of_two: Self::V) -> Self { let mask = power_of_two - Self::one(); self.unchecked_add(mask) & !mask } /// Computes `self + other`, returning `None` if overflow occurred. fn checked_add(&self, other: Self::V) -> Option; /// Computes `self + other`. /// /// Returns a tuple of the addition result along with a boolean indicating whether an arithmetic /// overflow would occur. If an overflow would have occurred then the wrapped address /// is returned. fn overflowing_add(&self, other: Self::V) -> (Self, bool); /// Computes `self + offset`. /// /// In the event of overflow, follows standard Rust behavior, i.e. panic in debug builds, /// silently wrap in release builds. /// /// Note that, unlike the `unchecked_*` methods in std, this method never invokes undefined /// behavior.. fn unchecked_add(&self, offset: Self::V) -> Self; /// Subtracts two addresses, checking for underflow. If underflow happens, `None` is returned. fn checked_sub(&self, other: Self::V) -> Option; /// Computes `self - other`. /// /// Returns a tuple of the subtraction result along with a boolean indicating whether an /// arithmetic overflow would occur. If an overflow would have occurred then the wrapped /// address is returned. fn overflowing_sub(&self, other: Self::V) -> (Self, bool); /// Computes `self - other`. /// /// In the event of underflow, follows standard Rust behavior, i.e. panic in debug builds, /// silently wrap in release builds. /// /// Note that, unlike the `unchecked_*` methods in std, this method never invokes undefined /// behavior. fn unchecked_sub(&self, other: Self::V) -> Self; } macro_rules! 
impl_address_ops { ($T:ident, $V:ty) => { impl AddressValue for $T { type V = $V; } impl Address for $T { fn new(value: $V) -> $T { $T(value) } fn raw_value(&self) -> $V { self.0 } fn checked_offset_from(&self, base: $T) -> Option<$V> { self.0.checked_sub(base.0) } fn checked_add(&self, other: $V) -> Option<$T> { self.0.checked_add(other).map($T) } fn overflowing_add(&self, other: $V) -> ($T, bool) { let (t, ovf) = self.0.overflowing_add(other); ($T(t), ovf) } fn unchecked_add(&self, offset: $V) -> $T { $T(self.0 + offset) } fn checked_sub(&self, other: $V) -> Option<$T> { self.0.checked_sub(other).map($T) } fn overflowing_sub(&self, other: $V) -> ($T, bool) { let (t, ovf) = self.0.overflowing_sub(other); ($T(t), ovf) } fn unchecked_sub(&self, other: $V) -> $T { $T(self.0 - other) } } impl Default for $T { fn default() -> $T { Self::new(0 as $V) } } impl BitAnd<$V> for $T { type Output = $T; fn bitand(self, other: $V) -> $T { $T(self.0 & other) } } impl BitOr<$V> for $T { type Output = $T; fn bitor(self, other: $V) -> $T { $T(self.0 | other) } } }; } #[cfg(test)] mod tests { use super::*; #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)] struct MockAddress(pub u64); impl_address_ops!(MockAddress, u64); #[test] fn test_new() { assert_eq!(MockAddress::new(0), MockAddress(0)); assert_eq!(MockAddress::new(std::u64::MAX), MockAddress(std::u64::MAX)); } #[test] fn test_offset_from() { let base = MockAddress(0x100); let addr = MockAddress(0x150); assert_eq!(addr.unchecked_offset_from(base), 0x50u64); assert_eq!(addr.checked_offset_from(base), Some(0x50u64)); assert_eq!(base.checked_offset_from(addr), None); } #[test] fn test_equals() { let a = MockAddress(0x300); let b = MockAddress(0x300); let c = MockAddress(0x301); assert_eq!(a, MockAddress(a.raw_value())); assert_eq!(a, b); assert_eq!(b, a); assert_ne!(a, c); assert_ne!(c, a); } #[test] fn test_cmp() { let a = MockAddress(0x300); let b = MockAddress(0x301); assert!(a < b); } #[test] fn test_checked_align_up() { assert_eq!( MockAddress::new(0x128).checked_align_up(8), Some(MockAddress(0x128)) ); assert_eq!( MockAddress::new(0x128).checked_align_up(16), Some(MockAddress(0x130)) ); assert_eq!( MockAddress::new(std::u64::MAX - 0x3fff).checked_align_up(0x10000), None ); } #[test] #[should_panic] fn test_checked_align_up_invalid() { let _ = MockAddress::new(0x128).checked_align_up(12); } #[test] fn test_unchecked_align_up() { assert_eq!( MockAddress::new(0x128).unchecked_align_up(8), MockAddress(0x128) ); assert_eq!( MockAddress::new(0x128).unchecked_align_up(16), MockAddress(0x130) ); } #[test] fn test_mask() { let a = MockAddress(0x5050); assert_eq!(MockAddress(0x5000), a & 0xff00u64); assert_eq!(0x5000, a.mask(0xff00u64)); assert_eq!(MockAddress(0x5055), a | 0x0005u64); } fn check_add(a: u64, b: u64, expected_overflow: bool, expected_result: u64) { assert_eq!( (MockAddress(expected_result), expected_overflow), MockAddress(a).overflowing_add(b) ); if expected_overflow { assert!(MockAddress(a).checked_add(b).is_none()); #[cfg(debug_assertions)] assert!(std::panic::catch_unwind(|| MockAddress(a).unchecked_add(b)).is_err()); } else { assert_eq!( Some(MockAddress(expected_result)), MockAddress(a).checked_add(b) ); assert_eq!( MockAddress(expected_result), MockAddress(a).unchecked_add(b) ); } } #[test] fn test_add() { // without overflow // normal case check_add(10, 10, false, 20); // edge case check_add(std::u64::MAX - 1, 1, false, std::u64::MAX); // with overflow check_add(std::u64::MAX, 1, true, 0); } fn check_sub(a: u64, b: u64, 
expected_overflow: bool, expected_result: u64) { assert_eq!( (MockAddress(expected_result), expected_overflow), MockAddress(a).overflowing_sub(b) ); if expected_overflow { assert!(MockAddress(a).checked_sub(b).is_none()); assert!(MockAddress(a).checked_offset_from(MockAddress(b)).is_none()); #[cfg(debug_assertions)] assert!(std::panic::catch_unwind(|| MockAddress(a).unchecked_sub(b)).is_err()); } else { assert_eq!( Some(MockAddress(expected_result)), MockAddress(a).checked_sub(b) ); assert_eq!( Some(expected_result), MockAddress(a).checked_offset_from(MockAddress(b)) ); assert_eq!( MockAddress(expected_result), MockAddress(a).unchecked_sub(b) ); } } #[test] fn test_sub() { // without overflow // normal case check_sub(20, 10, false, 10); // edge case check_sub(1, 1, false, 0); // with underflow check_sub(0, 1, true, std::u64::MAX); } #[test] fn test_default() { assert_eq!(MockAddress::default(), MockAddress(0)); } #[test] fn test_bit_and() { let a = MockAddress(0x0ff0); assert_eq!(a & 0xf00f, MockAddress(0)); } #[test] fn test_bit_or() { let a = MockAddress(0x0ff0); assert_eq!(a | 0xf00f, MockAddress(0xffff)); } } vm-memory-0.14.0/src/atomic.rs000064400000000000000000000226561046102023000142620ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // Copyright (C) 2020 Red Hat, Inc. All rights reserved. // SPDX-License-Identifier: Apache-2.0 //! A wrapper over an `ArcSwap` struct to support RCU-style mutability. //! //! With the `backend-atomic` feature enabled, simply replacing `GuestMemoryMmap` //! with `GuestMemoryAtomic` will enable support for mutable memory maps. //! To support mutable memory maps, devices will also need to use //! `GuestAddressSpace::memory()` to gain temporary access to guest memory. extern crate arc_swap; use arc_swap::{ArcSwap, Guard}; use std::ops::Deref; use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError}; use crate::{GuestAddressSpace, GuestMemory}; /// A fast implementation of a mutable collection of memory regions. /// /// This implementation uses `ArcSwap` to provide RCU-like snapshotting of the memory map: /// every update of the memory map creates a completely new `GuestMemory` object, and /// readers will not be blocked because the copies they retrieved will be collected once /// no one can access them anymore. Under the assumption that updates to the memory map /// are rare, this allows a very efficient implementation of the `memory()` method. #[derive(Clone, Debug)] pub struct GuestMemoryAtomic { // GuestAddressSpace, which we want to implement, is basically a drop-in // replacement for &M. Therefore, we need to pass to devices the `GuestMemoryAtomic` // rather than a reference to it. To obtain this effect we wrap the actual fields // of GuestMemoryAtomic with an Arc, and derive the Clone trait. See the // documentation for GuestAddressSpace for an example. inner: Arc<(ArcSwap, Mutex<()>)>, } impl From> for GuestMemoryAtomic { /// create a new `GuestMemoryAtomic` object whose initial contents come from /// the `map` reference counted `GuestMemory`. fn from(map: Arc) -> Self { let inner = (ArcSwap::new(map), Mutex::new(())); GuestMemoryAtomic { inner: Arc::new(inner), } } } impl GuestMemoryAtomic { /// create a new `GuestMemoryAtomic` object whose initial contents come from /// the `map` `GuestMemory`. 
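    /// # Example
    ///
    /// A minimal sketch (left untested here because it additionally relies on
    /// the `backend-mmap` feature for `GuestMemoryMmap`):
    ///
    /// ```ignore
    /// use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
    ///
    /// let mmap = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
    /// let atomic = GuestMemoryAtomic::new(mmap);
    /// ```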
pub fn new(map: M) -> Self { Arc::new(map).into() } fn load(&self) -> Guard> { self.inner.0.load() } /// Acquires the update mutex for the `GuestMemoryAtomic`, blocking the current /// thread until it is able to do so. The returned RAII guard allows for /// scoped unlock of the mutex (that is, the mutex will be unlocked when /// the guard goes out of scope), and optionally also for replacing the /// contents of the `GuestMemoryAtomic` when the lock is dropped. pub fn lock(&self) -> LockResult> { match self.inner.1.lock() { Ok(guard) => Ok(GuestMemoryExclusiveGuard { parent: self, _guard: guard, }), Err(err) => Err(PoisonError::new(GuestMemoryExclusiveGuard { parent: self, _guard: err.into_inner(), })), } } } impl GuestAddressSpace for GuestMemoryAtomic { type T = GuestMemoryLoadGuard; type M = M; fn memory(&self) -> Self::T { GuestMemoryLoadGuard { guard: self.load() } } } /// A guard that provides temporary access to a `GuestMemoryAtomic`. This /// object is returned from the `memory()` method. It dereference to /// a snapshot of the `GuestMemory`, so it can be used transparently to /// access memory. #[derive(Debug)] pub struct GuestMemoryLoadGuard { guard: Guard>, } impl GuestMemoryLoadGuard { /// Make a clone of the held pointer and returns it. This is more /// expensive than just using the snapshot, but it allows to hold on /// to the snapshot outside the scope of the guard. It also allows /// writers to proceed, so it is recommended if the reference must /// be held for a long time (including for caching purposes). pub fn into_inner(self) -> Arc { Guard::into_inner(self.guard) } } impl Clone for GuestMemoryLoadGuard { fn clone(&self) -> Self { GuestMemoryLoadGuard { guard: Guard::from_inner(Arc::clone(&*self.guard)), } } } impl Deref for GuestMemoryLoadGuard { type Target = M; fn deref(&self) -> &Self::Target { &self.guard } } /// An RAII implementation of a "scoped lock" for `GuestMemoryAtomic`. When /// this structure is dropped (falls out of scope) the lock will be unlocked, /// possibly after updating the memory map represented by the /// `GuestMemoryAtomic` that created the guard. #[derive(Debug)] pub struct GuestMemoryExclusiveGuard<'a, M: GuestMemory> { parent: &'a GuestMemoryAtomic, _guard: MutexGuard<'a, ()>, } impl GuestMemoryExclusiveGuard<'_, M> { /// Replace the memory map in the `GuestMemoryAtomic` that created the guard /// with the new memory map, `map`. The lock is then dropped since this /// method consumes the guard. 
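///
/// # Examples
///
/// A sketch only; `gm` and `new_map` are placeholder names for a
/// `GuestMemoryAtomic<M>` and an updated map built by the caller.
///
/// ```ignore
/// let guard = gm.lock().unwrap();
/// guard.replace(new_map);
/// // From now on `gm.memory()` observes `new_map`; snapshots taken earlier
/// // keep the previous map alive until they are dropped.
/// ```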
pub fn replace(self, map: M) { self.parent.inner.0.store(Arc::new(map)) } } #[cfg(test)] #[cfg(feature = "backend-mmap")] mod tests { use super::*; use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MmapRegion}; type GuestMemoryMmap = crate::GuestMemoryMmap<()>; type GuestRegionMmap = crate::GuestRegionMmap<()>; type GuestMemoryMmapAtomic = GuestMemoryAtomic; #[test] fn test_atomic_memory() { let region_size = 0x400; let regions = vec![ (GuestAddress(0x0), region_size), (GuestAddress(0x1000), region_size), ]; let mut iterated_regions = Vec::new(); let gmm = GuestMemoryMmap::from_ranges(®ions).unwrap(); let gm = GuestMemoryMmapAtomic::new(gmm); let mem = gm.memory(); for region in mem.iter() { assert_eq!(region.len(), region_size as GuestUsize); } for region in mem.iter() { iterated_regions.push((region.start_addr(), region.len() as usize)); } assert_eq!(regions, iterated_regions); assert_eq!(mem.num_regions(), 2); assert!(mem.find_region(GuestAddress(0x1000)).is_some()); assert!(mem.find_region(GuestAddress(0x10000)).is_none()); assert!(regions .iter() .map(|x| (x.0, x.1)) .eq(iterated_regions.iter().copied())); let mem2 = mem.into_inner(); for region in mem2.iter() { assert_eq!(region.len(), region_size as GuestUsize); } assert_eq!(mem2.num_regions(), 2); assert!(mem2.find_region(GuestAddress(0x1000)).is_some()); assert!(mem2.find_region(GuestAddress(0x10000)).is_none()); assert!(regions .iter() .map(|x| (x.0, x.1)) .eq(iterated_regions.iter().copied())); let mem3 = mem2.memory(); for region in mem3.iter() { assert_eq!(region.len(), region_size as GuestUsize); } assert_eq!(mem3.num_regions(), 2); assert!(mem3.find_region(GuestAddress(0x1000)).is_some()); assert!(mem3.find_region(GuestAddress(0x10000)).is_none()); } #[test] fn test_clone_guard() { let region_size = 0x400; let regions = vec![ (GuestAddress(0x0), region_size), (GuestAddress(0x1000), region_size), ]; let gmm = GuestMemoryMmap::from_ranges(®ions).unwrap(); let gm = GuestMemoryMmapAtomic::new(gmm); let mem = { let guard1 = gm.memory(); Clone::clone(&guard1) }; assert_eq!(mem.num_regions(), 2); } #[test] fn test_atomic_hotplug() { let region_size = 0x1000; let regions = vec![ (GuestAddress(0x0), region_size), (GuestAddress(0x10_0000), region_size), ]; let mut gmm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); let gm: GuestMemoryAtomic<_> = gmm.clone().into(); let mem_orig = gm.memory(); assert_eq!(mem_orig.num_regions(), 2); { let guard = gm.lock().unwrap(); let new_gmm = Arc::make_mut(&mut gmm); let mmap = Arc::new( GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x8000)) .unwrap(), ); let new_gmm = new_gmm.insert_region(mmap).unwrap(); let mmap = Arc::new( GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x4000)) .unwrap(), ); let new_gmm = new_gmm.insert_region(mmap).unwrap(); let mmap = Arc::new( GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000)) .unwrap(), ); let new_gmm = new_gmm.insert_region(mmap).unwrap(); let mmap = Arc::new( GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000)) .unwrap(), ); new_gmm.insert_region(mmap).unwrap_err(); guard.replace(new_gmm); } assert_eq!(mem_orig.num_regions(), 2); let mem = gm.memory(); assert_eq!(mem.num_regions(), 5); } } vm-memory-0.14.0/src/atomic_integer.rs000064400000000000000000000066321046102023000157730ustar 00000000000000// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause use std::sync::atomic::Ordering; /// # Safety /// /// Objects that implement this trait must consist exclusively of atomic types /// from [`std::sync::atomic`](https://doc.rust-lang.org/std/sync/atomic/), except for /// [`AtomicPtr`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html) and /// [`AtomicBool`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicBool.html). pub unsafe trait AtomicInteger: Sync + Send { /// The raw value type associated with the atomic integer (i.e. `u16` for `AtomicU16`). type V; /// Create a new instance of `Self`. fn new(v: Self::V) -> Self; /// Loads a value from the atomic integer. fn load(&self, order: Ordering) -> Self::V; /// Stores a value into the atomic integer. fn store(&self, val: Self::V, order: Ordering); } macro_rules! impl_atomic_integer_ops { ($T:path, $V:ty) => { // SAFETY: This is safe as long as T is an Atomic type. // This is a helper macro for generating the implementation for common // Atomic types. unsafe impl AtomicInteger for $T { type V = $V; fn new(v: Self::V) -> Self { Self::new(v) } fn load(&self, order: Ordering) -> Self::V { self.load(order) } fn store(&self, val: Self::V, order: Ordering) { self.store(val, order) } } }; } // TODO: Detect availability using #[cfg(target_has_atomic) when it is stabilized. // Right now we essentially assume we're running on either x86 or Arm (32 or 64 bit). AFAIK, // Rust starts using additional synchronization primitives to implement atomics when they're // not natively available, and that doesn't interact safely with how we cast pointers to // atomic value references. We should be wary of this when looking at a broader range of // platforms. impl_atomic_integer_ops!(std::sync::atomic::AtomicI8, i8); impl_atomic_integer_ops!(std::sync::atomic::AtomicI16, i16); impl_atomic_integer_ops!(std::sync::atomic::AtomicI32, i32); #[cfg(any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64", target_arch = "s390x" ))] impl_atomic_integer_ops!(std::sync::atomic::AtomicI64, i64); impl_atomic_integer_ops!(std::sync::atomic::AtomicU8, u8); impl_atomic_integer_ops!(std::sync::atomic::AtomicU16, u16); impl_atomic_integer_ops!(std::sync::atomic::AtomicU32, u32); #[cfg(any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64", target_arch = "s390x" ))] impl_atomic_integer_ops!(std::sync::atomic::AtomicU64, u64); impl_atomic_integer_ops!(std::sync::atomic::AtomicIsize, isize); impl_atomic_integer_ops!(std::sync::atomic::AtomicUsize, usize); #[cfg(test)] mod tests { use super::*; use std::fmt::Debug; use std::sync::atomic::AtomicU32; fn check_atomic_integer_ops() where A::V: Copy + Debug + From + PartialEq, { let v = A::V::from(0); let a = A::new(v); assert_eq!(a.load(Ordering::Relaxed), v); let v2 = A::V::from(100); a.store(v2, Ordering::Relaxed); assert_eq!(a.load(Ordering::Relaxed), v2); } #[test] fn test_atomic_integer_ops() { check_atomic_integer_ops::() } } vm-memory-0.14.0/src/bitmap/backend/atomic_bitmap.rs000064400000000000000000000164101046102023000204500ustar 00000000000000// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Bitmap backend implementation based on atomic integers. 
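//!
//! # Examples (uses the `backend-bitmap` feature)
//!
//! A minimal sketch, mirroring the unit tests below.
//!
//! ```
//! # #[cfg(feature = "backend-bitmap")]
//! # {
//! use std::num::NonZeroUsize;
//! use vm_memory::bitmap::AtomicBitmap;
//!
//! // One bit per 4 KiB page, covering 64 KiB worth of addresses.
//! let bitmap = AtomicBitmap::new(0x1_0000, NonZeroUsize::new(0x1000).unwrap());
//! bitmap.set_addr_range(0x1000, 0x100);
//! assert!(bitmap.is_addr_set(0x1000));
//! assert!(!bitmap.is_addr_set(0x3000));
//! # }
//! ```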
use std::num::NonZeroUsize; use std::sync::atomic::{AtomicU64, Ordering}; use crate::bitmap::{Bitmap, RefSlice, WithBitmapSlice}; #[cfg(feature = "backend-mmap")] use crate::mmap::NewBitmap; /// `AtomicBitmap` implements a simple bit map on the page level with test and set operations. /// It is page-size aware, so it converts addresses to page numbers before setting or clearing /// the bits. #[derive(Debug)] pub struct AtomicBitmap { map: Vec, size: usize, page_size: NonZeroUsize, } #[allow(clippy::len_without_is_empty)] impl AtomicBitmap { /// Create a new bitmap of `byte_size`, with one bit per page. This is effectively /// rounded up, and we get a new vector of the next multiple of 64 bigger than `bit_size`. pub fn new(byte_size: usize, page_size: NonZeroUsize) -> Self { let mut num_pages = byte_size / page_size; if byte_size % page_size > 0 { num_pages += 1; } // Adding one entry element more just in case `num_pages` is not a multiple of `64`. let map_size = num_pages / 64 + 1; let map: Vec = (0..map_size).map(|_| AtomicU64::new(0)).collect(); AtomicBitmap { map, size: num_pages, page_size, } } /// Is bit `n` set? Bits outside the range of the bitmap are always unset. pub fn is_bit_set(&self, index: usize) -> bool { if index < self.size { (self.map[index >> 6].load(Ordering::Acquire) & (1 << (index & 63))) != 0 } else { // Out-of-range bits are always unset. false } } /// Is the bit corresponding to address `addr` set? pub fn is_addr_set(&self, addr: usize) -> bool { self.is_bit_set(addr / self.page_size) } /// Set a range of `len` bytes starting at `start_addr`. The first bit set in the bitmap /// is for the page corresponding to `start_addr`, and the last bit that we set corresponds /// to address `start_addr + len - 1`. pub fn set_addr_range(&self, start_addr: usize, len: usize) { // Return early in the unlikely event that `len == 0` so the `len - 1` computation // below does not underflow. if len == 0 { return; } let first_bit = start_addr / self.page_size; // Handle input ranges where `start_addr + len - 1` would otherwise overflow an `usize` // by ignoring pages at invalid addresses. let last_bit = start_addr.saturating_add(len - 1) / self.page_size; for n in first_bit..=last_bit { if n >= self.size { // Attempts to set bits beyond the end of the bitmap are simply ignored. break; } self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst); } } /// Get the length of the bitmap in bits (i.e. in how many pages it can represent). pub fn len(&self) -> usize { self.size } /// Atomically get and reset the dirty page bitmap. pub fn get_and_reset(&self) -> Vec { self.map .iter() .map(|u| u.fetch_and(0, Ordering::SeqCst)) .collect() } /// Reset all bitmap bits to 0. pub fn reset(&self) { for it in self.map.iter() { it.store(0, Ordering::Release); } } } impl Clone for AtomicBitmap { fn clone(&self) -> Self { let map = self .map .iter() .map(|i| i.load(Ordering::Acquire)) .map(AtomicU64::new) .collect(); AtomicBitmap { map, size: self.size, page_size: self.page_size, } } } impl<'a> WithBitmapSlice<'a> for AtomicBitmap { type S = RefSlice<'a, Self>; } impl Bitmap for AtomicBitmap { fn mark_dirty(&self, offset: usize, len: usize) { self.set_addr_range(offset, len) } fn dirty_at(&self, offset: usize) -> bool { self.is_addr_set(offset) } fn slice_at(&self, offset: usize) -> ::S { RefSlice::new(self, offset) } } impl Default for AtomicBitmap { fn default() -> Self { // SAFETY: Safe as `0x1000` is non-zero. 
AtomicBitmap::new(0, unsafe { NonZeroUsize::new_unchecked(0x1000) }) } } #[cfg(feature = "backend-mmap")] impl NewBitmap for AtomicBitmap { fn with_len(len: usize) -> Self { #[cfg(unix)] // SAFETY: There's no unsafe potential in calling this function. let page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) }; #[cfg(windows)] let page_size = { use winapi::um::sysinfoapi::{GetSystemInfo, LPSYSTEM_INFO, SYSTEM_INFO}; let mut sysinfo = MaybeUninit::zeroed(); // SAFETY: It's safe to call `GetSystemInfo` as `sysinfo` is rightly sized // allocated memory. unsafe { GetSystemInfo(sysinfo.as_mut_ptr()) }; // SAFETY: It's safe to call `assume_init` as `GetSystemInfo` initializes `sysinfo`. unsafe { sysinfo.assume_init().dwPageSize } }; // The `unwrap` is safe to use because the above call should always succeed on the // supported platforms, and the size of a page will always fit within a `usize`. AtomicBitmap::new( len, NonZeroUsize::try_from(usize::try_from(page_size).unwrap()).unwrap(), ) } } #[cfg(test)] mod tests { use super::*; use crate::bitmap::tests::test_bitmap; #[allow(clippy::undocumented_unsafe_blocks)] const DEFAULT_PAGE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(128) }; #[test] fn test_bitmap_basic() { // Test that bitmap size is properly rounded up. let a = AtomicBitmap::new(1025, DEFAULT_PAGE_SIZE); assert_eq!(a.len(), 9); let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE); assert_eq!(b.len(), 8); b.set_addr_range(128, 129); assert!(!b.is_addr_set(0)); assert!(b.is_addr_set(128)); assert!(b.is_addr_set(256)); assert!(!b.is_addr_set(384)); #[allow(clippy::redundant_clone)] let copy_b = b.clone(); assert!(copy_b.is_addr_set(256)); assert!(!copy_b.is_addr_set(384)); b.reset(); assert!(!b.is_addr_set(128)); assert!(!b.is_addr_set(256)); assert!(!b.is_addr_set(384)); b.set_addr_range(128, 129); let v = b.get_and_reset(); assert!(!b.is_addr_set(128)); assert!(!b.is_addr_set(256)); assert!(!b.is_addr_set(384)); assert_eq!(v.len(), 1); assert_eq!(v[0], 0b110); } #[test] fn test_bitmap_out_of_range() { let b = AtomicBitmap::new(1024, NonZeroUsize::MIN); // Set a partial range that goes beyond the end of the bitmap b.set_addr_range(768, 512); assert!(b.is_addr_set(768)); // The bitmap is never set beyond its end. assert!(!b.is_addr_set(1024)); assert!(!b.is_addr_set(1152)); } #[test] fn test_bitmap_impl() { let b = AtomicBitmap::new(0x2000, DEFAULT_PAGE_SIZE); test_bitmap(&b); } } vm-memory-0.14.0/src/bitmap/backend/atomic_bitmap_arc.rs000064400000000000000000000043031046102023000212730ustar 00000000000000// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause use std::ops::Deref; use std::sync::Arc; use crate::bitmap::{ArcSlice, AtomicBitmap, Bitmap, WithBitmapSlice}; #[cfg(feature = "backend-mmap")] use crate::mmap::NewBitmap; /// A `Bitmap` implementation that's based on an atomically reference counted handle to an /// `AtomicBitmap` object. pub struct AtomicBitmapArc { inner: Arc, } impl AtomicBitmapArc { pub fn new(inner: AtomicBitmap) -> Self { AtomicBitmapArc { inner: Arc::new(inner), } } } // The current clone implementation creates a deep clone of the inner bitmap, as opposed to // simply cloning the `Arc`. impl Clone for AtomicBitmapArc { fn clone(&self) -> Self { Self::new(self.inner.deref().clone()) } } // Providing a `Deref` to `AtomicBitmap` implementation, so the methods of the inner object // can be called in a transparent manner. 
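//
// For example (a sketch, assuming the `backend-bitmap` feature is enabled):
//
//     let arc_bitmap = AtomicBitmapArc::new(AtomicBitmap::default());
//     // `is_addr_set` is an inherent method of the inner `AtomicBitmap`,
//     // reachable here only thanks to the `Deref` impl below.
//     assert!(!arc_bitmap.is_addr_set(0x1000));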
impl Deref for AtomicBitmapArc { type Target = AtomicBitmap; fn deref(&self) -> &Self::Target { self.inner.deref() } } impl WithBitmapSlice<'_> for AtomicBitmapArc { type S = ArcSlice; } impl Bitmap for AtomicBitmapArc { fn mark_dirty(&self, offset: usize, len: usize) { self.inner.set_addr_range(offset, len) } fn dirty_at(&self, offset: usize) -> bool { self.inner.is_addr_set(offset) } fn slice_at(&self, offset: usize) -> ::S { ArcSlice::new(self.inner.clone(), offset) } } impl Default for AtomicBitmapArc { fn default() -> Self { Self::new(AtomicBitmap::default()) } } #[cfg(feature = "backend-mmap")] impl NewBitmap for AtomicBitmapArc { fn with_len(len: usize) -> Self { Self::new(AtomicBitmap::with_len(len)) } } #[cfg(test)] mod tests { use super::*; use crate::bitmap::tests::test_bitmap; use std::num::NonZeroUsize; #[test] fn test_bitmap_impl() { // SAFETY: `128` is non-zero. let b = AtomicBitmapArc::new(AtomicBitmap::new(0x2000, unsafe { NonZeroUsize::new_unchecked(128) })); test_bitmap(&b); } } vm-memory-0.14.0/src/bitmap/backend/mod.rs000064400000000000000000000004571046102023000164230ustar 00000000000000// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause mod atomic_bitmap; mod atomic_bitmap_arc; mod slice; pub use atomic_bitmap::AtomicBitmap; pub use atomic_bitmap_arc::AtomicBitmapArc; pub use slice::{ArcSlice, RefSlice}; vm-memory-0.14.0/src/bitmap/backend/slice.rs000064400000000000000000000072701046102023000167430ustar 00000000000000// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Contains a generic implementation of `BitmapSlice`. use std::fmt::{self, Debug}; use std::ops::Deref; use std::sync::Arc; use crate::bitmap::{Bitmap, BitmapSlice, WithBitmapSlice}; /// Represents a slice into a `Bitmap` object, starting at `base_offset`. #[derive(Clone, Copy)] pub struct BaseSlice { inner: B, base_offset: usize, } impl BaseSlice { /// Create a new `BitmapSlice`, starting at the specified `offset`. pub fn new(inner: B, offset: usize) -> Self { BaseSlice { inner, base_offset: offset, } } } impl<'a, B> WithBitmapSlice<'a> for BaseSlice where B: Clone + Deref, B::Target: Bitmap, { type S = Self; } impl BitmapSlice for BaseSlice where B: Clone + Deref, B::Target: Bitmap, { } impl Bitmap for BaseSlice where B: Clone + Deref, B::Target: Bitmap, { /// Mark the memory range specified by the given `offset` (relative to the base offset of /// the slice) and `len` as dirtied. fn mark_dirty(&self, offset: usize, len: usize) { // The `Bitmap` operations are supposed to accompany guest memory accesses defined by the // same parameters (i.e. offset & length), so we use simple wrapping arithmetic instead of // performing additional checks. If an overflow would occur, we simply end up marking some // other region as dirty (which is just a false positive) instead of a region that could // not have been accessed to begin with. self.inner .mark_dirty(self.base_offset.wrapping_add(offset), len) } fn dirty_at(&self, offset: usize) -> bool { self.inner.dirty_at(self.base_offset.wrapping_add(offset)) } /// Create a new `BitmapSlice` starting from the specified `offset` into the current slice. fn slice_at(&self, offset: usize) -> Self { BaseSlice { inner: self.inner.clone(), base_offset: self.base_offset.wrapping_add(offset), } } } impl Debug for BaseSlice { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Dummy impl for now. 
write!(f, "(bitmap slice)") } } impl Default for BaseSlice { fn default() -> Self { BaseSlice { inner: B::default(), base_offset: 0, } } } /// A `BitmapSlice` implementation that wraps a reference to a `Bitmap` object. pub type RefSlice<'a, B> = BaseSlice<&'a B>; /// A `BitmapSlice` implementation that uses an `Arc` handle to a `Bitmap` object. pub type ArcSlice = BaseSlice>; #[cfg(test)] mod tests { use super::*; use crate::bitmap::tests::{range_is_clean, range_is_dirty, test_bitmap}; use crate::bitmap::AtomicBitmap; use std::num::NonZeroUsize; #[test] fn test_slice() { let bitmap_size = 0x1_0000; let dirty_offset = 0x1000; let dirty_len = 0x100; { let bitmap = AtomicBitmap::new(bitmap_size, NonZeroUsize::MIN); let slice1 = bitmap.slice_at(0); let slice2 = bitmap.slice_at(dirty_offset); assert!(range_is_clean(&slice1, 0, bitmap_size)); assert!(range_is_clean(&slice2, 0, dirty_len)); bitmap.mark_dirty(dirty_offset, dirty_len); assert!(range_is_dirty(&slice1, dirty_offset, dirty_len)); assert!(range_is_dirty(&slice2, 0, dirty_len)); } { let bitmap = AtomicBitmap::new(bitmap_size, NonZeroUsize::MIN); let slice = bitmap.slice_at(0); test_bitmap(&slice); } } } vm-memory-0.14.0/src/bitmap/mod.rs000064400000000000000000000340701046102023000150320ustar 00000000000000// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! This module holds abstractions that enable tracking the areas dirtied by writes of a specified //! length to a given offset. In particular, this is used to track write accesses within a //! `GuestMemoryRegion` object, and the resulting bitmaps can then be aggregated to build the //! global view for an entire `GuestMemory` object. #[cfg(any(test, feature = "backend-bitmap"))] mod backend; use std::fmt::Debug; use crate::{GuestMemory, GuestMemoryRegion}; #[cfg(any(test, feature = "backend-bitmap"))] pub use backend::{ArcSlice, AtomicBitmap, RefSlice}; /// Trait implemented by types that support creating `BitmapSlice` objects. pub trait WithBitmapSlice<'a> { /// Type of the bitmap slice. type S: BitmapSlice; } /// Trait used to represent that a `BitmapSlice` is a `Bitmap` itself, but also satisfies the /// restriction that slices created from it have the same type as `Self`. pub trait BitmapSlice: Bitmap + Clone + Debug + for<'a> WithBitmapSlice<'a, S = Self> {} /// Common bitmap operations. Using Higher-Rank Trait Bounds (HRTBs) to effectively define /// an associated type that has a lifetime parameter, without tagging the `Bitmap` trait with /// a lifetime as well. /// /// Using an associated type allows implementing the `Bitmap` and `BitmapSlice` functionality /// as a zero-cost abstraction when providing trivial implementations such as the one /// defined for `()`. // These methods represent the core functionality that's required by `vm-memory` abstractions // to implement generic tracking logic, as well as tests that can be reused by different backends. pub trait Bitmap: for<'a> WithBitmapSlice<'a> { /// Mark the memory range specified by the given `offset` and `len` as dirtied. fn mark_dirty(&self, offset: usize, len: usize); /// Check whether the specified `offset` is marked as dirty. fn dirty_at(&self, offset: usize) -> bool; /// Return a `::S` slice of the current bitmap, starting at /// the specified `offset`. fn slice_at(&self, offset: usize) -> ::S; } /// A no-op `Bitmap` implementation that can be provided for backends that do not actually /// require the tracking functionality. 
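///
/// For instance, `GuestMemoryMmap<()>` (as used in several doc examples of this crate)
/// picks `()` as its bitmap type and thereby opts out of dirty tracking altogether.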
impl<'a> WithBitmapSlice<'a> for () { type S = Self; } impl BitmapSlice for () {} impl Bitmap for () { fn mark_dirty(&self, _offset: usize, _len: usize) {} fn dirty_at(&self, _offset: usize) -> bool { false } fn slice_at(&self, _offset: usize) -> Self {} } /// A `Bitmap` and `BitmapSlice` implementation for `Option`. impl<'a, B> WithBitmapSlice<'a> for Option where B: WithBitmapSlice<'a>, { type S = Option; } impl BitmapSlice for Option {} impl Bitmap for Option { fn mark_dirty(&self, offset: usize, len: usize) { if let Some(inner) = self { inner.mark_dirty(offset, len) } } fn dirty_at(&self, offset: usize) -> bool { if let Some(inner) = self { return inner.dirty_at(offset); } false } fn slice_at(&self, offset: usize) -> Option<::S> { if let Some(inner) = self { return Some(inner.slice_at(offset)); } None } } /// Helper type alias for referring to the `BitmapSlice` concrete type associated with /// an object `B: WithBitmapSlice<'a>`. pub type BS<'a, B> = >::S; /// Helper type alias for referring to the `BitmapSlice` concrete type associated with /// the memory regions of an object `M: GuestMemory`. pub type MS<'a, M> = BS<'a, <::R as GuestMemoryRegion>::B>; #[cfg(test)] pub(crate) mod tests { use super::*; use std::io::Cursor; use std::marker::PhantomData; use std::mem::size_of_val; use std::result::Result; use std::sync::atomic::Ordering; use crate::{Bytes, VolatileMemory}; #[cfg(feature = "backend-mmap")] use crate::{GuestAddress, MemoryRegionAddress}; // Helper method to check whether a specified range is clean. pub fn range_is_clean(b: &B, start: usize, len: usize) -> bool { (start..start + len).all(|offset| !b.dirty_at(offset)) } // Helper method to check whether a specified range is dirty. pub fn range_is_dirty(b: &B, start: usize, len: usize) -> bool { (start..start + len).all(|offset| b.dirty_at(offset)) } pub fn check_range(b: &B, start: usize, len: usize, clean: bool) -> bool { if clean { range_is_clean(b, start, len) } else { range_is_dirty(b, start, len) } } // Helper method that tests a generic `B: Bitmap` implementation. It assumes `b` covers // an area of length at least 0x2000. pub fn test_bitmap(b: &B) { let len = 0x2000; let dirty_offset = 0x1000; let dirty_len = 0x100; // Some basic checks. let s = b.slice_at(dirty_offset); assert!(range_is_clean(b, 0, len)); assert!(range_is_clean(&s, 0, dirty_len)); b.mark_dirty(dirty_offset, dirty_len); assert!(range_is_dirty(b, dirty_offset, dirty_len)); assert!(range_is_dirty(&s, 0, dirty_len)); } #[derive(Debug)] pub enum TestAccessError { RangeCleanCheck, RangeDirtyCheck, } // A helper object that implements auxiliary operations for testing `Bytes` implementations // in the context of dirty bitmap tracking. struct BytesHelper { check_range_fn: F, address_fn: G, phantom: PhantomData<*const M>, } // `F` represents a closure the checks whether a specified range associated with the `Bytes` // object that's being tested is marked as dirty or not (depending on the value of the last // parameter). It has the following parameters: // - A reference to a `Bytes` implementations that's subject to testing. // - The offset of the range. // - The length of the range. // - Whether we are checking if the range is clean (when `true`) or marked as dirty. // // `G` represents a closure that translates an offset into an address value that's // relevant for the `Bytes` implementation being tested. 
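//
// For a concrete instantiation, see `test_guest_memory_region` below, where `F` wraps
// `check_range(r.bitmap(), ...)` and `G` is `|offset| MemoryRegionAddress(offset as u64)`.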
impl BytesHelper where F: Fn(&M, usize, usize, bool) -> bool, G: Fn(usize) -> A, M: Bytes, { fn check_range(&self, m: &M, start: usize, len: usize, clean: bool) -> bool { (self.check_range_fn)(m, start, len, clean) } fn address(&self, offset: usize) -> A { (self.address_fn)(offset) } fn test_access( &self, bytes: &M, dirty_offset: usize, dirty_len: usize, op: Op, ) -> Result<(), TestAccessError> where Op: Fn(&M, A), { if !self.check_range(bytes, dirty_offset, dirty_len, true) { return Err(TestAccessError::RangeCleanCheck); } op(bytes, self.address(dirty_offset)); if !self.check_range(bytes, dirty_offset, dirty_len, false) { return Err(TestAccessError::RangeDirtyCheck); } Ok(()) } } // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment. // The `step` parameter represents the offset that's added the the current address after // performing each access. It provides finer grained control when testing tracking // implementations that aggregate entire ranges for accounting purposes (for example, doing // tracking at the page level). pub fn test_bytes(bytes: &M, check_range_fn: F, address_fn: G, step: usize) where F: Fn(&M, usize, usize, bool) -> bool, G: Fn(usize) -> A, A: Copy, M: Bytes, >::E: Debug, { const BUF_SIZE: usize = 1024; let buf = vec![1u8; 1024]; let val = 1u64; let h = BytesHelper { check_range_fn, address_fn, phantom: PhantomData, }; let mut dirty_offset = 0x1000; // Test `write`. h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| { assert_eq!(m.write(buf.as_slice(), addr).unwrap(), BUF_SIZE) }) .unwrap(); dirty_offset += step; // Test `write_slice`. h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| { m.write_slice(buf.as_slice(), addr).unwrap() }) .unwrap(); dirty_offset += step; // Test `write_obj`. h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| { m.write_obj(val, addr).unwrap() }) .unwrap(); dirty_offset += step; // Test `read_from`. #[allow(deprecated)] // test of deprecated functions h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| { assert_eq!( m.read_from(addr, &mut Cursor::new(&buf), BUF_SIZE).unwrap(), BUF_SIZE ) }) .unwrap(); dirty_offset += step; // Test `read_exact_from`. #[allow(deprecated)] // test of deprecated functions h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| { m.read_exact_from(addr, &mut Cursor::new(&buf), BUF_SIZE) .unwrap() }) .unwrap(); dirty_offset += step; // Test `store`. h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| { m.store(val, addr, Ordering::Relaxed).unwrap() }) .unwrap(); } // This function and the next are currently conditionally compiled because we only use // them to test the mmap-based backend implementations for now. Going forward, the generic // test functions defined here can be placed in a separate module (i.e. `test_utilities`) // which is gated by a feature and can be used for testing purposes by other crates as well. #[cfg(feature = "backend-mmap")] fn test_guest_memory_region(region: &R) { let dirty_addr = MemoryRegionAddress(0x0); let val = 123u64; let dirty_len = size_of_val(&val); let slice = region.get_slice(dirty_addr, dirty_len).unwrap(); assert!(range_is_clean(region.bitmap(), 0, region.len() as usize)); assert!(range_is_clean(slice.bitmap(), 0, dirty_len)); region.write_obj(val, dirty_addr).unwrap(); assert!(range_is_dirty( region.bitmap(), dirty_addr.0 as usize, dirty_len )); assert!(range_is_dirty(slice.bitmap(), 0, dirty_len)); // Finally, let's invoke the generic tests for `R: Bytes`. 
It's ok to pass the same // `region` handle because `test_bytes` starts performing writes after the range that's // been already dirtied in the first part of this test. test_bytes( region, |r: &R, start: usize, len: usize, clean: bool| { check_range(r.bitmap(), start, len, clean) }, |offset| MemoryRegionAddress(offset as u64), 0x1000, ); } #[cfg(feature = "backend-mmap")] // Assumptions about M generated by f ... pub fn test_guest_memory_and_region(f: F) where M: GuestMemory, F: Fn() -> M, { let m = f(); let dirty_addr = GuestAddress(0x1000); let val = 123u64; let dirty_len = size_of_val(&val); let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap(); let slice = m.get_slice(dirty_addr, dirty_len).unwrap(); assert!(range_is_clean(region.bitmap(), 0, region.len() as usize)); assert!(range_is_clean(slice.bitmap(), 0, dirty_len)); m.write_obj(val, dirty_addr).unwrap(); assert!(range_is_dirty( region.bitmap(), region_addr.0 as usize, dirty_len )); assert!(range_is_dirty(slice.bitmap(), 0, dirty_len)); // Now let's invoke the tests for the inner `GuestMemoryRegion` type. test_guest_memory_region(f().find_region(GuestAddress(0)).unwrap()); // Finally, let's invoke the generic tests for `Bytes`. let check_range_closure = |m: &M, start: usize, len: usize, clean: bool| -> bool { let mut check_result = true; m.try_access(len, GuestAddress(start as u64), |_, size, reg_addr, reg| { if !check_range(reg.bitmap(), reg_addr.0 as usize, size, clean) { check_result = false; } Ok(size) }) .unwrap(); check_result }; test_bytes( &f(), check_range_closure, |offset| GuestAddress(offset as u64), 0x1000, ); } pub fn test_volatile_memory(m: &M) { assert!(m.len() >= 0x8000); let dirty_offset = 0x1000; let val = 123u64; let dirty_len = size_of_val(&val); let get_ref_offset = 0x2000; let array_ref_offset = 0x3000; let s1 = m.as_volatile_slice(); let s2 = m.get_slice(dirty_offset, dirty_len).unwrap(); assert!(range_is_clean(s1.bitmap(), 0, s1.len())); assert!(range_is_clean(s2.bitmap(), 0, s2.len())); s1.write_obj(val, dirty_offset).unwrap(); assert!(range_is_dirty(s1.bitmap(), dirty_offset, dirty_len)); assert!(range_is_dirty(s2.bitmap(), 0, dirty_len)); let v_ref = m.get_ref::(get_ref_offset).unwrap(); assert!(range_is_clean(s1.bitmap(), get_ref_offset, dirty_len)); v_ref.store(val); assert!(range_is_dirty(s1.bitmap(), get_ref_offset, dirty_len)); let arr_ref = m.get_array_ref::(array_ref_offset, 1).unwrap(); assert!(range_is_clean(s1.bitmap(), array_ref_offset, dirty_len)); arr_ref.store(0, val); assert!(range_is_dirty(s1.bitmap(), array_ref_offset, dirty_len)); } } vm-memory-0.14.0/src/bytes.rs000064400000000000000000000503031046102023000141220ustar 00000000000000// Portions Copyright 2019 Red Hat, Inc. // // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-3-Clause file. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Define the `ByteValued` trait to mark that it is safe to instantiate the struct with random //! data. use std::io::{Read, Write}; use std::mem::{size_of, MaybeUninit}; use std::result::Result; use std::slice::{from_raw_parts, from_raw_parts_mut}; use std::sync::atomic::Ordering; use crate::atomic_integer::AtomicInteger; use crate::volatile_memory::VolatileSlice; /// Types for which it is safe to initialize from raw data. 
/// /// # Safety /// /// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a /// byte array. This is generally true for all plain-old-data structs. It is notably not true for /// any type that includes a reference. It is generally also not safe for non-packed structs, as /// compiler-inserted padding is considered uninitialized memory, and thus reads/writing it will /// cause undefined behavior. /// /// Implementing this trait guarantees that it is safe to instantiate the struct with random data. pub unsafe trait ByteValued: Copy + Send + Sync { /// Converts a slice of raw data into a reference of `Self`. /// /// The value of `data` is not copied. Instead a reference is made from the given slice. The /// value of `Self` will depend on the representation of the type in memory, and may change in /// an unstable fashion. /// /// This will return `None` if the length of data does not match the size of `Self`, or if the /// data is not aligned for the type of `Self`. fn from_slice(data: &[u8]) -> Option<&Self> { // Early out to avoid an unneeded `align_to` call. if data.len() != size_of::() { return None; } // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and // we ensured the size of the pointer's buffer is the correct size. The `align_to` method // ensures that we don't have any unaligned references. This aliases a pointer, but because // the pointer is from a const slice reference, there are no mutable aliases. Finally, the // reference returned can not outlive data because they have equal implicit lifetime // constraints. match unsafe { data.align_to::() } { ([], [mid], []) => Some(mid), _ => None, } } /// Converts a mutable slice of raw data into a mutable reference of `Self`. /// /// Because `Self` is made from a reference to the mutable slice, mutations to the returned /// reference are immediately reflected in `data`. The value of the returned `Self` will depend /// on the representation of the type in memory, and may change in an unstable fashion. /// /// This will return `None` if the length of data does not match the size of `Self`, or if the /// data is not aligned for the type of `Self`. fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> { // Early out to avoid an unneeded `align_to_mut` call. if data.len() != size_of::() { return None; } // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and // we ensured the size of the pointer's buffer is the correct size. The `align_to` method // ensures that we don't have any unaligned references. This aliases a pointer, but because // the pointer is from a mut slice reference, we borrow the passed in mutable reference. // Finally, the reference returned can not outlive data because they have equal implicit // lifetime constraints. match unsafe { data.align_to_mut::() } { ([], [mid], []) => Some(mid), _ => None, } } /// Converts a reference to `self` into a slice of bytes. /// /// The value of `self` is not copied. Instead, the slice is made from a reference to `self`. /// The value of bytes in the returned slice will depend on the representation of the type in /// memory, and may change in an unstable fashion. fn as_slice(&self) -> &[u8] { // SAFETY: Safe because the entire size of self is accessible as bytes because the trait // guarantees it. The lifetime of the returned slice is the same as the passed reference, // so that no dangling pointers will result from this pointer alias. 
unsafe { from_raw_parts(self as *const Self as *const u8, size_of::()) } } /// Converts a mutable reference to `self` into a mutable slice of bytes. /// /// Because the slice is made from a reference to `self`, mutations to the returned slice are /// immediately reflected in `self`. The value of bytes in the returned slice will depend on /// the representation of the type in memory, and may change in an unstable fashion. fn as_mut_slice(&mut self) -> &mut [u8] { // SAFETY: Safe because the entire size of self is accessible as bytes because the trait // guarantees it. The trait also guarantees that any combination of bytes is valid for this // type, so modifying them in the form of a byte slice is valid. The lifetime of the // returned slice is the same as the passed reference, so that no dangling pointers will // result from this pointer alias. Although this does alias a mutable pointer, we do so by // exclusively borrowing the given mutable reference. unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::()) } } /// Converts a mutable reference to `self` into a `VolatileSlice`. This is /// useful because `VolatileSlice` provides a `Bytes` implementation. /// /// # Safety /// /// Unlike most `VolatileMemory` implementation, this method requires an exclusive /// reference to `self`; this trivially fulfills `VolatileSlice::new`'s requirement /// that all accesses to `self` use volatile accesses (because there can /// be no other accesses). fn as_bytes(&mut self) -> VolatileSlice { // SAFETY: This is safe because the lifetime is the same as self unsafe { VolatileSlice::new(self as *mut Self as *mut _, size_of::()) } } } macro_rules! byte_valued_array { ($T:ty, $($N:expr)+) => { $( // SAFETY: All intrinsic types and arrays of intrinsic types are ByteValued. // They are just numbers. unsafe impl ByteValued for [$T; $N] {} )+ } } macro_rules! byte_valued_type { ($T:ty) => { // SAFETY: Safe as long T is POD. // We are using this macro to generated the implementation for integer types below. unsafe impl ByteValued for $T {} byte_valued_array! { $T, 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 } }; } byte_valued_type!(u8); byte_valued_type!(u16); byte_valued_type!(u32); byte_valued_type!(u64); byte_valued_type!(u128); byte_valued_type!(usize); byte_valued_type!(i8); byte_valued_type!(i16); byte_valued_type!(i32); byte_valued_type!(i64); byte_valued_type!(i128); byte_valued_type!(isize); /// A trait used to identify types which can be accessed atomically by proxy. pub trait AtomicAccess: ByteValued // Could not find a more succinct way of stating that `Self` can be converted // into `Self::A::V`, and the other way around. + From<<::A as AtomicInteger>::V> + Into<<::A as AtomicInteger>::V> { /// The `AtomicInteger` that atomic operations on `Self` are based on. type A: AtomicInteger; } macro_rules! 
impl_atomic_access { ($T:ty, $A:path) => { impl AtomicAccess for $T { type A = $A; } }; } impl_atomic_access!(i8, std::sync::atomic::AtomicI8); impl_atomic_access!(i16, std::sync::atomic::AtomicI16); impl_atomic_access!(i32, std::sync::atomic::AtomicI32); #[cfg(any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64", target_arch = "s390x" ))] impl_atomic_access!(i64, std::sync::atomic::AtomicI64); impl_atomic_access!(u8, std::sync::atomic::AtomicU8); impl_atomic_access!(u16, std::sync::atomic::AtomicU16); impl_atomic_access!(u32, std::sync::atomic::AtomicU32); #[cfg(any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64", target_arch = "s390x" ))] impl_atomic_access!(u64, std::sync::atomic::AtomicU64); impl_atomic_access!(isize, std::sync::atomic::AtomicIsize); impl_atomic_access!(usize, std::sync::atomic::AtomicUsize); /// A container to host a range of bytes and access its content. /// /// Candidates which may implement this trait include: /// - anonymous memory areas /// - mmapped memory areas /// - data files /// - a proxy to access memory on remote pub trait Bytes { /// Associated error codes type E; /// Writes a slice into the container at `addr`. /// /// Returns the number of bytes written. The number of bytes written can /// be less than the length of the slice if there isn't enough room in the /// container. fn write(&self, buf: &[u8], addr: A) -> Result; /// Reads data from the container at `addr` into a slice. /// /// Returns the number of bytes read. The number of bytes read can be less than the length /// of the slice if there isn't enough data within the container. fn read(&self, buf: &mut [u8], addr: A) -> Result; /// Writes the entire content of a slice into the container at `addr`. /// /// # Errors /// /// Returns an error if there isn't enough space within the container to write the entire slice. /// Part of the data may have been copied nevertheless. fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>; /// Reads data from the container at `addr` to fill an entire slice. /// /// # Errors /// /// Returns an error if there isn't enough data within the container to fill the entire slice. /// Part of the data may have been copied nevertheless. fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>; /// Writes an object into the container at `addr`. /// /// # Errors /// /// Returns an error if the object doesn't fit inside the container. fn write_obj(&self, val: T, addr: A) -> Result<(), Self::E> { self.write_slice(val.as_slice(), addr) } /// Reads an object from the container at `addr`. /// /// Reading from a volatile area isn't strictly safe as it could change mid-read. /// However, as long as the type T is plain old data and can handle random initialization, /// everything will be OK. /// /// # Errors /// /// Returns an error if there's not enough data inside the container. fn read_obj(&self, addr: A) -> Result { // SAFETY: ByteValued objects must be assignable from a arbitrary byte // sequence and are mandated to be packed. // Hence, zeroed memory is a fine initialization. let mut result: T = unsafe { MaybeUninit::::zeroed().assume_init() }; self.read_slice(result.as_mut_slice(), addr).map(|_| result) } /// Reads up to `count` bytes from an object and writes them into the container at `addr`. /// /// Returns the number of bytes written into the container. /// /// # Arguments /// * `addr` - Begin writing at this address. /// * `src` - Copy from `src` into the container. 
/// * `count` - Copy `count` bytes from `src` into the container. #[deprecated( note = "Use `.read_volatile_from` or the functions of the `ReadVolatile` trait instead" )] fn read_from(&self, addr: A, src: &mut F, count: usize) -> Result where F: Read; /// Reads exactly `count` bytes from an object and writes them into the container at `addr`. /// /// # Errors /// /// Returns an error if `count` bytes couldn't have been copied from `src` to the container. /// Part of the data may have been copied nevertheless. /// /// # Arguments /// * `addr` - Begin writing at this address. /// * `src` - Copy from `src` into the container. /// * `count` - Copy exactly `count` bytes from `src` into the container. #[deprecated( note = "Use `.read_exact_volatile_from` or the functions of the `ReadVolatile` trait instead" )] fn read_exact_from(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E> where F: Read; /// Reads up to `count` bytes from the container at `addr` and writes them it into an object. /// /// Returns the number of bytes written into the object. /// /// # Arguments /// * `addr` - Begin reading from this address. /// * `dst` - Copy from the container to `dst`. /// * `count` - Copy `count` bytes from the container to `dst`. #[deprecated( note = "Use `.write_volatile_to` or the functions of the `WriteVolatile` trait instead" )] fn write_to(&self, addr: A, dst: &mut F, count: usize) -> Result where F: Write; /// Reads exactly `count` bytes from the container at `addr` and writes them into an object. /// /// # Errors /// /// Returns an error if `count` bytes couldn't have been copied from the container to `dst`. /// Part of the data may have been copied nevertheless. /// /// # Arguments /// * `addr` - Begin reading from this address. /// * `dst` - Copy from the container to `dst`. /// * `count` - Copy exactly `count` bytes from the container to `dst`. #[deprecated( note = "Use `.write_all_volatile_to` or the functions of the `WriteVolatile` trait instead" )] fn write_all_to(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E> where F: Write; /// Atomically store a value at the specified address. fn store(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>; /// Atomically load a value from the specified address. fn load(&self, addr: A, order: Ordering) -> Result; } #[cfg(test)] pub(crate) mod tests { #![allow(clippy::undocumented_unsafe_blocks)] use super::*; use std::cell::RefCell; use std::fmt::Debug; use std::mem::align_of; // Helper method to test atomic accesses for a given `b: Bytes` that's supposed to be // zero-initialized. 
pub fn check_atomic_accesses(b: B, addr: A, bad_addr: A) where A: Copy, B: Bytes, B::E: Debug, { let val = 100u32; assert_eq!(b.load::(addr, Ordering::Relaxed).unwrap(), 0); b.store(val, addr, Ordering::Relaxed).unwrap(); assert_eq!(b.load::(addr, Ordering::Relaxed).unwrap(), val); assert!(b.load::(bad_addr, Ordering::Relaxed).is_err()); assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err()); } fn check_byte_valued_type() where T: ByteValued + PartialEq + Debug + Default, { let mut data = [0u8; 48]; let pre_len = { let (pre, _, _) = unsafe { data.align_to::() }; pre.len() }; { let aligned_data = &mut data[pre_len..pre_len + size_of::()]; { let mut val: T = Default::default(); assert_eq!(T::from_slice(aligned_data), Some(&val)); assert_eq!(T::from_mut_slice(aligned_data), Some(&mut val)); assert_eq!(val.as_slice(), aligned_data); assert_eq!(val.as_mut_slice(), aligned_data); } } for i in 1..size_of::().min(align_of::()) { let begin = pre_len + i; let end = begin + size_of::(); let unaligned_data = &mut data[begin..end]; { if align_of::() != 1 { assert_eq!(T::from_slice(unaligned_data), None); assert_eq!(T::from_mut_slice(unaligned_data), None); } } } // Check the early out condition { assert!(T::from_slice(&data).is_none()); assert!(T::from_mut_slice(&mut data).is_none()); } } #[test] fn test_byte_valued() { check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); check_byte_valued_type::(); } pub const MOCK_BYTES_CONTAINER_SIZE: usize = 10; pub struct MockBytesContainer { container: RefCell<[u8; MOCK_BYTES_CONTAINER_SIZE]>, } impl MockBytesContainer { pub fn new() -> Self { MockBytesContainer { container: RefCell::new([0; MOCK_BYTES_CONTAINER_SIZE]), } } pub fn validate_slice_op(&self, buf: &[u8], addr: usize) -> Result<(), ()> { if MOCK_BYTES_CONTAINER_SIZE - buf.len() <= addr { return Err(()); } Ok(()) } } impl Bytes for MockBytesContainer { type E = (); fn write(&self, _: &[u8], _: usize) -> Result { unimplemented!() } fn read(&self, _: &mut [u8], _: usize) -> Result { unimplemented!() } fn write_slice(&self, buf: &[u8], addr: usize) -> Result<(), Self::E> { self.validate_slice_op(buf, addr)?; let mut container = self.container.borrow_mut(); container[addr..addr + buf.len()].copy_from_slice(buf); Ok(()) } fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<(), Self::E> { self.validate_slice_op(buf, addr)?; let container = self.container.borrow(); buf.copy_from_slice(&container[addr..addr + buf.len()]); Ok(()) } fn read_from(&self, _: usize, _: &mut F, _: usize) -> Result where F: Read, { unimplemented!() } fn read_exact_from(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E> where F: Read, { unimplemented!() } fn write_to(&self, _: usize, _: &mut F, _: usize) -> Result where F: Write, { unimplemented!() } fn write_all_to(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E> where F: Write, { unimplemented!() } fn store( &self, _val: T, _addr: usize, _order: Ordering, ) -> Result<(), Self::E> { unimplemented!() } fn load(&self, _addr: usize, _order: Ordering) -> Result { unimplemented!() } } #[test] fn test_bytes() { let bytes = MockBytesContainer::new(); assert!(bytes.write_obj(std::u64::MAX, 0).is_ok()); assert_eq!(bytes.read_obj::(0).unwrap(), std::u64::MAX); assert!(bytes .write_obj(std::u64::MAX, 
MOCK_BYTES_CONTAINER_SIZE) .is_err()); assert!(bytes.read_obj::(MOCK_BYTES_CONTAINER_SIZE).is_err()); } #[repr(C)] #[derive(Copy, Clone, Default)] struct S { a: u32, b: u32, } unsafe impl ByteValued for S {} #[test] fn byte_valued_slice() { let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1]; let mut s: S = Default::default(); s.as_bytes().copy_from(&a); assert_eq!(s.a, 0); assert_eq!(s.b, 0x0101_0101); } } vm-memory-0.14.0/src/endian.rs000064400000000000000000000116001046102023000142270ustar 00000000000000// Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-3-Clause file. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Explicit endian types useful for embedding in structs or reinterpreting data. //! //! Each endian type is guaarnteed to have the same size and alignment as a regular unsigned //! primitive of the equal size. //! //! # Examples //! //! ``` //! # use vm_memory::{Be32, Le32}; //! # //! let b: Be32 = From::from(3); //! let l: Le32 = From::from(3); //! //! assert_eq!(b.to_native(), 3); //! assert_eq!(l.to_native(), 3); //! assert!(b == 3); //! assert!(l == 3); //! //! let b_trans: u32 = unsafe { std::mem::transmute(b) }; //! let l_trans: u32 = unsafe { std::mem::transmute(l) }; //! //! #[cfg(target_endian = "little")] //! assert_eq!(l_trans, 3); //! #[cfg(target_endian = "big")] //! assert_eq!(b_trans, 3); //! //! assert_ne!(b_trans, l_trans); //! ``` use std::mem::{align_of, size_of}; use crate::bytes::ByteValued; macro_rules! const_assert { ($condition:expr) => { let _ = [(); 0 - !$condition as usize]; }; } macro_rules! endian_type { ($old_type:ident, $new_type:ident, $to_new:ident, $from_new:ident) => { /// An unsigned integer type of with an explicit endianness. /// /// See module level documentation for examples. #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)] pub struct $new_type($old_type); impl $new_type { fn _assert() { const_assert!(align_of::<$new_type>() == align_of::<$old_type>()); const_assert!(size_of::<$new_type>() == size_of::<$old_type>()); } /// Converts `self` to the native endianness. pub fn to_native(self) -> $old_type { $old_type::$from_new(self.0) } } // SAFETY: Safe because we are using this for implementing ByteValued for endian types // which are POD. unsafe impl ByteValued for $new_type {} impl PartialEq<$old_type> for $new_type { fn eq(&self, other: &$old_type) -> bool { self.0 == $old_type::$to_new(*other) } } impl PartialEq<$new_type> for $old_type { fn eq(&self, other: &$new_type) -> bool { $old_type::$to_new(other.0) == *self } } impl From<$new_type> for $old_type { fn from(v: $new_type) -> $old_type { v.to_native() } } impl From<$old_type> for $new_type { fn from(v: $old_type) -> $new_type { $new_type($old_type::$to_new(v)) } } }; } endian_type!(u16, Le16, to_le, from_le); endian_type!(u32, Le32, to_le, from_le); endian_type!(u64, Le64, to_le, from_le); endian_type!(usize, LeSize, to_le, from_le); endian_type!(u16, Be16, to_be, from_be); endian_type!(u32, Be32, to_be, from_be); endian_type!(u64, Be64, to_be, from_be); endian_type!(usize, BeSize, to_be, from_be); #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] use super::*; use std::convert::From; use std::mem::transmute; #[cfg(target_endian = "little")] const NATIVE_LITTLE: bool = true; #[cfg(target_endian = "big")] const NATIVE_LITTLE: bool = false; const NATIVE_BIG: bool = !NATIVE_LITTLE; macro_rules! 
endian_test { ($old_type:ty, $new_type:ty, $test_name:ident, $native:expr) => { mod $test_name { use super::*; #[allow(overflowing_literals)] #[test] fn test_endian_type() { <$new_type>::_assert(); let v = 0x0123_4567_89AB_CDEF as $old_type; let endian_v: $new_type = From::from(v); let endian_into: $old_type = endian_v.into(); let endian_transmute: $old_type = unsafe { transmute(endian_v) }; if $native { assert_eq!(endian_v, endian_transmute); } else { assert_eq!(endian_v, endian_transmute.swap_bytes()); } assert_eq!(endian_into, v); assert_eq!(endian_v.to_native(), v); assert!(v == endian_v); assert!(endian_v == v); } } }; } endian_test!(u16, Le16, test_le16, NATIVE_LITTLE); endian_test!(u32, Le32, test_le32, NATIVE_LITTLE); endian_test!(u64, Le64, test_le64, NATIVE_LITTLE); endian_test!(usize, LeSize, test_le_size, NATIVE_LITTLE); endian_test!(u16, Be16, test_be16, NATIVE_BIG); endian_test!(u32, Be32, test_be32, NATIVE_BIG); endian_test!(u64, Be64, test_be64, NATIVE_BIG); endian_test!(usize, BeSize, test_be_size, NATIVE_BIG); } vm-memory-0.14.0/src/guest_memory.rs000064400000000000000000001431471046102023000155240ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-3-Clause file. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Traits to track and access the physical memory of the guest. //! //! To make the abstraction as generic as possible, all the core traits declared here only define //! methods to access guest's memory, and never define methods to manage (create, delete, insert, //! remove etc) guest's memory. This way, the guest memory consumers (virtio device drivers, //! vhost drivers and boot loaders etc) may be decoupled from the guest memory provider (typically //! a hypervisor). //! //! Traits and Structs //! - [`GuestAddress`](struct.GuestAddress.html): represents a guest physical address (GPA). //! - [`MemoryRegionAddress`](struct.MemoryRegionAddress.html): represents an offset inside a //! region. //! - [`GuestMemoryRegion`](trait.GuestMemoryRegion.html): represent a continuous region of guest's //! physical memory. //! - [`GuestMemory`](trait.GuestMemory.html): represent a collection of `GuestMemoryRegion` //! objects. //! The main responsibilities of the `GuestMemory` trait are: //! - hide the detail of accessing guest's physical address. //! - map a request address to a `GuestMemoryRegion` object and relay the request to it. //! - handle cases where an access request spanning two or more `GuestMemoryRegion` objects. //! //! Whenever a collection of `GuestMemoryRegion` objects is mutable, //! [`GuestAddressSpace`](trait.GuestAddressSpace.html) should be implemented //! for clients to obtain a [`GuestMemory`] reference or smart pointer. //! //! The `GuestMemoryRegion` trait has an associated `B: Bitmap` type which is used to handle //! dirty bitmap tracking. Backends are free to define the granularity (or whether tracking is //! actually performed at all). Those that do implement tracking functionality are expected to //! ensure the correctness of the underlying `Bytes` implementation. The user has to explicitly //! record (using the handle returned by `GuestRegionMmap::bitmap`) write accesses performed //! 
via pointers, references, or slices returned by methods of `GuestMemory`,`GuestMemoryRegion`, //! `VolatileSlice`, `VolatileRef`, or `VolatileArrayRef`. use std::convert::From; use std::fs::File; use std::io::{self, Read, Write}; use std::ops::{BitAnd, BitOr, Deref}; use std::rc::Rc; use std::sync::atomic::Ordering; use std::sync::Arc; use crate::address::{Address, AddressValue}; use crate::bitmap::{Bitmap, BS, MS}; use crate::bytes::{AtomicAccess, Bytes}; use crate::io::{ReadVolatile, WriteVolatile}; use crate::volatile_memory::{self, VolatileSlice}; use crate::GuestMemoryError; static MAX_ACCESS_CHUNK: usize = 4096; /// Errors associated with handling guest memory accesses. #[allow(missing_docs)] #[derive(Debug, thiserror::Error)] pub enum Error { /// Failure in finding a guest address in any memory regions mapped by this guest. #[error("Guest memory error: invalid guest address {}",.0.raw_value())] InvalidGuestAddress(GuestAddress), /// Couldn't read/write from the given source. #[error("Guest memory error: {0}")] IOError(io::Error), /// Incomplete read or write. #[error("Guest memory error: only used {completed} bytes in {expected} long buffer")] PartialBuffer { expected: usize, completed: usize }, /// Requested backend address is out of range. #[error("Guest memory error: invalid backend address")] InvalidBackendAddress, /// Host virtual address not available. #[error("Guest memory error: host virtual address not available")] HostAddressNotAvailable, /// The length returned by the callback passed to `try_access` is outside the address range. #[error( "The length returned by the callback passed to `try_access` is outside the address range." )] CallbackOutOfRange, /// The address to be read by `try_access` is outside the address range. #[error("The address to be read by `try_access` is outside the address range")] GuestAddressOverflow, } impl From for Error { fn from(e: volatile_memory::Error) -> Self { match e { volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress, volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress, volatile_memory::Error::TooBig { .. } => Error::InvalidBackendAddress, volatile_memory::Error::Misaligned { .. } => Error::InvalidBackendAddress, volatile_memory::Error::IOError(e) => Error::IOError(e), volatile_memory::Error::PartialBuffer { expected, completed, } => Error::PartialBuffer { expected, completed, }, } } } /// Result of guest memory operations. pub type Result = std::result::Result; /// Represents a guest physical address (GPA). /// /// # Notes: /// On ARM64, a 32-bit hypervisor may be used to support a 64-bit guest. For simplicity, /// `u64` is used to store the the raw value no matter if the guest a 32-bit or 64-bit virtual /// machine. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)] pub struct GuestAddress(pub u64); impl_address_ops!(GuestAddress, u64); /// Represents an offset inside a region. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)] pub struct MemoryRegionAddress(pub u64); impl_address_ops!(MemoryRegionAddress, u64); /// Type of the raw value stored in a `GuestAddress` object. pub type GuestUsize = ::V; /// Represents the start point within a `File` that backs a `GuestMemoryRegion`. #[derive(Clone, Debug)] pub struct FileOffset { file: Arc, start: u64, } impl FileOffset { /// Creates a new `FileOffset` object. pub fn new(file: File, start: u64) -> Self { FileOffset::from_arc(Arc::new(file), start) } /// Creates a new `FileOffset` object based on an exiting `Arc`. 
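///
/// # Examples
///
/// A sketch only; the `/dev/zero` path is illustrative.
///
/// ```no_run
/// use std::fs::File;
/// use std::sync::Arc;
/// use vm_memory::FileOffset;
///
/// let file = Arc::new(File::open("/dev/zero").unwrap());
/// // The same `Arc<File>` can back several regions at different offsets.
/// let _fo1 = FileOffset::from_arc(Arc::clone(&file), 0);
/// let fo2 = FileOffset::from_arc(file, 0x1000);
/// assert_eq!(fo2.start(), 0x1000);
/// ```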
pub fn from_arc(file: Arc, start: u64) -> Self { FileOffset { file, start } } /// Returns a reference to the inner `File` object. pub fn file(&self) -> &File { self.file.as_ref() } /// Return a reference to the inner `Arc` object. pub fn arc(&self) -> &Arc { &self.file } /// Returns the start offset within the file. pub fn start(&self) -> u64 { self.start } } /// Represents a continuous region of guest physical memory. #[allow(clippy::len_without_is_empty)] pub trait GuestMemoryRegion: Bytes { /// Type used for dirty memory tracking. type B: Bitmap; /// Returns the size of the region. fn len(&self) -> GuestUsize; /// Returns the minimum (inclusive) address managed by the region. fn start_addr(&self) -> GuestAddress; /// Returns the maximum (inclusive) address managed by the region. fn last_addr(&self) -> GuestAddress { // unchecked_add is safe as the region bounds were checked when it was created. self.start_addr().unchecked_add(self.len() - 1) } /// Borrow the associated `Bitmap` object. fn bitmap(&self) -> &Self::B; /// Returns the given address if it is within this region. fn check_address(&self, addr: MemoryRegionAddress) -> Option { if self.address_in_range(addr) { Some(addr) } else { None } } /// Returns `true` if the given address is within this region. fn address_in_range(&self, addr: MemoryRegionAddress) -> bool { addr.raw_value() < self.len() } /// Returns the address plus the offset if it is in this region. fn checked_offset( &self, base: MemoryRegionAddress, offset: usize, ) -> Option { base.checked_add(offset as u64) .and_then(|addr| self.check_address(addr)) } /// Tries to convert an absolute address to a relative address within this region. /// /// Returns `None` if `addr` is out of the bounds of this region. fn to_region_addr(&self, addr: GuestAddress) -> Option { addr.checked_offset_from(self.start_addr()) .and_then(|offset| self.check_address(MemoryRegionAddress(offset))) } /// Returns the host virtual address corresponding to the region address. /// /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`, /// have the capability to mmap guest address range into host virtual address space for /// direct access, so the corresponding host virtual address may be passed to other subsystems. /// /// # Note /// The underlying guest memory is not protected from memory aliasing, which breaks the /// Rust memory safety model. It's the caller's responsibility to ensure that there's no /// concurrent accesses to the underlying guest memory. fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> { Err(Error::HostAddressNotAvailable) } /// Returns information regarding the file and offset backing this memory region. fn file_offset(&self) -> Option<&FileOffset> { None } /// Returns a slice corresponding to the data in the region. /// /// Returns `None` if the region does not support slice-based access. /// /// # Safety /// /// Unsafe because of possible aliasing. #[deprecated = "It is impossible to use this function for accessing memory of a running virtual \ machine without violating aliasing rules "] unsafe fn as_slice(&self) -> Option<&[u8]> { None } /// Returns a mutable slice corresponding to the data in the region. /// /// Returns `None` if the region does not support slice-based access. /// /// # Safety /// /// Unsafe because of possible aliasing. 
Mutable accesses performed through the /// returned slice are not visible to the dirty bitmap tracking functionality of /// the region, and must be manually recorded using the associated bitmap object. #[deprecated = "It is impossible to use this function for accessing memory of a running virtual \ machine without violating aliasing rules "] unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> { None } /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at /// `offset`. #[allow(unused_variables)] fn get_slice( &self, offset: MemoryRegionAddress, count: usize, ) -> Result>> { Err(Error::HostAddressNotAvailable) } /// Gets a slice of memory for the entire region that supports volatile access. /// /// # Examples (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion}; /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef}; /// # /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None) /// .expect("Could not create guest memory"); /// let slice = region /// .as_volatile_slice() /// .expect("Could not get volatile slice"); /// /// let v = 42u32; /// let r = slice /// .get_ref::(0x200) /// .expect("Could not get reference"); /// r.store(v); /// assert_eq!(r.load(), v); /// # } /// ``` fn as_volatile_slice(&self) -> Result>> { self.get_slice(MemoryRegionAddress(0), self.len() as usize) } /// Show if the region is based on the `HugeTLBFS`. /// Returns Some(true) if the region is backed by hugetlbfs. /// None represents that no information is available. /// /// # Examples (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap}; /// let addr = GuestAddress(0x1000); /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap(); /// let r = mem.find_region(addr).unwrap(); /// assert_eq!(r.is_hugetlbfs(), None); /// # } /// ``` #[cfg(target_os = "linux")] fn is_hugetlbfs(&self) -> Option { None } } /// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object. /// The vm-memory crate already provides trivial implementation for /// references to `GuestMemory` or reference-counted `GuestMemory` objects, /// but the trait can also be implemented by any other struct in order /// to provide temporary access to a snapshot of the memory map. /// /// In order to support generic mutable memory maps, devices (or other things /// that access memory) should store the memory as a `GuestAddressSpace`. /// This example shows that references can also be used as the `GuestAddressSpace` /// implementation, providing a zero-cost abstraction whenever immutable memory /// maps are sufficient. 
/// /// # Examples (uses the `backend-mmap` and `backend-atomic` features) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use std::sync::Arc; /// # use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryMmap}; /// # /// pub struct VirtioDevice { /// mem: Option, /// } /// /// impl VirtioDevice { /// fn new() -> Self { /// VirtioDevice { mem: None } /// } /// fn activate(&mut self, mem: AS) { /// self.mem = Some(mem) /// } /// } /// /// fn get_mmap() -> GuestMemoryMmap<()> { /// let start_addr = GuestAddress(0x1000); /// GuestMemoryMmap::from_ranges(&vec![(start_addr, 0x400)]) /// .expect("Could not create guest memory") /// } /// /// // Using `VirtioDevice` with an immutable GuestMemoryMmap: /// let mut for_immutable_mmap = VirtioDevice::<&GuestMemoryMmap<()>>::new(); /// let mmap = get_mmap(); /// for_immutable_mmap.activate(&mmap); /// let mut another = VirtioDevice::<&GuestMemoryMmap<()>>::new(); /// another.activate(&mmap); /// /// # #[cfg(feature = "backend-atomic")] /// # { /// # use vm_memory::GuestMemoryAtomic; /// // Using `VirtioDevice` with a mutable GuestMemoryMmap: /// let mut for_mutable_mmap = VirtioDevice::>>::new(); /// let atomic = GuestMemoryAtomic::new(get_mmap()); /// for_mutable_mmap.activate(atomic.clone()); /// let mut another = VirtioDevice::>>::new(); /// another.activate(atomic.clone()); /// /// // atomic can be modified here... /// # } /// # } /// ``` pub trait GuestAddressSpace { /// The type that will be used to access guest memory. type M: GuestMemory; /// A type that provides access to the memory. type T: Clone + Deref; /// Return an object (e.g. a reference or guard) that can be used /// to access memory through this address space. The object provides /// a consistent snapshot of the memory map. fn memory(&self) -> Self::T; } impl GuestAddressSpace for &M { type M = M; type T = Self; fn memory(&self) -> Self { self } } impl GuestAddressSpace for Rc { type M = M; type T = Self; fn memory(&self) -> Self { self.clone() } } impl GuestAddressSpace for Arc { type M = M; type T = Self; fn memory(&self) -> Self { self.clone() } } /// Lifetime generic associated iterators. The actual iterator type is defined through associated /// item `Iter`, for example: /// /// ``` /// # use std::marker::PhantomData; /// # use vm_memory::guest_memory::GuestMemoryIterator; /// # /// // Declare the relevant Region and Memory types /// struct MyGuestRegion {/* fields omitted */} /// struct MyGuestMemory {/* fields omitted */} /// /// // Make an Iterator type to iterate over the Regions /// # /* /// struct MyGuestMemoryIter<'a> {/* fields omitted */} /// # */ /// # struct MyGuestMemoryIter<'a> { /// # _marker: PhantomData<&'a MyGuestRegion>, /// # } /// impl<'a> Iterator for MyGuestMemoryIter<'a> { /// type Item = &'a MyGuestRegion; /// fn next(&mut self) -> Option<&'a MyGuestRegion> { /// // ... /// # None /// } /// } /// /// // Associate the Iter type with the Memory type /// impl<'a> GuestMemoryIterator<'a, MyGuestRegion> for MyGuestMemory { /// type Iter = MyGuestMemoryIter<'a>; /// } /// ``` pub trait GuestMemoryIterator<'a, R: 'a> { /// Type of the `iter` method's return value. type Iter: Iterator; } /// `GuestMemory` represents a container for an *immutable* collection of /// `GuestMemoryRegion` objects. `GuestMemory` provides the `Bytes` /// trait to hide the details of accessing guest memory by physical address. 
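///
/// Since `GuestMemory` implementors automatically get a [`Bytes`] implementation (see the
/// blanket impl further below in this module), plain objects can be read and written by guest
/// physical address. A minimal sketch, assuming the `backend-mmap` feature:
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
/// # {
/// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
/// let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
///     .expect("Could not create guest memory");
/// gm.write_obj(0xF00Du16, GuestAddress(0x1010)).unwrap();
/// assert_eq!(gm.read_obj::<u16>(GuestAddress(0x1010)).unwrap(), 0xF00D);
/// # }
/// ```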
/// Interior mutability is not allowed for implementations of `GuestMemory` so /// that they always provide a consistent view of the memory map. /// /// The task of the `GuestMemory` trait are: /// - map a request address to a `GuestMemoryRegion` object and relay the request to it. /// - handle cases where an access request spanning two or more `GuestMemoryRegion` objects. pub trait GuestMemory { /// Type of objects hosted by the address space. type R: GuestMemoryRegion; /// Lifetime generic associated iterators. Usually this is just `Self`. type I: for<'a> GuestMemoryIterator<'a, Self::R>; /// Returns the number of regions in the collection. fn num_regions(&self) -> usize; /// Returns the region containing the specified address or `None`. fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>; /// Perform the specified action on each region. /// /// It only walks children of current region and does not step into sub regions. #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")] fn with_regions(&self, cb: F) -> std::result::Result<(), E> where F: Fn(usize, &Self::R) -> std::result::Result<(), E>, { for (index, region) in self.iter().enumerate() { cb(index, region)?; } Ok(()) } /// Perform the specified action on each region mutably. /// /// It only walks children of current region and does not step into sub regions. #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")] fn with_regions_mut(&self, mut cb: F) -> std::result::Result<(), E> where F: FnMut(usize, &Self::R) -> std::result::Result<(), E>, { for (index, region) in self.iter().enumerate() { cb(index, region)?; } Ok(()) } /// Gets an iterator over the entries in the collection. /// /// # Examples /// /// * Compute the total size of all memory mappings in KB by iterating over the memory regions /// and dividing their sizes to 1024, then summing up the values in an accumulator. (uses the /// `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap}; /// # /// let start_addr1 = GuestAddress(0x0); /// let start_addr2 = GuestAddress(0x400); /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)]) /// .expect("Could not create guest memory"); /// /// let total_size = gm /// .iter() /// .map(|region| region.len() / 1024) /// .fold(0, |acc, size| acc + size); /// assert_eq!(3, total_size) /// # } /// ``` fn iter(&self) -> >::Iter; /// Applies two functions, specified as callbacks, on the inner memory regions. /// /// # Arguments /// * `init` - Starting value of the accumulator for the `foldf` function. /// * `mapf` - "Map" function, applied to all the inner memory regions. It returns an array of /// the same size as the memory regions array, containing the function's results /// for each region. /// * `foldf` - "Fold" function, applied to the array returned by `mapf`. It acts as an /// operator, applying itself to the `init` value and to each subsequent elemnent /// in the array returned by `mapf`. /// /// # Examples /// /// * Compute the total size of all memory mappings in KB by iterating over the memory regions /// and dividing their sizes to 1024, then summing up the values in an accumulator. 
(uses the /// `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap}; /// # /// let start_addr1 = GuestAddress(0x0); /// let start_addr2 = GuestAddress(0x400); /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)]) /// .expect("Could not create guest memory"); /// /// let total_size = gm.map_and_fold(0, |(_, region)| region.len() / 1024, |acc, size| acc + size); /// assert_eq!(3, total_size) /// # } /// ``` #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")] fn map_and_fold(&self, init: T, mapf: F, foldf: G) -> T where F: Fn((usize, &Self::R)) -> T, G: Fn(T, T) -> T, { self.iter().enumerate().map(mapf).fold(init, foldf) } /// Returns the maximum (inclusive) address managed by the /// [`GuestMemory`](trait.GuestMemory.html). /// /// # Examples (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap}; /// # /// let start_addr = GuestAddress(0x1000); /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// .expect("Could not create guest memory"); /// /// assert_eq!(start_addr.checked_add(0x3ff), Some(gm.last_addr())); /// # } /// ``` fn last_addr(&self) -> GuestAddress { self.iter() .map(GuestMemoryRegion::last_addr) .fold(GuestAddress(0), std::cmp::max) } /// Tries to convert an absolute address to a relative address within the corresponding region. /// /// Returns `None` if `addr` isn't present within the memory of the guest. fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> { self.find_region(addr) .map(|r| (r, r.to_region_addr(addr).unwrap())) } /// Returns `true` if the given address is present within the memory of the guest. fn address_in_range(&self, addr: GuestAddress) -> bool { self.find_region(addr).is_some() } /// Returns the given address if it is present within the memory of the guest. fn check_address(&self, addr: GuestAddress) -> Option { self.find_region(addr).map(|_| addr) } /// Check whether the range [base, base + len) is valid. fn check_range(&self, base: GuestAddress, len: usize) -> bool { match self.try_access(len, base, |_, count, _, _| -> Result { Ok(count) }) { Ok(count) => count == len, _ => false, } } /// Returns the address plus the offset if it is present within the memory of the guest. fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option { base.checked_add(offset as u64) .and_then(|addr| self.check_address(addr)) } /// Invokes callback `f` to handle data in the address range `[addr, addr + count)`. /// /// The address range `[addr, addr + count)` may span more than one /// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object, or even have holes in it. 
/// So [`try_access()`](trait.GuestMemory.html#method.try_access) invokes the callback 'f' /// for each [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object involved and returns: /// - the error code returned by the callback 'f' /// - the size of the already handled data when encountering the first hole /// - the size of the already handled data when the whole range has been handled fn try_access(&self, count: usize, addr: GuestAddress, mut f: F) -> Result where F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result, { let mut cur = addr; let mut total = 0; while let Some(region) = self.find_region(cur) { let start = region.to_region_addr(cur).unwrap(); let cap = region.len() - start.raw_value(); let len = std::cmp::min(cap, (count - total) as GuestUsize); match f(total, len as usize, start, region) { // no more data Ok(0) => return Ok(total), // made some progress Ok(len) => { total = match total.checked_add(len) { Some(x) if x < count => x, Some(x) if x == count => return Ok(x), _ => return Err(Error::CallbackOutOfRange), }; cur = match cur.overflowing_add(len as GuestUsize) { (x @ GuestAddress(0), _) | (x, false) => x, (_, true) => return Err(Error::GuestAddressOverflow), }; } // error happened e => return e, } } if total == 0 { Err(Error::InvalidGuestAddress(addr)) } else { Ok(total) } } /// Reads up to `count` bytes from an object and writes them into guest memory at `addr`. /// /// Returns the number of bytes written into guest memory. /// /// # Arguments /// * `addr` - Begin writing at this address. /// * `src` - Copy from `src` into the container. /// * `count` - Copy `count` bytes from `src` into the container. /// /// # Examples /// /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{Address, GuestMemory, Bytes, GuestAddress, GuestMemoryMmap}; /// # use std::fs::File; /// # use std::path::Path; /// # /// # let start_addr = GuestAddress(0x1000); /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// # .expect("Could not create guest memory"); /// # let addr = GuestAddress(0x1010); /// # let mut file = if cfg!(unix) { /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom"); /// # file /// # } else { /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")) /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe") /// # }; /// /// gm.read_volatile_from(addr, &mut file, 128) /// .expect("Could not read from /dev/urandom into guest memory"); /// /// let read_addr = addr.checked_add(8).expect("Could not compute read address"); /// let rand_val: u32 = gm /// .read_obj(read_addr) /// .expect("Could not read u32 val from /dev/urandom"); /// # } /// ``` fn read_volatile_from(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result where F: ReadVolatile, { self.try_access(count, addr, |offset, len, caddr, region| -> Result { // Check if something bad happened before doing unsafe things. assert!(offset <= count); let len = std::cmp::min(len, MAX_ACCESS_CHUNK); let mut vslice = region.get_slice(caddr, len)?; src.read_volatile(&mut vslice) .map_err(GuestMemoryError::from) }) } /// Reads up to `count` bytes from guest memory at `addr` and writes them it into an object. /// /// Returns the number of bytes copied from guest memory. /// /// # Arguments /// * `addr` - Begin reading from this address. /// * `dst` - Copy from guest memory to `dst`. 
/// * `count` - Copy `count` bytes from guest memory to `dst`. fn write_volatile_to(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result where F: WriteVolatile, { self.try_access(count, addr, |offset, len, caddr, region| -> Result { // Check if something bad happened before doing unsafe things. assert!(offset <= count); let len = std::cmp::min(len, MAX_ACCESS_CHUNK); let vslice = region.get_slice(caddr, len)?; // For a non-RAM region, reading could have side effects, so we // must use write_all(). dst.write_all_volatile(&vslice)?; Ok(len) }) } /// Reads exactly `count` bytes from an object and writes them into guest memory at `addr`. /// /// # Errors /// /// Returns an error if `count` bytes couldn't have been copied from `src` to guest memory. /// Part of the data may have been copied nevertheless. /// /// # Arguments /// * `addr` - Begin writing at this address. /// * `src` - Copy from `src` into guest memory. /// * `count` - Copy exactly `count` bytes from `src` into guest memory. fn read_exact_volatile_from( &self, addr: GuestAddress, src: &mut F, count: usize, ) -> Result<()> where F: ReadVolatile, { let res = self.read_volatile_from(addr, src, count)?; if res != count { return Err(Error::PartialBuffer { expected: count, completed: res, }); } Ok(()) } /// Reads exactly `count` bytes from guest memory at `addr` and writes them into an object. /// /// # Errors /// /// Returns an error if `count` bytes couldn't have been copied from guest memory to `dst`. /// Part of the data may have been copied nevertheless. /// /// # Arguments /// * `addr` - Begin reading from this address. /// * `dst` - Copy from guest memory to `dst`. /// * `count` - Copy exactly `count` bytes from guest memory to `dst`. fn write_all_volatile_to(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()> where F: WriteVolatile, { let res = self.write_volatile_to(addr, dst, count)?; if res != count { return Err(Error::PartialBuffer { expected: count, completed: res, }); } Ok(()) } /// Get the host virtual address corresponding to the guest address. /// /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`, /// have the capability to mmap the guest address range into virtual address space of the host /// for direct access, so the corresponding host virtual address may be passed to other /// subsystems. /// /// # Note /// The underlying guest memory is not protected from memory aliasing, which breaks the /// Rust memory safety model. It's the caller's responsibility to ensure that there's no /// concurrent accesses to the underlying guest memory. /// /// # Arguments /// * `addr` - Guest address to convert. /// /// # Examples (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; /// # /// # let start_addr = GuestAddress(0x1000); /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x500)]) /// # .expect("Could not create guest memory"); /// # /// let addr = gm /// .get_host_address(GuestAddress(0x1200)) /// .expect("Could not get host address"); /// println!("Host address is {:p}", addr); /// # } /// ``` fn get_host_address(&self, addr: GuestAddress) -> Result<*mut u8> { self.to_region_addr(addr) .ok_or(Error::InvalidGuestAddress(addr)) .and_then(|(r, addr)| r.get_host_address(addr)) } /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at /// `addr`. 
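    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch, mirroring the other doctests in this file:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
    ///     .expect("Could not create guest memory");
    /// let slice = gm
    ///     .get_slice(GuestAddress(0x1010), 0x100)
    ///     .expect("Could not get slice");
    /// assert_eq!(slice.len(), 0x100);
    /// # }
    /// ```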
fn get_slice(&self, addr: GuestAddress, count: usize) -> Result>> { self.to_region_addr(addr) .ok_or(Error::InvalidGuestAddress(addr)) .and_then(|(r, addr)| r.get_slice(addr, count)) } } impl Bytes for T { type E = Error; fn write(&self, buf: &[u8], addr: GuestAddress) -> Result { self.try_access( buf.len(), addr, |offset, _count, caddr, region| -> Result { region.write(&buf[offset..], caddr) }, ) } fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result { self.try_access( buf.len(), addr, |offset, _count, caddr, region| -> Result { region.read(&mut buf[offset..], caddr) }, ) } /// # Examples /// /// * Write a slice at guestaddress 0x1000. (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap}; /// # /// # let start_addr = GuestAddress(0x1000); /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// # .expect("Could not create guest memory"); /// # /// gm.write_slice(&[1, 2, 3, 4, 5], start_addr) /// .expect("Could not write slice to guest memory"); /// # } /// ``` fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> { let res = self.write(buf, addr)?; if res != buf.len() { return Err(Error::PartialBuffer { expected: buf.len(), completed: res, }); } Ok(()) } /// # Examples /// /// * Read a slice of length 16 at guestaddress 0x1000. (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap}; /// # /// let start_addr = GuestAddress(0x1000); /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// .expect("Could not create guest memory"); /// let buf = &mut [0u8; 16]; /// /// gm.read_slice(buf, start_addr) /// .expect("Could not read slice from guest memory"); /// # } /// ``` fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> { let res = self.read(buf, addr)?; if res != buf.len() { return Err(Error::PartialBuffer { expected: buf.len(), completed: res, }); } Ok(()) } /// # Examples /// /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; /// # use std::fs::File; /// # use std::path::Path; /// # /// # let start_addr = GuestAddress(0x1000); /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// # .expect("Could not create guest memory"); /// # let addr = GuestAddress(0x1010); /// # let mut file = if cfg!(unix) { /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom"); /// # file /// # } else { /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")) /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe") /// # }; /// /// gm.read_from(addr, &mut file, 128) /// .expect("Could not read from /dev/urandom into guest memory"); /// /// let read_addr = addr.checked_add(8).expect("Could not compute read address"); /// let rand_val: u32 = gm /// .read_obj(read_addr) /// .expect("Could not read u32 val from /dev/urandom"); /// # } /// ``` fn read_from(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result where F: Read, { self.try_access(count, addr, |offset, len, caddr, region| -> Result { // Check if something bad happened before doing unsafe things. 
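            // `offset` is the number of bytes that previous iterations of `try_access` have
            // already handled, so it can never exceed the total requested `count`.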
assert!(offset <= count); let len = std::cmp::min(len, MAX_ACCESS_CHUNK); let mut buf = vec![0u8; len].into_boxed_slice(); loop { match src.read(&mut buf[..]) { Ok(bytes_read) => { // We don't need to update the dirty bitmap manually here because it's // expected to be handled by the logic within the `Bytes` // implementation for the region object. let bytes_written = region.write(&buf[0..bytes_read], caddr)?; assert_eq!(bytes_written, bytes_read); break Ok(bytes_read); } Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue, Err(e) => break Err(Error::IOError(e)), } } }) } fn read_exact_from(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<()> where F: Read, { #[allow(deprecated)] // this function itself is deprecated let res = self.read_from(addr, src, count)?; if res != count { return Err(Error::PartialBuffer { expected: count, completed: res, }); } Ok(()) } /// # Examples /// /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(not(unix))] /// # extern crate vmm_sys_util; /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; /// # /// # let start_addr = GuestAddress(0x1000); /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)]) /// # .expect("Could not create guest memory"); /// # let mut file = if cfg!(unix) { /// # use std::fs::OpenOptions; /// let mut file = OpenOptions::new() /// .write(true) /// .open("/dev/null") /// .expect("Could not open /dev/null"); /// # file /// # } else { /// # use vmm_sys_util::tempfile::TempFile; /// # TempFile::new().unwrap().into_file() /// # }; /// /// gm.write_to(start_addr, &mut file, 128) /// .expect("Could not write 128 bytes to the provided address"); /// # } /// ``` fn write_to(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result where F: Write, { self.try_access(count, addr, |offset, len, caddr, region| -> Result { // Check if something bad happened before doing unsafe things. assert!(offset <= count); let len = std::cmp::min(len, MAX_ACCESS_CHUNK); let mut buf = vec![0u8; len].into_boxed_slice(); let bytes_read = region.read(&mut buf, caddr)?; assert_eq!(bytes_read, len); // For a non-RAM region, reading could have side effects, so we // must use write_all(). 
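            // `write_all` retries short writes, so either the whole bounce buffer reaches
            // `dst` or an error is returned.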
dst.write_all(&buf).map_err(Error::IOError)?; Ok(len) }) } /// # Examples /// /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature) /// /// ``` /// # #[cfg(not(unix))] /// # extern crate vmm_sys_util; /// # #[cfg(feature = "backend-mmap")] /// # { /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; /// # /// # let start_addr = GuestAddress(0x1000); /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)]) /// # .expect("Could not create guest memory"); /// # let mut file = if cfg!(unix) { /// # use std::fs::OpenOptions; /// let mut file = OpenOptions::new() /// .write(true) /// .open("/dev/null") /// .expect("Could not open /dev/null"); /// # file /// # } else { /// # use vmm_sys_util::tempfile::TempFile; /// # TempFile::new().unwrap().into_file() /// # }; /// /// gm.write_all_to(start_addr, &mut file, 128) /// .expect("Could not write 128 bytes to the provided address"); /// # } /// ``` fn write_all_to(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()> where F: Write, { #[allow(deprecated)] // this function itself is deprecated let res = self.write_to(addr, dst, count)?; if res != count { return Err(Error::PartialBuffer { expected: count, completed: res, }); } Ok(()) } fn store(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> { // `find_region` should really do what `to_region_addr` is doing right now, except // it should keep returning a `Result`. self.to_region_addr(addr) .ok_or(Error::InvalidGuestAddress(addr)) .and_then(|(region, region_addr)| region.store(val, region_addr, order)) } fn load(&self, addr: GuestAddress, order: Ordering) -> Result { self.to_region_addr(addr) .ok_or(Error::InvalidGuestAddress(addr)) .and_then(|(region, region_addr)| region.load(region_addr, order)) } } #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] use super::*; #[cfg(feature = "backend-mmap")] use crate::bytes::ByteValued; #[cfg(feature = "backend-mmap")] use crate::GuestAddress; #[cfg(feature = "backend-mmap")] use std::time::{Duration, Instant}; use vmm_sys_util::tempfile::TempFile; #[cfg(feature = "backend-mmap")] type GuestMemoryMmap = crate::GuestMemoryMmap<()>; #[cfg(feature = "backend-mmap")] fn make_image(size: u8) -> Vec { let mut image: Vec = Vec::with_capacity(size as usize); for i in 0..size { image.push(i); } image } #[test] fn test_file_offset() { let file = TempFile::new().unwrap().into_file(); let start = 1234; let file_offset = FileOffset::new(file, start); assert_eq!(file_offset.start(), start); assert_eq!( file_offset.file() as *const File, file_offset.arc().as_ref() as *const File ); } #[cfg(feature = "backend-mmap")] #[test] fn checked_read_from() { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x40); let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap(); let image = make_image(0x80); let offset = GuestAddress(0x30); let count: usize = 0x20; assert_eq!( 0x20_usize, mem.read_volatile_from(offset, &mut image.as_slice(), count) .unwrap() ); } // Runs the provided closure in a loop, until at least `duration` time units have elapsed. #[cfg(feature = "backend-mmap")] fn loop_timed(duration: Duration, mut f: F) where F: FnMut(), { // We check the time every `CHECK_PERIOD` iterations. const CHECK_PERIOD: u64 = 1_000_000; let start_time = Instant::now(); loop { for _ in 0..CHECK_PERIOD { f(); } if start_time.elapsed() >= duration { break; } } } // Helper method for the following test. 
It spawns a writer and a reader thread, which // simultaneously try to access an object that is placed at the junction of two memory regions. // The part of the object that's continuously accessed is a member of type T. The writer // flips all the bits of the member with every write, while the reader checks that every byte // has the same value (and thus it did not do a non-atomic access). The test succeeds if // no mismatch is detected after performing accesses for a pre-determined amount of time. #[cfg(feature = "backend-mmap")] #[cfg(not(miri))] // This test simulates a race condition between guest and vmm fn non_atomic_access_helper() where T: ByteValued + std::fmt::Debug + From + Into + std::ops::Not + PartialEq, { use std::mem; use std::thread; // A dummy type that's always going to have the same alignment as the first member, // and then adds some bytes at the end. #[derive(Clone, Copy, Debug, Default, PartialEq)] struct Data { val: T, some_bytes: [u8; 8], } // Some sanity checks. assert_eq!(mem::align_of::(), mem::align_of::>()); assert_eq!(mem::size_of::(), mem::align_of::()); // There must be no padding bytes, as otherwise implementing ByteValued is UB assert_eq!(mem::size_of::>(), mem::size_of::() + 8); unsafe impl ByteValued for Data {} // Start of first guest memory region. let start = GuestAddress(0); let region_len = 1 << 12; // The address where we start writing/reading a Data value. let data_start = GuestAddress((region_len - mem::size_of::()) as u64); let mem = GuestMemoryMmap::from_ranges(&[ (start, region_len), (start.unchecked_add(region_len as u64), region_len), ]) .unwrap(); // Need to clone this and move it into the new thread we create. let mem2 = mem.clone(); // Just some bytes. let some_bytes = [1u8, 2, 4, 16, 32, 64, 128, 255]; let mut data = Data { val: T::from(0u8), some_bytes, }; // Simple check that cross-region write/read is ok. mem.write_obj(data, data_start).unwrap(); let read_data = mem.read_obj::>(data_start).unwrap(); assert_eq!(read_data, data); let t = thread::spawn(move || { let mut count: u64 = 0; loop_timed(Duration::from_secs(3), || { let data = mem2.read_obj::>(data_start).unwrap(); // Every time data is written to memory by the other thread, the value of // data.val alternates between 0 and T::MAX, so the inner bytes should always // have the same value. If they don't match, it means we read a partial value, // so the access was not atomic. let bytes = data.val.into().to_le_bytes(); for i in 1..mem::size_of::() { if bytes[0] != bytes[i] { panic!( "val bytes don't match {:?} after {} iterations", &bytes[..mem::size_of::()], count ); } } count += 1; }); }); // Write the object while flipping the bits of data.val over and over again. 
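        // Flipping every bit on each write means a torn (non-atomic) read in the other thread
        // would observe a mix of old and new bytes, which the reader detects as a mismatch.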
loop_timed(Duration::from_secs(3), || { mem.write_obj(data, data_start).unwrap(); data.val = !data.val; }); t.join().unwrap() } #[cfg(feature = "backend-mmap")] #[test] #[cfg(not(miri))] fn test_non_atomic_access() { non_atomic_access_helper::() } #[cfg(feature = "backend-mmap")] #[test] fn test_zero_length_accesses() { #[derive(Default, Clone, Copy)] #[repr(C)] struct ZeroSizedStruct { dummy: [u32; 0], } unsafe impl ByteValued for ZeroSizedStruct {} let addr = GuestAddress(0x1000); let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); let obj = ZeroSizedStruct::default(); let mut image = make_image(0x80); assert_eq!(mem.write(&[], addr).unwrap(), 0); assert_eq!(mem.read(&mut [], addr).unwrap(), 0); assert!(mem.write_slice(&[], addr).is_ok()); assert!(mem.read_slice(&mut [], addr).is_ok()); assert!(mem.write_obj(obj, addr).is_ok()); assert!(mem.read_obj::(addr).is_ok()); assert_eq!( mem.read_volatile_from(addr, &mut image.as_slice(), 0) .unwrap(), 0 ); assert!(mem .read_exact_volatile_from(addr, &mut image.as_slice(), 0) .is_ok()); assert_eq!( mem.write_volatile_to(addr, &mut image.as_mut_slice(), 0) .unwrap(), 0 ); assert!(mem .write_all_volatile_to(addr, &mut image.as_mut_slice(), 0) .is_ok()); } #[cfg(feature = "backend-mmap")] #[test] fn test_atomic_accesses() { let addr = GuestAddress(0x1000); let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); let bad_addr = addr.unchecked_add(0x1000); crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr); } #[cfg(feature = "backend-mmap")] #[cfg(target_os = "linux")] #[test] fn test_guest_memory_mmap_is_hugetlbfs() { let addr = GuestAddress(0x1000); let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); let r = mem.find_region(addr).unwrap(); assert_eq!(r.is_hugetlbfs(), None); } } vm-memory-0.14.0/src/io.rs000064400000000000000000000546541046102023000134200ustar 00000000000000// Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //! Module containing versions of the standard library's [`Read`](std::io::Read) and //! [`Write`](std::io::Write) traits compatible with volatile memory accesses. use crate::bitmap::BitmapSlice; use crate::volatile_memory::copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice}; use crate::{VolatileMemoryError, VolatileSlice}; use std::io::{Cursor, ErrorKind, Stdout}; use std::os::fd::AsRawFd; /// A version of the standard library's [`Read`](std::io::Read) trait that operates on volatile /// memory instead of slices /// /// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on /// guest memory [1]. /// /// [1]: https://github.com/rust-vmm/vm-memory/pull/217 pub trait ReadVolatile { /// Tries to read some bytes into the given [`VolatileSlice`] buffer, returning how many bytes /// were read. /// /// The behavior of implementations should be identical to [`Read::read`](std::io::Read::read) fn read_volatile( &mut self, buf: &mut VolatileSlice, ) -> Result; /// Tries to fill the given [`VolatileSlice`] buffer by reading from `self` returning an error /// if insufficient bytes could be read. 
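    ///
    /// # Example
    ///
    /// A minimal sketch using the `ReadVolatile` impl for `&[u8]` provided further below in
    /// this module:
    ///
    /// ```
    /// # use vm_memory::{ReadVolatile, VolatileSlice};
    /// let mut src: &[u8] = &[1u8, 2, 3, 4];
    /// let mut dst = [0u8; 4];
    /// src.read_exact_volatile(&mut VolatileSlice::from(&mut dst[..]))
    ///     .expect("the source holds enough bytes");
    /// assert_eq!(dst, [1, 2, 3, 4]);
    /// ```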
    ///
    /// The default implementation is identical to that of [`Read::read_exact`](std::io::Read::read_exact)
    fn read_exact_volatile<B: BitmapSlice>(
        &mut self,
        buf: &mut VolatileSlice<B>,
    ) -> Result<(), VolatileMemoryError> {
        // Implementation based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L465

        let mut partial_buf = buf.offset(0)?;

        while !partial_buf.is_empty() {
            match self.read_volatile(&mut partial_buf) {
                Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => {
                    continue
                }
                Ok(0) => {
                    return Err(VolatileMemoryError::IOError(std::io::Error::new(
                        ErrorKind::UnexpectedEof,
                        "failed to fill whole buffer",
                    )))
                }
                Ok(bytes_read) => partial_buf = partial_buf.offset(bytes_read)?,
                Err(err) => return Err(err),
            }
        }

        Ok(())
    }
}

/// A version of the standard library's [`Write`](std::io::Write) trait that operates on volatile
/// memory instead of slices.
///
/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on
/// guest memory [1].
///
/// [1]: https://github.com/rust-vmm/vm-memory/pull/217
pub trait WriteVolatile {
    /// Tries to write some bytes from the given [`VolatileSlice`] buffer, returning how many bytes
    /// were written.
    ///
    /// The behavior of implementations should be identical to [`Write::write`](std::io::Write::write)
    fn write_volatile<B: BitmapSlice>(
        &mut self,
        buf: &VolatileSlice<B>,
    ) -> Result<usize, VolatileMemoryError>;

    /// Tries to write the entire content of the given [`VolatileSlice`] buffer to `self`,
    /// returning an error if not all bytes could be written.
    ///
    /// The default implementation is identical to that of [`Write::write_all`](std::io::Write::write_all)
    fn write_all_volatile<B: BitmapSlice>(
        &mut self,
        buf: &VolatileSlice<B>,
    ) -> Result<(), VolatileMemoryError> {
        // Based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L1570

        let mut partial_buf = buf.offset(0)?;

        while !partial_buf.is_empty() {
            match self.write_volatile(&partial_buf) {
                Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => {
                    continue
                }
                Ok(0) => {
                    return Err(VolatileMemoryError::IOError(std::io::Error::new(
                        ErrorKind::WriteZero,
                        "failed to write whole buffer",
                    )))
                }
                Ok(bytes_written) => partial_buf = partial_buf.offset(bytes_written)?,
                Err(err) => return Err(err),
            }
        }

        Ok(())
    }
}

// We explicitly implement our traits for [`std::fs::File`] and [`std::os::unix::net::UnixStream`]
// instead of providing a blanket implementation for [`AsRawFd`] due to trait coherence limitations:
// a blanket implementation would prevent us from providing implementations for `&mut [u8]` below,
// as "an upstream crate could implement `AsRawFd` for `&mut [u8]`".
macro_rules!
impl_read_write_volatile_for_raw_fd { ($raw_fd_ty:ty) => { impl ReadVolatile for $raw_fd_ty { fn read_volatile( &mut self, buf: &mut VolatileSlice, ) -> Result { read_volatile_raw_fd(self, buf) } } impl WriteVolatile for $raw_fd_ty { fn write_volatile( &mut self, buf: &VolatileSlice, ) -> Result { write_volatile_raw_fd(self, buf) } } }; } impl WriteVolatile for Stdout { fn write_volatile( &mut self, buf: &VolatileSlice, ) -> Result { write_volatile_raw_fd(self, buf) } } impl_read_write_volatile_for_raw_fd!(std::fs::File); impl_read_write_volatile_for_raw_fd!(std::os::unix::net::UnixStream); impl_read_write_volatile_for_raw_fd!(std::os::fd::OwnedFd); impl_read_write_volatile_for_raw_fd!(std::os::fd::BorrowedFd<'_>); /// Tries to do a single `read` syscall on the provided file descriptor, storing the data raed in /// the given [`VolatileSlice`]. /// /// Returns the numbers of bytes read. fn read_volatile_raw_fd( raw_fd: &mut Fd, buf: &mut VolatileSlice, ) -> Result { let fd = raw_fd.as_raw_fd(); let guard = buf.ptr_guard_mut(); let dst = guard.as_ptr().cast::(); // SAFETY: We got a valid file descriptor from `AsRawFd`. The memory pointed to by `dst` is // valid for writes of length `buf.len() by the invariants upheld by the constructor // of `VolatileSlice`. let bytes_read = unsafe { libc::read(fd, dst, buf.len()) }; if bytes_read < 0 { // We don't know if a partial read might have happened, so mark everything as dirty buf.bitmap().mark_dirty(0, buf.len()); Err(VolatileMemoryError::IOError(std::io::Error::last_os_error())) } else { let bytes_read = bytes_read.try_into().unwrap(); buf.bitmap().mark_dirty(0, bytes_read); Ok(bytes_read) } } /// Tries to do a single `write` syscall on the provided file descriptor, attempting to write the /// data stored in the given [`VolatileSlice`]. /// /// Returns the numbers of bytes written. fn write_volatile_raw_fd( raw_fd: &mut Fd, buf: &VolatileSlice, ) -> Result { let fd = raw_fd.as_raw_fd(); let guard = buf.ptr_guard(); let src = guard.as_ptr().cast::(); // SAFETY: We got a valid file descriptor from `AsRawFd`. The memory pointed to by `src` is // valid for reads of length `buf.len() by the invariants upheld by the constructor // of `VolatileSlice`. let bytes_written = unsafe { libc::write(fd, src, buf.len()) }; if bytes_written < 0 { Err(VolatileMemoryError::IOError(std::io::Error::last_os_error())) } else { Ok(bytes_written.try_into().unwrap()) } } impl WriteVolatile for &mut [u8] { fn write_volatile( &mut self, buf: &VolatileSlice, ) -> Result { let total = buf.len().min(self.len()); let src = buf.subslice(0, total)?; // SAFETY: // We check above that `src` is contiguously allocated memory of length `total <= self.len())`. // Furthermore, both src and dst of the call to // copy_from_volatile_slice are valid for reads and writes respectively of length `total` // since total is the minimum of lengths of the memory areas pointed to. The areas do not // overlap, since `dst` is inside guest memory, and buf is a slice (no slices to guest // memory are possible without violating rust's aliasing rules). 
let written = unsafe { copy_from_volatile_slice(self.as_mut_ptr(), &src, total) }; // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#335 *self = std::mem::take(self).split_at_mut(written).1; Ok(written) } fn write_all_volatile( &mut self, buf: &VolatileSlice, ) -> Result<(), VolatileMemoryError> { // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L376-L382 if self.write_volatile(buf)? == buf.len() { Ok(()) } else { Err(VolatileMemoryError::IOError(std::io::Error::new( ErrorKind::WriteZero, "failed to write whole buffer", ))) } } } impl ReadVolatile for &[u8] { fn read_volatile( &mut self, buf: &mut VolatileSlice, ) -> Result { let total = buf.len().min(self.len()); let dst = buf.subslice(0, total)?; // SAFETY: // We check above that `dst` is contiguously allocated memory of length `total <= self.len())`. // Furthermore, both src and dst of the call to copy_to_volatile_slice are valid for reads // and writes respectively of length `total` since total is the minimum of lengths of the // memory areas pointed to. The areas do not overlap, since `dst` is inside guest memory, // and buf is a slice (no slices to guest memory are possible without violating rust's aliasing rules). let read = unsafe { copy_to_volatile_slice(&dst, self.as_ptr(), total) }; // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#232-310 *self = self.split_at(read).1; Ok(read) } fn read_exact_volatile( &mut self, buf: &mut VolatileSlice, ) -> Result<(), VolatileMemoryError> { // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L282-L302 if buf.len() > self.len() { return Err(VolatileMemoryError::IOError(std::io::Error::new( ErrorKind::UnexpectedEof, "failed to fill whole buffer", ))); } self.read_volatile(buf).map(|_| ()) } } // WriteVolatile implementation for Vec is based upon the Write impl for Vec, which // defers to Vec::append_elements, after which the below functionality is modelled. impl WriteVolatile for Vec { fn write_volatile( &mut self, buf: &VolatileSlice, ) -> Result { let count = buf.len(); self.reserve(count); let len = self.len(); // SAFETY: Calling Vec::reserve() above guarantees the the backing storage of the Vec has // length at least `len + count`. This means that self.as_mut_ptr().add(len) remains within // the same allocated object, the offset does not exceed isize (as otherwise reserve would // have panicked), and does not rely on address space wrapping around. // In particular, the entire `count` bytes after `self.as_mut_ptr().add(count)` is // contiguously allocated and valid for writes. // Lastly, `copy_to_volatile_slice` correctly initialized `copied_len` additional bytes // in the Vec's backing storage, and we assert this to be equal to `count`. Additionally, // `len + count` is at most the reserved capacity of the vector. Thus the call to `set_len` // is safe. 
unsafe { let copied_len = copy_from_volatile_slice(self.as_mut_ptr().add(len), buf, count); assert_eq!(copied_len, count); self.set_len(len + count); } Ok(count) } } // ReadVolatile and WriteVolatile implementations for Cursor is modelled after the standard // library's implementation (modulo having to inline `Cursor::remaining_slice`, as that's nightly only) impl ReadVolatile for Cursor where T: AsRef<[u8]>, { fn read_volatile( &mut self, buf: &mut VolatileSlice, ) -> Result { let inner = self.get_ref().as_ref(); let len = self.position().min(inner.len() as u64); let n = ReadVolatile::read_volatile(&mut &inner[(len as usize)..], buf)?; self.set_position(self.position() + n as u64); Ok(n) } fn read_exact_volatile( &mut self, buf: &mut VolatileSlice, ) -> Result<(), VolatileMemoryError> { let inner = self.get_ref().as_ref(); let n = buf.len(); let len = self.position().min(inner.len() as u64); ReadVolatile::read_exact_volatile(&mut &inner[(len as usize)..], buf)?; self.set_position(self.position() + n as u64); Ok(()) } } impl WriteVolatile for Cursor<&mut [u8]> { fn write_volatile( &mut self, buf: &VolatileSlice, ) -> Result { let pos = self.position().min(self.get_ref().len() as u64); let n = WriteVolatile::write_volatile(&mut &mut self.get_mut()[(pos as usize)..], buf)?; self.set_position(self.position() + n as u64); Ok(n) } // no write_all provided in standard library, since our default for write_all is based on the // standard library's write_all, omitting it here as well will correctly mimic stdlib behavior. } #[cfg(test)] mod tests { use crate::io::{ReadVolatile, WriteVolatile}; use crate::{VolatileMemoryError, VolatileSlice}; use std::io::{Cursor, ErrorKind, Read, Seek, Write}; use vmm_sys_util::tempfile::TempFile; // ---- Test ReadVolatile for &[u8] ---- fn read_4_bytes_to_5_byte_memory(source: Vec, expected_output: [u8; 5]) { // Test read_volatile for &[u8] works let mut memory = vec![0u8; 5]; assert_eq!( (&source[..]) .read_volatile(&mut VolatileSlice::from(&mut memory[..4])) .unwrap(), source.len().min(4) ); assert_eq!(&memory, &expected_output); // Test read_exact_volatile for &[u8] works let mut memory = vec![0u8; 5]; let result = (&source[..]).read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4])); // read_exact fails if there are not enough bytes in input to completely fill // memory[..4] if source.len() < 4 { match result.unwrap_err() { VolatileMemoryError::IOError(ioe) => { assert_eq!(ioe.kind(), ErrorKind::UnexpectedEof) } err => panic!("{:?}", err), } assert_eq!(memory, vec![0u8; 5]); } else { result.unwrap(); assert_eq!(&memory, &expected_output); } } // ---- Test ReadVolatile for File ---- fn read_4_bytes_from_file(source: Vec, expected_output: [u8; 5]) { let mut temp_file = TempFile::new().unwrap().into_file(); temp_file.write_all(source.as_ref()).unwrap(); temp_file.rewind().unwrap(); // Test read_volatile for File works let mut memory = vec![0u8; 5]; assert_eq!( temp_file .read_volatile(&mut VolatileSlice::from(&mut memory[..4])) .unwrap(), source.len().min(4) ); assert_eq!(&memory, &expected_output); temp_file.rewind().unwrap(); // Test read_exact_volatile for File works let mut memory = vec![0u8; 5]; let read_exact_result = temp_file.read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4])); if source.len() < 4 { read_exact_result.unwrap_err(); } else { read_exact_result.unwrap(); } assert_eq!(&memory, &expected_output); } #[test] fn test_read_volatile() { let test_cases = [ (vec![1u8, 2], [1u8, 2, 0, 0, 0]), (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]), // 
ensure we don't have a buffer overrun (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]), ]; for (input, output) in test_cases { read_4_bytes_to_5_byte_memory(input.clone(), output); read_4_bytes_from_file(input, output); } } // ---- Test WriteVolatile for &mut [u8] ---- fn write_4_bytes_to_5_byte_vec(mut source: Vec, expected_result: [u8; 5]) { let mut memory = vec![0u8; 5]; // Test write_volatile for &mut [u8] works assert_eq!( (&mut memory[..4]) .write_volatile(&VolatileSlice::from(source.as_mut_slice())) .unwrap(), source.len().min(4) ); assert_eq!(&memory, &expected_result); // Test write_all_volatile for &mut [u8] works let mut memory = vec![0u8; 5]; let result = (&mut memory[..4]).write_all_volatile(&VolatileSlice::from(source.as_mut_slice())); if source.len() > 4 { match result.unwrap_err() { VolatileMemoryError::IOError(ioe) => { assert_eq!(ioe.kind(), ErrorKind::WriteZero) } err => panic!("{:?}", err), } // This quirky behavior of writing to the slice even in the case of failure is also // exhibited by the stdlib assert_eq!(&memory, &expected_result); } else { result.unwrap(); assert_eq!(&memory, &expected_result); } } // ---- Test ẂriteVolatile for File works ---- fn write_5_bytes_to_file(mut source: Vec) { // Test write_volatile for File works let mut temp_file = TempFile::new().unwrap().into_file(); temp_file .write_volatile(&VolatileSlice::from(source.as_mut_slice())) .unwrap(); temp_file.rewind().unwrap(); let mut written = vec![0u8; source.len()]; temp_file.read_exact(written.as_mut_slice()).unwrap(); assert_eq!(source, written); // check no excess bytes were written to the file assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0); // Test write_all_volatile for File works let mut temp_file = TempFile::new().unwrap().into_file(); temp_file .write_all_volatile(&VolatileSlice::from(source.as_mut_slice())) .unwrap(); temp_file.rewind().unwrap(); let mut written = vec![0u8; source.len()]; temp_file.read_exact(written.as_mut_slice()).unwrap(); assert_eq!(source, written); // check no excess bytes were written to the file assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0); } #[test] fn test_write_volatile() { let test_cases = [ (vec![1u8, 2], [1u8, 2, 0, 0, 0]), (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]), // ensure we don't have a buffer overrun (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]), ]; for (input, output) in test_cases { write_4_bytes_to_5_byte_vec(input.clone(), output); write_5_bytes_to_file(input); } } #[test] fn test_read_volatile_for_cursor() { let read_buffer = [1, 2, 3, 4, 5, 6, 7]; let mut output = vec![0u8; 5]; let mut cursor = Cursor::new(read_buffer); // Read 4 bytes from cursor to volatile slice (amount read limited by volatile slice length) assert_eq!( cursor .read_volatile(&mut VolatileSlice::from(&mut output[..4])) .unwrap(), 4 ); assert_eq!(output, vec![1, 2, 3, 4, 0]); // Read next 3 bytes from cursor to volatile slice (amount read limited by length of remaining data in cursor) assert_eq!( cursor .read_volatile(&mut VolatileSlice::from(&mut output[..4])) .unwrap(), 3 ); assert_eq!(output, vec![5, 6, 7, 4, 0]); cursor.set_position(0); // Same as first test above, but with read_exact cursor .read_exact_volatile(&mut VolatileSlice::from(&mut output[..4])) .unwrap(); assert_eq!(output, vec![1, 2, 3, 4, 0]); // Same as above, but with read_exact. Should fail now, because we cannot fill a 4 byte buffer // with whats remaining in the cursor (3 bytes). Output should remain unchanged. 
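        // The previous read_exact_volatile call advanced the cursor to position 4, so only the
        // bytes [5, 6, 7] are left.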
assert!(cursor .read_exact_volatile(&mut VolatileSlice::from(&mut output[..4])) .is_err()); assert_eq!(output, vec![1, 2, 3, 4, 0]); } #[test] fn test_write_volatile_for_cursor() { let mut write_buffer = vec![0u8; 7]; let mut input = [1, 2, 3, 4]; let mut cursor = Cursor::new(write_buffer.as_mut_slice()); // Write 4 bytes from volatile slice to cursor (amount written limited by volatile slice length) assert_eq!( cursor .write_volatile(&VolatileSlice::from(input.as_mut_slice())) .unwrap(), 4 ); assert_eq!(cursor.get_ref(), &[1, 2, 3, 4, 0, 0, 0]); // Write 3 bytes from volatile slice to cursor (amount written limited by remaining space in cursor) assert_eq!( cursor .write_volatile(&VolatileSlice::from(input.as_mut_slice())) .unwrap(), 3 ); assert_eq!(cursor.get_ref(), &[1, 2, 3, 4, 1, 2, 3]); } #[test] fn test_write_volatile_for_vec() { let mut write_buffer = Vec::new(); let mut input = [1, 2, 3, 4]; assert_eq!( write_buffer .write_volatile(&VolatileSlice::from(input.as_mut_slice())) .unwrap(), 4 ); assert_eq!(&write_buffer, &input); } } vm-memory-0.14.0/src/lib.rs000064400000000000000000000047721046102023000135530ustar 00000000000000// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-3-Clause file. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Traits for allocating, handling and interacting with the VM's physical memory. //! //! For a typical hypervisor, there are several components, such as boot loader, virtual device //! drivers, virtio backend drivers and vhost drivers etc, that need to access VM's physical memory. //! This crate aims to provide a set of stable traits to decouple VM memory consumers from VM //! memory providers. Based on these traits, VM memory consumers could access VM's physical memory //! without knowing the implementation details of the VM memory provider. Thus hypervisor //! components, such as boot loader, virtual device drivers, virtio backend drivers and vhost //! drivers etc, could be shared and reused by multiple hypervisors. 
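//!
//! # Example
//!
//! A minimal sketch, assuming the `backend-mmap` feature is enabled: build a `GuestMemoryMmap`
//! from guest physical ranges, then access it through the `Bytes` trait.
//!
//! ```
//! # #[cfg(feature = "backend-mmap")]
//! # {
//! use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
//!
//! let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
//!     .expect("Could not create guest memory");
//! mem.write_slice(&[1, 2, 3, 4], GuestAddress(0x1100)).unwrap();
//! let mut buf = [0u8; 4];
//! mem.read_slice(&mut buf, GuestAddress(0x1100)).unwrap();
//! assert_eq!(buf, [1, 2, 3, 4]);
//! # }
//! ```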
#![deny(clippy::doc_markdown)] #![deny(missing_docs)] #![deny(missing_debug_implementations)] #[macro_use] pub mod address; pub use address::{Address, AddressValue}; #[cfg(feature = "backend-atomic")] pub mod atomic; #[cfg(feature = "backend-atomic")] pub use atomic::{GuestMemoryAtomic, GuestMemoryLoadGuard}; mod atomic_integer; pub use atomic_integer::AtomicInteger; pub mod bitmap; pub mod bytes; pub use bytes::{AtomicAccess, ByteValued, Bytes}; pub mod endian; pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize}; pub mod guest_memory; pub use guest_memory::{ Error as GuestMemoryError, FileOffset, GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult, }; pub mod io; pub use io::{ReadVolatile, WriteVolatile}; #[cfg(all(feature = "backend-mmap", not(feature = "xen"), unix))] mod mmap_unix; #[cfg(all(feature = "backend-mmap", feature = "xen", unix))] mod mmap_xen; #[cfg(all(feature = "backend-mmap", windows))] mod mmap_windows; #[cfg(feature = "backend-mmap")] pub mod mmap; #[cfg(feature = "backend-mmap")] pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion}; #[cfg(all(feature = "backend-mmap", feature = "xen", unix))] pub use mmap::{MmapRange, MmapXenFlags}; pub mod volatile_memory; pub use volatile_memory::{ Error as VolatileMemoryError, Result as VolatileMemoryResult, VolatileArrayRef, VolatileMemory, VolatileRef, VolatileSlice, }; vm-memory-0.14.0/src/mmap.rs000064400000000000000000001475571046102023000137500ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. // // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-3-Clause file. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! The default implementation for the [`GuestMemory`](trait.GuestMemory.html) trait. //! //! This implementation is mmap-ing the memory of the guest into the current process. use std::borrow::Borrow; use std::io::{Read, Write}; #[cfg(unix)] use std::io::{Seek, SeekFrom}; use std::ops::Deref; use std::result; use std::sync::atomic::Ordering; use std::sync::Arc; use crate::address::Address; use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::{ self, FileOffset, GuestAddress, GuestMemory, GuestMemoryIterator, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, }; use crate::volatile_memory::{VolatileMemory, VolatileSlice}; use crate::{AtomicAccess, Bytes}; #[cfg(all(not(feature = "xen"), unix))] pub use crate::mmap_unix::{Error as MmapRegionError, MmapRegion, MmapRegionBuilder}; #[cfg(all(feature = "xen", unix))] pub use crate::mmap_xen::{Error as MmapRegionError, MmapRange, MmapRegion, MmapXenFlags}; #[cfg(windows)] pub use crate::mmap_windows::MmapRegion; #[cfg(windows)] pub use std::io::Error as MmapRegionError; /// A `Bitmap` that can be created starting from an initial size. pub trait NewBitmap: Bitmap + Default { /// Create a new object based on the specified length in bytes. fn with_len(len: usize) -> Self; } impl NewBitmap for () { fn with_len(_len: usize) -> Self {} } /// Errors that can occur when creating a memory map. #[derive(Debug, thiserror::Error)] pub enum Error { /// Adding the guest base address to the length of the underlying mapping resulted /// in an overflow. 
#[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")] InvalidGuestRegion, /// Error creating a `MmapRegion` object. #[error("{0}")] MmapRegion(MmapRegionError), /// No memory region found. #[error("No memory region found")] NoMemoryRegion, /// Some of the memory regions intersect with each other. #[error("Some of the memory regions intersect with each other")] MemoryRegionOverlap, /// The provided memory regions haven't been sorted. #[error("The provided memory regions haven't been sorted")] UnsortedMemoryRegions, } // TODO: use this for Windows as well after we redefine the Error type there. #[cfg(unix)] /// Checks if a mapping of `size` bytes fits at the provided `file_offset`. /// /// For a borrowed `FileOffset` and size, this function checks whether the mapping does not /// extend past EOF, and that adding the size to the file offset does not lead to overflow. pub fn check_file_offset( file_offset: &FileOffset, size: usize, ) -> result::Result<(), MmapRegionError> { let mut file = file_offset.file(); let start = file_offset.start(); if let Some(end) = start.checked_add(size as u64) { let filesize = file .seek(SeekFrom::End(0)) .map_err(MmapRegionError::SeekEnd)?; file.rewind().map_err(MmapRegionError::SeekStart)?; if filesize < end { return Err(MmapRegionError::MappingPastEof); } } else { return Err(MmapRegionError::InvalidOffsetLength); } Ok(()) } /// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) implementation that mmaps the guest's /// memory region in the current process. /// /// Represents a continuous region of the guest's physical memory that is backed by a mapping /// in the virtual address space of the calling process. #[derive(Debug)] pub struct GuestRegionMmap { mapping: MmapRegion, guest_base: GuestAddress, } impl Deref for GuestRegionMmap { type Target = MmapRegion; fn deref(&self) -> &MmapRegion { &self.mapping } } impl GuestRegionMmap { /// Create a new memory-mapped memory region for the guest's physical memory. pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> result::Result { if guest_base.0.checked_add(mapping.size() as u64).is_none() { return Err(Error::InvalidGuestRegion); } Ok(GuestRegionMmap { mapping, guest_base, }) } } #[cfg(not(feature = "xen"))] impl GuestRegionMmap { /// Create a new memory-mapped memory region from guest's physical memory, size and file. pub fn from_range( addr: GuestAddress, size: usize, file: Option, ) -> result::Result { let region = if let Some(ref f_off) = file { MmapRegion::from_file(f_off.clone(), size) } else { MmapRegion::new(size) } .map_err(Error::MmapRegion)?; Self::new(region, addr) } } #[cfg(feature = "xen")] impl GuestRegionMmap { /// Create a new Unix memory-mapped memory region from guest's physical memory, size and file. /// This must only be used for tests, doctests, benches and is not designed for end consumers. pub fn from_range( addr: GuestAddress, size: usize, file: Option, ) -> result::Result { let range = MmapRange::new_unix(size, file, addr); let region = MmapRegion::from_range(range).map_err(Error::MmapRegion)?; Self::new(region, addr) } } impl Bytes for GuestRegionMmap { type E = guest_memory::Error; /// # Examples /// * Write a slice at guest address 0x1200. 
/// /// ``` /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; /// # /// # let start_addr = GuestAddress(0x1000); /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// # .expect("Could not create guest memory"); /// # /// let res = gm /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) /// .expect("Could not write to guest memory"); /// assert_eq!(5, res); /// ``` fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result { let maddr = addr.raw_value() as usize; self.as_volatile_slice() .unwrap() .write(buf, maddr) .map_err(Into::into) } /// # Examples /// * Read a slice of length 16 at guestaddress 0x1200. /// /// ``` /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; /// # /// # let start_addr = GuestAddress(0x1000); /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// # .expect("Could not create guest memory"); /// # /// let buf = &mut [0u8; 16]; /// let res = gm /// .read(buf, GuestAddress(0x1200)) /// .expect("Could not read from guest memory"); /// assert_eq!(16, res); /// ``` fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result { let maddr = addr.raw_value() as usize; self.as_volatile_slice() .unwrap() .read(buf, maddr) .map_err(Into::into) } fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> { let maddr = addr.raw_value() as usize; self.as_volatile_slice() .unwrap() .write_slice(buf, maddr) .map_err(Into::into) } fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> { let maddr = addr.raw_value() as usize; self.as_volatile_slice() .unwrap() .read_slice(buf, maddr) .map_err(Into::into) } /// # Examples /// /// * Read bytes from /dev/urandom /// /// ``` /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; /// # use std::fs::File; /// # use std::path::Path; /// # /// # let start_addr = GuestAddress(0x1000); /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// # .expect("Could not create guest memory"); /// # let addr = GuestAddress(0x1010); /// # let mut file = if cfg!(unix) { /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom"); /// # file /// # } else { /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")) /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe") /// # }; /// /// gm.read_from(addr, &mut file, 128) /// .expect("Could not read from /dev/urandom into guest memory"); /// /// let read_addr = addr.checked_add(8).expect("Could not compute read address"); /// let rand_val: u32 = gm /// .read_obj(read_addr) /// .expect("Could not read u32 val from /dev/urandom"); /// ``` fn read_from( &self, addr: MemoryRegionAddress, src: &mut F, count: usize, ) -> guest_memory::Result where F: Read, { let maddr = addr.raw_value() as usize; #[allow(deprecated)] // function itself is deprecated self.as_volatile_slice() .unwrap() .read_from::(maddr, src, count) .map_err(Into::into) } /// # Examples /// /// * Read bytes from /dev/urandom /// /// ``` /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; /// # use std::fs::File; /// # use std::path::Path; /// # /// # let start_addr = GuestAddress(0x1000); /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// # .expect("Could not create guest memory"); /// # let addr = GuestAddress(0x1010); /// # let mut file = if cfg!(unix) { /// let mut file = 
File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom"); /// # file /// # } else { /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")) /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe") /// # }; /// /// gm.read_exact_from(addr, &mut file, 128) /// .expect("Could not read from /dev/urandom into guest memory"); /// /// let read_addr = addr.checked_add(8).expect("Could not compute read address"); /// let rand_val: u32 = gm /// .read_obj(read_addr) /// .expect("Could not read u32 val from /dev/urandom"); /// ``` fn read_exact_from( &self, addr: MemoryRegionAddress, src: &mut F, count: usize, ) -> guest_memory::Result<()> where F: Read, { let maddr = addr.raw_value() as usize; #[allow(deprecated)] // function itself is deprecated self.as_volatile_slice() .unwrap() .read_exact_from::(maddr, src, count) .map_err(Into::into) } /// Writes data from the region to a writable object. /// /// # Examples /// /// * Write 128 bytes to a /dev/null file /// /// ``` /// # #[cfg(not(unix))] /// # extern crate vmm_sys_util; /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; /// # /// # let start_addr = GuestAddress(0x1000); /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// # .expect("Could not create guest memory"); /// # let mut file = if cfg!(unix) { /// # use std::fs::OpenOptions; /// let mut file = OpenOptions::new() /// .write(true) /// .open("/dev/null") /// .expect("Could not open /dev/null"); /// # file /// # } else { /// # use vmm_sys_util::tempfile::TempFile; /// # TempFile::new().unwrap().into_file() /// # }; /// /// gm.write_to(start_addr, &mut file, 128) /// .expect("Could not write to file from guest memory"); /// ``` fn write_to( &self, addr: MemoryRegionAddress, dst: &mut F, count: usize, ) -> guest_memory::Result where F: Write, { let maddr = addr.raw_value() as usize; #[allow(deprecated)] // function itself is deprecated self.as_volatile_slice() .unwrap() .write_to::(maddr, dst, count) .map_err(Into::into) } /// Writes data from the region to a writable object. 
/// /// # Examples /// /// * Write 128 bytes to a /dev/null file /// /// ``` /// # #[cfg(not(unix))] /// # extern crate vmm_sys_util; /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; /// # /// # let start_addr = GuestAddress(0x1000); /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) /// # .expect("Could not create guest memory"); /// # let mut file = if cfg!(unix) { /// # use std::fs::OpenOptions; /// let mut file = OpenOptions::new() /// .write(true) /// .open("/dev/null") /// .expect("Could not open /dev/null"); /// # file /// # } else { /// # use vmm_sys_util::tempfile::TempFile; /// # TempFile::new().unwrap().into_file() /// # }; /// /// gm.write_all_to(start_addr, &mut file, 128) /// .expect("Could not write to file from guest memory"); /// ``` fn write_all_to( &self, addr: MemoryRegionAddress, dst: &mut F, count: usize, ) -> guest_memory::Result<()> where F: Write, { let maddr = addr.raw_value() as usize; #[allow(deprecated)] // function itself is deprecated self.as_volatile_slice() .unwrap() .write_all_to::(maddr, dst, count) .map_err(Into::into) } fn store( &self, val: T, addr: MemoryRegionAddress, order: Ordering, ) -> guest_memory::Result<()> { self.as_volatile_slice().and_then(|s| { s.store(val, addr.raw_value() as usize, order) .map_err(Into::into) }) } fn load( &self, addr: MemoryRegionAddress, order: Ordering, ) -> guest_memory::Result { self.as_volatile_slice() .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into)) } } impl GuestMemoryRegion for GuestRegionMmap { type B = B; fn len(&self) -> GuestUsize { self.mapping.size() as GuestUsize } fn start_addr(&self) -> GuestAddress { self.guest_base } fn bitmap(&self) -> &Self::B { self.mapping.bitmap() } fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> { // Not sure why wrapping_offset is not unsafe. Anyway this // is safe because we've just range-checked addr using check_address. self.check_address(addr) .ok_or(guest_memory::Error::InvalidBackendAddress) .map(|addr| { self.mapping .as_ptr() .wrapping_offset(addr.raw_value() as isize) }) } fn file_offset(&self) -> Option<&FileOffset> { self.mapping.file_offset() } fn get_slice( &self, offset: MemoryRegionAddress, count: usize, ) -> guest_memory::Result>> { let slice = self.mapping.get_slice(offset.raw_value() as usize, count)?; Ok(slice) } #[cfg(target_os = "linux")] fn is_hugetlbfs(&self) -> Option { self.mapping.is_hugetlbfs() } } /// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory /// in the current process. /// /// Represents the entire physical memory of the guest by tracking all its memory regions. /// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the /// virtual address space of the calling process. #[derive(Clone, Debug, Default)] pub struct GuestMemoryMmap { regions: Vec>>, } impl GuestMemoryMmap { /// Creates an empty `GuestMemoryMmap` instance. pub fn new() -> Self { Self::default() } /// Creates a container and allocates anonymous memory for guest memory regions. /// /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address. pub fn from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result { Self::from_ranges_with_files(ranges.iter().map(|r| (r.0, r.1, None))) } /// Creates a container and allocates anonymous memory for guest memory regions. 
/// /// Valid memory regions are specified as a sequence of (Address, Size, [`Option`]) /// tuples sorted by Address. pub fn from_ranges_with_files(ranges: T) -> result::Result where A: Borrow<(GuestAddress, usize, Option)>, T: IntoIterator, { Self::from_regions( ranges .into_iter() .map(|x| { GuestRegionMmap::from_range(x.borrow().0, x.borrow().1, x.borrow().2.clone()) }) .collect::, Error>>()?, ) } } impl GuestMemoryMmap { /// Creates a new `GuestMemoryMmap` from a vector of regions. /// /// # Arguments /// /// * `regions` - The vector of regions. /// The regions shouldn't overlap and they should be sorted /// by the starting address. pub fn from_regions(mut regions: Vec>) -> result::Result { Self::from_arc_regions(regions.drain(..).map(Arc::new).collect()) } /// Creates a new `GuestMemoryMmap` from a vector of Arc regions. /// /// Similar to the constructor `from_regions()` as it returns a /// `GuestMemoryMmap`. The need for this constructor is to provide a way for /// consumer of this API to create a new `GuestMemoryMmap` based on existing /// regions coming from an existing `GuestMemoryMmap` instance. /// /// # Arguments /// /// * `regions` - The vector of `Arc` regions. /// The regions shouldn't overlap and they should be sorted /// by the starting address. pub fn from_arc_regions(regions: Vec>>) -> result::Result { if regions.is_empty() { return Err(Error::NoMemoryRegion); } for window in regions.windows(2) { let prev = &window[0]; let next = &window[1]; if prev.start_addr() > next.start_addr() { return Err(Error::UnsortedMemoryRegions); } if prev.last_addr() >= next.start_addr() { return Err(Error::MemoryRegionOverlap); } } Ok(Self { regions }) } /// Insert a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`. /// /// # Arguments /// * `region`: the memory region to insert into the guest memory object. pub fn insert_region( &self, region: Arc>, ) -> result::Result, Error> { let mut regions = self.regions.clone(); regions.push(region); regions.sort_by_key(|x| x.start_addr()); Self::from_arc_regions(regions) } /// Remove a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap` /// on success, together with the removed region. /// /// # Arguments /// * `base`: base address of the region to be removed /// * `size`: size of the region to be removed pub fn remove_region( &self, base: GuestAddress, size: GuestUsize, ) -> result::Result<(GuestMemoryMmap, Arc>), Error> { if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) { if self.regions.get(region_index).unwrap().mapping.size() as GuestUsize == size { let mut regions = self.regions.clone(); let region = regions.remove(region_index); return Ok((Self { regions }, region)); } } Err(Error::InvalidGuestRegion) } } /// An iterator over the elements of `GuestMemoryMmap`. /// /// This struct is created by `GuestMemory::iter()`. See its documentation for more. 
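///
/// A small, illustrative sketch (not compiled here; it assumes the `backend-mmap` feature) of
/// iterating over the regions of a `GuestMemoryMmap`:
///
/// ```ignore
/// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion};
/// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
/// for region in mem.iter() {
///     println!("region at {:?} has length {}", region.start_addr(), region.len());
/// }
/// ```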
#[derive(Debug)] pub struct Iter<'a, B>(std::slice::Iter<'a, Arc>>); impl<'a, B> Iterator for Iter<'a, B> { type Item = &'a GuestRegionMmap; fn next(&mut self) -> Option { self.0.next().map(AsRef::as_ref) } } impl<'a, B: 'a> GuestMemoryIterator<'a, GuestRegionMmap> for GuestMemoryMmap { type Iter = Iter<'a, B>; } impl GuestMemory for GuestMemoryMmap { type R = GuestRegionMmap; type I = Self; fn num_regions(&self) -> usize { self.regions.len() } fn find_region(&self, addr: GuestAddress) -> Option<&GuestRegionMmap> { let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) { Ok(x) => Some(x), // Within the closest region with starting address < addr Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1), _ => None, }; index.map(|x| self.regions[x].as_ref()) } fn iter(&self) -> Iter { Iter(self.regions.iter()) } } #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] extern crate vmm_sys_util; use super::*; use crate::bitmap::tests::test_guest_memory_and_region; use crate::bitmap::AtomicBitmap; use crate::GuestAddressSpace; use std::fs::File; use std::mem; use std::path::Path; use vmm_sys_util::tempfile::TempFile; type GuestMemoryMmap = super::GuestMemoryMmap<()>; type GuestRegionMmap = super::GuestRegionMmap<()>; type MmapRegion = super::MmapRegion<()>; #[test] fn basic_map() { let m = MmapRegion::new(1024).unwrap(); assert_eq!(1024, m.size()); } fn check_guest_memory_mmap( maybe_guest_mem: Result, expected_regions_summary: &[(GuestAddress, usize)], ) { assert!(maybe_guest_mem.is_ok()); let guest_mem = maybe_guest_mem.unwrap(); assert_eq!(guest_mem.num_regions(), expected_regions_summary.len()); let maybe_last_mem_reg = expected_regions_summary.last(); if let Some((region_addr, region_size)) = maybe_last_mem_reg { let mut last_addr = region_addr.unchecked_add(*region_size as u64); if last_addr.raw_value() != 0 { last_addr = last_addr.unchecked_sub(1); } assert_eq!(guest_mem.last_addr(), last_addr); } for ((region_addr, region_size), mmap) in expected_regions_summary .iter() .zip(guest_mem.regions.iter()) { assert_eq!(region_addr, &mmap.guest_base); assert_eq!(region_size, &mmap.mapping.size()); assert!(guest_mem.find_region(*region_addr).is_some()); } } fn new_guest_memory_mmap( regions_summary: &[(GuestAddress, usize)], ) -> Result { GuestMemoryMmap::from_ranges(regions_summary) } fn new_guest_memory_mmap_from_regions( regions_summary: &[(GuestAddress, usize)], ) -> Result { GuestMemoryMmap::from_regions( regions_summary .iter() .map(|(region_addr, region_size)| { GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap() }) .collect(), ) } fn new_guest_memory_mmap_from_arc_regions( regions_summary: &[(GuestAddress, usize)], ) -> Result { GuestMemoryMmap::from_arc_regions( regions_summary .iter() .map(|(region_addr, region_size)| { Arc::new(GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap()) }) .collect(), ) } fn new_guest_memory_mmap_with_files( regions_summary: &[(GuestAddress, usize)], ) -> Result { let regions: Vec<(GuestAddress, usize, Option)> = regions_summary .iter() .map(|(region_addr, region_size)| { let f = TempFile::new().unwrap().into_file(); f.set_len(*region_size as u64).unwrap(); (*region_addr, *region_size, Some(FileOffset::new(f, 0))) }) .collect(); GuestMemoryMmap::from_ranges_with_files(®ions) } #[test] fn test_no_memory_region() { let regions_summary = []; assert_eq!( format!( "{:?}", new_guest_memory_mmap(®ions_summary).err().unwrap() ), format!("{:?}", Error::NoMemoryRegion) ); 
assert_eq!( format!( "{:?}", new_guest_memory_mmap_with_files(®ions_summary) .err() .unwrap() ), format!("{:?}", Error::NoMemoryRegion) ); assert_eq!( format!( "{:?}", new_guest_memory_mmap_from_regions(®ions_summary) .err() .unwrap() ), format!("{:?}", Error::NoMemoryRegion) ); assert_eq!( format!( "{:?}", new_guest_memory_mmap_from_arc_regions(®ions_summary) .err() .unwrap() ), format!("{:?}", Error::NoMemoryRegion) ); } #[test] fn test_overlapping_memory_regions() { let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(99), 100_usize)]; assert_eq!( format!( "{:?}", new_guest_memory_mmap(®ions_summary).err().unwrap() ), format!("{:?}", Error::MemoryRegionOverlap) ); assert_eq!( format!( "{:?}", new_guest_memory_mmap_with_files(®ions_summary) .err() .unwrap() ), format!("{:?}", Error::MemoryRegionOverlap) ); assert_eq!( format!( "{:?}", new_guest_memory_mmap_from_regions(®ions_summary) .err() .unwrap() ), format!("{:?}", Error::MemoryRegionOverlap) ); assert_eq!( format!( "{:?}", new_guest_memory_mmap_from_arc_regions(®ions_summary) .err() .unwrap() ), format!("{:?}", Error::MemoryRegionOverlap) ); } #[test] fn test_unsorted_memory_regions() { let regions_summary = [(GuestAddress(100), 100_usize), (GuestAddress(0), 100_usize)]; assert_eq!( format!( "{:?}", new_guest_memory_mmap(®ions_summary).err().unwrap() ), format!("{:?}", Error::UnsortedMemoryRegions) ); assert_eq!( format!( "{:?}", new_guest_memory_mmap_with_files(®ions_summary) .err() .unwrap() ), format!("{:?}", Error::UnsortedMemoryRegions) ); assert_eq!( format!( "{:?}", new_guest_memory_mmap_from_regions(®ions_summary) .err() .unwrap() ), format!("{:?}", Error::UnsortedMemoryRegions) ); assert_eq!( format!( "{:?}", new_guest_memory_mmap_from_arc_regions(®ions_summary) .err() .unwrap() ), format!("{:?}", Error::UnsortedMemoryRegions) ); } #[test] fn test_valid_memory_regions() { let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(100), 100_usize)]; let guest_mem = GuestMemoryMmap::new(); assert_eq!(guest_mem.regions.len(), 0); check_guest_memory_mmap(new_guest_memory_mmap(®ions_summary), ®ions_summary); check_guest_memory_mmap( new_guest_memory_mmap_with_files(®ions_summary), ®ions_summary, ); check_guest_memory_mmap( new_guest_memory_mmap_from_regions(®ions_summary), ®ions_summary, ); check_guest_memory_mmap( new_guest_memory_mmap_from_arc_regions(®ions_summary), ®ions_summary, ); } #[test] fn slice_addr() { let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap(); let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap(); let guard = s.ptr_guard(); assert_eq!(guard.as_ptr(), unsafe { m.as_ptr().offset(2) }); } #[test] #[cfg(not(miri))] // Miri cannot mmap files fn mapped_file_read() { let mut f = TempFile::new().unwrap().into_file(); let sample_buf = &[1, 2, 3, 4, 5]; assert!(f.write_all(sample_buf).is_ok()); let file = Some(FileOffset::new(f, 0)); let mem_map = GuestRegionMmap::from_range(GuestAddress(0), sample_buf.len(), file).unwrap(); let buf = &mut [0u8; 16]; assert_eq!( mem_map.as_volatile_slice().unwrap().read(buf, 0).unwrap(), sample_buf.len() ); assert_eq!(buf[0..sample_buf.len()], sample_buf[..]); } #[test] fn test_address_in_range() { let f1 = TempFile::new().unwrap().into_file(); f1.set_len(0x400).unwrap(); let f2 = TempFile::new().unwrap().into_file(); f2.set_len(0x400).unwrap(); let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); let 
guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), ]) .unwrap(); let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; for guest_mem in guest_mem_list.iter() { assert!(guest_mem.address_in_range(GuestAddress(0x200))); assert!(!guest_mem.address_in_range(GuestAddress(0x600))); assert!(guest_mem.address_in_range(GuestAddress(0xa00))); assert!(!guest_mem.address_in_range(GuestAddress(0xc00))); } } #[test] fn test_check_address() { let f1 = TempFile::new().unwrap().into_file(); f1.set_len(0x400).unwrap(); let f2 = TempFile::new().unwrap().into_file(); f2.set_len(0x400).unwrap(); let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), ]) .unwrap(); let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; for guest_mem in guest_mem_list.iter() { assert_eq!( guest_mem.check_address(GuestAddress(0x200)), Some(GuestAddress(0x200)) ); assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None); assert_eq!( guest_mem.check_address(GuestAddress(0xa00)), Some(GuestAddress(0xa00)) ); assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None); } } #[test] fn test_to_region_addr() { let f1 = TempFile::new().unwrap().into_file(); f1.set_len(0x400).unwrap(); let f2 = TempFile::new().unwrap().into_file(); f2.set_len(0x400).unwrap(); let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), ]) .unwrap(); let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; for guest_mem in guest_mem_list.iter() { assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none()); let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap(); let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap(); assert!(r0.as_ptr() == r1.as_ptr()); assert_eq!(addr0, MemoryRegionAddress(0)); assert_eq!(addr1, MemoryRegionAddress(0x200)); } } #[test] fn test_get_host_address() { let f1 = TempFile::new().unwrap().into_file(); f1.set_len(0x400).unwrap(); let f2 = TempFile::new().unwrap().into_file(); f2.set_len(0x400).unwrap(); let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), ]) .unwrap(); let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; for guest_mem in guest_mem_list.iter() { assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err()); let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap(); let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap(); assert_eq!( ptr0, guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr() ); assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1); } } #[test] fn test_deref() { let f = 
TempFile::new().unwrap().into_file(); f.set_len(0x400).unwrap(); let start_addr = GuestAddress(0x0); let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[( start_addr, 0x400, Some(FileOffset::new(f, 0)), )]) .unwrap(); let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file]; for guest_mem in guest_mem_list.iter() { let sample_buf = &[1, 2, 3, 4, 5]; assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5); let slice = guest_mem .find_region(GuestAddress(0)) .unwrap() .as_volatile_slice() .unwrap(); let buf = &mut [0, 0, 0, 0, 0]; assert_eq!(slice.read(buf, 0).unwrap(), 5); assert_eq!(buf, sample_buf); } } #[test] fn test_read_u64() { let f1 = TempFile::new().unwrap().into_file(); f1.set_len(0x1000).unwrap(); let f2 = TempFile::new().unwrap().into_file(); f2.set_len(0x1000).unwrap(); let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); let bad_addr = GuestAddress(0x2001); let bad_addr2 = GuestAddress(0x1ffc); let max_addr = GuestAddress(0x2000); let gm = GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap(); let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))), (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))), ]) .unwrap(); let gm_list = vec![gm, gm_backed_by_file]; for gm in gm_list.iter() { let val1: u64 = 0xaa55_aa55_aa55_aa55; let val2: u64 = 0x55aa_55aa_55aa_55aa; assert_eq!( format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()), format!("InvalidGuestAddress({:?})", bad_addr,) ); assert_eq!( format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()), format!( "PartialBuffer {{ expected: {:?}, completed: {:?} }}", mem::size_of::(), max_addr.checked_offset_from(bad_addr2).unwrap() ) ); gm.write_obj(val1, GuestAddress(0x500)).unwrap(); gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap(); let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap(); let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap(); assert_eq!(val1, num1); assert_eq!(val2, num2); } } #[test] fn write_and_read() { let f = TempFile::new().unwrap().into_file(); f.set_len(0x400).unwrap(); let mut start_addr = GuestAddress(0x1000); let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[( start_addr, 0x400, Some(FileOffset::new(f, 0)), )]) .unwrap(); let gm_list = vec![gm, gm_backed_by_file]; for gm in gm_list.iter() { let sample_buf = &[1, 2, 3, 4, 5]; assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5); let buf = &mut [0u8; 5]; assert_eq!(gm.read(buf, start_addr).unwrap(), 5); assert_eq!(buf, sample_buf); start_addr = GuestAddress(0x13ff); assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1); assert_eq!(gm.read(buf, start_addr).unwrap(), 1); assert_eq!(buf[0], sample_buf[0]); start_addr = GuestAddress(0x1000); } } #[test] fn read_to_and_write_from_mem() { let f = TempFile::new().unwrap().into_file(); f.set_len(0x400).unwrap(); let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap(); let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[( GuestAddress(0x1000), 0x400, Some(FileOffset::new(f, 0)), )]) .unwrap(); let gm_list = vec![gm, gm_backed_by_file]; for gm in gm_list.iter() { let addr = GuestAddress(0x1010); let mut file = if cfg!(unix) { File::open(Path::new("/dev/zero")).unwrap() } else { 
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap() }; gm.write_obj(!0u32, addr).unwrap(); gm.read_exact_volatile_from(addr, &mut file, mem::size_of::()) .unwrap(); let value: u32 = gm.read_obj(addr).unwrap(); if cfg!(unix) { assert_eq!(value, 0); } else { assert_eq!(value, 0x0090_5a4d); } let mut sink = vec![0; mem::size_of::()]; gm.write_all_volatile_to(addr, &mut sink.as_mut_slice(), mem::size_of::()) .unwrap(); if cfg!(unix) { assert_eq!(sink, vec![0; mem::size_of::()]); } else { assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]); }; } } #[test] fn create_vec_with_regions() { let region_size = 0x400; let regions = vec![ (GuestAddress(0x0), region_size), (GuestAddress(0x1000), region_size), ]; let mut iterated_regions = Vec::new(); let gm = GuestMemoryMmap::from_ranges(®ions).unwrap(); for region in gm.iter() { assert_eq!(region.len(), region_size as GuestUsize); } for region in gm.iter() { iterated_regions.push((region.start_addr(), region.len() as usize)); } assert_eq!(regions, iterated_regions); assert!(regions .iter() .map(|x| (x.0, x.1)) .eq(iterated_regions.iter().copied())); assert_eq!(gm.regions[0].guest_base, regions[0].0); assert_eq!(gm.regions[1].guest_base, regions[1].0); } #[test] fn test_memory() { let region_size = 0x400; let regions = vec![ (GuestAddress(0x0), region_size), (GuestAddress(0x1000), region_size), ]; let mut iterated_regions = Vec::new(); let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); let mem = gm.memory(); for region in mem.iter() { assert_eq!(region.len(), region_size as GuestUsize); } for region in mem.iter() { iterated_regions.push((region.start_addr(), region.len() as usize)); } assert_eq!(regions, iterated_regions); assert!(regions .iter() .map(|x| (x.0, x.1)) .eq(iterated_regions.iter().copied())); assert_eq!(gm.regions[0].guest_base, regions[0].0); assert_eq!(gm.regions[1].guest_base, regions[1].0); } #[test] fn test_access_cross_boundary() { let f1 = TempFile::new().unwrap().into_file(); f1.set_len(0x1000).unwrap(); let f2 = TempFile::new().unwrap().into_file(); f2.set_len(0x1000).unwrap(); let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); let gm = GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap(); let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))), (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))), ]) .unwrap(); let gm_list = vec![gm, gm_backed_by_file]; for gm in gm_list.iter() { let sample_buf = &[1, 2, 3, 4, 5]; assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5); let buf = &mut [0u8; 5]; assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5); assert_eq!(buf, sample_buf); } } #[test] fn test_retrieve_fd_backing_memory_region() { let f = TempFile::new().unwrap().into_file(); f.set_len(0x400).unwrap(); let start_addr = GuestAddress(0x0); let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); assert!(gm.find_region(start_addr).is_some()); let region = gm.find_region(start_addr).unwrap(); assert!(region.file_offset().is_none()); let gm = GuestMemoryMmap::from_ranges_with_files(&[( start_addr, 0x400, Some(FileOffset::new(f, 0)), )]) .unwrap(); assert!(gm.find_region(start_addr).is_some()); let region = gm.find_region(start_addr).unwrap(); assert!(region.file_offset().is_some()); } // Windows needs a dedicated test where it will retrieve the allocation // granularity to determine a proper offset (other than 0) that can be // used for the backing file. 
Refer to Microsoft docs here: // https://docs.microsoft.com/en-us/windows/desktop/api/memoryapi/nf-memoryapi-mapviewoffile #[test] #[cfg(unix)] fn test_retrieve_offset_from_fd_backing_memory_region() { let f = TempFile::new().unwrap().into_file(); f.set_len(0x1400).unwrap(); // Needs to be aligned on 4k, otherwise mmap will fail. let offset = 0x1000; let start_addr = GuestAddress(0x0); let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); assert!(gm.find_region(start_addr).is_some()); let region = gm.find_region(start_addr).unwrap(); assert!(region.file_offset().is_none()); let gm = GuestMemoryMmap::from_ranges_with_files(&[( start_addr, 0x400, Some(FileOffset::new(f, offset)), )]) .unwrap(); assert!(gm.find_region(start_addr).is_some()); let region = gm.find_region(start_addr).unwrap(); assert!(region.file_offset().is_some()); assert_eq!(region.file_offset().unwrap().start(), offset); } #[test] fn test_mmap_insert_region() { let region_size = 0x1000; let regions = vec![ (GuestAddress(0x0), region_size), (GuestAddress(0x10_0000), region_size), ]; let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); let mem_orig = gm.memory(); assert_eq!(mem_orig.num_regions(), 2); let mmap = Arc::new(GuestRegionMmap::from_range(GuestAddress(0x8000), 0x1000, None).unwrap()); let gm = gm.insert_region(mmap).unwrap(); let mmap = Arc::new(GuestRegionMmap::from_range(GuestAddress(0x4000), 0x1000, None).unwrap()); let gm = gm.insert_region(mmap).unwrap(); let mmap = Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap()); let gm = gm.insert_region(mmap).unwrap(); let mmap = Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap()); gm.insert_region(mmap).unwrap_err(); assert_eq!(mem_orig.num_regions(), 2); assert_eq!(gm.num_regions(), 5); assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000)); assert_eq!(gm.regions[1].start_addr(), GuestAddress(0x4000)); assert_eq!(gm.regions[2].start_addr(), GuestAddress(0x8000)); assert_eq!(gm.regions[3].start_addr(), GuestAddress(0xc000)); assert_eq!(gm.regions[4].start_addr(), GuestAddress(0x10_0000)); } #[test] fn test_mmap_remove_region() { let region_size = 0x1000; let regions = vec![ (GuestAddress(0x0), region_size), (GuestAddress(0x10_0000), region_size), ]; let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); let mem_orig = gm.memory(); assert_eq!(mem_orig.num_regions(), 2); gm.remove_region(GuestAddress(0), 128).unwrap_err(); gm.remove_region(GuestAddress(0x4000), 128).unwrap_err(); let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap(); assert_eq!(mem_orig.num_regions(), 2); assert_eq!(gm.num_regions(), 1); assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000)); assert_eq!(region.start_addr(), GuestAddress(0x10_0000)); } #[test] fn test_guest_memory_mmap_get_slice() { let region = GuestRegionMmap::from_range(GuestAddress(0), 0x400, None).unwrap(); // Normal case. let slice_addr = MemoryRegionAddress(0x100); let slice_size = 0x200; let slice = region.get_slice(slice_addr, slice_size).unwrap(); assert_eq!(slice.len(), slice_size); // Empty slice. let slice_addr = MemoryRegionAddress(0x200); let slice_size = 0x0; let slice = region.get_slice(slice_addr, slice_size).unwrap(); assert!(slice.is_empty()); // Error case when slice_size is beyond the boundary. 
let slice_addr = MemoryRegionAddress(0x300); let slice_size = 0x200; assert!(region.get_slice(slice_addr, slice_size).is_err()); } #[test] fn test_guest_memory_mmap_as_volatile_slice() { let region_size = 0x400; let region = GuestRegionMmap::from_range(GuestAddress(0), region_size, None).unwrap(); // Test slice length. let slice = region.as_volatile_slice().unwrap(); assert_eq!(slice.len(), region_size); // Test slice data. let v = 0x1234_5678u32; let r = slice.get_ref::(0x200).unwrap(); r.store(v); assert_eq!(r.load(), v); } #[test] fn test_guest_memory_get_slice() { let start_addr1 = GuestAddress(0); let start_addr2 = GuestAddress(0x800); let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); // Normal cases. let slice_size = 0x200; let slice = guest_mem .get_slice(GuestAddress(0x100), slice_size) .unwrap(); assert_eq!(slice.len(), slice_size); let slice_size = 0x400; let slice = guest_mem .get_slice(GuestAddress(0x800), slice_size) .unwrap(); assert_eq!(slice.len(), slice_size); // Empty slice. assert!(guest_mem .get_slice(GuestAddress(0x900), 0) .unwrap() .is_empty()); // Error cases, wrong size or base address. assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err()); assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err()); assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err()); } #[test] fn test_checked_offset() { let start_addr1 = GuestAddress(0); let start_addr2 = GuestAddress(0x800); let start_addr3 = GuestAddress(0xc00); let guest_mem = GuestMemoryMmap::from_ranges(&[ (start_addr1, 0x400), (start_addr2, 0x400), (start_addr3, 0x400), ]) .unwrap(); assert_eq!( guest_mem.checked_offset(start_addr1, 0x200), Some(GuestAddress(0x200)) ); assert_eq!( guest_mem.checked_offset(start_addr1, 0xa00), Some(GuestAddress(0xa00)) ); assert_eq!( guest_mem.checked_offset(start_addr2, 0x7ff), Some(GuestAddress(0xfff)) ); assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None); assert_eq!(guest_mem.checked_offset(start_addr1, std::usize::MAX), None); assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None); assert_eq!( guest_mem.checked_offset(start_addr1, 0x400 - 1), Some(GuestAddress(0x400 - 1)) ); } #[test] fn test_check_range() { let start_addr1 = GuestAddress(0); let start_addr2 = GuestAddress(0x800); let start_addr3 = GuestAddress(0xc00); let guest_mem = GuestMemoryMmap::from_ranges(&[ (start_addr1, 0x400), (start_addr2, 0x400), (start_addr3, 0x400), ]) .unwrap(); assert!(guest_mem.check_range(start_addr1, 0x0)); assert!(guest_mem.check_range(start_addr1, 0x200)); assert!(guest_mem.check_range(start_addr1, 0x400)); assert!(!guest_mem.check_range(start_addr1, 0xa00)); assert!(guest_mem.check_range(start_addr2, 0x7ff)); assert!(guest_mem.check_range(start_addr2, 0x800)); assert!(!guest_mem.check_range(start_addr2, 0x801)); assert!(!guest_mem.check_range(start_addr2, 0xc00)); assert!(!guest_mem.check_range(start_addr1, std::usize::MAX)); } #[test] fn test_atomic_accesses() { let region = GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap(); crate::bytes::tests::check_atomic_accesses( region, MemoryRegionAddress(0), MemoryRegionAddress(0x1000), ); } #[test] fn test_dirty_tracking() { test_guest_memory_and_region(|| { crate::GuestMemoryMmap::::from_ranges(&[(GuestAddress(0), 0x1_0000)]) .unwrap() }); } } vm-memory-0.14.0/src/mmap_unix.rs000064400000000000000000000560021046102023000147730ustar 00000000000000// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. 
// // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD-3-Clause file. // // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Helper structure for working with mmaped memory regions in Unix. use std::io; use std::os::unix::io::AsRawFd; use std::ptr::null_mut; use std::result; use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::FileOffset; use crate::mmap::{check_file_offset, NewBitmap}; use crate::volatile_memory::{self, VolatileMemory, VolatileSlice}; /// Error conditions that may arise when creating a new `MmapRegion` object. #[derive(Debug, thiserror::Error)] pub enum Error { /// The specified file offset and length cause overflow when added. #[error("The specified file offset and length cause overflow when added")] InvalidOffsetLength, /// The specified pointer to the mapping is not page-aligned. #[error("The specified pointer to the mapping is not page-aligned")] InvalidPointer, /// The forbidden `MAP_FIXED` flag was specified. #[error("The forbidden `MAP_FIXED` flag was specified")] MapFixed, /// Mappings using the same fd overlap in terms of file offset and length. #[error("Mappings using the same fd overlap in terms of file offset and length")] MappingOverlap, /// A mapping with offset + length > EOF was attempted. #[error("The specified file offset and length is greater then file length")] MappingPastEof, /// The `mmap` call returned an error. #[error("{0}")] Mmap(io::Error), /// Seeking the end of the file returned an error. #[error("Error seeking the end of the file: {0}")] SeekEnd(io::Error), /// Seeking the start of the file returned an error. #[error("Error seeking the start of the file: {0}")] SeekStart(io::Error), } pub type Result = result::Result; /// A factory struct to build `MmapRegion` objects. #[derive(Debug)] pub struct MmapRegionBuilder { size: usize, prot: i32, flags: i32, file_offset: Option, raw_ptr: Option<*mut u8>, hugetlbfs: Option, bitmap: B, } impl MmapRegionBuilder { /// Create a new `MmapRegionBuilder` using the default value for /// the inner `Bitmap` object. pub fn new(size: usize) -> Self { Self::new_with_bitmap(size, B::default()) } } impl MmapRegionBuilder { /// Create a new `MmapRegionBuilder` using the provided `Bitmap` object. /// /// When instantiating the builder for a region that does not require dirty bitmap /// bitmap tracking functionality, we can specify a trivial `Bitmap` implementation /// such as `()`. pub fn new_with_bitmap(size: usize, bitmap: B) -> Self { MmapRegionBuilder { size, prot: 0, flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE, file_offset: None, raw_ptr: None, hugetlbfs: None, bitmap, } } /// Create the `MmapRegion` object with the specified mmap memory protection flag `prot`. pub fn with_mmap_prot(mut self, prot: i32) -> Self { self.prot = prot; self } /// Create the `MmapRegion` object with the specified mmap `flags`. pub fn with_mmap_flags(mut self, flags: i32) -> Self { self.flags = flags; self } /// Create the `MmapRegion` object with the specified `file_offset`. pub fn with_file_offset(mut self, file_offset: FileOffset) -> Self { self.file_offset = Some(file_offset); self } /// Create the `MmapRegion` object with the specified `hugetlbfs` flag. 
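///
/// An illustrative sketch (not compiled here) of the builder flow; the `libc` constants are
/// only an assumption about typical protection and mapping flags:
///
/// ```ignore
/// # use vm_memory::mmap::MmapRegionBuilder;
/// let region = MmapRegionBuilder::<()>::new(0x1000)
///     .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE)
///     .with_mmap_flags(libc::MAP_ANONYMOUS | libc::MAP_PRIVATE)
///     .with_hugetlbfs(false)
///     .build()
///     .unwrap();
/// assert_eq!(region.size(), 0x1000);
/// ```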
pub fn with_hugetlbfs(mut self, hugetlbfs: bool) -> Self { self.hugetlbfs = Some(hugetlbfs); self } /// Create the `MmapRegion` object with pre-mmapped raw pointer. /// /// # Safety /// /// To use this safely, the caller must guarantee that `raw_addr` and `self.size` define a /// region within a valid mapping that is already present in the process. pub unsafe fn with_raw_mmap_pointer(mut self, raw_ptr: *mut u8) -> Self { self.raw_ptr = Some(raw_ptr); self } /// Build the `MmapRegion` object. pub fn build(self) -> Result> { if self.raw_ptr.is_some() { return self.build_raw(); } // Forbid MAP_FIXED, as it doesn't make sense in this context, and is pretty dangerous // in general. if self.flags & libc::MAP_FIXED != 0 { return Err(Error::MapFixed); } let (fd, offset) = if let Some(ref f_off) = self.file_offset { check_file_offset(f_off, self.size)?; (f_off.file().as_raw_fd(), f_off.start()) } else { (-1, 0) }; #[cfg(not(miri))] // SAFETY: This is safe because we're not allowing MAP_FIXED, and invalid parameters // cannot break Rust safety guarantees (things may change if we're mapping /dev/mem or // some wacky file). let addr = unsafe { libc::mmap( null_mut(), self.size, self.prot, self.flags, fd, offset as libc::off_t, ) }; #[cfg(not(miri))] if addr == libc::MAP_FAILED { return Err(Error::Mmap(io::Error::last_os_error())); } #[cfg(miri)] if self.size == 0 { return Err(Error::Mmap(io::Error::from_raw_os_error(libc::EINVAL))); } // Miri does not support the mmap syscall, so we use rust's allocator for miri tests #[cfg(miri)] let addr = unsafe { std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align(self.size, 8).unwrap()) }; Ok(MmapRegion { addr: addr as *mut u8, size: self.size, bitmap: self.bitmap, file_offset: self.file_offset, prot: self.prot, flags: self.flags, owned: true, hugetlbfs: self.hugetlbfs, }) } fn build_raw(self) -> Result> { // SAFETY: Safe because this call just returns the page size and doesn't have any side // effects. let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as usize; let addr = self.raw_ptr.unwrap(); // Check that the pointer to the mapping is page-aligned. if (addr as usize) & (page_size - 1) != 0 { return Err(Error::InvalidPointer); } Ok(MmapRegion { addr, size: self.size, bitmap: self.bitmap, file_offset: self.file_offset, prot: self.prot, flags: self.flags, owned: false, hugetlbfs: self.hugetlbfs, }) } } /// Helper structure for working with mmaped memory regions in Unix. /// /// The structure is used for accessing the guest's physical memory by mmapping it into /// the current process. /// /// # Limitations /// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's /// physical memory may be mapped into the current process due to the limited virtual address /// space size of the process. #[derive(Debug)] pub struct MmapRegion { addr: *mut u8, size: usize, bitmap: B, file_offset: Option, prot: i32, flags: i32, owned: bool, hugetlbfs: Option, } // SAFETY: Send and Sync aren't automatically inherited for the raw address pointer. // Accessing that pointer is only done through the stateless interface which // allows the object to be shared by multiple threads without a decrease in // safety. unsafe impl Send for MmapRegion {} // SAFETY: See comment above. unsafe impl Sync for MmapRegion {} impl MmapRegion { /// Creates a shared anonymous mapping of `size` bytes. /// /// # Arguments /// * `size` - The size of the memory region in bytes. 
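///
/// An illustrative sketch (not compiled here) of creating an anonymous region and checking
/// its size:
///
/// ```ignore
/// # use vm_memory::MmapRegion;
/// let region: MmapRegion<()> = MmapRegion::new(0x1000).unwrap();
/// assert_eq!(region.size(), 0x1000);
/// ```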
pub fn new(size: usize) -> Result { MmapRegionBuilder::new_with_bitmap(size, B::with_len(size)) .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE) .with_mmap_flags(libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE) .build() } /// Creates a shared file mapping of `size` bytes. /// /// # Arguments /// * `file_offset` - The mapping will be created at offset `file_offset.start` in the file /// referred to by `file_offset.file`. /// * `size` - The size of the memory region in bytes. pub fn from_file(file_offset: FileOffset, size: usize) -> Result { MmapRegionBuilder::new_with_bitmap(size, B::with_len(size)) .with_file_offset(file_offset) .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE) .with_mmap_flags(libc::MAP_NORESERVE | libc::MAP_SHARED) .build() } /// Creates a mapping based on the provided arguments. /// /// # Arguments /// * `file_offset` - if provided, the method will create a file mapping at offset /// `file_offset.start` in the file referred to by `file_offset.file`. /// * `size` - The size of the memory region in bytes. /// * `prot` - The desired memory protection of the mapping. /// * `flags` - This argument determines whether updates to the mapping are visible to other /// processes mapping the same region, and whether updates are carried through to /// the underlying file. pub fn build( file_offset: Option, size: usize, prot: i32, flags: i32, ) -> Result { let mut builder = MmapRegionBuilder::new_with_bitmap(size, B::with_len(size)) .with_mmap_prot(prot) .with_mmap_flags(flags); if let Some(v) = file_offset { builder = builder.with_file_offset(v); } builder.build() } /// Creates a `MmapRegion` instance for an externally managed mapping. /// /// This method is intended to be used exclusively in situations in which the mapping backing /// the region is provided by an entity outside the control of the caller (e.g. the dynamic /// linker). /// /// # Arguments /// * `addr` - Pointer to the start of the mapping. Must be page-aligned. /// * `size` - The size of the memory region in bytes. /// * `prot` - Must correspond to the memory protection attributes of the existing mapping. /// * `flags` - Must correspond to the flags that were passed to `mmap` for the creation of /// the existing mapping. /// /// # Safety /// /// To use this safely, the caller must guarantee that `addr` and `size` define a region within /// a valid mapping that is already present in the process. pub unsafe fn build_raw(addr: *mut u8, size: usize, prot: i32, flags: i32) -> Result { MmapRegionBuilder::new_with_bitmap(size, B::with_len(size)) .with_raw_mmap_pointer(addr) .with_mmap_prot(prot) .with_mmap_flags(flags) .build() } } impl MmapRegion { /// Returns a pointer to the beginning of the memory region. Mutable accesses performed /// using the resulting pointer are not automatically accounted for by the dirty bitmap /// tracking functionality. /// /// Should only be used for passing this region to ioctls for setting guest memory. pub fn as_ptr(&self) -> *mut u8 { self.addr } /// Returns the size of this region. pub fn size(&self) -> usize { self.size } /// Returns information regarding the offset into the file backing this region (if any). pub fn file_offset(&self) -> Option<&FileOffset> { self.file_offset.as_ref() } /// Returns the value of the `prot` parameter passed to `mmap` when mapping this region. pub fn prot(&self) -> i32 { self.prot } /// Returns the value of the `flags` parameter passed to `mmap` when mapping this region. 
pub fn flags(&self) -> i32 { self.flags } /// Returns `true` if the mapping is owned by this `MmapRegion` instance. pub fn owned(&self) -> bool { self.owned } /// Checks whether this region and `other` are backed by overlapping /// [`FileOffset`](struct.FileOffset.html) objects. /// /// This is mostly a sanity check available for convenience, as different file descriptors /// can alias the same file. pub fn fds_overlap(&self, other: &MmapRegion) -> bool { if let Some(f_off1) = self.file_offset() { if let Some(f_off2) = other.file_offset() { if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() { let s1 = f_off1.start(); let s2 = f_off2.start(); let l1 = self.len() as u64; let l2 = other.len() as u64; if s1 < s2 { return s1 + l1 > s2; } else { return s2 + l2 > s1; } } } } false } /// Set the hugetlbfs of the region pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) { self.hugetlbfs = Some(hugetlbfs) } /// Returns `true` if the region is hugetlbfs pub fn is_hugetlbfs(&self) -> Option { self.hugetlbfs } /// Returns a reference to the inner bitmap object. pub fn bitmap(&self) -> &B { &self.bitmap } } impl VolatileMemory for MmapRegion { type B = B; fn len(&self) -> usize { self.size } fn get_slice( &self, offset: usize, count: usize, ) -> volatile_memory::Result>> { let _ = self.compute_end_offset(offset, count)?; Ok( // SAFETY: Safe because we checked that offset + count was within our range and we only // ever hand out volatile accessors. unsafe { VolatileSlice::with_bitmap( self.addr.add(offset), count, self.bitmap.slice_at(offset), None, ) }, ) } } impl Drop for MmapRegion { fn drop(&mut self) { if self.owned { // SAFETY: This is safe because we mmap the area at addr ourselves, and nobody // else is holding a reference to it. unsafe { #[cfg(not(miri))] libc::munmap(self.addr as *mut libc::c_void, self.size); #[cfg(miri)] std::alloc::dealloc( self.addr, std::alloc::Layout::from_size_align(self.size, 8).unwrap(), ); } } } } #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] use super::*; use std::io::Write; use std::num::NonZeroUsize; use std::slice; use std::sync::Arc; use vmm_sys_util::tempfile::TempFile; use crate::bitmap::AtomicBitmap; type MmapRegion = super::MmapRegion<()>; // Adding a helper method to extract the errno within an Error::Mmap(e), or return a // distinctive value when the error is represented by another variant. 
impl Error { pub fn raw_os_error(&self) -> i32 { match self { Error::Mmap(e) => e.raw_os_error().unwrap(), _ => std::i32::MIN, } } } #[test] fn test_mmap_region_new() { assert!(MmapRegion::new(0).is_err()); let size = 4096; let r = MmapRegion::new(4096).unwrap(); assert_eq!(r.size(), size); assert!(r.file_offset().is_none()); assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); assert_eq!( r.flags(), libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE ); } #[test] fn test_mmap_region_set_hugetlbfs() { assert!(MmapRegion::new(0).is_err()); let size = 4096; let r = MmapRegion::new(size).unwrap(); assert_eq!(r.size(), size); assert!(r.file_offset().is_none()); assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); assert_eq!( r.flags(), libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE ); assert_eq!(r.is_hugetlbfs(), None); let mut r = MmapRegion::new(size).unwrap(); r.set_hugetlbfs(false); assert_eq!(r.size(), size); assert!(r.file_offset().is_none()); assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); assert_eq!( r.flags(), libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE ); assert_eq!(r.is_hugetlbfs(), Some(false)); let mut r = MmapRegion::new(size).unwrap(); r.set_hugetlbfs(true); assert_eq!(r.size(), size); assert!(r.file_offset().is_none()); assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); assert_eq!( r.flags(), libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE ); assert_eq!(r.is_hugetlbfs(), Some(true)); } #[test] #[cfg(not(miri))] // Miri cannot mmap files fn test_mmap_region_from_file() { let mut f = TempFile::new().unwrap().into_file(); let offset: usize = 0; let buf1 = [1u8, 2, 3, 4, 5]; f.write_all(buf1.as_ref()).unwrap(); let r = MmapRegion::from_file(FileOffset::new(f, offset as u64), buf1.len()).unwrap(); assert_eq!(r.size(), buf1.len() - offset); assert_eq!(r.file_offset().unwrap().start(), offset as u64); assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_SHARED); let buf2 = unsafe { slice::from_raw_parts(r.as_ptr(), buf1.len() - offset) }; assert_eq!(&buf1[offset..], buf2); } #[test] #[cfg(not(miri))] // Miri cannot mmap files fn test_mmap_region_build() { let a = Arc::new(TempFile::new().unwrap().into_file()); let prot = libc::PROT_READ | libc::PROT_WRITE; let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; let offset = 4096; let size = 1000; // Offset + size will overflow. let r = MmapRegion::build( Some(FileOffset::from_arc(a.clone(), std::u64::MAX)), size, prot, flags, ); assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength"); // Offset + size is greater than the size of the file (which is 0 at this point). let r = MmapRegion::build( Some(FileOffset::from_arc(a.clone(), offset)), size, prot, flags, ); assert_eq!(format!("{:?}", r.unwrap_err()), "MappingPastEof"); // MAP_FIXED was specified among the flags. let r = MmapRegion::build( Some(FileOffset::from_arc(a.clone(), offset)), size, prot, flags | libc::MAP_FIXED, ); assert_eq!(format!("{:?}", r.unwrap_err()), "MapFixed"); // Let's resize the file. assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0); // The offset is not properly aligned. let r = MmapRegion::build( Some(FileOffset::from_arc(a.clone(), offset - 1)), size, prot, flags, ); assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL); // The build should be successful now. 
let r = MmapRegion::build(Some(FileOffset::from_arc(a, offset)), size, prot, flags).unwrap(); assert_eq!(r.size(), size); assert_eq!(r.file_offset().unwrap().start(), offset); assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_PRIVATE); assert!(r.owned()); let region_size = 0x10_0000; let bitmap = AtomicBitmap::new(region_size, unsafe { NonZeroUsize::new_unchecked(0x1000) }); let builder = MmapRegionBuilder::new_with_bitmap(region_size, bitmap) .with_hugetlbfs(true) .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE); assert_eq!(builder.size, region_size); assert_eq!(builder.hugetlbfs, Some(true)); assert_eq!(builder.prot, libc::PROT_READ | libc::PROT_WRITE); crate::bitmap::tests::test_volatile_memory(&(builder.build().unwrap())); } #[test] #[cfg(not(miri))] // Causes warnings due to the pointer casts fn test_mmap_region_build_raw() { let addr = 0; let size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }; let prot = libc::PROT_READ | libc::PROT_WRITE; let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; let r = unsafe { MmapRegion::build_raw((addr + 1) as *mut u8, size, prot, flags) }; assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidPointer"); let r = unsafe { MmapRegion::build_raw(addr as *mut u8, size, prot, flags).unwrap() }; assert_eq!(r.size(), size); assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_PRIVATE); assert!(!r.owned()); } #[test] #[cfg(not(miri))] // Miri cannot mmap files fn test_mmap_region_fds_overlap() { let a = Arc::new(TempFile::new().unwrap().into_file()); assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0); let r1 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 0), 4096).unwrap(); let r2 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 4096), 4096).unwrap(); assert!(!r1.fds_overlap(&r2)); let r1 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 0), 5000).unwrap(); assert!(r1.fds_overlap(&r2)); let r2 = MmapRegion::from_file(FileOffset::from_arc(a, 0), 1000).unwrap(); assert!(r1.fds_overlap(&r2)); // Different files, so there's not overlap. let new_file = TempFile::new().unwrap().into_file(); // Resize before mapping. assert_eq!( unsafe { libc::ftruncate(new_file.as_raw_fd(), 1024 * 10) }, 0 ); let r2 = MmapRegion::from_file(FileOffset::new(new_file, 0), 5000).unwrap(); assert!(!r1.fds_overlap(&r2)); // R2 is not file backed, so no overlap. let r2 = MmapRegion::new(5000).unwrap(); assert!(!r1.fds_overlap(&r2)); } #[test] fn test_dirty_tracking() { // Using the `crate` prefix because we aliased `MmapRegion` to `MmapRegion<()>` for // the rest of the unit tests above. let m = crate::MmapRegion::::new(0x1_0000).unwrap(); crate::bitmap::tests::test_volatile_memory(&m); } } vm-memory-0.14.0/src/mmap_windows.rs000064400000000000000000000210011046102023000154710ustar 00000000000000// Copyright (C) 2019 CrowdStrike, Inc. All rights reserved. // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Helper structure for working with mmaped memory regions in Windows. 
use std; use std::io; use std::os::windows::io::{AsRawHandle, RawHandle}; use std::ptr::{null, null_mut}; use libc::{c_void, size_t}; use winapi::um::errhandlingapi::GetLastError; use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::FileOffset; use crate::mmap::NewBitmap; use crate::volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice}; #[allow(non_snake_case)] #[link(name = "kernel32")] extern "stdcall" { pub fn VirtualAlloc( lpAddress: *mut c_void, dwSize: size_t, flAllocationType: u32, flProtect: u32, ) -> *mut c_void; pub fn VirtualFree(lpAddress: *mut c_void, dwSize: size_t, dwFreeType: u32) -> u32; pub fn CreateFileMappingA( hFile: RawHandle, // HANDLE lpFileMappingAttributes: *const c_void, // LPSECURITY_ATTRIBUTES flProtect: u32, // DWORD dwMaximumSizeHigh: u32, // DWORD dwMaximumSizeLow: u32, // DWORD lpName: *const u8, // LPCSTR ) -> RawHandle; // HANDLE pub fn MapViewOfFile( hFileMappingObject: RawHandle, dwDesiredAccess: u32, dwFileOffsetHigh: u32, dwFileOffsetLow: u32, dwNumberOfBytesToMap: size_t, ) -> *mut c_void; pub fn CloseHandle(hObject: RawHandle) -> u32; // BOOL } const MM_HIGHEST_VAD_ADDRESS: u64 = 0x000007FFFFFDFFFF; const MEM_COMMIT: u32 = 0x00001000; const MEM_RELEASE: u32 = 0x00008000; const FILE_MAP_ALL_ACCESS: u32 = 0xf001f; const PAGE_READWRITE: u32 = 0x04; pub const MAP_FAILED: *mut c_void = 0 as *mut c_void; pub const INVALID_HANDLE_VALUE: RawHandle = (-1isize) as RawHandle; #[allow(dead_code)] pub const ERROR_INVALID_PARAMETER: i32 = 87; /// Helper structure for working with mmaped memory regions in Unix. /// /// The structure is used for accessing the guest's physical memory by mmapping it into /// the current process. /// /// # Limitations /// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's /// physical memory may be mapped into the current process due to the limited virtual address /// space size of the process. #[derive(Debug)] pub struct MmapRegion { addr: *mut u8, size: usize, bitmap: B, file_offset: Option, } // Send and Sync aren't automatically inherited for the raw address pointer. // Accessing that pointer is only done through the stateless interface which // allows the object to be shared by multiple threads without a decrease in // safety. unsafe impl Send for MmapRegion {} unsafe impl Sync for MmapRegion {} impl MmapRegion { /// Creates a shared anonymous mapping of `size` bytes. /// /// # Arguments /// * `size` - The size of the memory region in bytes. pub fn new(size: usize) -> io::Result { if (size == 0) || (size > MM_HIGHEST_VAD_ADDRESS as usize) { return Err(io::Error::from_raw_os_error(libc::EINVAL)); } // This is safe because we are creating an anonymous mapping in a place not already used by // any other area in this process. let addr = unsafe { VirtualAlloc(0 as *mut c_void, size, MEM_COMMIT, PAGE_READWRITE) }; if addr == MAP_FAILED { return Err(io::Error::last_os_error()); } Ok(Self { addr: addr as *mut u8, size, bitmap: B::with_len(size), file_offset: None, }) } /// Creates a shared file mapping of `size` bytes. /// /// # Arguments /// * `file_offset` - The mapping will be created at offset `file_offset.start` in the file /// referred to by `file_offset.file`. /// * `size` - The size of the memory region in bytes. 
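// Exposition-only sketch; not part of the upstream crate. It shows how a caller might map the
// first page of an existing file: the `u64` size and offset are split into high/low DWORDs for
// `CreateFileMappingA`/`MapViewOfFile` inside the implementation below. The path is hypothetical.
//
//     let file = std::fs::File::open("C:\\guest\\memory.bin")?; // hypothetical path
//     let region = MmapRegion::<()>::from_file(FileOffset::new(file, 0), 4096)?;
//     assert_eq!(region.size(), 4096);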
pub fn from_file(file_offset: FileOffset, size: usize) -> io::Result { let handle = file_offset.file().as_raw_handle(); if handle == INVALID_HANDLE_VALUE { return Err(io::Error::from_raw_os_error(libc::EBADF)); } let mapping = unsafe { CreateFileMappingA( handle, null(), PAGE_READWRITE, (size >> 32) as u32, size as u32, null(), ) }; if mapping == 0 as RawHandle { return Err(io::Error::last_os_error()); } let offset = file_offset.start(); // This is safe because we are creating a mapping in a place not already used by any other // area in this process. let addr = unsafe { MapViewOfFile( mapping, FILE_MAP_ALL_ACCESS, (offset >> 32) as u32, offset as u32, size, ) }; unsafe { CloseHandle(mapping); } if addr == null_mut() { return Err(io::Error::last_os_error()); } Ok(Self { addr: addr as *mut u8, size, bitmap: B::with_len(size), file_offset: Some(file_offset), }) } } impl MmapRegion { /// Returns a pointer to the beginning of the memory region. Mutable accesses performed /// using the resulting pointer are not automatically accounted for by the dirty bitmap /// tracking functionality. /// /// Should only be used for passing this region to ioctls for setting guest memory. pub fn as_ptr(&self) -> *mut u8 { self.addr } /// Returns the size of this region. pub fn size(&self) -> usize { self.size } /// Returns information regarding the offset into the file backing this region (if any). pub fn file_offset(&self) -> Option<&FileOffset> { self.file_offset.as_ref() } /// Returns a reference to the inner bitmap object. pub fn bitmap(&self) -> &B { &self.bitmap } } impl VolatileMemory for MmapRegion { type B = B; fn len(&self) -> usize { self.size } fn get_slice( &self, offset: usize, count: usize, ) -> volatile_memory::Result>> { let end = compute_offset(offset, count)?; if end > self.size { return Err(volatile_memory::Error::OutOfBounds { addr: end }); } // Safe because we checked that offset + count was within our range and we only ever hand // out volatile accessors. Ok(unsafe { VolatileSlice::with_bitmap( self.addr.add(offset), count, self.bitmap.slice_at(offset), None, ) }) } } impl Drop for MmapRegion { fn drop(&mut self) { // This is safe because we mmap the area at addr ourselves, and nobody // else is holding a reference to it. // Note that the size must be set to 0 when using MEM_RELEASE, // otherwise the function fails. unsafe { let ret_val = VirtualFree(self.addr as *mut libc::c_void, 0, MEM_RELEASE); if ret_val == 0 { let err = GetLastError(); // We can't use any fancy logger here, yet we want to // pin point memory leaks. println!( "WARNING: Could not deallocate mmap region. \ Address: {:?}. Size: {}. Error: {}", self.addr, self.size, err ) } } } } #[cfg(test)] mod tests { use std::os::windows::io::FromRawHandle; use crate::bitmap::AtomicBitmap; use crate::guest_memory::FileOffset; use crate::mmap_windows::INVALID_HANDLE_VALUE; type MmapRegion = super::MmapRegion<()>; #[test] fn map_invalid_handle() { let file = unsafe { std::fs::File::from_raw_handle(INVALID_HANDLE_VALUE) }; let file_offset = FileOffset::new(file, 0); let e = MmapRegion::from_file(file_offset, 1024).unwrap_err(); assert_eq!(e.raw_os_error(), Some(libc::EBADF)); } #[test] fn test_dirty_tracking() { // Using the `crate` prefix because we aliased `MmapRegion` to `MmapRegion<()>` for // the rest of the unit tests above. 
let m = crate::MmapRegion::::new(0x1_0000).unwrap(); crate::bitmap::tests::test_volatile_memory(&m); } } vm-memory-0.14.0/src/mmap_xen.rs000064400000000000000000001071351046102023000146060ustar 00000000000000// Copyright 2023 Linaro Ltd. All Rights Reserved. // Viresh Kumar // // Xen specific memory mapping implementations // // SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause //! Helper structure for working with mmap'ed memory regions on Xen. use bitflags::bitflags; use libc::{c_int, c_void, MAP_SHARED, _SC_PAGESIZE}; use std::{io, mem::size_of, os::raw::c_ulong, os::unix::io::AsRawFd, ptr::null_mut, result}; use vmm_sys_util::{ fam::{Error as FamError, FamStruct, FamStructWrapper}, generate_fam_struct_impl, ioctl::{ioctl_expr, _IOC_NONE}, }; // Use a dummy ioctl implementation for tests instead. #[cfg(not(test))] use vmm_sys_util::ioctl::ioctl_with_ref; #[cfg(test)] use tests::ioctl_with_ref; use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::{FileOffset, GuestAddress}; use crate::mmap::{check_file_offset, NewBitmap}; use crate::volatile_memory::{self, VolatileMemory, VolatileSlice}; /// Error conditions that may arise when creating a new `MmapRegion` object. #[derive(Debug, thiserror::Error)] pub enum Error { /// The specified file offset and length cause overflow when added. #[error("The specified file offset and length cause overflow when added")] InvalidOffsetLength, /// The forbidden `MAP_FIXED` flag was specified. #[error("The forbidden `MAP_FIXED` flag was specified")] MapFixed, /// A mapping with offset + length > EOF was attempted. #[error("The specified file offset and length is greater then file length")] MappingPastEof, /// The `mmap` call returned an error. #[error("{0}")] Mmap(io::Error), /// Seeking the end of the file returned an error. #[error("Error seeking the end of the file: {0}")] SeekEnd(io::Error), /// Seeking the start of the file returned an error. #[error("Error seeking the start of the file: {0}")] SeekStart(io::Error), /// Invalid file offset. #[error("Invalid file offset")] InvalidFileOffset, /// Memory mapped in advance. #[error("Memory mapped in advance")] MappedInAdvance, /// Invalid Xen mmap flags. #[error("Invalid Xen Mmap flags: {0:x}")] MmapFlags(u32), /// Fam error. #[error("Fam error: {0}")] Fam(FamError), /// Unexpected error. #[error("Unexpected error")] UnexpectedError, } type Result = result::Result; /// `MmapRange` represents a range of arguments required to create Mmap regions. #[derive(Clone, Debug)] pub struct MmapRange { size: usize, file_offset: Option, prot: Option, flags: Option, hugetlbfs: Option, addr: GuestAddress, mmap_flags: u32, mmap_data: u32, } impl MmapRange { /// Creates instance of the range with multiple arguments. pub fn new( size: usize, file_offset: Option, addr: GuestAddress, mmap_flags: u32, mmap_data: u32, ) -> Self { Self { size, file_offset, prot: None, flags: None, hugetlbfs: None, addr, mmap_flags, mmap_data, } } /// Creates instance of the range for `MmapXenFlags::UNIX` type mapping. pub fn new_unix(size: usize, file_offset: Option, addr: GuestAddress) -> Self { let flags = Some(match file_offset { Some(_) => libc::MAP_NORESERVE | libc::MAP_SHARED, None => libc::MAP_ANONYMOUS | libc::MAP_PRIVATE, }); Self { size, file_offset, prot: None, flags, hugetlbfs: None, addr, mmap_flags: MmapXenFlags::UNIX.bits(), mmap_data: 0, } } /// Set the prot of the range. pub fn set_prot(&mut self, prot: i32) { self.prot = Some(prot) } /// Set the flags of the range. 
pub fn set_flags(&mut self, flags: i32) { self.flags = Some(flags) } /// Set the hugetlbfs of the range. pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) { self.hugetlbfs = Some(hugetlbfs) } } /// Helper structure for working with mmaped memory regions with Xen. /// /// The structure is used for accessing the guest's physical memory by mmapping it into /// the current process. /// /// # Limitations /// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's /// physical memory may be mapped into the current process due to the limited virtual address /// space size of the process. #[derive(Debug)] pub struct MmapRegion { bitmap: B, size: usize, prot: i32, flags: i32, file_offset: Option, hugetlbfs: Option, mmap: MmapXen, } // SAFETY: Send and Sync aren't automatically inherited for the raw address pointer. // Accessing that pointer is only done through the stateless interface which // allows the object to be shared by multiple threads without a decrease in // safety. unsafe impl Send for MmapRegion {} // SAFETY: See comment above. unsafe impl Sync for MmapRegion {} impl MmapRegion { /// Creates a shared anonymous mapping of `size` bytes. /// /// # Arguments /// * `range` - An instance of type `MmapRange`. /// /// # Examples /// * Write a slice at guest address 0x1200 with Xen's Grant mapping. /// /// ``` /// use std::fs::File; /// use std::path::Path; /// use vm_memory::{ /// Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion, /// MmapXenFlags, /// }; /// # use vmm_sys_util::tempfile::TempFile; /// /// let addr = GuestAddress(0x1000); /// # if false { /// let file = Some(FileOffset::new( /// File::open(Path::new("/dev/xen/gntdev")).expect("Could not open file"), /// 0, /// )); /// /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::GRANT.bits(), 0); /// # } /// # // We need a UNIX mapping for tests to succeed. /// # let range = MmapRange::new_unix(0x400, None, addr); /// /// let r = GuestRegionMmap::new( /// MmapRegion::<()>::from_range(range).expect("Could not create mmap region"), /// addr, /// ) /// .expect("Could not create guest region"); /// /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory"); /// let res = gm /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) /// .expect("Could not write to guest memory"); /// assert_eq!(5, res); /// ``` /// /// * Write a slice at guest address 0x1200 with Xen's Foreign mapping. /// /// ``` /// use std::fs::File; /// use std::path::Path; /// use vm_memory::{ /// Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion, /// MmapXenFlags, /// }; /// # use vmm_sys_util::tempfile::TempFile; /// /// let addr = GuestAddress(0x1000); /// # if false { /// let file = Some(FileOffset::new( /// File::open(Path::new("/dev/xen/privcmd")).expect("Could not open file"), /// 0, /// )); /// /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::FOREIGN.bits(), 0); /// # } /// # // We need a UNIX mapping for tests to succeed. 
/// # let range = MmapRange::new_unix(0x400, None, addr); /// /// let r = GuestRegionMmap::new( /// MmapRegion::<()>::from_range(range).expect("Could not create mmap region"), /// addr, /// ) /// .expect("Could not create guest region"); /// /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory"); /// let res = gm /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) /// .expect("Could not write to guest memory"); /// assert_eq!(5, res); /// ``` pub fn from_range(mut range: MmapRange) -> Result { if range.prot.is_none() { range.prot = Some(libc::PROT_READ | libc::PROT_WRITE); } match range.flags { Some(flags) => { if flags & libc::MAP_FIXED != 0 { // Forbid MAP_FIXED, as it doesn't make sense in this context, and is pretty dangerous // in general. return Err(Error::MapFixed); } } None => range.flags = Some(libc::MAP_NORESERVE | libc::MAP_SHARED), } let mmap = MmapXen::new(&range)?; Ok(MmapRegion { bitmap: B::with_len(range.size), size: range.size, prot: range.prot.ok_or(Error::UnexpectedError)?, flags: range.flags.ok_or(Error::UnexpectedError)?, file_offset: range.file_offset, hugetlbfs: range.hugetlbfs, mmap, }) } } impl MmapRegion { /// Returns a pointer to the beginning of the memory region. Mutable accesses performed /// using the resulting pointer are not automatically accounted for by the dirty bitmap /// tracking functionality. /// /// Should only be used for passing this region to ioctls for setting guest memory. pub fn as_ptr(&self) -> *mut u8 { self.mmap.addr() } /// Returns the size of this region. pub fn size(&self) -> usize { self.size } /// Returns information regarding the offset into the file backing this region (if any). pub fn file_offset(&self) -> Option<&FileOffset> { self.file_offset.as_ref() } /// Returns the value of the `prot` parameter passed to `mmap` when mapping this region. pub fn prot(&self) -> i32 { self.prot } /// Returns the value of the `flags` parameter passed to `mmap` when mapping this region. pub fn flags(&self) -> i32 { self.flags } /// Checks whether this region and `other` are backed by overlapping /// [`FileOffset`](struct.FileOffset.html) objects. /// /// This is mostly a sanity check available for convenience, as different file descriptors /// can alias the same file. pub fn fds_overlap(&self, other: &MmapRegion) -> bool { if let Some(f_off1) = self.file_offset() { if let Some(f_off2) = other.file_offset() { if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() { let s1 = f_off1.start(); let s2 = f_off2.start(); let l1 = self.len() as u64; let l2 = other.len() as u64; if s1 < s2 { return s1 + l1 > s2; } else { return s2 + l2 > s1; } } } } false } /// Set the hugetlbfs of the region pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) { self.hugetlbfs = Some(hugetlbfs) } /// Returns `true` if the region is hugetlbfs pub fn is_hugetlbfs(&self) -> Option { self.hugetlbfs } /// Returns a reference to the inner bitmap object. pub fn bitmap(&self) -> &B { &self.bitmap } /// Returns xen mmap flags. pub fn xen_mmap_flags(&self) -> u32 { self.mmap.flags() } /// Returns xen mmap data. 
pub fn xen_mmap_data(&self) -> u32 { self.mmap.data() } } impl VolatileMemory for MmapRegion { type B = B; fn len(&self) -> usize { self.size } fn get_slice( &self, offset: usize, count: usize, ) -> volatile_memory::Result>> { let _ = self.compute_end_offset(offset, count)?; let mmap_info = if self.mmap.mmap_in_advance() { None } else { Some(&self.mmap) }; Ok( // SAFETY: Safe because we checked that offset + count was within our range and we only // ever hand out volatile accessors. unsafe { VolatileSlice::with_bitmap( self.as_ptr().add(offset), count, self.bitmap.slice_at(offset), mmap_info, ) }, ) } } #[derive(Clone, Debug, PartialEq)] struct MmapUnix { addr: *mut u8, size: usize, } impl MmapUnix { fn new(size: usize, prot: i32, flags: i32, fd: i32, f_offset: u64) -> Result { let addr = // SAFETY: This is safe because we're not allowing MAP_FIXED, and invalid parameters // cannot break Rust safety guarantees (things may change if we're mapping /dev/mem or // some wacky file). unsafe { libc::mmap(null_mut(), size, prot, flags, fd, f_offset as libc::off_t) }; if addr == libc::MAP_FAILED { return Err(Error::Mmap(io::Error::last_os_error())); } Ok(Self { addr: addr as *mut u8, size, }) } fn addr(&self) -> *mut u8 { self.addr } } impl Drop for MmapUnix { fn drop(&mut self) { // SAFETY: This is safe because we mmap the area at addr ourselves, and nobody // else is holding a reference to it. unsafe { libc::munmap(self.addr as *mut libc::c_void, self.size); } } } // Bit mask for the vhost-user xen mmap message. bitflags! { /// Flags for the Xen mmap message. #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct MmapXenFlags: u32 { /// Standard Unix memory mapping. const UNIX = 0x0; /// Xen foreign memory (accessed via /dev/privcmd). const FOREIGN = 0x1; /// Xen grant memory (accessed via /dev/gntdev). const GRANT = 0x2; /// Xen no advance mapping. const NO_ADVANCE_MAP = 0x8; /// All valid mappings. const ALL = Self::FOREIGN.bits() | Self::GRANT.bits(); } } impl MmapXenFlags { /// Mmap flags are valid. pub fn is_valid(&self) -> bool { // only one of unix, foreign or grant should be set and mmap_in_advance() should be true // with foreign and unix. if self.is_grant() { !self.is_foreign() } else if self.is_foreign() || self.is_unix() { self.mmap_in_advance() } else { false } } /// Is standard Unix memory. pub fn is_unix(&self) -> bool { self.bits() == Self::UNIX.bits() } /// Is xen foreign memory. pub fn is_foreign(&self) -> bool { self.contains(Self::FOREIGN) } /// Is xen grant memory. pub fn is_grant(&self) -> bool { self.contains(Self::GRANT) } /// Can mmap entire region in advance. pub fn mmap_in_advance(&self) -> bool { !self.contains(Self::NO_ADVANCE_MAP) } } fn page_size() -> u64 { // SAFETY: Safe because this call just returns the page size and doesn't have any side effects. unsafe { libc::sysconf(_SC_PAGESIZE) as u64 } } fn pages(size: usize) -> (usize, usize) { let page_size = page_size() as usize; let num = (size + page_size - 1) / page_size; (num, page_size * num) } fn validate_file(file_offset: &Option) -> Result<(i32, u64)> { let file_offset = match file_offset { Some(f) => f, None => return Err(Error::InvalidFileOffset), }; let fd = file_offset.file().as_raw_fd(); let f_offset = file_offset.start(); // We don't allow file offsets with Xen foreign mappings. if f_offset != 0 { return Err(Error::InvalidOffsetLength); } Ok((fd, f_offset)) } // Xen Foreign memory mapping interface. 
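// Exposition-only sketch; not part of the upstream crate. These checks illustrate how the
// page-rounding helper and the mmap-flag validity rules defined above behave; they rely only
// on the private `page_size`/`pages` helpers and `MmapXenFlags` from this module.
#[cfg(test)]
mod helper_sketch {
    use super::{page_size, pages, MmapXenFlags};

    #[test]
    fn pages_round_up_to_whole_pages() {
        let ps = page_size() as usize;
        // A single byte still occupies one full page.
        assert_eq!(pages(1), (1, ps));
        // One byte past a page boundary rounds up to two pages.
        assert_eq!(pages(ps + 1), (2, 2 * ps));
    }

    #[test]
    fn flag_validity() {
        // Unix and foreign mappings must be mapped in advance.
        assert!(MmapXenFlags::UNIX.is_valid());
        assert!(MmapXenFlags::FOREIGN.is_valid());
        assert!(!(MmapXenFlags::FOREIGN | MmapXenFlags::NO_ADVANCE_MAP).is_valid());
        // Grant mappings are allowed to defer the actual mmap.
        assert!((MmapXenFlags::GRANT | MmapXenFlags::NO_ADVANCE_MAP).is_valid());
    }
}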
trait MmapXenTrait: std::fmt::Debug { fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result; fn addr(&self) -> *mut u8; } // Standard Unix memory mapping for testing other crates. #[derive(Clone, Debug, PartialEq)] struct MmapXenUnix(MmapUnix); impl MmapXenUnix { fn new(range: &MmapRange) -> Result { let (fd, offset) = if let Some(ref f_off) = range.file_offset { check_file_offset(f_off, range.size)?; (f_off.file().as_raw_fd(), f_off.start()) } else { (-1, 0) }; Ok(Self(MmapUnix::new( range.size, range.prot.ok_or(Error::UnexpectedError)?, range.flags.ok_or(Error::UnexpectedError)?, fd, offset, )?)) } } impl MmapXenTrait for MmapXenUnix { #[allow(unused_variables)] fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result { Err(Error::MappedInAdvance) } fn addr(&self) -> *mut u8 { self.0.addr() } } // Privcmd mmap batch v2 command // // include/uapi/xen/privcmd.h: `privcmd_mmapbatch_v2` #[repr(C)] #[derive(Debug, Copy, Clone)] struct PrivCmdMmapBatchV2 { // number of pages to populate num: u32, // target domain domid: u16, // virtual address addr: *mut c_void, // array of mfns arr: *const u64, // array of error codes err: *mut c_int, } const XEN_PRIVCMD_TYPE: u32 = 'P' as u32; // #define IOCTL_PRIVCMD_MMAPBATCH_V2 _IOC(_IOC_NONE, 'P', 4, sizeof(privcmd_mmapbatch_v2_t)) fn ioctl_privcmd_mmapbatch_v2() -> c_ulong { ioctl_expr( _IOC_NONE, XEN_PRIVCMD_TYPE, 4, size_of::() as u32, ) } // Xen foreign memory specific implementation. #[derive(Clone, Debug, PartialEq)] struct MmapXenForeign { domid: u32, guest_base: GuestAddress, unix_mmap: MmapUnix, fd: i32, } impl AsRawFd for MmapXenForeign { fn as_raw_fd(&self) -> i32 { self.fd } } impl MmapXenForeign { fn new(range: &MmapRange) -> Result { let (fd, f_offset) = validate_file(&range.file_offset)?; let (count, size) = pages(range.size); let unix_mmap = MmapUnix::new( size, range.prot.ok_or(Error::UnexpectedError)?, range.flags.ok_or(Error::UnexpectedError)? | MAP_SHARED, fd, f_offset, )?; let foreign = Self { domid: range.mmap_data, guest_base: range.addr, unix_mmap, fd, }; foreign.mmap_ioctl(count)?; Ok(foreign) } // Ioctl to pass additional information to mmap infrastructure of privcmd driver. fn mmap_ioctl(&self, count: usize) -> Result<()> { let base = self.guest_base.0 / page_size(); let mut pfn = Vec::with_capacity(count); for i in 0..count { pfn.push(base + i as u64); } let mut err: Vec = vec![0; count]; let map = PrivCmdMmapBatchV2 { num: count as u32, domid: self.domid as u16, addr: self.addr() as *mut c_void, arr: pfn.as_ptr(), err: err.as_mut_ptr(), }; // SAFETY: This is safe because the ioctl guarantees to not access memory beyond `map`. let ret = unsafe { ioctl_with_ref(self, ioctl_privcmd_mmapbatch_v2(), &map) }; if ret == 0 { Ok(()) } else { Err(Error::Mmap(io::Error::last_os_error())) } } } impl MmapXenTrait for MmapXenForeign { #[allow(unused_variables)] fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result { Err(Error::MappedInAdvance) } fn addr(&self) -> *mut u8 { self.unix_mmap.addr() } } // Xen Grant memory mapping interface. const XEN_GRANT_ADDR_OFF: u64 = 1 << 63; // Grant reference // // include/uapi/xen/gntdev.h: `ioctl_gntdev_grant_ref` #[repr(C)] #[derive(Copy, Clone, Debug, Default, PartialEq)] struct GntDevGrantRef { // The domain ID of the grant to be mapped. domid: u32, // The grant reference of the grant to be mapped. 
reference: u32, } #[repr(C)] #[derive(Debug, Default, PartialEq, Eq)] struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); impl __IncompleteArrayField { #[inline] unsafe fn as_ptr(&self) -> *const T { self as *const __IncompleteArrayField as *const T } #[inline] unsafe fn as_mut_ptr(&mut self) -> *mut T { self as *mut __IncompleteArrayField as *mut T } #[inline] unsafe fn as_slice(&self, len: usize) -> &[T] { ::std::slice::from_raw_parts(self.as_ptr(), len) } #[inline] unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) } } // Grant dev mapping reference // // include/uapi/xen/gntdev.h: `ioctl_gntdev_map_grant_ref` #[repr(C)] #[derive(Debug, Default)] struct GntDevMapGrantRef { // The number of grants to be mapped. count: u32, // Unused padding pad: u32, // The offset to be used on a subsequent call to mmap(). index: u64, // Array of grant references, of size @count. refs: __IncompleteArrayField, } generate_fam_struct_impl!( GntDevMapGrantRef, GntDevGrantRef, refs, u32, count, usize::MAX ); type GntDevMapGrantRefWrapper = FamStructWrapper; impl GntDevMapGrantRef { fn new(domid: u32, base: u32, count: usize) -> Result { let mut wrapper = GntDevMapGrantRefWrapper::new(count).map_err(Error::Fam)?; let refs = wrapper.as_mut_slice(); // GntDevMapGrantRef's pad and index are initialized to 0 by Fam layer. for (i, r) in refs.iter_mut().enumerate().take(count) { r.domid = domid; r.reference = base + i as u32; } Ok(wrapper) } } // Grant dev un-mapping reference // // include/uapi/xen/gntdev.h: `ioctl_gntdev_unmap_grant_ref` #[repr(C)] #[derive(Debug, Copy, Clone)] struct GntDevUnmapGrantRef { // The offset returned by the map operation. index: u64, // The number of grants to be unmapped. count: u32, // Unused padding pad: u32, } impl GntDevUnmapGrantRef { fn new(index: u64, count: u32) -> Self { Self { index, count, pad: 0, } } } const XEN_GNTDEV_TYPE: u32 = 'G' as u32; // #define IOCTL_GNTDEV_MAP_GRANT_REF _IOC(_IOC_NONE, 'G', 0, sizeof(ioctl_gntdev_map_grant_ref)) fn ioctl_gntdev_map_grant_ref() -> c_ulong { ioctl_expr( _IOC_NONE, XEN_GNTDEV_TYPE, 0, (size_of::() + size_of::()) as u32, ) } // #define IOCTL_GNTDEV_UNMAP_GRANT_REF _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) fn ioctl_gntdev_unmap_grant_ref() -> c_ulong { ioctl_expr( _IOC_NONE, XEN_GNTDEV_TYPE, 1, size_of::() as u32, ) } // Xen grant memory specific implementation. #[derive(Clone, Debug)] struct MmapXenGrant { guest_base: GuestAddress, unix_mmap: Option, file_offset: FileOffset, flags: i32, size: usize, index: u64, domid: u32, } impl AsRawFd for MmapXenGrant { fn as_raw_fd(&self) -> i32 { self.file_offset.file().as_raw_fd() } } impl MmapXenGrant { fn new(range: &MmapRange, mmap_flags: MmapXenFlags) -> Result { validate_file(&range.file_offset)?; let mut grant = Self { guest_base: range.addr, unix_mmap: None, file_offset: range.file_offset.as_ref().unwrap().clone(), flags: range.flags.ok_or(Error::UnexpectedError)?, size: 0, index: 0, domid: range.mmap_data, }; // Region can't be mapped in advance, partial mapping will be done later via // `MmapXenSlice`. 
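// Exposition-only note; not part of the upstream crate. When `NO_ADVANCE_MAP` is set, the
// branch below is skipped: `addr()` stays null and each access is mapped on demand through
// `mmap_slice()`. A caller would build such a region roughly as follows (the `gntdev` handle
// and `domid` value are hypothetical):
//
//     let flags = (MmapXenFlags::GRANT | MmapXenFlags::NO_ADVANCE_MAP).bits();
//     let range = MmapRange::new(0x1000, Some(FileOffset::new(gntdev, 0)),
//                                GuestAddress(0x1000), flags, domid);
//     let region = MmapRegion::<()>::from_range(range)?; // nothing is mapped yet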
if mmap_flags.mmap_in_advance() { let (unix_mmap, index) = grant.mmap_range( range.addr, range.size, range.prot.ok_or(Error::UnexpectedError)?, )?; grant.unix_mmap = Some(unix_mmap); grant.index = index; grant.size = range.size; } Ok(grant) } fn mmap_range(&self, addr: GuestAddress, size: usize, prot: i32) -> Result<(MmapUnix, u64)> { let (count, size) = pages(size); let index = self.mmap_ioctl(addr, count)?; let unix_mmap = MmapUnix::new(size, prot, self.flags, self.as_raw_fd(), index)?; Ok((unix_mmap, index)) } fn unmap_range(&self, unix_mmap: MmapUnix, size: usize, index: u64) { let (count, _) = pages(size); // Unmap the address first. drop(unix_mmap); self.unmap_ioctl(count as u32, index).unwrap(); } fn mmap_ioctl(&self, addr: GuestAddress, count: usize) -> Result { let base = ((addr.0 & !XEN_GRANT_ADDR_OFF) / page_size()) as u32; let wrapper = GntDevMapGrantRef::new(self.domid, base, count)?; let reference = wrapper.as_fam_struct_ref(); // SAFETY: This is safe because the ioctl guarantees to not access memory beyond reference. let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_map_grant_ref(), reference) }; if ret == 0 { Ok(reference.index) } else { Err(Error::Mmap(io::Error::last_os_error())) } } fn unmap_ioctl(&self, count: u32, index: u64) -> Result<()> { let unmap = GntDevUnmapGrantRef::new(index, count); // SAFETY: This is safe because the ioctl guarantees to not access memory beyond unmap. let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_unmap_grant_ref(), &unmap) }; if ret == 0 { Ok(()) } else { Err(Error::Mmap(io::Error::last_os_error())) } } } impl MmapXenTrait for MmapXenGrant { // Maps a slice out of the entire region. fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result { MmapXenSlice::new_with(self.clone(), addr as usize, prot, len) } fn addr(&self) -> *mut u8 { if let Some(ref unix_mmap) = self.unix_mmap { unix_mmap.addr() } else { null_mut() } } } impl Drop for MmapXenGrant { fn drop(&mut self) { if let Some(unix_mmap) = self.unix_mmap.take() { self.unmap_range(unix_mmap, self.size, self.index); } } } #[derive(Debug)] pub(crate) struct MmapXenSlice { grant: Option, unix_mmap: Option, addr: *mut u8, size: usize, index: u64, } impl MmapXenSlice { fn raw(addr: *mut u8) -> Self { Self { grant: None, unix_mmap: None, addr, size: 0, index: 0, } } fn new_with(grant: MmapXenGrant, offset: usize, prot: i32, size: usize) -> Result { let page_size = page_size() as usize; let page_base: usize = (offset / page_size) * page_size; let offset = offset - page_base; let size = offset + size; let addr = grant.guest_base.0 + page_base as u64; let (unix_mmap, index) = grant.mmap_range(GuestAddress(addr), size, prot)?; // SAFETY: We have already mapped the range including offset. let addr = unsafe { unix_mmap.addr().add(offset) }; Ok(Self { grant: Some(grant), unix_mmap: Some(unix_mmap), addr, size, index, }) } // Mapped address for the region. pub(crate) fn addr(&self) -> *mut u8 { self.addr } } impl Drop for MmapXenSlice { fn drop(&mut self) { // Unmaps memory automatically once this instance goes out of scope. 
if let Some(unix_mmap) = self.unix_mmap.take() { self.grant .as_ref() .unwrap() .unmap_range(unix_mmap, self.size, self.index); } } } #[derive(Debug)] pub struct MmapXen { xen_flags: MmapXenFlags, domid: u32, mmap: Box, } impl MmapXen { fn new(range: &MmapRange) -> Result { let xen_flags = match MmapXenFlags::from_bits(range.mmap_flags) { Some(flags) => flags, None => return Err(Error::MmapFlags(range.mmap_flags)), }; if !xen_flags.is_valid() { return Err(Error::MmapFlags(xen_flags.bits())); } Ok(Self { xen_flags, domid: range.mmap_data, mmap: if xen_flags.is_foreign() { Box::new(MmapXenForeign::new(range)?) } else if xen_flags.is_grant() { Box::new(MmapXenGrant::new(range, xen_flags)?) } else { Box::new(MmapXenUnix::new(range)?) }, }) } fn addr(&self) -> *mut u8 { self.mmap.addr() } fn flags(&self) -> u32 { self.xen_flags.bits() } fn data(&self) -> u32 { self.domid } fn mmap_in_advance(&self) -> bool { self.xen_flags.mmap_in_advance() } pub(crate) fn mmap( mmap_xen: Option<&Self>, addr: *mut u8, prot: i32, len: usize, ) -> MmapXenSlice { match mmap_xen { Some(mmap_xen) => mmap_xen.mmap.mmap_slice(addr, prot, len).unwrap(), None => MmapXenSlice::raw(addr), } } } #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] use super::*; use vmm_sys_util::tempfile::TempFile; // Adding a helper method to extract the errno within an Error::Mmap(e), or return a // distinctive value when the error is represented by another variant. impl Error { fn raw_os_error(&self) -> i32 { match self { Error::Mmap(e) => e.raw_os_error().unwrap(), _ => std::i32::MIN, } } } #[allow(unused_variables)] pub unsafe fn ioctl_with_ref(fd: &F, req: c_ulong, arg: &T) -> c_int { 0 } impl MmapRange { fn initialized(is_file: bool) -> Self { let file_offset = if is_file { Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0)) } else { None }; let mut range = MmapRange::new_unix(0x1000, file_offset, GuestAddress(0x1000)); range.prot = Some(libc::PROT_READ | libc::PROT_WRITE); range.mmap_data = 1; range } } impl MmapRegion { pub fn new(size: usize) -> Result { let range = MmapRange::new_unix(size, None, GuestAddress(0)); Self::from_range(range) } } #[test] fn test_mmap_xen_failures() { let mut range = MmapRange::initialized(true); // Invalid flags range.mmap_flags = 16; let r = MmapXen::new(&range); assert_eq!( format!("{:?}", r.unwrap_err()), format!("MmapFlags({})", range.mmap_flags), ); range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::GRANT.bits(); let r = MmapXen::new(&range); assert_eq!( format!("{:?}", r.unwrap_err()), format!("MmapFlags({:x})", MmapXenFlags::ALL.bits()), ); range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits(); let r = MmapXen::new(&range); assert_eq!( format!("{:?}", r.unwrap_err()), format!( "MmapFlags({:x})", MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits(), ), ); } #[test] fn test_mmap_xen_success() { let mut range = MmapRange::initialized(true); range.mmap_flags = MmapXenFlags::FOREIGN.bits(); let r = MmapXen::new(&range).unwrap(); assert_eq!(r.flags(), range.mmap_flags); assert_eq!(r.data(), range.mmap_data); assert_ne!(r.addr(), null_mut()); assert!(r.mmap_in_advance()); range.mmap_flags = MmapXenFlags::GRANT.bits(); let r = MmapXen::new(&range).unwrap(); assert_eq!(r.flags(), range.mmap_flags); assert_eq!(r.data(), range.mmap_data); assert_ne!(r.addr(), null_mut()); assert!(r.mmap_in_advance()); range.mmap_flags = MmapXenFlags::GRANT.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits(); let r = 
MmapXen::new(&range).unwrap(); assert_eq!(r.flags(), range.mmap_flags); assert_eq!(r.data(), range.mmap_data); assert_eq!(r.addr(), null_mut()); assert!(!r.mmap_in_advance()); } #[test] fn test_foreign_map_failure() { let mut range = MmapRange::initialized(true); range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0)); range.prot = None; let r = MmapXenForeign::new(&range); assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); let mut range = MmapRange::initialized(true); range.flags = None; let r = MmapXenForeign::new(&range); assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); let mut range = MmapRange::initialized(true); range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); let r = MmapXenForeign::new(&range); assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength"); let mut range = MmapRange::initialized(true); range.size = 0; let r = MmapXenForeign::new(&range); assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL); } #[test] fn test_foreign_map_success() { let range = MmapRange::initialized(true); let r = MmapXenForeign::new(&range).unwrap(); assert_ne!(r.addr(), null_mut()); assert_eq!(r.domid, range.mmap_data); assert_eq!(r.guest_base, range.addr); } #[test] fn test_grant_map_failure() { let mut range = MmapRange::initialized(true); range.prot = None; let r = MmapXenGrant::new(&range, MmapXenFlags::empty()); assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); let mut range = MmapRange::initialized(true); range.prot = None; // Protection isn't used for no-advance mappings MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap(); let mut range = MmapRange::initialized(true); range.flags = None; let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); let mut range = MmapRange::initialized(true); range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength"); let mut range = MmapRange::initialized(true); range.size = 0; let r = MmapXenGrant::new(&range, MmapXenFlags::empty()); assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL); } #[test] fn test_grant_map_success() { let range = MmapRange::initialized(true); let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap(); assert_eq!(r.addr(), null_mut()); assert_eq!(r.domid, range.mmap_data); assert_eq!(r.guest_base, range.addr); let mut range = MmapRange::initialized(true); // Size isn't used with no-advance mapping. range.size = 0; MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap(); let range = MmapRange::initialized(true); let r = MmapXenGrant::new(&range, MmapXenFlags::empty()).unwrap(); assert_ne!(r.addr(), null_mut()); assert_eq!(r.domid, range.mmap_data); assert_eq!(r.guest_base, range.addr); } #[test] fn test_grant_ref_alloc() { let wrapper = GntDevMapGrantRef::new(0, 0x1000, 0x100).unwrap(); let r = wrapper.as_fam_struct_ref(); assert_eq!(r.count, 0x100); assert_eq!(r.pad, 0); assert_eq!(r.index, 0); } } vm-memory-0.14.0/src/volatile_memory.rs000064400000000000000000002520601046102023000162070ustar 00000000000000// Portions Copyright 2019 Red Hat, Inc. // // Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRT-PARTY file. 
// // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause //! Types for volatile access to memory. //! //! Two of the core rules for safe rust is no data races and no aliased mutable references. //! `VolatileRef` and `VolatileSlice`, along with types that produce those which implement //! `VolatileMemory`, allow us to sidestep that rule by wrapping pointers that absolutely have to be //! accessed volatile. Some systems really do need to operate on shared memory and can't have the //! compiler reordering or eliding access because it has no visibility into what other systems are //! doing with that hunk of memory. //! //! For the purposes of maintaining safety, volatile memory has some rules of its own: //! 1. No references or slices to volatile memory (`&` or `&mut`). //! 2. Access should always been done with a volatile read or write. //! The First rule is because having references of any kind to memory considered volatile would //! violate pointer aliasing. The second is because unvolatile accesses are inherently undefined if //! done concurrently without synchronization. With volatile access we know that the compiler has //! not reordered or elided the access. use std::cmp::min; use std::io::{self, Read, Write}; use std::marker::PhantomData; use std::mem::{align_of, size_of}; use std::ptr::copy; use std::ptr::{read_volatile, write_volatile}; use std::result; use std::sync::atomic::Ordering; use std::usize; use crate::atomic_integer::AtomicInteger; use crate::bitmap::{Bitmap, BitmapSlice, BS}; use crate::{AtomicAccess, ByteValued, Bytes}; #[cfg(all(feature = "backend-mmap", feature = "xen", unix))] use crate::mmap_xen::{MmapXen as MmapInfo, MmapXenSlice}; #[cfg(not(feature = "xen"))] type MmapInfo = std::marker::PhantomData<()>; use crate::io::{ReadVolatile, WriteVolatile}; use copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice}; /// `VolatileMemory` related errors. #[allow(missing_docs)] #[derive(Debug, thiserror::Error)] pub enum Error { /// `addr` is out of bounds of the volatile memory slice. #[error("address 0x{addr:x} is out of bounds")] OutOfBounds { addr: usize }, /// Taking a slice at `base` with `offset` would overflow `usize`. #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")] Overflow { base: usize, offset: usize }, /// Taking a slice whose size overflows `usize`. #[error("{nelements:?} elements of size {size:?} would overflow a usize")] TooBig { nelements: usize, size: usize }, /// Trying to obtain a misaligned reference. #[error("address 0x{addr:x} is not aligned to {alignment:?}")] Misaligned { addr: usize, alignment: usize }, /// Writing to memory failed #[error("{0}")] IOError(io::Error), /// Incomplete read or write #[error("only used {completed} bytes in {expected} long buffer")] PartialBuffer { expected: usize, completed: usize }, } /// Result of volatile memory operations. pub type Result = result::Result; /// Convenience function for computing `base + offset`. /// /// # Errors /// /// Returns [`Err(Error::Overflow)`](enum.Error.html#variant.Overflow) in case `base + offset` /// exceeds `usize::MAX`. /// /// # Examples /// /// ``` /// # use vm_memory::volatile_memory::compute_offset; /// # /// assert_eq!(108, compute_offset(100, 8).unwrap()); /// assert!(compute_offset(std::usize::MAX, 6).is_err()); /// ``` pub fn compute_offset(base: usize, offset: usize) -> Result { match base.checked_add(offset) { None => Err(Error::Overflow { base, offset }), Some(m) => Ok(m), } } /// Types that support raw volatile access to their data. 
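// Exposition-only sketch; not part of the upstream crate. It shows the rules above in practice,
// assuming the `Bytes` trait (re-exported at the crate root) is in scope: a `VolatileSlice`
// wraps a raw region and is accessed through volatile reads and writes rather than references.
//
//     let mut mem = [0u8; 32];
//     let vslice = VolatileSlice::from(&mut mem[..]);
//     vslice.write_obj(0xdead_beef_u32, 0).unwrap(); // volatile store
//     assert_eq!(vslice.read_obj::<u32>(0).unwrap(), 0xdead_beef);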
pub trait VolatileMemory { /// Type used for dirty memory tracking. type B: Bitmap; /// Gets the size of this slice. fn len(&self) -> usize; /// Check whether the region is empty. fn is_empty(&self) -> bool { self.len() == 0 } /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at /// `offset`. /// /// Note that the property `get_slice(offset, count).len() == count` MUST NOT be /// relied on for the correctness of unsafe code. This is a safe function inside of a /// safe trait, and implementors are under no obligation to follow its documentation. fn get_slice(&self, offset: usize, count: usize) -> Result>>; /// Gets a slice of memory for the entire region that supports volatile access. fn as_volatile_slice(&self) -> VolatileSlice> { self.get_slice(0, self.len()).unwrap() } /// Gets a `VolatileRef` at `offset`. fn get_ref(&self, offset: usize) -> Result>> { let slice = self.get_slice(offset, size_of::())?; assert_eq!( slice.len(), size_of::(), "VolatileMemory::get_slice(offset, count) returned slice of length != count." ); // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that // slice.addr is valid memory of size slice.len(). The assert above ensures that // the length of the slice is exactly enough to hold one `T`. Lastly, the lifetime of the // returned VolatileRef match that of the VolatileSlice returned by get_slice and thus the // lifetime one `self`. unsafe { Ok(VolatileRef::with_bitmap( slice.addr, slice.bitmap, slice.mmap, )) } } /// Returns a [`VolatileArrayRef`](struct.VolatileArrayRef.html) of `n` elements starting at /// `offset`. fn get_array_ref( &self, offset: usize, n: usize, ) -> Result>> { // Use isize to avoid problems with ptr::offset and ptr::add down the line. let nbytes = isize::try_from(n) .ok() .and_then(|n| n.checked_mul(size_of::() as isize)) .ok_or(Error::TooBig { nelements: n, size: size_of::(), })?; let slice = self.get_slice(offset, nbytes as usize)?; assert_eq!( slice.len(), nbytes as usize, "VolatileMemory::get_slice(offset, count) returned slice of length != count." ); // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that // slice.addr is valid memory of size slice.len(). The assert above ensures that // the length of the slice is exactly enough to hold `n` instances of `T`. Lastly, the lifetime of the // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the // lifetime one `self`. unsafe { Ok(VolatileArrayRef::with_bitmap( slice.addr, n, slice.bitmap, slice.mmap, )) } } /// Returns a reference to an instance of `T` at `offset`. /// /// # Safety /// To use this safely, the caller must guarantee that there are no other /// users of the given chunk of memory for the lifetime of the result. /// /// # Errors /// /// If the resulting pointer is not aligned, this method will return an /// [`Error`](enum.Error.html). unsafe fn aligned_as_ref(&self, offset: usize) -> Result<&T> { let slice = self.get_slice(offset, size_of::())?; slice.check_alignment(align_of::())?; assert_eq!( slice.len(), size_of::(), "VolatileMemory::get_slice(offset, count) returned slice of length != count." ); // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that // slice.addr is valid memory of size slice.len(). The assert above ensures that // the length of the slice is exactly enough to hold one `T`. 
// Dereferencing the pointer is safe because we check the alignment above, and the invariants // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the // lifetime one `self`. unsafe { Ok(&*(slice.addr as *const T)) } } /// Returns a mutable reference to an instance of `T` at `offset`. Mutable accesses performed /// using the resulting reference are not automatically accounted for by the dirty bitmap /// tracking functionality. /// /// # Safety /// /// To use this safely, the caller must guarantee that there are no other /// users of the given chunk of memory for the lifetime of the result. /// /// # Errors /// /// If the resulting pointer is not aligned, this method will return an /// [`Error`](enum.Error.html). unsafe fn aligned_as_mut(&self, offset: usize) -> Result<&mut T> { let slice = self.get_slice(offset, size_of::())?; slice.check_alignment(align_of::())?; assert_eq!( slice.len(), size_of::(), "VolatileMemory::get_slice(offset, count) returned slice of length != count." ); // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that // slice.addr is valid memory of size slice.len(). The assert above ensures that // the length of the slice is exactly enough to hold one `T`. // Dereferencing the pointer is safe because we check the alignment above, and the invariants // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the // lifetime one `self`. unsafe { Ok(&mut *(slice.addr as *mut T)) } } /// Returns a reference to an instance of `T` at `offset`. Mutable accesses performed /// using the resulting reference are not automatically accounted for by the dirty bitmap /// tracking functionality. /// /// # Errors /// /// If the resulting pointer is not aligned, this method will return an /// [`Error`](enum.Error.html). fn get_atomic_ref(&self, offset: usize) -> Result<&T> { let slice = self.get_slice(offset, size_of::())?; slice.check_alignment(align_of::())?; assert_eq!( slice.len(), size_of::(), "VolatileMemory::get_slice(offset, count) returned slice of length != count." ); // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that // slice.addr is valid memory of size slice.len(). The assert above ensures that // the length of the slice is exactly enough to hold one `T`. // Dereferencing the pointer is safe because we check the alignment above. Lastly, the lifetime of the // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the // lifetime one `self`. unsafe { Ok(&*(slice.addr as *const T)) } } /// Returns the sum of `base` and `offset` if the resulting address is valid. fn compute_end_offset(&self, base: usize, offset: usize) -> Result { let mem_end = compute_offset(base, offset)?; if mem_end > self.len() { return Err(Error::OutOfBounds { addr: mem_end }); } Ok(mem_end) } } impl<'a> From<&'a mut [u8]> for VolatileSlice<'a, ()> { fn from(value: &'a mut [u8]) -> Self { // SAFETY: Since we construct the VolatileSlice from a rust slice, we know that // the memory at addr `value as *mut u8` is valid for reads and writes (because mutable // reference) of len `value.len()`. Since the `VolatileSlice` inherits the lifetime `'a`, // it is not possible to access/mutate `value` while the VolatileSlice is alive. 
// // Note that it is possible for multiple aliasing sub slices of this `VolatileSlice`s to // be created through `VolatileSlice::subslice`. This is OK, as pointers are allowed to // alias, and it is impossible to get rust-style references from a `VolatileSlice`. unsafe { VolatileSlice::new(value.as_mut_ptr(), value.len()) } } } #[repr(C, packed)] struct Packed(T); /// A guard to perform mapping and protect unmapping of the memory. #[derive(Debug)] pub struct PtrGuard { addr: *mut u8, len: usize, // This isn't used anymore, but it protects the slice from getting unmapped while in use. // Once this goes out of scope, the memory is unmapped automatically. #[cfg(all(feature = "xen", unix))] _slice: MmapXenSlice, } #[allow(clippy::len_without_is_empty)] impl PtrGuard { #[allow(unused_variables)] fn new(mmap: Option<&MmapInfo>, addr: *mut u8, prot: i32, len: usize) -> Self { #[cfg(all(feature = "xen", unix))] let (addr, _slice) = { let slice = MmapInfo::mmap(mmap, addr, prot, len); (slice.addr(), slice) }; Self { addr, len, #[cfg(all(feature = "xen", unix))] _slice, } } fn read(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self { Self::new(mmap, addr, libc::PROT_READ, len) } /// Returns a non-mutable pointer to the beginning of the slice. pub fn as_ptr(&self) -> *const u8 { self.addr } /// Gets the length of the mapped region. pub fn len(&self) -> usize { self.len } } /// A mutable guard to perform mapping and protect unmapping of the memory. #[derive(Debug)] pub struct PtrGuardMut(PtrGuard); #[allow(clippy::len_without_is_empty)] impl PtrGuardMut { fn write(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self { Self(PtrGuard::new(mmap, addr, libc::PROT_WRITE, len)) } /// Returns a mutable pointer to the beginning of the slice. Mutable accesses performed /// using the resulting pointer are not automatically accounted for by the dirty bitmap /// tracking functionality. pub fn as_ptr(&self) -> *mut u8 { self.0.addr } /// Gets the length of the mapped region. pub fn len(&self) -> usize { self.0.len } } /// A slice of raw memory that supports volatile access. #[derive(Clone, Copy, Debug)] pub struct VolatileSlice<'a, B = ()> { addr: *mut u8, size: usize, bitmap: B, mmap: Option<&'a MmapInfo>, } impl<'a> VolatileSlice<'a, ()> { /// Creates a slice of raw memory that must support volatile access. /// /// # Safety /// /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller /// must also guarantee that all other users of the given chunk of memory are using volatile /// accesses. pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> { Self::with_bitmap(addr, size, (), None) } } impl<'a, B: BitmapSlice> VolatileSlice<'a, B> { /// Creates a slice of raw memory that must support volatile access, and uses the provided /// `bitmap` object for dirty page tracking. /// /// # Safety /// /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller /// must also guarantee that all other users of the given chunk of memory are using volatile /// accesses. pub unsafe fn with_bitmap( addr: *mut u8, size: usize, bitmap: B, mmap: Option<&'a MmapInfo>, ) -> VolatileSlice<'a, B> { VolatileSlice { addr, size, bitmap, mmap, } } /// Returns a pointer to the beginning of the slice. 
Mutable accesses performed /// using the resulting pointer are not automatically accounted for by the dirty bitmap /// tracking functionality. #[deprecated( since = "0.12.1", note = "Use `.ptr_guard()` or `.ptr_guard_mut()` instead" )] #[cfg(not(all(feature = "xen", unix)))] pub fn as_ptr(&self) -> *mut u8 { self.addr } /// Returns a guard for the pointer to the underlying memory. pub fn ptr_guard(&self) -> PtrGuard { PtrGuard::read(self.mmap, self.addr, self.len()) } /// Returns a mutable guard for the pointer to the underlying memory. pub fn ptr_guard_mut(&self) -> PtrGuardMut { PtrGuardMut::write(self.mmap, self.addr, self.len()) } /// Gets the size of this slice. pub fn len(&self) -> usize { self.size } /// Checks if the slice is empty. pub fn is_empty(&self) -> bool { self.size == 0 } /// Borrows the inner `BitmapSlice`. pub fn bitmap(&self) -> &B { &self.bitmap } /// Divides one slice into two at an index. /// /// # Example /// /// ``` /// # use vm_memory::{VolatileMemory, VolatileSlice}; /// # /// # // Create a buffer /// # let mut mem = [0u8; 32]; /// # /// # // Get a `VolatileSlice` from the buffer /// let vslice = VolatileSlice::from(&mut mem[..]); /// /// let (start, end) = vslice.split_at(8).expect("Could not split VolatileSlice"); /// assert_eq!(8, start.len()); /// assert_eq!(24, end.len()); /// ``` pub fn split_at(&self, mid: usize) -> Result<(Self, Self)> { let end = self.offset(mid)?; let start = // SAFETY: safe because self.offset() already checked the bounds unsafe { VolatileSlice::with_bitmap(self.addr, mid, self.bitmap.clone(), self.mmap) }; Ok((start, end)) } /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at /// `offset` with `count` length. /// /// The returned subslice is a copy of this slice with the address increased by `offset` bytes /// and the size set to `count` bytes. pub fn subslice(&self, offset: usize, count: usize) -> Result { let _ = self.compute_end_offset(offset, count)?; // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and // the lifetime is the same as the original slice. unsafe { Ok(VolatileSlice::with_bitmap( self.addr.add(offset), count, self.bitmap.slice_at(offset), self.mmap, )) } } /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at /// `offset`. /// /// The returned subslice is a copy of this slice with the address increased by `count` bytes /// and the size reduced by `count` bytes. pub fn offset(&self, count: usize) -> Result> { let new_addr = (self.addr as usize) .checked_add(count) .ok_or(Error::Overflow { base: self.addr as usize, offset: count, })?; let new_size = self .size .checked_sub(count) .ok_or(Error::OutOfBounds { addr: new_addr })?; // SAFETY: Safe because the memory has the same lifetime and points to a subset of the // memory of the original slice. unsafe { Ok(VolatileSlice::with_bitmap( self.addr.add(count), new_size, self.bitmap.slice_at(count), self.mmap, )) } } /// Copies as many elements of type `T` as possible from this slice to `buf`. /// /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks /// using volatile reads. 
/// /// # Examples /// /// ``` /// # use vm_memory::{VolatileMemory, VolatileSlice}; /// # /// let mut mem = [0u8; 32]; /// let vslice = VolatileSlice::from(&mut mem[..]); /// let mut buf = [5u8; 16]; /// let res = vslice.copy_to(&mut buf[..]); /// /// assert_eq!(16, res); /// for &v in &buf[..] { /// assert_eq!(v, 0); /// } /// ``` pub fn copy_to(&self, buf: &mut [T]) -> usize where T: ByteValued, { // A fast path for u8/i8 if size_of::() == 1 { let total = buf.len().min(self.len()); // SAFETY: // - dst is valid for writes of at least `total`, since total <= buf.len() // - src is valid for reads of at least `total` as total <= self.len() // - The regions are non-overlapping as `src` points to guest memory and `buf` is // a slice and thus has to live outside of guest memory (there can be more slices to // guest memory without violating rust's aliasing rules) // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine unsafe { copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, self, total) } } else { let count = self.size / size_of::(); let source = self.get_array_ref::(0, count).unwrap(); source.copy_to(buf) } } /// Copies as many bytes as possible from this slice to the provided `slice`. /// /// The copies happen in an undefined order. /// /// # Examples /// /// ``` /// # use vm_memory::{VolatileMemory, VolatileSlice}; /// # /// # // Create a buffer /// # let mut mem = [0u8; 32]; /// # /// # // Get a `VolatileSlice` from the buffer /// # let vslice = VolatileSlice::from(&mut mem[..]); /// # /// vslice.copy_to_volatile_slice( /// vslice /// .get_slice(16, 16) /// .expect("Could not get VolatileSlice"), /// ); /// ``` pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) { // SAFETY: Safe because the pointers are range-checked when the slices // are created, and they never escape the VolatileSlices. // FIXME: ... however, is it really okay to mix non-volatile // operations such as copy with read_volatile and write_volatile? unsafe { let count = min(self.size, slice.size); copy(self.addr, slice.addr, count); slice.bitmap.mark_dirty(0, count); } } /// Copies as many elements of type `T` as possible from `buf` to this slice. /// /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes. /// /// # Examples /// /// ``` /// # use vm_memory::{VolatileMemory, VolatileSlice}; /// # /// let mut mem = [0u8; 32]; /// let vslice = VolatileSlice::from(&mut mem[..]); /// /// let buf = [5u8; 64]; /// vslice.copy_from(&buf[..]); /// /// for i in 0..4 { /// let val = vslice /// .get_ref::(i * 4) /// .expect("Could not get value") /// .load(); /// assert_eq!(val, 0x05050505); /// } /// ``` pub fn copy_from(&self, buf: &[T]) where T: ByteValued, { // A fast path for u8/i8 if size_of::() == 1 { let total = buf.len().min(self.len()); // SAFETY: // - dst is valid for writes of at least `total`, since total <= self.len() // - src is valid for reads of at least `total` as total <= buf.len() // - The regions are non-overlapping as `dst` points to guest memory and `buf` is // a slice and thus has to live outside of guest memory (there can be more slices to // guest memory without violating rust's aliasing rules) // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine unsafe { copy_to_volatile_slice(self, buf.as_ptr() as *const u8, total) }; } else { let count = self.size / size_of::(); // It's ok to use unwrap here because `count` was computed based on the current // length of `self`. 
let dest = self.get_array_ref::(0, count).unwrap(); // No need to explicitly call `mark_dirty` after this call because // `VolatileArrayRef::copy_from` already takes care of that. dest.copy_from(buf); }; } /// Checks if the current slice is aligned at `alignment` bytes. fn check_alignment(&self, alignment: usize) -> Result<()> { // Check that the desired alignment is a power of two. debug_assert!((alignment & (alignment - 1)) == 0); if ((self.addr as usize) & (alignment - 1)) != 0 { return Err(Error::Misaligned { addr: self.addr as usize, alignment, }); } Ok(()) } } impl Bytes for VolatileSlice<'_, B> { type E = Error; /// # Examples /// * Write a slice of size 5 at offset 1020 of a 1024-byte `VolatileSlice`. /// /// ``` /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; /// # /// let mut mem = [0u8; 1024]; /// let vslice = VolatileSlice::from(&mut mem[..]); /// let res = vslice.write(&[1, 2, 3, 4, 5], 1020); /// /// assert!(res.is_ok()); /// assert_eq!(res.unwrap(), 4); /// ``` fn write(&self, mut buf: &[u8], addr: usize) -> Result { if buf.is_empty() { return Ok(0); } if addr >= self.size { return Err(Error::OutOfBounds { addr }); } // NOTE: the duality of read <-> write here is correct. This is because we translate a call // "volatile_slice.write(buf)" (e.g. "write to volatile_slice from buf") into // "buf.read_volatile(volatile_slice)" (e.g. read from buf into volatile_slice) buf.read_volatile(&mut self.offset(addr)?) } /// # Examples /// * Read a slice of size 16 at offset 1010 of a 1024-byte `VolatileSlice`. /// /// ``` /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; /// # /// let mut mem = [0u8; 1024]; /// let vslice = VolatileSlice::from(&mut mem[..]); /// let buf = &mut [0u8; 16]; /// let res = vslice.read(buf, 1010); /// /// assert!(res.is_ok()); /// assert_eq!(res.unwrap(), 14); /// ``` fn read(&self, mut buf: &mut [u8], addr: usize) -> Result { if buf.is_empty() { return Ok(0); } if addr >= self.size { return Err(Error::OutOfBounds { addr }); } // NOTE: The duality of read <-> write here is correct. This is because we translate a call // volatile_slice.read(buf) (e.g. read from volatile_slice into buf) into // "buf.write_volatile(volatile_slice)" (e.g. write into buf from volatile_slice) // Both express data transfer from volatile_slice to buf. buf.write_volatile(&self.offset(addr)?) } /// # Examples /// * Write a slice at offset 256. /// /// ``` /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; /// # /// # // Create a buffer /// # let mut mem = [0u8; 1024]; /// # /// # // Get a `VolatileSlice` from the buffer /// # let vslice = VolatileSlice::from(&mut mem[..]); /// # /// let res = vslice.write_slice(&[1, 2, 3, 4, 5], 256); /// /// assert!(res.is_ok()); /// assert_eq!(res.unwrap(), ()); /// ``` fn write_slice(&self, buf: &[u8], addr: usize) -> Result<()> { // `mark_dirty` called within `self.write`. let len = self.write(buf, addr)?; if len != buf.len() { return Err(Error::PartialBuffer { expected: buf.len(), completed: len, }); } Ok(()) } /// # Examples /// * Read a slice of size 16 at offset 256. 
/// /// ``` /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; /// # /// # // Create a buffer /// # let mut mem = [0u8; 1024]; /// # /// # // Get a `VolatileSlice` from the buffer /// # let vslice = VolatileSlice::from(&mut mem[..]); /// # /// let buf = &mut [0u8; 16]; /// let res = vslice.read_slice(buf, 256); /// /// assert!(res.is_ok()); /// ``` fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<()> { let len = self.read(buf, addr)?; if len != buf.len() { return Err(Error::PartialBuffer { expected: buf.len(), completed: len, }); } Ok(()) } /// # Examples /// /// * Read bytes from /dev/urandom /// /// ``` /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; /// # use std::fs::File; /// # use std::path::Path; /// # /// # if cfg!(unix) { /// # let mut mem = [0u8; 1024]; /// # let vslice = VolatileSlice::from(&mut mem[..]); /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom"); /// /// vslice /// .read_from(32, &mut file, 128) /// .expect("Could not read bytes from file into VolatileSlice"); /// /// let rand_val: u32 = vslice /// .read_obj(40) /// .expect("Could not read value from VolatileSlice"); /// # } /// ``` fn read_from(&self, addr: usize, src: &mut F, count: usize) -> Result where F: Read, { let _ = self.compute_end_offset(addr, count)?; let mut dst = vec![0; count]; let bytes_read = loop { match src.read(&mut dst) { Ok(n) => break n, Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue, Err(e) => return Err(Error::IOError(e)), } }; // There is no guarantee that the read implementation is well-behaved, see the docs for // Read::read. assert!(bytes_read <= count); let slice = self.subslice(addr, bytes_read)?; // SAFETY: We have checked via compute_end_offset that accessing the specified // region of guest memory is valid. We asserted that the value returned by `read` is between // 0 and count (the length of the buffer passed to it), and that the // regions don't overlap because we allocated the Vec outside of guest memory. Ok(unsafe { copy_to_volatile_slice(&slice, dst.as_ptr(), bytes_read) }) } /// # Examples /// /// * Read bytes from /dev/urandom /// /// ``` /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; /// # use std::fs::File; /// # use std::path::Path; /// # /// # if cfg!(unix) { /// # let mut mem = [0u8; 1024]; /// # let vslice = VolatileSlice::from(&mut mem[..]); /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom"); /// /// vslice /// .read_exact_from(32, &mut file, 128) /// .expect("Could not read bytes from file into VolatileSlice"); /// /// let rand_val: u32 = vslice /// .read_obj(40) /// .expect("Could not read value from VolatileSlice"); /// # } /// ``` fn read_exact_from(&self, addr: usize, src: &mut F, count: usize) -> Result<()> where F: Read, { let _ = self.compute_end_offset(addr, count)?; let mut dst = vec![0; count]; // Read into buffer that can be copied into guest memory src.read_exact(&mut dst).map_err(Error::IOError)?; let slice = self.subslice(addr, count)?; // SAFETY: We have checked via compute_end_offset that accessing the specified // region of guest memory is valid. 
We know that `dst` has len `count`, and that the // regions don't overlap because we allocated the Vec outside of guest memory unsafe { copy_to_volatile_slice(&slice, dst.as_ptr(), count) }; Ok(()) } /// # Examples /// /// * Write 128 bytes to /dev/null /// /// ``` /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; /// # use std::fs::OpenOptions; /// # use std::path::Path; /// # /// # if cfg!(unix) { /// # let mut mem = [0u8; 1024]; /// # let vslice = VolatileSlice::from(&mut mem[..]); /// let mut file = OpenOptions::new() /// .write(true) /// .open("/dev/null") /// .expect("Could not open /dev/null"); /// /// vslice /// .write_to(32, &mut file, 128) /// .expect("Could not write value from VolatileSlice to /dev/null"); /// # } /// ``` fn write_to(&self, addr: usize, dst: &mut F, count: usize) -> Result where F: Write, { let _ = self.compute_end_offset(addr, count)?; let mut src = Vec::with_capacity(count); let slice = self.subslice(addr, count)?; // SAFETY: We checked the addr and count so accessing the slice is safe. // It is safe to read from volatile memory. The Vec has capacity for exactly `count` // many bytes, and the memory regions pointed to definitely do not overlap, as we // allocated src outside of guest memory. // The call to set_len is safe because the bytes between 0 and count have been initialized // via copying from guest memory, and the Vec's capacity is `count` unsafe { copy_from_volatile_slice(src.as_mut_ptr(), &slice, count); src.set_len(count); } loop { match dst.write(&src) { Ok(n) => break Ok(n), Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue, Err(e) => break Err(Error::IOError(e)), } } } /// # Examples /// /// * Write 128 bytes to /dev/null /// /// ``` /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; /// # use std::fs::OpenOptions; /// # use std::path::Path; /// # /// # if cfg!(unix) { /// # let mut mem = [0u8; 1024]; /// # let vslice = VolatileSlice::from(&mut mem[..]); /// let mut file = OpenOptions::new() /// .write(true) /// .open("/dev/null") /// .expect("Could not open /dev/null"); /// /// vslice /// .write_all_to(32, &mut file, 128) /// .expect("Could not write value from VolatileSlice to /dev/null"); /// # } /// ``` fn write_all_to(&self, addr: usize, dst: &mut F, count: usize) -> Result<()> where F: Write, { let _ = self.compute_end_offset(addr, count)?; let mut src = Vec::with_capacity(count); let slice = self.subslice(addr, count)?; // SAFETY: We checked the addr and count so accessing the slice is safe. // It is safe to read from volatile memory. The Vec has capacity for exactly `count` // many bytes, and the memory regions pointed to definitely do not overlap, as we // allocated src outside of guest memory. 
// The call to set_len is safe because the bytes between 0 and count have been initialized // via copying from guest memory, and the Vec's capacity is `count` unsafe { copy_from_volatile_slice(src.as_mut_ptr(), &slice, count); src.set_len(count); } dst.write_all(&src).map_err(Error::IOError)?; Ok(()) } fn store(&self, val: T, addr: usize, order: Ordering) -> Result<()> { self.get_atomic_ref::(addr).map(|r| { r.store(val.into(), order); self.bitmap.mark_dirty(addr, size_of::()) }) } fn load(&self, addr: usize, order: Ordering) -> Result { self.get_atomic_ref::(addr) .map(|r| r.load(order).into()) } } impl VolatileMemory for VolatileSlice<'_, B> { type B = B; fn len(&self) -> usize { self.size } fn get_slice(&self, offset: usize, count: usize) -> Result> { let _ = self.compute_end_offset(offset, count)?; Ok( // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and // the lifetime is the same as self. unsafe { VolatileSlice::with_bitmap( self.addr.add(offset), count, self.bitmap.slice_at(offset), self.mmap, ) }, ) } } /// A memory location that supports volatile access to an instance of `T`. /// /// # Examples /// /// ``` /// # use vm_memory::VolatileRef; /// # /// let mut v = 5u32; /// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32 as *mut u8) }; /// /// assert_eq!(v, 5); /// assert_eq!(v_ref.load(), 5); /// v_ref.store(500); /// assert_eq!(v, 500); /// ``` #[derive(Clone, Copy, Debug)] pub struct VolatileRef<'a, T, B = ()> { addr: *mut Packed, bitmap: B, mmap: Option<&'a MmapInfo>, } impl<'a, T> VolatileRef<'a, T, ()> where T: ByteValued, { /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`. /// /// # Safety /// /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller /// must also guarantee that all other users of the given chunk of memory are using volatile /// accesses. pub unsafe fn new(addr: *mut u8) -> Self { Self::with_bitmap(addr, (), None) } } #[allow(clippy::len_without_is_empty)] impl<'a, T, B> VolatileRef<'a, T, B> where T: ByteValued, B: BitmapSlice, { /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`, using the /// provided `bitmap` object for dirty page tracking. /// /// # Safety /// /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller /// must also guarantee that all other users of the given chunk of memory are using volatile /// accesses. pub unsafe fn with_bitmap(addr: *mut u8, bitmap: B, mmap: Option<&'a MmapInfo>) -> Self { VolatileRef { addr: addr as *mut Packed, bitmap, mmap, } } /// Returns a pointer to the underlying memory. Mutable accesses performed /// using the resulting pointer are not automatically accounted for by the dirty bitmap /// tracking functionality. #[deprecated( since = "0.12.1", note = "Use `.ptr_guard()` or `.ptr_guard_mut()` instead" )] #[cfg(not(all(feature = "xen", unix)))] pub fn as_ptr(&self) -> *mut u8 { self.addr as *mut u8 } /// Returns a guard for the pointer to the underlying memory. pub fn ptr_guard(&self) -> PtrGuard { PtrGuard::read(self.mmap, self.addr as *mut u8, self.len()) } /// Returns a mutable guard for the pointer to the underlying memory. 
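///
/// # Examples
///
/// A minimal sketch of writing through the mutable guard. Note that, as with the
/// deprecated `as_ptr`, accesses performed through the raw pointer are not
/// automatically accounted for by the dirty bitmap tracking functionality.
///
/// ```
/// # use vm_memory::VolatileRef;
/// let mut v = 5u32;
/// let v_ref = unsafe { VolatileRef::<u32>::new(&mut v as *mut u32 as *mut u8) };
/// {
///     let guard = v_ref.ptr_guard_mut();
///     // SAFETY: the guarded pointer is valid and properly aligned for a `u32` write.
///     unsafe { guard.as_ptr().cast::<u32>().write_volatile(500) };
/// }
/// assert_eq!(v_ref.load(), 500);
/// ```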
pub fn ptr_guard_mut(&self) -> PtrGuardMut { PtrGuardMut::write(self.mmap, self.addr as *mut u8, self.len()) } /// Gets the size of the referenced type `T`. /// /// # Examples /// /// ``` /// # use std::mem::size_of; /// # use vm_memory::VolatileRef; /// # /// let v_ref = unsafe { VolatileRef::::new(0 as *mut _) }; /// assert_eq!(v_ref.len(), size_of::() as usize); /// ``` pub fn len(&self) -> usize { size_of::() } /// Borrows the inner `BitmapSlice`. pub fn bitmap(&self) -> &B { &self.bitmap } /// Does a volatile write of the value `v` to the address of this ref. #[inline(always)] pub fn store(&self, v: T) { let guard = self.ptr_guard_mut(); // SAFETY: Safe because we checked the address and size when creating this VolatileRef. unsafe { write_volatile(guard.as_ptr() as *mut Packed, Packed::(v)) }; self.bitmap.mark_dirty(0, self.len()) } /// Does a volatile read of the value at the address of this ref. #[inline(always)] pub fn load(&self) -> T { let guard = self.ptr_guard(); // SAFETY: Safe because we checked the address and size when creating this VolatileRef. // For the purposes of demonstrating why read_volatile is necessary, try replacing the code // in this function with the commented code below and running `cargo test --release`. // unsafe { *(self.addr as *const T) } unsafe { read_volatile(guard.as_ptr() as *const Packed).0 } } /// Converts this to a [`VolatileSlice`](struct.VolatileSlice.html) with the same size and /// address. pub fn to_slice(&self) -> VolatileSlice<'a, B> { // SAFETY: Safe because we checked the address and size when creating this VolatileRef. unsafe { VolatileSlice::with_bitmap( self.addr as *mut u8, size_of::(), self.bitmap.clone(), self.mmap, ) } } } /// A memory location that supports volatile access to an array of elements of type `T`. /// /// # Examples /// /// ``` /// # use vm_memory::VolatileArrayRef; /// # /// let mut v = [5u32; 1]; /// let v_ref = unsafe { VolatileArrayRef::new(&mut v[0] as *mut u32 as *mut u8, v.len()) }; /// /// assert_eq!(v[0], 5); /// assert_eq!(v_ref.load(0), 5); /// v_ref.store(0, 500); /// assert_eq!(v[0], 500); /// ``` #[derive(Clone, Copy, Debug)] pub struct VolatileArrayRef<'a, T, B = ()> { addr: *mut u8, nelem: usize, bitmap: B, phantom: PhantomData<&'a T>, mmap: Option<&'a MmapInfo>, } impl<'a, T> VolatileArrayRef<'a, T> where T: ByteValued, { /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of /// type `T`. /// /// # Safety /// /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for /// `nelem` values of type `T` and is available for the duration of the lifetime of the new /// `VolatileRef`. The caller must also guarantee that all other users of the given chunk of /// memory are using volatile accesses. pub unsafe fn new(addr: *mut u8, nelem: usize) -> Self { Self::with_bitmap(addr, nelem, (), None) } } impl<'a, T, B> VolatileArrayRef<'a, T, B> where T: ByteValued, B: BitmapSlice, { /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of /// type `T`, using the provided `bitmap` object for dirty page tracking. /// /// # Safety /// /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for /// `nelem` values of type `T` and is available for the duration of the lifetime of the new /// `VolatileRef`. The caller must also guarantee that all other users of the given chunk of /// memory are using volatile accesses. 
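///
/// Note: `VolatileArrayRef::new(addr, nelem)` is equivalent to
/// `VolatileArrayRef::with_bitmap(addr, nelem, (), None)`, i.e. it uses the no-op `()`
/// bitmap and no mmap information.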
pub unsafe fn with_bitmap( addr: *mut u8, nelem: usize, bitmap: B, mmap: Option<&'a MmapInfo>, ) -> Self { VolatileArrayRef { addr, nelem, bitmap, phantom: PhantomData, mmap, } } /// Returns `true` if this array is empty. /// /// # Examples /// /// ``` /// # use vm_memory::VolatileArrayRef; /// # /// let v_array = unsafe { VolatileArrayRef::::new(0 as *mut _, 0) }; /// assert!(v_array.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.nelem == 0 } /// Returns the number of elements in the array. /// /// # Examples /// /// ``` /// # use vm_memory::VolatileArrayRef; /// # /// # let v_array = unsafe { VolatileArrayRef::::new(0 as *mut _, 1) }; /// assert_eq!(v_array.len(), 1); /// ``` pub fn len(&self) -> usize { self.nelem } /// Returns the size of `T`. /// /// # Examples /// /// ``` /// # use std::mem::size_of; /// # use vm_memory::VolatileArrayRef; /// # /// let v_ref = unsafe { VolatileArrayRef::::new(0 as *mut _, 0) }; /// assert_eq!(v_ref.element_size(), size_of::() as usize); /// ``` pub fn element_size(&self) -> usize { size_of::() } /// Returns a pointer to the underlying memory. Mutable accesses performed /// using the resulting pointer are not automatically accounted for by the dirty bitmap /// tracking functionality. #[deprecated( since = "0.12.1", note = "Use `.ptr_guard()` or `.ptr_guard_mut()` instead" )] #[cfg(not(all(feature = "xen", unix)))] pub fn as_ptr(&self) -> *mut u8 { self.addr } /// Returns a guard for the pointer to the underlying memory. pub fn ptr_guard(&self) -> PtrGuard { PtrGuard::read(self.mmap, self.addr, self.len()) } /// Returns a mutable guard for the pointer to the underlying memory. pub fn ptr_guard_mut(&self) -> PtrGuardMut { PtrGuardMut::write(self.mmap, self.addr, self.len()) } /// Borrows the inner `BitmapSlice`. pub fn bitmap(&self) -> &B { &self.bitmap } /// Converts this to a `VolatileSlice` with the same size and address. pub fn to_slice(&self) -> VolatileSlice<'a, B> { // SAFETY: Safe as long as the caller validated addr when creating this object. unsafe { VolatileSlice::with_bitmap( self.addr, self.nelem * self.element_size(), self.bitmap.clone(), self.mmap, ) } } /// Does a volatile read of the element at `index`. /// /// # Panics /// /// Panics if `index` is less than the number of elements of the array to which `&self` points. pub fn ref_at(&self, index: usize) -> VolatileRef<'a, T, B> { assert!(index < self.nelem); // SAFETY: Safe because the memory has the same lifetime and points to a subset of the // memory of the VolatileArrayRef. unsafe { // byteofs must fit in an isize as it was checked in get_array_ref. let byteofs = (self.element_size() * index) as isize; let ptr = self.addr.offset(byteofs); VolatileRef::with_bitmap(ptr, self.bitmap.slice_at(byteofs as usize), self.mmap) } } /// Does a volatile read of the element at `index`. pub fn load(&self, index: usize) -> T { self.ref_at(index).load() } /// Does a volatile write of the element at `index`. pub fn store(&self, index: usize, value: T) { // The `VolatileRef::store` call below implements the required dirty bitmap tracking logic, // so no need to do that in this method as well. self.ref_at(index).store(value) } /// Copies as many elements of type `T` as possible from this array to `buf`. /// /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks /// using volatile reads. 
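///
/// Returns the number of elements copied, i.e. the smaller of `buf.len()` and `self.len()`.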
/// /// # Examples /// /// ``` /// # use vm_memory::VolatileArrayRef; /// # /// let mut v = [0u8; 32]; /// let v_ref = unsafe { VolatileArrayRef::new(v.as_mut_ptr(), v.len()) }; /// /// let mut buf = [5u8; 16]; /// v_ref.copy_to(&mut buf[..]); /// for &v in &buf[..] { /// assert_eq!(v, 0); /// } /// ``` pub fn copy_to(&self, buf: &mut [T]) -> usize { // A fast path for u8/i8 if size_of::() == 1 { let source = self.to_slice(); let total = buf.len().min(source.len()); // SAFETY: // - dst is valid for writes of at least `total`, since total <= buf.len() // - src is valid for reads of at least `total` as total <= source.len() // - The regions are non-overlapping as `src` points to guest memory and `buf` is // a slice and thus has to live outside of guest memory (there can be more slices to // guest memory without violating rust's aliasing rules) // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine return unsafe { copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, &source, total) }; } let guard = self.ptr_guard(); let mut ptr = guard.as_ptr() as *const Packed; let start = ptr; for v in buf.iter_mut().take(self.len()) { // SAFETY: read_volatile is safe because the pointers are range-checked when // the slices are created, and they never escape the VolatileSlices. // ptr::add is safe because get_array_ref() validated that // size_of::() * self.len() fits in an isize. unsafe { *v = read_volatile(ptr).0; ptr = ptr.add(1); } } // SAFETY: It is guaranteed that start and ptr point to the regions of the same slice. unsafe { ptr.offset_from(start) as usize } } /// Copies as many bytes as possible from this slice to the provided `slice`. /// /// The copies happen in an undefined order. /// /// # Examples /// /// ``` /// # use vm_memory::VolatileArrayRef; /// # /// let mut v = [0u8; 32]; /// let v_ref = unsafe { VolatileArrayRef::::new(v.as_mut_ptr(), v.len()) }; /// let mut buf = [5u8; 16]; /// let v_ref2 = unsafe { VolatileArrayRef::::new(buf.as_mut_ptr(), buf.len()) }; /// /// v_ref.copy_to_volatile_slice(v_ref2.to_slice()); /// for &v in &buf[..] { /// assert_eq!(v, 0); /// } /// ``` pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) { // SAFETY: Safe because the pointers are range-checked when the slices // are created, and they never escape the VolatileSlices. // FIXME: ... however, is it really okay to mix non-volatile // operations such as copy with read_volatile and write_volatile? unsafe { let count = min(self.len() * self.element_size(), slice.size); copy(self.addr, slice.addr, count); slice.bitmap.mark_dirty(0, count); } } /// Copies as many elements of type `T` as possible from `buf` to this slice. /// /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, /// to this slice's memory. The copy happens from smallest to largest address in /// `T` sized chunks using volatile writes. /// /// # Examples /// /// ``` /// # use vm_memory::VolatileArrayRef; /// # /// let mut v = [0u8; 32]; /// let v_ref = unsafe { VolatileArrayRef::::new(v.as_mut_ptr(), v.len()) }; /// /// let buf = [5u8; 64]; /// v_ref.copy_from(&buf[..]); /// for &val in &v[..] 
{ /// assert_eq!(5u8, val); /// } /// ``` pub fn copy_from(&self, buf: &[T]) { // A fast path for u8/i8 if size_of::() == 1 { let destination = self.to_slice(); let total = buf.len().min(destination.len()); // absurd formatting brought to you by clippy // SAFETY: // - dst is valid for writes of at least `total`, since total <= destination.len() // - src is valid for reads of at least `total` as total <= buf.len() // - The regions are non-overlapping as `dst` points to guest memory and `buf` is // a slice and thus has to live outside of guest memory (there can be more slices to // guest memory without violating rust's aliasing rules) // - size is always a multiple of alignment, so treating *const T as *const u8 is fine unsafe { copy_to_volatile_slice(&destination, buf.as_ptr() as *const u8, total) }; } else { let guard = self.ptr_guard_mut(); let start = guard.as_ptr(); let mut ptr = start as *mut Packed; for &v in buf.iter().take(self.len()) { // SAFETY: write_volatile is safe because the pointers are range-checked when // the slices are created, and they never escape the VolatileSlices. // ptr::add is safe because get_array_ref() validated that // size_of::() * self.len() fits in an isize. unsafe { write_volatile(ptr, Packed::(v)); ptr = ptr.add(1); } } self.bitmap.mark_dirty(0, ptr as usize - start as usize); } } } impl<'a, B: BitmapSlice> From> for VolatileArrayRef<'a, u8, B> { fn from(slice: VolatileSlice<'a, B>) -> Self { // SAFETY: Safe because the result has the same lifetime and points to the same // memory as the incoming VolatileSlice. unsafe { VolatileArrayRef::with_bitmap(slice.addr, slice.len(), slice.bitmap, slice.mmap) } } } // Return the largest value that `addr` is aligned to. Forcing this function to return 1 will // cause test_non_atomic_access to fail. fn alignment(addr: usize) -> usize { // Rust is silly and does not let me write addr & -addr. addr & (!addr + 1) } pub(crate) mod copy_slice_impl { use super::*; // SAFETY: Has the same safety requirements as `read_volatile` + `write_volatile`, namely: // - `src_addr` and `dst_addr` must be valid for reads/writes. // - `src_addr` and `dst_addr` must be properly aligned with respect to `align`. // - `src_addr` must point to a properly initialized value, which is true here because // we're only using integer primitives. unsafe fn copy_single(align: usize, src_addr: *const u8, dst_addr: *mut u8) { match align { 8 => write_volatile(dst_addr as *mut u64, read_volatile(src_addr as *const u64)), 4 => write_volatile(dst_addr as *mut u32, read_volatile(src_addr as *const u32)), 2 => write_volatile(dst_addr as *mut u16, read_volatile(src_addr as *const u16)), 1 => write_volatile(dst_addr, read_volatile(src_addr)), _ => unreachable!(), } } /// Copies `total` bytes from `src` to `dst` using a loop of volatile reads and writes /// /// SAFETY: `src` and `dst` must be point to a contiguously allocated memory region of at least /// length `total`. The regions must not overlap unsafe fn copy_slice_volatile(mut dst: *mut u8, mut src: *const u8, total: usize) -> usize { let mut left = total; let align = min(alignment(src as usize), alignment(dst as usize)); let mut copy_aligned_slice = |min_align| { if align < min_align { return; } while left >= min_align { // SAFETY: Safe because we check alignment beforehand, the memory areas are valid // for reads/writes, and the source always contains a valid value. 
unsafe { copy_single(min_align, src, dst) }; left -= min_align; if left == 0 { break; } // SAFETY: We only explain the invariants for `src`, the argument for `dst` is // analogous. // - `src` and `src + min_align` are within (or one byte past) the same allocated object // This is given by the invariant on this function ensuring that [src, src + total) // are part of the same allocated object, and the condition on the while loop // ensures that we do not go outside this object // - The computed offset in bytes cannot overflow isize, because `min_align` is at // most 8 when the closure is called (see below) // - The sum `src as usize + min_align` can only wrap around if src as usize + min_align - 1 == usize::MAX, // however in this case, left == 0, and we'll have exited the loop above. unsafe { src = src.add(min_align); dst = dst.add(min_align); } } }; if size_of::() > 4 { copy_aligned_slice(8); } copy_aligned_slice(4); copy_aligned_slice(2); copy_aligned_slice(1); total } /// Copies `total` bytes from `src` to `dst` /// /// SAFETY: `src` and `dst` must be point to a contiguously allocated memory region of at least /// length `total`. The regions must not overlap unsafe fn copy_slice(dst: *mut u8, src: *const u8, total: usize) -> usize { if total <= size_of::() { // SAFETY: Invariants of copy_slice_volatile are the same as invariants of copy_slice unsafe { copy_slice_volatile(dst, src, total); }; } else { // SAFETY: // - Both src and dst are allocated for reads/writes of length `total` by function // invariant // - src and dst are properly aligned, as any alignment is valid for u8 // - The regions are not overlapping by function invariant unsafe { std::ptr::copy_nonoverlapping(src, dst, total); } } total } /// Copies `total` bytes from `slice` to `dst` /// /// SAFETY: `slice` and `dst` must be point to a contiguously allocated memory region of at /// least length `total`. The regions must not overlap. pub(crate) unsafe fn copy_from_volatile_slice( dst: *mut u8, slice: &VolatileSlice<'_, B>, total: usize, ) -> usize { let guard = slice.ptr_guard(); // SAFETY: guaranteed by function invariants. copy_slice(dst, guard.as_ptr(), total) } /// Copies `total` bytes from 'src' to `slice` /// /// SAFETY: `slice` and `src` must be point to a contiguously allocated memory region of at /// least length `total`. The regions must not overlap. pub(crate) unsafe fn copy_to_volatile_slice( slice: &VolatileSlice<'_, B>, src: *const u8, total: usize, ) -> usize { let guard = slice.ptr_guard_mut(); // SAFETY: guaranteed by function invariants. 
let count = copy_slice(guard.as_ptr(), src, total); slice.bitmap.mark_dirty(0, count); count } } #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] use super::*; use std::alloc::Layout; use std::fs::File; use std::mem::size_of_val; use std::path::Path; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Barrier}; use std::thread::spawn; use matches::assert_matches; use std::num::NonZeroUsize; use vmm_sys_util::tempfile::TempFile; use crate::bitmap::tests::{ check_range, range_is_clean, range_is_dirty, test_bytes, test_volatile_memory, }; use crate::bitmap::{AtomicBitmap, RefSlice}; const DEFAULT_PAGE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(0x1000) }; #[test] fn test_display_error() { assert_eq!( format!("{}", Error::OutOfBounds { addr: 0x10 }), "address 0x10 is out of bounds" ); assert_eq!( format!( "{}", Error::Overflow { base: 0x0, offset: 0x10 } ), "address 0x0 offset by 0x10 would overflow" ); assert_eq!( format!( "{}", Error::TooBig { nelements: 100_000, size: 1_000_000_000 } ), "100000 elements of size 1000000000 would overflow a usize" ); assert_eq!( format!( "{}", Error::Misaligned { addr: 0x4, alignment: 8 } ), "address 0x4 is not aligned to 8" ); assert_eq!( format!( "{}", Error::PartialBuffer { expected: 100, completed: 90 } ), "only used 90 bytes in 100 long buffer" ); } #[test] fn misaligned_ref() { let mut a = [0u8; 3]; let a_ref = VolatileSlice::from(&mut a[..]); unsafe { assert!( a_ref.aligned_as_ref::(0).is_err() ^ a_ref.aligned_as_ref::(1).is_err() ); assert!( a_ref.aligned_as_mut::(0).is_err() ^ a_ref.aligned_as_mut::(1).is_err() ); } } #[test] fn atomic_store() { let mut a = [0usize; 1]; { let a_ref = unsafe { VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::()) }; let atomic = a_ref.get_atomic_ref::(0).unwrap(); atomic.store(2usize, Ordering::Relaxed) } assert_eq!(a[0], 2); } #[test] fn atomic_load() { let mut a = [5usize; 1]; { let a_ref = unsafe { VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::()) }; let atomic = { let atomic = a_ref.get_atomic_ref::(0).unwrap(); assert_eq!(atomic.load(Ordering::Relaxed), 5usize); atomic }; // To make sure we can take the atomic out of the scope we made it in: atomic.load(Ordering::Relaxed); // but not too far: // atomicu8 } //.load(std::sync::atomic::Ordering::Relaxed) ; } #[test] fn misaligned_atomic() { let mut a = [5usize, 5usize]; let a_ref = unsafe { VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::()) }; assert!(a_ref.get_atomic_ref::(0).is_ok()); assert!(a_ref.get_atomic_ref::(1).is_err()); } #[test] fn ref_store() { let mut a = [0u8; 1]; { let a_ref = VolatileSlice::from(&mut a[..]); let v_ref = a_ref.get_ref(0).unwrap(); v_ref.store(2u8); } assert_eq!(a[0], 2); } #[test] fn ref_load() { let mut a = [5u8; 1]; { let a_ref = VolatileSlice::from(&mut a[..]); let c = { let v_ref = a_ref.get_ref::(0).unwrap(); assert_eq!(v_ref.load(), 5u8); v_ref }; // To make sure we can take a v_ref out of the scope we made it in: c.load(); // but not too far: // c } //.load() ; } #[test] fn ref_to_slice() { let mut a = [1u8; 5]; let a_ref = VolatileSlice::from(&mut a[..]); let v_ref = a_ref.get_ref(1).unwrap(); v_ref.store(0x1234_5678u32); let ref_slice = v_ref.to_slice(); assert_eq!(v_ref.addr as usize, ref_slice.addr as usize); assert_eq!(v_ref.len(), ref_slice.len()); assert!(!ref_slice.is_empty()); } #[test] fn observe_mutate() { struct RawMemory(*mut u8); // SAFETY: we use property synchronization below unsafe impl Send for RawMemory {} 
unsafe impl Sync for RawMemory {} let mem = Arc::new(RawMemory(unsafe { std::alloc::alloc(Layout::from_size_align(1, 1).unwrap()) })); let outside_slice = unsafe { VolatileSlice::new(Arc::clone(&mem).0, 1) }; let inside_arc = Arc::clone(&mem); let v_ref = outside_slice.get_ref::(0).unwrap(); let barrier = Arc::new(Barrier::new(2)); let barrier1 = barrier.clone(); v_ref.store(99); spawn(move || { barrier1.wait(); let inside_slice = unsafe { VolatileSlice::new(inside_arc.0, 1) }; let clone_v_ref = inside_slice.get_ref::(0).unwrap(); clone_v_ref.store(0); barrier1.wait(); }); assert_eq!(v_ref.load(), 99); barrier.wait(); barrier.wait(); assert_eq!(v_ref.load(), 0); unsafe { std::alloc::dealloc(mem.0, Layout::from_size_align(1, 1).unwrap()) } } #[test] fn mem_is_empty() { let mut backing = vec![0u8; 100]; let a = VolatileSlice::from(backing.as_mut_slice()); assert!(!a.is_empty()); let mut backing = vec![]; let a = VolatileSlice::from(backing.as_mut_slice()); assert!(a.is_empty()); } #[test] fn slice_len() { let mut backing = vec![0u8; 100]; let mem = VolatileSlice::from(backing.as_mut_slice()); let slice = mem.get_slice(0, 27).unwrap(); assert_eq!(slice.len(), 27); assert!(!slice.is_empty()); let slice = mem.get_slice(34, 27).unwrap(); assert_eq!(slice.len(), 27); assert!(!slice.is_empty()); let slice = slice.get_slice(20, 5).unwrap(); assert_eq!(slice.len(), 5); assert!(!slice.is_empty()); let slice = mem.get_slice(34, 0).unwrap(); assert!(slice.is_empty()); } #[test] fn slice_subslice() { let mut backing = vec![0u8; 100]; let mem = VolatileSlice::from(backing.as_mut_slice()); let slice = mem.get_slice(0, 100).unwrap(); assert!(slice.write(&[1; 80], 10).is_ok()); assert!(slice.subslice(0, 0).is_ok()); assert!(slice.subslice(0, 101).is_err()); assert!(slice.subslice(99, 0).is_ok()); assert!(slice.subslice(99, 1).is_ok()); assert!(slice.subslice(99, 2).is_err()); assert!(slice.subslice(100, 0).is_ok()); assert!(slice.subslice(100, 1).is_err()); assert!(slice.subslice(101, 0).is_err()); assert!(slice.subslice(101, 1).is_err()); assert!(slice.subslice(std::usize::MAX, 2).is_err()); assert!(slice.subslice(2, std::usize::MAX).is_err()); let maybe_offset_slice = slice.subslice(10, 80); assert!(maybe_offset_slice.is_ok()); let offset_slice = maybe_offset_slice.unwrap(); assert_eq!(offset_slice.len(), 80); let mut buf = [0; 80]; assert!(offset_slice.read(&mut buf, 0).is_ok()); assert_eq!(&buf[0..80], &[1; 80][0..80]); } #[test] fn slice_offset() { let mut backing = vec![0u8; 100]; let mem = VolatileSlice::from(backing.as_mut_slice()); let slice = mem.get_slice(0, 100).unwrap(); assert!(slice.write(&[1; 80], 10).is_ok()); assert!(slice.offset(101).is_err()); let maybe_offset_slice = slice.offset(10); assert!(maybe_offset_slice.is_ok()); let offset_slice = maybe_offset_slice.unwrap(); assert_eq!(offset_slice.len(), 90); let mut buf = [0; 90]; assert!(offset_slice.read(&mut buf, 0).is_ok()); assert_eq!(&buf[0..80], &[1; 80][0..80]); assert_eq!(&buf[80..90], &[0; 10][0..10]); } #[test] fn slice_copy_to_u8() { let mut a = [2u8, 4, 6, 8, 10]; let mut b = [0u8; 4]; let mut c = [0u8; 6]; let a_ref = VolatileSlice::from(&mut a[..]); let v_ref = a_ref.get_slice(0, a_ref.len()).unwrap(); v_ref.copy_to(&mut b[..]); v_ref.copy_to(&mut c[..]); assert_eq!(b[0..4], a[0..4]); assert_eq!(c[0..5], a[0..5]); } #[test] fn slice_copy_to_u16() { let mut a = [0x01u16, 0x2, 0x03, 0x4, 0x5]; let mut b = [0u16; 4]; let mut c = [0u16; 6]; let a_ref = &mut a[..]; let v_ref = unsafe { VolatileSlice::new(a_ref.as_mut_ptr() as *mut 
u8, 9) }; v_ref.copy_to(&mut b[..]); v_ref.copy_to(&mut c[..]); assert_eq!(b[0..4], a_ref[0..4]); assert_eq!(c[0..4], a_ref[0..4]); assert_eq!(c[4], 0); } #[test] fn slice_copy_from_u8() { let a = [2u8, 4, 6, 8, 10]; let mut b = [0u8; 4]; let mut c = [0u8; 6]; let b_ref = VolatileSlice::from(&mut b[..]); let v_ref = b_ref.get_slice(0, b_ref.len()).unwrap(); v_ref.copy_from(&a[..]); assert_eq!(b[0..4], a[0..4]); let c_ref = VolatileSlice::from(&mut c[..]); let v_ref = c_ref.get_slice(0, c_ref.len()).unwrap(); v_ref.copy_from(&a[..]); assert_eq!(c[0..5], a[0..5]); } #[test] fn slice_copy_from_u16() { let a = [2u16, 4, 6, 8, 10]; let mut b = [0u16; 4]; let mut c = [0u16; 6]; let b_ref = &mut b[..]; let v_ref = unsafe { VolatileSlice::new(b_ref.as_mut_ptr() as *mut u8, 8) }; v_ref.copy_from(&a[..]); assert_eq!(b_ref[0..4], a[0..4]); let c_ref = &mut c[..]; let v_ref = unsafe { VolatileSlice::new(c_ref.as_mut_ptr() as *mut u8, 9) }; v_ref.copy_from(&a[..]); assert_eq!(c_ref[0..4], a[0..4]); assert_eq!(c_ref[4], 0); } #[test] fn slice_copy_to_volatile_slice() { let mut a = [2u8, 4, 6, 8, 10]; let a_ref = VolatileSlice::from(&mut a[..]); let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap(); let mut b = [0u8; 4]; let b_ref = VolatileSlice::from(&mut b[..]); let b_slice = b_ref.get_slice(0, b_ref.len()).unwrap(); a_slice.copy_to_volatile_slice(b_slice); assert_eq!(b, [2, 4, 6, 8]); } #[test] fn slice_overflow_error() { use std::usize::MAX; let mut backing = vec![0u8]; let a = VolatileSlice::from(backing.as_mut_slice()); let res = a.get_slice(MAX, 1).unwrap_err(); assert_matches!( res, Error::Overflow { base: MAX, offset: 1, } ); } #[test] fn slice_oob_error() { let mut backing = vec![0u8; 100]; let a = VolatileSlice::from(backing.as_mut_slice()); a.get_slice(50, 50).unwrap(); let res = a.get_slice(55, 50).unwrap_err(); assert_matches!(res, Error::OutOfBounds { addr: 105 }); } #[test] fn ref_overflow_error() { use std::usize::MAX; let mut backing = vec![0u8]; let a = VolatileSlice::from(backing.as_mut_slice()); let res = a.get_ref::(MAX).unwrap_err(); assert_matches!( res, Error::Overflow { base: MAX, offset: 1, } ); } #[test] fn ref_oob_error() { let mut backing = vec![0u8; 100]; let a = VolatileSlice::from(backing.as_mut_slice()); a.get_ref::(99).unwrap(); let res = a.get_ref::(99).unwrap_err(); assert_matches!(res, Error::OutOfBounds { addr: 101 }); } #[test] fn ref_oob_too_large() { let mut backing = vec![0u8; 3]; let a = VolatileSlice::from(backing.as_mut_slice()); let res = a.get_ref::(0).unwrap_err(); assert_matches!(res, Error::OutOfBounds { addr: 4 }); } #[test] fn slice_store() { let mut backing = vec![0u8; 5]; let a = VolatileSlice::from(backing.as_mut_slice()); let s = a.as_volatile_slice(); let r = a.get_ref(2).unwrap(); r.store(9u16); assert_eq!(s.read_obj::(2).unwrap(), 9); } #[test] fn test_write_past_end() { let mut backing = vec![0u8; 5]; let a = VolatileSlice::from(backing.as_mut_slice()); let s = a.as_volatile_slice(); let res = s.write(&[1, 2, 3, 4, 5, 6], 0); assert!(res.is_ok()); assert_eq!(res.unwrap(), 5); } #[test] fn slice_read_and_write() { let mut backing = vec![0u8; 5]; let a = VolatileSlice::from(backing.as_mut_slice()); let s = a.as_volatile_slice(); let sample_buf = [1, 2, 3]; assert!(s.write(&sample_buf, 5).is_err()); assert!(s.write(&sample_buf, 2).is_ok()); let mut buf = [0u8; 3]; assert!(s.read(&mut buf, 5).is_err()); assert!(s.read_slice(&mut buf, 2).is_ok()); assert_eq!(buf, sample_buf); // Writing an empty buffer at the end of the volatile slice works. 
assert_eq!(s.write(&[], 100).unwrap(), 0); let buf: &mut [u8] = &mut []; assert_eq!(s.read(buf, 4).unwrap(), 0); // Check that reading and writing an empty buffer does not yield an error. let mut backing = Vec::new(); let empty_mem = VolatileSlice::from(backing.as_mut_slice()); let empty = empty_mem.as_volatile_slice(); assert_eq!(empty.write(&[], 1).unwrap(), 0); assert_eq!(empty.read(buf, 1).unwrap(), 0); } #[test] fn obj_read_and_write() { let mut backing = vec![0u8; 5]; let a = VolatileSlice::from(backing.as_mut_slice()); let s = a.as_volatile_slice(); assert!(s.write_obj(55u16, 4).is_err()); assert!(s.write_obj(55u16, core::usize::MAX).is_err()); assert!(s.write_obj(55u16, 2).is_ok()); assert_eq!(s.read_obj::(2).unwrap(), 55u16); assert!(s.read_obj::(4).is_err()); assert!(s.read_obj::(core::usize::MAX).is_err()); } #[test] fn mem_read_and_write() { let mut backing = vec![0u8; 5]; let a = VolatileSlice::from(backing.as_mut_slice()); let s = a.as_volatile_slice(); assert!(s.write_obj(!0u32, 1).is_ok()); let mut file = if cfg!(unix) { File::open(Path::new("/dev/zero")).unwrap() } else { File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap() }; assert!(file .read_exact_volatile(&mut s.get_slice(1, size_of::()).unwrap()) .is_ok()); let mut f = TempFile::new().unwrap().into_file(); assert!(f .read_exact_volatile(&mut s.get_slice(1, size_of::()).unwrap()) .is_err()); let value = s.read_obj::(1).unwrap(); if cfg!(unix) { assert_eq!(value, 0); } else { assert_eq!(value, 0x0090_5a4d); } let mut sink = vec![0; size_of::()]; assert!(sink .as_mut_slice() .write_all_volatile(&s.get_slice(1, size_of::()).unwrap()) .is_ok()); if cfg!(unix) { assert_eq!(sink, vec![0; size_of::()]); } else { assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]); }; } #[test] fn unaligned_read_and_write() { let mut backing = vec![0u8; 7]; let a = VolatileSlice::from(backing.as_mut_slice()); let s = a.as_volatile_slice(); let sample_buf: [u8; 7] = [1, 2, 0xAA, 0xAA, 0xAA, 0xAA, 4]; assert!(s.write_slice(&sample_buf, 0).is_ok()); let r = a.get_ref::(2).unwrap(); assert_eq!(r.load(), 0xAAAA_AAAA); r.store(0x5555_5555); let sample_buf: [u8; 7] = [1, 2, 0x55, 0x55, 0x55, 0x55, 4]; let mut buf: [u8; 7] = Default::default(); assert!(s.read_slice(&mut buf, 0).is_ok()); assert_eq!(buf, sample_buf); } #[test] fn test_read_from_exceeds_size() { #[derive(Debug, Default, Copy, Clone)] struct BytesToRead { _val1: u128, // 16 bytes _val2: u128, // 16 bytes } unsafe impl ByteValued for BytesToRead {} let cursor_size = 20; let image = vec![1u8; cursor_size]; // Trying to read more bytes than we have space for in image // make the read_from function return maximum vec size (i.e. 20). 
let mut bytes_to_read = BytesToRead::default(); assert_eq!( image .as_slice() .read_volatile(&mut bytes_to_read.as_bytes()) .unwrap(), cursor_size ); } #[test] fn ref_array_from_slice() { let mut a = [2, 4, 6, 8, 10]; let a_vec = a.to_vec(); let a_ref = VolatileSlice::from(&mut a[..]); let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap(); let a_array_ref: VolatileArrayRef = a_slice.into(); for (i, entry) in a_vec.iter().enumerate() { assert_eq!(&a_array_ref.load(i), entry); } } #[test] fn ref_array_store() { let mut a = [0u8; 5]; { let a_ref = VolatileSlice::from(&mut a[..]); let v_ref = a_ref.get_array_ref(1, 4).unwrap(); v_ref.store(1, 2u8); v_ref.store(2, 4u8); v_ref.store(3, 6u8); } let expected = [2u8, 4u8, 6u8]; assert_eq!(a[2..=4], expected); } #[test] fn ref_array_load() { let mut a = [0, 0, 2, 3, 10]; { let a_ref = VolatileSlice::from(&mut a[..]); let c = { let v_ref = a_ref.get_array_ref::(1, 4).unwrap(); assert_eq!(v_ref.load(1), 2u8); assert_eq!(v_ref.load(2), 3u8); assert_eq!(v_ref.load(3), 10u8); v_ref }; // To make sure we can take a v_ref out of the scope we made it in: c.load(0); // but not too far: // c } //.load() ; } #[test] fn ref_array_overflow() { let mut a = [0, 0, 2, 3, 10]; let a_ref = VolatileSlice::from(&mut a[..]); let res = a_ref.get_array_ref::(4, usize::MAX).unwrap_err(); assert_matches!( res, Error::TooBig { nelements: usize::MAX, size: 4, } ); } #[test] fn alignment() { let a = [0u8; 64]; let a = &a[a.as_ptr().align_offset(32)] as *const u8 as usize; assert!(super::alignment(a) >= 32); assert_eq!(super::alignment(a + 9), 1); assert_eq!(super::alignment(a + 30), 2); assert_eq!(super::alignment(a + 12), 4); assert_eq!(super::alignment(a + 8), 8); } #[test] fn test_atomic_accesses() { let len = 0x1000; let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) }; let a = unsafe { VolatileSlice::new(buf, len) }; crate::bytes::tests::check_atomic_accesses(a, 0, 0x1000); unsafe { std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap()); } } #[test] fn split_at() { let mut mem = [0u8; 32]; let mem_ref = VolatileSlice::from(&mut mem[..]); let vslice = mem_ref.get_slice(0, 32).unwrap(); let (start, end) = vslice.split_at(8).unwrap(); assert_eq!(start.len(), 8); assert_eq!(end.len(), 24); let (start, end) = vslice.split_at(0).unwrap(); assert_eq!(start.len(), 0); assert_eq!(end.len(), 32); let (start, end) = vslice.split_at(31).unwrap(); assert_eq!(start.len(), 31); assert_eq!(end.len(), 1); let (start, end) = vslice.split_at(32).unwrap(); assert_eq!(start.len(), 32); assert_eq!(end.len(), 0); let err = vslice.split_at(33).unwrap_err(); assert_matches!(err, Error::OutOfBounds { addr: _ }) } #[test] fn test_volatile_slice_dirty_tracking() { let val = 123u64; let dirty_offset = 0x1000; let dirty_len = size_of_val(&val); let len = 0x10000; let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) }; // Invoke the `Bytes` test helper function. { let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE); let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) }; test_bytes( &slice, |s: &VolatileSlice>, start: usize, len: usize, clean: bool| { check_range(s.bitmap(), start, len, clean) }, |offset| offset, 0x1000, ); } // Invoke the `VolatileMemory` test helper function. 
{ let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE); let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) }; test_volatile_memory(&slice); } let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE); let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) }; let bitmap2 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE); let slice2 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap2.slice_at(0), None) }; let bitmap3 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE); let slice3 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap3.slice_at(0), None) }; assert!(range_is_clean(slice.bitmap(), 0, slice.len())); assert!(range_is_clean(slice2.bitmap(), 0, slice2.len())); slice.write_obj(val, dirty_offset).unwrap(); assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len)); slice.copy_to_volatile_slice(slice2); assert!(range_is_dirty(slice2.bitmap(), 0, slice2.len())); { let (s1, s2) = slice.split_at(dirty_offset).unwrap(); assert!(range_is_clean(s1.bitmap(), 0, s1.len())); assert!(range_is_dirty(s2.bitmap(), 0, dirty_len)); } { let s = slice.subslice(dirty_offset, dirty_len).unwrap(); assert!(range_is_dirty(s.bitmap(), 0, s.len())); } { let s = slice.offset(dirty_offset).unwrap(); assert!(range_is_dirty(s.bitmap(), 0, dirty_len)); } // Test `copy_from` for size_of:: == 1. { let buf = vec![1u8; dirty_offset]; assert!(range_is_clean(slice.bitmap(), 0, dirty_offset)); slice.copy_from(&buf); assert!(range_is_dirty(slice.bitmap(), 0, dirty_offset)); } // Test `copy_from` for size_of:: > 1. { let val = 1u32; let buf = vec![val; dirty_offset / size_of_val(&val)]; assert!(range_is_clean(slice3.bitmap(), 0, dirty_offset)); slice3.copy_from(&buf); assert!(range_is_dirty(slice3.bitmap(), 0, dirty_offset)); } unsafe { std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap()); } } #[test] fn test_volatile_ref_dirty_tracking() { let val = 123u64; let mut buf = vec![val]; let bitmap = AtomicBitmap::new(size_of_val(&val), DEFAULT_PAGE_SIZE); let vref = unsafe { VolatileRef::with_bitmap(buf.as_mut_ptr() as *mut u8, bitmap.slice_at(0), None) }; assert!(range_is_clean(vref.bitmap(), 0, vref.len())); vref.store(val); assert!(range_is_dirty(vref.bitmap(), 0, vref.len())); } fn test_volatile_array_ref_copy_from_tracking( buf: &mut [T], index: usize, page_size: NonZeroUsize, ) where T: ByteValued + From, { let bitmap = AtomicBitmap::new(size_of_val(buf), page_size); let arr = unsafe { VolatileArrayRef::with_bitmap( buf.as_mut_ptr() as *mut u8, index + 1, bitmap.slice_at(0), None, ) }; let val = T::from(123); let copy_buf = vec![val; index + 1]; assert!(range_is_clean(arr.bitmap(), 0, arr.len() * size_of::())); arr.copy_from(copy_buf.as_slice()); assert!(range_is_dirty(arr.bitmap(), 0, size_of_val(buf))); } #[test] fn test_volatile_array_ref_dirty_tracking() { let val = 123u64; let dirty_len = size_of_val(&val); let index = 0x1000; let dirty_offset = dirty_len * index; let mut buf = vec![0u64; index + 1]; let mut byte_buf = vec![0u8; index + 1]; // Test `ref_at`. { let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE); let arr = unsafe { VolatileArrayRef::with_bitmap( buf.as_mut_ptr() as *mut u8, index + 1, bitmap.slice_at(0), None, ) }; assert!(range_is_clean(arr.bitmap(), 0, arr.len() * dirty_len)); arr.ref_at(index).store(val); assert!(range_is_dirty(arr.bitmap(), dirty_offset, dirty_len)); } // Test `store`. 
{ let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE); let arr = unsafe { VolatileArrayRef::with_bitmap( buf.as_mut_ptr() as *mut u8, index + 1, bitmap.slice_at(0), None, ) }; let slice = arr.to_slice(); assert!(range_is_clean(slice.bitmap(), 0, slice.len())); arr.store(index, val); assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len)); } // Test `copy_from` when size_of::() == 1. test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, DEFAULT_PAGE_SIZE); // Test `copy_from` when size_of::() > 1. test_volatile_array_ref_copy_from_tracking(&mut buf, index, DEFAULT_PAGE_SIZE); } }