"]
description = "Cross-platform virtual memory API"
documentation = "https://docs.rs/region"
edition = "2018"
homepage = "https://github.com/darfink/region-rs"
keywords = ["region", "page", "lock", "protect", "maps"]
license = "MIT"
name = "region"
readme = "README.md"
repository = "https://github.com/darfink/region-rs"
version = "3.0.2"
[dependencies]
bitflags = "1.0"
libc = "0.2.153"
[target."cfg(any(target_os = \"macos\", target_os = \"ios\"))".dependencies]
mach2 = "0.4"
[target.'cfg(windows)'.dependencies.windows-sys]
version = "0.52.0"
features = [
"Win32_Foundation",
"Win32_System_Memory",
"Win32_System_SystemInformation",
"Win32_System_Diagnostics_Debug",
]
[target."cfg(unix)".dev-dependencies]
mmap = { package = "mmap-fixed", version = "0.1.6" }
region-3.0.2/LICENSE
Copyright (c) 2016 Elliott Linder
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
region-3.0.2/README.md
# `region-rs`
## Cross-platform virtual memory API
[![GitHub CI Status][github-shield]][github]
[![crates.io version][crate-shield]][crate]
[![Documentation][docs-shield]][docs]
[![License][license-shield]][license]
This crate provides a cross-platform Rust API for allocating, querying and
manipulating virtual memory. It is a thin abstraction, with the underlying
interaction implemented using platform-specific APIs (e.g. `VirtualQuery`,
`VirtualAlloc`, `VirtualLock`, `mprotect`, `mmap`, `mlock`).
## Platforms
This library is continuously tested against these targets:
- Linux
* `aarch64-linux-android`
* `armv7-unknown-linux-gnueabihf`
* `i686-unknown-linux-gnu`
* `mips-unknown-linux-gnu`
* `x86_64-unknown-linux-gnu`
* `x86_64-unknown-linux-musl`
- Windows
* `i686-pc-windows-gnu`
* `i686-pc-windows-msvc`
* `x86_64-pc-windows-gnu`
* `x86_64-pc-windows-msvc`
- macOS
* `x86_64-apple-darwin`
- NetBSD
* `x86_64-unknown-netbsd`
- FreeBSD
* `x86_64-unknown-freebsd`
- OpenBSD
* `x86_64-unknown-openbsd`
... and continuously checked against these targets:
- Illumos
* `x86_64-unknown-illumos`
Beyond the aforementioned target triplets, the library is also expected to work
against a multitude of omitted architectures.
## Installation
Add this to your `Cargo.toml`:
```toml
[dependencies]
region = "3.0.2"
```
## Example
- Cross-platform equivalents:
```rust
let data = [0xDE, 0xAD, 0xBE, 0xEF];
// Page size
let pz = region::page::size();
// VirtualQuery | '/proc/self/maps'
let q = region::query(data.as_ptr())?;
let qr = region::query_range(data.as_ptr(), data.len())?;
// VirtualAlloc | mmap
let alloc = region::alloc(100, Protection::READ_WRITE)?;
// VirtualProtect | mprotect
region::protect(data.as_ptr(), data.len(), Protection::READ_WRITE_EXECUTE)?;
// ... you can also temporarily change one or more pages' protection
let handle = region::protect_with_handle(data.as_ptr(), data.len(), Protection::READ_WRITE_EXECUTE)?;
// VirtualLock | mlock
let guard = region::lock(data.as_ptr(), data.len())?;
```
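- Writing through an allocation (a minimal sketch building on the calls above):
```rust
// Allocate a readable and writable chunk of memory, then write through it
let mut memory = region::alloc(100, Protection::READ_WRITE)?;
let slice = unsafe {
  std::slice::from_raw_parts_mut(memory.as_mut_ptr::<u8>(), memory.len())
};
slice[0] = 0x42;
assert_eq!(slice[0], 0x42);
```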
[github-shield]: https://img.shields.io/github/actions/workflow/status/darfink/region-rs/ci.yml?branch=master&label=actions&logo=github&style=for-the-badge
[github]: https://github.com/darfink/region-rs/actions/workflows/ci.yml?query=branch%3Amaster
[crate-shield]: https://img.shields.io/crates/v/region.svg?style=for-the-badge
[crate]: https://crates.io/crates/region
[docs-shield]: https://img.shields.io/badge/docs-crates-green.svg?style=for-the-badge
[docs]: https://docs.rs/region/
[license-shield]: https://img.shields.io/crates/l/region.svg?style=for-the-badge
[license]: https://github.com/darfink/region-rs
region-3.0.2/rustfmt.toml
reorder_imports = true
tab_spaces = 2
use_field_init_shorthand = true
use_try_shorthand = true
region-3.0.2/src/alloc.rs
use crate::{os, page, util, Error, Protection, Result};
/// A handle to an owned region of memory.
///
/// This handle does not dereference to a slice, since the underlying memory may
/// have been created with [`Protection::NONE`].
#[allow(clippy::len_without_is_empty)]
pub struct Allocation {
base: *const (),
size: usize,
}
impl Allocation {
/// Returns a pointer to the allocation's base address.
///
/// The address is always aligned to the operating system's page size.
#[inline(always)]
pub fn as_ptr<T>(&self) -> *const T {
self.base.cast()
}
/// Returns a mutable pointer to the allocation's base address.
#[inline(always)]
pub fn as_mut_ptr<T>(&mut self) -> *mut T {
self.base as *mut T
}
/// Returns two raw pointers spanning the allocation's address space.
///
/// The returned range is half-open, which means that the end pointer points
/// one past the last element of the allocation. This way, an empty allocation
/// is represented by two equal pointers, and the difference between the two
/// pointers represents the size of the allocation.
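///
/// # Examples
///
/// A minimal sketch relating the returned range to the allocation's length:
///
/// ```
/// # fn main() -> region::Result<()> {
/// let memory = region::alloc(1, region::Protection::READ_WRITE)?;
/// let range = memory.as_ptr_range::<u8>();
/// assert_eq!(range.end as usize - range.start as usize, memory.len());
/// # Ok(())
/// # }
/// ```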
#[inline(always)]
pub fn as_ptr_range<T>(&self) -> std::ops::Range<*const T> {
let range = self.as_range();
(range.start as *const T)..(range.end as *const T)
}
/// Returns two mutable raw pointers spanning the allocation's address space.
#[inline(always)]
pub fn as_mut_ptr_range<T>(&mut self) -> std::ops::Range<*mut T> {
let range = self.as_range();
(range.start as *mut T)..(range.end as *mut T)
}
/// Returns a range spanning the allocation's address space.
#[inline(always)]
pub fn as_range(&self) -> std::ops::Range<usize> {
(self.base as usize)..(self.base as usize).saturating_add(self.size)
}
/// Returns the size of the allocation in bytes.
///
/// The size is always aligned to a multiple of the operating system's page
/// size.
#[inline(always)]
pub fn len(&self) -> usize {
self.size
}
}
impl Drop for Allocation {
#[inline]
fn drop(&mut self) {
let result = unsafe { os::free(self.base, self.size) };
debug_assert!(result.is_ok(), "freeing region: {:?}", result);
}
}
/// Allocates one or more pages of memory, with a defined protection.
///
/// This function provides a very simple interface for allocating anonymous
/// virtual pages. The allocation address will be decided by the operating
/// system.
///
/// # Parameters
///
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
/// will be returned.
/// - If size is zero, [`Error::InvalidParameter`] will be returned.
///
/// # OS-Specific Behavior
///
/// On NetBSD pages will be allocated without PaX memory protection restrictions
/// (i.e. pages will be allowed to be modified to any combination of `RWX`).
///
/// # Examples
///
/// ```
/// # fn main() -> region::Result<()> {
/// # if cfg!(any(target_arch = "x86", target_arch = "x86_64"))
/// # && !cfg!(any(target_os = "openbsd", target_os = "netbsd")) {
/// use region::Protection;
/// let ret5 = [0xB8, 0x05, 0x00, 0x00, 0x00, 0xC3u8];
///
/// let memory = region::alloc(100, Protection::READ_WRITE_EXECUTE)?;
/// let slice = unsafe {
/// std::slice::from_raw_parts_mut(memory.as_ptr::<u8>() as *mut u8, memory.len())
/// };
///
/// slice[..6].copy_from_slice(&ret5);
/// let x: extern "C" fn() -> i32 = unsafe { std::mem::transmute(slice.as_ptr()) };
///
/// assert_eq!(x(), 5);
/// # }
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn alloc(size: usize, protection: Protection) -> Result<Allocation> {
if size == 0 {
return Err(Error::InvalidParameter("size"));
}
let size = page::ceil(size as *const ()) as usize;
unsafe {
let base = os::alloc(std::ptr::null::<()>(), size, protection)?;
Ok(Allocation { base, size })
}
}
/// Allocates one or more pages of memory, at a specific address, with a defined
/// protection.
///
/// The returned memory allocation is not guaranteed to reside at the provided
/// address. E.g. on Windows, new allocations that do not reside within already
/// reserved memory, are aligned to the operating system's allocation
/// granularity (most commonly 64KB).
///
/// # Implementation
///
/// This function is implemented using `VirtualAlloc` on Windows, and `mmap`
/// with `MAP_FIXED` on POSIX.
///
/// # Parameters
///
/// - The address is rounded down to the closest page boundary.
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary, relative to the
/// address.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
/// will be returned.
/// - If size is zero, [`Error::InvalidParameter`] will be returned.
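///
/// # Examples
///
/// A minimal sketch, mirroring this crate's own tests: obtaining a recently
/// freed address via [`alloc`] and then mapping it explicitly with `alloc_at`.
///
/// ```
/// # fn main() -> region::Result<()> {
/// use region::Protection;
/// let base = region::alloc(1, Protection::NONE)?.as_ptr::<()>();
/// let memory = region::alloc_at(base, 1, Protection::READ_WRITE)?;
/// assert_eq!(memory.as_ptr(), base);
/// # Ok(())
/// # }
/// ```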
#[inline]
pub fn alloc_at<T>(address: *const T, size: usize, protection: Protection) -> Result<Allocation> {
let (address, size) = util::round_to_page_boundaries(address, size)?;
unsafe {
let base = os::alloc(address.cast(), size, protection)?;
Ok(Allocation { base, size })
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn alloc_size_is_aligned_to_page_size() -> Result<()> {
let memory = alloc(1, Protection::NONE)?;
assert_eq!(memory.len(), page::size());
Ok(())
}
#[test]
fn alloc_rejects_empty_allocation() {
assert!(matches!(
alloc(0, Protection::NONE),
Err(Error::InvalidParameter(_))
));
}
#[test]
fn alloc_obtains_correct_properties() -> Result<()> {
let memory = alloc(1, Protection::READ_WRITE)?;
let region = crate::query(memory.as_ptr::<()>())?;
assert_eq!(region.protection(), Protection::READ_WRITE);
assert!(region.len() >= memory.len());
assert!(!region.is_guarded());
assert!(!region.is_shared());
assert!(region.is_committed());
Ok(())
}
#[test]
fn alloc_frees_memory_when_dropped() -> Result<()> {
// Designing these tests can be quite tricky sometimes. When a page is
// allocated and then released, a subsequent `query` may allocate memory in
// the same location that has just been freed. For instance, NetBSD's
// kinfo_getvmmap uses `mmap` internally, which can lead to potentially
// confusing outcomes. To mitigate this, an additional buffer region is
// allocated to ensure that any memory allocated indirectly through `query`
// occupies a separate location in memory.
let (start, _buffer) = (
alloc(1, Protection::READ_WRITE)?,
alloc(1, Protection::READ_WRITE)?,
);
let base = start.as_ptr::<()>();
std::mem::drop(start);
let query = crate::query(base);
assert!(matches!(query, Err(Error::UnmappedRegion)));
Ok(())
}
#[test]
fn alloc_can_allocate_unused_region() -> Result<()> {
let base = alloc(1, Protection::NONE)?.as_ptr::<()>();
let memory = alloc_at(base, 1, Protection::READ_WRITE)?;
assert_eq!(memory.as_ptr(), base);
Ok(())
}
#[test]
#[cfg(not(any(target_os = "openbsd", target_os = "netbsd")))]
fn alloc_can_allocate_executable_region() -> Result<()> {
let memory = alloc(1, Protection::WRITE_EXECUTE)?;
assert_eq!(memory.len(), page::size());
Ok(())
}
}
region-3.0.2/src/error.rs
//! Error types and utilities.
use std::error::Error as StdError;
use std::{fmt, io};
/// The result type used by this library.
pub type Result<T> = std::result::Result<T, Error>;
/// A collection of possible errors.
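///
/// # Examples
///
/// A minimal sketch of matching on a returned error:
///
/// ```
/// assert!(matches!(
/// region::alloc(0, region::Protection::NONE),
/// Err(region::Error::InvalidParameter(_))
/// ));
/// ```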
#[derive(Debug)]
pub enum Error {
/// The queried memory is unmapped.
///
/// This does not necessarily mean that the memory region is available for
/// allocation. Besides OS-specific semantics, queried addresses outside of a
/// process' address range are also identified as unmapped regions.
UnmappedRegion,
/// A supplied parameter is invalid.
InvalidParameter(&'static str),
/// A procfs region could not be parsed.
ProcfsInput(String),
/// A system call failed.
SystemCall(io::Error),
/// A macOS kernel call failed.
MachCall(libc::c_int),
}
impl fmt::Display for Error {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::UnmappedRegion => write!(f, "Queried memory is unmapped"),
Error::InvalidParameter(param) => write!(f, "Invalid parameter value: {}", param),
Error::ProcfsInput(ref input) => write!(f, "Invalid procfs input: {}", input),
Error::SystemCall(ref error) => write!(f, "System call failed: {}", error),
Error::MachCall(code) => write!(f, "macOS kernel call failed: {}", code),
}
}
}
impl StdError for Error {}
region-3.0.2/src/lib.rs
#![deny(
clippy::all,
clippy::missing_inline_in_public_items,
clippy::ptr_as_ptr,
clippy::print_stdout,
missing_docs,
nonstandard_style,
unused,
warnings
)]
// Temporarily allow these until bitflags deps is upgraded to 2.x
#![allow(clippy::bad_bit_mask)]
//! Cross-platform virtual memory API.
//!
//! This crate provides a cross-platform Rust API for querying and manipulating
//! virtual memory. It is a thin abstraction, with the underlying interaction
//! implemented using platform-specific APIs (e.g. `VirtualQuery`, `VirtualLock`,
//! `mprotect`, `mlock`). Not all OS-specific quirks are abstracted away, however;
//! for instance, some OSs enforce memory pages to be readable, whilst others may
//! prevent pages from becoming executable (i.e. DEP).
//!
//! This implementation operates with memory pages, which are aligned to the
//! operating system's page size. On some systems, but not all, the system calls
//! for these operations require input to be aligned to a page boundary. To
//! remedy this inconsistency, whenever applicable, input is aligned to its
//! closest page boundary.
//!
//! *Note: a region is a collection of one or more pages laying consecutively in
//! memory, with the same properties.*
//!
//! # Parallelism
//!
//! The properties of virtual memory pages can change at any time while other
//! threads in a process are running. Therefore, to obtain a true picture of a
//! process' virtual memory, all other threads must be halted. Otherwise, a
//! region descriptor only represents a snapshot in time.
//!
//! # Installation
//!
//! This crate is [on crates.io](https://crates.io/crates/region) and can be
//! used by adding `region` to your dependencies in your project's `Cargo.toml`.
//!
//! ```toml
//! [dependencies]
//! region = "3.0.2"
//! ```
//!
//! # Examples
//!
//! - Cross-platform equivalents.
//!
//! ```rust
//! # unsafe fn example() -> region::Result<()> {
//! # use region::Protection;
//! let data = [0xDE, 0xAD, 0xBE, 0xEF];
//!
//! // Page size
//! let pz = region::page::size();
//! let pc = region::page::ceil(data.as_ptr());
//! let pf = region::page::floor(data.as_ptr());
//!
//! // VirtualQuery | '/proc/self/maps'
//! let q = region::query(data.as_ptr())?;
//! let qr = region::query_range(data.as_ptr(), data.len())?;
//!
//! // VirtualAlloc | mmap
//! let alloc = region::alloc(100, Protection::READ_WRITE)?;
//!
//! // VirtualProtect | mprotect
//! region::protect(data.as_ptr(), data.len(), Protection::READ_WRITE_EXECUTE)?;
//!
//! // ... you can also temporarily change one or more pages' protection
//! let handle = region::protect_with_handle(data.as_ptr(), data.len(), Protection::READ_WRITE_EXECUTE)?;
//!
//! // VirtualLock | mlock
//! let guard = region::lock(data.as_ptr(), data.len())?;
//! # Ok(())
//! # }
//! ```
#[macro_use]
extern crate bitflags;
pub use alloc::{alloc, alloc_at, Allocation};
pub use error::{Error, Result};
pub use lock::{lock, unlock, LockGuard};
pub use protect::{protect, protect_with_handle, ProtectGuard};
pub use query::{query, query_range, QueryIter};
mod alloc;
mod error;
mod lock;
mod os;
pub mod page;
mod protect;
mod query;
mod util;
/// A descriptor for a mapped memory region.
///
/// The region encompasses zero or more pages (e.g. OpenBSD can have null-sized
/// virtual pages).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Region {
/// Base address of the region
base: *const (),
/// Whether the region is reserved or not
reserved: bool,
/// Whether the region is guarded or not
guarded: bool,
/// Protection of the region
protection: Protection,
/// Maximum protection of the region
max_protection: Protection,
/// Whether the region is shared or not
shared: bool,
/// Size of the region (multiple of page size)
size: usize,
}
impl Region {
/// Returns a pointer to the region's base address.
///
/// The address is always aligned to the operating system's page size.
#[inline(always)]
pub fn as_ptr<T>(&self) -> *const T {
self.base.cast()
}
/// Returns a mutable pointer to the region's base address.
#[inline(always)]
pub fn as_mut_ptr<T>(&mut self) -> *mut T {
self.base as *mut T
}
/// Returns two raw pointers spanning the region's address space.
///
/// The returned range is half-open, which means that the end pointer points
/// one past the last element of the region. This way, an empty region is
/// represented by two equal pointers, and the difference between the two
/// pointers represents the size of the region.
#[inline(always)]
pub fn as_ptr_range<T>(&self) -> std::ops::Range<*const T> {
let range = self.as_range();
(range.start as *const T)..(range.end as *const T)
}
/// Returns two mutable raw pointers spanning the region's address space.
#[inline(always)]
pub fn as_mut_ptr_range<T>(&mut self) -> std::ops::Range<*mut T> {
let range = self.as_range();
(range.start as *mut T)..(range.end as *mut T)
}
/// Returns a range spanning the region's address space.
#[inline(always)]
pub fn as_range(&self) -> std::ops::Range<usize> {
(self.base as usize)..(self.base as usize).saturating_add(self.size)
}
/// Returns whether the region is committed or not.
///
/// This is always true for all operating systems, the exception being
/// `MEM_RESERVE` pages on Windows.
#[inline(always)]
pub fn is_committed(&self) -> bool {
!self.reserved
}
/// Returns whether the region is readable or not.
#[inline(always)]
pub fn is_readable(&self) -> bool {
self.protection & Protection::READ == Protection::READ
}
/// Returns whether the region is writable or not.
#[inline(always)]
pub fn is_writable(&self) -> bool {
self.protection & Protection::WRITE == Protection::WRITE
}
/// Returns whether the region is executable or not.
#[inline(always)]
pub fn is_executable(&self) -> bool {
self.protection & Protection::EXECUTE == Protection::EXECUTE
}
/// Returns whether the region is guarded or not.
#[inline(always)]
pub fn is_guarded(&self) -> bool {
self.guarded
}
/// Returns whether the region is shared between processes or not.
#[inline(always)]
pub fn is_shared(&self) -> bool {
self.shared
}
/// Returns the size of the region in bytes.
///
/// The size is always aligned to a multiple of the operating system's page
/// size.
#[inline(always)]
pub fn len(&self) -> usize {
self.size
}
/// Returns whether the region is empty or not.
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.size == 0
}
/// Returns the protection attributes of the region.
#[inline(always)]
pub fn protection(&self) -> Protection {
self.protection
}
}
impl Default for Region {
#[inline]
fn default() -> Self {
Self {
base: std::ptr::null(),
reserved: false,
guarded: false,
protection: Protection::NONE,
max_protection: Protection::NONE,
shared: false,
size: 0,
}
}
}
unsafe impl Send for Region {}
unsafe impl Sync for Region {}
bitflags! {
/// A bitflag of zero or more protection attributes.
///
/// Determines the access rights for a specific page and/or region. Some
/// combination of flags may not be applicable, depending on the OS (e.g. macOS
/// enforces executable pages to be readable, OpenBSD requires W^X).
///
/// # OS-Specific Behavior
///
/// On Unix `Protection::from_bits_unchecked` can be used to apply
/// non-standard flags (e.g. `PROT_BTI`).
///
/// # Examples
///
/// ```
/// use region::Protection;
///
/// let combine = Protection::READ | Protection::WRITE;
/// let shorthand = Protection::READ_WRITE;
/// ```
#[derive(Default)]
pub struct Protection: usize {
/// No access allowed at all.
const NONE = 0;
/// Read access; writing and/or executing data will panic.
const READ = (1 << 0);
/// Write access; this flag alone may not be supported on all OSs.
const WRITE = (1 << 1);
/// Execute access; this may not be allowed depending on DEP.
const EXECUTE = (1 << 2);
/// Read and execute shorthand.
const READ_EXECUTE = (Self::READ.bits | Self::EXECUTE.bits);
/// Read and write shorthand.
const READ_WRITE = (Self::READ.bits | Self::WRITE.bits);
/// Read, write and execute shorthand.
const READ_WRITE_EXECUTE = (Self::READ.bits | Self::WRITE.bits | Self::EXECUTE.bits);
/// Write and execute shorthand.
const WRITE_EXECUTE = (Self::WRITE.bits | Self::EXECUTE.bits);
}
}
impl std::fmt::Display for Protection {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
const MAPPINGS: &[(Protection, char)] = &[
(Protection::READ, 'r'),
(Protection::WRITE, 'w'),
(Protection::EXECUTE, 'x'),
];
for (flag, symbol) in MAPPINGS {
if self.contains(*flag) {
write!(f, "{}", symbol)?;
} else {
write!(f, "-")?;
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn protection_implements_display() {
assert_eq!(Protection::READ.to_string(), "r--");
assert_eq!(Protection::READ_WRITE.to_string(), "rw-");
assert_eq!(Protection::READ_WRITE_EXECUTE.to_string(), "rwx");
assert_eq!(Protection::WRITE.to_string(), "-w-");
}
#[cfg(unix)]
pub mod util {
use crate::{page, Protection};
use mmap::{MapOption, MemoryMap};
use std::ops::Deref;
struct AllocatedPages(Vec<MemoryMap>);
impl Deref for AllocatedPages {
type Target = [u8];
fn deref(&self) -> &Self::Target {
unsafe { std::slice::from_raw_parts(self.0[0].data().cast(), self.0.len() * page::size()) }
}
}
#[allow(clippy::fallible_impl_from)]
impl From<Protection> for &'static [MapOption] {
fn from(protection: Protection) -> Self {
match protection {
Protection::NONE => &[],
Protection::READ => &[MapOption::MapReadable],
Protection::READ_WRITE => &[MapOption::MapReadable, MapOption::MapWritable],
Protection::READ_EXECUTE => &[MapOption::MapReadable, MapOption::MapExecutable],
_ => panic!("Unsupported protection {:?}", protection),
}
}
}
/// Allocates one or more sequential pages for each protection flag.
pub fn alloc_pages(pages: &[Protection]) -> impl Deref<Target = [u8]> {
// Find a region that fits all pages
let region = MemoryMap::new(page::size() * pages.len(), &[]).expect("allocating pages");
let mut page_address = region.data();
// Drop the region to ensure it's free
std::mem::forget(region);
// Allocate one page at a time, with explicit page permissions. This would
// normally introduce a race condition, but since only one thread is used
// during testing, it ensures each page remains available (in general,
// only one thread should ever be active when querying and/or manipulating
// memory regions).
let allocated_pages = pages
.iter()
.map(|protection| {
let mut options = vec![MapOption::MapAddr(page_address)];
options.extend_from_slice(Into::into(*protection));
let map = MemoryMap::new(page::size(), &options).expect("allocating page");
assert_eq!(map.data(), page_address);
assert_eq!(map.len(), page::size());
page_address = (page_address as usize + page::size()) as *mut _;
map
})
.collect::<Vec<_>>();
AllocatedPages(allocated_pages)
}
}
#[cfg(windows)]
pub mod util {
use crate::{page, Protection};
use std::ops::Deref;
use windows_sys::Win32::System::Memory::{
VirtualAlloc, VirtualFree, MEM_COMMIT, MEM_RELEASE, MEM_RESERVE, PAGE_NOACCESS,
};
struct AllocatedPages(*const (), usize);
impl Deref for AllocatedPages {
type Target = [u8];
fn deref(&self) -> &Self::Target {
unsafe { std::slice::from_raw_parts(self.0 as *const _, self.1) }
}
}
impl Drop for AllocatedPages {
fn drop(&mut self) {
unsafe {
assert_ne!(VirtualFree(self.0 as *mut _, 0, MEM_RELEASE), 0);
}
}
}
/// Allocates one or more sequential pages for each protection flag.
pub fn alloc_pages(pages: &[Protection]) -> impl Deref<Target = [u8]> {
// Reserve enough memory to fit each page
let total_size = page::size() * pages.len();
let allocation_base =
unsafe { VirtualAlloc(std::ptr::null_mut(), total_size, MEM_RESERVE, PAGE_NOACCESS) };
assert_ne!(allocation_base, std::ptr::null_mut());
let mut page_address = allocation_base;
// Commit one page at a time with the expected permissions
for protection in pages {
let address = unsafe {
VirtualAlloc(
page_address,
page::size(),
MEM_COMMIT,
protection.to_native(),
)
};
assert_eq!(address, page_address);
page_address = (address as usize + page::size()) as *mut _;
}
AllocatedPages(allocation_base as *const _, total_size)
}
}
}
region-3.0.2/src/lock.rs
use crate::{os, util, Result};
/// Locks one or more memory regions to RAM.
///
/// The memory pages within the address range are guaranteed to stay in RAM
/// except for special cases, such as hibernation and memory starvation. It
/// returns a [`LockGuard`], which [`unlock`]s the affected regions once
/// dropped.
///
/// # Parameters
///
/// - The range is `[address, address + size)`
/// - The address is rounded down to the closest page boundary.
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary, relative to the
/// address.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
/// will be returned.
/// - If size is zero,
/// [`Error::InvalidParameter`](crate::Error::InvalidParameter) will be
/// returned.
///
/// # Examples
///
/// ```
/// # fn main() -> region::Result<()> {
/// let data = [0; 100];
/// let _guard = region::lock(data.as_ptr(), data.len())?;
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn lock<T>(address: *const T, size: usize) -> Result<LockGuard> {
let (address, size) = util::round_to_page_boundaries(address, size)?;
os::lock(address.cast(), size).map(|_| LockGuard::new(address, size))
}
/// Unlocks one or more memory regions from RAM.
///
/// If possible, prefer to use [`lock`] combined with the [`LockGuard`].
///
/// # Parameters
///
/// - The range is `[address, address + size)`
/// - The address is rounded down to the closest page boundary.
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary, relative to the
/// address.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
/// will be returned.
/// - If size is zero,
/// [`Error::InvalidParameter`](crate::Error::InvalidParameter) will be
/// returned.
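///
/// # Examples
///
/// A minimal sketch, mirroring this crate's own tests: locking a range,
/// releasing the guard without unlocking, and then unlocking explicitly.
///
/// ```
/// # fn main() -> region::Result<()> {
/// let data = [0; 100];
/// std::mem::forget(region::lock(data.as_ptr(), data.len())?);
/// region::unlock(data.as_ptr(), data.len())
/// # }
/// ```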
#[inline]
pub fn unlock<T>(address: *const T, size: usize) -> Result<()> {
let (address, size) = util::round_to_page_boundaries(address, size)?;
os::unlock(address.cast(), size)
}
/// A RAII implementation of a scoped lock.
///
/// When this structure is dropped (falls out of scope), the virtual lock will be
/// released.
#[must_use]
pub struct LockGuard {
address: *const (),
size: usize,
}
impl LockGuard {
#[inline(always)]
fn new<T>(address: *const T, size: usize) -> Self {
Self {
address: address.cast(),
size,
}
}
}
impl Drop for LockGuard {
#[inline]
fn drop(&mut self) {
let result = os::unlock(self.address, self.size);
debug_assert!(result.is_ok(), "unlocking region: {:?}", result);
}
}
unsafe impl Send for LockGuard {}
unsafe impl Sync for LockGuard {}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::util::alloc_pages;
use crate::{page, Protection};
#[test]
fn lock_mapped_pages_succeeds() -> Result<()> {
let map = alloc_pages(&[Protection::READ_WRITE]);
let _guard = lock(map.as_ptr(), page::size())?;
Ok(())
}
#[test]
fn unlock_mapped_pages_succeeds() -> Result<()> {
let map = alloc_pages(&[Protection::READ_WRITE]);
std::mem::forget(lock(map.as_ptr(), page::size())?);
unlock(map.as_ptr(), page::size())
}
}
region-3.0.2/src/os/freebsd.rs
use crate::{Error, Protection, Region, Result};
use libc::{
c_int, c_void, free, getpid, kinfo_getvmmap, kinfo_vmentry, KVME_PROT_EXEC, KVME_PROT_READ,
KVME_PROT_WRITE, KVME_TYPE_DEFAULT,
};
use std::io;
pub struct QueryIter {
vmmap: *mut kinfo_vmentry,
vmmap_len: usize,
vmmap_index: usize,
upper_bound: usize,
}
impl QueryIter {
pub fn new(origin: *const (), size: usize) -> Result<QueryIter> {
let mut vmmap_len = 0;
let vmmap = unsafe { kinfo_getvmmap(getpid(), &mut vmmap_len) };
if vmmap.is_null() {
return Err(Error::SystemCall(io::Error::last_os_error()));
}
Ok(QueryIter {
vmmap,
vmmap_len: vmmap_len as usize,
vmmap_index: 0,
upper_bound: (origin as usize).saturating_add(size),
})
}
pub fn upper_bound(&self) -> usize {
self.upper_bound
}
}
impl Iterator for QueryIter {
type Item = Result<Region>;
fn next(&mut self) -> Option<Self::Item> {
if self.vmmap_index >= self.vmmap_len {
return None;
}
// Since each entry embeds its own size (`kve_structsize`), iterating this way
// stays future-proof (the struct definition does not need to be updated when
// new fields are added).
let offset = unsafe { self.vmmap_index * (*self.vmmap).kve_structsize as usize };
let entry = unsafe { &*((self.vmmap as *const c_void).add(offset) as *const kinfo_vmentry) };
self.vmmap_index += 1;
Some(Ok(Region {
base: entry.kve_start as *const _,
protection: Protection::from_native(entry.kve_protection),
shared: entry.kve_type == KVME_TYPE_DEFAULT,
size: (entry.kve_end - entry.kve_start) as _,
..Default::default()
}))
}
}
impl Drop for QueryIter {
fn drop(&mut self) {
unsafe { free(self.vmmap as *mut c_void) }
}
}
impl Protection {
fn from_native(protection: c_int) -> Self {
const MAPPINGS: &[(c_int, Protection)] = &[
(KVME_PROT_READ, Protection::READ),
(KVME_PROT_WRITE, Protection::WRITE),
(KVME_PROT_EXEC, Protection::EXECUTE),
];
MAPPINGS
.iter()
.filter(|(flag, _)| protection & *flag == *flag)
.fold(Protection::NONE, |acc, (_, prot)| acc | *prot)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn protection_flags_are_mapped_from_native() {
let rw = KVME_PROT_READ | KVME_PROT_WRITE;
let rwx = rw | KVME_PROT_EXEC;
assert_eq!(Protection::from_native(0), Protection::NONE);
assert_eq!(Protection::from_native(KVME_PROT_READ), Protection::READ);
assert_eq!(Protection::from_native(rw), Protection::READ_WRITE);
assert_eq!(Protection::from_native(rwx), Protection::READ_WRITE_EXECUTE);
}
}
region-3.0.2/src/os/illumos.rs
use crate::{Error, Protection, Region, Result};
use std::fs::File;
use std::io::Read;
pub struct QueryIter {
vmmap: Vec<u8>,
vmmap_index: usize,
upper_bound: usize,
}
impl QueryIter {
pub fn new(origin: *const (), size: usize) -> Result<QueryIter> {
// Do not use a buffered reader here to avoid multiple read(2) calls to the
// proc file, ensuring a consistent snapshot of the virtual memory.
let mut file = File::open("/proc/self/map").map_err(Error::SystemCall)?;
let mut vmmap: Vec<u8> = Vec::with_capacity(8 * PRMAP_SIZE);
let bytes_read = file.read_to_end(&mut vmmap).map_err(Error::SystemCall)?;
if bytes_read % PRMAP_SIZE != 0 {
return Err(Error::ProcfsInput(format!(
"file size {} is not a multiple of prmap_t size ({})",
bytes_read, PRMAP_SIZE
)));
}
Ok(QueryIter {
vmmap,
vmmap_index: 0,
upper_bound: (origin as usize).saturating_add(size),
})
}
pub fn upper_bound(&self) -> usize {
self.upper_bound
}
}
impl Iterator for QueryIter {
type Item = Result<Region>;
fn next(&mut self) -> Option<Self::Item> {
let (pfx, maps, sfx) = unsafe { self.vmmap.align_to::<PrMap>() };
if !pfx.is_empty() || !sfx.is_empty() {
panic!(
"data was not aligned ({}; {}/{}/{})?",
self.vmmap.len(),
pfx.len(),
maps.len(),
sfx.len()
);
}
let map = maps.get(self.vmmap_index)?;
self.vmmap_index += 1;
Some(Ok(Region {
base: map.pr_vaddr,
protection: Protection::from_native(map.pr_mflags),
shared: map.pr_mflags & MA_SHARED != 0,
size: map.pr_size,
..Default::default()
}))
}
}
impl Protection {
fn from_native(protection: i32) -> Self {
const MAPPINGS: &[(i32, Protection)] = &[
(MA_READ, Protection::READ),
(MA_WRITE, Protection::WRITE),
(MA_EXEC, Protection::EXECUTE),
];
MAPPINGS
.iter()
.filter(|(flag, _)| protection & *flag == *flag)
.fold(Protection::NONE, |acc, (_, prot)| acc | *prot)
}
}
// As per proc(4), the file `/proc/$PID/map` contains an array of C structs of
// type `prmap_t`. The layout of this struct, and thus this file, is a stable
// interface.
#[repr(C)]
struct PrMap {
pr_vaddr: *const (),
pr_size: usize,
pr_mapname: [i8; 64],
pr_offset: isize,
pr_mflags: i32,
pr_pagesize: i32,
pr_shmid: i32,
_pr_filler: [i32; 1],
}
const PRMAP_SIZE: usize = std::mem::size_of::<PrMap>();
// These come from <sys/procfs.h>, describing bits in the pr_mflags member:
const MA_EXEC: i32 = 0x1;
const MA_WRITE: i32 = 0x2;
const MA_READ: i32 = 0x4;
const MA_SHARED: i32 = 0x8;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn protection_flags_are_mapped_from_native() {
let rw = MA_READ | MA_WRITE;
let rwx = rw | MA_EXEC;
assert_eq!(Protection::from_native(0), Protection::NONE);
assert_eq!(Protection::from_native(MA_READ), Protection::READ);
assert_eq!(Protection::from_native(rw), Protection::READ_WRITE);
assert_eq!(Protection::from_native(rwx), Protection::READ_WRITE_EXECUTE);
}
}
region-3.0.2/src/os/linux.rs
use crate::{Error, Protection, Region, Result};
use std::fs;
pub struct QueryIter {
proc_maps: String,
upper_bound: usize,
offset: usize,
}
impl QueryIter {
pub fn new(origin: *const (), size: usize) -> Result<QueryIter> {
// Do not use a buffered reader here to avoid multiple read(2) calls to the
// proc file, ensuring a consistent snapshot of the virtual memory.
let proc_maps = fs::read_to_string("/proc/self/maps").map_err(Error::SystemCall)?;
Ok(Self {
proc_maps,
upper_bound: (origin as usize).saturating_add(size),
offset: 0,
})
}
pub fn upper_bound(&self) -> usize {
self.upper_bound
}
}
impl Iterator for QueryIter {
type Item = Result<Region>;
fn next(&mut self) -> Option<Self::Item> {
let (line, _) = self.proc_maps.get(self.offset..)?.split_once('\n')?;
self.offset += line.len() + 1;
Some(parse_procfs_line(line).ok_or_else(|| Error::ProcfsInput(line.to_string())))
}
}
/// Parses flags from /proc/[pid]/maps (e.g. 'r--p').
fn parse_procfs_flags(protection: &str) -> (Protection, bool) {
const MAPPINGS: &[Protection] = &[Protection::READ, Protection::WRITE, Protection::EXECUTE];
let result = protection
.chars()
.zip(MAPPINGS.iter())
.filter(|(c, _)| *c != '-')
.fold(Protection::NONE, |acc, (_, prot)| acc | *prot);
(result, protection.ends_with('s'))
}
/// Parses a line from /proc/[pid]/maps.
fn parse_procfs_line(input: &str) -> Option {
let mut parts = input.split_whitespace();
let mut memory = parts
.next()?
.split('-')
.filter_map(|value| usize::from_str_radix(value, 16).ok());
let (lower, upper) = (memory.next()?, memory.next()?);
let flags = parts.next()?;
let (protection, shared) = parse_procfs_flags(flags);
Some(Region {
base: lower as *const _,
protection,
shared,
size: upper - lower,
..Region::default()
})
}
#[cfg(test)]
mod tests {
use super::{parse_procfs_flags, parse_procfs_line};
use crate::Protection;
#[test]
fn procfs_flags_are_parsed() {
let rwx = Protection::READ_WRITE_EXECUTE;
assert_eq!(parse_procfs_flags("r--s"), (Protection::READ, true));
assert_eq!(parse_procfs_flags("rw-p"), (Protection::READ_WRITE, false));
assert_eq!(parse_procfs_flags("r-xs"), (Protection::READ_EXECUTE, true));
assert_eq!(parse_procfs_flags("rwxs"), (rwx, true));
assert_eq!(parse_procfs_flags("--xp"), (Protection::EXECUTE, false));
assert_eq!(parse_procfs_flags("-w-s"), (Protection::WRITE, true));
}
#[test]
fn procfs_regions_are_parsed() {
let line = "00400000-00409000 r-xs 00000000 08:00 16088 /usr/bin/head";
let region = parse_procfs_line(line).unwrap();
assert_eq!(region.as_ptr(), 0x40_0000 as *mut ());
assert_eq!(region.protection(), Protection::READ_EXECUTE);
assert_eq!(region.len(), 0x9000);
assert!(!region.is_guarded());
assert!(region.is_shared());
}
}
region-3.0.2/src/os/macos.rs
use crate::{Error, Protection, Region, Result};
use mach2::vm_prot::*;
pub struct QueryIter {
region_address: mach2::vm_types::mach_vm_address_t,
upper_bound: usize,
}
impl QueryIter {
pub fn new(origin: *const (), size: usize) -> Result<QueryIter> {
Ok(QueryIter {
region_address: origin as _,
upper_bound: (origin as usize).saturating_add(size),
})
}
pub fn upper_bound(&self) -> usize {
self.upper_bound
}
}
impl Iterator for QueryIter {
type Item = Result<Region>;
fn next(&mut self) -> Option<Self::Item> {
// The possible memory share modes
const SHARE_MODES: [u8; 3] = [
mach2::vm_region::SM_SHARED,
mach2::vm_region::SM_TRUESHARED,
mach2::vm_region::SM_SHARED_ALIASED,
];
// Check if the search area has been passed
if self.region_address as usize >= self.upper_bound {
return None;
}
let mut region_size: mach2::vm_types::mach_vm_size_t = 0;
let mut info: mach2::vm_region::vm_region_submap_info_64 =
mach2::vm_region::vm_region_submap_info_64::default();
let mut depth = u32::MAX;
let result = unsafe {
mach2::vm::mach_vm_region_recurse(
mach2::traps::mach_task_self(),
&mut self.region_address,
&mut region_size,
&mut depth,
(&mut info as *mut _) as mach2::vm_region::vm_region_recurse_info_t,
&mut mach2::vm_region::vm_region_submap_info_64::count(),
)
};
match result {
// The end of the process' address space has been reached
mach2::kern_return::KERN_INVALID_ADDRESS => None,
mach2::kern_return::KERN_SUCCESS => {
// The returned region may have a different address than the request
if self.region_address as usize >= self.upper_bound {
return None;
}
let region = Region {
base: self.region_address as *const _,
guarded: (info.user_tag == mach2::vm_statistics::VM_MEMORY_GUARD),
protection: Protection::from_native(info.protection),
max_protection: Protection::from_native(info.max_protection),
shared: SHARE_MODES.contains(&info.share_mode),
size: region_size as usize,
..Default::default()
};
self.region_address = self.region_address.saturating_add(region_size);
Some(Ok(region))
}
_ => Some(Err(Error::MachCall(result))),
}
}
}
impl Protection {
fn from_native(protection: vm_prot_t) -> Self {
const MAPPINGS: &[(vm_prot_t, Protection)] = &[
(VM_PROT_READ, Protection::READ),
(VM_PROT_WRITE, Protection::WRITE),
(VM_PROT_EXECUTE, Protection::EXECUTE),
];
MAPPINGS
.iter()
.filter(|(flag, _)| protection & *flag == *flag)
.fold(Protection::NONE, |acc, (_, prot)| acc | *prot)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn protection_flags_are_mapped_from_native() {
let rw = VM_PROT_READ | VM_PROT_WRITE;
let rwx = rw | VM_PROT_EXECUTE;
assert_eq!(Protection::from_native(0), Protection::NONE);
assert_eq!(Protection::from_native(VM_PROT_READ), Protection::READ);
assert_eq!(Protection::from_native(rw), Protection::READ_WRITE);
assert_eq!(Protection::from_native(rwx), Protection::READ_WRITE_EXECUTE);
}
}
region-3.0.2/src/os/mod.rs
#[cfg(windows)]
mod windows;
#[cfg(windows)]
pub use self::windows::*;
#[cfg(unix)]
mod unix;
#[cfg(unix)]
pub use self::unix::*;
#[cfg(any(target_os = "macos", target_os = "ios"))]
mod macos;
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub use self::macos::*;
#[cfg(any(target_os = "linux", target_os = "android"))]
mod linux;
#[cfg(any(target_os = "linux", target_os = "android"))]
pub use self::linux::*;
#[cfg(target_os = "freebsd")]
mod freebsd;
#[cfg(target_os = "freebsd")]
pub use self::freebsd::*;
#[cfg(target_os = "illumos")]
mod illumos;
#[cfg(target_os = "illumos")]
pub use self::illumos::*;
#[cfg(target_os = "openbsd")]
mod openbsd;
#[cfg(target_os = "openbsd")]
pub use self::openbsd::*;
#[cfg(target_os = "netbsd")]
mod netbsd;
#[cfg(target_os = "netbsd")]
pub use self::netbsd::*;
region-3.0.2/src/os/netbsd.rs
use crate::{Error, Protection, Region, Result};
use libc::{c_char, c_int, c_void, free, getpid, pid_t};
use std::io;
pub struct QueryIter {
vmmap: *mut kinfo_vmentry,
vmmap_len: usize,
vmmap_index: usize,
upper_bound: usize,
}
impl QueryIter {
pub fn new(origin: *const (), size: usize) -> Result<QueryIter> {
let mut vmmap_len = 0;
let vmmap = unsafe { kinfo_getvmmap(getpid(), &mut vmmap_len) };
if vmmap.is_null() {
return Err(Error::SystemCall(io::Error::last_os_error()));
}
Ok(QueryIter {
vmmap,
vmmap_len: vmmap_len as usize,
vmmap_index: 0,
upper_bound: (origin as usize).saturating_add(size),
})
}
pub fn upper_bound(&self) -> usize {
self.upper_bound
}
}
impl Iterator for QueryIter {
type Item = Result<Region>;
fn next(&mut self) -> Option<Self::Item> {
if self.vmmap_index >= self.vmmap_len {
return None;
}
let offset = self.vmmap_index * std::mem::size_of::<kinfo_vmentry>();
let entry = unsafe { &*((self.vmmap as *const c_void).add(offset) as *const kinfo_vmentry) };
self.vmmap_index += 1;
Some(Ok(Region {
base: entry.kve_start as *const _,
protection: Protection::from_native(entry.kve_protection as i32),
max_protection: Protection::from_native(entry.kve_max_protection as i32),
shared: (entry.kve_flags & KVME_FLAG_COW as u32) == 0,
size: (entry.kve_end - entry.kve_start) as _,
..Default::default()
}))
}
}
impl Drop for QueryIter {
fn drop(&mut self) {
unsafe { free(self.vmmap as *mut c_void) }
}
}
impl Protection {
fn from_native(protection: c_int) -> Self {
const MAPPINGS: &[(c_int, Protection)] = &[
(KVME_PROT_READ, Protection::READ),
(KVME_PROT_WRITE, Protection::WRITE),
(KVME_PROT_EXEC, Protection::EXECUTE),
];
MAPPINGS
.iter()
.filter(|(flag, _)| protection & *flag == *flag)
.fold(Protection::NONE, |acc, (_, prot)| acc | *prot)
}
}
// These definitions come from <sys/sysctl.h>, describing data returned by the
// `kinfo_getvmmap` system call.
#[repr(C)]
struct kinfo_vmentry {
kve_start: u64,
kve_end: u64,
kve_offset: u64,
kve_type: u32,
kve_flags: u32,
kve_count: u32,
kve_wired_count: u32,
kve_advice: u32,
kve_attributes: u32,
kve_protection: u32,
kve_max_protection: u32,
kve_ref_count: u32,
kve_inheritance: u32,
kve_vn_fileid: u64,
kve_vn_size: u64,
kve_vn_fsid: u64,
kve_vn_rdev: u64,
kve_vn_type: u32,
kve_vn_mode: u32,
kve_path: [[c_char; 32]; 32],
}
const KVME_FLAG_COW: c_int = 0x00000001;
const KVME_PROT_READ: c_int = 0x00000001;
const KVME_PROT_WRITE: c_int = 0x00000002;
const KVME_PROT_EXEC: c_int = 0x00000004;
#[link(name = "util")]
extern "C" {
fn kinfo_getvmmap(pid: pid_t, cntp: *mut c_int) -> *mut kinfo_vmentry;
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn protection_flags_are_mapped_from_native() {
let rw = KVME_PROT_READ | KVME_PROT_WRITE;
let rwx = rw | KVME_PROT_EXEC;
assert_eq!(Protection::from_native(0), Protection::NONE);
assert_eq!(Protection::from_native(KVME_PROT_READ), Protection::READ);
assert_eq!(Protection::from_native(rw), Protection::READ_WRITE);
assert_eq!(Protection::from_native(rwx), Protection::READ_WRITE_EXECUTE);
}
}
region-3.0.2/src/os/openbsd.rs
use crate::{Error, Protection, Region, Result};
use libc::{c_int, c_uint, c_ulong, getpid, sysctl, CTL_KERN, KERN_PROC_VMMAP};
use std::io;
pub struct QueryIter {
mib: [c_int; 3],
vmentry: kinfo_vmentry,
previous_boundary: usize,
upper_bound: usize,
}
impl QueryIter {
pub fn new(origin: *const (), size: usize) -> Result<QueryIter> {
Ok(QueryIter {
mib: [CTL_KERN, KERN_PROC_VMMAP, unsafe { getpid() }],
vmentry: unsafe { std::mem::zeroed() },
upper_bound: (origin as usize).saturating_add(size),
previous_boundary: 0,
})
}
pub fn upper_bound(&self) -> usize {
self.upper_bound
}
}
impl Iterator for QueryIter {
type Item = Result<Region>;
fn next(&mut self) -> Option<Self::Item> {
let mut len = std::mem::size_of::<kinfo_vmentry>();
// Although it would be preferred to query the information for all virtual
// pages at once, the system call does not seem to respond consistently. If
// called once during a process' lifetime, it returns all pages, but if
// called again, it returns an empty buffer. This may be due to an
// oversight, but in this case, the solution is to query one memory region
// at a time.
let result = unsafe {
sysctl(
self.mib.as_ptr(),
self.mib.len() as c_uint,
&mut self.vmentry as *mut _ as *mut _,
&mut len,
std::ptr::null_mut(),
0,
)
};
if result == -1 {
return Some(Err(Error::SystemCall(io::Error::last_os_error())));
}
if len == 0 || self.vmentry.kve_end as usize == self.previous_boundary {
return None;
}
let region = Region {
base: self.vmentry.kve_start as *const _,
protection: Protection::from_native(self.vmentry.kve_protection),
max_protection: Protection::from_native(self.vmentry.kve_max_protection),
shared: (self.vmentry.kve_etype & KVE_ET_COPYONWRITE) == 0,
size: (self.vmentry.kve_end - self.vmentry.kve_start) as _,
..Default::default()
};
// Since OpenBSD returns the first region whose base address is at, or after
// `kve_start`, the address can simply be incremented by one to retrieve the
// next region.
self.vmentry.kve_start += 1;
self.previous_boundary = self.vmentry.kve_end as usize;
Some(Ok(region))
}
}
impl Protection {
fn from_native(protection: c_int) -> Self {
const MAPPINGS: &[(c_int, Protection)] = &[
(KVE_PROT_READ, Protection::READ),
(KVE_PROT_WRITE, Protection::WRITE),
(KVE_PROT_EXEC, Protection::EXECUTE),
];
MAPPINGS
.iter()
.filter(|(flag, _)| protection & *flag == *flag)
.fold(Protection::NONE, |acc, (_, prot)| acc | *prot)
}
}
// These definitions come from <sys/sysctl.h>, describing data returned by the
// `KERN_PROC_VMMAP` system call.
#[repr(C)]
struct kinfo_vmentry {
kve_start: c_ulong,
kve_end: c_ulong,
kve_guard: c_ulong,
kve_fspace: c_ulong,
kve_fspace_augment: c_ulong,
kve_offset: u64,
kve_wired_count: c_int,
kve_etype: c_int,
kve_protection: c_int,
kve_max_protection: c_int,
kve_advice: c_int,
kve_inheritance: c_int,
kve_flags: u8,
}
const KVE_PROT_READ: c_int = 1;
const KVE_PROT_WRITE: c_int = 2;
const KVE_PROT_EXEC: c_int = 4;
const KVE_ET_COPYONWRITE: c_int = 4;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn protection_flags_are_mapped_from_native() {
let rw = KVE_PROT_READ | KVE_PROT_WRITE;
let rwx = rw | KVE_PROT_EXEC;
assert_eq!(Protection::from_native(0), Protection::NONE);
assert_eq!(Protection::from_native(KVE_PROT_READ), Protection::READ);
assert_eq!(Protection::from_native(rw), Protection::READ_WRITE);
assert_eq!(Protection::from_native(rwx), Protection::READ_WRITE_EXECUTE);
}
}
region-3.0.2/src/os/unix.rs
use crate::{Error, Protection, Result};
use libc::{MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE};
use libc::{PROT_EXEC, PROT_READ, PROT_WRITE};
use std::io;
pub fn page_size() -> usize {
unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
}
pub unsafe fn alloc(base: *const (), size: usize, protection: Protection) -> Result<*const ()> {
let mut native_prot = protection.to_native();
// This adjustment ensures that the behavior of memory allocation is
// orthogonal across all platforms by aligning NetBSD's protection flags and
// PaX behavior with those of other operating systems.
if cfg!(target_os = "netbsd") {
let max_protection = (PROT_READ | PROT_WRITE | PROT_EXEC) << 3;
native_prot |= max_protection;
}
let mut flags = MAP_PRIVATE | MAP_ANON;
if !base.is_null() {
flags |= MAP_FIXED;
}
#[cfg(all(target_vendor = "apple", target_arch = "aarch64"))]
if matches!(
protection,
Protection::WRITE_EXECUTE | Protection::READ_WRITE_EXECUTE
) {
// In a hardened context, MAP_JIT is necessary (on arm64) to allow W/X'ed regions.
flags |= libc::MAP_JIT;
}
match libc::mmap(base as *mut _, size, native_prot, flags, -1, 0) {
MAP_FAILED => Err(Error::SystemCall(io::Error::last_os_error())),
address => Ok(address as *const ()),
}
}
pub unsafe fn free(base: *const (), size: usize) -> Result<()> {
match libc::munmap(base as *mut _, size) {
0 => Ok(()),
_ => Err(Error::SystemCall(io::Error::last_os_error())),
}
}
pub unsafe fn protect(base: *const (), size: usize, protection: Protection) -> Result<()> {
match libc::mprotect(base as *mut _, size, protection.to_native()) {
0 => Ok(()),
_ => Err(Error::SystemCall(io::Error::last_os_error())),
}
}
pub fn lock(base: *const (), size: usize) -> Result<()> {
match unsafe { libc::mlock(base.cast(), size) } {
0 => Ok(()),
_ => Err(Error::SystemCall(io::Error::last_os_error())),
}
}
pub fn unlock(base: *const (), size: usize) -> Result<()> {
match unsafe { libc::munlock(base.cast(), size) } {
0 => Ok(()),
_ => Err(Error::SystemCall(io::Error::last_os_error())),
}
}
impl Protection {
fn to_native(self) -> libc::c_int {
// This is directly mapped to its native counterpart to allow users to
// include non-standard flags with `Protection::from_bits_unchecked`.
self.bits as libc::c_int
}
}
#[cfg(test)]
mod tests {
use super::*;
use libc::PROT_NONE;
#[test]
fn protection_flags_match_unix_constants() {
assert_eq!(Protection::NONE.bits, PROT_NONE as usize);
assert_eq!(Protection::READ.bits, PROT_READ as usize);
assert_eq!(Protection::WRITE.bits, PROT_WRITE as usize);
assert_eq!(
Protection::READ_WRITE_EXECUTE,
Protection::from_bits_truncate((PROT_READ | PROT_WRITE | PROT_EXEC) as usize)
);
}
#[test]
fn protection_flags_are_mapped_to_native() {
let rwx = PROT_READ | PROT_WRITE | PROT_EXEC;
assert_eq!(Protection::NONE.to_native(), 0);
assert_eq!(Protection::READ.to_native(), PROT_READ);
assert_eq!(Protection::READ_WRITE.to_native(), PROT_READ | PROT_WRITE);
assert_eq!(Protection::READ_WRITE_EXECUTE.to_native(), rwx);
}
}
region-3.0.2/src/os/windows.rs
use crate::{Error, Protection, Region, Result};
use std::cmp::{max, min};
use std::ffi::c_void;
use std::io;
use std::mem::{size_of, MaybeUninit};
use std::sync::Once;
use windows_sys::Win32::System::Memory::{
VirtualAlloc, VirtualFree, VirtualLock, VirtualProtect, VirtualQuery, VirtualUnlock,
MEMORY_BASIC_INFORMATION, MEM_COMMIT, MEM_PRIVATE, MEM_RELEASE, MEM_RESERVE, PAGE_EXECUTE,
PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE, PAGE_EXECUTE_WRITECOPY, PAGE_GUARD, PAGE_NOACCESS,
PAGE_NOCACHE, PAGE_READONLY, PAGE_READWRITE, PAGE_WRITECOMBINE, PAGE_WRITECOPY,
};
use windows_sys::Win32::System::SystemInformation::{GetNativeSystemInfo, SYSTEM_INFO};
pub struct QueryIter {
region_address: usize,
upper_bound: usize,
}
impl QueryIter {
pub fn new(origin: *const (), size: usize) -> Result<QueryIter> {
let system = system_info();
Ok(QueryIter {
region_address: max(origin as usize, system.lpMinimumApplicationAddress as usize),
upper_bound: min(
(origin as usize).saturating_add(size),
system.lpMaximumApplicationAddress as usize,
),
})
}
pub fn upper_bound(&self) -> usize {
self.upper_bound
}
}
impl Iterator for QueryIter {
type Item = Result<Region>;
fn next(&mut self) -> Option<Self::Item> {
let mut info: MEMORY_BASIC_INFORMATION = unsafe { std::mem::zeroed() };
while self.region_address < self.upper_bound {
let bytes = unsafe {
VirtualQuery(
self.region_address as *mut c_void,
&mut info,
size_of::<MEMORY_BASIC_INFORMATION>(),
)
};
if bytes == 0 {
return Some(Err(Error::SystemCall(io::Error::last_os_error())));
}
self.region_address = (info.BaseAddress as usize).saturating_add(info.RegionSize);
// Only mapped memory regions are of interest
if info.State == MEM_RESERVE || info.State == MEM_COMMIT {
let mut region = Region {
base: info.BaseAddress as *const _,
reserved: info.State != MEM_COMMIT,
guarded: (info.Protect & PAGE_GUARD) != 0,
shared: (info.Type & MEM_PRIVATE) == 0,
size: info.RegionSize as usize,
..Default::default()
};
if region.is_committed() {
region.protection = Protection::from_native(info.Protect);
}
return Some(Ok(region));
}
}
None
}
}
pub fn page_size() -> usize {
system_info().dwPageSize as usize
}
pub unsafe fn alloc(base: *const (), size: usize, protection: Protection) -> Result<*const ()> {
let allocation = VirtualAlloc(
base as *mut c_void,
size,
MEM_COMMIT | MEM_RESERVE,
protection.to_native(),
);
if allocation.is_null() {
return Err(Error::SystemCall(io::Error::last_os_error()));
}
Ok(allocation as *const ())
}
pub unsafe fn free(base: *const (), _size: usize) -> Result<()> {
match VirtualFree(base as *mut c_void, 0, MEM_RELEASE) {
0 => Err(Error::SystemCall(io::Error::last_os_error())),
_ => Ok(()),
}
}
pub unsafe fn protect(base: *const (), size: usize, protection: Protection) -> Result<()> {
let result = VirtualProtect(base as *mut c_void, size, protection.to_native(), &mut 0);
if result == 0 {
Err(Error::SystemCall(io::Error::last_os_error()))
} else {
Ok(())
}
}
pub fn lock(base: *const (), size: usize) -> Result<()> {
let result = unsafe { VirtualLock(base as *mut c_void, size) };
if result == 0 {
Err(Error::SystemCall(io::Error::last_os_error()))
} else {
Ok(())
}
}
pub fn unlock(base: *const (), size: usize) -> Result<()> {
let result = unsafe { VirtualUnlock(base as *mut c_void, size) };
if result == 0 {
Err(Error::SystemCall(io::Error::last_os_error()))
} else {
Ok(())
}
}
fn system_info() -> &'static SYSTEM_INFO {
static INIT: Once = Once::new();
static mut INFO: MaybeUninit<SYSTEM_INFO> = MaybeUninit::uninit();
unsafe {
INIT.call_once(|| GetNativeSystemInfo(INFO.as_mut_ptr()));
&*INFO.as_ptr()
}
}
impl Protection {
fn from_native(protection: u32) -> Self {
// Ignore unsupported flags (TODO: Preserve this information?)
let ignored = PAGE_GUARD | PAGE_NOCACHE | PAGE_WRITECOMBINE;
match protection & !ignored {
PAGE_EXECUTE => Protection::EXECUTE,
PAGE_EXECUTE_READ => Protection::READ_EXECUTE,
PAGE_EXECUTE_READWRITE => Protection::READ_WRITE_EXECUTE,
PAGE_EXECUTE_WRITECOPY => Protection::READ_WRITE_EXECUTE,
PAGE_NOACCESS => Protection::NONE,
PAGE_READONLY => Protection::READ,
PAGE_READWRITE => Protection::READ_WRITE,
PAGE_WRITECOPY => Protection::READ_WRITE,
_ => unreachable!("Protection: 0x{:X}", protection),
}
}
pub(crate) fn to_native(self) -> u32 {
match self {
Protection::NONE => PAGE_NOACCESS,
Protection::READ => PAGE_READONLY,
Protection::EXECUTE => PAGE_EXECUTE,
Protection::READ_EXECUTE => PAGE_EXECUTE_READ,
Protection::READ_WRITE => PAGE_READWRITE,
Protection::READ_WRITE_EXECUTE => PAGE_EXECUTE_READWRITE,
Protection::WRITE_EXECUTE => PAGE_EXECUTE_READWRITE,
_ => unreachable!("Protection: {:?}", self),
}
}
}
region-3.0.2/src/page.rs
//! Page related functions.
use crate::os;
use std::sync::Once;
/// Returns the operating system's page size.
///
/// This function uses an internally cached page size, and can be called
/// repeatedly without incurring a significant performance penalty.
///
/// # Examples
///
/// ```
/// # use region::page;
/// let size = page::size(); // Most likely 4096
/// ```
#[inline]
pub fn size() -> usize {
static INIT: Once = Once::new();
static mut PAGE_SIZE: usize = 0;
unsafe {
INIT.call_once(|| PAGE_SIZE = os::page_size());
PAGE_SIZE
}
}
/// Rounds an address down to its closest page boundary.
///
/// # Examples
///
/// ```
/// # use region::page;
/// let unaligned_pointer = (page::size() + 1) as *const ();
///
/// assert_eq!(page::floor(unaligned_pointer), page::size() as *const _);
/// ```
#[inline]
pub fn floor<T>(address: *const T) -> *const T {
(address as usize & !(size() - 1)) as *const T
}
/// Rounds an address up to its closest page boundary.
///
/// # Examples
///
/// ```
/// # use region::page;
/// let unaligned_pointer = (page::size() - 1) as *const ();
///
/// assert_eq!(page::ceil(unaligned_pointer), page::size() as *const _);
/// ```
#[inline]
pub fn ceil<T>(address: *const T) -> *const T {
match (address as usize).checked_add(size()) {
Some(offset) => ((offset - 1) & !(size() - 1)) as *const T,
None => floor(address),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn page_size_is_reasonable() {
let pz = size();
assert!(pz > 0);
assert_eq!(pz % 2, 0);
assert_eq!(pz, size());
}
#[test]
fn page_rounding_works() {
let pz = size();
let point = 1 as *const ();
assert_eq!(floor(point) as usize, 0);
assert_eq!(floor(pz as *const ()) as usize, pz);
assert_eq!(floor(usize::max_value() as *const ()) as usize % pz, 0);
assert_eq!(ceil(point) as usize, pz);
assert_eq!(ceil(pz as *const ()) as usize, pz);
assert_eq!(ceil(usize::max_value() as *const ()) as usize % pz, 0);
}
}
region-3.0.2/src/protect.rs
use crate::{os, util, Protection, QueryIter, Region, Result};
/// Changes the memory protection of one or more pages.
///
/// The address range may overlap one or more pages, and if so, all pages
/// spanning the range will be modified. The previous protection flags are not
/// preserved (if you desire to preserve the protection flags, use
/// [`protect_with_handle`]).
///
/// # Parameters
///
/// - The range is `[address, address + size)`
/// - The address is rounded down to the closest page boundary.
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary, relative to the
/// address.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
/// will be returned.
/// - If size is zero,
/// [`Error::InvalidParameter`](crate::Error::InvalidParameter) will be
/// returned.
///
/// # Safety
///
/// This function can violate memory safety in a myriad of ways. Read-only memory
/// can become writable, the executable properties of code segments can be
/// removed, etc.
///
/// # Examples
///
/// - Make an array of x86 assembly instructions executable.
///
/// ```
/// # fn main() -> region::Result<()> {
/// # if cfg!(any(target_arch = "x86", target_arch = "x86_64"))
/// # && !cfg!(any(target_os = "openbsd", target_os = "netbsd")) {
/// use region::Protection;
/// let ret5 = [0xB8, 0x05, 0x00, 0x00, 0x00, 0xC3u8];
///
/// let x: extern "C" fn() -> i32 = unsafe {
/// region::protect(ret5.as_ptr(), ret5.len(), region::Protection::READ_WRITE_EXECUTE)?;
/// std::mem::transmute(ret5.as_ptr())
/// };
///
/// assert_eq!(x(), 5);
/// # }
/// # Ok(())
/// # }
/// ```
#[inline]
pub unsafe fn protect<T>(address: *const T, size: usize, protection: Protection) -> Result<()> {
let (address, size) = util::round_to_page_boundaries(address, size)?;
os::protect(address.cast(), size, protection)
}
/// Temporarily changes the memory protection of one or more pages.
///
/// The address range may overlap one or more pages, and if so, all pages within
/// the range will be modified. The protection flag for each page will be reset
/// once the handle is dropped. To conditionally prevent a reset, use
/// [`std::mem::forget`].
///
/// This function uses [`query_range`](crate::query_range) internally and is
/// therefore less performant than [`protect`]. Use this function only if you
/// need the original memory protection flags of one or more regions to be
/// restored after your changes.
///
/// # Guard
///
/// Remember not to conflate the *black hole* syntax with the ignored, but
/// unused, variable syntax. Otherwise the [`ProtectGuard`] instantly resets the
/// protection flags of all pages.
///
/// ```ignore
/// let _ = protect_with_handle(...); // Pages are instantly reset
/// let _guard = protect_with_handle(...); // Pages are reset once `_guard` is dropped.
/// ```
///
/// # Parameters
///
/// - The range is `[address, address + size)`
/// - The address is rounded down to the closest page boundary.
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary, relative to the
/// address.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
/// will be returned.
/// - If size is zero,
/// [`Error::InvalidParameter`](crate::Error::InvalidParameter) will be
/// returned.
///
/// # Safety
///
/// See [protect].
#[allow(clippy::missing_inline_in_public_items)]
pub unsafe fn protect_with_handle<T>(
address: *const T,
size: usize,
protection: Protection,
) -> Result<ProtectGuard> {
let (address, size) = util::round_to_page_boundaries(address, size)?;
// Preserve the current regions' flags
let mut regions = QueryIter::new(address, size)?.collect::<Result<Vec<_>>>()?;
// Apply the desired protection flags
protect(address, size, protection)?;
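// The queried regions may extend beyond `[address, address + size)`; trim the
// first and last entries so the guard only restores pages that were modified.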
if let Some(region) = regions.first_mut() {
// Offset the lower region to the smallest page boundary
region.base = address.cast();
region.size -= address as usize - region.as_range().start;
}
if let Some(region) = regions.last_mut() {
// Truncate the upper region to the smallest page boundary
let protect_end = address as usize + size;
region.size -= region.as_range().end - protect_end;
}
Ok(ProtectGuard::new(regions))
}
/// A RAII implementation of a scoped protection guard.
///
/// When this structure is dropped (falls out of scope), the memory regions'
/// protection will be reset.
#[must_use]
pub struct ProtectGuard {
regions: Vec<Region>,
}
impl ProtectGuard {
#[inline(always)]
fn new(regions: Vec<Region>) -> Self {
Self { regions }
}
}
impl Drop for ProtectGuard {
#[inline]
fn drop(&mut self) {
let result = self
.regions
.iter()
.try_for_each(|region| unsafe { protect(region.base, region.size, region.protection) });
debug_assert!(result.is_ok(), "restoring region protection: {:?}", result);
}
}
unsafe impl Send for ProtectGuard {}
unsafe impl Sync for ProtectGuard {}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::util::alloc_pages;
use crate::{page, query, query_range};
#[test]
fn protect_null_fails() {
assert!(unsafe { protect(std::ptr::null::<()>(), 0, Protection::NONE) }.is_err());
}
#[test]
#[cfg(not(any(
target_os = "openbsd",
target_os = "netbsd",
all(target_vendor = "apple", target_arch = "aarch64")
)))]
fn protect_can_alter_text_segments() {
#[allow(clippy::ptr_as_ptr)]
let address = &mut protect_can_alter_text_segments as *mut _ as *mut u8;
unsafe {
protect(address, 1, Protection::READ_WRITE_EXECUTE).unwrap();
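// With the page now writable, overwrite the first byte of this function's
// machine code (0x90 happens to be the single-byte x86 `nop` instruction).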
*address = 0x90;
}
}
#[test]
fn protect_updates_both_pages_for_straddling_range() -> Result<()> {
let pz = page::size();
// Create a page boundary with different protection flags in the upper and
// lower span, so the intermediate region sizes are fixed to one page.
let map = alloc_pages(&[
Protection::READ,
Protection::READ_EXECUTE,
Protection::READ_WRITE,
Protection::READ,
]);
let exec_page = unsafe { map.as_ptr().add(pz) };
let exec_page_end = unsafe { exec_page.add(pz - 1) };
// Change the protection over two page boundaries
unsafe {
protect(exec_page_end, 2, Protection::NONE)?;
}
// Query the two inner pages
let result = query_range(exec_page, pz * 2)?.collect::<Result<Vec<_>>>()?;
// On some OSs the pages are merged into one region
assert!(matches!(result.len(), 1 | 2));
assert_eq!(result.iter().map(Region::len).sum::<usize>(), pz * 2);
assert_eq!(result[0].protection(), Protection::NONE);
Ok(())
}
#[test]
fn protect_has_inclusive_lower_and_exclusive_upper_bound() -> Result<()> {
let map = alloc_pages(&[
Protection::READ_WRITE,
Protection::READ,
Protection::READ_WRITE,
Protection::READ,
]);
// Alter the protection of the second page
let second_page = unsafe { map.as_ptr().add(page::size()) };
unsafe {
let second_page_end = second_page.offset(page::size() as isize - 1);
protect(second_page_end, 1, Protection::NONE)?;
}
let regions = query_range(map.as_ptr(), page::size() * 3)?.collect::<Result<Vec<_>>>()?;
assert_eq!(regions.len(), 3);
assert_eq!(regions[0].protection(), Protection::READ_WRITE);
assert_eq!(regions[1].protection(), Protection::NONE);
assert_eq!(regions[2].protection(), Protection::READ_WRITE);
// Alter the protection of '2nd_page_start .. 2nd_page_end + 1'
unsafe {
protect(second_page, page::size() + 1, Protection::READ_EXECUTE)?;
}
let regions = query_range(map.as_ptr(), page::size() * 3)?.collect::<Result<Vec<_>>>()?;
assert!(regions.len() >= 2);
assert_eq!(regions[0].protection(), Protection::READ_WRITE);
assert_eq!(regions[1].protection(), Protection::READ_EXECUTE);
assert!(regions[1].len() >= page::size());
Ok(())
}
#[test]
fn protect_with_handle_resets_protection() -> Result<()> {
let map = alloc_pages(&[Protection::READ]);
unsafe {
let _handle = protect_with_handle(map.as_ptr(), page::size(), Protection::READ_WRITE)?;
assert_eq!(query(map.as_ptr())?.protection(), Protection::READ_WRITE);
};
assert_eq!(query(map.as_ptr())?.protection(), Protection::READ);
Ok(())
}
#[test]
fn protect_with_handle_only_alters_protection_of_affected_pages() -> Result<()> {
let pages = [
Protection::READ_WRITE,
Protection::READ,
Protection::READ_WRITE,
Protection::READ_EXECUTE,
Protection::NONE,
];
let map = alloc_pages(&pages);
let second_page = unsafe { map.as_ptr().add(page::size()) };
let region_size = page::size() * 3;
unsafe {
let _handle = protect_with_handle(second_page, region_size, Protection::NONE)?;
let region = query(second_page)?;
assert_eq!(region.protection(), Protection::NONE);
assert_eq!(region.as_ptr(), second_page);
}
let regions =
query_range(map.as_ptr(), page::size() * pages.len())?.collect::<Result<Vec<_>>>()?;
assert_eq!(regions.len(), 5);
assert_eq!(regions[0].as_ptr(), map.as_ptr());
for i in 0..pages.len() {
assert_eq!(regions[i].protection(), pages[i]);
}
Ok(())
}
}
region-3.0.2/src/query.rs
use crate::{os, util, Error, Region, Result};
/// An iterator over the [`Region`]s that encompass an address range.
///
/// This `struct` is created by [`query_range`]. See its documentation for more.
pub struct QueryIter {
iterator: Option<os::QueryIter>,
origin: *const (),
}
impl QueryIter {
pub(crate) fn new<T>(origin: *const T, size: usize) -> Result<QueryIter> {
let origin = origin.cast();
os::QueryIter::new(origin, size).map(|iterator| Self {
iterator: Some(iterator),
origin,
})
}
}
impl Iterator for QueryIter {
type Item = Result<Region>;
/// Advances the iterator and returns the next region.
///
/// If the iterator has been exhausted (i.e. all [`Region`]s have been
/// queried), or if an error is encountered during iteration, all further
/// invocations will return [`None`] (in the case of an error, the error will
/// be the last item that is yielded before the iterator is fused).
#[allow(clippy::missing_inline_in_public_items)]
fn next(&mut self) -> Option<Self::Item> {
let regions = self.iterator.as_mut()?;
while let Some(result) = regions.next() {
match result {
Ok(region) => {
let range = region.as_range();
// Skip the region if it precedes the queried range
if range.end <= self.origin as usize {
continue;
}
// Stop iteration if the region is past the queried range
if range.start >= regions.upper_bound() {
break;
}
return Some(Ok(region));
}
Err(error) => {
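// Fuse the iterator on error: dropping the inner OS iterator makes every
// subsequent call return `None`, as documented above.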
self.iterator.take();
return Some(Err(error));
}
}
}
self.iterator.take();
None
}
}
impl std::iter::FusedIterator for QueryIter {}
unsafe impl Send for QueryIter {}
unsafe impl Sync for QueryIter {}
/// Queries the OS with an address, returning the region it resides within.
///
/// If the queried address does not reside within any mapped region, or if it's
/// outside the process' address space, the function will error with
/// [`Error::UnmappedRegion`].
///
/// # Parameters
///
/// - The enclosing region can be of multiple page sizes.
/// - The address is rounded down to the closest page boundary.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
/// will be returned.
///
/// # Examples
///
/// ```
/// # fn main() -> region::Result<()> {
/// use region::Protection;
///
/// let data = [0; 100];
/// let region = region::query(data.as_ptr())?;
///
/// assert_eq!(region.protection(), Protection::READ_WRITE);
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn query<T>(address: *const T) -> Result<Region> {
// For UNIX systems, the address must be aligned to the closest page boundary
let (address, size) = util::round_to_page_boundaries(address, 1)?;
QueryIter::new(address, size)?
.next()
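// `next()` yields `Option<Result<Region>>`: a missing item means no mapped
// region contains the address, while an inner OS error is propagated as-is.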
.ok_or(Error::UnmappedRegion)?
}
/// Queries the OS for mapped regions that overlap with the specified range.
///
/// The implementation clamps any input that exceeds the boundaries of a
/// process' address space. Therefore it's safe to, e.g., pass in
/// [`std::ptr::null`] and [`usize::max_value`] to iterate the mapped memory
/// pages of an entire process.
///
/// If an error is encountered during iteration, the error will be the last item
/// that is yielded. Thereafter the iterator becomes fused.
///
/// A 2-byte range straddling a page boundary will return both pages (or one
/// region, if the pages share the same properties).
///
/// This function only returns mapped regions. If required, unmapped regions can
/// be manually identified by inspecting the potential gaps between two
/// neighboring regions.
///
/// # Parameters
///
/// - The range is `[address, address + size)`
/// - The address is rounded down to the closest page boundary.
/// - The size may not be zero.
/// - The size is rounded up to the closest page boundary, relative to the
/// address.
///
/// # Errors
///
/// - If an interaction with the underlying operating system fails, an error
/// will be returned.
/// - If size is zero, [`Error::InvalidParameter`] will be returned.
///
/// # Examples
///
/// ```
/// # use region::Result;
/// # fn main() -> Result<()> {
/// let data = [0; 100];
/// let region = region::query_range(data.as_ptr(), data.len())?
/// .collect::<Result<Vec<_>>>()?;
///
/// assert_eq!(region.len(), 1);
/// assert_eq!(region[0].protection(), region::Protection::READ_WRITE);
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn query_range<T>(address: *const T, size: usize) -> Result<QueryIter> {
let (address, size) = util::round_to_page_boundaries(address, size)?;
QueryIter::new(address, size)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::util::alloc_pages;
use crate::{page, Protection};
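// OpenBSD maps text segments as execute-only, hence the different expectation.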
const TEXT_SEGMENT_PROT: Protection = if cfg!(target_os = "openbsd") {
Protection::EXECUTE
} else {
Protection::READ_EXECUTE
};
#[test]
fn query_returns_unmapped_for_oob_address() {
let (min, max) = (std::ptr::null::<()>(), usize::max_value() as *const ());
assert!(matches!(query(min), Err(Error::UnmappedRegion)));
assert!(matches!(query(max), Err(Error::UnmappedRegion)));
}
#[test]
fn query_returns_correct_descriptor_for_text_segment() -> Result<()> {
let region = query(query_returns_correct_descriptor_for_text_segment as *const ())?;
assert_eq!(region.protection(), TEXT_SEGMENT_PROT);
assert_eq!(region.is_shared(), cfg!(windows));
assert!(!region.is_guarded());
Ok(())
}
#[test]
fn query_returns_one_region_for_multiple_page_allocation() -> Result<()> {
let alloc = crate::alloc(page::size() + 1, Protection::READ_EXECUTE)?;
let region = query(alloc.as_ptr::<()>())?;
assert_eq!(region.protection(), Protection::READ_EXECUTE);
assert_eq!(region.as_ptr::<()>(), alloc.as_ptr());
assert_eq!(region.len(), alloc.len());
assert!(!region.is_guarded());
Ok(())
}
#[test]
#[cfg(not(target_os = "android"))] // TODO: Determine why this fails on Android in QEMU
fn query_is_not_off_by_one() -> Result<()> {
let pages = [Protection::READ, Protection::READ_EXECUTE, Protection::READ];
let map = alloc_pages(&pages);
let page_mid = unsafe { map.as_ptr().add(page::size()) };
let region = query(page_mid)?;
assert_eq!(region.protection(), Protection::READ_EXECUTE);
assert_eq!(region.len(), page::size());
let region = query(unsafe { page_mid.offset(-1) })?;
assert_eq!(region.protection(), Protection::READ);
assert_eq!(region.len(), page::size());
Ok(())
}
#[test]
fn query_range_does_not_return_unmapped_regions() -> Result<()> {
let regions = query_range(std::ptr::null::<()>(), 1)?.collect::<Result<Vec<_>>>()?;
assert!(regions.is_empty());
Ok(())
}
#[test]
fn query_range_returns_both_regions_for_straddling_range() -> Result<()> {
let pages = [Protection::READ_EXECUTE, Protection::READ_WRITE];
let map = alloc_pages(&pages);
// Query an area that overlaps both pages
let address = unsafe { map.as_ptr().offset(page::size() as isize - 1) };
let regions = query_range(address, 2)?.collect::<Result<Vec<_>>>()?;
assert_eq!(regions.len(), pages.len());
for (page, region) in pages.iter().zip(regions.iter()) {
assert_eq!(*page, region.protection);
}
Ok(())
}
#[test]
fn query_range_has_inclusive_lower_and_exclusive_upper_bound() -> Result<()> {
let pages = [Protection::READ, Protection::READ_WRITE, Protection::READ];
let map = alloc_pages(&pages);
let regions = query_range(map.as_ptr(), page::size())?.collect::<Result<Vec<_>>>()?;
assert_eq!(regions.len(), 1);
assert_eq!(regions[0].protection(), Protection::READ);
let regions = query_range(map.as_ptr(), page::size() + 1)?.collect::<Result<Vec<_>>>()?;
assert_eq!(regions.len(), 2);
assert_eq!(regions[0].protection(), Protection::READ);
assert_eq!(regions[1].protection(), Protection::READ_WRITE);
Ok(())
}
#[test]
fn query_range_can_iterate_over_entire_process() -> Result<()> {
let regions =
query_range(std::ptr::null::<()>(), usize::max_value())?.collect::<Result<Vec<_>>>()?;
// This test is a bit rough around the edges
assert!(regions
.iter()
.any(|region| region.protection() == Protection::READ));
assert!(regions
.iter()
.any(|region| region.protection() == Protection::READ_WRITE));
assert!(regions
.iter()
.any(|region| region.protection() == TEXT_SEGMENT_PROT));
assert!(regions.len() > 5);
Ok(())
}
#[test]
fn query_range_iterator_is_fused_after_exhaustion() -> Result<()> {
let pages = [Protection::READ, Protection::READ_WRITE];
let map = alloc_pages(&pages);
let mut iter = query_range(map.as_ptr(), page::size() + 1)?;
assert_eq!(
iter.next().transpose()?.map(|r| r.protection()),
Some(Protection::READ)
);
assert_eq!(
iter.next().transpose()?.map(|r| r.protection()),
Some(Protection::READ_WRITE)
);
assert_eq!(iter.next().transpose()?, None);
assert_eq!(iter.next().transpose()?, None);
Ok(())
}
}
region-3.0.2/src/util.rs
use crate::{page, Error, Result};
/// Validates & rounds an address-size pair to their respective page boundaries.
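///
/// For example, with 4096-byte pages, the pair `(address = 4097, size = 4096)`
/// becomes `(4096, 8192)`: the size grows by the address' in-page offset before
/// being rounded up, while the address itself is rounded down.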
pub fn round_to_page_boundaries<T>(address: *const T, size: usize) -> Result<(*const T, usize)> {
if size == 0 {
return Err(Error::InvalidParameter("size"));
}
let size = (address as usize % page::size()).saturating_add(size);
let size = page::ceil(size as *const T) as usize;
Ok((page::floor(address), size))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn round_to_page_boundaries_works() -> Result<()> {
let pz = page::size();
let values = &[
((1, pz), (0, pz * 2)),
((0, pz - 1), (0, pz)),
((0, pz + 1), (0, pz * 2)),
((pz - 1, 1), (0, pz)),
((pz + 1, pz), (pz, pz * 2)),
((pz, pz), (pz, pz)),
];
for ((before_address, before_size), (after_address, after_size)) in values {
let (address, size) = round_to_page_boundaries(*before_address as *const (), *before_size)?;
assert_eq!((address, size), (*after_address as *const (), *after_size));
}
Ok(())
}
}