rkyv-0.8.9/.cargo_vcs_info.json0000644000000001420000000000100120420ustar { "git": { "sha1": "9febe6f809a96c223b318ad67b7c8fed5cb307b0" }, "path_in_vcs": "rkyv" }rkyv-0.8.9/Cargo.lock0000644000000375370000000000100100370ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "ahash" version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "getrandom", "once_cell", "version_check", "zerocopy", ] [[package]] name = "anstyle" version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "arrayvec" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "bytecheck" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c8f430744b23b54ad15161fcbc22d82a29b73eacbe425fea23ec822600bc6f" dependencies = [ "bytecheck_derive", "ptr_meta", "rancor", "simdutf8", "uuid", ] [[package]] name = "bytecheck_derive" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "523363cbe1df49b68215efdf500b103ac3b0fb4836aed6d15689a076eadb8fff" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "bytes" version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" version = "4.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73" dependencies = [ "anstyle", "clap_lex", ] [[package]] name = "clap_lex" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "condtype" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf0a07a401f374238ab8e2f11a104d2851bf9ce711ec69804834de8af45c7af" [[package]] name = "divan" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0d567df2c9c2870a43f3f2bd65aaeb18dbce1c18f217c3e564b4fbaeb3ee56c" dependencies = [ "cfg-if", "clap", "condtype", "divan-macros", "libc", "regex-lite", ] [[package]] name = "divan-macros" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27540baf49be0d484d8f0130d7d8da3011c32a44d4fc873368154f1510e574a2" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "getrandom" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "glob" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" [[package]] name = "indexmap" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.5", ] [[package]] name = "itoa" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "libc" version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "munge" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64142d38c84badf60abf06ff9bd80ad2174306a5b11bd4706535090a30a419df" dependencies = [ "munge_macro", ] [[package]] name = "munge_macro" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bb5c1d8184f13f7d0ccbeeca0def2f9a181bce2624302793005f5ca8aa62e5e" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "once_cell" version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ea5043e58958ee56f3e15a90aee535795cd7dfd319846288d93c5b57d85cbe" [[package]] name = "proc-macro2" version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] [[package]] name = "ptr_meta" version = 
"0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe9e76f66d3f9606f44e45598d155cb13ecf09f4a28199e48daf8c8fc937ea90" dependencies = [ "ptr_meta_derive", ] [[package]] name = "ptr_meta_derive" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca414edb151b4c8d125c12566ab0d74dc9cdba36fb80eb7b848c15f495fd32d1" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "quote" version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] [[package]] name = "rancor" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caf5f7161924b9d1cea0e4cabc97c372cea92b5f927fc13c6bca67157a0ad947" dependencies = [ "ptr_meta", ] [[package]] name = "regex-lite" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" [[package]] name = "rend" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a35e8a6bf28cd121053a66aa2e6a2e3eaffad4a60012179f0e864aa5ffeff215" dependencies = [ "bytecheck", ] [[package]] name = "rkyv" version = "0.8.9" dependencies = [ "ahash", "arrayvec", "bytecheck", "bytes", "divan", "hashbrown 0.14.5", "hashbrown 0.15.1", "indexmap", "munge", "ptr_meta", "rancor", "rend", "rkyv_derive", "rustversion", "smallvec", "smol_str 0.2.2", "smol_str 0.3.1", "thin-vec", "tinyvec", "triomphe", "trybuild", "uuid", ] [[package]] name = "rkyv_derive" version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "beb382a4d9f53bd5c0be86b10d8179c3f8a14c30bf774ff77096ed6581e35981" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "rustversion" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "ryu" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "serde" version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", "ryu", "serde", ] [[package]] name = "serde_spanned" version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" dependencies = [ "serde", ] [[package]] name = "simdutf8" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" [[package]] name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smol_str" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd538fb6910ac1099850255cf94a94df6551fbdd602454387d0adb2d1ca6dead" [[package]] name = "smol_str" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66eaf762c5af19db3108300515c8aa7a50efc90ff745f4c62288052ebf9fdd25" [[package]] name = "syn" 
version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "termcolor" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] [[package]] name = "thin-vec" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "tinyvec" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "toml" version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", "toml_datetime", "toml_edit", ] [[package]] name = "toml_datetime" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] [[package]] name = "toml_edit" version = "0.22.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ "indexmap", "serde", "serde_spanned", "toml_datetime", "winnow", ] [[package]] name = "triomphe" version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e6631e42e10b40c0690bf92f404ebcfe6e1fdb480391d15f17cc8e96eeed5369" [[package]] name = "trybuild" version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "207aa50d36c4be8d8c6ea829478be44a372c6a77669937bb39c698e52f1491e8" dependencies = [ "glob", "serde", "serde_derive", "serde_json", "termcolor", "toml", ] [[package]] name = "unicode-ident" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "uuid" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" [[package]] name = "version_check" version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "winapi-util" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ "windows-sys", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", 
"windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] [[package]] name = "zerocopy" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", "syn", ] rkyv-0.8.9/Cargo.toml0000644000000107170000000000100100510ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.81" name = "rkyv" version = "0.8.9" authors = ["David Koloski "] build = false autobins = false autoexamples = false autotests = false autobenches = false description = "Zero-copy deserialization framework for Rust" documentation = "https://docs.rs/rkyv" readme = "README.md" keywords = [ "archive", "rkyv", "serialization", "zero-copy", "no_std", ] categories = [ "encoding", "no-std", "no-std::no-alloc", ] license = "MIT" repository = "https://github.com/rkyv/rkyv" [package.metadata.docs.rs] features = ["bytecheck"] [lib] name = "rkyv" path = "src/lib.rs" [[example]] name = "backwards_compat" path = "examples/backwards_compat.rs" [[example]] name = "complex_wrapper_types" path = "examples/complex_wrapper_types.rs" [[example]] name = "derive_partial_ord" path = "examples/derive_partial_ord.rs" [[example]] name = "explicit_enum_discriminants" path = "examples/explicit_enum_discriminants.rs" [[example]] name = "json_like_schema" path = "examples/json_like_schema.rs" [[example]] name = "readme" path = "examples/readme.rs" [[example]] name = "remote_types" 
path = "examples/remote_types.rs" [[test]] name = "derive" path = "tests/derive.rs" [[test]] name = "ui" path = "tests/ui.rs" [[bench]] name = "log" path = "benches/log.rs" harness = false [[bench]] name = "mesh" path = "benches/mesh.rs" harness = false [[bench]] name = "minecraft_savedata" path = "benches/minecraft_savedata.rs" harness = false [dependencies.arrayvec-0_7] version = "0.7" optional = true default-features = false package = "arrayvec" [dependencies.bytecheck] version = "0.8" features = ["simdutf8"] optional = true default-features = false [dependencies.bytes-1] version = "1" optional = true default-features = false package = "bytes" [dependencies.hashbrown] version = "0.15" optional = true default-features = false [dependencies.hashbrown-0_14] version = "0.14" optional = true default-features = false package = "hashbrown" [dependencies.indexmap-2] version = "2" optional = true default-features = false package = "indexmap" [dependencies.munge] version = "0.4" default-features = false [dependencies.ptr_meta] version = "0.3" default-features = false [dependencies.rancor] version = "0.1" default-features = false [dependencies.rend] version = "0.5" default-features = false [dependencies.rkyv_derive] version = "=0.8.9" default-features = false [dependencies.smallvec-1] version = "1" optional = true default-features = false package = "smallvec" [dependencies.smol_str-0_2] version = "0.2" optional = true default-features = false package = "smol_str" [dependencies.smol_str-0_3] version = "0.3" optional = true default-features = false package = "smol_str" [dependencies.thin-vec-0_2] version = "0.2.12" optional = true default-features = false package = "thin-vec" [dependencies.tinyvec-1] version = "1" optional = true default-features = false package = "tinyvec" [dependencies.triomphe-0_1] version = "0.1" optional = true default-features = false package = "triomphe" [dependencies.uuid-1] version = "1" optional = true default-features = false package = "uuid" 
[dev-dependencies.ahash] version = "0.8" [dev-dependencies.divan] version = "0.1" default-features = false [dev-dependencies.rustversion] version = "1" default-features = false [dev-dependencies.trybuild] version = "1" default-features = false [features] aligned = [] alloc = [ "dep:hashbrown", "tinyvec-1?/alloc", "rancor/alloc", ] big_endian = [] bytecheck = [ "dep:bytecheck", "rend/bytecheck", "rkyv_derive/bytecheck", ] default = [ "std", "bytecheck", ] hashbrown-0_15 = ["dep:hashbrown"] indexmap-2 = [ "dep:indexmap-2", "alloc", ] little_endian = [] pointer_width_16 = [] pointer_width_32 = [] pointer_width_64 = [] std = [ "alloc", "bytes-1?/std", "indexmap-2?/std", "ptr_meta/std", "uuid-1?/std", ] triomphe-0_1 = [ "dep:triomphe-0_1", "alloc", ] unaligned = [] uuid-1 = [ "dep:uuid-1", "bytecheck?/uuid-1", ] rkyv-0.8.9/Cargo.toml.orig000064400000000000000000000065621046102023000135350ustar 00000000000000[package] name = "rkyv" description = "Zero-copy deserialization framework for Rust" version.workspace = true edition.workspace = true rust-version.workspace = true authors.workspace = true license.workspace = true readme = "../README.md" repository.workspace = true keywords = ["archive", "rkyv", "serialization", "zero-copy", "no_std"] categories = ["encoding", "no-std", "no-std::no-alloc"] documentation = "https://docs.rs/rkyv" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] bytecheck = { workspace = true, optional = true } hashbrown = { workspace = true, optional = true } munge.workspace = true ptr_meta.workspace = true rancor.workspace = true rend.workspace = true rkyv_derive.workspace = true # Support for various common crates. These are primarily to get users off the # ground and build some momentum. # These are NOT PLANNED to remain in rkyv for the 1.0 release. Much like serde, # these implementations should be moved into their respective crates over time. 
# Before adding support for another crate, please consider getting rkyv support # in the crate instead. arrayvec-0_7 = { package = "arrayvec", version = "0.7", optional = true, default-features = false } bytes-1 = { package = "bytes", version = "1", optional = true, default-features = false } hashbrown-0_14 = { package = "hashbrown", version = "0.14", optional = true, default-features = false } # rkyv already depends on hashbrown 0.15, so we can't duplicate this, but we can expose it as a feature below # hashbrown-0_15 = { package = "hashbrown", version = "0.15", optional = true, default-features = false } indexmap-2 = { package = "indexmap", version = "2", optional = true, default-features = false } smallvec-1 = { package = "smallvec", version = "1", optional = true, default-features = false } smol_str-0_2 = { package = "smol_str", version = "0.2", optional = true, default-features = false } smol_str-0_3 = { package = "smol_str", version = "0.3", optional = true, default-features = false } thin-vec-0_2 = { package = "thin-vec", version = "0.2.12", optional = true, default-features = false } tinyvec-1 = { package = "tinyvec", version = "1", optional = true, default-features = false } triomphe-0_1 = { package = "triomphe", version = "0.1", optional = true, default-features = false } uuid-1 = { package = "uuid", version = "1", optional = true, default-features = false } [features] default = ["std", "bytecheck"] little_endian = [] big_endian = [] aligned = [] unaligned = [] pointer_width_16 = [] pointer_width_32 = [] pointer_width_64 = [] alloc = ["dep:hashbrown", "tinyvec-1?/alloc", "rancor/alloc"] std = ["alloc", "bytes-1?/std", "indexmap-2?/std", "ptr_meta/std", "uuid-1?/std"] bytecheck = ["dep:bytecheck", "rend/bytecheck", "rkyv_derive/bytecheck"] # External crate support hashbrown-0_15 = ["dep:hashbrown"] indexmap-2 = ["dep:indexmap-2", "alloc"] triomphe-0_1 = ["dep:triomphe-0_1", "alloc"] uuid-1 = ["dep:uuid-1", "bytecheck?/uuid-1"] [package.metadata.docs.rs] 
features = ["bytecheck"] [dev-dependencies] ahash = "0.8" benchlib = { path = "../benchlib" } divan.workspace = true rustversion.workspace = true trybuild.workspace = true [[bench]] name = "log" harness = false [[bench]] name = "mesh" harness = false [[bench]] name = "minecraft_savedata" harness = false rkyv-0.8.9/LICENSE000064400000000000000000000020351046102023000116420ustar 00000000000000Copyright 2021 David Koloski Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. rkyv-0.8.9/README.md000064400000000000000000000102001046102023000121050ustar 00000000000000

rkyv

rkyv (archive) is a zero-copy deserialization framework for Rust

Discord crates.io docs.rs MIT license

# Resources ## Learning Materials - The [rkyv book](https://rkyv.github.io/rkyv) covers the motivation, architecture, and major features of rkyv - The [rkyv discord](https://discord.gg/65F6MdnbQh) is a great place to get help with specific issues and meet other people using rkyv ## Documentation - [rkyv](https://docs.rs/rkyv), the core library - [rkyv_dyn](https://docs.rs/rkyv_dyn), which adds trait object support to rkyv ## Benchmarks - The [rust serialization benchmark](https://github.com/djkoloski/rust_serialization_benchmark) is a shootout style benchmark comparing many rust serialization solutions. It includes special benchmarks for zero-copy serialization solutions like rkyv. ## Sister Crates - [rend](https://github.com/rkyv/rend), which rkyv uses for endian-agnostic features - [bytecheck](https://github.com/rkyv/bytecheck), which rkyv uses for validation - [rancor](https://github.com/rkyv/rancor), which rkyv uses for error handling - [ptr_meta](https://github.com/rkyv/ptr_meta), which rkyv uses for pointer manipulation # Example ```rust use rkyv::{deserialize, rancor::Error, Archive, Deserialize, Serialize}; #[derive(Archive, Deserialize, Serialize, Debug, PartialEq)] #[rkyv( // This will generate a PartialEq impl between our unarchived // and archived types compare(PartialEq), // Derives can be passed through to the generated type: derive(Debug), )] struct Test { int: u8, string: String, option: Option>, } fn main() { let value = Test { int: 42, string: "hello world".to_string(), option: Some(vec![1, 2, 3, 4]), }; // Serializing is as easy as a single function call let _bytes = rkyv::to_bytes::(&value).unwrap(); // Or you can customize your serialization for better performance or control // over resource usage use rkyv::{api::high::to_bytes_with_alloc, ser::allocator::Arena}; let mut arena = Arena::new(); let bytes = to_bytes_with_alloc::<_, Error>(&value, arena.acquire()).unwrap(); // You can use the safe API for fast zero-copy deserialization let archived 
= rkyv::access::(&bytes[..]).unwrap(); assert_eq!(archived, &value); // Or you can use the unsafe API for maximum performance let archived = unsafe { rkyv::access_unchecked::(&bytes[..]) }; assert_eq!(archived, &value); // And you can always deserialize back to the original type let deserialized = deserialize::(archived).unwrap(); assert_eq!(deserialized, value); } ``` _Note: the safe API requires the `bytecheck` feature (enabled by default)_ _Read more about [available features](https://docs.rs/rkyv/latest/rkyv/#features)._ # Thanks Thanks to all the sponsors that keep development sustainable. Special thanks to the following sponsors for going above and beyond supporting rkyv: ## Bronze Sponsors

Climatiq

> Climatiq provides an embedded carbon intelligence solution that enables developers to automate GHG emission calculations based on verified scientific models. Its suite of products includes the largest dataset of emission factors, and intelligent APIs that integrate with any software platform for real time monitoring of greenhouse gas emissions. rkyv-0.8.9/benches/log.rs000064400000000000000000000067151046102023000134040ustar 00000000000000use benchlib::{bench_dataset, generate_vec, Generate, Rng}; use rkyv::{Archive, Deserialize, Serialize}; #[derive(Archive, Serialize, Deserialize, Clone, Copy, PartialEq)] pub struct Address { pub x0: u8, pub x1: u8, pub x2: u8, pub x3: u8, } impl Generate for Address { fn generate(rand: &mut R) -> Self { Self { x0: rand.gen_range(0..=255), x1: rand.gen_range(0..=255), x2: rand.gen_range(0..=255), x3: rand.gen_range(0..=255), } } } #[derive(Archive, Serialize, Deserialize, Clone, PartialEq)] pub struct Log { pub address: Address, pub identity: String, pub userid: String, pub date: String, pub request: String, pub code: u16, pub size: u64, } impl Generate for Log { fn generate(rand: &mut R) -> Self { const USERID: [&str; 9] = [ "-", "alice", "bob", "carmen", "david", "eric", "frank", "george", "harry", ]; const MONTHS: [&str; 12] = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ]; const TIMEZONE: [&str; 25] = [ "-1200", "-1100", "-1000", "-0900", "-0800", "-0700", "-0600", "-0500", "-0400", "-0300", "-0200", "-0100", "+0000", "+0100", "+0200", "+0300", "+0400", "+0500", "+0600", "+0700", "+0800", "+0900", "+1000", "+1100", "+1200", ]; let date = format!( "{}/{}/{}:{}:{}:{} {}", rand.gen_range(1..=28), MONTHS[rand.gen_range(0..12)], rand.gen_range(1970..=2021), rand.gen_range(0..24), rand.gen_range(0..60), rand.gen_range(0..60), TIMEZONE[rand.gen_range(0..25)], ); const CODES: [u16; 63] = [ 100, 101, 102, 103, 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, 300, 301, 302, 303, 304, 305, 
306, 307, 308, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 421, 422, 423, 424, 425, 426, 428, 429, 431, 451, 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, 511, ]; const METHODS: [&str; 5] = ["GET", "POST", "PUT", "UPDATE", "DELETE"]; const ROUTES: [&str; 7] = [ "/favicon.ico", "/css/index.css", "/css/font-awsome.min.css", "/img/logo-full.svg", "/img/splash.jpg", "/api/login", "/api/logout", ]; const PROTOCOLS: [&str; 4] = ["HTTP/1.0", "HTTP/1.1", "HTTP/2", "HTTP/3"]; let request = format!( "{} {} {}", METHODS[rand.gen_range(0..5)], ROUTES[rand.gen_range(0..7)], PROTOCOLS[rand.gen_range(0..4)], ); Self { address: Address::generate(rand), identity: "-".into(), userid: USERID[rand.gen_range(0..USERID.len())].into(), date, request, code: CODES[rand.gen_range(0..CODES.len())], size: rand.gen_range(0..100_000_000), } } } #[derive(Archive, Serialize, Deserialize, Clone, PartialEq)] pub struct Logs { pub logs: Vec, } pub fn generate_logs() -> Logs { let mut rng = benchlib::rng(); const LOGS: usize = 10_000; Logs { logs: generate_vec::<_, Log>(&mut rng, LOGS..LOGS + 1), } } bench_dataset!(Logs = generate_logs()); rkyv-0.8.9/benches/mesh.rs000064400000000000000000000024521046102023000135510ustar 00000000000000use benchlib::{bench_dataset, generate_vec, Generate, Rng}; use rkyv::{Archive, Deserialize, Serialize}; #[derive(Archive, Serialize, Deserialize, Clone, Copy, PartialEq)] pub struct Vector3 { pub x: f32, pub y: f32, pub z: f32, } impl Generate for Vector3 { fn generate(rand: &mut R) -> Self { Self { x: rand.gen(), y: rand.gen(), z: rand.gen(), } } } #[derive(Archive, Serialize, Deserialize, Clone, Copy, PartialEq)] pub struct Triangle { pub v0: Vector3, pub v1: Vector3, pub v2: Vector3, pub normal: Vector3, } impl Generate for Triangle { fn generate(rand: &mut R) -> Self { Self { v0: Vector3::generate(rand), v1: Vector3::generate(rand), v2: Vector3::generate(rand), normal: Vector3::generate(rand), } } } #[derive( 
rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, Clone, PartialEq, )] pub struct Mesh { pub triangles: Vec, } pub fn generate_mesh() -> Mesh { let mut rng = benchlib::rng(); const TRIANGLES: usize = 125_000; Mesh { triangles: generate_vec::<_, Triangle>( &mut rng, TRIANGLES..TRIANGLES + 1, ), } } bench_dataset!(Mesh = generate_mesh()); rkyv-0.8.9/benches/minecraft_savedata.rs000064400000000000000000000206461046102023000164420ustar 00000000000000use benchlib::{bench_dataset, generate_vec, Generate, Rng}; use rkyv::{Archive, Deserialize, Serialize}; #[derive(Archive, Serialize, Deserialize, Clone, Copy, Debug)] #[repr(u8)] pub enum GameType { Survival, Creative, Adventure, Spectator, } impl Generate for GameType { fn generate(rand: &mut R) -> Self { match rand.gen_range(0..4) { 0 => GameType::Survival, 1 => GameType::Creative, 2 => GameType::Adventure, 3 => GameType::Spectator, _ => unsafe { core::hint::unreachable_unchecked() }, } } } #[derive(Archive, Serialize, Deserialize, Debug)] pub struct Item { count: i8, slot: u8, id: String, } impl Generate for Item { fn generate(rng: &mut R) -> Self { const IDS: [&str; 8] = [ "dirt", "stone", "pickaxe", "sand", "gravel", "shovel", "chestplate", "steak", ]; Self { count: rng.gen(), slot: rng.gen(), id: IDS[rng.gen_range(0..IDS.len())].to_string(), } } } #[derive(Archive, Serialize, Clone, Copy, Deserialize, Debug)] pub struct Abilities { walk_speed: f32, fly_speed: f32, may_fly: bool, flying: bool, invulnerable: bool, may_build: bool, instabuild: bool, } impl Generate for Abilities { fn generate(rng: &mut R) -> Self { Self { walk_speed: rng.gen(), fly_speed: rng.gen(), may_fly: rng.gen_bool(0.5), flying: rng.gen_bool(0.5), invulnerable: rng.gen_bool(0.5), may_build: rng.gen_bool(0.5), instabuild: rng.gen_bool(0.5), } } } #[derive(Archive, Serialize, Deserialize, Debug)] pub struct Entity { id: String, pos: [f64; 3], motion: [f64; 3], rotation: [f32; 2], fall_distance: f32, fire: u16, air: u16, on_ground: bool, no_gravity: 
bool, invulnerable: bool, portal_cooldown: i32, uuid: [u32; 4], custom_name: Option, custom_name_visible: bool, silent: bool, glowing: bool, } impl Generate for Entity { fn generate(rng: &mut R) -> Self { const IDS: [&str; 8] = [ "cow", "sheep", "zombie", "skeleton", "spider", "creeper", "parrot", "bee", ]; const CUSTOM_NAMES: [&str; 8] = [ "rainbow", "princess", "steve", "johnny", "missy", "coward", "fairy", "howard", ]; Self { id: IDS[rng.gen_range(0..IDS.len())].to_string(), pos: <[f64; 3] as Generate>::generate(rng), motion: <[f64; 3] as Generate>::generate(rng), rotation: <[f32; 2] as Generate>::generate(rng), fall_distance: rng.gen(), fire: rng.gen(), air: rng.gen(), on_ground: rng.gen_bool(0.5), no_gravity: rng.gen_bool(0.5), invulnerable: rng.gen_bool(0.5), portal_cooldown: rng.gen(), uuid: <[u32; 4] as Generate>::generate(rng), custom_name: as Generate>::generate(rng).map(|_| { CUSTOM_NAMES[rng.gen_range(0..CUSTOM_NAMES.len())].to_string() }), custom_name_visible: rng.gen_bool(0.5), silent: rng.gen_bool(0.5), glowing: rng.gen_bool(0.5), } } } #[derive(Archive, Serialize, Deserialize, Debug)] pub struct RecipeBook { recipes: Vec, to_be_displayed: Vec, is_filtering_craftable: bool, is_gui_open: bool, is_furnace_filtering_craftable: bool, is_furnace_gui_open: bool, is_blasting_furnace_filtering_craftable: bool, is_blasting_furnace_gui_open: bool, is_smoker_filtering_craftable: bool, is_smoker_gui_open: bool, } impl Generate for RecipeBook { fn generate(rng: &mut R) -> Self { const RECIPES: [&str; 8] = [ "pickaxe", "torch", "bow", "crafting table", "furnace", "shears", "arrow", "tnt", ]; const MAX_RECIPES: usize = 30; const MAX_DISPLAYED_RECIPES: usize = 10; Self { recipes: generate_vec::<_, ()>(rng, 0..MAX_RECIPES) .iter() .map(|_| RECIPES[rng.gen_range(0..RECIPES.len())].to_string()) .collect(), to_be_displayed: generate_vec::<_, ()>( rng, 0..MAX_DISPLAYED_RECIPES, ) .iter() .map(|_| RECIPES[rng.gen_range(0..RECIPES.len())].to_string()) .collect(), 
is_filtering_craftable: rng.gen_bool(0.5), is_gui_open: rng.gen_bool(0.5), is_furnace_filtering_craftable: rng.gen_bool(0.5), is_furnace_gui_open: rng.gen_bool(0.5), is_blasting_furnace_filtering_craftable: rng.gen_bool(0.5), is_blasting_furnace_gui_open: rng.gen_bool(0.5), is_smoker_filtering_craftable: rng.gen_bool(0.5), is_smoker_gui_open: rng.gen_bool(0.5), } } } #[derive(Archive, Serialize, Deserialize, Debug)] pub struct RootVehicle { attach: [u32; 4], entity: Entity, } impl Generate for RootVehicle { fn generate(rng: &mut R) -> Self { Self { attach: <[u32; 4] as Generate>::generate(rng), entity: ::generate(rng), } } } #[derive(Archive, Serialize, Deserialize, Debug)] pub struct Player { game_type: GameType, previous_game_type: GameType, score: u64, dimension: String, selected_item_slot: u32, selected_item: Item, spawn_dimension: Option, spawn_x: i64, spawn_y: i64, spawn_z: i64, spawn_forced: Option, sleep_timer: u16, food_exhaustion_level: f32, food_saturation_level: f32, food_tick_timer: u32, xp_level: u32, xp_p: f32, xp_total: i32, xp_seed: i32, inventory: Vec, ender_items: Vec, abilities: Abilities, entered_nether_position: Option<[f64; 3]>, root_vehicle: Option, shoulder_entity_left: Option, shoulder_entity_right: Option, seen_credits: bool, recipe_book: RecipeBook, } #[derive(Archive, Serialize, Deserialize, Debug)] pub struct Players { pub players: Vec, } impl Generate for Player { fn generate(rng: &mut R) -> Self { const DIMENSIONS: [&str; 3] = ["overworld", "nether", "end"]; const MAX_ITEMS: usize = 40; const MAX_ENDER_ITEMS: usize = 27; Self { game_type: GameType::generate(rng), previous_game_type: GameType::generate(rng), score: rng.gen(), dimension: DIMENSIONS[rng.gen_range(0..DIMENSIONS.len())] .to_string(), selected_item_slot: rng.gen(), selected_item: Item::generate(rng), spawn_dimension: as Generate>::generate(rng).map( |_| DIMENSIONS[rng.gen_range(0..DIMENSIONS.len())].to_string(), ), spawn_x: rng.gen(), spawn_y: rng.gen(), spawn_z: 
rng.gen(), spawn_forced: as Generate>::generate(rng), sleep_timer: rng.gen(), food_exhaustion_level: rng.gen(), food_saturation_level: rng.gen(), food_tick_timer: rng.gen(), xp_level: rng.gen(), xp_p: rng.gen(), xp_total: rng.gen(), xp_seed: rng.gen(), inventory: generate_vec(rng, 0..MAX_ITEMS), ender_items: generate_vec(rng, 0..MAX_ENDER_ITEMS), abilities: Abilities::generate(rng), entered_nether_position: as Generate>::generate( rng, ), root_vehicle: as Generate>::generate(rng), shoulder_entity_left: as Generate>::generate(rng), shoulder_entity_right: as Generate>::generate(rng), seen_credits: rng.gen_bool(0.5), recipe_book: RecipeBook::generate(rng), } } } pub fn generate_players() -> Players { let mut rng = benchlib::rng(); const PLAYERS: usize = 500; Players { players: generate_vec::<_, Player>(&mut rng, PLAYERS..PLAYERS + 1), } } bench_dataset!(Players = generate_players()); rkyv-0.8.9/examples/backwards_compat.rs000064400000000000000000000052341046102023000163310ustar 00000000000000use rkyv::{rancor::Error, with::AsBox, Archive, Deserialize, Serialize}; // This is the version used by the older client, which can read newer versions // from senders. #[derive(Archive, Deserialize, Serialize)] struct ExampleV1 { a: i32, b: u32, } // This is the version used by the newer client, which can send newer versions // to receivers. #[derive(Archive, Deserialize, Serialize)] struct ExampleV2 { a: i32, b: i32, c: String, } // This wrapper type serializes the contained value out-of-line so that newer // versions can be viewed as the older version. // // In a complete message format, sending a version number along with the buffer // would allow clients to reject incompatible messages before validating the // buffer. #[derive(Archive, Deserialize, Serialize)] #[repr(transparent)] struct Versioned(#[rkyv(with = AsBox)] pub T); // This is some code running on the older client. It accepts the older version // of the struct and prints out the `a` and `b` fields. 
fn print_v1(value: &ArchivedExampleV1) { println!("v1: a = {}, b = {}", value.a, value.b); } // This is some code running on the newer client. It can also print out the `c` // field for newer versions. fn print_v2(value: &ArchivedExampleV2) { println!("v2: a = {}, b = {}, c = {}", value.a, value.b, value.c); } fn main() { // These two different versions of the type will be serialized and accessed. let v1 = Versioned(ExampleV1 { a: 10, b: 20 }); let v2 = Versioned(ExampleV2 { a: 30, b: 50, c: "hello world".to_string(), }); // v1 is serialized into v1_bytes let v1_bytes = rkyv::to_bytes::(&v1).expect("failed to serialize v1"); // v2 is serialized into v2_bytes let v2_bytes = rkyv::to_bytes::(&v2).expect("failed to serialize v2"); // We can view a v1 as a v1 let v1_as_v1 = rkyv::access::>, Error>(&v1_bytes) .unwrap(); print_v1(&v1_as_v1.0); // We can view a v2 as a v1 let v2_as_v1 = rkyv::access::>, Error>(&v2_bytes) .unwrap(); print_v1(&v2_as_v1.0); // And we can view a v2 as a v2 let v2_as_v2 = rkyv::access::>, Error>(&v2_bytes) .unwrap(); print_v2(&v2_as_v2.0); // But we can't view a v1 as a v2 because v1 is not forward-compatible with // v2 if rkyv::access::>, Error>(&v1_bytes) .is_ok() { panic!("v1 bytes should not validate as v2"); } else { println!("verified that v1 cannot be viewed as v2"); } } rkyv-0.8.9/examples/complex_wrapper_types.rs000064400000000000000000000142411046102023000174560ustar 00000000000000use rkyv::{ access_unchecked, deserialize, rancor::{Error, Fallible}, ser::{Allocator, Writer}, vec::{ArchivedVec, VecResolver}, with::{ArchiveWith, DeserializeWith, SerializeWith}, Archive, Archived, Deserialize, Place, Serialize, }; #[derive(Debug, PartialEq, Eq)] pub enum Opcode { // A 1-byte opcode OneByte, // A 2-byte opcode TwoBytes(u8), // A 3-byte opcode ThreeBytes(u16), // A 5-byte opcode FiveBytes(u32), // A 9-byte opcode NineBytes(u64), // A variable-length opcode VariableLength(usize), } pub struct EncodeOpcodes; pub struct OpcodesResolver { 
len: usize, inner: VecResolver, } impl ArchiveWith> for EncodeOpcodes { type Archived = ArchivedVec; type Resolver = OpcodesResolver; fn resolve_with( _: &Vec, resolver: Self::Resolver, out: Place, ) { ArchivedVec::resolve_from_len(resolver.len, resolver.inner, out); } } impl SerializeWith, S> for EncodeOpcodes where S: Fallible + Allocator + Writer + ?Sized, { fn serialize_with( field: &Vec, serializer: &mut S, ) -> Result { // Encode opcodes into a compact binary format // We'll do it manually here, but you could just as easily proxy out to // a serialization framework like postcard let mut encoded = Vec::new(); for opcode in field.iter() { match opcode { Opcode::OneByte => encoded.push(0), Opcode::TwoBytes(arg) => { encoded.push(1); encoded.extend(arg.to_le_bytes()); } Opcode::ThreeBytes(arg) => { encoded.push(2); encoded.extend(arg.to_le_bytes()); } Opcode::FiveBytes(arg) => { encoded.push(3); encoded.extend(arg.to_le_bytes()); } Opcode::NineBytes(arg) => { encoded.push(4); encoded.extend(arg.to_le_bytes()); } Opcode::VariableLength(arg) => { let mut arg = *arg; let bytes = arg.to_le_bytes(); let mut len = 1; while arg >= 256 { arg >>= 8; len += 1; } encoded.push(4 + len as u8); encoded.extend(&bytes[0..len]); } } } // Serialize encoded opcodes Ok(OpcodesResolver { len: encoded.len(), inner: ArchivedVec::serialize_from_slice( encoded.as_slice(), serializer, )?, }) } } impl DeserializeWith>, Vec, D> for EncodeOpcodes where D: Fallible + ?Sized, { fn deserialize_with( field: &Archived>, _: &mut D, ) -> Result, D::Error> { let mut result = Vec::new(); // Decode opcodes from a compact binary format let mut bytes = field.iter().cloned(); while let Some(op) = bytes.next() { match op { 0 => result.push(Opcode::OneByte), 1 => { let arg = bytes.next().unwrap(); result.push(Opcode::TwoBytes(arg)); } 2 => { let arg = bytes.next().unwrap() as u16 | (bytes.next().unwrap() as u16) << 8; result.push(Opcode::ThreeBytes(arg)); } 3 => { let arg = bytes.next().unwrap() as u32 | 
(bytes.next().unwrap() as u32) << 8 | (bytes.next().unwrap() as u32) << 16 | (bytes.next().unwrap() as u32) << 24; result.push(Opcode::FiveBytes(arg)); } 4 => { let arg = bytes.next().unwrap() as u64 | (bytes.next().unwrap() as u64) << 8 | (bytes.next().unwrap() as u64) << 16 | (bytes.next().unwrap() as u64) << 24 | (bytes.next().unwrap() as u64) << 32 | (bytes.next().unwrap() as u64) << 40 | (bytes.next().unwrap() as u64) << 48 | (bytes.next().unwrap() as u64) << 56; result.push(Opcode::NineBytes(arg)); } n @ 5..=12 => { let len = n - 4; let mut arg = 0; for i in 0..len { arg |= (bytes.next().unwrap() as usize) << (8 * i); } result.push(Opcode::VariableLength(arg)); } // Either the deserializer can be bound to support decode // errors, or the opcodes can be checked during // validation with bytecheck _ => panic!("unexpected opcode"), } } Ok(result) } } #[derive(Archive, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Program { #[rkyv(with = EncodeOpcodes)] opcodes: Vec, } fn main() { let program = Program { opcodes: vec![ Opcode::OneByte, Opcode::TwoBytes(42), Opcode::ThreeBytes(27774), Opcode::FiveBytes(31415926), Opcode::NineBytes(123456789123456789), Opcode::VariableLength(27774), ], }; println!("opcodes: {:?}", program.opcodes); let buf = rkyv::to_bytes::(&program).unwrap(); let archived_program = unsafe { access_unchecked::(&buf) }; println!("encoded: {:?}", archived_program.opcodes); assert_eq!(archived_program.opcodes.len(), 23); let deserialized_program = deserialize::(archived_program).unwrap(); println!("deserialized opcodes: {:?}", deserialized_program.opcodes); assert_eq!(program, deserialized_program); } rkyv-0.8.9/examples/derive_partial_ord.rs000064400000000000000000000022521046102023000166600ustar 00000000000000fn main() { use rancor::Panic; use rkyv::{Archive, Deserialize, Serialize}; // Struct #[derive(Archive, Deserialize, Serialize, Debug, PartialEq, PartialOrd)] #[rkyv(compare(PartialEq, PartialOrd), derive(Debug))] pub enum Struct { 
A { a: i32 }, } let small = Struct::A { a: 0 }; let big = Struct::A { a: 1 }; assert!(small < big); let big_bytes = rkyv::to_bytes::(&big).expect("failed to serialize value"); let big_archived = unsafe { rkyv::access_unchecked::(&big_bytes) }; assert!((&small as &dyn PartialOrd) < big_archived); // Enum #[derive(Archive, Deserialize, Serialize, Debug, PartialEq, PartialOrd)] #[rkyv(compare(PartialEq, PartialOrd), derive(Debug))] pub struct Enum { a: i32, } let small = Enum { a: 0 }; let big = Enum { a: 1 }; assert!(small < big); let big_bytes = rkyv::to_bytes::(&big).expect("failed to serialize value"); let big_archived = unsafe { rkyv::access_unchecked::(&big_bytes) }; assert!((&small as &dyn PartialOrd) < big_archived); } rkyv-0.8.9/examples/explicit_enum_discriminants.rs000064400000000000000000000005001046102023000206100ustar 00000000000000use rkyv::{Archive, Deserialize, Serialize}; fn main() { #[derive(Archive, Deserialize, Serialize)] enum Foo { A = 2, B = 4, C = 6, } assert_eq!(ArchivedFoo::A as usize, 2); assert_eq!(ArchivedFoo::B as usize, 4); assert_eq!(ArchivedFoo::C as usize, 6); } rkyv-0.8.9/examples/json_like_schema.rs000064400000000000000000000077741046102023000163350ustar 00000000000000use std::{collections::HashMap, fmt}; use rkyv::{access, rancor::Error, Archive, Deserialize, Serialize}; #[derive(Archive, Debug, Deserialize, Serialize)] // We have a recursive type, which requires some special handling // // First the compiler will return an error: // // > error[E0275]: overflow evaluating the requirement `HashMap JsonValue>: Archive` // // This is because the implementation of Archive for Json value requires that // JsonValue: Archive, which is recursive! // We can fix this by adding #[omit_bounds] on the recursive fields. This will // prevent the derive from automatically adding a `HashMap: // Archive` bound on the generated impl. 
// // Next, the compiler will return these errors: // // > error[E0277]: the trait bound `__S: ScratchSpace` is not satisfied // > error[E0277]: the trait bound `__S: Serializer` is not satisfied // // This is because those bounds are required by HashMap and Vec, but we removed // the default generated bounds to prevent a recursive impl. // We can fix this by manually specifying the bounds required by HashMap and Vec // in an attribute, and then everything will compile: #[rkyv(serialize_bounds( __S: rkyv::ser::Writer + rkyv::ser::Allocator, __S::Error: rkyv::rancor::Source, ))] #[rkyv(deserialize_bounds(__D::Error: rkyv::rancor::Source))] // We need to manually add the appropriate non-recursive bounds to our // `CheckBytes` derive. In our case, we need to bound // `__C: rkyv::validation::ArchiveContext`. This will make sure that our `Vec` // and `HashMap` have the `ArchiveContext` trait implemented on the validator. // This is a necessary requirement for containers to check their bytes. // // With those two changes, our recursive type can be validated with `access`! 
#[rkyv(bytecheck( bounds( __C: rkyv::validation::ArchiveContext, ) ))] pub enum JsonValue { Null, Bool(bool), Number(JsonNumber), String(String), Array(#[rkyv(omit_bounds)] Vec), Object(#[rkyv(omit_bounds)] HashMap), } impl fmt::Display for ArchivedJsonValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Null => write!(f, "null")?, Self::Bool(b) => write!(f, "{}", b)?, Self::Number(n) => write!(f, "{}", n)?, Self::String(s) => write!(f, "{}", s)?, Self::Array(a) => { write!(f, "[")?; for (i, value) in a.iter().enumerate() { write!(f, "{}", value)?; if i < a.len() - 1 { write!(f, ", ")?; } } write!(f, "]")?; } Self::Object(h) => { write!(f, "{{")?; for (i, (key, value)) in h.iter().enumerate() { write!(f, "\"{}\": {}", key, value)?; if i < h.len() - 1 { write!(f, ", ")?; } } write!(f, "}}")?; } } Ok(()) } } #[derive(Archive, Debug, Deserialize, Serialize)] pub enum JsonNumber { PosInt(u64), NegInt(i64), Float(f64), } impl fmt::Display for ArchivedJsonNumber { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::PosInt(n) => write!(f, "{}", n), Self::NegInt(n) => write!(f, "{}", n), Self::Float(n) => write!(f, "{}", n), } } } fn main() { let mut hash_map = HashMap::new(); hash_map.insert("name".into(), JsonValue::String("ferris".into())); hash_map.insert("age".into(), JsonValue::Number(JsonNumber::PosInt(10))); hash_map.insert("is_crab".into(), JsonValue::Bool(true)); hash_map.insert("project".into(), JsonValue::Null); let value = JsonValue::Object(hash_map); let buf = rkyv::to_bytes::(&value).unwrap(); let archived_value = access::(&buf).unwrap(); println!("{}", archived_value); } rkyv-0.8.9/examples/readme.rs000064400000000000000000000027641046102023000142670ustar 00000000000000use rkyv::{deserialize, rancor::Error, Archive, Deserialize, Serialize}; #[derive(Archive, Deserialize, Serialize, Debug, PartialEq)] #[rkyv( // This will generate a PartialEq impl between our unarchived // and archived types 
compare(PartialEq), // Derives can be passed through to the generated type: derive(Debug), )] struct Test { int: u8, string: String, option: Option>, } fn main() { let value = Test { int: 42, string: "hello world".to_string(), option: Some(vec![1, 2, 3, 4]), }; // Serializing is as easy as a single function call let _bytes = rkyv::to_bytes::(&value).unwrap(); // Or you can customize your serialization for better performance or control // over resource usage use rkyv::{api::high::to_bytes_with_alloc, ser::allocator::Arena}; let mut arena = Arena::new(); let bytes = to_bytes_with_alloc::<_, Error>(&value, arena.acquire()).unwrap(); // You can use the safe API for fast zero-copy deserialization let archived = rkyv::access::(&bytes[..]).unwrap(); assert_eq!(archived, &value); // Or you can use the unsafe API for maximum performance let archived = unsafe { rkyv::access_unchecked::(&bytes[..]) }; assert_eq!(archived, &value); // And you can always deserialize back to the original type let deserialized = deserialize::(archived).unwrap(); assert_eq!(deserialized, value); } rkyv-0.8.9/examples/remote_types.rs000064400000000000000000000105461046102023000155460ustar 00000000000000use rancor::Failure; use rkyv::{with::With, Archive, Deserialize, Serialize}; // Assume this is a remote module or crate that you cannot modify. mod remote { // Notably, this type does not implement the rkyv traits #[derive(Debug, PartialEq)] pub struct Foo { pub ch: char, pub bytes: [u8; 4], pub _uninteresting: u32, // ... and even has private fields bar: Bar, } #[derive(Debug, PartialEq)] pub struct Bar(pub T); impl Foo { // A constructor which is necessary for deserialization because there // are private fields. pub fn new( ch: char, bytes: [u8; 4], _uninteresting: u32, bar: Bar, ) -> Self { Self { ch, bytes, _uninteresting, bar, } } // The getter for a private field. pub fn bar(&self) -> &Bar { &self.bar } } } // Let's create a local type that will serve as `with`-wrapper for `Foo`. 
// Fields must have the same name and type but it's not required to define all // fields. #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = remote::Foo)] // <- #[rkyv(archived = ArchivedFoo)] // ^ not necessary but we might as well replace the default name // `ArchivedFooDef` with `ArchivedFoo`. struct FooDef { // The field's type implements `Archive` and we don't want to apply any // conversion for the archived type so we don't need to specify // `#[rkyv(with = ..)]`. ch: char, // The field is private in the remote type so we need to specify a getter // to access it. Also, its type doesn't implement `Archive` so we need // to specify a `with`-wrapper too. #[rkyv(getter = remote::Foo::bar, with = BarDef)] bar: remote::Bar, // The remote `bytes` field is public but we can still customize our local // field when using a getter. #[rkyv(getter = get_first_byte)] first_byte: u8, } fn get_first_byte(foo: &remote::Foo) -> u8 { foo.bytes[0] } // Deriving `Deserialize` with `remote = ..` requires a `From` implementation. impl From for remote::Foo { fn from(value: FooDef) -> Self { remote::Foo::new(value.ch, [value.first_byte, 2, 3, 4], 567, value.bar) } } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = remote::Bar)] struct BarDef(i32); impl From for remote::Bar { fn from(BarDef(value): BarDef) -> Self { remote::Bar(value) } } fn main() -> Result<(), Failure> { let foo = remote::Foo::new('!', [1, 2, 3, 4], 567, remote::Bar(89)); // To make use of all the utility functions for serialization, accessing, // and deserialization, we can use the `With` type. let bytes = rkyv::to_bytes(With::::cast(&foo))?; let archived: &ArchivedFoo = rkyv::access(&bytes)?; let deserialized: remote::Foo = rkyv::deserialize(With::::cast(archived))?; assert_eq!(foo, deserialized); // ... or better yet, incorporate the remote type in our own types! 
// Selects one of three types according to the configured pointer width.
//
// When `pointer_width_16` is enabled, expand to the first (16-bit) argument.
#[cfg(feature = "pointer_width_16")]
macro_rules! match_pointer_width {
    ($t16:ty, $t32:ty, $t64:ty $(,)?) => {
        $t16
    };
}

// When neither `pointer_width_16` nor `pointer_width_64` is enabled, default
// to the second (32-bit) argument.
#[cfg(not(any(feature = "pointer_width_16", feature = "pointer_width_64")))]
macro_rules! match_pointer_width {
    ($t16:ty, $t32:ty, $t64:ty $(,)?) => {
        $t32
    };
}

// When `pointer_width_64` is enabled, expand to the third (64-bit) argument.
#[cfg(feature = "pointer_width_64")]
macro_rules! match_pointer_width {
    ($t16:ty, $t32:ty, $t64:ty $(,)?) => {
        $t64
    };
}
/// /// This will use an archived [`FixedIsize`](crate::primitive::FixedIsize) to /// hold the offset. pub type RawRelPtr = rel_ptr::RawRelPtr; /// The default relative pointer. /// /// This will use an archived [`FixedIsize`](crate::primitive::FixedIsize) to /// hold the offset. pub type RelPtr = rel_ptr::RelPtr; /// Alias for the archived version of some [`Archive`] type. /// /// This can be useful for reducing the lengths of type definitions. pub type Archived = ::Archived; /// Alias for the resolver for some [`Archive`] type. /// /// This can be useful for reducing the lengths of type definitions. pub type Resolver = ::Resolver; /// Alias for the archived metadata for some [`ArchiveUnsized`] type. /// /// This can be useful for reducing the lengths of type definitions. pub type ArchivedMetadata = <::Archived as ArchivePointee>::ArchivedMetadata; rkyv-0.8.9/src/api/checked.rs000064400000000000000000000115021046102023000141300ustar 00000000000000use bytecheck::CheckBytes; use ptr_meta::Pointee; use rancor::{Source, Strategy}; use crate::{ api::{access_pos_unchecked, root_position}, validation::{ArchiveContext, ArchiveContextExt}, Portable, }; /// Check a byte slice with a given root position and context. /// /// Most of the time, `access` is a more ergonomic way to check and access a /// byte slice. 
/// /// # Example /// /// ``` /// use rkyv::{ /// api::{check_pos_with_context, root_position}, /// rancor::Error, /// to_bytes, /// validation::{ /// archive::ArchiveValidator, shared::SharedValidator, Validator, /// }, /// Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// /// check_pos_with_context::( /// &*bytes, /// root_position::(bytes.len()), /// &mut Validator::new( /// ArchiveValidator::new(&*bytes), /// SharedValidator::new(), /// ), /// ) /// .unwrap(); /// ``` pub fn check_pos_with_context( bytes: &[u8], pos: usize, context: &mut C, ) -> Result<(), E> where T: CheckBytes> + Pointee, C: ArchiveContext + ?Sized, E: Source, { let context = Strategy::::wrap(context); let ptr = bytes.as_ptr().wrapping_add(pos).cast::(); context.in_subtree(ptr, |context| { // SAFETY: `in_subtree` has guaranteed that `ptr` is properly aligned // and points to enough bytes for a `T`. unsafe { T::check_bytes(ptr, context) } }) } /// Access a byte slice with a given root position and context. /// /// This is a safe alternative to [`access_pos_unchecked`]. /// /// Most of the time, the context should be newly-created and not reused. Prefer /// `access_pos` whenever possible. 
/// /// # Example /// /// ``` /// use rkyv::{ /// api::{access_pos_with_context, root_position}, /// rancor::Error, /// to_bytes, /// validation::{ /// archive::ArchiveValidator, shared::SharedValidator, Validator, /// }, /// Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// /// let archived = access_pos_with_context::( /// &*bytes, /// root_position::(bytes.len()), /// &mut Validator::new( /// ArchiveValidator::new(&*bytes), /// SharedValidator::new(), /// ), /// ) /// .unwrap(); /// /// assert_eq!(archived.name.as_str(), "pi"); /// assert_eq!(archived.value.to_native(), 31415926); /// ``` pub fn access_pos_with_context<'a, T, C, E>( bytes: &'a [u8], pos: usize, context: &mut C, ) -> Result<&'a T, E> where T: Portable + CheckBytes> + Pointee, C: ArchiveContext + ?Sized, E: Source, { check_pos_with_context::(bytes, pos, context)?; unsafe { Ok(access_pos_unchecked::(bytes, pos)) } } /// Access a byte slice with a given context. /// /// This is a safe alternative to [`access_unchecked`]. /// /// Most of the time, the context should be newly-created and not reused. Prefer /// `access` whenever possible. 
/// /// [`access_unchecked`]: crate::api::access_unchecked /// /// # Example /// /// ``` /// use rkyv::{ /// api::{access_with_context, root_position}, /// rancor::Error, /// to_bytes, /// validation::{ /// archive::ArchiveValidator, shared::SharedValidator, Validator, /// }, /// Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// /// let archived = access_with_context::( /// &*bytes, /// &mut Validator::new( /// ArchiveValidator::new(&*bytes), /// SharedValidator::new(), /// ), /// ) /// .unwrap(); /// /// assert_eq!(archived.name.as_str(), "pi"); /// assert_eq!(archived.value.to_native(), 31415926); /// ``` pub fn access_with_context<'a, T, C, E>( bytes: &'a [u8], context: &mut C, ) -> Result<&'a T, E> where T: Portable + CheckBytes> + Pointee, C: ArchiveContext + ?Sized, E: Source, { access_pos_with_context::( bytes, root_position::(bytes.len()), context, ) } rkyv-0.8.9/src/api/high/checked.rs000064400000000000000000000154231046102023000150550ustar 00000000000000//! High-level checked APIs. //! //! These APIs have default writers, automatically manage allocators, and //! support shared pointers. use bytecheck::CheckBytes; use rancor::{Source, Strategy}; use crate::{ api::{ access_pos_unchecked_mut, access_pos_with_context, access_with_context, check_pos_with_context, deserialize_using, root_position, }, de::pooling::Pool, seal::Seal, validation::{ archive::ArchiveValidator, shared::SharedValidator, Validator, }, Archive, Deserialize, Portable, }; /// A high-level validator. /// /// This is part of the [high-level API](crate::api::high). 
pub type HighValidator<'a, E> = Strategy, SharedValidator>, E>; fn validator(bytes: &[u8]) -> Validator, SharedValidator> { Validator::new(ArchiveValidator::new(bytes), SharedValidator::new()) } /// Access a byte slice with a given root position. /// /// This is a safe alternative to [`access_pos_unchecked`] and is part of the /// [high-level API](crate::api::high). /// /// [`access_pos_unchecked`]: crate::api::access_pos_unchecked /// /// # Example /// /// ``` /// use rkyv::{ /// api::{high::access_pos, root_position}, /// bytecheck::CheckBytes, /// rancor::Error, /// to_bytes, Archive, Archived, Serialize, /// }; /// /// #[derive(Archive, Serialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// let archived = access_pos::( /// &bytes, /// root_position::(bytes.len()), /// ) /// .unwrap(); /// /// assert_eq!(archived.name, "pi"); /// assert_eq!(archived.value, 31415926); /// ``` pub fn access_pos(bytes: &[u8], pos: usize) -> Result<&T, E> where T: Portable + for<'a> CheckBytes>, E: Source, { access_pos_with_context::<_, _, E>(bytes, pos, &mut validator(bytes)) } /// Access a byte slice. /// /// This is a safe alternative to [`access_unchecked`] and is part of the /// [high-level API](crate::api::high). 
/// /// [`access_unchecked`]: crate::access_unchecked /// /// # Example /// /// ``` /// use rkyv::{ /// access, bytecheck::CheckBytes, rancor::Error, to_bytes, Archive, /// Archived, Serialize, /// }; /// /// #[derive(Archive, Serialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// let archived = access::(&bytes).unwrap(); /// /// assert_eq!(archived.name, "pi"); /// assert_eq!(archived.value, 31415926); /// ``` pub fn access(bytes: &[u8]) -> Result<&T, E> where T: Portable + for<'a> CheckBytes>, E: Source, { access_with_context::<_, _, E>(bytes, &mut validator(bytes)) } /// Mutably access a byte slice with a given root position. /// /// This is a safe alternative to [`access_pos_unchecked_mut`] and is part of /// the [high-level API](crate::api::high). /// /// # Example /// /// ``` /// use rkyv::{ /// api::{high::access_pos_mut, root_position}, /// bytecheck::CheckBytes, /// rancor::Error, munge::munge, /// to_bytes, Archive, Archived, Serialize, /// }; /// /// #[derive(Archive, Serialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let mut bytes = to_bytes::(&value).unwrap(); /// let root_pos = root_position::(bytes.len()); /// /// let mut archived = /// access_pos_mut::(&mut bytes, root_pos).unwrap(); /// /// // Because the access is mutable, we can mutate the archived data /// munge!(let ArchivedExample { mut value, .. 
} = archived); /// assert_eq!(*value, 31415926); /// *value = 12345.into(); /// assert_eq!(*value, 12345); /// ``` pub fn access_pos_mut( bytes: &mut [u8], pos: usize, ) -> Result, E> where T: Portable + for<'a> CheckBytes>, E: Source, { let mut context = validator(bytes); check_pos_with_context::(bytes, pos, &mut context)?; unsafe { Ok(access_pos_unchecked_mut::(bytes, pos)) } } /// Mutably access a byte slice. /// /// This is a safe alternative to [`access_unchecked_mut`] and is part of the /// [high-level API](crate::api::high). /// /// [`access_unchecked_mut`]: crate::api::access_unchecked_mut /// /// # Example /// /// ``` /// use rkyv::{ /// access_mut, /// bytecheck::CheckBytes, /// rancor::Error, munge::munge, /// to_bytes, Archive, Archived, Serialize, /// }; /// /// #[derive(Archive, Serialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let mut bytes = to_bytes::(&value).unwrap(); /// /// let mut archived = access_mut::(&mut bytes) /// .unwrap(); /// /// // Because the access is mutable, we can mutate the archived data /// munge!(let ArchivedExample { mut value, .. } = archived); /// assert_eq!(*value, 31415926); /// *value = 12345.into(); /// assert_eq!(*value, 12345); /// ``` pub fn access_mut(bytes: &mut [u8]) -> Result, E> where T: Portable + for<'a> CheckBytes>, E: Source, { let mut context = validator(bytes); let pos = root_position::(bytes.len()); check_pos_with_context::(bytes, pos, &mut context)?; unsafe { Ok(access_pos_unchecked_mut::(bytes, pos)) } } /// Deserialize a value from the given bytes. /// /// This is a safe alternative to [`from_bytes_unchecked`] and is part of the /// [high-level API](crate::api::high). 
/// /// [`from_bytes_unchecked`]: crate::api::high::from_bytes_unchecked /// /// # Example /// /// ``` /// use rkyv::{ /// from_bytes, rancor::Error, to_bytes, Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// let deserialized = from_bytes::(&bytes).unwrap(); /// /// assert_eq!(deserialized, value); /// ``` pub fn from_bytes(bytes: &[u8]) -> Result where T: Archive, T::Archived: for<'a> CheckBytes> + Deserialize>, E: Source, { let mut deserializer = Pool::default(); deserialize_using(access::(bytes)?, &mut deserializer) } rkyv-0.8.9/src/api/high/mod.rs000064400000000000000000000174031046102023000142460ustar 00000000000000//! APIs for environments where allocations can be made. //! //! These APIs have default writers, automatically manage allocators, and //! support shared pointers. #[cfg(feature = "bytecheck")] mod checked; use rancor::Strategy; #[cfg(feature = "bytecheck")] pub use self::checked::*; use crate::{ access_unchecked, api::{deserialize_using, serialize_using}, de::Pool, ser::{ allocator::ArenaHandle, sharing::Share, Allocator, Serializer, Writer, }, util::{with_arena, AlignedVec}, Archive, Deserialize, Serialize, }; /// A high-level serializer. /// /// This is part of the [high-level API](crate::api::high). pub type HighSerializer = Strategy, E>; /// A high-level deserializer. /// /// This is part of the [high-level API](crate::api::high). pub type HighDeserializer = Strategy; /// Serialize a value to bytes. /// /// Returns the serialized bytes in an [`AlignedVec`]. /// /// This is part of the [high-level API](crate::api::high). 
/// /// # Example /// /// ``` /// use rkyv::{ /// from_bytes, rancor::Error, to_bytes, Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// let deserialized = from_bytes::(&bytes).unwrap(); /// /// assert_eq!(deserialized, value); /// ``` pub fn to_bytes( // rustfmt insists on inlining this parameter even though it exceeds the // max line length #[rustfmt::skip] value: &impl for<'a> Serialize< HighSerializer, E>, >, ) -> Result where E: rancor::Source, { to_bytes_in(value, AlignedVec::new()) } /// Serialize a value and write the bytes to the given writer. /// /// This is part of the [high-level API](crate::api::high). /// /// # Example /// /// ``` /// use rkyv::{ /// api::high::to_bytes_in, from_bytes, rancor::Error, util::AlignedVec, /// Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = /// to_bytes_in::<_, Error>(&value, AlignedVec::<8>::new()).unwrap(); /// let deserialized = from_bytes::(&bytes).unwrap(); /// /// assert_eq!(deserialized, value); /// ``` pub fn to_bytes_in( value: &impl for<'a> Serialize, E>>, writer: W, ) -> Result where W: Writer, E: rancor::Source, { with_arena(|arena| to_bytes_in_with_alloc(value, writer, arena.acquire())) } /// Serialize a value using the given allocator. /// /// This is part of the [high-level API](crate::api::high). 
/// /// # Example /// /// ``` /// use rkyv::{ /// api::high::to_bytes_with_alloc, from_bytes, rancor::Error, /// util::with_arena, Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// with_arena(|arena| { /// let bytes = /// to_bytes_with_alloc::<_, Error>(&value, arena.acquire()).unwrap(); /// let deserialized = from_bytes::(&bytes).unwrap(); /// /// assert_eq!(deserialized, value); /// }); /// ``` pub fn to_bytes_with_alloc( value: &impl Serialize>, alloc: A, ) -> Result where A: Allocator, E: rancor::Source, { to_bytes_in_with_alloc(value, AlignedVec::new(), alloc) } /// Serialize a value using the given allocator and write the bytes to the given /// writer. /// /// This is part of the [high-level API](crate::api::high). /// /// # Example /// /// ``` /// use rkyv::{ /// api::high::to_bytes_in_with_alloc, /// from_bytes, /// rancor::Error, /// util::{with_arena, AlignedVec}, /// Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// with_arena(|arena| { /// let bytes = to_bytes_in_with_alloc::<_, _, Error>( /// &value, /// AlignedVec::<8>::new(), /// arena.acquire(), /// ) /// .expect("failed to serialize vec"); /// /// let deserialized = from_bytes::(&bytes) /// .expect("failed to deserialize vec"); /// /// assert_eq!(deserialized, value); /// }); /// ``` pub fn to_bytes_in_with_alloc( value: &impl Serialize>, writer: W, alloc: A, ) -> Result where W: Writer, A: Allocator, E: rancor::Source, { let mut serializer = Serializer::new(writer, alloc, Share::new()); serialize_using(value, &mut serializer)?; Ok(serializer.into_writer()) } /// Deserialize a 
value from the given bytes. /// /// This function does not check that the data is valid. Use [`from_bytes`] to /// validate the data instead. /// /// This is part of the [high-level API](crate::api::high). /// /// # Safety /// /// The byte slice must represent a valid archived type when accessed at the /// default root position. See the [module docs](crate::api) for more /// information. /// /// # Example /// /// ``` /// use rkyv::{ /// from_bytes_unchecked, rancor::Error, to_bytes, Archive, Deserialize, /// Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// let deserialized = /// unsafe { from_bytes_unchecked::(&bytes).unwrap() }; /// /// assert_eq!(deserialized, value); /// ``` pub unsafe fn from_bytes_unchecked(bytes: &[u8]) -> Result where T: Archive, T::Archived: Deserialize>, { // SAFETY: The caller has guaranteed that a valid `T` is located at the root // position in the byte slice. let archived = unsafe { access_unchecked::(bytes) }; deserialize(archived) } /// Deserialize a value from the given archived value. /// /// This is part of the [high-level API](crate::api::high). 
/// /// # Example /// /// ``` /// use rkyv::{ /// access, deserialize, rancor::Error, to_bytes, Archive, Deserialize, /// Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// let archived = access::(&*bytes).unwrap(); /// let deserialized = deserialize::(archived).unwrap(); /// /// assert_eq!(deserialized, value); /// ``` pub fn deserialize( value: &impl Deserialize>, ) -> Result { deserialize_using(value, &mut Pool::new()) } #[cfg(test)] mod tests { use rancor::Panic; use crate::{ alloc::{string::ToString, vec::Vec}, api::high::to_bytes_in, }; #[test] fn to_bytes_in_vec() { let value = "hello world".to_string(); let bytes = to_bytes_in::<_, Panic>(&value, Vec::new()).unwrap(); assert!(!bytes.is_empty()); } } rkyv-0.8.9/src/api/low/checked.rs000064400000000000000000000205601046102023000147350ustar 00000000000000//! Low-level checked APIs. //! //! These APIs require user-provided writers and allocators, and do not support //! shared pointers. use bytecheck::CheckBytes; use rancor::{Source, Strategy}; use crate::{ api::{ access_pos_unchecked_mut, access_pos_with_context, access_with_context, check_pos_with_context, deserialize_using, root_position, }, de::pooling::Unpool, seal::Seal, validation::{archive::ArchiveValidator, Validator}, Archive, Deserialize, Portable, }; /// A low-level validator. /// /// This is part of the [low-level API](crate::api::low). pub type LowValidator<'a, E> = Strategy, ()>, E>; fn validator(bytes: &[u8]) -> Validator, ()> { Validator::new(ArchiveValidator::new(bytes), ()) } /// Access a byte slice with a given root position. /// /// This is a safe alternative to [`access_pos_unchecked`] and is part of the /// [low-level API](crate::api::low). 
/// /// [`access_pos_unchecked`]: crate::api::access_pos_unchecked /// /// # Example /// /// ``` /// use core::mem::MaybeUninit; /// /// use rkyv::{ /// api::{ /// low::{access_pos, to_bytes_in_with_alloc}, /// root_position, /// }, /// rancor::Failure, /// ser::{allocator::SubAllocator, writer::Buffer}, /// util::Align, /// with::InlineAsBox, /// Archive, Serialize, /// }; /// /// let mut output = Align([MaybeUninit::::uninit(); 256]); /// let mut alloc = [MaybeUninit::::uninit(); 256]; /// /// #[derive(Archive, Serialize)] /// struct Example<'a> { /// #[rkyv(with = InlineAsBox)] /// inner: &'a i32, /// } /// /// let forty_two = 42; /// let value = Example { inner: &forty_two }; /// /// let bytes = to_bytes_in_with_alloc::<_, _, Failure>( /// &value, /// Buffer::from(&mut *output), /// SubAllocator::new(&mut alloc), /// ) /// .unwrap(); /// /// let archived = access_pos::, Failure>( /// &*bytes, /// root_position::>(bytes.len()), /// ) /// .unwrap(); /// assert_eq!(*archived.inner, 42); /// ``` pub fn access_pos(bytes: &[u8], pos: usize) -> Result<&T, E> where T: Portable + for<'a> CheckBytes>, E: Source, { access_pos_with_context::<_, _, E>(bytes, pos, &mut validator(bytes)) } /// Access a byte slice. /// /// This is a safe alternative to [`access_unchecked`] and is part of the /// [low-level API](crate::api::low). 
/// /// [`access_unchecked`]: crate::api::access_unchecked /// /// # Example /// /// ``` /// use core::mem::MaybeUninit; /// /// use rkyv::{ /// api::{ /// low::{access, to_bytes_in_with_alloc}, /// root_position, /// }, /// rancor::Failure, /// ser::{allocator::SubAllocator, writer::Buffer}, /// util::Align, /// with::InlineAsBox, /// Archive, Serialize, /// }; /// /// let mut output = Align([MaybeUninit::::uninit(); 256]); /// let mut alloc = [MaybeUninit::::uninit(); 256]; /// /// #[derive(Archive, Serialize)] /// struct Example<'a> { /// #[rkyv(with = InlineAsBox)] /// inner: &'a i32, /// } /// /// let forty_two = 42; /// let value = Example { inner: &forty_two }; /// /// let bytes = to_bytes_in_with_alloc::<_, _, Failure>( /// &value, /// Buffer::from(&mut *output), /// SubAllocator::new(&mut alloc), /// ) /// .unwrap(); /// /// let archived = access::, Failure>(&*bytes).unwrap(); /// assert_eq!(*archived.inner, 42); /// ``` pub fn access(bytes: &[u8]) -> Result<&T, E> where T: Portable + for<'a> CheckBytes>, E: Source, { access_with_context::<_, _, E>(bytes, &mut validator(bytes)) } /// Mutably access a byte slice with a given root position. /// /// This is a safe alternative to [`access_pos_unchecked_mut`] and is part of /// the [low-level API](crate::api::low). 
/// /// [`access_pos_unchecked_mut`]: crate::api::access_pos_unchecked_mut /// /// # Example /// /// ``` /// use core::mem::MaybeUninit; /// /// use rkyv::{ /// api::{root_position, low::{to_bytes_in_with_alloc, access_pos_mut}}, /// rancor::Failure, /// ser::{allocator::SubAllocator, writer::Buffer}, /// util::Align, /// with::InlineAsBox, /// Archive, Serialize, /// munge::munge, /// }; /// /// let mut output = Align([MaybeUninit::::uninit(); 256]); /// let mut alloc = [MaybeUninit::::uninit(); 256]; /// /// #[derive(Archive, Serialize)] /// struct Example { /// inner: i32, /// } /// /// let value = Example { inner: 42 }; /// /// let mut bytes = to_bytes_in_with_alloc::<_, _, Failure>( /// &value, /// Buffer::from(&mut *output), /// SubAllocator::new(&mut alloc), /// ) /// .unwrap(); /// /// let root_pos = root_position::(bytes.len()); /// let mut archived = access_pos_mut::( /// &mut *bytes, /// root_pos, /// ).unwrap(); /// /// // Because the access is mutable, we can mutate the archived data /// munge!(let ArchivedExample { mut inner, .. } = archived); /// assert_eq!(*inner, 42); /// *inner = 12345.into(); /// assert_eq!(*inner, 12345); /// ``` pub fn access_pos_mut( bytes: &mut [u8], pos: usize, ) -> Result, E> where T: Portable + for<'a> CheckBytes>, E: Source, { let mut context = validator(bytes); check_pos_with_context::(bytes, pos, &mut context)?; unsafe { Ok(access_pos_unchecked_mut::(bytes, pos)) } } /// Mutably accesses a byte slice. /// /// This is a safe alternative to [`access_unchecked_mut`] and is part of the /// [low-level API](crate::api::low). 
/// /// [`access_unchecked_mut`]: crate::api::access_unchecked_mut /// /// # Example /// /// ``` /// use core::mem::MaybeUninit; /// /// use rkyv::{ /// api::low::{to_bytes_in_with_alloc, access_mut}, /// rancor::Failure, /// ser::{allocator::SubAllocator, writer::Buffer}, /// util::Align, /// with::InlineAsBox, /// Archive, Serialize, /// munge::munge, /// }; /// /// let mut output = Align([MaybeUninit::::uninit(); 256]); /// let mut alloc = [MaybeUninit::::uninit(); 256]; /// /// #[derive(Archive, Serialize)] /// struct Example { /// inner: i32, /// } /// /// let value = Example { inner: 42 }; /// /// let mut bytes = to_bytes_in_with_alloc::<_, _, Failure>( /// &value, /// Buffer::from(&mut *output), /// SubAllocator::new(&mut alloc), /// ) /// .unwrap(); /// /// let mut archived = access_mut::( /// &mut *bytes, /// ).unwrap(); /// /// // Because the access is mutable, we can mutate the archived data /// munge!(let ArchivedExample { mut inner, .. } = archived); /// assert_eq!(*inner, 42); /// *inner = 12345.into(); /// assert_eq!(*inner, 12345); /// ``` pub fn access_mut(bytes: &mut [u8]) -> Result, E> where T: Portable + for<'a> CheckBytes>, E: Source, { let mut context = validator(bytes); let pos = root_position::(bytes.len()); check_pos_with_context::(bytes, pos, &mut context)?; unsafe { Ok(access_pos_unchecked_mut::(bytes, pos)) } } /// Deserialize a value from the given bytes. /// /// This is a safe alternative to [`from_bytes_unchecked`] and is part of the /// [low-level API](crate::api::low). 
/// /// [`from_bytes_unchecked`]: crate::api::low::from_bytes_unchecked /// /// # Example /// /// ``` /// use core::mem::MaybeUninit; /// /// use rkyv::{ /// api::low::{from_bytes, to_bytes_in_with_alloc}, /// rancor::Failure, /// ser::{allocator::SubAllocator, writer::Buffer}, /// util::Align, /// Archive, Deserialize, Serialize, /// }; /// /// let mut output = Align([MaybeUninit::::uninit(); 256]); /// let mut alloc = [MaybeUninit::::uninit(); 256]; /// /// #[derive(Archive, Serialize, Deserialize, PartialEq, Debug)] /// struct Example { /// inner: i32, /// } /// /// let value = Example { inner: 42 }; /// /// let bytes = to_bytes_in_with_alloc::<_, _, Failure>( /// &value, /// Buffer::from(&mut *output), /// SubAllocator::new(&mut alloc), /// ) /// .unwrap(); /// /// let deserialized = from_bytes::(&*bytes).unwrap(); /// assert_eq!(value, deserialized); /// ``` pub fn from_bytes(bytes: &[u8]) -> Result where T: Archive, T::Archived: for<'a> CheckBytes> + Deserialize>, E: Source, { deserialize_using(access::(bytes)?, &mut Unpool) } rkyv-0.8.9/src/api/low/mod.rs000064400000000000000000000123151046102023000141250ustar 00000000000000//! APIs for environments where allocations cannot be made. //! //! These APIs require user-provided writers and allocators, and do not support //! shared pointers. #[cfg(feature = "bytecheck")] mod checked; use rancor::Strategy; #[cfg(feature = "bytecheck")] pub use self::checked::*; use crate::{ access_unchecked, api::{deserialize_using, serialize_using}, ser::{Allocator, Serializer, Writer}, Archive, Deserialize, Serialize, }; /// A general-purpose serializer suitable for environments where allocations /// cannot be made. /// /// This is part of the [low-level API](crate::api::low). pub type LowSerializer = Strategy, E>; /// A general-purpose deserializer suitable for environments where allocations /// cannot be made. /// /// This is part of the [low-level API](crate::api::low). 
pub type LowDeserializer = Strategy<(), E>; /// Serialize a value using the given allocator and write the bytes to the given /// writer. /// /// This is part of the [low-level API](crate::api::low). /// /// # Example /// /// ``` /// use core::mem::MaybeUninit; /// /// use rkyv::{ /// access_unchecked, /// api::low::to_bytes_in_with_alloc, /// rancor::Failure, /// ser::{allocator::SubAllocator, writer::Buffer}, /// util::Align, /// with::InlineAsBox, /// Archive, Serialize, /// }; /// /// let mut output = Align([MaybeUninit::::uninit(); 256]); /// let mut alloc = [MaybeUninit::::uninit(); 256]; /// /// #[derive(Archive, Serialize)] /// struct Example<'a> { /// #[rkyv(with = InlineAsBox)] /// inner: &'a i32, /// } /// /// let forty_two = 42; /// let value = Example { inner: &forty_two }; /// /// let bytes = to_bytes_in_with_alloc::<_, _, Failure>( /// &value, /// Buffer::from(&mut *output), /// SubAllocator::new(&mut alloc), /// ) /// .unwrap(); /// /// let archived = unsafe { access_unchecked::>(&*bytes) }; /// assert_eq!(*archived.inner, 42); /// ``` pub fn to_bytes_in_with_alloc( value: &impl Serialize>, writer: W, alloc: A, ) -> Result where W: Writer, A: Allocator, E: rancor::Source, { let mut serializer = Serializer::new(writer, alloc, ()); serialize_using(value, &mut serializer)?; Ok(serializer.into_writer()) } /// Deserialize a value from the given bytes. /// /// This function does not check that the data is valid. Use [`from_bytes`] to /// validate the data instead. /// /// This is part of the [low-level API](crate::api::low). /// /// # Safety /// /// The byte slice must represent a valid archived type when accessed at the /// default root position. See the [module docs](crate::api) for more /// information. 
/// /// # Example /// /// ``` /// use core::mem::MaybeUninit; /// /// use rkyv::{ /// access_unchecked, /// api::low::{from_bytes_unchecked, to_bytes_in_with_alloc}, /// rancor::Failure, /// ser::{allocator::SubAllocator, writer::Buffer}, /// util::Align, /// with::InlineAsBox, /// Archive, Deserialize, Serialize, /// }; /// /// let mut output = Align([MaybeUninit::::uninit(); 256]); /// let mut alloc = [MaybeUninit::::uninit(); 256]; /// /// #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] /// struct Example { /// inner: i32, /// } /// /// let value = Example { inner: 42 }; /// /// let bytes = to_bytes_in_with_alloc::<_, _, Failure>( /// &value, /// Buffer::from(&mut *output), /// SubAllocator::new(&mut alloc), /// ) /// .unwrap(); /// /// let deserialized = /// unsafe { from_bytes_unchecked::(&*bytes).unwrap() }; /// assert_eq!(value, deserialized); /// ``` pub unsafe fn from_bytes_unchecked(bytes: &[u8]) -> Result where T: Archive, T::Archived: Deserialize>, { // SAFETY: The caller has guaranteed that a valid `T` is located at the root // position in the byte slice. let archived = unsafe { access_unchecked::(bytes) }; deserialize(archived) } /// Deserialize a value from the given archived value. /// /// This is part of the [low-level API](crate::api::low). 
/// /// # Example /// /// ``` /// use core::mem::MaybeUninit; /// /// use rkyv::{ /// access_unchecked, /// api::low::{deserialize, to_bytes_in_with_alloc}, /// rancor::Failure, /// ser::{allocator::SubAllocator, writer::Buffer}, /// util::Align, /// with::InlineAsBox, /// Archive, Deserialize, Serialize, /// }; /// /// let mut output = Align([MaybeUninit::::uninit(); 256]); /// let mut alloc = [MaybeUninit::::uninit(); 256]; /// /// #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] /// struct Example { /// inner: i32, /// } /// /// let value = Example { inner: 42 }; /// /// let bytes = to_bytes_in_with_alloc::<_, _, Failure>( /// &value, /// Buffer::from(&mut *output), /// SubAllocator::new(&mut alloc), /// ) /// .unwrap(); /// /// let archived = unsafe { access_unchecked::(&*bytes) }; /// let deserialized = deserialize::(archived).unwrap(); /// assert_eq!(value, deserialized); /// ``` pub fn deserialize( value: &impl Deserialize>, ) -> Result { deserialize_using(value, &mut ()) } rkyv-0.8.9/src/api/mod.rs000064400000000000000000000305741046102023000133330ustar 00000000000000//! APIs for producing and using archived data. //! //! # Accessing byte slices //! //! The safety requirements for accessing a byte slice will often state that a //! byte slice must "represent a valid archived type". The specific validity //! requirements may vary widely depending on the types being accessed, and so //! in general the only way to guarantee that this call is safe is to have //! previously validated the byte slice. //! //! Using techniques such as cryptographic signing can provide a more performant //! way to verify data integrity from trusted sources. //! //! It is generally safe to assume that unchanged and properly-aligned //! serialized bytes are always safe to access without validation. By contrast, //! bytes from a potentially-malicious source should always be validated prior //! to access. 
#[cfg(feature = "bytecheck")] mod checked; #[cfg(feature = "alloc")] pub mod high; pub mod low; #[cfg(test)] pub mod test; use core::mem::size_of; use rancor::Strategy; #[cfg(feature = "bytecheck")] pub use self::checked::*; use crate::{seal::Seal, ser::Writer, Deserialize, Portable, SerializeUnsized}; #[cfg(debug_assertions)] fn sanity_check_buffer(ptr: *const u8, pos: usize, size: usize) { use core::mem::{align_of, size_of}; let root_size = size_of::(); let min_size = pos + root_size; debug_assert!( min_size <= size, concat!( "buffer too small, expected at least {} bytes but found {} bytes\n", "help: the root type at offset {} requires at least {} bytes", ), min_size, size, pos, root_size, ); let expect_align = align_of::(); let actual_align = (ptr as usize) & (expect_align - 1); debug_assert_eq!( actual_align, 0, concat!( "unaligned buffer, expected alignment {} but found alignment {}\n", "help: rkyv requires byte buffers to be aligned to access the \ data inside.\n", " Using an AlignedVec or manually aligning your data with \ `#[align(...)]` may resolve this issue.\n", " Alternatively, you may enable the `unaligned` feature to \ relax the alignment requirements for your archived data.\n", " `unaligned` is a format control feature, and enabling it \ may change the format of your serialized data)", ), expect_align, 1 << actual_align.trailing_zeros() ); } /// Return the position of the root within a buffer of `length` bytes. /// /// Most accessing functions have a variant which automatically calculates this /// value for you. For example, prefer to call [`access_unchecked`] over /// [`access_pos_unchecked`]. /// /// The root position of a buffer is calculated by subtracing the size of the /// root object from the end of the buffer. If the buffer size is too small to /// accomodate a root of the given type, then this function will return zero. 
/// /// # Example /// /// ``` /// use rkyv::{api::root_position, Archive}; /// /// #[derive(Archive)] /// pub struct MyData { /// inner: u32, /// } /// /// assert_eq!(size_of::(), 4); /// /// // This is too small, and so returns 0 /// assert_eq!(root_position::(3), 0); /// assert_eq!(root_position::(4), 0); /// assert_eq!(root_position::(5), 1); /// ``` pub fn root_position(size: usize) -> usize { size.saturating_sub(size_of::()) } /// Access a byte slice with a given root position. /// /// Most of the time, the root position should be calculated using the root type /// and size of the buffer. Prefer [`access_unchecked`] whenever possible. /// /// While the root of the archived data is located at the given position, the /// reachable data may be located throughout the byte slice. /// /// This function does not check that the bytes are valid to access. Use /// [`access_pos`](high::access_pos) to safely access the buffer using /// validation. /// /// # Safety /// /// The byte slice must represent a valid archived type when accessed with the /// given root position. See the [module docs](crate::api) for more information. 
/// /// # Example /// /// ``` /// use rkyv::{ /// api::{access_pos_unchecked, root_position}, /// rancor::Error, /// to_bytes, Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// /// let archived = unsafe { /// access_pos_unchecked::( /// &*bytes, /// root_position::(bytes.len()), /// ) /// }; /// assert_eq!(archived.name, "pi"); /// assert_eq!(archived.value, 31415926); /// ``` pub unsafe fn access_pos_unchecked( bytes: &[u8], pos: usize, ) -> &T { #[cfg(debug_assertions)] sanity_check_buffer::(bytes.as_ptr(), pos, bytes.len()); // SAFETY: The caller has guaranteed that a valid `T` is located at `pos` in // the byte slice. unsafe { &*bytes.as_ptr().add(pos).cast() } } /// Mutably access a byte slice with a given root position. /// /// Most of the time, the root position should be calculated using the root type /// and size of the buffer. Prefer [`access_unchecked_mut`] whenever possible. /// /// While the root of the archived data is located at the given position, the /// reachable data may be located throughout the byte slice. /// /// This function does not check that the bytes are valid to access. Use /// [`access_pos_mut`](high::access_pos_mut) to safely access the buffer using /// validation. /// /// The returned `Seal` restricts the mutating operations that may be safely /// performed on the returned reference. See [`Seal`] for more information. /// /// # Safety /// /// The byte slice must represent a valid archived type when accessed with the /// given root position. See the [module docs](crate::api) for more information. 
/// /// # Example /// /// ``` /// use rkyv::{ /// to_bytes, api::{root_position, access_pos_unchecked_mut}, util::Align, /// Archive, Serialize, Deserialize, munge::munge, rancor::Error, /// }; /// /// #[derive(Archive, Serialize, Deserialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let mut bytes = to_bytes::(&value).unwrap(); /// let root_pos = root_position::(bytes.len()); /// /// let mut archived = unsafe { /// access_pos_unchecked_mut::(&mut *bytes, root_pos) /// }; /// assert_eq!(archived.name, "pi"); /// assert_eq!(archived.value, 31415926); /// /// // Because the access is mutable, we can mutate the archived data /// munge!(let ArchivedExample { mut value, .. } = archived); /// assert_eq!(*value, 31415926); /// *value = 12345.into(); /// assert_eq!(*value, 12345); /// ``` pub unsafe fn access_pos_unchecked_mut( bytes: &mut [u8], pos: usize, ) -> Seal<'_, T> { #[cfg(debug_assertions)] sanity_check_buffer::(bytes.as_ptr(), pos, bytes.len()); // SAFETY: The caller has guaranteed that the data at the given position // passes validation when passed to `access_pos_mut`. unsafe { Seal::new(&mut *bytes.as_mut_ptr().add(pos).cast()) } } /// Access a byte slice. /// /// This function does not check that the bytes are valid to access. Use /// [`access`](high::access) to safely access the buffer using validation. /// /// # Safety /// /// The byte slice must represent a valid archived type when accessed at the /// default root position. See the [module docs](crate::api) for more /// information. 
/// /// # Example /// /// ``` /// use rkyv::{ /// access_unchecked, rancor::Error, to_bytes, Archive, Deserialize, /// Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// /// let archived = unsafe { access_unchecked::(&*bytes) }; /// assert_eq!(archived.name, "pi"); /// assert_eq!(archived.value, 31415926); /// ``` pub unsafe fn access_unchecked(bytes: &[u8]) -> &T { // SAFETY: The caller has guaranteed that a valid `T` is located at the root // position in the byte slice. unsafe { access_pos_unchecked::(bytes, root_position::(bytes.len())) } } /// Mutably access a byte slice. /// /// This function does not check that the bytes are valid to access. Use /// [`access_mut`](high::access_mut) to safely access the buffer using /// validation. /// /// # Safety /// /// The byte slice must represent a valid archived type when accessed at the /// default root position. See the [module docs](crate::api) for more /// information. /// /// # Example /// /// ``` /// use rkyv::{ /// to_bytes, access_unchecked_mut, util::Align, Archive, /// munge::munge, Serialize, Deserialize, rancor::Error, /// }; /// /// #[derive(Archive, Serialize, Deserialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let mut bytes = to_bytes::(&value).unwrap(); /// /// let mut archived = unsafe { /// access_unchecked_mut::(&mut *bytes) /// }; /// assert_eq!(archived.name, "pi"); /// assert_eq!(archived.value, 31415926); /// /// // Because the access is mutable, we can mutate the archived data /// munge!(let ArchivedExample { mut value, .. 
} = archived); /// assert_eq!(*value, 31415926); /// *value = 12345.into(); /// assert_eq!(*value, 12345); /// ``` pub unsafe fn access_unchecked_mut( bytes: &mut [u8], ) -> Seal<'_, T> { // SAFETY: The caller has guaranteed that the given bytes pass validation // when passed to `access_mut`. unsafe { access_pos_unchecked_mut::(bytes, root_position::(bytes.len())) } } /// Serialize a value using the given serializer. /// /// Returns the position of the serialized value. /// /// Most of the time, [`to_bytes`](high::to_bytes) is a more ergonomic way to /// serialize a value to bytes. /// /// # Example /// /// ``` /// use rkyv::{ /// access, /// api::serialize_using, /// rancor::Error, /// ser::{sharing::Share, Serializer}, /// util::{with_arena, AlignedVec}, /// Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let bytes = with_arena(|arena| { /// let mut serializer = Serializer::new( /// AlignedVec::<4>::new(), /// arena.acquire(), /// Share::new(), /// ); /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// serialize_using::<_, Error>(&value, &mut serializer).unwrap(); /// serializer.into_writer() /// }); /// /// let archived = access::(&*bytes).unwrap(); /// assert_eq!(archived.value, 31415926); /// ``` pub fn serialize_using( value: &impl SerializeUnsized>, serializer: &mut S, ) -> Result where S: Writer + ?Sized, { value.serialize_unsized(Strategy::wrap(serializer)) } /// Deserialize a value using the given deserializer. /// /// Most of the time, [`deserialize`](high::deserialize) is a more ergonomic way /// to deserialize an archived value. 
/// /// # Example /// /// ``` /// use rkyv::{ /// access, api::deserialize_using, de::Pool, rancor::Error, to_bytes, /// Archive, Deserialize, Serialize, /// }; /// /// #[derive(Archive, Serialize, Deserialize)] /// struct Example { /// name: String, /// value: i32, /// } /// /// let value = Example { /// name: "pi".to_string(), /// value: 31415926, /// }; /// /// let bytes = to_bytes::(&value).unwrap(); /// let archived = access::(&bytes).unwrap(); /// let deserialized = /// deserialize_using::(archived, &mut Pool::new()) /// .unwrap(); /// ``` pub fn deserialize_using( value: &impl Deserialize>, deserializer: &mut D, ) -> Result { value.deserialize(Strategy::wrap(deserializer)) } rkyv-0.8.9/src/api/test/inner_checked.rs000064400000000000000000000043001046102023000163000ustar 00000000000000use core::fmt::Debug; use bytecheck::CheckBytes; use rancor::Panic; #[cfg(feature = "alloc")] use crate::api::high::{access_mut, HighValidator as TestValidator}; #[cfg(not(feature = "alloc"))] use crate::api::low::{access_mut, LowValidator as TestValidator}; use crate::{ api::test::{deserialize, to_bytes, TestDeserializer, TestSerializer}, seal::Seal, Archive, Deserialize, Serialize, }; /// Serializes the given type to bytes, accesses the archived version, and calls /// the given function with it. pub fn to_archived(value: &T, f: impl FnOnce(Seal<'_, T::Archived>)) where T: for<'a> Serialize>, T::Archived: for<'a> CheckBytes>, { to_bytes(value, |bytes| to_archived_from_bytes::(bytes, f)); } /// Accesses the archived version and calls the given function with it. pub fn to_archived_from_bytes( bytes: &mut [u8], f: impl FnOnce(Seal<'_, T::Archived>), ) where T: Archive, T::Archived: for<'a> CheckBytes>, { let archived_value = access_mut::(bytes).unwrap(); f(archived_value); } /// Serializes and deserializes the given value, checking for equality with the /// archived and deserialized values using the given comparison function. 
pub fn roundtrip_with(value: &T, cmp: impl Fn(&T, &T::Archived)) where T: Debug + PartialEq + for<'a> Serialize>, T::Archived: Debug + Deserialize + for<'a> CheckBytes>, { to_archived(value, |archived_value| { cmp(value, &*archived_value); let deserialized = deserialize::(&*archived_value); assert_eq!(value, &deserialized); }); } /// Serializes and deserializes the given value, checking for equality with the /// archived and deserialized values. pub fn roundtrip(value: &T) where T: Debug + PartialEq + for<'a> Serialize>, T::Archived: Debug + PartialEq + Deserialize + for<'a> CheckBytes>, { roundtrip_with(value, |a, b| assert_eq!(b, a)); } rkyv-0.8.9/src/api/test/inner_unchecked.rs000064400000000000000000000033701046102023000166510ustar 00000000000000#![allow(dead_code)] use core::fmt::Debug; use crate::{ access_unchecked_mut, api::test::{deserialize, to_bytes, TestDeserializer, TestSerializer}, seal::Seal, Archive, Deserialize, Serialize, }; /// Serializes the given type to bytes, accesses the archived version, and calls /// the given function with it. pub fn to_archived(value: &T, f: impl FnOnce(Seal<'_, T::Archived>)) where T: for<'a> Serialize>, { to_bytes(value, |bytes| to_archived_from_bytes::(bytes, f)); } /// Accesses the archived version and calls the given function with it. pub fn to_archived_from_bytes( bytes: &mut [u8], f: impl FnOnce(Seal<'_, T::Archived>), ) where T: Archive, { let archived_value = unsafe { access_unchecked_mut::(bytes) }; f(archived_value); } /// Serializes and deserializes the given value, checking for equality with the /// archived and deserialized values using the given comparison function. 
pub fn roundtrip_with(value: &T, cmp: impl Fn(&T, &T::Archived)) where T: Debug + PartialEq + for<'a> Serialize>, T::Archived: Debug + Deserialize, { to_archived(value, |archived_value| { cmp(value, &*archived_value); let deserialized = deserialize::(&*archived_value); assert_eq!(value, &deserialized); }); } /// Serializes and deserializes the given value, checking for equality with the /// archived and deserialized values. pub fn roundtrip(value: &T) where T: Debug + PartialEq + for<'a> Serialize>, T::Archived: Debug + PartialEq + Deserialize, { roundtrip_with(value, |a, b| assert_eq!(b, a)); } rkyv-0.8.9/src/api/test/mod.rs000064400000000000000000000017061046102023000143050ustar 00000000000000//! APIs for testing code that uses rkyv. //! //! These APIs are test-only. The exact signatures of these APIs change //! depending on which features are enabled so that they can be used uniformly //! across: //! //! - `std`, no-std, and no-std-no-alloc configurations //! - `bytecheck` enabled or disabled //! //! In the no-std-no-alloc configuration, the amount of data that can be //! serialized or allocated during serialization is limited. If you test in //! these configurations, keep your data sizes relatively small. #[cfg(feature = "alloc")] mod outer_high; #[cfg(not(feature = "alloc"))] mod outer_low; #[cfg(feature = "bytecheck")] mod inner_checked; #[cfg(not(feature = "bytecheck"))] mod inner_unchecked; #[cfg(feature = "bytecheck")] pub use self::inner_checked::*; #[cfg(not(feature = "bytecheck"))] pub use self::inner_unchecked::*; #[cfg(feature = "alloc")] pub use self::outer_high::*; #[cfg(not(feature = "alloc"))] pub use self::outer_low::*; rkyv-0.8.9/src/api/test/outer_high.rs000064400000000000000000000017571046102023000156710ustar 00000000000000use rancor::Panic; use crate::{ api::high::{HighDeserializer, HighSerializer}, de::Pool, ser::allocator::ArenaHandle, util::AlignedVec, Deserialize, Serialize, }; /// The serializer type for tests. 
pub type TestSerializer<'a> = HighSerializer, Panic>; /// The deserializer type for tests. pub type TestDeserializer = HighDeserializer; /// Serializes the given value to bytes using the test serializer, then calls /// the given function on those bytes. pub fn to_bytes(value: &T, f: impl FnOnce(&mut [u8])) where T: for<'a> Serialize>, { let mut bytes = crate::api::high::to_bytes(value).expect("failed to serialize value"); f(&mut bytes); } /// Deserializes the given value using the test deserializer. pub fn deserialize(value: &impl Deserialize) -> T { crate::api::deserialize_using::(value, &mut Pool::new()) .expect("failed to deserialize value") } rkyv-0.8.9/src/api/test/outer_low.rs000064400000000000000000000023441046102023000155440ustar 00000000000000use core::mem::MaybeUninit; use rancor::Panic; use crate::{ api::low::{to_bytes_in_with_alloc, LowDeserializer, LowSerializer}, ser::{allocator::SubAllocator, writer::Buffer}, util::Align, Deserialize, Serialize, }; /// The serializer type for tests. pub type TestSerializer<'a> = LowSerializer, SubAllocator<'a>, Panic>; /// The deserializer type for tests. pub type TestDeserializer = LowDeserializer; /// Serializes the given value to bytes using the test serializer, then calls /// the given function on those bytes. pub fn to_bytes(value: &T, f: impl FnOnce(&mut [u8])) where T: for<'a> Serialize>, { let mut output = Align([MaybeUninit::::uninit(); 256]); let mut alloc = [MaybeUninit::::uninit(); 256]; let mut bytes = to_bytes_in_with_alloc( value, Buffer::from(&mut *output), SubAllocator::new(&mut alloc), ) .expect("failed to serialize value"); f(&mut bytes); } /// Deserializes the given value using the test deserializer. pub fn deserialize(value: &impl Deserialize) -> T { crate::api::deserialize_using::(value, &mut ()) .expect("failed to deserialize value") } rkyv-0.8.9/src/boxed.rs000064400000000000000000000121701046102023000130740ustar 00000000000000//! An archived version of `Box`. 
use core::{borrow::Borrow, cmp, fmt, hash, ops::Deref}; use munge::munge; use rancor::Fallible; use crate::{ primitive::FixedUsize, seal::Seal, traits::ArchivePointee, ArchiveUnsized, Place, Portable, RelPtr, SerializeUnsized, }; /// An archived [`Box`]. /// /// This is a thin `#[repr(transparent)]` wrapper around a [`RelPtr`] to the /// archived type. #[derive(Portable)] #[rkyv(crate)] #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] #[repr(transparent)] pub struct ArchivedBox { ptr: RelPtr, } impl ArchivedBox { /// Returns a reference to the value of this archived box. pub fn get(&self) -> &T { unsafe { &*self.ptr.as_ptr() } } /// Returns a sealed mutable reference to the value of this archived box. pub fn get_seal(this: Seal<'_, Self>) -> Seal<'_, T> { munge!(let Self { ptr } = this); Seal::new(unsafe { &mut *RelPtr::as_mut_ptr(ptr) }) } /// Resolves an archived box from the given value and parameters. pub fn resolve_from_ref + ?Sized>( value: &U, resolver: BoxResolver, out: Place, ) { Self::resolve_from_raw_parts(resolver, value.archived_metadata(), out) } /// Serializes an archived box from the given value and serializer. pub fn serialize_from_ref( value: &U, serializer: &mut S, ) -> Result where U: SerializeUnsized + ?Sized, S: Fallible + ?Sized, { Ok(BoxResolver { pos: value.serialize_unsized(serializer)? as FixedUsize, }) } /// Resolves an archived box from a [`BoxResolver`] and the raw metadata /// directly. 
pub fn resolve_from_raw_parts( resolver: BoxResolver, metadata: T::ArchivedMetadata, out: Place, ) { munge!(let ArchivedBox { ptr } = out); RelPtr::emplace_unsized(resolver.pos as usize, metadata, ptr); } } impl AsRef for ArchivedBox { fn as_ref(&self) -> &T { self.get() } } impl Borrow for ArchivedBox { fn borrow(&self) -> &T { self.get() } } impl fmt::Debug for ArchivedBox where T::ArchivedMetadata: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("ArchivedBox").field(&self.ptr).finish() } } impl Deref for ArchivedBox { type Target = T; fn deref(&self) -> &Self::Target { self.get() } } impl fmt::Display for ArchivedBox { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.get().fmt(f) } } impl Eq for ArchivedBox {} impl hash::Hash for ArchivedBox { fn hash(&self, state: &mut H) { self.get().hash(state); } } impl Ord for ArchivedBox { fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_ref().cmp(other.as_ref()) } } impl + ?Sized, U: ArchivePointee + ?Sized> PartialEq> for ArchivedBox { fn eq(&self, other: &ArchivedBox) -> bool { self.get().eq(other.get()) } } impl PartialOrd for ArchivedBox { fn partial_cmp(&self, other: &Self) -> Option { self.get().partial_cmp(other.get()) } } impl fmt::Pointer for ArchivedBox { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let ptr = self.get() as *const T; fmt::Pointer::fmt(&ptr, f) } } /// The resolver for `Box`. pub struct BoxResolver { pos: FixedUsize, } impl BoxResolver { /// Creates a new [`BoxResolver`] from the position of a serialized value. /// /// In most cases, you won't need to create a [`BoxResolver`] yourself and /// can instead obtain it through [`ArchivedBox::serialize_from_ref`]. 
pub fn from_pos(pos: usize) -> Self { Self { pos: pos as FixedUsize, } } } #[cfg(feature = "bytecheck")] mod verify { use bytecheck::{ rancor::{Fallible, Source}, CheckBytes, Verify, }; use crate::{ boxed::ArchivedBox, traits::{ArchivePointee, LayoutRaw}, validation::{ArchiveContext, ArchiveContextExt}, }; unsafe impl Verify for ArchivedBox where T: ArchivePointee + CheckBytes + LayoutRaw + ?Sized, T::ArchivedMetadata: CheckBytes, C: Fallible + ArchiveContext + ?Sized, C::Error: Source, { fn verify(&self, context: &mut C) -> Result<(), C::Error> { let ptr = self.ptr.as_ptr_wrapping(); context.in_subtree(ptr, |context| unsafe { T::check_bytes(ptr, context) }) } } } rkyv-0.8.9/src/collections/btree_map/iter.rs000064400000000000000000000175331046102023000172220ustar 00000000000000use core::{marker::PhantomData, ptr::addr_of_mut}; use crate::{ alloc::vec::Vec, collections::btree_map::{ entries_to_height, ArchivedBTreeMap, InnerNode, LeafNode, Node, NodeKind, }, seal::Seal, RelPtr, }; impl ArchivedBTreeMap { /// Gets an iterator over the entries of the map, sorted by key. pub fn iter(&self) -> Iter<'_, K, V, E> { let this = (self as *const Self).cast_mut(); Iter { inner: unsafe { RawIter::new(this) }, _phantom: PhantomData, } } /// Gets a mutable iterator over the entires of the map, sorted by key. pub fn iter_seal(this: Seal<'_, Self>) -> IterSeal<'_, K, V, E> { let this = unsafe { Seal::unseal_unchecked(this) as *mut Self }; IterSeal { inner: unsafe { RawIter::new(this) }, _phantom: PhantomData, } } /// Gets an iterator over the sorted keys of the map. pub fn keys(&self) -> Keys<'_, K, V, E> { let this = (self as *const Self).cast_mut(); Keys { inner: unsafe { RawIter::new(this) }, _phantom: PhantomData, } } /// Gets an iterator over the values of the map. 
pub fn values(&self) -> Values<'_, K, V, E> { let this = (self as *const Self).cast_mut(); Values { inner: unsafe { RawIter::new(this) }, _phantom: PhantomData, } } /// Gets a mutable iterator over the values of the map. pub fn values_seal(this: Seal<'_, Self>) -> ValuesSeal<'_, K, V, E> { let this = unsafe { Seal::unseal_unchecked(this) as *mut Self }; ValuesSeal { inner: unsafe { RawIter::new(this) }, _phantom: PhantomData, } } } /// An iterator over the entires of an `ArchivedBTreeMap`. /// /// This struct is created by the [`iter`](ArchivedBTreeMap::iter) method on /// [`ArchivedBTreeMap`]. See its documentation for more. pub struct Iter<'a, K, V, const E: usize> { inner: RawIter, _phantom: PhantomData<&'a ArchivedBTreeMap>, } impl<'a, K, V, const E: usize> Iterator for Iter<'a, K, V, E> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option { self.inner .next() .map(|(k, v)| (unsafe { &*k }, unsafe { &*v })) } } /// An iterator over the entires of an `ArchivedBTreeMap`. /// /// This struct is created by the [`iter_pin`](ArchivedBTreeMap::iter_pin) /// method on [`ArchivedBTreeMap`]. See its documentation for more. pub struct IterSeal<'a, K, V, const E: usize> { inner: RawIter, _phantom: PhantomData>>, } impl<'a, K, V, const E: usize> Iterator for IterSeal<'a, K, V, E> { type Item = (&'a K, Seal<'a, V>); fn next(&mut self) -> Option { self.inner .next() .map(|(k, v)| (unsafe { &*k }, Seal::new(unsafe { &mut *v }))) } } /// An iterator over the keys of an `ArchivedBTreeMap`. /// /// This struct is created by the [`keys`](ArchivedBTreeMap::keys) method on /// [`ArchivedBTreeMap`]. See its documentation for more. pub struct Keys<'a, K, V, const E: usize> { inner: RawIter, _phantom: PhantomData<&'a ArchivedBTreeMap>, } impl<'a, K, V, const E: usize> Iterator for Keys<'a, K, V, E> { type Item = &'a K; fn next(&mut self) -> Option { self.inner.next().map(|(k, _)| unsafe { &*k }) } } /// An iterator over the values of an `ArchivedBTreeMap`. 
/// /// This struct is created by the [`values`](ArchivedBTreeMap::keys) method on /// [`ArchivedBTreeMap`]. See its documentation for more. pub struct Values<'a, K, V, const E: usize> { inner: RawIter, _phantom: PhantomData<&'a ArchivedBTreeMap>, } impl<'a, K, V, const E: usize> Iterator for Values<'a, K, V, E> { type Item = &'a V; fn next(&mut self) -> Option { self.inner.next().map(|(_, v)| unsafe { &*v }) } } /// A mutable iterator over the values of an `ArchivedBTreeMap`. /// /// This struct is created by the [`values_pin`](ArchivedBTreeMap::keys) method /// on [`ArchivedBTreeMap`]. See its documentation for more. pub struct ValuesSeal<'a, K, V, const E: usize> { inner: RawIter, _phantom: PhantomData>>, } impl<'a, K, V, const E: usize> Iterator for ValuesSeal<'a, K, V, E> { type Item = Seal<'a, V>; fn next(&mut self) -> Option { self.inner .next() .map(|(_, v)| Seal::new(unsafe { &mut *v })) } } struct RawIter { remaining: usize, stack: Vec<(*mut Node, usize)>, } impl RawIter { unsafe fn new(map: *mut ArchivedBTreeMap) -> Self { let remaining = unsafe { (*map).len.to_native() as usize }; let mut stack = Vec::new(); if remaining != 0 { stack.reserve(entries_to_height::(remaining) as usize); let mut current = unsafe { RelPtr::as_ptr_raw(addr_of_mut!((*map).root)) }; loop { stack.push((current, 0)); let kind = unsafe { (*current).kind }; match kind { NodeKind::Inner => { let inner = current.cast::>(); let lesser = unsafe { addr_of_mut!((*inner).lesser_nodes[0]) }; current = unsafe { RelPtr::as_ptr_raw(lesser) }; } NodeKind::Leaf => break, } } } Self { remaining, stack } } } impl Iterator for RawIter { type Item = (*mut K, *mut V); fn next(&mut self) -> Option { let (current, i) = self.stack.pop()?; self.remaining -= 1; let k = unsafe { addr_of_mut!((*current).keys[i]).cast::() }; let v = unsafe { addr_of_mut!((*current).values[i]).cast::() }; let next_i = i + 1; // Advance to the next item let kind = unsafe { (*current).kind }; match kind { NodeKind::Inner => { 
let inner = current.cast::>(); if next_i < E { // More values in the current node self.stack.push((current, next_i)); // Recurse to a lesser if valid let next_lesser = unsafe { addr_of_mut!((*inner).lesser_nodes[next_i]) }; let next_lesser_is_invalid = unsafe { RelPtr::is_invalid_raw(next_lesser) }; if !next_lesser_is_invalid { self.stack.push(( unsafe { RelPtr::as_ptr_raw(next_lesser).cast() }, 0, )); } } else { // Recurse to a greater if valid let next_greater = unsafe { addr_of_mut!((*inner).greater_node) }; let next_greater_is_invalid = unsafe { RelPtr::is_invalid_raw(next_greater) }; if !next_greater_is_invalid { self.stack.push(( unsafe { RelPtr::as_ptr_raw(next_greater).cast() }, 0, )); } } } NodeKind::Leaf => { let leaf = current.cast::>(); let len = unsafe { (*leaf).len.to_native() as usize }; if next_i < len { self.stack.push((current, next_i)); } } } Some((k, v)) } } rkyv-0.8.9/src/collections/btree_map/mod.rs000064400000000000000000001075731046102023000170420ustar 00000000000000//! [`Archive`](crate::Archive) implementation for B-tree maps. use core::{ borrow::Borrow, cmp::Ordering, fmt, marker::PhantomData, mem::{size_of, MaybeUninit}, ops::{ControlFlow, Index}, ptr::addr_of_mut, slice, }; use munge::munge; use rancor::{fail, Fallible, Source}; use crate::{ collections::util::IteratorLengthMismatch, primitive::{ArchivedUsize, FixedUsize}, seal::Seal, ser::{Allocator, Writer, WriterExt as _}, traits::NoUndef, util::{InlineVec, SerVec}, Place, Portable, RelPtr, Serialize, }; // TODO(#515): Get Iterator APIs working without the `alloc` feature enabled #[cfg(feature = "alloc")] mod iter; // B-trees are typically characterized as having a branching factor of B. // However, in this implementation our B-trees are characterized as having a // number of entries per node E where E = B - 1. This is done because it's // easier to add an additional node pointer to each inner node than it is to // store one less entry per inner node. 
Because generic const exprs are not // stable, we can't declare a field `entries: [Entry; { B - 1 }]`. But we can // declare `branches: [RelPtr; E]` and then add another `last: RelPtr` // field. When the branching factor B is needed, it will be calculated as E + 1. const fn nodes_in_level(i: u32) -> usize { // The root of the tree has one node, and each level down has B times as // many nodes at the last. Therefore, the number of nodes in the I-th level // is equal to B^I. (E + 1).pow(i) } const fn entries_in_full_tree(h: u32) -> usize { // The number of nodes in each layer I of a B-tree is equal to B^I. At layer // I = 0, the number of nodes is exactly one. At layer I = 1, the number of // nodes is B, at layer I = 2 the number of nodes is B^2, and so on. The // total number of nodes is equal to the sum from 0 to H - 1 of B^I. Since // this is the sum of a geometric progression, we have the closed-form // solution N = (B^H - 1) / (B - 1). Since the number of entries per node is // equal to B - 1, we thus have the solution that the number of entries in a // B-tree of height H is equal to B^H - 1. // Note that this is one less than the number of nodes in the level after // the final level of the B-tree. nodes_in_level::(h) - 1 } const fn entries_to_height(n: usize) -> u32 { // Solving B^H - 1 = N for H yields H = log_B(N + 1). However, we'll be // using an integer logarithm, and so the value of H will be rounded down // which underestimates the height of the tree: // => H = ilog_B(N + 1) = floor(log_B(N + 1)). // To compensate for this, we'll calculate the height for a tree with a // greater number of nodes and choose this greater number so that rounding // down will always yield the correct result. // The minimum value which yields a height of H is exactly B^H - 1, so we // need to add a large enough correction to always be greater than or equal // to that value. 
The maximum value which yields a height of H is one less // than the number of nodes in the next-largest B-tree, which is equal to // B^(H + 1) - 1. This gives the following relationships for N: // => B^(H - 1) - 1 < N <= B^H - 1 // And the desired relationships for the corrected number of entries C(N): // => B^H - 1 <= C(N) < B^(H + 1) - 1 // First, we can add 1 to the two ends of our first set of relationships // to change whether equality is allowed. We can do this because all entries // are integers. This makes the relationships match the desired // relationships for C(N): // => B^(H - 1) - 1 + 1 <= N < B^H - 1 + 1 // => B^(H - 1) <= N < B^H // Let's choose a function to map the lower bound for N to the desired lower // bound for C(N): // => C(B^(H - 1)) = B^(H - 1) // A straightforward choice would be C(N) = B * N - 1. Substituting yields: // => C(B^(H - 1)) <= C(N) < C(B^H) // => B * B^(H - 1) - 1 <= B * N - 1 < B * B^H - 1 // => B^H - 1 <= B * N - 1 < B^(H + 1) - 1 // These exactly match the desired bounds, so this is the function we want. // Putting it all together: // => H = ilog_B(C(N) + 1) = ilog_b(B * N - 1 + 1) = ilog_b(B * N) // => H = 1 + ilog_b(N) 1 + n.ilog(E + 1) } const fn ll_entries(height: u32, n: usize) -> usize { // The number of entries not in the last level is equal to the number of // entries in a full B-tree of height H - 1. The number of entries in // the last level is thus the total number of entries minus the number // of entries not in the last level. n - entries_in_full_tree::(height - 1) } #[derive(Clone, Copy, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(u8)] enum NodeKind { Leaf, Inner, } // SAFETY: `NodeKind` is `repr(u8)` and so always consists of a single // well-defined byte. 
unsafe impl NoUndef for NodeKind {} #[derive(Portable)] #[rkyv(crate)] #[repr(C)] struct Node { kind: NodeKind, keys: [MaybeUninit; E], values: [MaybeUninit; E], } #[derive(Portable)] #[rkyv(crate)] #[repr(C)] struct LeafNode { node: Node, len: ArchivedUsize, } #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[derive(Portable)] #[rkyv(crate)] #[repr(C)] struct InnerNode { node: Node, lesser_nodes: [RelPtr>; E], greater_node: RelPtr>, } /// An archived [`BTreeMap`](crate::alloc::collections::BTreeMap). #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] #[derive(Portable)] #[rkyv(crate)] #[repr(C)] pub struct ArchivedBTreeMap { // The type of the root node is determined at runtime because it may point // to: // - Nothing if the length is zero // - A leaf node if there is only one node // - Or an inner node if there are multiple nodes root: RelPtr>, len: ArchivedUsize, _phantom: PhantomData<(K, V)>, } impl ArchivedBTreeMap { /// Returns whether the B-tree map contains the given key. pub fn contains_key(&self, key: &Q) -> bool where Q: Ord + ?Sized, K: Borrow + Ord, { self.get_key_value(key).is_some() } /// Returns the value associated with the given key, or `None` if the key is /// not present in the B-tree map. pub fn get(&self, key: &Q) -> Option<&V> where Q: Ord + ?Sized, K: Borrow + Ord, { Some(self.get_key_value(key)?.1) } /// Returns the mutable value associated with the given key, or `None` if /// the key is not present in the B-tree map. pub fn get_seal<'a, Q>(this: Seal<'a, Self>, key: &Q) -> Option> where Q: Ord + ?Sized, K: Borrow + Ord, { Some(Self::get_key_value_seal(this, key)?.1) } /// Returns true if the B-tree map contains no entries. pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns the number of entries in the B-tree map. 
pub fn len(&self) -> usize { self.len.to_native() as usize } /// Gets the key-value pair associated with the given key, or `None` if the /// key is not present in the B-tree map. pub fn get_key_value(&self, key: &Q) -> Option<(&K, &V)> where Q: Ord + ?Sized, K: Borrow + Ord, { let this = (self as *const Self).cast_mut(); Self::get_key_value_raw(this, key) .map(|(k, v)| (unsafe { &*k }, unsafe { &*v })) } /// Gets the mutable key-value pair associated with the given key, or `None` /// if the key is not present in the B-tree map. pub fn get_key_value_seal<'a, Q>( this: Seal<'a, Self>, key: &Q, ) -> Option<(&'a K, Seal<'a, V>)> where Q: Ord + ?Sized, K: Borrow + Ord, { let this = unsafe { Seal::unseal_unchecked(this) as *mut Self }; Self::get_key_value_raw(this, key) .map(|(k, v)| (unsafe { &*k }, Seal::new(unsafe { &mut *v }))) } fn get_key_value_raw( this: *mut Self, key: &Q, ) -> Option<(*mut K, *mut V)> where Q: Ord + ?Sized, K: Borrow + Ord, { let len = unsafe { (*this).len.to_native() }; if len == 0 { return None; } let root_ptr = unsafe { addr_of_mut!((*this).root) }; let mut current = unsafe { RelPtr::as_ptr_raw(root_ptr) }; 'outer: loop { let kind = unsafe { (*current).kind }; match kind { NodeKind::Leaf => { let leaf = current.cast::>(); let len = unsafe { (*leaf).len }; for i in 0..len.to_native() as usize { let k = unsafe { addr_of_mut!((*current).keys[i]).cast::() }; let ordering = key.cmp(unsafe { (*k).borrow() }); match ordering { Ordering::Equal => { let v = unsafe { addr_of_mut!((*current).values[i]) .cast::() }; return Some((k, v)); } Ordering::Less => return None, Ordering::Greater => (), } } return None; } NodeKind::Inner => { let inner = current.cast::>(); for i in 0..E { let k = unsafe { addr_of_mut!((*current).keys[i]).cast::() }; let ordering = key.cmp(unsafe { (*k).borrow() }); match ordering { Ordering::Equal => { let v = unsafe { addr_of_mut!((*current).values[i]) .cast::() }; return Some((k, v)); } Ordering::Less => { let lesser = unsafe { 
addr_of_mut!((*inner).lesser_nodes[i]) }; let lesser_is_invalid = unsafe { RelPtr::is_invalid_raw(lesser) }; if !lesser_is_invalid { current = unsafe { RelPtr::as_ptr_raw(lesser) }; continue 'outer; } else { return None; } } Ordering::Greater => (), } } let inner = current.cast::>(); let greater = unsafe { addr_of_mut!((*inner).greater_node) }; let greater_is_invalid = unsafe { RelPtr::is_invalid_raw(greater) }; if !greater_is_invalid { current = unsafe { RelPtr::as_ptr_raw(greater) }; } else { return None; } } } } } /// Resolves an `ArchivedBTreeMap` from the given length, resolver, and /// output place. pub fn resolve_from_len( len: usize, resolver: BTreeMapResolver, out: Place, ) { munge!(let ArchivedBTreeMap { root, len: out_len, _phantom: _ } = out); if len == 0 { RelPtr::emplace_invalid(root); } else { RelPtr::emplace(resolver.root_node_pos as usize, root); } out_len.write(ArchivedUsize::from_native(len as FixedUsize)); } /// Serializes an `ArchivedBTreeMap` from the given iterator and serializer. 
pub fn serialize_from_ordered_iter( mut iter: I, serializer: &mut S, ) -> Result where I: ExactSizeIterator, BKU: Borrow, BVU: Borrow, KU: Serialize, VU: Serialize, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { let len = iter.len(); if len == 0 { let actual = iter.count(); if actual != 0 { fail!(IteratorLengthMismatch { expected: 0, actual, }); } return Ok(BTreeMapResolver { root_node_pos: 0 }); } let height = entries_to_height::(len); let ll_entries = ll_entries::(height, len); SerVec::with_capacity( serializer, height as usize - 1, |open_inners, serializer| { for _ in 0..height - 1 { open_inners .push(InlineVec::<(BKU, BVU, Option), E>::new()); } let mut open_leaf = InlineVec::<(BKU, BVU), E>::new(); let mut child_node_pos = None; let mut leaf_entries = 0; while let Some((key, value)) = iter.next() { open_leaf.push((key, value)); leaf_entries += 1; if leaf_entries == ll_entries || open_leaf.len() == open_leaf.capacity() { // Close open leaf child_node_pos = Some(Self::close_leaf(&open_leaf, serializer)?); open_leaf.clear(); // If on the transition node, fill and close open inner if leaf_entries == ll_entries { if let Some(mut inner) = open_inners.pop() { while inner.len() < inner.capacity() { if let Some((k, v)) = iter.next() { inner.push((k, v, child_node_pos)); child_node_pos = None; } else { break; } } child_node_pos = Some(Self::close_inner( &inner, child_node_pos, serializer, )?); } } // Add closed node to open inner let mut popped = 0; while let Some(last_inner) = open_inners.last_mut() { if last_inner.len() == last_inner.capacity() { // Close open inner child_node_pos = Some(Self::close_inner( last_inner, child_node_pos, serializer, )?); open_inners.pop(); popped += 1; } else { let (key, value) = iter.next().unwrap(); last_inner.push((key, value, child_node_pos)); child_node_pos = None; for _ in 0..popped { open_inners.push(InlineVec::default()); } break; } } } } if !open_leaf.is_empty() { // Close open leaf child_node_pos = 
Some(Self::close_leaf(&open_leaf, serializer)?); open_leaf.clear(); } // Close open inners while let Some(inner) = open_inners.pop() { child_node_pos = Some(Self::close_inner( &inner, child_node_pos, serializer, )?); } debug_assert!(open_inners.is_empty()); debug_assert!(open_leaf.is_empty()); let leftovers = iter.count(); if leftovers != 0 { fail!(IteratorLengthMismatch { expected: len, actual: len + leftovers, }); } Ok(BTreeMapResolver { root_node_pos: child_node_pos.unwrap() as FixedUsize, }) }, )? } fn close_leaf( items: &[(BKU, BVU)], serializer: &mut S, ) -> Result where BKU: Borrow, BVU: Borrow, KU: Serialize, VU: Serialize, S: Writer + Fallible + ?Sized, { let mut resolvers = InlineVec::<(KU::Resolver, VU::Resolver), E>::new(); for (key, value) in items { resolvers.push(( key.borrow().serialize(serializer)?, value.borrow().serialize(serializer)?, )); } let pos = serializer.align_for::>()?; let mut node = MaybeUninit::>::uninit(); // SAFETY: `node` is properly aligned and valid for writes of // `size_of::>()` bytes. unsafe { node.as_mut_ptr().write_bytes(0, 1); } let node_place = unsafe { Place::new_unchecked(pos, node.as_mut_ptr()) }; munge! 
{ let LeafNode { node: Node { kind, keys, values, }, len, } = node_place; } kind.write(NodeKind::Leaf); len.write(ArchivedUsize::from_native(items.len() as FixedUsize)); for (i, ((k, v), (kr, vr))) in items.iter().zip(resolvers.drain()).enumerate() { let out_key = unsafe { keys.index(i).cast_unchecked() }; k.borrow().resolve(kr, out_key); let out_value = unsafe { values.index(i).cast_unchecked() }; v.borrow().resolve(vr, out_value); } let bytes = unsafe { slice::from_raw_parts( node.as_ptr().cast::(), size_of::>(), ) }; serializer.write(bytes)?; Ok(pos) } fn close_inner( items: &[(BKU, BVU, Option)], greater_node_pos: Option, serializer: &mut S, ) -> Result where BKU: Borrow, BVU: Borrow, KU: Serialize, VU: Serialize, S: Writer + Fallible + ?Sized, { debug_assert_eq!(items.len(), E); let mut resolvers = InlineVec::<(KU::Resolver, VU::Resolver), E>::new(); for (key, value, _) in items { resolvers.push(( key.borrow().serialize(serializer)?, value.borrow().serialize(serializer)?, )); } let pos = serializer.align_for::>()?; let mut node = MaybeUninit::>::uninit(); // SAFETY: `node` is properly aligned and valid for writes of // `size_of::>()` bytes. unsafe { node.as_mut_ptr().write_bytes(0, 1); } let node_place = unsafe { Place::new_unchecked(pos, node.as_mut_ptr()) }; munge! 
{ let InnerNode { node: Node { kind, keys, values, }, lesser_nodes, greater_node, } = node_place; } kind.write(NodeKind::Inner); for (i, ((k, v, l), (kr, vr))) in items.iter().zip(resolvers.drain()).enumerate() { let out_key = unsafe { keys.index(i).cast_unchecked() }; k.borrow().resolve(kr, out_key); let out_value = unsafe { values.index(i).cast_unchecked() }; v.borrow().resolve(vr, out_value); let out_lesser_node = unsafe { lesser_nodes.index(i) }; if let Some(lesser_node) = l { RelPtr::emplace(*lesser_node, out_lesser_node); } else { RelPtr::emplace_invalid(out_lesser_node); } } if let Some(greater_node_pos) = greater_node_pos { RelPtr::emplace(greater_node_pos, greater_node); } else { RelPtr::emplace_invalid(greater_node); } let bytes = unsafe { slice::from_raw_parts( node.as_ptr().cast::(), size_of::>(), ) }; serializer.write(bytes)?; Ok(pos) } /// Visits every key-value pair in the B-tree with a function. /// /// If `f` returns `ControlFlow::Break`, `visit` will return `Some` with the /// broken value. If `f` returns `Continue` for every pair in the tree, /// `visit` will return `None`. pub fn visit( &self, mut f: impl FnMut(&K, &V) -> ControlFlow, ) -> Option { if self.is_empty() { None } else { let root = &self.root; let root_ptr = unsafe { root.as_ptr().cast::>() }; let mut call_inner = |k: *mut K, v: *mut V| unsafe { f(&*k, &*v) }; match Self::visit_raw(root_ptr.cast_mut(), &mut call_inner) { ControlFlow::Continue(()) => None, ControlFlow::Break(x) => Some(x), } } } /// Visits every mutable key-value pair in the B-tree with a function. /// /// If `f` returns `ControlFlow::Break`, `visit` will return `Some` with the /// broken value. If `f` returns `Continue` for every pair in the tree, /// `visit` will return `None`. pub fn visit_seal( this: Seal<'_, Self>, mut f: impl FnMut(&K, Seal<'_, V>) -> ControlFlow, ) -> Option { if this.is_empty() { None } else { munge!(let Self { root, .. 
} = this); let root_ptr = unsafe { RelPtr::as_mut_ptr(root).cast::>() }; let mut call_inner = |k: *mut K, v: *mut V| unsafe { f(&*k, Seal::new(&mut *v)) }; match Self::visit_raw(root_ptr, &mut call_inner) { ControlFlow::Continue(()) => None, ControlFlow::Break(x) => Some(x), } } } fn visit_raw( current: *mut Node, f: &mut impl FnMut(*mut K, *mut V) -> ControlFlow, ) -> ControlFlow { let kind = unsafe { (*current).kind }; match kind { NodeKind::Leaf => { let leaf = current.cast::>(); let len = unsafe { (*leaf).len }; for i in 0..len.to_native() as usize { Self::visit_key_value_raw(current, i, f)?; } } NodeKind::Inner => { let inner = current.cast::>(); // Visit lesser nodes and key-value pairs for i in 0..E { let lesser = unsafe { addr_of_mut!((*inner).lesser_nodes[i]) }; let lesser_is_invalid = unsafe { RelPtr::is_invalid_raw(lesser) }; if !lesser_is_invalid { let lesser_ptr = unsafe { RelPtr::as_ptr_raw(lesser) }; Self::visit_raw(lesser_ptr, f)?; } Self::visit_key_value_raw(current, i, f)?; } // Visit greater node let greater = unsafe { addr_of_mut!((*inner).greater_node) }; let greater_is_invalid = unsafe { RelPtr::is_invalid_raw(greater) }; if !greater_is_invalid { let greater_ptr = unsafe { RelPtr::as_ptr_raw(greater).cast::>() }; Self::visit_raw(greater_ptr, f)?; } } } ControlFlow::Continue(()) } fn visit_key_value_raw( current: *mut Node, i: usize, f: &mut impl FnMut(*mut K, *mut V) -> ControlFlow, ) -> ControlFlow { let key_ptr = unsafe { addr_of_mut!((*current).keys[i]).cast::() }; let value_ptr = unsafe { addr_of_mut!((*current).values[i]).cast::() }; f(key_ptr, value_ptr) } } impl fmt::Debug for ArchivedBTreeMap where K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut map = f.debug_map(); self.visit(|k, v| { map.entry(k, v); ControlFlow::<()>::Continue(()) }); map.finish() } } // TODO(#515): ungate this impl #[cfg(feature = "alloc")] impl Eq for ArchivedBTreeMap where K: PartialEq, V: PartialEq, { } impl 
Index<&Q> for ArchivedBTreeMap where Q: Ord + ?Sized, K: Borrow + Ord, { type Output = V; fn index(&self, key: &Q) -> &Self::Output { self.get(key).unwrap() } } // TODO(#515): ungate this impl #[cfg(feature = "alloc")] impl PartialEq> for ArchivedBTreeMap where K: PartialEq, V: PartialEq, { fn eq(&self, other: &ArchivedBTreeMap) -> bool { if self.len() != other.len() { return false; } let mut i = other.iter(); self.visit(|lk, lv| { let (rk, rv) = i.next().unwrap(); if lk != rk || lv != rv { ControlFlow::Break(()) } else { ControlFlow::Continue(()) } }) .is_none() } } /// The resolver for [`ArchivedBTreeMap`]. pub struct BTreeMapResolver { root_node_pos: FixedUsize, } #[cfg(feature = "bytecheck")] mod verify { use core::{alloc::Layout, error::Error, fmt, ptr::addr_of}; use bytecheck::{CheckBytes, Verify}; use rancor::{fail, Fallible, Source}; use super::{ArchivedBTreeMap, InnerNode, Node}; use crate::{ collections::btree_map::{LeafNode, NodeKind}, validation::{ArchiveContext, ArchiveContextExt as _}, RelPtr, }; #[derive(Debug)] struct InvalidLength { len: usize, maximum: usize, } impl fmt::Display for InvalidLength { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Invalid length in B-tree node: len {} was greater than \ maximum {}", self.len, self.maximum ) } } impl Error for InvalidLength {} unsafe impl Verify for ArchivedBTreeMap where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, K: CheckBytes, V: CheckBytes, { fn verify(&self, context: &mut C) -> Result<(), C::Error> { let len = self.len(); if len == 0 { return Ok(()); } check_node_rel_ptr::(&self.root, context) } } fn check_node_rel_ptr( node_rel_ptr: &RelPtr>, context: &mut C, ) -> Result<(), C::Error> where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, K: CheckBytes, V: CheckBytes, { let node_ptr = node_rel_ptr.as_ptr_wrapping().cast::>(); context.check_subtree_ptr( node_ptr.cast::(), &Layout::new::>(), )?; // SAFETY: We checked to make sure that `node_ptr` is 
properly aligned // and dereferenceable by calling `check_subtree_ptr`. let kind_ptr = unsafe { addr_of!((*node_ptr).kind) }; // SAFETY: `kind_ptr` is a pointer to a subfield of `node_ptr` and so is // also properly aligned and dereferenceable. unsafe { CheckBytes::check_bytes(kind_ptr, context)?; } // SAFETY: `kind_ptr` was always properly aligned and dereferenceable, // and we just checked to make sure it pointed to a valid `NodeKind`. let kind = unsafe { kind_ptr.read() }; match kind { NodeKind::Leaf => { // SAFETY: // We checked to make sure that `node_ptr` is properly aligned, // dereferenceable, and contained entirely within `context`'s // buffer by calling `check_subtree_ptr`. unsafe { check_leaf_node::(node_ptr.cast(), context)? } } NodeKind::Inner => { // SAFETY: // We checked to make sure that `node_ptr` is properly aligned // and dereferenceable. unsafe { check_inner_node::(node_ptr.cast(), context)? } } } Ok(()) } /// # Safety /// /// `node_ptr` must be properly aligned, dereferenceable, and contained /// within `context`'s buffer. unsafe fn check_leaf_node( node_ptr: *const LeafNode, context: &mut C, ) -> Result<(), C::Error> where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, K: CheckBytes, V: CheckBytes, { context.in_subtree(node_ptr, |context| { // SAFETY: We checked to make sure that `node_ptr` is properly // aligned and dereferenceable by calling // `check_subtree_ptr`. let len_ptr = unsafe { addr_of!((*node_ptr).len) }; // SAFETY: `len_ptr` is a pointer to a subfield of `node_ptr` and so // is also properly aligned and dereferenceable. unsafe { CheckBytes::check_bytes(len_ptr, context)?; } // SAFETY: `len_ptr` was always properly aligned and // dereferenceable, and we just checked to make sure it // pointed to a valid `ArchivedUsize`. 
let len = unsafe { &*len_ptr }; let len = len.to_native() as usize; if len > E { fail!(InvalidLength { len, maximum: E }); } // SAFETY: We checked that `node_ptr` is properly-aligned and // dereferenceable. let node_ptr = unsafe { addr_of!((*node_ptr).node) }; // SAFETY: // - We checked that `node_ptr` is properly aligned and // dereferenceable. // - We checked that `len` is less than or equal to `E`. unsafe { check_node_entries(node_ptr, len, context)?; } Ok(()) }) } /// # Safety /// /// - `node_ptr` must point to a valid `Node`. /// - `len` must be less than or equal to `E`. unsafe fn check_node_entries( node_ptr: *const Node, len: usize, context: &mut C, ) -> Result<(), C::Error> where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, K: CheckBytes, V: CheckBytes, { for i in 0..len { // SAFETY: The caller has guaranteed that `node_ptr` is properly // aligned and dereferenceable. let key_ptr = unsafe { addr_of!((*node_ptr).keys[i]).cast::() }; // SAFETY: The caller has guaranteed that `node_ptr` is properly // aligned and dereferenceable. let value_ptr = unsafe { addr_of!((*node_ptr).values[i]).cast::() }; unsafe { K::check_bytes(key_ptr, context)?; } // SAFETY: `value_ptr` is a subfield of a node, and so is guaranteed // to be properly aligned and point to enough bytes for a `V`. unsafe { V::check_bytes(value_ptr, context)?; } } Ok(()) } /// # Safety /// /// - `node_ptr` must be properly aligned and dereferenceable. /// - `len` must be less than or equal to `E`. unsafe fn check_inner_node( node_ptr: *const InnerNode, context: &mut C, ) -> Result<(), C::Error> where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, K: CheckBytes, V: CheckBytes, { context.in_subtree(node_ptr, |context| { for i in 0..E { // SAFETY: `in_subtree` guarantees that `node_ptr` is properly // aligned and dereferenceable. 
let lesser_node_ptr = unsafe { addr_of!((*node_ptr).lesser_nodes[i]) }; // SAFETY: `lesser_node_ptr` is a subfield of an inner node, and // so is guaranteed to be properly aligned and point to enough // bytes for a `RelPtr`. unsafe { RelPtr::check_bytes(lesser_node_ptr, context)?; } // SAFETY: We just checked the `lesser_node_ptr` and it // succeeded, so it's safe to dereference. let lesser_node = unsafe { &*lesser_node_ptr }; if !lesser_node.is_invalid() { check_node_rel_ptr::(lesser_node, context)?; } } // SAFETY: We checked that `node_ptr` is properly aligned and // dereferenceable. let greater_node_ptr = unsafe { addr_of!((*node_ptr).greater_node) }; // SAFETY: `greater_node_ptr` is a subfield of an inner node, and so // is guaranteed to be properly aligned and point to enough bytes // for a `RelPtr`. unsafe { RelPtr::check_bytes(greater_node_ptr, context)?; } // SAFETY: We just checked the `greater_node_ptr` and it succeeded, // so it's safe to dereference. let greater_node = unsafe { &*greater_node_ptr }; if !greater_node.is_invalid() { check_node_rel_ptr::(greater_node, context)?; } // SAFETY: We checked that `node_ptr` is properly aligned and // dereferenceable. let node_ptr = unsafe { addr_of!((*node_ptr).node) }; // SAFETY: // - The caller has guaranteed that `node_ptr` points to a valid // `Node`. // - All inner nodes have `E` items, and `E` is less than or equal // to `E`. unsafe { check_node_entries::(node_ptr, E, context)?; } Ok(()) }) } } rkyv-0.8.9/src/collections/btree_set.rs000064400000000000000000000067231046102023000162740ustar 00000000000000//! [`Archive`](crate::Archive) implementation for B-tree sets. use core::{borrow::Borrow, fmt, ops::ControlFlow}; use munge::munge; use rancor::{Fallible, Source}; use crate::{ collections::btree_map::{ArchivedBTreeMap, BTreeMapResolver}, ser::{Allocator, Writer}, Place, Portable, Serialize, }; /// An archived `BTreeSet`. This is a wrapper around a B-tree map with the same /// key and a value of `()`. 
#[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[derive(Portable)] #[rkyv(crate)] #[repr(transparent)] pub struct ArchivedBTreeSet(ArchivedBTreeMap); impl ArchivedBTreeSet { /// Returns `true` if the set contains a value for the specified key. /// /// The key may be any borrowed form of the set's key type, but the ordering /// on the borrowed form _must_ match the ordering on the key type. pub fn contains_key(&self, key: &Q) -> bool where K: Borrow + Ord, { self.0.contains_key(key) } /// Returns a reference to the value in the set, if any, that is equal to /// the given value. /// /// The value may be any borrowed form of the set's value type, but the /// ordering on the borrowed form _must_ match the ordering on the value /// type. pub fn get(&self, value: &Q) -> Option<&K> where K: Borrow + Ord, { self.0.get_key_value(value).map(|(key, _)| key) } /// Returns `true` if the set contains no elements. pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns the number of items in the archived B-tree set. pub fn len(&self) -> usize { self.0.len() } /// Resolves a B-tree set from its length. pub fn resolve_from_len( len: usize, resolver: BTreeSetResolver, out: Place, ) { munge!(let ArchivedBTreeSet(inner) = out); ArchivedBTreeMap::::resolve_from_len(len, resolver.0, inner); } /// Serializes an `ArchivedBTreeSet` from the given iterator and serializer. pub fn serialize_from_ordered_iter( iter: I, serializer: &mut S, ) -> Result where I: ExactSizeIterator, I::Item: Borrow, KU: Serialize, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { ArchivedBTreeMap::::serialize_from_ordered_iter::< _, _, _, _, (), _, >(iter.map(|k| (k, &())), serializer) .map(BTreeSetResolver) } /// Visits every key in the B-tree with a function. /// /// If `f` returns `ControlFlow::Break`, `visit` will return `Some` with the /// broken value. If `f` returns `Continue` for every key in the tree, /// `visit` will return `None`. 
pub fn visit( &self, mut f: impl FnMut(&K) -> ControlFlow, ) -> Option { self.0.visit(|k, _| f(k)) } } impl fmt::Debug for ArchivedBTreeSet where K: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut set = f.debug_set(); self.visit(|k| { set.entry(k); ControlFlow::<()>::Continue(()) }); set.finish() } } /// The resolver for archived B-tree sets. pub struct BTreeSetResolver(BTreeMapResolver); rkyv-0.8.9/src/collections/mod.rs000064400000000000000000000002001046102023000150570ustar 00000000000000//! Archived versions of standard library containers. pub mod btree_map; pub mod btree_set; pub mod swiss_table; pub mod util; rkyv-0.8.9/src/collections/swiss_table/index_map.rs000064400000000000000000000346361046102023000206070ustar 00000000000000//! An archived index map implementation based on Google's high-performance //! SwissTable hash map. use core::{ borrow::Borrow, fmt, hash::{Hash, Hasher}, iter::FusedIterator, marker::PhantomData, slice::{from_raw_parts, from_raw_parts_mut}, }; use munge::munge; use rancor::{Fallible, Source}; use crate::{ collections::{ swiss_table::{ArchivedHashTable, HashTableResolver}, util::{Entry, EntryAdapter, EntryResolver}, }, hash::{hash_value, FxHasher64}, primitive::{ArchivedUsize, FixedUsize}, seal::Seal, ser::{Allocator, Writer, WriterExt as _}, Place, Portable, RelPtr, Serialize, }; /// An archived `IndexMap`. #[derive(Portable)] #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] #[rkyv(crate)] #[repr(C)] pub struct ArchivedIndexMap { table: ArchivedHashTable, entries: RelPtr>, _phantom: PhantomData, } impl ArchivedIndexMap { fn entries(&self) -> &[Entry] { unsafe { from_raw_parts(self.entries.as_ptr(), self.len()) } } fn entries_seal(this: Seal<'_, Self>) -> Seal<'_, [Entry]> { let len = this.len(); munge!(let Self { entries, .. 
} = this); let slice = unsafe { from_raw_parts_mut(RelPtr::as_mut_ptr(entries), len) }; Seal::new(slice) } /// Returns `true` if the map contains no elements. pub const fn is_empty(&self) -> bool { self.len() == 0 } unsafe fn raw_iter(&self) -> RawIter { unsafe { RawIter::new(self.entries.as_ptr().cast(), self.len()) } } /// Returns an iterator over the key-value pairs of the map in order pub fn iter(&self) -> Iter { Iter { inner: unsafe { self.raw_iter() }, } } /// Returns an iterator over the keys of the map in order pub fn keys(&self) -> Keys { Keys { inner: unsafe { self.raw_iter() }, } } /// Gets the number of items in the index map. pub const fn len(&self) -> usize { self.table.len() } /// Returns an iterator over the values of the map in order. pub fn values(&self) -> Values { Values { inner: unsafe { self.raw_iter() }, } } } impl ArchivedIndexMap { /// Gets the index, key, and value corresponding to the supplied key using /// the given comparison function. pub fn get_full_with( &self, key: &Q, cmp: C, ) -> Option<(usize, &K, &V)> where Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { let index = self.get_index_of_with(key, cmp)?; let entries = self.entries(); let entry = &entries[index]; Some((index, &entry.key, &entry.value)) } /// Gets the index, key, and value corresponding to the supplied key. pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> where K: Borrow, Q: Hash + Eq + ?Sized, { self.get_full_with(key, |q, k| q == k.borrow()) } /// Returns the key-value pair corresponding to the supplied key using the /// given comparison function. pub fn get_key_value_with(&self, key: &Q, cmp: C) -> Option<(&K, &V)> where Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { let (_, k, v) = self.get_full_with(key, cmp)?; Some((k, v)) } /// Returns the key-value pair corresponding to the supplied key. 
pub fn get_key_value(&self, key: &Q) -> Option<(&K, &V)> where K: Borrow, Q: Hash + Eq + ?Sized, { let (_, k, v) = self.get_full(key)?; Some((k, v)) } /// Returns a reference to the value corresponding to the supplied key using /// the given comparison function. pub fn get_with(&self, key: &Q, cmp: C) -> Option<&V> where Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { Some(self.get_full_with(key, cmp)?.2) } /// Returns a reference to the value corresponding to the supplied key. pub fn get(&self, key: &Q) -> Option<&V> where K: Borrow, Q: Hash + Eq + ?Sized, { Some(self.get_full(key)?.2) } /// Gets the mutable index, key, and value corresponding to the supplied key /// using the given comparison function. pub fn get_full_seal_with<'a, Q, C>( this: Seal<'a, Self>, key: &Q, cmp: C, ) -> Option<(usize, &'a K, Seal<'a, V>)> where Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { let index = this.get_index_of_with(key, cmp)?; let entry = Seal::index(Self::entries_seal(this), index); munge!(let Entry { key, value } = entry); Some((index, key.unseal_ref(), value)) } /// Gets the mutable index, key, and value corresponding to the supplied /// key. pub fn get_full_seal<'a, Q>( this: Seal<'a, Self>, key: &Q, ) -> Option<(usize, &'a K, Seal<'a, V>)> where K: Borrow, Q: Hash + Eq + ?Sized, { Self::get_full_seal_with(this, key, |q, k| q == k.borrow()) } /// Returns the mutable key-value pair corresponding to the supplied key /// using the given comparison function. pub fn get_key_value_seal_with<'a, Q, C>( this: Seal<'a, Self>, key: &Q, cmp: C, ) -> Option<(&'a K, Seal<'a, V>)> where K: Borrow, Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { let (_, k, v) = Self::get_full_seal_with(this, key, cmp)?; Some((k, v)) } /// Returns the mutable key-value pair corresponding to the supplied key. 
pub fn get_key_value_seal<'a, Q>( this: Seal<'a, Self>, key: &Q, ) -> Option<(&'a K, Seal<'a, V>)> where K: Borrow, Q: Hash + Eq + ?Sized, { let (_, k, v) = Self::get_full_seal(this, key)?; Some((k, v)) } /// Returns a mutable reference to the value corresponding to the supplied /// key using the given comparison function. pub fn get_seal_with<'a, Q, C>( this: Seal<'a, Self>, key: &Q, cmp: C, ) -> Option> where K: Borrow, Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { Some(Self::get_full_seal_with(this, key, cmp)?.2) } /// Returns a mutable reference to the value corresponding to the supplied /// key. pub fn get_seal<'a, Q>(this: Seal<'a, Self>, key: &Q) -> Option> where K: Borrow, Q: Hash + Eq + ?Sized, { Some(Self::get_full_seal(this, key)?.2) } /// Returns whether a key is present in the hash map. pub fn contains_key(&self, key: &Q) -> bool where K: Borrow, Q: Hash + Eq + ?Sized, { self.get(key).is_some() } /// Gets a key-value pair by index. pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { if index < self.len() { let entry = &self.entries()[index]; Some((&entry.key, &entry.value)) } else { None } } /// Gets the index of a key if it exists in the map using the given /// comparison function. pub fn get_index_of_with(&self, key: &Q, cmp: C) -> Option where Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { let entries = self.entries(); let index = self.table.get_with(hash_value::(key), |i| { cmp(key, &entries[i.to_native() as usize].key) })?; Some(index.to_native() as usize) } /// Gets the index of a key if it exists in the map. pub fn get_index_of(&self, key: &Q) -> Option where K: Borrow, Q: Hash + Eq + ?Sized, { self.get_index_of_with(key, |q, k| q == k.borrow()) } /// Resolves an archived index map from a given length and parameters. 
pub fn resolve_from_len( len: usize, load_factor: (usize, usize), resolver: IndexMapResolver, out: Place, ) { munge!(let ArchivedIndexMap { table, entries, _phantom: _ } = out); ArchivedHashTable::resolve_from_len( len, load_factor, resolver.table_resolver, table, ); RelPtr::emplace(resolver.entries_pos as usize, entries); } /// Serializes an iterator of key-value pairs as an index map. pub fn serialize_from_iter( iter: I, load_factor: (usize, usize), serializer: &mut S, ) -> Result where I: Clone + ExactSizeIterator, BKU: Borrow, BVU: Borrow, KU: Serialize + Hash + Eq, VU: Serialize, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, { use crate::util::SerVec; // Serialize hash table let table_resolver = ArchivedHashTable::::serialize_from_iter( 0..iter.len(), iter.clone() .map(|(key, _)| hash_value::(key.borrow())), load_factor, serializer, )?; // Serialize entries SerVec::with_capacity( serializer, iter.len(), |resolvers, serializer| { for (key, value) in iter.clone() { resolvers.push(EntryResolver { key: key.borrow().serialize(serializer)?, value: value.borrow().serialize(serializer)?, }); } let entries_pos = serializer.align_for::>()?; for ((key, value), resolver) in iter.clone().zip(resolvers.drain()) { unsafe { serializer.resolve_aligned( &EntryAdapter::new(key, value), resolver, )?; } } Ok(IndexMapResolver { table_resolver, entries_pos: entries_pos as FixedUsize, }) }, )? 
} } impl fmt::Debug for ArchivedIndexMap where K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } impl PartialEq for ArchivedIndexMap where K: PartialEq, V: PartialEq, { fn eq(&self, other: &Self) -> bool { self.iter().eq(other.iter()) } } impl Eq for ArchivedIndexMap {} struct RawIter<'a, K, V> { current: *const Entry, remaining: usize, _phantom: PhantomData<(&'a K, &'a V)>, } impl RawIter<'_, K, V> { fn new(pairs: *const Entry, len: usize) -> Self { Self { current: pairs, remaining: len, _phantom: PhantomData, } } } impl<'a, K, V> Iterator for RawIter<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option { unsafe { if self.remaining == 0 { None } else { let result = self.current; self.current = self.current.add(1); self.remaining -= 1; let entry = &*result; Some((&entry.key, &entry.value)) } } } fn size_hint(&self) -> (usize, Option) { (self.remaining, Some(self.remaining)) } } impl ExactSizeIterator for RawIter<'_, K, V> {} impl FusedIterator for RawIter<'_, K, V> {} /// An iterator over the key-value pairs of an index map. #[repr(transparent)] pub struct Iter<'a, K, V> { inner: RawIter<'a, K, V>, } impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option { self.inner.next() } fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl ExactSizeIterator for Iter<'_, K, V> {} impl FusedIterator for Iter<'_, K, V> {} /// An iterator over the keys of an index map. #[repr(transparent)] pub struct Keys<'a, K, V> { inner: RawIter<'a, K, V>, } impl<'a, K, V> Iterator for Keys<'a, K, V> { type Item = &'a K; fn next(&mut self) -> Option { self.inner.next().map(|(k, _)| k) } fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl ExactSizeIterator for Keys<'_, K, V> {} impl FusedIterator for Keys<'_, K, V> {} /// An iterator over the values of an index map. 
#[repr(transparent)] pub struct Values<'a, K, V> { inner: RawIter<'a, K, V>, } impl<'a, K, V> Iterator for Values<'a, K, V> { type Item = &'a V; fn next(&mut self) -> Option { self.inner.next().map(|(_, v)| v) } fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } impl ExactSizeIterator for Values<'_, K, V> {} impl FusedIterator for Values<'_, K, V> {} /// The resolver for an `IndexMap`. pub struct IndexMapResolver { table_resolver: HashTableResolver, entries_pos: FixedUsize, } #[cfg(feature = "bytecheck")] mod verify { use bytecheck::{CheckBytes, Verify}; use rancor::{Fallible, Source}; use super::ArchivedIndexMap; use crate::{ collections::util::Entry, validation::{ArchiveContext, ArchiveContextExt}, }; unsafe impl Verify for ArchivedIndexMap where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, K: CheckBytes, V: CheckBytes, { fn verify( &self, context: &mut C, ) -> Result<(), ::Error> { let ptr = core::ptr::slice_from_raw_parts( self.entries.as_ptr_wrapping(), self.table.len(), ); context.in_subtree(ptr, |context| { // SAFETY: `in_subtree` has checked that `ptr` is aligned and // points to enough bytes to represent its slice. unsafe { <[Entry]>::check_bytes(ptr, context) } }) } } } rkyv-0.8.9/src/collections/swiss_table/index_set.rs000064400000000000000000000073361046102023000206220ustar 00000000000000//! An archived index set implementation based on Google's high-performance //! SwissTable hash map. use core::{ borrow::Borrow, fmt, hash::{Hash, Hasher}, }; use munge::munge; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::{ index_map::Keys, ArchivedIndexMap, IndexMapResolver, }, hash::FxHasher64, ser::{Allocator, Writer}, Place, Portable, Serialize, }; /// An archived `IndexSet`. 
#[derive(Portable)] #[rkyv(crate)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[repr(transparent)] pub struct ArchivedIndexSet { inner: ArchivedIndexMap, } impl ArchivedIndexSet { /// Returns whether the index set contains no values. pub fn is_empty(&self) -> bool { self.inner.is_empty() } /// Returns an iterator over the keys of the index set in order. pub fn iter(&self) -> Keys { self.inner.keys() } /// Returns the number of elements in the index set. pub fn len(&self) -> usize { self.inner.len() } } impl ArchivedIndexSet { /// Returns whether a key is present in the hash set. pub fn contains(&self, k: &Q) -> bool where K: Borrow, Q: Hash + Eq + ?Sized, { self.inner.contains_key(k) } /// Returns the value stored in the set, if any. pub fn get(&self, k: &Q) -> Option<&K> where K: Borrow, Q: Hash + Eq + ?Sized, { self.inner.get_full(k).map(|(_, k, _)| k) } /// Returns the item index and value stored in the set, if any. pub fn get_full(&self, k: &Q) -> Option<(usize, &K)> where K: Borrow, Q: Hash + Eq + ?Sized, { self.inner.get_full(k).map(|(i, k, _)| (i, k)) } /// Gets a key by index. pub fn get_index(&self, index: usize) -> Option<&K> { self.inner.get_index(index).map(|(k, _)| k) } /// Returns the index of a key if it exists in the set. pub fn get_index_of(&self, key: &Q) -> Option where K: Borrow, Q: Hash + Eq + ?Sized, { self.inner.get_index_of(key) } /// Resolves an archived index map from a given length and parameters. pub fn resolve_from_len( len: usize, load_factor: (usize, usize), resolver: IndexSetResolver, out: Place, ) { munge!(let ArchivedIndexSet { inner } = out); ArchivedIndexMap::resolve_from_len(len, load_factor, resolver.0, inner); } /// Serializes an iterator of keys as an index set. 
pub fn serialize_from_iter( iter: I, load_factor: (usize, usize), serializer: &mut S, ) -> Result where I: Clone + ExactSizeIterator, I::Item: Borrow, UK: Serialize + Hash + Eq, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, { Ok(IndexSetResolver( ArchivedIndexMap::::serialize_from_iter::< _, _, (), _, _, _, >(iter.map(|x| (x, ())), load_factor, serializer)?, )) } } impl fmt::Debug for ArchivedIndexSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() } } impl PartialEq for ArchivedIndexSet { fn eq(&self, other: &Self) -> bool { self.iter().eq(other.iter()) } } impl Eq for ArchivedIndexSet {} /// The resolver for archived index sets. pub struct IndexSetResolver(IndexMapResolver); rkyv-0.8.9/src/collections/swiss_table/map.rs000064400000000000000000000270561046102023000174160ustar 00000000000000//! Archived hash map implementation using an archived SwissTable. use core::{ borrow::Borrow, fmt, hash::{Hash, Hasher}, iter::FusedIterator, marker::PhantomData, ops::Index, }; use munge::munge; use rancor::{Fallible, Source}; use crate::{ collections::{ swiss_table::table::{ArchivedHashTable, HashTableResolver, RawIter}, util::{Entry, EntryAdapter}, }, hash::{hash_value, FxHasher64}, seal::Seal, ser::{Allocator, Writer}, Place, Portable, Serialize, }; /// An archived SwissTable hash map. #[derive(Portable)] #[rkyv(crate)] #[repr(transparent)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] pub struct ArchivedHashMap { table: ArchivedHashTable>, _phantom: PhantomData, } impl ArchivedHashMap { /// Returns whether the hash map is empty. pub const fn is_empty(&self) -> bool { self.table.is_empty() } /// Returns the number of elements in the hash map. pub const fn len(&self) -> usize { self.table.len() } /// Returns the total capacity of the hash map. pub fn capacity(&self) -> usize { self.table.capacity() } /// Returns an iterator over the key-value entries in the hash map. 
pub fn iter(&self) -> Iter<'_, K, V, H> { Iter { raw: self.table.raw_iter(), _phantom: PhantomData, } } /// Returns an iterator over the sealed key-value entries in the hash map. pub fn iter_seal(this: Seal<'_, Self>) -> IterMut<'_, K, V, H> { munge!(let Self { table, .. } = this); IterMut { raw: ArchivedHashTable::raw_iter_seal(table), _phantom: PhantomData, } } /// Returns an iterator over the keys in the hash map. pub fn keys(&self) -> Keys<'_, K, V, H> { Keys { raw: self.table.raw_iter(), _phantom: PhantomData, } } /// Returns an iterator over the values in the hash map. pub fn values(&self) -> Values<'_, K, V, H> { Values { raw: self.table.raw_iter(), _phantom: PhantomData, } } /// Returns an iterator over the mutable values in the hash map. pub fn values_seal(this: Seal<'_, Self>) -> ValuesMut<'_, K, V, H> { munge!(let Self { table, .. } = this); ValuesMut { raw: ArchivedHashTable::raw_iter_seal(table), _phantom: PhantomData, } } } impl ArchivedHashMap { /// Returns the key-value pair corresponding to the supplied key using the /// given comparison function. pub fn get_key_value_with(&self, key: &Q, cmp: C) -> Option<(&K, &V)> where Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { let entry = self .table .get_with(hash_value::(key), |e| cmp(key, &e.key))?; Some((&entry.key, &entry.value)) } /// Returns the key-value pair corresponding to the supplied key. pub fn get_key_value(&self, key: &Q) -> Option<(&K, &V)> where K: Borrow, Q: Hash + Eq + ?Sized, { self.get_key_value_with(key, |q, k| q == k.borrow()) } /// Returns a reference to the value corresponding to the supplied key using /// the given comparison function. pub fn get_with(&self, key: &Q, cmp: C) -> Option<&V> where Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { Some(self.get_key_value_with(key, cmp)?.1) } /// Returns a reference to the value corresponding to the supplied key. 
pub fn get(&self, key: &Q) -> Option<&V> where K: Borrow, Q: Hash + Eq + ?Sized, { Some(self.get_key_value(key)?.1) } /// Returns the mutable key-value pair corresponding to the supplied key /// using the given comparison function. pub fn get_key_value_seal_with<'a, Q, C>( this: Seal<'a, Self>, key: &Q, cmp: C, ) -> Option<(&'a K, Seal<'a, V>)> where K: Borrow, Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { munge!(let Self { table, .. } = this); let entry = ArchivedHashTable::get_seal_with( table, hash_value::(key), |e| cmp(key, &e.key), )?; munge!(let Entry { key, value } = entry); Some((key.unseal_ref(), value)) } /// Returns the mutable key-value pair corresponding to the supplied key. pub fn get_key_value_seal<'a, Q>( this: Seal<'a, Self>, key: &Q, ) -> Option<(&'a K, Seal<'a, V>)> where K: Borrow, Q: Hash + Eq + ?Sized, { Self::get_key_value_seal_with(this, key, |q, k| q == k.borrow()) } /// Returns a mutable reference to the value corresponding to the supplied /// key using the given comparison function. pub fn get_seal_with<'a, Q, C>( this: Seal<'a, Self>, key: &Q, cmp: C, ) -> Option> where K: Borrow, Q: Hash + Eq + ?Sized, C: Fn(&Q, &K) -> bool, { Some(Self::get_key_value_seal_with(this, key, cmp)?.1) } /// Returns a mutable reference to the value corresponding to the supplied /// key. pub fn get_seal<'a, Q>(this: Seal<'a, Self>, key: &Q) -> Option> where K: Borrow, Q: Hash + Eq + ?Sized, { Some(Self::get_key_value_seal(this, key)?.1) } /// Returns whether the hash map contains the given key. pub fn contains_key(&self, key: &Q) -> bool where K: Borrow, Q: Hash + Eq + ?Sized, { self.get(key).is_some() } /// Serializes an iterator of key-value pairs as a hash map. 
pub fn serialize_from_iter( iter: I, load_factor: (usize, usize), serializer: &mut S, ) -> Result where I: Clone + ExactSizeIterator, BKU: Borrow, BVU: Borrow, KU: Serialize + Hash + Eq, VU: Serialize, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, { ArchivedHashTable::>::serialize_from_iter( iter.clone() .map(|(key, value)| EntryAdapter::new(key, value)), iter.map(|(key, _)| hash_value::(key.borrow())), load_factor, serializer, ) .map(HashMapResolver) } /// Resolves an archived hash map from a given length and parameters. pub fn resolve_from_len( len: usize, load_factor: (usize, usize), resolver: HashMapResolver, out: Place, ) { munge!(let ArchivedHashMap { table, _phantom: _ } = out); ArchivedHashTable::>::resolve_from_len( len, load_factor, resolver.0, table, ) } } impl fmt::Debug for ArchivedHashMap where K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } impl Eq for ArchivedHashMap where K: Hash + Eq, V: Eq, H: Default + Hasher, { } impl PartialEq for ArchivedHashMap where K: Hash + Eq, V: PartialEq, H: Default + Hasher, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { false } else { self.iter().all(|(key, value)| { other.get(key).is_some_and(|v| *value == *v) }) } } } impl Index<&'_ Q> for ArchivedHashMap where K: Eq + Hash + Borrow, Q: Eq + Hash + ?Sized, H: Default + Hasher, { type Output = V; fn index(&self, key: &Q) -> &V { self.get(key).unwrap() } } /// The resolver for [`ArchivedHashMap`]. pub struct HashMapResolver(HashTableResolver); /// An iterator over the key-value pairs of an [`ArchivedHashMap`]. 
pub struct Iter<'a, K, V, H> { raw: RawIter>, _phantom: PhantomData<&'a ArchivedHashMap>, } impl<'a, K, V, H> Iterator for Iter<'a, K, V, H> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option { self.raw.next().map(|entry| { let entry = unsafe { entry.as_ref() }; (&entry.key, &entry.value) }) } } impl ExactSizeIterator for Iter<'_, K, V, H> { fn len(&self) -> usize { self.raw.len() } } impl FusedIterator for Iter<'_, K, V, H> {} /// An iterator over the mutable key-value pairs of an [`ArchivedHashMap`]. pub struct IterMut<'a, K, V, H> { raw: RawIter>, _phantom: PhantomData<&'a ArchivedHashMap>, } impl<'a, K, V, H> Iterator for IterMut<'a, K, V, H> { type Item = (&'a K, Seal<'a, V>); fn next(&mut self) -> Option { self.raw.next().map(|mut entry| { let entry = unsafe { entry.as_mut() }; (&entry.key, Seal::new(&mut entry.value)) }) } } impl ExactSizeIterator for IterMut<'_, K, V, H> { fn len(&self) -> usize { self.raw.len() } } impl FusedIterator for IterMut<'_, K, V, H> {} /// An iterator over the keys of an [`ArchivedHashMap`]. pub struct Keys<'a, K, V, H> { raw: RawIter>, _phantom: PhantomData<&'a ArchivedHashMap>, } impl<'a, K, V, H> Iterator for Keys<'a, K, V, H> { type Item = &'a K; fn next(&mut self) -> Option { self.raw.next().map(|entry| { let entry = unsafe { entry.as_ref() }; &entry.key }) } } impl ExactSizeIterator for Keys<'_, K, V, H> { fn len(&self) -> usize { self.raw.len() } } impl FusedIterator for Keys<'_, K, V, H> {} /// An iterator over the values of an [`ArchivedHashMap`]. 
pub struct Values<'a, K, V, H> { raw: RawIter>, _phantom: PhantomData<&'a ArchivedHashMap>, } impl<'a, K, V, H> Iterator for Values<'a, K, V, H> { type Item = &'a V; fn next(&mut self) -> Option { self.raw.next().map(|entry| { let entry = unsafe { entry.as_ref() }; &entry.value }) } } impl ExactSizeIterator for Values<'_, K, V, H> { fn len(&self) -> usize { self.raw.len() } } impl FusedIterator for Values<'_, K, V, H> {} /// An iterator over the mutable values of an [`ArchivedHashMap`]. pub struct ValuesMut<'a, K, V, H> { raw: RawIter>, _phantom: PhantomData<&'a ArchivedHashMap>, } impl<'a, K, V, H> Iterator for ValuesMut<'a, K, V, H> { type Item = Seal<'a, V>; fn next(&mut self) -> Option { self.raw.next().map(|mut entry| { let entry = unsafe { entry.as_mut() }; Seal::new(&mut entry.value) }) } } impl ExactSizeIterator for ValuesMut<'_, K, V, H> { fn len(&self) -> usize { self.raw.len() } } impl FusedIterator for ValuesMut<'_, K, V, H> {} rkyv-0.8.9/src/collections/swiss_table/mod.rs000064400000000000000000000006441046102023000174120ustar 00000000000000//! SwissTable-based implementation for archived hash map and hash set. pub mod index_map; pub mod index_set; pub mod map; pub mod set; pub mod table; pub use index_map::{ArchivedIndexMap, IndexMapResolver}; pub use index_set::{ArchivedIndexSet, IndexSetResolver}; pub use map::{ArchivedHashMap, HashMapResolver}; pub use set::{ArchivedHashSet, HashSetResolver}; pub use table::{ArchivedHashTable, HashTableResolver}; rkyv-0.8.9/src/collections/swiss_table/set.rs000064400000000000000000000062411046102023000174250ustar 00000000000000//! Archived hash set implementation using an archived SwissTable. use core::{ borrow::Borrow, fmt, hash::{Hash, Hasher}, }; use munge::munge; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::map::{ArchivedHashMap, HashMapResolver, Keys}, hash::FxHasher64, ser::{Allocator, Writer}, Place, Portable, Serialize, }; /// An archived `HashSet`. 
This is a wrapper around a hash map with the same key /// and unit value. #[derive(Portable)] #[rkyv(crate)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[repr(transparent)] pub struct ArchivedHashSet { inner: ArchivedHashMap, } impl ArchivedHashSet { /// Gets the number of items in the hash set. pub const fn len(&self) -> usize { self.inner.len() } /// Returns whether there are no items in the hash set. pub const fn is_empty(&self) -> bool { self.inner.is_empty() } /// Gets an iterator over the keys of the underlying hash map. pub fn iter(&self) -> Keys { self.inner.keys() } } impl ArchivedHashSet { /// Gets the key corresponding to the given key in the hash set. pub fn get(&self, k: &Q) -> Option<&K> where K: Borrow, Q: Hash + Eq + ?Sized, { self.inner.get_key_value(k).map(|(k, _)| k) } /// Returns whether the given key is in the hash set. pub fn contains(&self, k: &Q) -> bool where K: Borrow, Q: Hash + Eq + ?Sized, { self.inner.contains_key(k) } /// Resolves an archived hash set from the given length and parameters. pub fn resolve_from_len( len: usize, load_factor: (usize, usize), resolver: HashSetResolver, out: Place, ) { munge!(let ArchivedHashSet { inner } = out); ArchivedHashMap::resolve_from_len(len, load_factor, resolver.0, inner); } /// Serializes an iterator of keys as a hash set. 
pub fn serialize_from_iter( iter: I, load_factor: (usize, usize), serializer: &mut S, ) -> Result where I: Clone + ExactSizeIterator, I::Item: Borrow, KU: Serialize + Hash + Eq, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, { Ok(HashSetResolver( ArchivedHashMap::::serialize_from_iter::< _, _, (), _, _, _, >(iter.map(|x| (x, ())), load_factor, serializer)?, )) } } impl fmt::Debug for ArchivedHashSet { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() } } impl PartialEq for ArchivedHashSet { fn eq(&self, other: &Self) -> bool { self.inner == other.inner } } impl Eq for ArchivedHashSet {} /// The resolver for archived hash sets. pub struct HashSetResolver(HashMapResolver); rkyv-0.8.9/src/collections/swiss_table/table.rs000064400000000000000000000544041046102023000177250ustar 00000000000000//! An archived hash table implementation based on Google's high-performance //! SwissTable hash map. //! //! Notable differences from other implementations: //! //! - The number of control bytes is rounded up to a maximum group width (16) //! instead of the next power of two. This reduces the number of empty buckets //! on the wire. Since this collection is immutable after writing, we'll never //! benefit from having more buckets than we need. //! - Because the bucket count is not a power of two, the triangular probing //! sequence simply skips any indices larger than the actual size of the //! buckets array. //! - Instead of the final control bytes always being marked EMPTY, the last //! control bytes repeat the first few. This helps reduce the number of //! lookups when probing at the end of the control bytes. //! - Because the available SIMD group width may be less than the maximum group //! width, each probe reads N groups before striding where N is the maximum //! group width divided by the SIMD group width. 
use core::{ alloc::Layout, borrow::Borrow, error::Error, fmt, marker::PhantomData, mem::{size_of, MaybeUninit}, ptr::{self, null, NonNull}, slice::from_raw_parts, }; use munge::munge; use rancor::{fail, Fallible, ResultExt as _, Source}; use crate::{ collections::util::IteratorLengthMismatch, primitive::{ArchivedUsize, FixedUsize}, seal::Seal, ser::{Allocator, Writer, WriterExt}, simd::{Bitmask, Group, MAX_GROUP_WIDTH}, util::SerVec, Archive as _, Place, Portable, RawRelPtr, Serialize, }; /// A low-level archived SwissTable hash table with explicit hashing. #[derive(Portable)] #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] #[rkyv(crate)] #[repr(C)] pub struct ArchivedHashTable { ptr: RawRelPtr, len: ArchivedUsize, cap: ArchivedUsize, _phantom: PhantomData, } #[inline] fn h1(hash: u64) -> usize { hash as usize } #[inline] fn h2(hash: u64) -> u8 { (hash >> 57) as u8 } struct ProbeSeq { pos: usize, stride: usize, } impl ProbeSeq { #[inline] fn move_next(&mut self, bucket_mask: usize) { self.stride += MAX_GROUP_WIDTH; self.pos += self.stride; self.pos &= bucket_mask; } } impl ArchivedHashTable { fn probe_seq(hash: u64, capacity: usize) -> ProbeSeq { ProbeSeq { pos: h1(hash) % capacity, stride: 0, } } /// # Safety /// /// - `this` must point to a valid `ArchivedHashTable` /// - `index` must be less than `len()` unsafe fn control_raw(this: *mut Self, index: usize) -> *const u8 { debug_assert!(unsafe { !(*this).is_empty() }); // SAFETY: As an invariant of `ArchivedHashTable`, if `self` is not // empty then `self.ptr` is a valid relative pointer. Since `index` is // at least 0 and strictly less than `len()`, this table must not be // empty. let ptr = unsafe { RawRelPtr::as_ptr_raw(ptr::addr_of_mut!((*this).ptr)) }; // SAFETY: The caller has guaranteed that `index` is less than `len()`, // and the first `len()` bytes following `ptr` are the control bytes of // the hash table. 
unsafe { ptr.cast::().add(index) } } /// # Safety /// /// - `this` must point to a valid `ArchivedHashTable` /// - `index` must be less than `len()` unsafe fn bucket_raw(this: *mut Self, index: usize) -> NonNull { unsafe { NonNull::new_unchecked( RawRelPtr::as_ptr_raw(ptr::addr_of_mut!((*this).ptr)) .cast::() .sub(index + 1), ) } } fn bucket_mask(capacity: usize) -> usize { capacity.checked_next_power_of_two().unwrap() - 1 } /// # Safety /// /// `this` must point to a valid `ArchivedHashTable` unsafe fn get_entry_raw( this: *mut Self, hash: u64, cmp: C, ) -> Option> where C: Fn(&T) -> bool, { let is_empty = unsafe { (*this).is_empty() }; if is_empty { return None; } let capacity = unsafe { (*this).capacity() }; let probe_cap = Self::probe_cap(capacity); let control_count = Self::control_count(probe_cap); let h2_hash = h2(hash); let mut probe_seq = Self::probe_seq(hash, capacity); let bucket_mask = Self::bucket_mask(control_count); loop { let mut any_empty = false; for i in 0..MAX_GROUP_WIDTH / Group::WIDTH { let pos = probe_seq.pos + i * Group::WIDTH; let group = unsafe { Group::read(Self::control_raw(this, pos)) }; for bit in group.match_byte(h2_hash) { let index = (pos + bit) % capacity; let bucket_ptr = unsafe { Self::bucket_raw(this, index) }; let bucket = unsafe { bucket_ptr.as_ref() }; // Opt: These can be marked as likely true on nightly. if cmp(bucket) { return Some(bucket_ptr); } } // Opt: These can be marked as likely true on nightly. any_empty = any_empty || group.match_empty().any_bit_set(); } if any_empty { return None; } loop { probe_seq.move_next(bucket_mask); if probe_seq.pos < probe_cap { break; } } } } /// Returns the key-value pair corresponding to the supplied key. pub fn get_with(&self, hash: u64, cmp: C) -> Option<&T> where C: Fn(&T) -> bool, { let this = (self as *const Self).cast_mut(); let ptr = unsafe { Self::get_entry_raw(this, hash, |e| cmp(e))? 
}; Some(unsafe { ptr.as_ref() }) } /// Returns the mutable key-value pair corresponding to the supplied key. pub fn get_seal_with( this: Seal<'_, Self>, hash: u64, cmp: C, ) -> Option> where C: Fn(&T) -> bool, { let mut ptr = unsafe { Self::get_entry_raw(this.unseal_unchecked(), hash, |e| cmp(e))? }; Some(Seal::new(unsafe { ptr.as_mut() })) } /// Returns whether the hash table is empty. pub const fn is_empty(&self) -> bool { self.len.to_native() == 0 } /// Returns the number of elements in the hash table. pub const fn len(&self) -> usize { self.len.to_native() as usize } /// Returns the total capacity of the hash table. pub fn capacity(&self) -> usize { self.cap.to_native() as usize } /// # Safety /// /// This hash table must not be empty. unsafe fn control_iter(this: *mut Self) -> ControlIter { ControlIter { current_mask: unsafe { Group::read(Self::control_raw(this, 0)).match_full() }, next_group: unsafe { Self::control_raw(this, Group::WIDTH) }, } } /// Returns an iterator over the entry pointers in the hash table. pub fn raw_iter(&self) -> RawIter { if self.is_empty() { RawIter::empty() } else { let this = (self as *const Self).cast_mut(); RawIter { // SAFETY: We have checked that `self` is not empty. controls: unsafe { Self::control_iter(this) }, entries: unsafe { NonNull::new_unchecked(self.ptr.as_ptr().cast_mut().cast()) }, items_left: self.len(), } } } /// Returns a sealed iterator over the entry pointers in the hash table. pub fn raw_iter_seal(mut this: Seal<'_, Self>) -> RawIter { if this.is_empty() { RawIter::empty() } else { // SAFETY: We have checked that `this` is not empty. let controls = unsafe { Self::control_iter(this.as_mut().unseal_unchecked()) }; let items_left = this.len(); munge!(let Self { ptr, .. 
} = this); RawIter { controls, entries: unsafe { NonNull::new_unchecked(RawRelPtr::as_mut_ptr(ptr).cast()) }, items_left, } } } fn capacity_from_len(len: usize, load_factor: (usize, usize)) -> usize { if len == 0 { 0 } else { usize::max(len * load_factor.1 / load_factor.0, len + 1) } } fn probe_cap(capacity: usize) -> usize { capacity.next_multiple_of(MAX_GROUP_WIDTH) } fn control_count(probe_cap: usize) -> usize { probe_cap + MAX_GROUP_WIDTH - 1 } #[allow(dead_code)] fn memory_layout( capacity: usize, control_count: usize, ) -> Result<(Layout, usize), E> { let buckets_layout = Layout::array::(capacity).into_error()?; let control_layout = Layout::array::(control_count).into_error()?; buckets_layout.extend(control_layout).into_error() } /// Serializes an iterator of items as a hash table. pub fn serialize_from_iter( items: I, hashes: H, load_factor: (usize, usize), serializer: &mut S, ) -> Result where I: Clone + ExactSizeIterator, I::Item: Borrow, U: Serialize, H: ExactSizeIterator, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, { #[derive(Debug)] struct InvalidLoadFactor { numerator: usize, denominator: usize, } impl fmt::Display for InvalidLoadFactor { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "invalid load factor {} / {}, load factor must be a \ fraction in the range (0, 1]", self.numerator, self.denominator ) } } impl Error for InvalidLoadFactor {} if load_factor.0 == 0 || load_factor.1 == 0 || load_factor.0 > load_factor.1 { fail!(InvalidLoadFactor { numerator: load_factor.0, denominator: load_factor.1, }); } let len = items.len(); if len == 0 { let count = items.count(); if count != 0 { fail!(IteratorLengthMismatch { expected: 0, actual: count, }); } return Ok(HashTableResolver { pos: 0 }); } let capacity = Self::capacity_from_len(len, load_factor); let probe_cap = Self::probe_cap(capacity); let control_count = Self::control_count(probe_cap); // Determine hash locations for all items SerVec::with_capacity( serializer, 
capacity, |ordered_items, serializer| { for _ in 0..capacity { unsafe { ordered_items.push_unchecked(None); } } SerVec::::with_capacity( serializer, control_count, |control_bytes, serializer| { // Initialize all control bytes to EMPTY (0xFF) unsafe { control_bytes .as_mut_ptr() .write_bytes(0xff, control_bytes.capacity()); control_bytes.set_len(control_bytes.capacity()); } let bucket_mask = Self::bucket_mask(control_count); for (item, hash) in items.zip(hashes) { let h2_hash = h2(hash); let mut probe_seq = Self::probe_seq(hash, capacity); 'insert: loop { for i in 0..MAX_GROUP_WIDTH / Group::WIDTH { let pos = probe_seq.pos + i * Group::WIDTH; let group = unsafe { Group::read( control_bytes.as_ptr().add(pos), ) }; if let Some(bit) = group.match_empty().lowest_set_bit() { let index = (pos + bit) % capacity; // Update control byte control_bytes[index] = h2_hash; // If it's near the beginning of the // control bytes, // update the wraparound control byte if index < (control_count - capacity) { control_bytes[capacity + index] = h2_hash; } ordered_items[index] = Some(item); break 'insert; } } loop { probe_seq.move_next(bucket_mask); if probe_seq.pos < probe_cap { break; } } } } let mut zeros = MaybeUninit::::uninit(); unsafe { zeros.as_mut_ptr().write_bytes(0, 1); } let zeros = unsafe { from_raw_parts( zeros.as_ptr().cast::(), size_of::(), ) }; SerVec::with_capacity( serializer, len, |resolvers, serializer| { for item in ordered_items .iter() .filter_map(|x| x.as_ref()) { resolvers.push( item.borrow().serialize(serializer)?, ); } serializer.align_for::()?; let mut resolvers = resolvers.drain().rev(); for item in ordered_items.iter().rev() { if let Some(item) = item { unsafe { serializer.resolve_aligned( item.borrow(), resolvers.next().unwrap(), )?; } } else { serializer.write(zeros)?; } } let pos = serializer.pos(); serializer.write(control_bytes)?; Ok(HashTableResolver { pos: pos as FixedUsize, }) }, )? }, )? }, )? 
} /// Resolves an archived hash table from a given length and parameters. pub fn resolve_from_len( len: usize, load_factor: (usize, usize), resolver: HashTableResolver, out: Place, ) { munge!(let Self { ptr, len: out_len, cap, _phantom: _ } = out); if len == 0 { RawRelPtr::emplace_invalid(ptr); } else { RawRelPtr::emplace(resolver.pos as usize, ptr); } len.resolve((), out_len); let capacity = Self::capacity_from_len(len, load_factor); capacity.resolve((), cap); // PhantomData doesn't need to be initialized } } /// The resolver for [`ArchivedHashTable`]. pub struct HashTableResolver { pos: FixedUsize, } struct ControlIter { current_mask: Bitmask, next_group: *const u8, } unsafe impl Send for ControlIter {} unsafe impl Sync for ControlIter {} impl ControlIter { fn none() -> Self { Self { current_mask: Bitmask::EMPTY, next_group: null(), } } #[inline] fn next_full(&mut self) -> Option { let bit = self.current_mask.lowest_set_bit()?; self.current_mask = self.current_mask.remove_lowest_bit(); Some(bit) } #[inline] fn move_next(&mut self) { self.current_mask = unsafe { Group::read(self.next_group).match_full() }; self.next_group = unsafe { self.next_group.add(Group::WIDTH) }; } } /// An iterator over the entry pointers of an [`ArchivedHashTable`]. pub struct RawIter { controls: ControlIter, entries: NonNull, items_left: usize, } impl RawIter { /// Returns a raw iterator which yields no elements. 
pub fn empty() -> Self { Self { controls: ControlIter::none(), entries: NonNull::dangling(), items_left: 0, } } } impl Iterator for RawIter { type Item = NonNull; fn next(&mut self) -> Option { if self.items_left == 0 { None } else { let bit = loop { if let Some(bit) = self.controls.next_full() { break bit; } self.controls.move_next(); self.entries = unsafe { NonNull::new_unchecked( self.entries.as_ptr().sub(Group::WIDTH), ) }; }; self.items_left -= 1; let entry = unsafe { NonNull::new_unchecked(self.entries.as_ptr().sub(bit + 1)) }; Some(entry) } } } impl ExactSizeIterator for RawIter { fn len(&self) -> usize { self.items_left } } #[cfg(feature = "bytecheck")] mod verify { use core::{error::Error, fmt}; use bytecheck::{CheckBytes, Verify}; use rancor::{fail, Fallible, Source}; use super::ArchivedHashTable; use crate::{ simd::Group, validation::{ArchiveContext, ArchiveContextExt as _}, }; #[derive(Debug)] struct InvalidLength { len: usize, cap: usize, } impl fmt::Display for InvalidLength { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "hash table length must be strictly less than its capacity \ (length: {}, capacity: {})", self.len, self.cap, ) } } impl Error for InvalidLength {} #[derive(Debug)] struct UnwrappedControlByte { index: usize, } impl fmt::Display for UnwrappedControlByte { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "unwrapped control byte at index {}", self.index,) } } impl Error for UnwrappedControlByte {} unsafe impl Verify for ArchivedHashTable where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, T: CheckBytes, { fn verify(&self, context: &mut C) -> Result<(), C::Error> { let len = self.len(); let cap = self.capacity(); if len == 0 && cap == 0 { return Ok(()); } if len >= cap { fail!(InvalidLength { len, cap }); } // Check memory allocation let probe_cap = Self::probe_cap(cap); let control_count = Self::control_count(probe_cap); let (layout, control_offset) = Self::memory_layout(cap, 
control_count)?; let ptr = self .ptr .as_ptr_wrapping() .cast::() .wrapping_sub(control_offset); context.in_subtree_raw(ptr, layout, |context| { // Check each non-empty bucket let this = (self as *const Self).cast_mut(); // SAFETY: We have checked that `self` is not empty. let mut controls = unsafe { Self::control_iter(this) }; let mut base_index = 0; 'outer: while base_index < cap { while let Some(bit) = controls.next_full() { let index = base_index + bit; if index >= cap { break 'outer; } unsafe { T::check_bytes( Self::bucket_raw(this, index).as_ptr(), context, )?; } } controls.move_next(); base_index += Group::WIDTH; } // Verify that wrapped bytes are set correctly for i in cap..usize::min(2 * cap, control_count - cap) { let byte = unsafe { *Self::control_raw(this, i) }; let wrapped = unsafe { *Self::control_raw(this, i % cap) }; if wrapped != byte { fail!(UnwrappedControlByte { index: i }) } } Ok(()) }) } } } rkyv-0.8.9/src/collections/util.rs000064400000000000000000000053631046102023000152740ustar 00000000000000//! Utilities for archived collections. use core::{borrow::Borrow, error::Error, fmt, marker::PhantomData}; use munge::munge; use rancor::Fallible; use crate::{Archive, Place, Portable, Serialize}; /// An adapter which serializes and resolves its key and value references. pub struct EntryAdapter { /// The key to serialize and resolve. pub key: BK, /// The value to serialize and resolve. pub value: BV, _phantom: PhantomData<(K, V)>, } impl EntryAdapter { /// Returns a new `EntryAdapter` for the given key and value. pub fn new(key: BK, value: BV) -> Self { Self { key, value, _phantom: PhantomData, } } } /// A resolver for a key-value pair. pub struct EntryResolver { /// The key resolver. pub key: K, /// The value resolver. 
pub value: V, } impl Archive for EntryAdapter where BK: Borrow, BV: Borrow, K: Archive, V: Archive, { type Archived = Entry; type Resolver = EntryResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { munge!(let Entry { key, value } = out); K::resolve(self.key.borrow(), resolver.key, key); V::resolve(self.value.borrow(), resolver.value, value); } } impl Serialize for EntryAdapter where S: Fallible + ?Sized, BK: Borrow, BV: Borrow, K: Serialize, V: Serialize, { fn serialize( &self, serializer: &mut S, ) -> Result { Ok(EntryResolver { key: self.key.borrow().serialize(serializer)?, value: self.value.borrow().serialize(serializer)?, }) } } /// A key-value entry. #[derive(Debug, Portable, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct Entry { /// The entry's key. pub key: K, /// The entry's value. pub value: V, } /// An error describing that an iterator's length did not match the number of /// elements it yielded. #[derive(Debug)] pub struct IteratorLengthMismatch { /// The number of expected elements. pub expected: usize, /// The actual number of elements. pub actual: usize, } impl fmt::Display for IteratorLengthMismatch { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "iterator claimed that it contained {} elements, but yielded {} \ items during iteration", self.expected, self.actual, ) } } impl Error for IteratorLengthMismatch {} rkyv-0.8.9/src/de/mod.rs000064400000000000000000000001651046102023000131430ustar 00000000000000//! Deserialization traits, deserializers, and adapters. 
pub mod pooling; #[doc(inline)] pub use self::pooling::*; rkyv-0.8.9/src/de/pooling/alloc.rs000064400000000000000000000055631046102023000151340ustar 00000000000000use core::{error::Error, fmt, hash::BuildHasherDefault}; use hashbrown::hash_map::{Entry, HashMap}; use rancor::{fail, Source}; use crate::{ de::pooling::{ErasedPtr, Pooling, PoolingState}, hash::FxHasher64, }; #[derive(Debug)] struct SharedPointer { ptr: ErasedPtr, drop: unsafe fn(ErasedPtr), } impl Drop for SharedPointer { fn drop(&mut self) { unsafe { (self.drop)(self.ptr); } } } /// A shared pointer strategy that pools together deserializations of the same /// shared pointer. #[derive(Default)] pub struct Pool { shared_pointers: HashMap, BuildHasherDefault>, } impl Pool { /// Creates a new shared pointer unifier. #[inline] pub fn new() -> Self { Self::default() } /// Creates a new shared pointer unifier with initial capacity. #[inline] pub fn with_capacity(capacity: usize) -> Self { Self { shared_pointers: HashMap::with_capacity_and_hasher( capacity, Default::default(), ), } } } impl fmt::Debug for Pool { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.shared_pointers.iter()).finish() } } #[derive(Debug)] struct NotStarted; impl fmt::Display for NotStarted { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "shared pointer was not started pooling") } } impl Error for NotStarted {} #[derive(Debug)] struct AlreadyFinished; impl fmt::Display for AlreadyFinished { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "shared pointer was already finished pooling") } } impl Error for AlreadyFinished {} impl Pooling for Pool { fn start_pooling(&mut self, address: usize) -> PoolingState { match self.shared_pointers.entry(address) { Entry::Vacant(vacant) => { vacant.insert(None); PoolingState::Started } Entry::Occupied(occupied) => { if let Some(shared) = occupied.get() { PoolingState::Finished(shared.ptr) } else { PoolingState::Pending } 
} } } unsafe fn finish_pooling( &mut self, address: usize, ptr: ErasedPtr, drop: unsafe fn(ErasedPtr), ) -> Result<(), E> { match self.shared_pointers.entry(address) { Entry::Vacant(_) => fail!(NotStarted), Entry::Occupied(mut occupied) => { let inner = occupied.get_mut(); if inner.is_some() { fail!(AlreadyFinished) } else { *inner = Some(SharedPointer { ptr, drop }); Ok(()) } } } } } rkyv-0.8.9/src/de/pooling/core.rs000064400000000000000000000007651046102023000147710ustar 00000000000000use crate::de::pooling::{ErasedPtr, Pooling, PoolingState}; /// A shared pointer strategy that duplicates deserializations of the same /// shared pointer. #[derive(Debug, Default)] pub struct Unpool; impl Pooling for Unpool { fn start_pooling(&mut self, _: usize) -> PoolingState { PoolingState::Started } unsafe fn finish_pooling( &mut self, _: usize, _: ErasedPtr, _: unsafe fn(ErasedPtr), ) -> Result<(), E> { Ok(()) } } rkyv-0.8.9/src/de/pooling/mod.rs000064400000000000000000000172451046102023000146210ustar 00000000000000//! Deserializers that can be used standalone and provide basic capabilities. #[cfg(feature = "alloc")] mod alloc; mod core; use ::core::{ alloc::LayoutError, error::Error, fmt, mem::transmute, ptr::NonNull, }; use ptr_meta::{from_raw_parts_mut, metadata, DynMetadata, Pointee}; use rancor::{fail, Fallible, ResultExt as _, Source, Strategy}; #[cfg(feature = "alloc")] pub use self::alloc::*; pub use self::core::*; use crate::{traits::LayoutRaw, ArchiveUnsized, DeserializeUnsized}; /// Type-erased pointer metadata. 
#[derive(Clone, Copy)] pub union Metadata { unit: (), usize: usize, vtable: DynMetadata<()>, } impl fmt::Debug for Metadata { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "") } } impl From<()> for Metadata { fn from(value: ()) -> Self { Self { unit: value } } } impl From for Metadata { fn from(value: usize) -> Self { Self { usize: value } } } impl From> for Metadata { fn from(value: DynMetadata) -> Self { Self { vtable: unsafe { transmute::, DynMetadata<()>>(value) }, } } } // These impls are sound because `Metadata` has the type-level invariant that // `From` will only be called on `Metadata` created from pointers with the // corresponding metadata. impl From for () { fn from(value: Metadata) -> Self { unsafe { value.unit } } } impl From for usize { fn from(value: Metadata) -> Self { unsafe { value.usize } } } impl From for DynMetadata { fn from(value: Metadata) -> Self { unsafe { transmute::, DynMetadata>(value.vtable) } } } /// A type-erased pointer. #[derive(Clone, Copy, Debug)] pub struct ErasedPtr { data_address: NonNull<()>, metadata: Metadata, } impl ErasedPtr { /// Returns an erased pointer corresponding to the given pointer. #[inline] pub fn new(ptr: NonNull) -> Self where T: Pointee + ?Sized, T::Metadata: Into, { Self { data_address: ptr.cast(), metadata: metadata(ptr.as_ptr()).into(), } } /// Returns the data address corresponding to this erased pointer. #[inline] pub fn data_address(&self) -> *mut () { self.data_address.as_ptr() } /// # Safety /// /// `self` must be created from a valid pointer to `T`. #[inline] unsafe fn downcast_unchecked(&self) -> *mut T where T: Pointee + ?Sized, Metadata: Into, { from_raw_parts_mut(self.data_address.as_ptr(), self.metadata.into()) } } /// A deserializable shared pointer type. /// /// # Safety /// /// `alloc` and `from_value` must return pointers which are non-null, writeable, /// and properly aligned for `T`. 
pub unsafe trait SharedPointer { /// Allocates space for a value with the given metadata. fn alloc(metadata: T::Metadata) -> Result<*mut T, LayoutError>; /// Creates a new `Self` from a pointer to a valid `T`. /// /// # Safety /// /// `ptr` must have been allocated via `alloc`. `from_value` must not have /// been called on `ptr` yet. unsafe fn from_value(ptr: *mut T) -> *mut T; /// Drops a pointer created by `from_value`. /// /// # Safety /// /// - `ptr` must have been created using `from_value`. /// - `drop` must only be called once per `ptr`. unsafe fn drop(ptr: *mut T); } /// The result of starting to deserialize a shared pointer. pub enum PoolingState { /// The caller started pooling this value. They should proceed to /// deserialize the shared value and call `finish_pooling`. Started, /// Another caller started pooling this value, but has not finished yet. /// This can only occur with cyclic shared pointer structures, and so rkyv /// treats this as an error by default. Pending, /// This value has already been pooled. The caller should use the returned /// pointer to pool its value. Finished(ErasedPtr), } /// A shared pointer deserialization strategy. /// /// This trait is required to deserialize `Rc` and `Arc`. pub trait Pooling::Error> { /// Starts pooling the value associated with the given address. fn start_pooling(&mut self, address: usize) -> PoolingState; /// Finishes pooling the value associated with the given address. /// /// Returns an error if the given address was not pending. /// /// # Safety /// /// The given `drop` function must be valid to call with `ptr`. 
unsafe fn finish_pooling( &mut self, address: usize, ptr: ErasedPtr, drop: unsafe fn(ErasedPtr), ) -> Result<(), E>; } impl Pooling for Strategy where T: Pooling, { fn start_pooling(&mut self, address: usize) -> PoolingState { T::start_pooling(self, address) } unsafe fn finish_pooling( &mut self, address: usize, ptr: ErasedPtr, drop: unsafe fn(ErasedPtr), ) -> Result<(), E> { // SAFETY: The safety requirements for `finish_pooling` are the same as // the requirements for calling this function. unsafe { T::finish_pooling(self, address, ptr, drop) } } } #[derive(Debug)] struct CyclicSharedPointerError; impl fmt::Display for CyclicSharedPointerError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "encountered cyclic shared pointers while deserializing\nhelp: \ change your deserialization strategy to `Unpool` or use the \ `Unpool` wrapper type to break the cycle", ) } } impl Error for CyclicSharedPointerError {} /// Helper methods for [`Pooling`]. pub trait PoolingExt: Pooling { /// Checks whether the given reference has been deserialized and either uses /// the existing shared pointer to it, or deserializes it and converts /// it to a shared pointer with `to_shared`. fn deserialize_shared( &mut self, value: &T::Archived, ) -> Result<*mut T, Self::Error> where T: ArchiveUnsized + Pointee + LayoutRaw + ?Sized, T::Metadata: Into, Metadata: Into, T::Archived: DeserializeUnsized, P: SharedPointer, Self: Fallible, E: Source, { unsafe fn drop_shared(ptr: ErasedPtr) where T: Pointee + ?Sized, Metadata: Into, P: SharedPointer, { unsafe { P::drop(ptr.downcast_unchecked::()) } } let address = value as *const T::Archived as *const () as usize; let metadata = T::Archived::deserialize_metadata(value); match self.start_pooling(address) { PoolingState::Started => { let out = P::alloc(metadata).into_error()?; unsafe { value.deserialize_unsized(self, out)? 
}; let ptr = unsafe { NonNull::new_unchecked(P::from_value(out)) }; unsafe { self.finish_pooling( address, ErasedPtr::new(ptr), drop_shared::, )?; } Ok(ptr.as_ptr()) } PoolingState::Pending => fail!(CyclicSharedPointerError), PoolingState::Finished(ptr) => { Ok(from_raw_parts_mut(ptr.data_address.as_ptr(), metadata)) } } } } impl PoolingExt for T where T: Pooling + ?Sized {} rkyv-0.8.9/src/ffi.rs000064400000000000000000000107761046102023000125510ustar 00000000000000//! Archived versions of FFI types. use core::{ borrow::Borrow, cmp, ffi::CStr, fmt, hash, ops::{Deref, Index, RangeFull}, }; use munge::munge; use rancor::Fallible; use crate::{ primitive::FixedUsize, ser::Writer, ArchiveUnsized, Place, Portable, RelPtr, SerializeUnsized, }; /// An archived [`CString`](crate::alloc::ffi::CString). /// /// Uses a [`RelPtr`] to a `CStr` under the hood. #[derive(Portable)] #[rkyv(crate)] #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] #[repr(transparent)] pub struct ArchivedCString { ptr: RelPtr, } impl ArchivedCString { /// Returns the contents of this CString as a slice of bytes. /// /// The returned slice does **not** contain the trailing nul terminator, and /// it is guaranteed to not have any interior nul bytes. If you need the /// nul terminator, use /// [`as_bytes_with_nul`][ArchivedCString::as_bytes_with_nul()] instead. #[inline] pub fn as_bytes(&self) -> &[u8] { self.as_c_str().to_bytes() } /// Equivalent to [`as_bytes`][ArchivedCString::as_bytes()] except that the /// returned slice includes the trailing nul terminator. #[inline] pub fn as_bytes_with_nul(&self) -> &[u8] { self.as_c_str().to_bytes_with_nul() } /// Extracts a `CStr` slice containing the entire string. #[inline] pub fn as_c_str(&self) -> &CStr { unsafe { &*self.ptr.as_ptr() } } /// Resolves an archived C string from the given C string and parameters. 
#[inline] pub fn resolve_from_c_str( c_str: &CStr, resolver: CStringResolver, out: Place, ) { munge!(let ArchivedCString { ptr } = out); RelPtr::emplace_unsized( resolver.pos as usize, c_str.archived_metadata(), ptr, ); } /// Serializes a C string. pub fn serialize_from_c_str( c_str: &CStr, serializer: &mut S, ) -> Result { Ok(CStringResolver { pos: c_str.serialize_unsized(serializer)? as FixedUsize, }) } } impl AsRef for ArchivedCString { fn as_ref(&self) -> &CStr { self.as_c_str() } } impl Borrow for ArchivedCString { #[inline] fn borrow(&self) -> &CStr { self.as_c_str() } } impl fmt::Debug for ArchivedCString { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.as_c_str().fmt(f) } } impl Deref for ArchivedCString { type Target = CStr; #[inline] fn deref(&self) -> &Self::Target { self.as_c_str() } } impl Eq for ArchivedCString {} impl hash::Hash for ArchivedCString { fn hash(&self, state: &mut H) { self.as_bytes_with_nul().hash(state); } } impl Index for ArchivedCString { type Output = CStr; #[inline] fn index(&self, _: RangeFull) -> &Self::Output { self.as_c_str() } } impl Ord for ArchivedCString { #[inline] fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_bytes().cmp(other.as_bytes()) } } impl PartialEq for ArchivedCString { #[inline] fn eq(&self, other: &Self) -> bool { self.as_bytes() == other.as_bytes() } } impl PartialEq<&CStr> for ArchivedCString { #[inline] fn eq(&self, other: &&CStr) -> bool { PartialEq::eq(self.as_c_str(), other) } } impl PartialEq for &CStr { #[inline] fn eq(&self, other: &ArchivedCString) -> bool { PartialEq::eq(other.as_c_str(), self) } } impl PartialOrd for ArchivedCString { #[inline] fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } /// The resolver for `CString`. 
pub struct CStringResolver { pos: FixedUsize, } #[cfg(feature = "bytecheck")] mod verify { use core::ffi::CStr; use bytecheck::{ rancor::{Fallible, Source}, CheckBytes, Verify, }; use crate::{ ffi::ArchivedCString, validation::{ArchiveContext, ArchiveContextExt}, }; unsafe impl Verify for ArchivedCString where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, { fn verify(&self, context: &mut C) -> Result<(), C::Error> { let ptr = self.ptr.as_ptr_wrapping(); context.in_subtree(ptr, |context| unsafe { CStr::check_bytes(ptr, context) }) } } } rkyv-0.8.9/src/fmt.rs000064400000000000000000000004211046102023000125550ustar 00000000000000use core::fmt; const PTR_WIDTH: usize = (usize::BITS / 4 + 2) as usize; pub struct Pointer(pub usize); impl fmt::Display for Pointer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:#0w$x}", self.0, w = PTR_WIDTH) } } rkyv-0.8.9/src/hash.rs000064400000000000000000000060651046102023000127240ustar 00000000000000//! Hashing support for archived hash maps and sets. use core::{ hash::{Hash, Hasher}, ops::BitXor as _, }; use crate::primitive::{FixedIsize, FixedUsize}; /// A cross-platform 64-bit implementation of fxhash. 
#[derive(Default)] pub struct FxHasher64 { hash: u64, } #[inline] fn hash_word(hash: u64, word: u64) -> u64 { const ROTATE: u32 = 5; const SEED: u64 = 0x51_7c_c1_b7_27_22_0a_95; hash.rotate_left(ROTATE).bitxor(word).wrapping_mul(SEED) } #[inline] fn hash_bytes(mut hash: u64, bytes: &[u8]) -> u64 { let ptr = bytes.as_ptr(); let len = bytes.len(); for i in 0..len / 8 { let bytes = unsafe { ptr.cast::<[u8; 8]>().add(i).read_unaligned() }; hash = hash_word(hash, u64::from_le_bytes(bytes)); } if bytes.len() & 4 != 0 { let bytes = unsafe { ptr.add(bytes.len() & !7).cast::<[u8; 4]>().read_unaligned() }; hash = hash_word(hash, u32::from_le_bytes(bytes).into()); } if bytes.len() & 2 != 0 { let bytes = unsafe { ptr.add(bytes.len() & !3).cast::<[u8; 2]>().read_unaligned() }; hash = hash_word(hash, u16::from_le_bytes(bytes).into()); } if bytes.len() & 1 != 0 { let byte = unsafe { ptr.add(len - 1).read() }; hash = hash_word(hash, byte.into()); } hash } impl Hasher for FxHasher64 { #[inline] fn write(&mut self, bytes: &[u8]) { self.hash = hash_bytes(self.hash, bytes); } #[inline] fn finish(&self) -> u64 { self.hash } #[inline] fn write_u8(&mut self, i: u8) { self.hash = hash_word(self.hash, i as u64); } #[inline] fn write_u16(&mut self, i: u16) { self.hash = hash_word(self.hash, i as u64); } #[inline] fn write_u32(&mut self, i: u32) { self.hash = hash_word(self.hash, i as u64); } #[inline] fn write_u64(&mut self, i: u64) { self.hash = hash_word(self.hash, i); } #[inline] fn write_u128(&mut self, i: u128) { let bytes = i.to_ne_bytes(); let ptr = bytes.as_ptr().cast::<[u8; 8]>(); #[cfg(target_endian = "little")] let (first, second) = (unsafe { ptr.read_unaligned() }, unsafe { ptr.add(1).read_unaligned() }); #[cfg(target_endian = "big")] let (first, second) = (unsafe { ptr.add(1).read_unaligned() }, unsafe { ptr.read_unaligned() }); self.hash = hash_word( hash_word(self.hash, u64::from_ne_bytes(first)), u64::from_ne_bytes(second), ); } #[inline] fn write_usize(&mut self, i: usize) 
{ self.hash = hash_word(self.hash, i as FixedUsize as u64); } #[inline] fn write_isize(&mut self, i: isize) { self.write_i64(i as FixedIsize as i64) } } /// Hashes the given value with the default value of the specified `Hasher`. pub fn hash_value(value: &Q) -> u64 where Q: Hash + ?Sized, { let mut state = H::default(); value.hash(&mut state); state.finish() } rkyv-0.8.9/src/impls/alloc/boxed.rs000064400000000000000000000113551046102023000153160ustar 00000000000000use core::cmp; use rancor::{Fallible, ResultExt as _, Source}; use crate::{ alloc::{alloc::alloc, boxed::Box}, boxed::{ArchivedBox, BoxResolver}, niche::option_box::ArchivedOptionBox, traits::{ArchivePointee, LayoutRaw}, Archive, ArchiveUnsized, Deserialize, DeserializeUnsized, Place, Serialize, SerializeUnsized, }; impl Archive for Box { type Archived = ArchivedBox; type Resolver = BoxResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedBox::resolve_from_ref(self.as_ref(), resolver, out); } } impl Serialize for Box where T: SerializeUnsized + ?Sized, S: Fallible + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedBox::serialize_from_ref(self.as_ref(), serializer) } } impl Deserialize, D> for ArchivedBox where T: ArchiveUnsized + LayoutRaw + ?Sized, T::Archived: DeserializeUnsized, D: Fallible + ?Sized, D::Error: Source, { fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { let metadata = self.get().deserialize_metadata(); let layout = T::layout_raw(metadata).into_error()?; let data_address = if layout.size() > 0 { unsafe { alloc(layout) } } else { crate::polyfill::dangling(&layout).as_ptr() }; let out = ptr_meta::from_raw_parts_mut(data_address.cast(), metadata); unsafe { self.get().deserialize_unsized(deserializer, out)?; } unsafe { Ok(Box::from_raw(out)) } } } impl PartialEq> for ArchivedBox where T: ArchivePointee + PartialEq + ?Sized, U: ?Sized, { fn eq(&self, other: &Box) -> bool { self.get().eq(other.as_ref()) } } impl PartialOrd> for 
ArchivedBox where T: ArchivePointee + PartialOrd + ?Sized, U: ?Sized, { fn partial_cmp(&self, other: &Box) -> Option { self.get().partial_cmp(other.as_ref()) } } impl PartialEq>> for ArchivedOptionBox where T: ?Sized, U: ArchivePointee + PartialEq + ?Sized, { fn eq(&self, other: &Option>) -> bool { match (self.as_deref(), other.as_deref()) { (Some(self_value), Some(other_value)) => self_value.eq(other_value), (None, None) => true, _ => false, } } } #[cfg(test)] mod tests { use crate::{ alloc::{boxed::Box, string::ToString, vec, vec::Vec}, api::test::roundtrip, }; #[test] fn roundtrip_box() { roundtrip(&Box::new(42)); roundtrip(&Box::new([1, 2, 3, 4, 5, 6])); } #[test] fn roundtrip_boxed_str() { roundtrip(&"".to_string().into_boxed_str()); roundtrip(&"hello world".to_string().into_boxed_str()); } #[test] fn roundtrip_boxed_slice() { roundtrip(&Vec::::new().into_boxed_slice()); roundtrip(&vec![1, 2, 3, 4].into_boxed_slice()); } #[test] fn roundtrip_box_zsts() { roundtrip(&Box::new(())); roundtrip(&Vec::<()>::new().into_boxed_slice()); roundtrip(&vec![(), (), (), ()].into_boxed_slice()); } #[test] fn roundtrip_option_box() { roundtrip(&Some(Box::new(42))); roundtrip(&Some(Box::new([1, 2, 3, 4, 5, 6]))); } #[test] fn roundtrip_option_box_str() { roundtrip(&Some("".to_string().into_boxed_str())); roundtrip(&Some("hello world".to_string().into_boxed_str())); } #[test] fn roundtrip_option_box_slice() { roundtrip(&Some(Vec::::new().into_boxed_slice())); roundtrip(&Some(vec![1, 2, 3, 4].into_boxed_slice())); } #[test] fn roundtrip_result_box() { roundtrip(&Ok::<_, ()>(Box::new(42))); roundtrip(&Ok::<_, ()>(Box::new([1, 2, 3, 4, 5, 6]))); roundtrip(&Err::<(), _>(Box::new(42))); roundtrip(&Err::<(), _>(Box::new([1, 2, 3, 4, 5, 6]))); } #[test] fn roundtrip_result_box_str() { roundtrip(&Ok::<_, ()>("".to_string().into_boxed_str())); roundtrip(&Ok::<_, ()>("hello world".to_string().into_boxed_str())); roundtrip(&Err::<(), _>("".to_string().into_boxed_str())); 
roundtrip(&Err::<(), _>("hello world".to_string().into_boxed_str())); } #[test] fn roundtrip_result_box_slice() { roundtrip(&Ok::<_, ()>(Vec::::new().into_boxed_slice())); roundtrip(&Ok::<_, ()>(vec![1, 2, 3, 4].into_boxed_slice())); roundtrip(&Err::<(), _>(Vec::::new().into_boxed_slice())); roundtrip(&Err::<(), _>(vec![1, 2, 3, 4].into_boxed_slice())); } } rkyv-0.8.9/src/impls/alloc/collections/btree_map.rs000064400000000000000000000202141046102023000204630ustar 00000000000000use core::ops::ControlFlow; use rancor::{Fallible, Source}; use crate::{ alloc::collections::BTreeMap, collections::btree_map::{ArchivedBTreeMap, BTreeMapResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for BTreeMap where K::Archived: Ord, { type Archived = ArchivedBTreeMap; type Resolver = BTreeMapResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { Self::Archived::resolve_from_len(self.len(), resolver, out); } } impl Serialize for BTreeMap where K: Serialize + Ord, K::Archived: Ord, V: Serialize, S: Allocator + Fallible + Writer + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { Self::Archived::serialize_from_ordered_iter::<_, _, _, K, V, _>( self.iter(), serializer, ) } } impl Deserialize, D> for ArchivedBTreeMap where K: Archive + Ord, K::Archived: Deserialize + Ord, V: Archive, V::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = BTreeMap::new(); let r = self.visit(|ak, av| { let k = match ak.deserialize(deserializer) { Ok(k) => k, Err(e) => return ControlFlow::Break(e), }; let v = match av.deserialize(deserializer) { Ok(v) => v, Err(e) => return ControlFlow::Break(e), }; result.insert(k, v); ControlFlow::Continue(()) }); match r { Some(e) => Err(e), None => Ok(result), } } } impl PartialEq> for ArchivedBTreeMap where AK: PartialEq, AV: PartialEq, { fn eq(&self, other: &BTreeMap) -> bool { if self.len() != 
other.len() { false } else { let mut iter = other.iter(); self.visit(|ak, av| { if let Some((k, v)) = iter.next() { if ak.eq(k) && av.eq(v) { return ControlFlow::Continue(()); } } ControlFlow::Break(()) }) .is_none() } } } #[cfg(test)] mod tests { use core::ops::ControlFlow; use crate::{ alloc::{ collections::BTreeMap, string::{String, ToString}, vec, vec::Vec, }, api::test::{roundtrip, to_archived}, collections::btree_map::ArchivedBTreeMap, primitive::ArchivedI32, seal::Seal, Archive, Deserialize, Serialize, }; #[test] fn roundtrip_btree_map() { let mut value = BTreeMap::new(); value.insert("foo".to_string(), 10); value.insert("bar".to_string(), 20); value.insert("baz".to_string(), 40); value.insert("bat".to_string(), 80); roundtrip(&value); } #[test] fn roundtrip_empty_btree_map() { roundtrip(&BTreeMap::::new()); } #[test] fn roundtrip_btree_map_zst() { let mut value = BTreeMap::new(); value.insert(0, ()); value.insert(1, ()); roundtrip(&value); let mut value = BTreeMap::new(); value.insert((), 10); roundtrip(&value); let mut value = BTreeMap::new(); value.insert((), ()); roundtrip(&value); } #[test] fn roundtrip_btree_map_increasing_sizes() { // These sizes are chosen based on a branching factor of 6. // 0-5: Leaf root node, variably-filled // 6: Inner root node and one leaf node with one element // 17: Inner root node, two filled leaf nodes, and one with two elements // 35: Two full levels // 36: Two full levels and one additional element // 112: Two full levels and a third level half-filled // 215: Three full levels const SIZES: &[usize] = &[0, 1, 2, 3, 4, 5, 6, 17, 35, 36, 112, 215]; for &size in SIZES { let mut value = BTreeMap::new(); for i in 0..size { value.insert(i.to_string(), i as i32); } roundtrip(&value); } } // This test creates structures too big to fit in 16-bit offsets, and MIRI // can't run it quickly enough. 
#[cfg(not(any(feature = "pointer_width_16", miri)))] #[test] fn roundtrip_large_btree_map() { const ENTRIES: usize = 100_000; let mut value = BTreeMap::new(); for i in 0..ENTRIES { value.insert(i.to_string(), i as i32); } roundtrip(&value); } #[test] fn roundtrip_btree_map_with_struct_member() { #[derive( Archive, Serialize, Deserialize, Debug, Default, PartialEq, )] #[rkyv(crate, compare(PartialEq), derive(Debug))] pub struct MyType { pub some_list: BTreeMap>, pub values: Vec, } let mut value = MyType::default(); value .some_list .entry("Asdf".to_string()) .and_modify(|e| e.push(1.0)) .or_insert_with(|| vec![2.0]); roundtrip(&value); } #[test] fn mutable_btree_map() { let mut value = BTreeMap::new(); value.insert("foo".to_string(), 10); value.insert("bar".to_string(), 20); value.insert("baz".to_string(), 40); value.insert("bat".to_string(), 80); to_archived(&value, |mut archived| { ArchivedBTreeMap::visit_seal( archived.as_mut(), |_, mut v: Seal<'_, ArchivedI32>| { *v = ArchivedI32::from_native(v.to_native() + 10); ControlFlow::<(), ()>::Continue(()) }, ); assert_eq!(archived.get("foo").map(|x| x.to_native()), Some(20)); assert_eq!(archived.get("bar").map(|x| x.to_native()), Some(30)); assert_eq!(archived.get("baz").map(|x| x.to_native()), Some(50)); assert_eq!(archived.get("bat").map(|x| x.to_native()), Some(90)); *ArchivedBTreeMap::get_seal(archived.as_mut(), "foo").unwrap() = ArchivedI32::from_native(123); *ArchivedBTreeMap::get_seal(archived.as_mut(), "bat").unwrap() = ArchivedI32::from_native(456); assert_eq!(archived.get("foo").map(|x| x.to_native()), Some(123)); assert_eq!(archived.get("bat").map(|x| x.to_native()), Some(456)); }); } #[test] fn btree_map_iter() { let mut value = BTreeMap::::new(); value.insert("foo".to_string(), 10); value.insert("bar".to_string(), 20); value.insert("baz".to_string(), 40); value.insert("bat".to_string(), 80); to_archived(&value, |archived| { let mut i = archived.iter().map(|(k, v)| (k.as_str(), v.to_native())); 
assert_eq!(i.next(), Some(("bar", 20))); assert_eq!(i.next(), Some(("bat", 80))); assert_eq!(i.next(), Some(("baz", 40))); assert_eq!(i.next(), Some(("foo", 10))); assert_eq!(i.next(), None); }); } #[test] fn btree_map_mutable_iter() { let mut value = BTreeMap::::new(); value.insert("foo".to_string(), 10); value.insert("bar".to_string(), 20); value.insert("baz".to_string(), 40); value.insert("bat".to_string(), 80); to_archived(&value, |archived| { let mut i = archived.iter().map(|(k, v)| (k.as_str(), v.to_native())); assert_eq!(i.next(), Some(("bar", 20))); assert_eq!(i.next(), Some(("bat", 80))); assert_eq!(i.next(), Some(("baz", 40))); assert_eq!(i.next(), Some(("foo", 10))); assert_eq!(i.next(), None); }); } } rkyv-0.8.9/src/impls/alloc/collections/btree_set.rs000064400000000000000000000054371046102023000205130ustar 00000000000000use core::ops::ControlFlow; use rancor::{Fallible, Source}; use crate::{ alloc::collections::BTreeSet, collections::btree_set::{ArchivedBTreeSet, BTreeSetResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for BTreeSet where K::Archived: Ord, { type Archived = ArchivedBTreeSet; type Resolver = BTreeSetResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedBTreeSet::::resolve_from_len( self.len(), resolver, out, ); } } impl Serialize for BTreeSet where K: Serialize + Ord, K::Archived: Ord, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { Self::Archived::serialize_from_ordered_iter::<_, K, _>( self.iter(), serializer, ) } } impl Deserialize, D> for ArchivedBTreeSet where K: Archive + Ord, K::Archived: Deserialize + Ord, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = BTreeSet::new(); let r = self.visit(|ak| { let k = match ak.deserialize(deserializer) { Ok(k) => k, Err(e) => return ControlFlow::Break(e), }; result.insert(k); 
ControlFlow::Continue(()) }); match r { Some(e) => Err(e), None => Ok(result), } } } impl> PartialEq> for ArchivedBTreeSet { fn eq(&self, other: &BTreeSet) -> bool { if self.len() != other.len() { false } else { let mut iter = other.iter(); self.visit(|ak| { if let Some(k) = iter.next() { if ak.eq(k) { return ControlFlow::Continue(()); } } ControlFlow::Break(()) }) .is_none() } } } #[cfg(test)] mod tests { use crate::{ alloc::{collections::BTreeSet, string::ToString}, api::test::roundtrip, }; #[test] fn roundtrip_btree_set() { let mut value = BTreeSet::new(); value.insert("foo".to_string()); value.insert("bar".to_string()); value.insert("baz".to_string()); value.insert("bat".to_string()); roundtrip(&value); } #[test] fn roundtrip_btree_set_zst() { let mut value = BTreeSet::new(); value.insert(()); roundtrip(&value); } } rkyv-0.8.9/src/impls/alloc/collections/mod.rs000064400000000000000000000000551046102023000173050ustar 00000000000000mod btree_map; mod btree_set; mod vec_deque; rkyv-0.8.9/src/impls/alloc/collections/vec_deque.rs000064400000000000000000000071001046102023000204640ustar 00000000000000use core::cmp::Ordering; use rancor::{Fallible, ResultExt, Source}; use crate::{ alloc::{alloc::alloc, boxed::Box, collections::VecDeque, vec::Vec}, ser::{Allocator, Writer}, traits::LayoutRaw, vec::{ArchivedVec, VecResolver}, Archive, Deserialize, DeserializeUnsized, Place, Serialize, }; impl Archive for VecDeque { type Archived = ArchivedVec; type Resolver = VecResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedVec::resolve_from_len(self.len(), resolver, out); } } impl Serialize for VecDeque where T: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { let (a, b) = self.as_slices(); if b.is_empty() { ArchivedVec::::serialize_from_slice(a, serializer) } else if a.is_empty() { ArchivedVec::::serialize_from_slice(b, serializer) } else { ArchivedVec::::serialize_from_iter::( self.iter(), 
serializer, ) } } } impl Deserialize, D> for ArchivedVec where T: Archive, [T::Archived]: DeserializeUnsized<[T], D>, D: Fallible + ?Sized, D::Error: Source, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let metadata = self.as_slice().deserialize_metadata(); let layout = <[T] as LayoutRaw>::layout_raw(metadata).into_error()?; let data_address = if layout.size() > 0 { unsafe { alloc(layout) } } else { crate::polyfill::dangling(&layout).as_ptr() }; let out = ptr_meta::from_raw_parts_mut(data_address.cast(), metadata); unsafe { self.as_slice().deserialize_unsized(deserializer, out)?; } let boxed = unsafe { Box::<[T]>::from_raw(out) }; Ok(VecDeque::from(Vec::from(boxed))) } } impl, U> PartialEq> for ArchivedVec { fn eq(&self, other: &VecDeque) -> bool { self.len() == other.len() && self.iter().eq(other.iter()) } } impl PartialOrd> for ArchivedVec { fn partial_cmp(&self, other: &VecDeque) -> Option { self.iter().partial_cmp(other.iter()) } } #[cfg(test)] mod tests { use crate::{ access_unchecked, alloc::collections::VecDeque, api::test::deserialize, rancor::Error, to_bytes, vec::ArchivedVec, Archived, }; #[test] fn vecdeque() { for n in 2..10 { for k in 1..n { let mut deque = VecDeque::with_capacity(n as usize + 1); for x in k..n { deque.push_back(x); } for x in (0..k).rev() { deque.push_front(x); } assert!(deque.iter().copied().eq(0..n)); // Now serialize and deserialize and verify that the // deserialized version contains `0..n`. 
let bytes = to_bytes::(&deque).unwrap(); let archived = unsafe { access_unchecked::>>(&bytes) }; assert!(archived.iter().copied().eq(0..n)); let deserialized = deserialize::>(archived); assert_eq!(deque, deserialized); } } } } rkyv-0.8.9/src/impls/alloc/ffi.rs000064400000000000000000000044541046102023000147630ustar 00000000000000use core::ffi::CStr; use rancor::{Fallible, ResultExt, Source}; use crate::{ alloc::{alloc::alloc, boxed::Box, ffi::CString}, ffi::{ArchivedCString, CStringResolver}, ser::Writer, traits::LayoutRaw, Archive, Deserialize, DeserializeUnsized, Place, Serialize, }; // CString impl Archive for CString { type Archived = ArchivedCString; type Resolver = CStringResolver; #[inline] fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedCString::resolve_from_c_str(self.as_c_str(), resolver, out); } } impl Serialize for CString { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedCString::serialize_from_c_str(self.as_c_str(), serializer) } } impl Deserialize for ArchivedCString where D: Fallible + ?Sized, D::Error: Source, CStr: DeserializeUnsized, { fn deserialize(&self, deserializer: &mut D) -> Result { let metadata = self.as_c_str().deserialize_metadata(); let layout = ::layout_raw(metadata).into_error()?; let data_address = if layout.size() > 0 { unsafe { alloc(layout) } } else { crate::polyfill::dangling(&layout).as_ptr() }; let out = ptr_meta::from_raw_parts_mut(data_address.cast(), metadata); unsafe { self.as_c_str().deserialize_unsized(deserializer, out)?; } let boxed = unsafe { Box::::from_raw(out) }; Ok(CString::from(boxed)) } } impl PartialEq for ArchivedCString { #[inline] fn eq(&self, other: &CString) -> bool { PartialEq::eq(self.as_c_str(), other.as_c_str()) } } impl PartialEq for CString { #[inline] fn eq(&self, other: &ArchivedCString) -> bool { PartialEq::eq(other.as_c_str(), self.as_c_str()) } } #[cfg(test)] mod tests { use crate::{ alloc::{ffi::CString, string::String}, api::test::roundtrip, }; #[test] fn 
roundtrip_c_string() { let value = unsafe { CString::from_vec_unchecked( String::from("hello world").into_bytes(), ) }; roundtrip(&value); } } rkyv-0.8.9/src/impls/alloc/mod.rs000064400000000000000000000001141046102023000147630ustar 00000000000000mod boxed; mod collections; mod ffi; mod rc; mod string; mod vec; mod with; rkyv-0.8.9/src/impls/alloc/rc/atomic.rs000064400000000000000000000107421046102023000160740ustar 00000000000000use core::alloc::LayoutError; use ptr_meta::{from_raw_parts_mut, Pointee}; use rancor::{Fallible, Source}; use crate::{ alloc::{alloc::alloc, boxed::Box, sync}, de::{Metadata, Pooling, PoolingExt as _, SharedPointer}, rc::{ArcFlavor, ArchivedRc, ArchivedRcWeak, RcResolver, RcWeakResolver}, ser::{Sharing, Writer}, traits::{ArchivePointee, LayoutRaw}, Archive, ArchiveUnsized, Deserialize, DeserializeUnsized, Place, Serialize, SerializeUnsized, }; // Arc impl Archive for sync::Arc { type Archived = ArchivedRc; type Resolver = RcResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedRc::resolve_from_ref(self.as_ref(), resolver, out); } } impl Serialize for sync::Arc where T: SerializeUnsized + ?Sized + 'static, S: Fallible + Writer + Sharing + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedRc::::serialize_from_ref( self.as_ref(), serializer, ) } } unsafe impl SharedPointer for sync::Arc { fn alloc(metadata: T::Metadata) -> Result<*mut T, LayoutError> { let layout = T::layout_raw(metadata)?; let data_address = if layout.size() > 0 { unsafe { alloc(layout) } } else { crate::polyfill::dangling(&layout).as_ptr() }; let ptr = from_raw_parts_mut(data_address.cast(), metadata); Ok(ptr) } unsafe fn from_value(ptr: *mut T) -> *mut T { let arc = sync::Arc::::from(unsafe { Box::from_raw(ptr) }); sync::Arc::into_raw(arc).cast_mut() } unsafe fn drop(ptr: *mut T) { drop(unsafe { sync::Arc::from_raw(ptr) }); } } impl Deserialize, D> for ArchivedRc where T: ArchiveUnsized + LayoutRaw + Pointee + 
?Sized + 'static, T::Archived: DeserializeUnsized, T::Metadata: Into, Metadata: Into, D: Fallible + Pooling + ?Sized, D::Error: Source, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let raw_shared_ptr = deserializer.deserialize_shared::<_, sync::Arc>(self.get())?; unsafe { sync::Arc::::increment_strong_count(raw_shared_ptr); } unsafe { Ok(sync::Arc::::from_raw(raw_shared_ptr)) } } } impl PartialEq> for ArchivedRc where T: ArchivePointee + PartialEq + ?Sized, U: ?Sized, { fn eq(&self, other: &sync::Arc) -> bool { self.get().eq(other.as_ref()) } } // sync::Weak impl Archive for sync::Weak { type Archived = ArchivedRcWeak; type Resolver = RcWeakResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedRcWeak::resolve_from_ref( self.upgrade().as_ref().map(|v| v.as_ref()), resolver, out, ); } } impl Serialize for sync::Weak where T: SerializeUnsized + ?Sized + 'static, S: Fallible + Writer + Sharing + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedRcWeak::::serialize_from_ref( self.upgrade().as_ref().map(|v| v.as_ref()), serializer, ) } } // Deserialize can only be implemented for sized types because weak pointers // don't have from/into raw functions. impl Deserialize, D> for ArchivedRcWeak where // Deserialize can only be implemented for sized types because weak pointers // to unsized types don't have `new` functions. 
T: ArchiveUnsized + LayoutRaw + Pointee // + ?Sized + 'static, T::Archived: DeserializeUnsized, T::Metadata: Into, Metadata: Into, D: Fallible + Pooling + ?Sized, D::Error: Source, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { Ok(match self.upgrade() { None => sync::Weak::new(), Some(r) => sync::Arc::downgrade(&r.deserialize(deserializer)?), }) } } rkyv-0.8.9/src/impls/alloc/rc/mod.rs000064400000000000000000000332261046102023000154010ustar 00000000000000#[cfg(target_has_atomic = "ptr")] mod atomic; use core::alloc::LayoutError; use ptr_meta::{from_raw_parts_mut, Pointee}; use rancor::{Fallible, Source}; use crate::{ alloc::{alloc::alloc, boxed::Box, rc}, de::{Metadata, Pooling, PoolingExt as _, SharedPointer}, rc::{ArchivedRc, ArchivedRcWeak, RcFlavor, RcResolver, RcWeakResolver}, ser::{Sharing, Writer}, traits::{ArchivePointee, LayoutRaw}, Archive, ArchiveUnsized, Deserialize, DeserializeUnsized, Place, Serialize, SerializeUnsized, }; // Rc impl Archive for rc::Rc { type Archived = ArchivedRc; type Resolver = RcResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedRc::resolve_from_ref(self.as_ref(), resolver, out); } } impl Serialize for rc::Rc where T: SerializeUnsized + ?Sized + 'static, S: Fallible + Writer + Sharing + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedRc::::serialize_from_ref( self.as_ref(), serializer, ) } } unsafe impl SharedPointer for rc::Rc { fn alloc(metadata: T::Metadata) -> Result<*mut T, LayoutError> { let layout = T::layout_raw(metadata)?; let data_address = if layout.size() > 0 { unsafe { alloc(layout) } } else { crate::polyfill::dangling(&layout).as_ptr() }; let ptr = from_raw_parts_mut(data_address.cast(), metadata); Ok(ptr) } unsafe fn from_value(ptr: *mut T) -> *mut T { let rc = rc::Rc::::from(unsafe { Box::from_raw(ptr) }); rc::Rc::into_raw(rc).cast_mut() } unsafe fn drop(ptr: *mut T) { drop(unsafe { rc::Rc::from_raw(ptr) }); } } impl 
Deserialize, D> for ArchivedRc where T: ArchiveUnsized + LayoutRaw + Pointee + ?Sized + 'static, T::Archived: DeserializeUnsized, T::Metadata: Into, Metadata: Into, D: Fallible + Pooling + ?Sized, D::Error: Source, { fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { let raw_shared_ptr = deserializer.deserialize_shared::<_, rc::Rc>(self.get())?; unsafe { rc::Rc::::increment_strong_count(raw_shared_ptr); } unsafe { Ok(rc::Rc::::from_raw(raw_shared_ptr)) } } } impl PartialEq> for ArchivedRc where T: ArchivePointee + PartialEq + ?Sized, U: ?Sized, { fn eq(&self, other: &rc::Rc) -> bool { self.get().eq(other.as_ref()) } } // rc::Weak impl Archive for rc::Weak { type Archived = ArchivedRcWeak; type Resolver = RcWeakResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedRcWeak::resolve_from_ref( self.upgrade().as_ref().map(|v| v.as_ref()), resolver, out, ); } } impl Serialize for rc::Weak where T: SerializeUnsized + ?Sized + 'static, S: Fallible + Writer + Sharing + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedRcWeak::::serialize_from_ref( self.upgrade().as_ref().map(|v| v.as_ref()), serializer, ) } } impl Deserialize, D> for ArchivedRcWeak where // Deserialize can only be implemented for sized types because weak pointers // to unsized types don't have `new` functions. 
T: ArchiveUnsized + LayoutRaw + Pointee // + ?Sized + 'static, T::Archived: DeserializeUnsized, T::Metadata: Into, Metadata: Into, D: Fallible + Pooling + ?Sized, D::Error: Source, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { Ok(match self.upgrade() { None => rc::Weak::new(), Some(r) => rc::Rc::downgrade(&r.deserialize(deserializer)?), }) } } #[cfg(test)] mod tests { use munge::munge; use rancor::{Failure, Panic}; use crate::{ access_unchecked, access_unchecked_mut, alloc::{ rc::{Rc, Weak}, string::{String, ToString}, vec, }, api::{ deserialize_using, test::{roundtrip, to_archived}, }, de::Pool, rc::{ArchivedRc, ArchivedRcWeak}, to_bytes, Archive, Deserialize, Serialize, }; #[test] fn roundtrip_rc() { #[derive(Debug, Eq, PartialEq, Archive, Deserialize, Serialize)] #[rkyv(crate, compare(PartialEq), derive(Debug))] struct Test { a: Rc, b: Rc, } let shared = Rc::new(10); let value = Test { a: shared.clone(), b: shared.clone(), }; to_archived(&value, |mut archived| { assert_eq!(*archived, value); munge!(let ArchivedTest { a, .. } = archived.as_mut()); unsafe { *ArchivedRc::get_seal_unchecked(a) = 42u32.into(); } assert_eq!(*archived.a, 42); assert_eq!(*archived.b, 42); munge!(let ArchivedTest { b, .. 
} = archived.as_mut()); unsafe { *ArchivedRc::get_seal_unchecked(b) = 17u32.into(); } assert_eq!(*archived.a, 17); assert_eq!(*archived.b, 17); let mut deserializer = Pool::new(); let deserialized = deserialize_using::( &*archived, &mut deserializer, ) .unwrap(); assert_eq!(*deserialized.a, 17); assert_eq!(*deserialized.b, 17); assert_eq!( &*deserialized.a as *const u32, &*deserialized.b as *const u32 ); assert_eq!(Rc::strong_count(&deserialized.a), 3); assert_eq!(Rc::strong_count(&deserialized.b), 3); assert_eq!(Rc::weak_count(&deserialized.a), 0); assert_eq!(Rc::weak_count(&deserialized.b), 0); core::mem::drop(deserializer); assert_eq!(*deserialized.a, 17); assert_eq!(*deserialized.b, 17); assert_eq!( &*deserialized.a as *const u32, &*deserialized.b as *const u32 ); assert_eq!(Rc::strong_count(&deserialized.a), 2); assert_eq!(Rc::strong_count(&deserialized.b), 2); assert_eq!(Rc::weak_count(&deserialized.a), 0); assert_eq!(Rc::weak_count(&deserialized.b), 0); }); } #[test] fn roundtrip_rc_zst() { #[derive(Archive, Deserialize, Serialize, Debug, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] struct TestRcZST { a: Rc<()>, b: Rc<()>, } let rc_zst = Rc::new(()); roundtrip(&TestRcZST { a: rc_zst.clone(), b: rc_zst.clone(), }); } #[test] fn roundtrip_unsized_shared_ptr() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] struct Test { a: Rc<[String]>, b: Rc<[String]>, } let rc_slice = Rc::<[String]>::from( vec!["hello".to_string(), "world".to_string()].into_boxed_slice(), ); let value = Test { a: rc_slice.clone(), b: rc_slice, }; roundtrip(&value); } #[test] fn roundtrip_unsized_shared_ptr_empty() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] struct Test { a: Rc<[u32]>, b: Rc<[u32]>, } let a_rc_slice = Rc::<[u32]>::from(vec![].into_boxed_slice()); let b_rc_slice = Rc::<[u32]>::from(vec![100].into_boxed_slice()); let value = Test 
{ a: a_rc_slice, b: b_rc_slice.clone(), }; roundtrip(&value); } #[test] fn roundtrip_weak_ptr() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test { a: Rc, b: Weak, } let shared = Rc::new(10); let value = Test { a: shared.clone(), b: Rc::downgrade(&shared), }; let mut buf = to_bytes::(&value).unwrap(); let archived = unsafe { access_unchecked::(buf.as_ref()) }; assert_eq!(*archived.a, 10); assert!(archived.b.upgrade().is_some()); assert_eq!(**archived.b.upgrade().unwrap(), 10); let mut mutable_archived = unsafe { access_unchecked_mut::(buf.as_mut()) }; munge!(let ArchivedTest { a, .. } = mutable_archived.as_mut()); unsafe { *ArchivedRc::get_seal_unchecked(a) = 42u32.into(); } let archived = unsafe { access_unchecked::(buf.as_ref()) }; assert_eq!(*archived.a, 42); assert!(archived.b.upgrade().is_some()); assert_eq!(**archived.b.upgrade().unwrap(), 42); let mut mutable_archived = unsafe { access_unchecked_mut::(buf.as_mut()) }; munge!(let ArchivedTest { b, .. } = mutable_archived.as_mut()); unsafe { *ArchivedRc::get_seal_unchecked( ArchivedRcWeak::upgrade_seal(b).unwrap(), ) = 17u32.into(); } let archived = unsafe { access_unchecked::(buf.as_ref()) }; assert_eq!(*archived.a, 17); assert!(archived.b.upgrade().is_some()); assert_eq!(**archived.b.upgrade().unwrap(), 17); let mut deserializer = Pool::new(); let deserialized = deserialize_using::(archived, &mut deserializer) .unwrap(); assert_eq!(*deserialized.a, 17); assert!(deserialized.b.upgrade().is_some()); assert_eq!(*deserialized.b.upgrade().unwrap(), 17); assert_eq!( &*deserialized.a as *const u32, &*deserialized.b.upgrade().unwrap() as *const u32 ); assert_eq!(Rc::strong_count(&deserialized.a), 2); assert_eq!(Weak::strong_count(&deserialized.b), 2); assert_eq!(Rc::weak_count(&deserialized.a), 1); assert_eq!(Weak::weak_count(&deserialized.b), 1); core::mem::drop(deserializer); assert_eq!(*deserialized.a, 17); assert!(deserialized.b.upgrade().is_some()); 
assert_eq!(*deserialized.b.upgrade().unwrap(), 17); assert_eq!( &*deserialized.a as *const u32, &*deserialized.b.upgrade().unwrap() as *const u32 ); assert_eq!(Rc::strong_count(&deserialized.a), 1); assert_eq!(Weak::strong_count(&deserialized.b), 1); assert_eq!(Rc::weak_count(&deserialized.a), 1); assert_eq!(Weak::weak_count(&deserialized.b), 1); } #[test] fn serialize_cyclic_error() { use rancor::{Fallible, Source}; use crate::{ de::Pooling, ser::{Sharing, Writer}, }; #[derive(Archive, Serialize, Deserialize)] #[rkyv( crate, serialize_bounds( __S: Sharing + Writer, <__S as Fallible>::Error: Source, ), deserialize_bounds( __D: Pooling, <__D as Fallible>::Error: Source, ) )] #[cfg_attr( feature = "bytecheck", rkyv(bytecheck(bounds( __C: crate::validation::ArchiveContext + crate::validation::SharedContext, <__C as Fallible>::Error: Source, ))), )] struct Inner { #[rkyv(omit_bounds)] weak: Weak, } #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Outer { inner: Rc, } let value = Outer { inner: Rc::new_cyclic(|weak| Inner { weak: weak.clone() }), }; assert!(to_bytes::(&value).is_err()); } #[cfg(all( feature = "bytecheck", not(feature = "big_endian"), not(any(feature = "pointer_width_16", feature = "pointer_width_64")), ))] #[test] fn recursive_stack_overflow() { use rancor::{Fallible, Source}; use crate::{ access, de::Pooling, util::Align, validation::{ArchiveContext, SharedContext}, }; #[derive(Archive, Deserialize)] #[rkyv( crate, bytecheck(bounds(__C: ArchiveContext + SharedContext)), deserialize_bounds( __D: Pooling, <__D as Fallible>::Error: Source, ), derive(Debug), )] enum AllValues { Rc(#[rkyv(omit_bounds)] Rc), } let data = Align([ 0x00, 0x00, 0x00, 0xff, // B: AllValues::Rc 0xfc, 0xff, 0xff, 0xff, // RelPtr with offset -4 (B) 0x00, 0x00, 0xf6, 0xff, // A: AllValues::Rc 0xf4, 0xff, 0xff, 0xff, // RelPtr with offset -12 (B) ]); access::(&*data).unwrap_err(); } } 
rkyv-0.8.9/src/impls/alloc/string.rs000064400000000000000000000045311046102023000155210ustar 00000000000000use core::cmp::Ordering; use rancor::{Fallible, Source}; use crate::{ alloc::string::{String, ToString}, string::{ArchivedString, StringResolver}, Archive, Deserialize, DeserializeUnsized, Place, Serialize, SerializeUnsized, }; impl Archive for String { type Archived = ArchivedString; type Resolver = StringResolver; #[inline] fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedString::resolve_from_str(self.as_str(), resolver, out); } } impl Serialize for String where S::Error: Source, str: SerializeUnsized, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedString::serialize_from_str(self.as_str(), serializer) } } impl Deserialize for ArchivedString where str: DeserializeUnsized, { fn deserialize(&self, _: &mut D) -> Result { Ok(self.as_str().to_string()) } } impl PartialEq for ArchivedString { #[inline] fn eq(&self, other: &String) -> bool { PartialEq::eq(self.as_str(), other.as_str()) } } impl PartialEq for String { #[inline] fn eq(&self, other: &ArchivedString) -> bool { PartialEq::eq(other.as_str(), self.as_str()) } } impl PartialOrd for ArchivedString { #[inline] fn partial_cmp(&self, other: &String) -> Option { self.as_str().partial_cmp(other.as_str()) } } impl PartialOrd for String { #[inline] fn partial_cmp(&self, other: &ArchivedString) -> Option { self.as_str().partial_cmp(other.as_str()) } } #[cfg(test)] mod tests { use crate::{alloc::string::ToString, api::test::roundtrip}; #[test] fn roundtrip_string() { roundtrip(&"".to_string()); roundtrip(&"hello world".to_string()); } #[test] fn roundtrip_option_string() { roundtrip(&Some("".to_string())); roundtrip(&Some("hello world".to_string())); } #[test] fn roundtrip_result_string() { roundtrip(&Ok::<_, ()>("".to_string())); roundtrip(&Ok::<_, ()>("hello world".to_string())); roundtrip(&Err::<(), _>("".to_string())); roundtrip(&Err::<(), _>("hello world".to_string())); } } 
rkyv-0.8.9/src/impls/alloc/vec.rs000064400000000000000000000055031046102023000147700ustar 00000000000000use rancor::{Fallible, ResultExt as _, Source}; use crate::{ alloc::{alloc::alloc, boxed::Box, vec::Vec}, ser::{Allocator, Writer}, traits::LayoutRaw, vec::{ArchivedVec, VecResolver}, Archive, Deserialize, DeserializeUnsized, Place, Serialize, }; impl Archive for Vec { type Archived = ArchivedVec; type Resolver = VecResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedVec::resolve_from_slice(self.as_slice(), resolver, out); } } impl, S: Fallible + Allocator + Writer + ?Sized> Serialize for Vec { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedVec::::serialize_from_slice( self.as_slice(), serializer, ) } } impl Deserialize, D> for ArchivedVec where T: Archive, [T::Archived]: DeserializeUnsized<[T], D>, D: Fallible + ?Sized, D::Error: Source, { fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { let metadata = self.as_slice().deserialize_metadata(); let layout = <[T] as LayoutRaw>::layout_raw(metadata).into_error()?; let data_address = if layout.size() > 0 { unsafe { alloc(layout) } } else { crate::polyfill::dangling(&layout).as_ptr() }; let out = ptr_meta::from_raw_parts_mut(data_address.cast(), metadata); unsafe { self.as_slice().deserialize_unsized(deserializer, out)?; } unsafe { Ok(Box::<[T]>::from_raw(out).into()) } } } impl, U> PartialEq> for ArchivedVec { fn eq(&self, other: &Vec) -> bool { self.as_slice().eq(other.as_slice()) } } impl, U> PartialOrd> for ArchivedVec { fn partial_cmp(&self, other: &Vec) -> Option<::core::cmp::Ordering> { crate::impls::lexicographical_partial_ord( self.as_slice(), other.as_slice(), ) } } #[cfg(test)] mod tests { use crate::{ alloc::{vec, vec::Vec}, api::test::roundtrip, }; #[test] fn roundtrip_vec() { roundtrip(&Vec::::new()); roundtrip(&vec![1, 2, 3, 4]); } #[test] fn roundtrip_vec_zst() { roundtrip(&Vec::<()>::new()); roundtrip(&vec![(), (), (), ()]); } #[test] fn 
roundtrip_option_vec() { roundtrip(&Some(Vec::::new())); roundtrip(&Some(vec![1, 2, 3, 4])); } #[test] fn roundtrip_result_vec() { roundtrip(&Ok::<_, ()>(Vec::::new())); roundtrip(&Ok::<_, ()>(vec![1, 2, 3, 4])); roundtrip(&Err::<(), _>(Vec::::new())); roundtrip(&Err::<(), _>(vec![1, 2, 3, 4])); } } rkyv-0.8.9/src/impls/alloc/with.rs000064400000000000000000000557011046102023000151730ustar 00000000000000use core::{marker::PhantomData, ops::ControlFlow}; use ptr_meta::Pointee; use rancor::{Fallible, Source}; use crate::{ alloc::{ borrow::Cow, boxed::Box, collections::{BTreeMap, BTreeSet}, rc::Rc, vec::Vec, }, collections::{ btree_map::{ArchivedBTreeMap, BTreeMapResolver}, util::{Entry, EntryAdapter}, }, impls::core::with::RefWrapper, niche::option_box::{ArchivedOptionBox, OptionBoxResolver}, ser::{Allocator, Writer}, string::{ArchivedString, StringResolver}, traits::LayoutRaw, vec::{ArchivedVec, VecResolver}, with::{ ArchiveWith, AsOwned, AsVec, DeserializeWith, Map, MapKV, Niche, SerializeWith, Unshare, }, Archive, ArchiveUnsized, ArchivedMetadata, Deserialize, DeserializeUnsized, Place, Serialize, SerializeUnsized, }; // Implementation for `MapKV` impl ArchiveWith> for MapKV where A: ArchiveWith, B: ArchiveWith, { type Archived = ArchivedBTreeMap< >::Archived, >::Archived, >; type Resolver = BTreeMapResolver; fn resolve_with( field: &BTreeMap, resolver: Self::Resolver, out: Place, ) { ArchivedBTreeMap::resolve_from_len(field.len(), resolver, out) } } impl SerializeWith, S> for MapKV where A: ArchiveWith + SerializeWith, B: ArchiveWith + SerializeWith, >::Archived: Ord, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { fn serialize_with( field: &BTreeMap, serializer: &mut S, ) -> Result::Error> { ArchivedBTreeMap::<_, _, 5>::serialize_from_ordered_iter( field.iter().map(|(k, v)| { ( RefWrapper::<'_, A, K>(k, PhantomData::), RefWrapper::<'_, B, V>(v, PhantomData::), ) }), serializer, ) } } impl DeserializeWith< ArchivedBTreeMap< >::Archived, >::Archived, 
>, BTreeMap, D, > for MapKV where A: ArchiveWith + DeserializeWith<>::Archived, K, D>, B: ArchiveWith + DeserializeWith<>::Archived, V, D>, K: Ord, D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedBTreeMap< >::Archived, >::Archived, >, deserializer: &mut D, ) -> Result, ::Error> { let mut result = BTreeMap::new(); let r = field.visit(|ak, av| { let k = match A::deserialize_with(ak, deserializer) { Ok(k) => k, Err(e) => return ControlFlow::Break(e), }; let v = match B::deserialize_with(av, deserializer) { Ok(v) => v, Err(e) => return ControlFlow::Break(e), }; result.insert(k, v); ControlFlow::Continue(()) }); match r { Some(e) => Err(e), None => Ok(result), } } } // Implementations for `Map` impl ArchiveWith> for Map where A: ArchiveWith, { type Archived = ArchivedVec<>::Archived>; type Resolver = VecResolver; fn resolve_with( field: &Vec, resolver: Self::Resolver, out: Place, ) { ArchivedVec::resolve_from_len(field.len(), resolver, out) } } impl SerializeWith, S> for Map where S: Fallible + Allocator + Writer + ?Sized, A: ArchiveWith + SerializeWith, { fn serialize_with( field: &Vec, s: &mut S, ) -> Result { // Wrapper for O so that we have an Archive and Serialize implementation // and ArchivedVec::serialize_from_* is happy about the bound // constraints struct RefWrapper<'o, A, O>(&'o O, PhantomData); impl, O> Archive for RefWrapper<'_, A, O> { type Archived = >::Archived; type Resolver = >::Resolver; fn resolve( &self, resolver: Self::Resolver, out: Place, ) { A::resolve_with(self.0, resolver, out) } } impl Serialize for RefWrapper<'_, A, O> where A: ArchiveWith + SerializeWith, S: Fallible + Writer + ?Sized, { fn serialize(&self, s: &mut S) -> Result { A::serialize_with(self.0, s) } } let iter = field .iter() .map(|value| RefWrapper::<'_, A, O>(value, PhantomData)); ArchivedVec::serialize_from_iter(iter, s) } } impl DeserializeWith>::Archived>, Vec, D> for Map where A: ArchiveWith + DeserializeWith<>::Archived, O, D>, D: Fallible + ?Sized, { fn 
deserialize_with( field: &ArchivedVec<>::Archived>, d: &mut D, ) -> Result, D::Error> { field .iter() .map(|value| A::deserialize_with(value, d)) .collect() } } // AsOwned impl<'a, F: Archive + Clone> ArchiveWith> for AsOwned { type Archived = F::Archived; type Resolver = F::Resolver; fn resolve_with( field: &Cow<'a, F>, resolver: Self::Resolver, out: Place, ) { field.resolve(resolver, out); } } impl<'a, F, S> SerializeWith, S> for AsOwned where F: Serialize + Clone, S: Fallible + ?Sized, { fn serialize_with( field: &Cow<'a, F>, serializer: &mut S, ) -> Result { field.serialize(serializer) } } impl DeserializeWith for AsOwned where T: Archive + Clone, T::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize_with( field: &T::Archived, deserializer: &mut D, ) -> Result { field.deserialize(deserializer) } } impl<'a, T: Archive + Clone> ArchiveWith> for AsOwned { type Archived = ArchivedVec; type Resolver = VecResolver; fn resolve_with( field: &Cow<'a, [T]>, resolver: Self::Resolver, out: Place, ) { ArchivedVec::resolve_from_slice(field, resolver, out); } } impl<'a, T, S> SerializeWith, S> for AsOwned where T: Serialize + Clone, S: Fallible + Allocator + Writer + ?Sized, { fn serialize_with( field: &Cow<'a, [T]>, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_slice(field, serializer) } } impl<'a, T, D> DeserializeWith, Cow<'a, [T]>, D> for AsOwned where T: Archive + Clone, T::Archived: Deserialize, D: Fallible + ?Sized, D::Error: Source, { fn deserialize_with( field: &ArchivedVec, deserializer: &mut D, ) -> Result, D::Error> { Ok(Cow::Owned(field.deserialize(deserializer)?)) } } impl<'a> ArchiveWith> for AsOwned { type Archived = ArchivedString; type Resolver = StringResolver; fn resolve_with( field: &Cow<'a, str>, resolver: Self::Resolver, out: Place, ) { ArchivedString::resolve_from_str(field, resolver, out); } } impl<'a, S> SerializeWith, S> for AsOwned where S: Fallible + Writer + ?Sized, S::Error: Source, { fn serialize_with( field: &Cow<'a, 
str>, serializer: &mut S, ) -> Result { ArchivedString::serialize_from_str(field, serializer) } } impl<'a, D> DeserializeWith, D> for AsOwned where D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedString, deserializer: &mut D, ) -> Result, D::Error> { Ok(Cow::Owned(field.deserialize(deserializer)?)) } } // AsVec impl ArchiveWith> for AsVec { type Archived = ArchivedVec>; type Resolver = VecResolver; fn resolve_with( field: &BTreeMap, resolver: Self::Resolver, out: Place, ) { ArchivedVec::resolve_from_len(field.len(), resolver, out); } } impl SerializeWith, S> for AsVec where K: Serialize, V: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize_with( field: &BTreeMap, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_iter( field.iter().map(|(key, value)| { EntryAdapter::<_, _, K, V>::new(key, value) }), serializer, ) } } impl DeserializeWith< ArchivedVec>, BTreeMap, D, > for AsVec where K: Archive + Ord, V: Archive, K::Archived: Deserialize, V::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedVec>, deserializer: &mut D, ) -> Result, D::Error> { let mut result = BTreeMap::new(); for entry in field.iter() { result.insert( entry.key.deserialize(deserializer)?, entry.value.deserialize(deserializer)?, ); } Ok(result) } } impl ArchiveWith> for AsVec { type Archived = ArchivedVec; type Resolver = VecResolver; fn resolve_with( field: &BTreeSet, resolver: Self::Resolver, out: Place, ) { ArchivedVec::resolve_from_len(field.len(), resolver, out); } } impl SerializeWith, S> for AsVec where T: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize_with( field: &BTreeSet, serializer: &mut S, ) -> Result { ArchivedVec::::serialize_from_iter::( field.iter(), serializer, ) } } impl DeserializeWith, BTreeSet, D> for AsVec where T: Archive + Ord, T::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedVec, deserializer: &mut D, ) -> Result, D::Error> { let mut result 
= BTreeSet::new(); for key in field.iter() { result.insert(key.deserialize(deserializer)?); } Ok(result) } } // Niche impl ArchiveWith>> for Niche where T: ArchiveUnsized + ?Sized, ArchivedMetadata: Default, { type Archived = ArchivedOptionBox; type Resolver = OptionBoxResolver; fn resolve_with( field: &Option>, resolver: Self::Resolver, out: Place, ) { ArchivedOptionBox::resolve_from_option(field.as_deref(), resolver, out); } } impl SerializeWith>, S> for Niche where T: SerializeUnsized + ?Sized, S: Fallible + Writer + ?Sized, ArchivedMetadata: Default, { fn serialize_with( field: &Option>, serializer: &mut S, ) -> Result { ArchivedOptionBox::serialize_from_option(field.as_deref(), serializer) } } impl DeserializeWith, Option>, D> for Niche where T: ArchiveUnsized + LayoutRaw + Pointee + ?Sized, T::Archived: DeserializeUnsized, D: Fallible + ?Sized, D::Error: Source, { fn deserialize_with( field: &ArchivedOptionBox, deserializer: &mut D, ) -> Result>, D::Error> { if let Some(value) = field.as_ref() { Ok(Some(value.deserialize(deserializer)?)) } else { Ok(None) } } } // Unshare #[cfg(target_has_atomic = "ptr")] impl ArchiveWith> for Unshare { type Archived = T::Archived; type Resolver = T::Resolver; fn resolve_with( x: &crate::alloc::sync::Arc, resolver: Self::Resolver, out: Place, ) { x.as_ref().resolve(resolver, out) } } #[cfg(target_has_atomic = "ptr")] impl SerializeWith, S> for Unshare where T: Serialize, S: Fallible + ?Sized, { fn serialize_with( x: &crate::alloc::sync::Arc, s: &mut S, ) -> Result { x.as_ref().serialize(s) } } #[cfg(target_has_atomic = "ptr")] impl DeserializeWith, D> for Unshare where A: Deserialize, D: Fallible + ?Sized, { fn deserialize_with( x: &A, d: &mut D, ) -> Result, D::Error> { Ok(crate::alloc::sync::Arc::new(A::deserialize(x, d)?)) } } impl ArchiveWith> for Unshare { type Archived = T::Archived; type Resolver = T::Resolver; fn resolve_with( x: &Rc, resolver: Self::Resolver, out: Place, ) { x.as_ref().resolve(resolver, out) } } 
impl, S: Fallible + ?Sized> SerializeWith, S> for Unshare { fn serialize_with( x: &Rc, s: &mut S, ) -> Result { x.as_ref().serialize(s) } } impl DeserializeWith, D> for Unshare where A: Deserialize, D: Fallible + ?Sized, { fn deserialize_with(x: &A, d: &mut D) -> Result, D::Error> { Ok(Rc::new(A::deserialize(x, d)?)) } } #[cfg(test)] mod tests { use core::mem::size_of; use crate::{ alloc::{ borrow::Cow, boxed::Box, collections::{BTreeMap, BTreeSet}, string::{String, ToString}, }, api::test::{roundtrip, roundtrip_with, to_archived}, niche::niching::Null, with::{ AsOwned, AsVec, DefaultNiche, InlineAsBox, Map, MapKV, Niche, NicheInto, }, Archive, Deserialize, Serialize, }; #[derive(Debug, Archive, Deserialize, Serialize, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] struct Test { value: Option>, } #[test] fn roundtrip_niche_none() { roundtrip(&Test { value: None }); } #[test] fn roundtrip_niche_some() { roundtrip(&Test { value: Some(Box::new(128)), }); } #[test] fn ambiguous_niched_archived_box() { #[derive(Archive, Deserialize, Serialize, Debug, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] struct HasNiche { #[rkyv(with = Niche)] inner: Option>, } roundtrip(&HasNiche { inner: Some(Box::<[u32]>::from([])), }); roundtrip(&HasNiche { inner: None }); } #[test] fn with_as_owned() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test<'a> { #[rkyv(with = AsOwned)] a: Cow<'a, u32>, #[rkyv(with = AsOwned)] b: Cow<'a, [u32]>, #[rkyv(with = AsOwned)] c: Cow<'a, str>, } let value = Test { a: Cow::Borrowed(&100), b: Cow::Borrowed(&[1, 2, 3, 4, 5, 6]), c: Cow::Borrowed("hello world"), }; to_archived(&value, |archived| { assert_eq!(archived.a, 100); assert_eq!(archived.b, [1, 2, 3, 4, 5, 6]); assert_eq!(archived.c, "hello world"); }); } #[test] fn with_as_map() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test<'a> { #[rkyv(with = Map)] a: Option<&'a str>, #[rkyv(with = Map)] b: Option<&'a str>, } let value = 
Test { a: Some("foo"), b: None, }; to_archived(&value, |archived| { assert!(archived.a.is_some()); assert!(archived.b.is_none()); }); } #[test] fn with_as_mapkv() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test<'a> { #[rkyv(with = MapKV)] a: BTreeMap<&'a str, &'a str>, } let mut a = BTreeMap::new(); a.insert("foo", "bar"); a.insert("woo", "roo"); let value = Test { a }; to_archived(&value, |archived| { assert_eq!(archived.a.len(), 2); assert!(archived.a.contains_key("foo")); assert_eq!(**archived.a.get("woo").unwrap(), *"roo"); }); } #[test] fn with_as_vec() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test { #[rkyv(with = AsVec)] a: BTreeMap, #[rkyv(with = AsVec)] b: BTreeSet, #[rkyv(with = AsVec)] c: BTreeMap, } let mut a = BTreeMap::new(); a.insert("foo".to_string(), "hello".to_string()); a.insert("bar".to_string(), "world".to_string()); a.insert("baz".to_string(), "bat".to_string()); let mut b = BTreeSet::new(); b.insert("foo".to_string()); b.insert("hello world!".to_string()); b.insert("bar".to_string()); b.insert("fizzbuzz".to_string()); let c = BTreeMap::new(); let value = Test { a, b, c }; to_archived(&value, |archived| { assert_eq!(archived.a.len(), 3); assert!(archived .a .iter() .find(|&e| e.key == "foo" && e.value == "hello") .is_some()); assert!(archived .a .iter() .find(|&e| e.key == "bar" && e.value == "world") .is_some()); assert!(archived .a .iter() .find(|&e| e.key == "baz" && e.value == "bat") .is_some()); assert_eq!(archived.b.len(), 4); assert!(archived.b.iter().find(|&e| e == "foo").is_some()); assert!(archived.b.iter().find(|&e| e == "hello world!").is_some()); assert!(archived.b.iter().find(|&e| e == "bar").is_some()); assert!(archived.b.iter().find(|&e| e == "fizzbuzz").is_some()); }); } #[cfg(feature = "alloc")] #[test] fn with_niche_box() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct TestNiche { #[rkyv(with = Niche)] inner: Option>, } #[derive(Archive, Serialize, 
Deserialize)] #[rkyv(crate)] struct TestNullNiche { #[rkyv(with = NicheInto)] inner: Option>, } #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct TestNoNiching { inner: Option>, } let value = TestNiche { inner: Some(Box::new("hello world".to_string())), }; to_archived(&value, |archived| { assert!(archived.inner.is_some()); assert_eq!(&**archived.inner.as_ref().unwrap(), "hello world"); assert_eq!(archived.inner, value.inner); }); let value = TestNiche { inner: None }; to_archived(&value, |archived| { assert!(archived.inner.is_none()); assert_eq!(archived.inner, value.inner); }); assert!( size_of::() < size_of::() ); let value = TestNullNiche { inner: Some(Box::new("hello world".to_string())), }; to_archived(&value, |archived| { assert!(archived.inner.is_some()); assert_eq!(&**archived.inner.as_ref().unwrap(), "hello world"); assert_eq!(archived.inner, value.inner); }); let value = TestNullNiche { inner: None }; to_archived(&value, |archived| { assert!(archived.inner.is_none()); assert_eq!(archived.inner, value.inner); }); assert!( size_of::() < size_of::() ); } #[test] fn with_null_niching() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Nichable { #[rkyv(niche)] // Default = Null boxed: Box, } #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Outer { #[rkyv(with = DefaultNiche)] field: Option, } assert_eq!(size_of::(), size_of::()); let values = [ Outer { field: None }, Outer { field: Some(Nichable { boxed: Box::new(727), }), }, ]; roundtrip_with(&values[0], |_, archived| { assert!(archived.field.is_none()); }); roundtrip_with(&values[1], |_, archived| { let nichable = archived.field.as_ref().unwrap(); assert_eq!(nichable.boxed.as_ref().to_native(), 727); }); } } rkyv-0.8.9/src/impls/core/ffi.rs000064400000000000000000000046341046102023000146210ustar 00000000000000use core::{ alloc::{Layout, LayoutError}, ffi::{c_char, CStr}, ptr, }; use 
ptr_meta::Pointee; use rancor::Fallible; use crate::{ primitive::ArchivedUsize, ser::Writer, traits::{ArchivePointee, LayoutRaw}, ArchiveUnsized, ArchivedMetadata, DeserializeUnsized, Portable, SerializeUnsized, }; // CStr impl LayoutRaw for CStr { #[inline] fn layout_raw( metadata: ::Metadata, ) -> Result { Layout::array::(metadata) } } // SAFETY: `CStr` is a byte slice and so has a stable, well-defined layout that // is the same on all targets unsafe impl Portable for CStr {} impl ArchiveUnsized for CStr { type Archived = CStr; #[inline] fn archived_metadata(&self) -> ArchivedMetadata { ArchivedUsize::from_native(ptr_meta::metadata(self) as _) } } impl ArchivePointee for CStr { type ArchivedMetadata = ArchivedUsize; #[inline] fn pointer_metadata( archived: &Self::ArchivedMetadata, ) -> ::Metadata { <[u8]>::pointer_metadata(archived) } } impl SerializeUnsized for CStr { fn serialize_unsized(&self, serializer: &mut S) -> Result { let result = serializer.pos(); serializer.write(self.to_bytes_with_nul())?; Ok(result) } } impl DeserializeUnsized for CStr { unsafe fn deserialize_unsized( &self, _: &mut D, out: *mut CStr, ) -> Result<(), D::Error> { let slice = self.to_bytes_with_nul(); // SAFETY: The caller has guaranteed that `out` is non-null, properly // aligned, valid for writes, and points to memory allocated according // to the layout for the metadata returned from `deserialize_metadata`. // Therefore, `out` points to at least `self.len()` bytes. // `self.as_ptr()` is valid for reads and points to the bytes of `self` // which are also at least `self.len()` bytes. Note here that the length // of the `CStr` contains the null terminator. 
unsafe { ptr::copy_nonoverlapping( slice.as_ptr(), out.cast::(), slice.len(), ); } Ok(()) } fn deserialize_metadata(&self) -> ::Metadata { ptr_meta::metadata(self) } } rkyv-0.8.9/src/impls/core/mod.rs000064400000000000000000000404741046102023000146360ustar 00000000000000use core::{ alloc::{Layout, LayoutError}, marker::{PhantomData, PhantomPinned}, mem::{ManuallyDrop, MaybeUninit}, ptr::{self, addr_of_mut}, str, }; use ptr_meta::Pointee; use rancor::Fallible; use crate::{ primitive::ArchivedUsize, ser::{Allocator, Writer, WriterExt as _}, traits::{ArchivePointee, CopyOptimization, LayoutRaw, NoUndef}, tuple::*, Archive, ArchiveUnsized, ArchivedMetadata, Deserialize, DeserializeUnsized, Place, Portable, Serialize, SerializeUnsized, }; mod ffi; mod net; mod ops; mod option; mod primitive; mod result; mod time; pub(crate) mod with; impl LayoutRaw for T { fn layout_raw( _: ::Metadata, ) -> Result { Ok(Layout::new::()) } } impl LayoutRaw for [T] { fn layout_raw( metadata: ::Metadata, ) -> Result { Layout::array::(metadata) } } impl LayoutRaw for str { #[inline] fn layout_raw( metadata: ::Metadata, ) -> Result { Layout::array::(metadata) } } impl ArchivePointee for T { type ArchivedMetadata = (); fn pointer_metadata( _: &Self::ArchivedMetadata, ) -> ::Metadata { } } impl ArchiveUnsized for T { type Archived = T::Archived; fn archived_metadata(&self) -> ArchivedMetadata {} } impl SerializeUnsized for T where T: Serialize, S: Fallible + Writer + ?Sized, { fn serialize_unsized(&self, serializer: &mut S) -> Result { let resolver = self.serialize(serializer)?; serializer.align_for::()?; unsafe { serializer.resolve_aligned(self, resolver) } } } impl DeserializeUnsized for T::Archived where T: Archive, D: Fallible + ?Sized, T::Archived: Deserialize, { unsafe fn deserialize_unsized( &self, deserializer: &mut D, out: *mut T, ) -> Result<(), D::Error> { // SAFETY: The caller has guaranteed that `out` is non-null, properly // aligned, valid for writes, and allocated according to 
the layout of // the deserialized metadata (the unit type for sized types). unsafe { out.write(self.deserialize(deserializer)?); } Ok(()) } fn deserialize_metadata(&self) -> ::Metadata {} } macro_rules! impl_tuple { ($name:ident, $($type:ident $index:tt),*) => { impl<$($type),*> Archive for ($($type,)*) where $($type: Archive,)* { type Archived = $name<$($type::Archived,)*>; type Resolver = ($($type::Resolver,)*); fn resolve( &self, resolver: Self::Resolver, out: Place, ) { // SAFETY: This pointer will only be used to manually project // to each of the fields to wrap them in a `Place` again. let out_ptr = unsafe { out.ptr() }; $( // SAFETY: `out_ptr` is guaranteed to be properly aligned // and dereferenceable. let ptr = unsafe { addr_of_mut!((*out_ptr).$index) }; // SAFETY: // - `ptr` points to the `$index` field of `out` // - `ptr` is properly aligned, dereferenceable, and all of // its bytes are initialized let out_field = unsafe { Place::from_field_unchecked(out, ptr) }; self.$index.resolve(resolver.$index, out_field); )* } } impl<$($type,)* S> Serialize for ($($type,)*) where $($type: Serialize,)* S: Fallible + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { Ok(( $(self.$index.serialize(serializer)?,)* )) } } impl<$($type,)* D> Deserialize<($($type,)*), D> for $name<$($type::Archived,)*> where D: Fallible + ?Sized, $($type: Archive,)* $($type::Archived: Deserialize<$type, D>,)* { fn deserialize( &self, deserializer: &mut D, ) -> Result<($($type,)*), D::Error> { Ok(( $(self.$index.deserialize(deserializer)?,)* )) } } }; } impl_tuple!(ArchivedTuple1, T0 0); impl_tuple!(ArchivedTuple2, T0 0, T1 1); impl_tuple!(ArchivedTuple3, T0 0, T1 1, T2 2); impl_tuple!(ArchivedTuple4, T0 0, T1 1, T2 2, T3 3); impl_tuple!(ArchivedTuple5, T0 0, T1 1, T2 2, T3 3, T4 4); impl_tuple!(ArchivedTuple6, T0 0, T1 1, T2 2, T3 3, T4 4, T5 5); impl_tuple!(ArchivedTuple7, T0 0, T1 1, T2 2, T3 3, T4 4, T5 5, T6 6); impl_tuple!(ArchivedTuple8, T0 0, T1 1, T2 2, T3 3, T4 4, T5 
5, T6 6, T7 7); impl_tuple!( ArchivedTuple9, T0 0, T1 1, T2 2, T3 3, T4 4, T5 5, T6 6, T7 7, T8 8 ); impl_tuple!( ArchivedTuple10, T0 0, T1 1, T2 2, T3 3, T4 4, T5 5, T6 6, T7 7, T8 8, T9 9 ); impl_tuple!( ArchivedTuple11, T0 0, T1 1, T2 2, T3 3, T4 4, T5 5, T6 6, T7 7, T8 8, T9 9, T10 10 ); impl_tuple!( ArchivedTuple12, T0 0, T1 1, T2 2, T3 3, T4 4, T5 5, T6 6, T7 7, T8 8, T9 9, T10 10, T11 11 ); impl_tuple!( ArchivedTuple13, T0 0, T1 1, T2 2, T3 3, T4 4, T5 5, T6 6, T7 7, T8 8, T9 9, T10 10, T11 11, T12 12 ); // Arrays // SAFETY: `[T; N]` is a `T` array and so is portable as long as `T` is also // `Portable`. unsafe impl Portable for [T; N] {} impl Archive for [T; N] { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable_if(T::COPY_OPTIMIZATION.is_enabled()) }; type Archived = [T::Archived; N]; type Resolver = [T::Resolver; N]; fn resolve(&self, resolver: Self::Resolver, out: Place) { for (i, (value, resolver)) in self.iter().zip(resolver).enumerate() { let out_i = unsafe { out.index(i) }; value.resolve(resolver, out_i); } } } impl Serialize for [T; N] where T: Serialize, S: Fallible + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { let mut result = core::mem::MaybeUninit::::uninit(); let result_ptr = result.as_mut_ptr().cast::(); for (i, value) in self.iter().enumerate() { unsafe { result_ptr.add(i).write(value.serialize(serializer)?); } } unsafe { Ok(result.assume_init()) } } } impl Deserialize<[T; N], D> for [T::Archived; N] where T: Archive, T::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize(&self, deserializer: &mut D) -> Result<[T; N], D::Error> { let mut result = core::mem::MaybeUninit::<[T; N]>::uninit(); let result_ptr = result.as_mut_ptr().cast::(); for (i, value) in self.iter().enumerate() { unsafe { result_ptr.add(i).write(value.deserialize(deserializer)?); } } unsafe { Ok(result.assume_init()) } } } // Slices // SAFETY: `[T]` is a `T` slice and so is portable as long as `T` is also // 
`Portable`. unsafe impl Portable for [T] {} impl ArchiveUnsized for [T] { type Archived = [T::Archived]; fn archived_metadata(&self) -> ArchivedMetadata { ArchivedUsize::from_native(ptr_meta::metadata(self) as _) } } impl ArchivePointee for [T] { type ArchivedMetadata = ArchivedUsize; fn pointer_metadata( archived: &Self::ArchivedMetadata, ) -> ::Metadata { archived.to_native() as usize } } impl SerializeUnsized for [T] where T: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize_unsized(&self, serializer: &mut S) -> Result { if T::COPY_OPTIMIZATION.is_enabled() { let result = serializer.align_for::()?; let as_bytes = unsafe { core::slice::from_raw_parts( self.as_ptr().cast::(), core::mem::size_of_val(self), ) }; serializer.write(as_bytes)?; Ok(result) } else { use crate::util::SerVec; SerVec::with_capacity( serializer, self.len(), |resolvers, serializer| { for value in self.iter() { unsafe { resolvers .push_unchecked(value.serialize(serializer)?); } } let result = serializer.align_for::()?; for (value, resolver) in self.iter().zip(resolvers.drain()) { unsafe { serializer.resolve_aligned(value, resolver)?; } } Ok(result) }, )? } } } impl DeserializeUnsized<[U], D> for [T] where T: Deserialize, D: Fallible + ?Sized, { unsafe fn deserialize_unsized( &self, deserializer: &mut D, out: *mut [U], ) -> Result<(), D::Error> { for (i, item) in self.iter().enumerate() { // SAFETY: The caller has guaranteed that `out` points to a slice // with a length guaranteed to match the length of `self`. Since `i` // is less than the length of the slice, the result of the pointer // add is always in-bounds. let out_ptr = unsafe { out.cast::().add(i) }; // SAFETY: `out_ptr` points to an element of `out` and so is // guaranteed to be non-null, properly aligned, and valid for // writes. 
unsafe { out_ptr.write(item.deserialize(deserializer)?); } } Ok(()) } fn deserialize_metadata(&self) -> <[U] as Pointee>::Metadata { ptr_meta::metadata(self) } } // `str` // SAFETY: `str` is a byte slice and so has a stable, well-defined layout that // is the same on all targets. It doesn't have any interior mutability. unsafe impl Portable for str {} // SAFETY: `str` is a byte slice and so does not contain any uninit bytes. unsafe impl NoUndef for str {} impl ArchiveUnsized for str { type Archived = str; #[inline] fn archived_metadata(&self) -> ArchivedMetadata { ArchivedUsize::from_native(ptr_meta::metadata(self) as _) } } impl ArchivePointee for str { type ArchivedMetadata = ArchivedUsize; #[inline] fn pointer_metadata( archived: &Self::ArchivedMetadata, ) -> ::Metadata { <[u8]>::pointer_metadata(archived) } } impl SerializeUnsized for str { fn serialize_unsized(&self, serializer: &mut S) -> Result { let result = serializer.pos(); serializer.write(self.as_bytes())?; Ok(result) } } impl DeserializeUnsized for str { unsafe fn deserialize_unsized( &self, _: &mut D, out: *mut str, ) -> Result<(), D::Error> { // SAFETY: The caller has guaranteed that `out` is non-null, properly // aligned, valid for writes, and points to memory allocated according // to the layout for the metadata returned from `deserialize_metadata`. // Therefore, `out` points to at least `self.len()` bytes. // `self.as_ptr()` is valid for reads and points to the bytes of `self` // which are also at least `self.len()` bytes. unsafe { ptr::copy_nonoverlapping( self.as_ptr(), out.cast::(), self.len(), ); } Ok(()) } fn deserialize_metadata(&self) -> ::Metadata { ptr_meta::metadata(self) } } // PhantomData // SAFETY: `PhantomData` always a size of 0 and align of 1, and so has a stable, // well-defined layout that is the same on all targets. 
unsafe impl Portable for PhantomData {} impl Archive for PhantomData { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable() }; type Archived = PhantomData; type Resolver = (); fn resolve(&self, _: Self::Resolver, _: Place) {} } impl Serialize for PhantomData { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize, D> for PhantomData { fn deserialize(&self, _: &mut D) -> Result, D::Error> { Ok(PhantomData) } } // PhantomPinned // SAFETY: `PhantomPinned` always a size of 0 and align of 1, and so has a // stable, well-defined layout that is the same on all targets. unsafe impl Portable for PhantomPinned {} impl Archive for PhantomPinned { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable() }; type Archived = PhantomPinned; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, _: Place) {} } impl Serialize for PhantomPinned { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for PhantomPinned { fn deserialize(&self, _: &mut D) -> Result { Ok(PhantomPinned) } } // `ManuallyDrop` // SAFETY: `ManuallyDrop` is guaranteed to have the same layout and bit // validity as `T`, so it is `Portable` when `T` is `Portable`. 
unsafe impl Portable for ManuallyDrop {} impl Archive for ManuallyDrop { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable_if(T::COPY_OPTIMIZATION.is_enabled()) }; type Archived = ManuallyDrop; type Resolver = T::Resolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { let out_inner = unsafe { out.cast_unchecked::() }; T::resolve(self, resolver, out_inner) } } impl, S: Fallible + ?Sized> Serialize for ManuallyDrop { fn serialize( &self, serializer: &mut S, ) -> Result { T::serialize(self, serializer) } } impl Deserialize, D> for ManuallyDrop where T: Archive, T::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { T::Archived::deserialize(self, deserializer).map(ManuallyDrop::new) } } // `MaybeUninit` // SAFETY: `MaybeUninit` is guaranteed to have the same layout as `T`, and `T` // is portable. `MaybeUninit` does not have interior mutability. unsafe impl Portable for MaybeUninit {} #[cfg(test)] mod tests { use core::{ marker::{PhantomData, PhantomPinned}, mem::ManuallyDrop, }; use crate::{ api::test::{roundtrip, roundtrip_with}, tuple::ArchivedTuple3, }; #[test] fn roundtrip_tuple() { roundtrip_with( &(24, true, 16f32), |(a, b, c), ArchivedTuple3(d, e, f)| { assert_eq!(a, d); assert_eq!(b, e); assert_eq!(c, f); }, ); } #[test] fn roundtrip_array() { roundtrip(&[1, 2, 3, 4, 5, 6]); roundtrip(&[(); 0]); roundtrip(&[(), (), (), ()]); } #[test] fn roundtrip_phantoms() { roundtrip(&PhantomData::<&'static u8>); roundtrip(&PhantomPinned); } #[test] fn roundtrip_manually_drop() { roundtrip(&ManuallyDrop::new(123i8)); } } rkyv-0.8.9/src/impls/core/net.rs000064400000000000000000000322751046102023000146450ustar 00000000000000use core::{ cmp, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, }; use munge::munge; use rancor::Fallible; use crate::{ net::{ ArchivedIpAddr, ArchivedIpv4Addr, ArchivedIpv6Addr, ArchivedSocketAddr, ArchivedSocketAddrV4, 
ArchivedSocketAddrV6, }, traits::NoUndef, Archive, Deserialize, Place, Serialize, }; // Ipv4Addr impl Archive for Ipv4Addr { type Archived = ArchivedIpv4Addr; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { ArchivedIpv4Addr::emplace(self.octets(), out); } } impl Serialize for Ipv4Addr { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedIpv4Addr { fn deserialize(&self, _: &mut D) -> Result { Ok(self.as_ipv4()) } } impl PartialEq for ArchivedIpv4Addr { #[inline] fn eq(&self, other: &Ipv4Addr) -> bool { self.as_ipv4().eq(other) } } impl PartialEq for Ipv4Addr { #[inline] fn eq(&self, other: &ArchivedIpv4Addr) -> bool { other.eq(self) } } impl PartialOrd for ArchivedIpv4Addr { #[inline] fn partial_cmp(&self, other: &Ipv4Addr) -> Option { self.as_ipv4().partial_cmp(other) } } impl PartialOrd for Ipv4Addr { #[inline] fn partial_cmp(&self, other: &ArchivedIpv4Addr) -> Option { other.partial_cmp(self) } } // Ipv6Addr impl Archive for Ipv6Addr { type Archived = ArchivedIpv6Addr; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { ArchivedIpv6Addr::emplace(self.octets(), out); } } impl Serialize for Ipv6Addr { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedIpv6Addr { fn deserialize(&self, _: &mut D) -> Result { Ok(self.as_ipv6()) } } impl PartialEq for ArchivedIpv6Addr { #[inline] fn eq(&self, other: &Ipv6Addr) -> bool { self.as_ipv6().eq(other) } } impl PartialEq for Ipv6Addr { #[inline] fn eq(&self, other: &ArchivedIpv6Addr) -> bool { other.eq(self) } } impl PartialOrd for ArchivedIpv6Addr { #[inline] fn partial_cmp(&self, other: &Ipv6Addr) -> Option { self.as_ipv6().partial_cmp(other) } } impl PartialOrd for Ipv6Addr { #[inline] fn partial_cmp(&self, other: &ArchivedIpv6Addr) -> Option { other.partial_cmp(self) } } // IpAddr #[allow(dead_code)] #[repr(u8)] enum ArchivedIpAddrTag { V4, V6, } // SAFETY: `ArchivedIpArrdTag` is `repr(u8)` and so 
always consists of a single // well-defined byte. unsafe impl NoUndef for ArchivedIpAddrTag {} #[repr(C)] struct ArchivedIpAddrVariantV4(ArchivedIpAddrTag, ArchivedIpv4Addr); #[repr(C)] struct ArchivedIpAddrVariantV6(ArchivedIpAddrTag, ArchivedIpv6Addr); impl Archive for IpAddr { type Archived = ArchivedIpAddr; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { match self { IpAddr::V4(ipv4_addr) => { let out = unsafe { out.cast_unchecked::() }; munge!(let ArchivedIpAddrVariantV4(tag, out_ipv4_addr) = out); tag.write(ArchivedIpAddrTag::V4); ArchivedIpv4Addr::emplace(ipv4_addr.octets(), out_ipv4_addr); } IpAddr::V6(ipv6_addr) => { let out = unsafe { out.cast_unchecked::() }; munge!(let ArchivedIpAddrVariantV6(tag, out_ipv6_addr) = out); tag.write(ArchivedIpAddrTag::V6); ArchivedIpv6Addr::emplace(ipv6_addr.octets(), out_ipv6_addr); } } } } impl Serialize for IpAddr { fn serialize( &self, serializer: &mut S, ) -> Result { match self { IpAddr::V4(ipv4_addr) => ipv4_addr.serialize(serializer), IpAddr::V6(ipv6_addr) => ipv6_addr.serialize(serializer), } } } impl Deserialize for ArchivedIpAddr { fn deserialize(&self, deserializer: &mut D) -> Result { match self { ArchivedIpAddr::V4(ipv4_addr) => { Ok(IpAddr::V4(ipv4_addr.deserialize(deserializer)?)) } ArchivedIpAddr::V6(ipv6_addr) => { Ok(IpAddr::V6(ipv6_addr.deserialize(deserializer)?)) } } } } impl PartialEq for ArchivedIpAddr { #[inline] fn eq(&self, other: &IpAddr) -> bool { match self { ArchivedIpAddr::V4(self_ip) => { if let IpAddr::V4(other_ip) = other { self_ip.eq(other_ip) } else { false } } ArchivedIpAddr::V6(self_ip) => { if let IpAddr::V6(other_ip) = other { self_ip.eq(other_ip) } else { false } } } } } impl PartialEq for IpAddr { #[inline] fn eq(&self, other: &ArchivedIpAddr) -> bool { other.eq(self) } } impl PartialOrd for ArchivedIpAddr { #[inline] fn partial_cmp(&self, other: &IpAddr) -> Option { self.as_ipaddr().partial_cmp(other) } } impl PartialOrd for IpAddr { #[inline] fn 
partial_cmp(&self, other: &ArchivedIpAddr) -> Option { other.partial_cmp(self) } } // SocketAddrV4 impl Archive for SocketAddrV4 { type Archived = ArchivedSocketAddrV4; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { ArchivedSocketAddrV4::emplace(self, out); } } impl Serialize for SocketAddrV4 { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedSocketAddrV4 where D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result { let ip = self.ip().deserialize(deserializer)?; Ok(SocketAddrV4::new(ip, self.port())) } } impl PartialEq for ArchivedSocketAddrV4 { #[inline] fn eq(&self, other: &SocketAddrV4) -> bool { self.as_socket_addr_v4().eq(other) } } impl PartialEq for SocketAddrV4 { #[inline] fn eq(&self, other: &ArchivedSocketAddrV4) -> bool { other.eq(self) } } impl PartialOrd for ArchivedSocketAddrV4 { #[inline] fn partial_cmp(&self, other: &SocketAddrV4) -> Option { self.as_socket_addr_v4().partial_cmp(other) } } impl PartialOrd for SocketAddrV4 { #[inline] fn partial_cmp( &self, other: &ArchivedSocketAddrV4, ) -> Option { other.partial_cmp(self) } } // SocketAddrV6 impl Archive for SocketAddrV6 { type Archived = ArchivedSocketAddrV6; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { ArchivedSocketAddrV6::emplace(self, out); } } impl Serialize for SocketAddrV6 { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedSocketAddrV6 { fn deserialize( &self, deserializer: &mut D, ) -> Result { let ip = self.ip().deserialize(deserializer)?; Ok(SocketAddrV6::new( ip, self.port(), self.flowinfo(), self.scope_id(), )) } } impl PartialEq for ArchivedSocketAddrV6 { #[inline] fn eq(&self, other: &SocketAddrV6) -> bool { self.as_socket_addr_v6().eq(other) } } impl PartialEq for SocketAddrV6 { #[inline] fn eq(&self, other: &ArchivedSocketAddrV6) -> bool { other.eq(self) } } impl PartialOrd for ArchivedSocketAddrV6 { #[inline] fn 
partial_cmp(&self, other: &SocketAddrV6) -> Option { self.as_socket_addr_v6().partial_cmp(other) } } impl PartialOrd for SocketAddrV6 { #[inline] fn partial_cmp( &self, other: &ArchivedSocketAddrV6, ) -> Option { other.partial_cmp(self) } } // SocketAddr #[allow(dead_code)] #[repr(u8)] enum ArchivedSocketAddrTag { V4, V6, } // SAFETY: `ArchivedSocketAddrTag` is `repr(u8)` and so always consists of a // single well-defined byte. unsafe impl NoUndef for ArchivedSocketAddrTag {} #[repr(C)] struct ArchivedSocketAddrVariantV4(ArchivedSocketAddrTag, ArchivedSocketAddrV4); #[repr(C)] struct ArchivedSocketAddrVariantV6(ArchivedSocketAddrTag, ArchivedSocketAddrV6); impl Archive for SocketAddr { type Archived = ArchivedSocketAddr; type Resolver = (); #[inline] fn resolve(&self, resolver: Self::Resolver, out: Place) { match self { SocketAddr::V4(socket_addr) => { let out = unsafe { out.cast_unchecked::() }; munge! { let ArchivedSocketAddrVariantV4(tag, out_socket_addr) = out; } tag.write(ArchivedSocketAddrTag::V4); socket_addr.resolve(resolver, out_socket_addr); } SocketAddr::V6(socket_addr) => { let out = unsafe { out.cast_unchecked::() }; munge! 
{ let ArchivedSocketAddrVariantV6(tag, out_socket_addr) = out; } tag.write(ArchivedSocketAddrTag::V6); socket_addr.resolve(resolver, out_socket_addr); } } } } impl Serialize for SocketAddr { fn serialize( &self, serializer: &mut S, ) -> Result { match self { SocketAddr::V4(socket_addr) => socket_addr.serialize(serializer), SocketAddr::V6(socket_addr) => socket_addr.serialize(serializer), } } } impl Deserialize for ArchivedSocketAddr { fn deserialize( &self, deserializer: &mut D, ) -> Result { match self { ArchivedSocketAddr::V4(socket_addr) => { Ok(SocketAddr::V4(socket_addr.deserialize(deserializer)?)) } ArchivedSocketAddr::V6(socket_addr) => { Ok(SocketAddr::V6(socket_addr.deserialize(deserializer)?)) } } } } impl PartialEq for ArchivedSocketAddr { #[inline] fn eq(&self, other: &SocketAddr) -> bool { self.as_socket_addr().eq(other) } } impl PartialEq for SocketAddr { #[inline] fn eq(&self, other: &ArchivedSocketAddr) -> bool { other.eq(self) } } impl PartialOrd for ArchivedSocketAddr { #[inline] fn partial_cmp(&self, other: &SocketAddr) -> Option { self.as_socket_addr().partial_cmp(other) } } impl PartialOrd for SocketAddr { #[inline] fn partial_cmp(&self, other: &ArchivedSocketAddr) -> Option { other.partial_cmp(self) } } #[cfg(test)] mod tests { use core::net::{ IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, }; use crate::api::test::roundtrip; #[test] fn roundtrip_ipv4_addr() { roundtrip(&Ipv4Addr::new(31, 41, 59, 26)); } #[test] fn roundtrip_ipv6_addr() { roundtrip(&Ipv6Addr::new(31, 41, 59, 26, 53, 58, 97, 93)); } #[test] fn roundtrip_ip_addr() { roundtrip(&IpAddr::V4(Ipv4Addr::new(31, 41, 59, 26))); roundtrip(&IpAddr::V6(Ipv6Addr::new(31, 41, 59, 26, 53, 58, 97, 93))); } #[test] fn roundtrip_socket_addr_v4() { roundtrip(&SocketAddrV4::new(Ipv4Addr::new(31, 41, 59, 26), 5358)); } #[test] fn roundtrip_socket_addr_v6() { roundtrip(&SocketAddrV6::new( Ipv6Addr::new(31, 31, 59, 26, 53, 58, 97, 93), 2384, 0, 0, )); } #[test] fn 
roundtrip_socket_addr() { roundtrip(&SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::new(31, 41, 59, 26), 5358, ))); roundtrip(&SocketAddr::V6(SocketAddrV6::new( Ipv6Addr::new(31, 31, 59, 26, 53, 58, 97, 93), 2384, 0, 0, ))); } } rkyv-0.8.9/src/impls/core/ops.rs000064400000000000000000000271101046102023000146500ustar 00000000000000use core::{ hint::unreachable_unchecked, ops::{ Bound, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, }, }; use munge::munge; use rancor::Fallible; use crate::{ ops::{ ArchivedBound, ArchivedRange, ArchivedRangeFrom, ArchivedRangeFull, ArchivedRangeInclusive, ArchivedRangeTo, ArchivedRangeToInclusive, }, traits::{CopyOptimization, NoUndef}, Archive, Deserialize, Place, Serialize, }; // RangeFull impl Archive for RangeFull { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable() }; type Archived = ArchivedRangeFull; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, _: Place) {} } impl Serialize for RangeFull { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedRangeFull { fn deserialize(&self, _: &mut D) -> Result { Ok(RangeFull) } } impl PartialEq for ArchivedRangeFull { fn eq(&self, _: &RangeFull) -> bool { true } } // Range impl Archive for Range { type Archived = ArchivedRange; type Resolver = Range; fn resolve(&self, resolver: Self::Resolver, out: Place) { munge!(let ArchivedRange { start, end } = out); self.start.resolve(resolver.start, start); self.end.resolve(resolver.end, end); } } impl, S: Fallible + ?Sized> Serialize for Range { fn serialize( &self, serializer: &mut S, ) -> Result { Ok(Range { start: self.start.serialize(serializer)?, end: self.end.serialize(serializer)?, }) } } impl Deserialize, D> for ArchivedRange where T: Archive, T::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { Ok(Range { start: self.start.deserialize(deserializer)?, end: 
self.end.deserialize(deserializer)?, }) } } impl> PartialEq> for ArchivedRange { fn eq(&self, other: &Range) -> bool { self.start.eq(&other.start) && self.end.eq(&other.end) } } // RangeInclusive impl Archive for RangeInclusive { type Archived = ArchivedRangeInclusive; type Resolver = Range; fn resolve(&self, resolver: Self::Resolver, out: Place) { munge!(let ArchivedRangeInclusive { start, end } = out); self.start().resolve(resolver.start, start); self.end().resolve(resolver.end, end); } } impl, S: Fallible + ?Sized> Serialize for RangeInclusive { fn serialize( &self, serializer: &mut S, ) -> Result { Ok(Range { start: self.start().serialize(serializer)?, end: self.end().serialize(serializer)?, }) } } impl Deserialize, D> for ArchivedRangeInclusive where T: Archive, T::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { Ok(RangeInclusive::new( self.start.deserialize(deserializer)?, self.end.deserialize(deserializer)?, )) } } impl PartialEq> for ArchivedRangeInclusive where U: PartialEq, { fn eq(&self, other: &RangeInclusive) -> bool { self.start.eq(other.start()) && self.end.eq(other.end()) } } // RangeFrom impl Archive for RangeFrom { type Archived = ArchivedRangeFrom; type Resolver = RangeFrom; fn resolve(&self, resolver: Self::Resolver, out: Place) { munge!(let ArchivedRangeFrom { start } = out); self.start.resolve(resolver.start, start); } } impl, S: Fallible + ?Sized> Serialize for RangeFrom { fn serialize( &self, serializer: &mut S, ) -> Result { Ok(RangeFrom { start: self.start.serialize(serializer)?, }) } } impl Deserialize, D> for ArchivedRangeFrom where T: Archive, D: Fallible + ?Sized, T::Archived: Deserialize, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { Ok(RangeFrom { start: self.start.deserialize(deserializer)?, }) } } impl> PartialEq> for ArchivedRangeFrom { fn eq(&self, other: &RangeFrom) -> bool { self.start.eq(&other.start) } } // RangeTo impl Archive for 
RangeTo { type Archived = ArchivedRangeTo; type Resolver = RangeTo; fn resolve(&self, resolver: Self::Resolver, out: Place) { munge!(let ArchivedRangeTo { end } = out); self.end.resolve(resolver.end, end); } } impl, S: Fallible + ?Sized> Serialize for RangeTo { fn serialize( &self, serializer: &mut S, ) -> Result { Ok(RangeTo { end: self.end.serialize(serializer)?, }) } } impl Deserialize, D> for ArchivedRangeTo where T: Archive, D: Fallible + ?Sized, T::Archived: Deserialize, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { Ok(RangeTo { end: self.end.deserialize(deserializer)?, }) } } impl> PartialEq> for ArchivedRangeTo { fn eq(&self, other: &RangeTo) -> bool { self.end.eq(&other.end) } } // RangeToInclusive impl Archive for RangeToInclusive { type Archived = ArchivedRangeToInclusive; type Resolver = RangeToInclusive; fn resolve(&self, resolver: Self::Resolver, out: Place) { munge!(let ArchivedRangeToInclusive { end } = out); self.end.resolve(resolver.end, end); } } impl Serialize for RangeToInclusive where T: Serialize, S: Fallible + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { Ok(RangeToInclusive { end: self.end.serialize(serializer)?, }) } } impl Deserialize, D> for ArchivedRangeToInclusive where T: Archive, T::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { Ok(RangeToInclusive { end: self.end.deserialize(deserializer)?, }) } } impl PartialEq> for ArchivedRangeToInclusive where U: PartialEq, { fn eq(&self, other: &RangeToInclusive) -> bool { self.end.eq(&other.end) } } // Bound #[allow(dead_code)] #[repr(u8)] enum ArchivedBoundTag { Included, Excluded, Unbounded, } // SAFETY: `ArchivedBoundTag` is `repr(u8)` and so always consists of a single // well-defined byte. 
unsafe impl NoUndef for ArchivedBoundTag {} #[repr(C)] struct ArchivedBoundVariantIncluded(ArchivedBoundTag, T); #[repr(C)] struct ArchivedBoundVariantExcluded(ArchivedBoundTag, T); #[repr(C)] struct ArchivedBoundVariantUnbounded(ArchivedBoundTag); impl Archive for Bound { type Archived = ArchivedBound; type Resolver = Bound; fn resolve(&self, resolver: Self::Resolver, out: Place) { match resolver { Bound::Included(resolver) => { let out = unsafe { out.cast_unchecked::< ArchivedBoundVariantIncluded >() }; munge!(let ArchivedBoundVariantIncluded(tag, out_value) = out); tag.write(ArchivedBoundTag::Included); let value = if let Bound::Included(value) = self.as_ref() { value } else { unsafe { unreachable_unchecked(); } }; value.resolve(resolver, out_value); } Bound::Excluded(resolver) => { let out = unsafe { out.cast_unchecked::< ArchivedBoundVariantExcluded >() }; munge!(let ArchivedBoundVariantExcluded(tag, out_value) = out); tag.write(ArchivedBoundTag::Excluded); let value = if let Bound::Excluded(value) = self.as_ref() { value } else { unsafe { unreachable_unchecked(); } }; value.resolve(resolver, out_value); } Bound::Unbounded => { let out = unsafe { out.cast_unchecked::() }; munge!(let ArchivedBoundVariantUnbounded(tag) = out); tag.write(ArchivedBoundTag::Unbounded); } } } } impl, S: Fallible + ?Sized> Serialize for Bound { fn serialize( &self, serializer: &mut S, ) -> Result { match self.as_ref() { Bound::Included(x) => x.serialize(serializer).map(Bound::Included), Bound::Excluded(x) => x.serialize(serializer).map(Bound::Excluded), Bound::Unbounded => Ok(Bound::Unbounded), } } } impl Deserialize, D> for ArchivedBound where T: Archive, T::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, ::Error> { Ok(match self { ArchivedBound::Included(value) => { Bound::Included(value.deserialize(deserializer)?) } ArchivedBound::Excluded(value) => { Bound::Excluded(value.deserialize(deserializer)?) 
} ArchivedBound::Unbounded => Bound::Unbounded, }) } } impl PartialEq> for ArchivedBound where U: PartialEq, { fn eq(&self, other: &Bound) -> bool { match (self, other) { (ArchivedBound::Included(this), Bound::Included(other)) | (ArchivedBound::Excluded(this), Bound::Excluded(other)) => { this.eq(other) } (ArchivedBound::Unbounded, Bound::Unbounded) => true, _ => false, } } } #[cfg(test)] mod tests { use core::ops::Bound; use crate::api::test::roundtrip; #[test] fn roundtrip_ranges() { roundtrip(&..); roundtrip(&(0u8..100u8)); roundtrip(&(0u8..=100u8)); roundtrip(&(0u8..)); roundtrip(&(..100u8)); roundtrip(&(..=100u8)); } #[test] fn roundtrip_bound() { roundtrip(&Bound::Included(100u8)); roundtrip(&Bound::Excluded(100u8)); roundtrip(&Bound::::Unbounded); } } rkyv-0.8.9/src/impls/core/option.rs000064400000000000000000000050621046102023000153610ustar 00000000000000use core::hint::unreachable_unchecked; use munge::munge; use rancor::Fallible; use crate::{ option::ArchivedOption, traits::NoUndef, Archive, Deserialize, Place, Serialize, }; #[allow(dead_code)] #[repr(u8)] enum ArchivedOptionTag { None, Some, } // SAFETY: `ArchivedOptionTag` is `repr(u8)` and so always consists of a single // well-defined byte. 
unsafe impl NoUndef for ArchivedOptionTag {} #[repr(C)] struct ArchivedOptionVariantNone(ArchivedOptionTag); #[repr(C)] struct ArchivedOptionVariantSome(ArchivedOptionTag, T); impl Archive for Option { type Archived = ArchivedOption; type Resolver = Option; fn resolve(&self, resolver: Self::Resolver, out: Place) { match resolver { None => { let out = unsafe { out.cast_unchecked::() }; munge!(let ArchivedOptionVariantNone(tag) = out); tag.write(ArchivedOptionTag::None); } Some(resolver) => { let out = unsafe { out .cast_unchecked::>() }; munge!(let ArchivedOptionVariantSome(tag, out_value) = out); tag.write(ArchivedOptionTag::Some); let value = if let Some(value) = self.as_ref() { value } else { unsafe { unreachable_unchecked(); } }; value.resolve(resolver, out_value); } } } } impl, S: Fallible + ?Sized> Serialize for Option { fn serialize( &self, serializer: &mut S, ) -> Result { self.as_ref() .map(|value| value.serialize(serializer)) .transpose() } } impl Deserialize, D> for ArchivedOption where T: Archive, T::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { Ok(match self { ArchivedOption::Some(value) => { Some(value.deserialize(deserializer)?) 
} ArchivedOption::None => None, }) } } #[cfg(test)] mod tests { use crate::api::test::roundtrip; #[test] fn roundtrip_option() { roundtrip(&Option::<()>::None); roundtrip(&Some(42)); } } rkyv-0.8.9/src/impls/core/primitive.rs000064400000000000000000000312151046102023000160600ustar 00000000000000use core::num::{ NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, }; use rancor::Fallible; use crate::{ primitive::{ ArchivedChar, ArchivedF32, ArchivedF64, ArchivedI128, ArchivedI16, ArchivedI32, ArchivedI64, ArchivedIsize, ArchivedNonZeroI128, ArchivedNonZeroI16, ArchivedNonZeroI32, ArchivedNonZeroI64, ArchivedNonZeroIsize, ArchivedNonZeroU128, ArchivedNonZeroU16, ArchivedNonZeroU32, ArchivedNonZeroU64, ArchivedNonZeroUsize, ArchivedU128, ArchivedU16, ArchivedU32, ArchivedU64, ArchivedUsize, }, traits::{CopyOptimization, NoUndef}, Archive, Deserialize, Place, Portable, Serialize, }; macro_rules! unsafe_impl_primitive { ($($ty:ty),* $(,)?) => { $( unsafe impl NoUndef for $ty {} unsafe impl Portable for $ty {} )* }; } unsafe_impl_primitive! 
{ (), bool, i8, u8, NonZeroI8, NonZeroU8, rend::NonZeroI16_be, rend::NonZeroI16_le, rend::NonZeroI32_be, rend::NonZeroI32_le, rend::NonZeroI64_be, rend::NonZeroI64_le, rend::NonZeroI128_be, rend::NonZeroI128_le, rend::NonZeroU16_be, rend::NonZeroU16_le, rend::NonZeroU32_be, rend::NonZeroU32_le, rend::NonZeroU64_be, rend::NonZeroU64_le, rend::NonZeroU128_be, rend::NonZeroU128_le, rend::char_be, rend::char_le, rend::f32_be, rend::f32_le, rend::f64_be, rend::f64_le, rend::i16_be, rend::i16_le, rend::i32_be, rend::i32_le, rend::i64_be, rend::i64_le, rend::i128_be, rend::i128_le, rend::u16_be, rend::u16_le, rend::u32_be, rend::u32_le, rend::u64_be, rend::u64_le, rend::u128_be, rend::u128_le, rend::unaligned::NonZeroI16_ube, rend::unaligned::NonZeroI16_ule, rend::unaligned::NonZeroI32_ube, rend::unaligned::NonZeroI32_ule, rend::unaligned::NonZeroI64_ube, rend::unaligned::NonZeroI64_ule, rend::unaligned::NonZeroI128_ube, rend::unaligned::NonZeroI128_ule, rend::unaligned::NonZeroU16_ube, rend::unaligned::NonZeroU16_ule, rend::unaligned::NonZeroU32_ube, rend::unaligned::NonZeroU32_ule, rend::unaligned::NonZeroU64_ube, rend::unaligned::NonZeroU64_ule, rend::unaligned::NonZeroU128_ube, rend::unaligned::NonZeroU128_ule, rend::unaligned::char_ube, rend::unaligned::char_ule, rend::unaligned::f32_ube, rend::unaligned::f32_ule, rend::unaligned::f64_ube, rend::unaligned::f64_ule, rend::unaligned::i16_ube, rend::unaligned::i16_ule, rend::unaligned::i32_ube, rend::unaligned::i32_ule, rend::unaligned::i64_ube, rend::unaligned::i64_ule, rend::unaligned::i128_ube, rend::unaligned::i128_ule, rend::unaligned::u16_ube, rend::unaligned::u16_ule, rend::unaligned::u32_ube, rend::unaligned::u32_ule, rend::unaligned::u64_ube, rend::unaligned::u64_ule, rend::unaligned::u128_ube, rend::unaligned::u128_ule, } macro_rules! impl_serialize_noop { ($type:ty) => { impl Serialize for $type { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } }; } macro_rules! 
impl_archive_self_primitive { ($type:ty) => { impl Archive for $type { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable() }; type Archived = Self; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { out.write(*self); } } impl_serialize_noop!($type); impl Deserialize<$type, D> for $type { fn deserialize(&self, _: &mut D) -> Result<$type, D::Error> { Ok(*self) } } }; } macro_rules! impl_archive_self_primitives { ($($type:ty;)*) => { $( impl_archive_self_primitive!($type); )* } } impl_archive_self_primitives! { (); bool; i8; u8; NonZeroI8; NonZeroU8; } #[cfg(any( all(not(feature = "big_endian"), target_endian = "little"), all(feature = "big_endian", target_endian = "big"), ))] const MULTIBYTE_PRIMITIVES_ARE_TRIVIALLY_COPYABLE: bool = true; #[cfg(any( all(feature = "big_endian", target_endian = "little"), all(not(feature = "big_endian"), target_endian = "big"), ))] const MULTIBYTE_PRIMITIVES_ARE_TRIVIALLY_COPYABLE: bool = false; macro_rules! impl_multibyte_primitive { ($archived:ident : $type:ty) => { impl Archive for $type { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable_if( MULTIBYTE_PRIMITIVES_ARE_TRIVIALLY_COPYABLE, ) }; type Archived = $archived; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { out.write(<$archived>::from_native(*self)); } } impl_serialize_noop!($type); impl Deserialize<$type, D> for $archived { fn deserialize(&self, _: &mut D) -> Result<$type, D::Error> { Ok(self.to_native()) } } }; } macro_rules! impl_multibyte_primitives { ($($archived:ident: $type:ty),* $(,)?) => { $( impl_multibyte_primitive!($archived: $type); )* }; } impl_multibyte_primitives! 
{ ArchivedI16: i16, ArchivedI32: i32, ArchivedI64: i64, ArchivedI128: i128, ArchivedU16: u16, ArchivedU32: u32, ArchivedU64: u64, ArchivedU128: u128, ArchivedF32: f32, ArchivedF64: f64, ArchivedChar: char, ArchivedNonZeroI16: NonZeroI16, ArchivedNonZeroI32: NonZeroI32, ArchivedNonZeroI64: NonZeroI64, ArchivedNonZeroI128: NonZeroI128, ArchivedNonZeroU16: NonZeroU16, ArchivedNonZeroU32: NonZeroU32, ArchivedNonZeroU64: NonZeroU64, ArchivedNonZeroU128: NonZeroU128, } // usize #[cfg(any( all(target_pointer_width = "16", feature = "pointer_width_16"), all( target_pointer_width = "32", not(any(feature = "pointer_width_16", feature = "pointer_width_64")), ), all(target_pointer_width = "64", feature = "pointer_width_64"), ))] const POINTER_WIDTH_EQUALS_ARCHIVED_POINTER_WIDTH: bool = true; #[cfg(not(any( all(target_pointer_width = "16", feature = "pointer_width_16"), all( target_pointer_width = "32", not(any(feature = "pointer_width_16", feature = "pointer_width_64")), ), all(target_pointer_width = "64", feature = "pointer_width_64"), )))] const POINTER_WIDTH_EQUALS_ARCHIVED_POINTER_WIDTH: bool = false; impl Archive for usize { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable_if( MULTIBYTE_PRIMITIVES_ARE_TRIVIALLY_COPYABLE && POINTER_WIDTH_EQUALS_ARCHIVED_POINTER_WIDTH, ) }; type Archived = ArchivedUsize; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { out.write(ArchivedUsize::from_native(*self as _)); } } impl Serialize for usize { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedUsize { fn deserialize(&self, _: &mut D) -> Result { Ok(self.to_native() as usize) } } // isize impl Archive for isize { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable_if( MULTIBYTE_PRIMITIVES_ARE_TRIVIALLY_COPYABLE && POINTER_WIDTH_EQUALS_ARCHIVED_POINTER_WIDTH, ) }; type Archived = ArchivedIsize; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: 
Place) { out.write(ArchivedIsize::from_native(*self as _)); } } impl Serialize for isize { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedIsize { fn deserialize(&self, _: &mut D) -> Result { Ok(self.to_native() as isize) } } // NonZeroUsize impl Archive for NonZeroUsize { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable_if( MULTIBYTE_PRIMITIVES_ARE_TRIVIALLY_COPYABLE && POINTER_WIDTH_EQUALS_ARCHIVED_POINTER_WIDTH, ) }; type Archived = ArchivedNonZeroUsize; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { let value = unsafe { ArchivedNonZeroUsize::new_unchecked(self.get() as _) }; out.write(value); } } impl Serialize for NonZeroUsize { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedNonZeroUsize where D: Fallible + ?Sized, { fn deserialize(&self, _: &mut D) -> Result { Ok(unsafe { NonZeroUsize::new_unchecked(self.get() as usize) }) } } // NonZeroIsize impl Archive for NonZeroIsize { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable_if( MULTIBYTE_PRIMITIVES_ARE_TRIVIALLY_COPYABLE && POINTER_WIDTH_EQUALS_ARCHIVED_POINTER_WIDTH, ) }; type Archived = ArchivedNonZeroIsize; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { let value = unsafe { ArchivedNonZeroIsize::new_unchecked(self.get() as _) }; out.write(value); } } impl Serialize for NonZeroIsize { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedNonZeroIsize where D: Fallible + ?Sized, { fn deserialize(&self, _: &mut D) -> Result { Ok(unsafe { NonZeroIsize::new_unchecked(self.get() as isize) }) } } #[cfg(test)] mod tests { use core::num::{ NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, }; use crate::api::test::{roundtrip, roundtrip_with}; #[test] fn roundtrip_portable_primitives() { 
roundtrip(&()); roundtrip(&true); roundtrip(&false); roundtrip(&123i8); roundtrip(&123u8); roundtrip(&NonZeroI8::new(123i8).unwrap()); roundtrip(&NonZeroU8::new(123u8).unwrap()); } #[test] fn roundtrip_multibyte_primitives() { roundtrip(&12345i16); roundtrip(&1234567890i32); roundtrip(&1234567890123456789i64); roundtrip(&123456789012345678901234567890123456789i128); roundtrip(&12345u16); roundtrip(&1234567890u32); roundtrip(&12345678901234567890u64); roundtrip(&123456789012345678901234567890123456789u128); roundtrip(&1234567f32); roundtrip(&12345678901234f64); roundtrip(&'x'); roundtrip(&'🥺'); roundtrip(&NonZeroI16::new(12345i16).unwrap()); roundtrip(&NonZeroI32::new(1234567890i32).unwrap()); roundtrip(&NonZeroI64::new(1234567890123456789i64).unwrap()); roundtrip( &NonZeroI128::new(123456789012345678901234567890123456789i128) .unwrap(), ); roundtrip(&NonZeroU16::new(12345u16).unwrap()); roundtrip(&NonZeroU32::new(1234567890u32).unwrap()); roundtrip(&NonZeroU64::new(12345678901234567890u64).unwrap()); roundtrip( &NonZeroU128::new(123456789012345678901234567890123456789u128) .unwrap(), ); } #[test] fn roundtrip_sizes() { roundtrip_with(&12345isize, |a, b| { assert_eq!(*a, isize::try_from(b.to_native()).unwrap()) }); roundtrip_with(&12345usize, |a, b| { assert_eq!(*a, usize::try_from(b.to_native()).unwrap()) }); roundtrip_with(&NonZeroIsize::new(12345isize).unwrap(), |a, b| { assert_eq!(*a, NonZeroIsize::try_from(b.to_native()).unwrap()) }); roundtrip_with(&NonZeroUsize::new(12345usize).unwrap(), |a, b| { assert_eq!(*a, NonZeroUsize::try_from(b.to_native()).unwrap()) }); } } rkyv-0.8.9/src/impls/core/result.rs000064400000000000000000000057451046102023000153770ustar 00000000000000use core::hint::unreachable_unchecked; use munge::munge; use rancor::Fallible; use crate::{ result::ArchivedResult, traits::NoUndef, Archive, Deserialize, Place, Serialize, }; #[allow(dead_code)] #[repr(u8)] enum ArchivedResultTag { Ok, Err, } // SAFETY: `ArchivedResultTag` is `repr(u8)` and 
so always consists of a single // well-defined byte. unsafe impl NoUndef for ArchivedResultTag {} #[repr(C)] struct ArchivedResultVariantOk(ArchivedResultTag, T); #[repr(C)] struct ArchivedResultVariantErr(ArchivedResultTag, U); impl Archive for Result { type Archived = ArchivedResult; type Resolver = Result; fn resolve(&self, resolver: Self::Resolver, out: Place) { match resolver { Ok(resolver) => { let out = unsafe { out.cast_unchecked::>() }; munge!(let ArchivedResultVariantOk(tag, out_value) = out); tag.write(ArchivedResultTag::Ok); match self.as_ref() { Ok(value) => value.resolve(resolver, out_value), Err(_) => unsafe { unreachable_unchecked() }, } } Err(resolver) => { let out = unsafe { out.cast_unchecked::>( ) }; munge!(let ArchivedResultVariantErr(tag, out_err) = out); tag.write(ArchivedResultTag::Err); match self.as_ref() { Ok(_) => unsafe { unreachable_unchecked() }, Err(err) => err.resolve(resolver, out_err), } } } } } impl Serialize for Result where T: Serialize, U: Serialize, S: Fallible + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { Ok(match self.as_ref() { Ok(value) => Ok(value.serialize(serializer)?), Err(value) => Err(value.serialize(serializer)?), }) } } impl Deserialize, D> for ArchivedResult where T: Archive, U: Archive, D: Fallible + ?Sized, T::Archived: Deserialize, U::Archived: Deserialize, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { match self { ArchivedResult::Ok(value) => { Ok(Ok(value.deserialize(deserializer)?)) } ArchivedResult::Err(err) => Ok(Err(err.deserialize(deserializer)?)), } } } #[cfg(test)] mod tests { use crate::api::test::roundtrip; #[test] fn roundtrip_result() { roundtrip(&Result::::Ok(12345i32)); roundtrip(&Result::::Err(12345u32)); } } rkyv-0.8.9/src/impls/core/time.rs000064400000000000000000000044161046102023000150110ustar 00000000000000use core::time::Duration; use rancor::Fallible; use crate::{time::ArchivedDuration, Archive, Deserialize, Place, Serialize}; impl Archive 
for Duration { type Archived = ArchivedDuration; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { unsafe { ArchivedDuration::emplace( self.as_secs(), self.subsec_nanos(), out.ptr(), ); } } } impl Serialize for Duration { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for ArchivedDuration { fn deserialize(&self, _: &mut D) -> Result { Ok(Duration::new(self.as_secs(), self.subsec_nanos())) } } impl PartialEq for ArchivedDuration { #[inline] fn eq(&self, other: &Duration) -> bool { self.as_nanos() == other.as_nanos() && self.as_secs() == other.as_secs() } } impl PartialEq for Duration { #[inline] fn eq(&self, other: &ArchivedDuration) -> bool { other.eq(self) } } impl From for Duration { #[inline] fn from(duration: ArchivedDuration) -> Self { Self::new(duration.as_secs(), duration.subsec_nanos()) } } #[cfg(test)] mod tests { use core::time::Duration; use crate::api::test::roundtrip; #[test] fn roundtrip_duration() { roundtrip(&Duration::new(1234, 5678)); } // Synthetic buffer is for 32-bit little-endian #[cfg(all( not(feature = "pointer_width_16"), not(feature = "pointer_width_64"), not(feature = "big_endian"), feature = "bytecheck", ))] #[test] fn invalid_duration() { use rancor::Failure; use crate::{api::low::from_bytes, util::Align}; // This buffer is invalid because `nanos` is equal to 1 billion (nanos // may not be one billion or more) let data = Align([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // secs 0x00, 0xca, 0x9a, 0x3b, // nanos 0x00, 0x00, 0x00, 0x00, // padding ]); from_bytes::(&*data).unwrap_err(); } } rkyv-0.8.9/src/impls/core/with/atomic/_macros.rs000064400000000000000000000007741046102023000177300ustar 00000000000000macro_rules! 
impl_serialize_with_atomic_load { ($atomic:ty) => { impl $crate::with::SerializeWith<$atomic, S> for $crate::with::AtomicLoad where S: $crate::rancor::Fallible + ?Sized, SO: $crate::impls::core::with::atomic::LoadOrdering, { fn serialize_with( _: &$atomic, _: &mut S, ) -> Result { Ok(()) } } }; } rkyv-0.8.9/src/impls/core/with/atomic/mod.rs000064400000000000000000000035451046102023000170630ustar 00000000000000#[macro_use] mod _macros; #[cfg(any( target_has_atomic = "16", target_has_atomic = "32", target_has_atomic = "64", ))] mod multibyte; use core::sync::atomic::Ordering; #[cfg(target_has_atomic = "8")] use core::sync::atomic::{AtomicBool, AtomicI8, AtomicU8}; use rancor::Fallible; use crate::{ with::{ Acquire, ArchiveWith, AtomicLoad, DeserializeWith, Relaxed, SeqCst, }, Place, }; trait LoadOrdering { const ORDERING: Ordering; } impl LoadOrdering for Relaxed { const ORDERING: Ordering = Ordering::Relaxed; } impl LoadOrdering for Acquire { const ORDERING: Ordering = Ordering::Acquire; } impl LoadOrdering for SeqCst { const ORDERING: Ordering = Ordering::SeqCst; } macro_rules! impl_single_byte_atomic { ($atomic:ty, $non_atomic:ty) => { impl ArchiveWith<$atomic> for AtomicLoad { type Archived = $non_atomic; type Resolver = (); fn resolve_with( field: &$atomic, _: Self::Resolver, out: Place, ) { out.write(field.load(SO::ORDERING)); } } impl_serialize_with_atomic_load!($atomic); impl DeserializeWith<$non_atomic, $atomic, D> for AtomicLoad where D: Fallible + ?Sized, { fn deserialize_with( field: &$non_atomic, _: &mut D, ) -> Result<$atomic, D::Error> { Ok(<$atomic>::new(*field)) } } }; } macro_rules! impl_single_byte_atomics { ($($atomic:ty, $non_atomic:ty);* $(;)?) 
=> { $( impl_single_byte_atomic!($atomic, $non_atomic); )* } } #[cfg(target_has_atomic = "8")] impl_single_byte_atomics!( AtomicBool, bool; AtomicI8, i8; AtomicU8, u8; ); rkyv-0.8.9/src/impls/core/with/atomic/multibyte.rs000064400000000000000000000115641046102023000203220ustar 00000000000000use crate::{ impls::core::with::atomic::LoadOrdering, rancor::Fallible, with::{ArchiveWith, AtomicLoad, DeserializeWith}, Place, }; macro_rules! impl_multi_byte_atomic { ($atomic:ty, $archived:ty) => { impl ArchiveWith<$atomic> for AtomicLoad { type Archived = $archived; type Resolver = (); fn resolve_with( field: &$atomic, _: Self::Resolver, out: Place, ) { out.write(<$archived>::from_native(field.load(SO::ORDERING))); } } impl_serialize_with_atomic_load!($atomic); impl DeserializeWith<$archived, $atomic, D> for AtomicLoad where D: Fallible + ?Sized, { fn deserialize_with( field: &$archived, _: &mut D, ) -> Result<$atomic, D::Error> { Ok(<$atomic>::new(field.to_native())) } } }; } macro_rules! impl_multi_byte_atomics { ($($atomic:ty, $archived: ty);* $(;)?) => { $( impl_multi_byte_atomic!($atomic, $archived); )* } } #[cfg(target_has_atomic = "16")] impl_multi_byte_atomics! { core::sync::atomic::AtomicI16, crate::primitive::ArchivedI16; core::sync::atomic::AtomicU16, crate::primitive::ArchivedU16; rend::AtomicI16_le, rend::i16_le; rend::AtomicI16_be, rend::i16_be; rend::AtomicU16_le, rend::u16_le; rend::AtomicU16_be, rend::u16_be; } #[cfg(target_has_atomic = "32")] impl_multi_byte_atomics! { core::sync::atomic::AtomicI32, crate::primitive::ArchivedI32; core::sync::atomic::AtomicU32, crate::primitive::ArchivedU32; rend::AtomicI32_le, crate::primitive::ArchivedI32; rend::AtomicI32_be, crate::primitive::ArchivedI32; rend::AtomicU32_le, crate::primitive::ArchivedU32; rend::AtomicU32_be, crate::primitive::ArchivedU32; } #[cfg(target_has_atomic = "64")] impl_multi_byte_atomics! 
{ core::sync::atomic::AtomicI64, crate::primitive::ArchivedI64; core::sync::atomic::AtomicU64, crate::primitive::ArchivedU64; rend::AtomicI64_le, crate::primitive::ArchivedI64; rend::AtomicI64_be, crate::primitive::ArchivedI64; rend::AtomicU64_le, crate::primitive::ArchivedU64; rend::AtomicU64_be, crate::primitive::ArchivedU64; } // AtomicUsize macro_rules! impl_atomic_size_type { ($atomic:ty, $archived:ty) => { impl ArchiveWith<$atomic> for AtomicLoad { type Archived = $archived; type Resolver = (); fn resolve_with( field: &$atomic, _: Self::Resolver, out: Place, ) { out.write(<$archived>::from_native( field.load(SO::ORDERING) as _ )); } } impl_serialize_with_atomic_load!($atomic); impl DeserializeWith<$archived, $atomic, D> for AtomicLoad where D: Fallible + ?Sized, { fn deserialize_with( field: &$archived, _: &mut D, ) -> Result<$atomic, D::Error> { Ok(<$atomic>::new(field.to_native() as _)) } } }; } macro_rules! impl_atomic_size_types { ($($atomic:ty, $archived:ty);* $(;)?) => { $( impl_atomic_size_type!($atomic, $archived); )* } } #[cfg(any( all(target_has_atomic = "16", feature = "pointer_width_16"), all( target_has_atomic = "32", not(any(feature = "pointer_width_16", feature = "pointer_width_64")), ), all(target_has_atomic = "64", feature = "pointer_width_64"), ))] impl_atomic_size_types! 
{ core::sync::atomic::AtomicIsize, crate::primitive::ArchivedIsize; core::sync::atomic::AtomicUsize, crate::primitive::ArchivedUsize; } #[cfg(test)] mod tests { #[cfg(target_has_atomic = "32")] #[test] fn with_atomic_load() { use core::sync::atomic::{AtomicU32, Ordering}; use crate::{ api::test::roundtrip, with::{AtomicLoad, Relaxed}, Archive, Deserialize, Serialize, }; #[derive(Archive, Debug, Deserialize, Serialize)] #[rkyv(crate, derive(Debug))] struct Test { #[rkyv(with = AtomicLoad)] a: AtomicU32, } impl PartialEq for Test { fn eq(&self, other: &Self) -> bool { self.a.load(Ordering::Relaxed) == other.a.load(Ordering::Relaxed) } } impl PartialEq for ArchivedTest { fn eq(&self, other: &Test) -> bool { self.a == other.a.load(Ordering::Relaxed) } } let value = Test { a: AtomicU32::new(42), }; roundtrip(&value); } } rkyv-0.8.9/src/impls/core/with/mod.rs000064400000000000000000000777131046102023000156170ustar 00000000000000#[cfg(any( target_has_atomic = "8", target_has_atomic = "16", target_has_atomic = "32", target_has_atomic = "64", ))] mod atomic; mod niching; use core::{ cell::{Cell, UnsafeCell}, hash::{Hash, Hasher}, hint::unreachable_unchecked, marker::PhantomData, num::{ NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, }, }; use munge::munge; use rancor::Fallible; use crate::{ boxed::{ArchivedBox, BoxResolver}, niche::{ niched_option::NichedOption, niching::{DefaultNiche, Niching}, option_nonzero::{ ArchivedOptionNonZeroI128, ArchivedOptionNonZeroI16, ArchivedOptionNonZeroI32, ArchivedOptionNonZeroI64, ArchivedOptionNonZeroI8, ArchivedOptionNonZeroIsize, ArchivedOptionNonZeroU128, ArchivedOptionNonZeroU16, ArchivedOptionNonZeroU32, ArchivedOptionNonZeroU64, ArchivedOptionNonZeroU8, ArchivedOptionNonZeroUsize, }, }, option::ArchivedOption, primitive::{FixedNonZeroIsize, FixedNonZeroUsize}, traits::NoUndef, with::{ ArchiveWith, AsBox, DeserializeWith, Identity, 
Inline, InlineAsBox, Map, MapNiche, Niche, NicheInto, SerializeWith, Skip, Unsafe, }, Archive, ArchiveUnsized, Deserialize, Place, Serialize, SerializeUnsized, }; // Wrapper for O so that we have an Archive and Serialize implementation // and ArchivedVec::serialize_from_* is happy about the bound // constraints pub struct RefWrapper<'o, A, O>(pub &'o O, pub PhantomData); impl, O> Archive for RefWrapper<'_, A, O> { type Archived = >::Archived; type Resolver = >::Resolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { A::resolve_with(self.0, resolver, out) } } impl Serialize for RefWrapper<'_, A, O> where A: ArchiveWith + SerializeWith, S: Fallible + ?Sized, { fn serialize(&self, s: &mut S) -> Result { A::serialize_with(self.0, s) } } impl Hash for RefWrapper<'_, A, O> { fn hash(&self, state: &mut H) { self.0.hash(state) } } impl PartialEq for RefWrapper<'_, A, O> { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } } impl Eq for RefWrapper<'_, A, O> {} // InlineAsBox impl ArchiveWith<&F> for InlineAsBox { type Archived = ArchivedBox; type Resolver = BoxResolver; fn resolve_with( field: &&F, resolver: Self::Resolver, out: Place, ) { ArchivedBox::resolve_from_ref(*field, resolver, out); } } impl SerializeWith<&F, S> for InlineAsBox where F: SerializeUnsized + ?Sized, S: Fallible + ?Sized, { fn serialize_with( field: &&F, serializer: &mut S, ) -> Result { ArchivedBox::serialize_from_ref(*field, serializer) } } // AsBox impl ArchiveWith for AsBox { type Archived = ArchivedBox; type Resolver = BoxResolver; fn resolve_with( field: &F, resolver: Self::Resolver, out: Place, ) { ArchivedBox::resolve_from_ref(field, resolver, out); } } impl SerializeWith for AsBox where F: SerializeUnsized + ?Sized, S: Fallible + ?Sized, { fn serialize_with( field: &F, serializer: &mut S, ) -> Result { ArchivedBox::serialize_from_ref(field, serializer) } } impl DeserializeWith, F, D> for AsBox where F: Archive, F::Archived: Deserialize, D: Fallible + ?Sized, { fn 
deserialize_with( field: &ArchivedBox, deserializer: &mut D, ) -> Result { field.get().deserialize(deserializer) } } // Map // Copy-paste from Option's impls for the most part impl ArchiveWith> for Map where A: ArchiveWith, { type Archived = ArchivedOption<>::Archived>; type Resolver = Option<>::Resolver>; fn resolve_with( field: &Option, resolver: Self::Resolver, out: Place, ) { match resolver { None => { let out = unsafe { out.cast_unchecked::() }; munge!(let ArchivedOptionVariantNone(tag) = out); tag.write(ArchivedOptionTag::None); } Some(resolver) => { let out = unsafe { out.cast_unchecked::>::Archived, >>() }; munge!(let ArchivedOptionVariantSome(tag, out_value) = out); tag.write(ArchivedOptionTag::Some); let value = if let Some(value) = field.as_ref() { value } else { unsafe { unreachable_unchecked(); } }; A::resolve_with(value, resolver, out_value); } } } } impl SerializeWith, S> for Map where S: Fallible + ?Sized, A: ArchiveWith + SerializeWith, { fn serialize_with( field: &Option, s: &mut S, ) -> Result { field .as_ref() .map(|value| A::serialize_with(value, s)) .transpose() } } impl DeserializeWith< ArchivedOption<>::Archived>, Option, D, > for Map where D: Fallible + ?Sized, A: ArchiveWith + DeserializeWith<>::Archived, O, D>, { fn deserialize_with( field: &ArchivedOption<>::Archived>, d: &mut D, ) -> Result, D::Error> { match field { ArchivedOption::Some(value) => { Ok(Some(A::deserialize_with(value, d)?)) } ArchivedOption::None => Ok(None), } } } #[repr(u8)] enum ArchivedOptionTag { None, Some, } // SAFETY: `ArchivedOptionTag` is `repr(u8)` and so always consists of a single // well-defined byte. unsafe impl NoUndef for ArchivedOptionTag {} #[repr(C)] struct ArchivedOptionVariantNone(ArchivedOptionTag); #[repr(C)] struct ArchivedOptionVariantSome(ArchivedOptionTag, T); // Niche macro_rules! 
impl_nonzero_niche { ($ar:ty, $nz:ty, $ne:ty) => { impl ArchiveWith> for Niche { type Archived = $ar; type Resolver = (); #[inline] fn resolve_with( field: &Option<$nz>, _: Self::Resolver, out: Place, ) { <$ar>::resolve_from_option(*field, out); } } impl SerializeWith, S> for Niche { fn serialize_with( _: &Option<$nz>, _: &mut S, ) -> Result { Ok(()) } } impl DeserializeWith<$ar, Option<$nz>, D> for Niche where D: Fallible + ?Sized, { fn deserialize_with( field: &$ar, _: &mut D, ) -> Result, D::Error> { Ok(field.as_ref().map(|x| (*x).into())) } } }; } impl_nonzero_niche!(ArchivedOptionNonZeroI8, NonZeroI8, i8); impl_nonzero_niche!(ArchivedOptionNonZeroI16, NonZeroI16, i16); impl_nonzero_niche!(ArchivedOptionNonZeroI32, NonZeroI32, i32); impl_nonzero_niche!(ArchivedOptionNonZeroI64, NonZeroI64, i64); impl_nonzero_niche!(ArchivedOptionNonZeroI128, NonZeroI128, i128); impl_nonzero_niche!(ArchivedOptionNonZeroU8, NonZeroU8, u8); impl_nonzero_niche!(ArchivedOptionNonZeroU16, NonZeroU16, u16); impl_nonzero_niche!(ArchivedOptionNonZeroU32, NonZeroU32, u32); impl_nonzero_niche!(ArchivedOptionNonZeroU64, NonZeroU64, u64); impl_nonzero_niche!(ArchivedOptionNonZeroU128, NonZeroU128, u128); impl ArchiveWith> for Niche { type Archived = ArchivedOptionNonZeroIsize; type Resolver = (); #[inline] fn resolve_with( field: &Option, _: Self::Resolver, out: Place, ) { let f = field.as_ref().map(|&x| x.try_into().unwrap()); ArchivedOptionNonZeroIsize::resolve_from_option(f, out); } } impl SerializeWith, S> for Niche { fn serialize_with( _: &Option, _: &mut S, ) -> Result { Ok(()) } } impl DeserializeWith, D> for Niche where D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedOptionNonZeroIsize, _: &mut D, ) -> Result, D::Error> { // This conversion is necessary with archive_be and archive_le #[allow(clippy::useless_conversion)] Ok(field .as_ref() .map(|x| FixedNonZeroIsize::from(*x).try_into().unwrap())) } } impl ArchiveWith> for Niche { type Archived = 
ArchivedOptionNonZeroUsize; type Resolver = (); #[inline] fn resolve_with( field: &Option, _: Self::Resolver, out: Place, ) { let f = field.as_ref().map(|&x| x.try_into().unwrap()); ArchivedOptionNonZeroUsize::resolve_from_option(f, out); } } impl SerializeWith, S> for Niche { fn serialize_with( _: &Option, _: &mut S, ) -> Result { Ok(()) } } impl DeserializeWith, D> for Niche where D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedOptionNonZeroUsize, _: &mut D, ) -> Result, D::Error> { // This conversion is necessary with archive_be and archive_le #[allow(clippy::useless_conversion)] Ok(field .as_ref() .map(|x| FixedNonZeroUsize::from(*x).try_into().unwrap())) } } // NicheInto impl ArchiveWith> for NicheInto where T: Archive, N: Niching + ?Sized, { type Archived = NichedOption; type Resolver = Option; fn resolve_with( field: &Option, resolver: Self::Resolver, out: Place, ) { NichedOption::::resolve_from_option( field.as_ref(), resolver, out, ); } } impl SerializeWith, S> for NicheInto where T: Serialize, N: Niching + ?Sized, S: Fallible + ?Sized, { fn serialize_with( field: &Option, serializer: &mut S, ) -> Result { NichedOption::::serialize_from_option( field.as_ref(), serializer, ) } } impl DeserializeWith, Option, D> for NicheInto where T: Archive>, N: Niching + ?Sized, D: Fallible + ?Sized, { fn deserialize_with( field: &NichedOption, deserializer: &mut D, ) -> Result, D::Error> { Deserialize::deserialize(field, deserializer) } } impl Deserialize, D> for NichedOption where T: Archive>, N: Niching + ?Sized, D: Fallible + ?Sized, { fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { match self.as_ref() { Some(value) => value.deserialize(deserializer).map(Some), None => Ok(None), } } } // MapNiche impl ArchiveWith> for MapNiche where W: ArchiveWith + ?Sized, N: Niching<>::Archived> + ?Sized, { type Archived = NichedOption<>::Archived, N>; type Resolver = Option<>::Resolver>; fn resolve_with( field: &Option, resolver: Self::Resolver, out: 
Place, ) { let out = NichedOption::munge_place(out); match field { Some(value) => { let resolver = resolver.expect("non-niched resolver"); W::resolve_with(value, resolver, out); } None => N::resolve_niched(out), } } } impl SerializeWith, S> for MapNiche where W: SerializeWith + ?Sized, N: Niching<>::Archived> + ?Sized, S: Fallible + ?Sized, { fn serialize_with( field: &Option, serializer: &mut S, ) -> Result { match field { Some(value) => W::serialize_with(value, serializer).map(Some), None => Ok(None), } } } impl DeserializeWith< NichedOption<>::Archived, N>, Option, D, > for MapNiche where W: ArchiveWith + DeserializeWith<>::Archived, T, D>, N: Niching<>::Archived> + ?Sized, D: Fallible + ?Sized, { fn deserialize_with( field: &NichedOption<>::Archived, N>, deserializer: &mut D, ) -> Result, D::Error> { field .as_ref() .map(|value| W::deserialize_with(value, deserializer)) .transpose() } } // DefaultNiche impl ArchiveWith> for DefaultNiche where T: Archive, Self: Niching, { type Archived = NichedOption; type Resolver = Option; fn resolve_with( field: &Option, resolver: Self::Resolver, out: Place, ) { NicheInto::::resolve_with(field, resolver, out); } } impl SerializeWith, S> for DefaultNiche where T: Serialize, Self: Niching, S: Fallible + ?Sized, { fn serialize_with( field: &Option, serializer: &mut S, ) -> Result { NicheInto::::serialize_with(field, serializer) } } impl DeserializeWith, Option, D> for DefaultNiche where T: Archive>, Self: Niching, D: Fallible + ?Sized, { fn deserialize_with( field: &NichedOption, deserializer: &mut D, ) -> Result, D::Error> { NicheInto::::deserialize_with(field, deserializer) } } // Inline impl ArchiveWith<&F> for Inline { type Archived = F::Archived; type Resolver = F::Resolver; fn resolve_with( field: &&F, resolver: Self::Resolver, out: Place, ) { field.resolve(resolver, out); } } impl, S: Fallible + ?Sized> SerializeWith<&F, S> for Inline { fn serialize_with( field: &&F, serializer: &mut S, ) -> Result { 
field.serialize(serializer) } } // Unsafe impl ArchiveWith> for Unsafe { type Archived = F::Archived; type Resolver = F::Resolver; fn resolve_with( field: &UnsafeCell, resolver: Self::Resolver, out: Place, ) { let value = unsafe { &*field.get() }; F::resolve(value, resolver, out); } } impl SerializeWith, S> for Unsafe where F: Serialize, S: Fallible + ?Sized, { fn serialize_with( field: &UnsafeCell, serializer: &mut S, ) -> Result { unsafe { (*field.get()).serialize(serializer) } } } impl DeserializeWith, D> for Unsafe where F: Archive, F::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize_with( field: &F::Archived, deserializer: &mut D, ) -> Result, D::Error> { field.deserialize(deserializer).map(|x| UnsafeCell::new(x)) } } impl ArchiveWith> for Unsafe { type Archived = F::Archived; type Resolver = F::Resolver; fn resolve_with( field: &Cell, resolver: Self::Resolver, out: Place, ) { let value = unsafe { &*field.as_ptr() }; F::resolve(value, resolver, out); } } impl SerializeWith, S> for Unsafe where F: Serialize, S: Fallible + ?Sized, { fn serialize_with( field: &Cell, serializer: &mut S, ) -> Result { unsafe { (*field.as_ptr()).serialize(serializer) } } } impl DeserializeWith, D> for Unsafe where F: Archive, F::Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize_with( field: &F::Archived, deserializer: &mut D, ) -> Result, D::Error> { field.deserialize(deserializer).map(|x| Cell::new(x)) } } // Skip impl ArchiveWith for Skip { type Archived = (); type Resolver = (); fn resolve_with(_: &F, _: Self::Resolver, _: Place) {} } impl SerializeWith for Skip { fn serialize_with(_: &F, _: &mut S) -> Result<(), S::Error> { Ok(()) } } impl DeserializeWith<(), F, D> for Skip { fn deserialize_with(_: &(), _: &mut D) -> Result { Ok(Default::default()) } } // Identity impl ArchiveWith for Identity { type Archived = F::Archived; type Resolver = F::Resolver; fn resolve_with( field: &F, resolver: Self::Resolver, out: Place, ) { field.resolve(resolver, out) } } 
impl, S: Fallible + ?Sized> SerializeWith for Identity { fn serialize_with( field: &F, serializer: &mut S, ) -> Result { field.serialize(serializer) } } impl DeserializeWith for Identity where F: Deserialize, D: Fallible + ?Sized, { fn deserialize_with( field: &F, deserializer: &mut D, ) -> Result::Error> { field.deserialize(deserializer) } } #[cfg(test)] mod tests { use core::f32; use crate::{ api::test::{deserialize, roundtrip, roundtrip_with, to_archived}, niche::niching::{NaN, Zero}, rancor::Fallible, ser::Writer, with::{ ArchiveWith, AsBox, DeserializeWith, Identity, Inline, InlineAsBox, Niche, NicheInto, SerializeWith, Unsafe, With, }, Archive, Archived, Deserialize, Place, Serialize, }; struct AsFloat; impl ArchiveWith for AsFloat { type Archived = Archived; type Resolver = (); fn resolve_with( value: &i32, _: Self::Resolver, out: Place, ) { out.write(Archived::::from_native(*value as f32)); } } impl SerializeWith for AsFloat where S: Fallible + Writer + ?Sized, { fn serialize_with( _: &i32, _: &mut S, ) -> Result { Ok(()) } } impl DeserializeWith, i32, D> for AsFloat where D: Fallible + ?Sized, { fn deserialize_with( value: &Archived, _: &mut D, ) -> Result { Ok(value.to_native() as i32) } } #[test] fn with_struct() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Test { #[rkyv(with = AsFloat)] value: i32, other: i32, } let value = Test { value: 10, other: 10, }; roundtrip_with(&value, |_, archived| { assert_eq!(archived.value, 10.0); assert_eq!(archived.other, 10); }); } #[test] fn with_tuple_struct() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Test(#[rkyv(with = AsFloat)] i32, i32); let value = Test(10, 10); roundtrip_with(&value, |_, archived| { assert_eq!(archived.0, 10.0); assert_eq!(archived.1, 10); }); } #[test] fn with_enum() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] enum Test { A { #[rkyv(with 
= AsFloat)] value: i32, other: i32, }, B(#[rkyv(with = AsFloat)] i32, i32), } let value = Test::A { value: 10, other: 10, }; roundtrip_with(&value, |_, archived| { if let ArchivedTest::A { value, other } = archived { assert_eq!(*value, 10.0); assert_eq!(*other, 10); } else { panic!("expected variant A"); } }); let value = Test::B(10, 10); roundtrip_with(&value, |_, archived| { if let ArchivedTest::B(value, other) = archived { assert_eq!(*value, 10.0); assert_eq!(*other, 10); } else { panic!("expected variant B"); } }); } #[test] fn with_wrapper() { to_archived(With::<_, AsFloat>::cast(&10), |archived| { assert_eq!(archived.to_native(), 10.0); let original = deserialize(With::<_, AsFloat>::cast(&*archived)); assert_eq!(original, 10); }); } #[test] fn with_inline() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test<'a> { #[rkyv(with = Inline)] value: &'a i32, } let a = 42; let value = Test { value: &a }; to_archived(&value, |archived| { assert_eq!(archived.value, 42); }); } #[test] fn with_boxed() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test { #[rkyv(with = AsBox)] value: i32, } let value = Test { value: 42 }; to_archived(&value, |archived| { assert_eq!(archived.value.get(), &42); }); } #[test] fn with_boxed_inline() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test<'a> { #[rkyv(with = InlineAsBox)] value: &'a str, } let a = "hello world"; let value = Test { value: &a }; to_archived(&value, |archived| { assert_eq!(archived.value.as_ref(), "hello world"); }); } #[test] fn with_niche_nonzero() { use core::{ mem::size_of, num::{ NonZeroI32, NonZeroI8, NonZeroIsize, NonZeroU32, NonZeroU8, NonZeroUsize, }, }; #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct TestNiche { #[rkyv(with = Niche)] a: Option, #[rkyv(with = Niche)] b: Option, #[rkyv(with = Niche)] c: Option, #[rkyv(with = Niche)] d: Option, #[rkyv(with = Niche)] e: Option, #[rkyv(with = Niche)] f: Option, } #[derive(Archive, 
Serialize, Deserialize)] #[rkyv(crate)] struct TestZeroNiche { #[rkyv(with = NicheInto)] a: Option, #[rkyv(with = NicheInto)] b: Option, #[rkyv(with = NicheInto)] c: Option, #[rkyv(with = NicheInto)] d: Option, #[rkyv(with = NicheInto)] e: Option, #[rkyv(with = NicheInto)] f: Option, } #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct TestNoNiching { a: Option, b: Option, c: Option, d: Option, e: Option, f: Option, } let value = TestNiche { a: Some(NonZeroI8::new(10).unwrap()), b: Some(NonZeroI32::new(10).unwrap()), c: Some(NonZeroIsize::new(10).unwrap()), d: Some(NonZeroU8::new(10).unwrap()), e: Some(NonZeroU32::new(10).unwrap()), f: Some(NonZeroUsize::new(10).unwrap()), }; to_archived(&value, |archived| { assert!(archived.a.is_some()); assert_eq!(archived.a.as_ref().unwrap().get(), 10); assert!(archived.b.is_some()); assert_eq!(archived.b.as_ref().unwrap().get(), 10); assert!(archived.c.is_some()); assert_eq!(archived.c.as_ref().unwrap().get(), 10); assert!(archived.d.is_some()); assert_eq!(archived.d.as_ref().unwrap().get(), 10); assert!(archived.e.is_some()); assert_eq!(archived.e.as_ref().unwrap().get(), 10); assert!(archived.f.is_some()); assert_eq!(archived.f.as_ref().unwrap().get(), 10); }); let value = TestNiche { a: None, b: None, c: None, d: None, e: None, f: None, }; to_archived(&value, |archived| { assert!(archived.a.is_none()); assert!(archived.b.is_none()); assert!(archived.c.is_none()); assert!(archived.d.is_none()); assert!(archived.e.is_none()); assert!(archived.f.is_none()); }); assert!( size_of::>() < size_of::>() ); let value = TestZeroNiche { a: Some(NonZeroI8::new(10).unwrap()), b: Some(NonZeroI32::new(10).unwrap()), c: Some(NonZeroIsize::new(10).unwrap()), d: Some(NonZeroU8::new(10).unwrap()), e: Some(NonZeroU32::new(10).unwrap()), f: Some(NonZeroUsize::new(10).unwrap()), }; to_archived(&value, |archived| { assert!(archived.a.is_some()); assert_eq!(archived.a.as_ref().unwrap().get(), 10); assert!(archived.b.is_some()); 
assert_eq!(archived.b.as_ref().unwrap().get(), 10); assert!(archived.c.is_some()); assert_eq!(archived.c.as_ref().unwrap().get(), 10); assert!(archived.d.is_some()); assert_eq!(archived.d.as_ref().unwrap().get(), 10); assert!(archived.e.is_some()); assert_eq!(archived.e.as_ref().unwrap().get(), 10); assert!(archived.f.is_some()); assert_eq!(archived.f.as_ref().unwrap().get(), 10); }); let value = TestZeroNiche { a: None, b: None, c: None, d: None, e: None, f: None, }; to_archived(&value, |archived| { assert!(archived.a.is_none()); assert!(archived.b.is_none()); assert!(archived.c.is_none()); assert!(archived.d.is_none()); assert!(archived.e.is_none()); assert!(archived.f.is_none()); }); assert!( size_of::>() < size_of::>() ); } #[test] fn with_niche_float_nan() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test { #[rkyv(with = NicheInto)] a: Option, #[rkyv(with = NicheInto)] b: Option, } #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct TestNoNiching { a: Option, b: Option, } let value = Test { a: Some(123.45), b: Some(123.45), }; to_archived(&value, |archived| { assert!(archived.a.is_some()); assert_eq!(archived.a.as_ref().unwrap().to_native(), 123.45); assert!(archived.b.is_some()); assert_eq!(archived.b.as_ref().unwrap().to_native(), 123.45); }); let value = Test { a: Some(f32::NAN), b: Some(f64::NAN), }; to_archived(&value, |archived| { assert!(archived.a.is_none()); assert!(archived.b.is_none()); }); let value = Test { a: None, b: None }; to_archived(&value, |archived| { assert!(archived.a.is_none()); assert!(archived.b.is_none()); }); assert!( size_of::>() < size_of::>() ); } #[test] fn with_unsafe() { use core::cell::Cell; #[derive(Archive, Debug, Deserialize, Serialize, PartialEq)] #[rkyv(crate, derive(Debug))] struct Test { #[rkyv(with = Unsafe)] inner: Cell, } impl PartialEq for ArchivedTest { fn eq(&self, other: &Test) -> bool { self.inner == other.inner.get() } } let value = Test { inner: Cell::new(100), }; 
roundtrip(&value); } #[test] fn with_identity() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Test { #[rkyv(with = Identity)] value: i32, other: i32, } let value = Test { value: 10, other: 10, }; roundtrip_with(&value, |_, archived| { assert_eq!(archived.value, 10); assert_eq!(archived.other, 10); }); } } rkyv-0.8.9/src/impls/core/with/niching.rs000064400000000000000000000266741046102023000164570ustar 00000000000000use core::num::{NonZeroI8, NonZeroU8}; use crate::{ boxed::ArchivedBox, niche::{ niched_option::NichedOption, niching::{ Bool, DefaultNiche, NaN, Niching, Null, SharedNiching, Zero, }, }, primitive::{ ArchivedF32, ArchivedF64, ArchivedI128, ArchivedI16, ArchivedI32, ArchivedI64, ArchivedNonZeroI128, ArchivedNonZeroI16, ArchivedNonZeroI32, ArchivedNonZeroI64, ArchivedNonZeroU128, ArchivedNonZeroU16, ArchivedNonZeroU32, ArchivedNonZeroU64, ArchivedU128, ArchivedU16, ArchivedU32, ArchivedU64, }, traits::ArchivePointee, Place, Portable, RelPtr, }; macro_rules! impl_default_niche { ($ty:ty, $niche:ty) => { impl Niching<$ty> for DefaultNiche { unsafe fn is_niched(niched: *const $ty) -> bool { unsafe { <$niche as Niching<$ty>>::is_niched(niched) } } fn resolve_niched(out: Place<$ty>) { <$niche as Niching<$ty>>::resolve_niched(out) } } }; } // Zero macro_rules! 
impl_nonzero_zero_niching { ($nz:ty, $int:ty) => { impl Niching<$nz> for Zero { unsafe fn is_niched(niched: *const $nz) -> bool { let value = unsafe { &*niched.cast::<$int>() }; *value == 0 } fn resolve_niched(out: Place<$nz>) { let out = unsafe { out.cast_unchecked::<$int>() }; out.write(0.into()); } } impl_default_niche!($nz, Zero); }; } impl_nonzero_zero_niching!(NonZeroU8, u8); impl_nonzero_zero_niching!(ArchivedNonZeroU16, ArchivedU16); impl_nonzero_zero_niching!(ArchivedNonZeroU32, ArchivedU32); impl_nonzero_zero_niching!(ArchivedNonZeroU64, ArchivedU64); impl_nonzero_zero_niching!(ArchivedNonZeroU128, ArchivedU128); impl_nonzero_zero_niching!(NonZeroI8, i8); impl_nonzero_zero_niching!(ArchivedNonZeroI16, ArchivedI16); impl_nonzero_zero_niching!(ArchivedNonZeroI32, ArchivedI32); impl_nonzero_zero_niching!(ArchivedNonZeroI64, ArchivedI64); impl_nonzero_zero_niching!(ArchivedNonZeroI128, ArchivedI128); // NaN macro_rules! impl_float_nan_niching { ($fl:ty, $ar:ty) => { impl Niching<$ar> for NaN { unsafe fn is_niched(niched: *const $ar) -> bool { unsafe { (*niched).to_native().is_nan() } } fn resolve_niched(out: Place<$ar>) { out.write(<$fl>::NAN.into()); } } }; } impl_float_nan_niching!(f32, ArchivedF32); impl_float_nan_niching!(f64, ArchivedF64); // Bool impl Niching for Bool { unsafe fn is_niched(niched: *const bool) -> bool { unsafe { (*niched.cast::()) > 1 } } fn resolve_niched(out: Place) { unsafe { out.cast_unchecked::().write(2) }; } } impl_default_niche!(bool, Bool); // Null impl Niching> for Null where T: ArchivePointee + Portable + ?Sized, { unsafe fn is_niched(niched: *const ArchivedBox) -> bool { unsafe { (*niched.cast::>()).is_invalid() } } fn resolve_niched(out: Place>) { let out = unsafe { out.cast_unchecked::>() }; RelPtr::emplace_invalid(out); } } impl Niching> for DefaultNiche where T: ArchivePointee + Portable + ?Sized, { unsafe fn is_niched(niched: *const ArchivedBox) -> bool { unsafe { >>::is_niched(niched) } } fn resolve_niched(out: Place>) 
{ >>::resolve_niched(out); } } // SharedNiching impl Niching> for N2 where T: SharedNiching, N1: Niching, N2: Niching, { unsafe fn is_niched(niched: *const NichedOption) -> bool { unsafe { >::is_niched(niched.cast()) } } fn resolve_niched(out: Place>) { >::resolve_niched(unsafe { out.cast_unchecked() }) } } #[cfg(test)] mod tests { use core::num::NonZeroU32; use crate::{ api::test::{ deserialize, roundtrip_with, to_archived, to_archived_from_bytes, to_bytes, }, boxed::ArchivedBox, niche::niching::{DefaultNiche, NaN, Zero}, with::{AsBox, MapNiche, NicheInto}, Archive, Deserialize, Serialize, }; #[test] fn with_struct() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Nichable { #[rkyv(niche = NaN)] not_nan: f32, #[rkyv(niche = Zero)] int: NonZeroU32, #[rkyv(niche)] // Default = Bool boolean: bool, } impl Nichable { fn create() -> Self { Nichable { not_nan: 123.456, int: unsafe { NonZeroU32::new_unchecked(789) }, boolean: true, } } } #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Middle { #[rkyv(with = NicheInto, niche = NaN, niche)] // Default = Bool a: Option, #[rkyv(with = NicheInto, niche = Zero)] b: Option, } #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Outer { #[rkyv(with = DefaultNiche)] field: Option, } assert_eq!( size_of::(), 2 * size_of::() ); assert_eq!(size_of::(), size_of::()); let values = [ Outer { field: None }, Outer { field: Some(Middle { a: None, b: None }), }, Outer { field: Some(Middle { a: None, b: Some(Nichable::create()), }), }, ]; roundtrip_with(&values[0], |_, archived| { assert!(archived.field.is_none()); }); roundtrip_with(&values[1], |_, archived| { let middle = archived.field.as_ref().unwrap(); assert!(middle.a.is_none()); assert!(middle.b.is_none()); }); roundtrip_with(&values[2], |_, archived| { let middle = archived.field.as_ref().unwrap(); assert!(middle.a.is_none()); let b = 
middle.b.as_ref().unwrap(); assert_eq!(b.not_nan, 123.456); assert_eq!(b.int.get(), 789); assert_eq!(b.boolean, true); }); } #[test] fn with_enum() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] enum Nichable { A(#[rkyv(niche)] bool), B { #[rkyv(niche = NaN)] float: f32, }, C, } #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Middle { #[rkyv(with = DefaultNiche, niche = NaN)] nichable: Option, } #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Outer { #[rkyv(with = NicheInto)] field: Option, } assert_eq!(size_of::(), size_of::()); assert_eq!(size_of::(), size_of::()); let values = [ Outer { field: None }, Outer { field: Some(Middle { nichable: None }), }, Outer { field: Some(Middle { nichable: Some(Nichable::A(true)), }), }, Outer { field: Some(Middle { nichable: Some(Nichable::B { float: f32::NAN }), }), }, Outer { field: Some(Middle { nichable: Some(Nichable::B { float: 123.45 }), }), }, Outer { field: Some(Middle { nichable: Some(Nichable::C), }), }, ]; roundtrip_with(&values[0], |_, archived| { assert!(archived.field.is_none()); }); roundtrip_with(&values[1], |_, archived| { let middle = archived.field.as_ref().unwrap(); assert!(middle.nichable.is_none()); }); roundtrip_with(&values[2], |_, archived| { let middle = archived.field.as_ref().unwrap(); let nichable = middle.nichable.as_ref().unwrap(); match nichable { ArchivedNichable::A(b) => assert!(*b), _ => panic!("expected `ArchivedNichable::A`"), } }); to_archived(&values[3], |archived| { // no roundtrip; NAN will be interpreted as being niched assert!(archived.field.is_none()); }); roundtrip_with(&values[4], |_, archived| { let middle = archived.field.as_ref().unwrap(); let nichable = middle.nichable.as_ref().unwrap(); match nichable { ArchivedNichable::B { float } => { assert_eq!(float.to_native(), 123.45) } _ => panic!("expected `ArchivedNichable::B`"), } }); 
roundtrip_with(&values[5], |_, archived| { let middle = archived.field.as_ref().unwrap(); let nichable = middle.nichable.as_ref().unwrap(); match nichable { ArchivedNichable::C => {} _ => panic!("expected `ArchivedNichable::C`"), } }); } #[test] fn map_niche() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct Outer { #[rkyv(with = MapNiche)] opt: Option, } #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug))] struct NotNichable { int: i64, } let values = &[ Outer { opt: None }, Outer { opt: Some(NotNichable { int: 42 }), }, ]; to_bytes(&values[0], |bytes| { assert_eq!( bytes.len(), size_of::>() ); to_archived_from_bytes::(bytes, |archived| { assert!(archived.opt.as_ref().is_none()); let deserialized: Outer = deserialize(&*archived); assert_eq!(&values[0], &deserialized); }); }); roundtrip_with(&values[1], |_, archived| { let bar = archived.opt.as_ref().unwrap(); assert_eq!(bar.int.to_native(), 42); }); } } rkyv-0.8.9/src/impls/ext/arrayvec_0_7.rs000064400000000000000000000041141046102023000161770ustar 00000000000000use arrayvec_0_7::ArrayVec; use rancor::Fallible; use crate::{ ser::{Allocator, Writer}, vec::{ArchivedVec, VecResolver}, Archive, Archived, Deserialize, Place, Serialize, }; impl Archive for ArrayVec where T: Archive, { type Archived = ArchivedVec>; type Resolver = VecResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedVec::resolve_from_slice(self.as_slice(), resolver, out); } } impl Serialize for ArrayVec where T: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_slice(self.as_slice(), serializer) } } impl Deserialize, D> for ArchivedVec> where T: Archive, Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = ArrayVec::new(); for item in self.as_slice() { 
result.push(item.deserialize(deserializer)?); } Ok(result) } } impl PartialEq> for ArchivedVec where T: PartialEq, { fn eq(&self, other: &ArrayVec) -> bool { self.as_slice().eq(other.as_slice()) } } impl PartialOrd> for ArchivedVec where T: PartialOrd, { fn partial_cmp( &self, other: &ArrayVec, ) -> Option<::core::cmp::Ordering> { crate::impls::lexicographical_partial_ord( self.as_slice(), other.as_slice(), ) } } #[cfg(test)] mod tests { use super::ArrayVec; use crate::api::test::roundtrip_with; #[test] fn roundtrip_array_vec() { roundtrip_with(&ArrayVec::::from([10, 20, 40, 80]), |a, b| { assert_eq!(**a, **b) }); } } rkyv-0.8.9/src/impls/ext/bytes_1.rs000064400000000000000000000025211046102023000152640ustar 00000000000000use bytes_1::{Bytes, BytesMut}; use rancor::Fallible; use crate::{ ser::{Allocator, Writer}, vec::{ArchivedVec, VecResolver}, Archive, Archived, Deserialize, Place, Serialize, }; impl Archive for Bytes { type Archived = ArchivedVec; type Resolver = VecResolver; #[inline] fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedVec::resolve_from_slice(self, resolver, out); } } impl Serialize for Bytes { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_slice(self, serializer) } } impl Deserialize for ArchivedVec> { fn deserialize(&self, _deserializer: &mut D) -> Result { let mut result = BytesMut::new(); result.extend_from_slice(self.as_slice()); Ok(result.freeze()) } } impl PartialEq for ArchivedVec where Bytes: PartialEq<[T]>, { fn eq(&self, other: &Bytes) -> bool { other == self.as_slice() } } #[cfg(test)] mod tests { use super::Bytes; use crate::{alloc::vec, api::test::roundtrip}; #[test] fn roundtrip_bytes() { roundtrip(&Bytes::from(vec![10, 20, 40, 80])); } } rkyv-0.8.9/src/impls/ext/hashbrown_0_14/hash_map.rs000064400000000000000000000063571046102023000202270ustar 00000000000000use core::{ borrow::Borrow, hash::{BuildHasher, Hash}, }; use hashbrown_0_14::HashMap; use rancor::{Fallible, Source}; use 
crate::{ collections::swiss_table::map::{ArchivedHashMap, HashMapResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for HashMap where K: Archive + Hash + Eq, K::Archived: Hash + Eq, { type Archived = ArchivedHashMap; type Resolver = HashMapResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedHashMap::resolve_from_len(self.len(), (7, 8), resolver, out); } } impl Serialize for HashMap where K: Serialize + Hash + Eq, K::Archived: Hash + Eq, V: Serialize, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedHashMap::::serialize_from_iter::< _, _, _, K, V, _, >(self.iter(), (7, 8), serializer) } } impl Deserialize, D> for ArchivedHashMap where K: Archive + Hash + Eq, K::Archived: Deserialize + Hash + Eq, V: Archive, V::Archived: Deserialize, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = HashMap::with_capacity_and_hasher(self.len(), S::default()); for (k, v) in self.iter() { result.insert( k.deserialize(deserializer)?, v.deserialize(deserializer)?, ); } Ok(result) } } impl PartialEq> for ArchivedHashMap where K: Hash + Eq + Borrow, AK: Hash + Eq, AV: PartialEq, S: BuildHasher, { fn eq(&self, other: &HashMap) -> bool { if self.len() != other.len() { false } else { self.iter().all(|(key, value)| { other.get(key).map_or(false, |v| value.eq(v)) }) } } } #[cfg(test)] mod tests { use core::hash::BuildHasherDefault; use super::HashMap; use crate::{ alloc::string::String, api::test::roundtrip_with, hash::FxHasher64, }; #[test] fn index_map() { let mut value = HashMap::with_hasher(BuildHasherDefault::::default()); value.insert(String::from("foo"), 10); value.insert(String::from("bar"), 20); value.insert(String::from("baz"), 40); value.insert(String::from("bat"), 80); roundtrip_with(&value, |a, b| { assert_eq!(a.len(), b.len()); for (k, v) in a.iter() { 
let (ak, av) = b.get_key_value(k.as_str()).unwrap(); assert_eq!(k, ak); assert_eq!(v, av); } }); } } rkyv-0.8.9/src/impls/ext/hashbrown_0_14/hash_set.rs000064400000000000000000000057651046102023000202470ustar 00000000000000use core::{ borrow::Borrow, hash::{BuildHasher, Hash}, }; use hashbrown_0_14::HashSet; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::set::{ArchivedHashSet, HashSetResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for HashSet where K: Archive + Hash + Eq, K::Archived: Hash + Eq, { type Archived = ArchivedHashSet; type Resolver = HashSetResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedHashSet::::resolve_from_len( self.len(), (7, 8), resolver, out, ); } } impl Serialize for HashSet where K::Archived: Hash + Eq, K: Serialize + Hash + Eq, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedHashSet::::serialize_from_iter::<_, K, _>( self.iter(), (7, 8), serializer, ) } } impl Deserialize, D> for ArchivedHashSet where K: Archive + Hash + Eq, K::Archived: Deserialize + Hash + Eq, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = HashSet::with_hasher(S::default()); for k in self.iter() { result.insert(k.deserialize(deserializer)?); } Ok(result) } } impl, AK: Hash + Eq, S: BuildHasher> PartialEq> for ArchivedHashSet { fn eq(&self, other: &HashSet) -> bool { if self.len() != other.len() { false } else { self.iter().all(|key| other.get(key).is_some()) } } } impl, AK: Hash + Eq, S: BuildHasher> PartialEq> for HashSet { fn eq(&self, other: &ArchivedHashSet) -> bool { other.eq(self) } } #[cfg(test)] mod tests { use core::hash::BuildHasherDefault; use super::HashSet; use crate::{ alloc::string::String, api::test::roundtrip_with, hash::FxHasher64, }; #[test] fn index_set() { let mut value = 
HashSet::with_hasher(BuildHasherDefault::::default()); value.insert(String::from("foo")); value.insert(String::from("bar")); value.insert(String::from("baz")); value.insert(String::from("bat")); roundtrip_with(&value, |a, b| { assert_eq!(a.len(), b.len()); for k in a.iter() { let ak = b.get(k.as_str()).unwrap(); assert_eq!(k, ak); } }); } } rkyv-0.8.9/src/impls/ext/hashbrown_0_14/mod.rs000064400000000000000000000000511046102023000172070ustar 00000000000000mod hash_map; mod hash_set; mod with; rkyv-0.8.9/src/impls/ext/hashbrown_0_14/with.rs000064400000000000000000000074771046102023000174260ustar 00000000000000use core::{ hash::{BuildHasher, Hash}, marker::PhantomData, }; use hashbrown_0_14::HashMap; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::{ArchivedHashMap, HashMapResolver}, impls::core::with::RefWrapper, ser::{Allocator, Writer}, with::{ArchiveWith, DeserializeWith, MapKV, SerializeWith}, Place, }; impl ArchiveWith> for MapKV where A: ArchiveWith, B: ArchiveWith, H: Default + BuildHasher, { type Archived = ArchivedHashMap< >::Archived, >::Archived, >; type Resolver = HashMapResolver; fn resolve_with( field: &HashMap, resolver: Self::Resolver, out: Place, ) { ArchivedHashMap::resolve_from_len(field.len(), (7, 8), resolver, out) } } impl SerializeWith, S> for MapKV where A: ArchiveWith + SerializeWith, B: ArchiveWith + SerializeWith, K: Hash + Eq, >::Archived: Eq + Hash, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, H: Default + BuildHasher, H::Hasher: Default, { fn serialize_with( field: &HashMap, serializer: &mut S, ) -> Result::Error> { ArchivedHashMap::<_, _, H::Hasher>::serialize_from_iter( field.iter().map(|(k, v)| { ( RefWrapper::<'_, A, K>(k, PhantomData::), RefWrapper::<'_, B, V>(v, PhantomData::), ) }), (7, 8), serializer, ) } } impl DeserializeWith< ArchivedHashMap< >::Archived, >::Archived, >, HashMap, D, > for MapKV where A: ArchiveWith + DeserializeWith<>::Archived, K, D>, B: ArchiveWith + 
DeserializeWith<>::Archived, V, D>, K: Ord + Hash + Eq, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize_with( field: &ArchivedHashMap< >::Archived, >::Archived, >, deserializer: &mut D, ) -> Result, ::Error> { let mut result = HashMap::with_capacity_and_hasher(field.len(), S::default()); for (k, v) in field.iter() { result.insert( A::deserialize_with(k, deserializer)?, B::deserialize_with(v, deserializer)?, ); } Ok(result) } } #[cfg(test)] mod tests { use core::hash::BuildHasherDefault; use rkyv_derive::{Archive, Deserialize, Serialize}; use super::HashMap; use crate::{ api::test::to_archived, hash::FxHasher64, with::{InlineAsBox, MapKV}, }; #[test] fn with_as_mapkv() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test<'a> { #[rkyv(with = MapKV)] a: HashMap<&'a str, &'a str, BuildHasherDefault>, } let mut a = HashMap::with_hasher(BuildHasherDefault::::default()); a.insert("foo", "bar"); a.insert("woo", "roo"); let value = Test { a }; to_archived(&value, |archived| { assert_eq!(archived.a.len(), 2); assert!(archived.a.contains_key("foo")); assert_eq!(**archived.a.get("woo").unwrap(), *"roo"); }); } } rkyv-0.8.9/src/impls/ext/hashbrown_0_15/hash_map.rs000064400000000000000000000063521046102023000202230ustar 00000000000000use core::{ borrow::Borrow, hash::{BuildHasher, Hash}, }; use hashbrown::HashMap; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::map::{ArchivedHashMap, HashMapResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for HashMap where K: Archive + Hash + Eq, K::Archived: Hash + Eq, { type Archived = ArchivedHashMap; type Resolver = HashMapResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedHashMap::resolve_from_len(self.len(), (7, 8), resolver, out); } } impl Serialize for HashMap where K: Serialize + Hash + Eq, K::Archived: Hash + Eq, V: Serialize, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, { fn serialize( &self, 
serializer: &mut S, ) -> Result { ArchivedHashMap::::serialize_from_iter::< _, _, _, K, V, _, >(self.iter(), (7, 8), serializer) } } impl Deserialize, D> for ArchivedHashMap where K: Archive + Hash + Eq, K::Archived: Deserialize + Hash + Eq, V: Archive, V::Archived: Deserialize, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = HashMap::with_capacity_and_hasher(self.len(), S::default()); for (k, v) in self.iter() { result.insert( k.deserialize(deserializer)?, v.deserialize(deserializer)?, ); } Ok(result) } } impl PartialEq> for ArchivedHashMap where K: Hash + Eq + Borrow, AK: Hash + Eq, AV: PartialEq, S: BuildHasher, { fn eq(&self, other: &HashMap) -> bool { if self.len() != other.len() { false } else { self.iter().all(|(key, value)| { other.get(key).map_or(false, |v| value.eq(v)) }) } } } #[cfg(test)] mod tests { use core::hash::BuildHasherDefault; use super::HashMap; use crate::{ alloc::string::String, api::test::roundtrip_with, hash::FxHasher64, }; #[test] fn index_map() { let mut value = HashMap::with_hasher(BuildHasherDefault::::default()); value.insert(String::from("foo"), 10); value.insert(String::from("bar"), 20); value.insert(String::from("baz"), 40); value.insert(String::from("bat"), 80); roundtrip_with(&value, |a, b| { assert_eq!(a.len(), b.len()); for (k, v) in a.iter() { let (ak, av) = b.get_key_value(k.as_str()).unwrap(); assert_eq!(k, ak); assert_eq!(v, av); } }); } } rkyv-0.8.9/src/impls/ext/hashbrown_0_15/hash_set.rs000064400000000000000000000057601046102023000202430ustar 00000000000000use core::{ borrow::Borrow, hash::{BuildHasher, Hash}, }; use hashbrown::HashSet; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::set::{ArchivedHashSet, HashSetResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for HashSet where K: Archive + Hash + Eq, K::Archived: Hash + Eq, { type Archived = ArchivedHashSet; type 
Resolver = HashSetResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedHashSet::::resolve_from_len( self.len(), (7, 8), resolver, out, ); } } impl Serialize for HashSet where K::Archived: Hash + Eq, K: Serialize + Hash + Eq, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedHashSet::::serialize_from_iter::<_, K, _>( self.iter(), (7, 8), serializer, ) } } impl Deserialize, D> for ArchivedHashSet where K: Archive + Hash + Eq, K::Archived: Deserialize + Hash + Eq, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = HashSet::with_hasher(S::default()); for k in self.iter() { result.insert(k.deserialize(deserializer)?); } Ok(result) } } impl, AK: Hash + Eq, S: BuildHasher> PartialEq> for ArchivedHashSet { fn eq(&self, other: &HashSet) -> bool { if self.len() != other.len() { false } else { self.iter().all(|key| other.get(key).is_some()) } } } impl, AK: Hash + Eq, S: BuildHasher> PartialEq> for HashSet { fn eq(&self, other: &ArchivedHashSet) -> bool { other.eq(self) } } #[cfg(test)] mod tests { use core::hash::BuildHasherDefault; use super::HashSet; use crate::{ alloc::string::String, api::test::roundtrip_with, hash::FxHasher64, }; #[test] fn index_set() { let mut value = HashSet::with_hasher(BuildHasherDefault::::default()); value.insert(String::from("foo")); value.insert(String::from("bar")); value.insert(String::from("baz")); value.insert(String::from("bat")); roundtrip_with(&value, |a, b| { assert_eq!(a.len(), b.len()); for k in a.iter() { let ak = b.get(k.as_str()).unwrap(); assert_eq!(k, ak); } }); } } rkyv-0.8.9/src/impls/ext/hashbrown_0_15/mod.rs000064400000000000000000000002341046102023000172130ustar 00000000000000mod hash_map; mod hash_set; mod with; // NOTE: When hashbrown updates to 0.16 or later, // this module will need to be updated to use hashbrown_0_15 
rkyv-0.8.9/src/impls/ext/hashbrown_0_15/with.rs000064400000000000000000000074721046102023000174220ustar 00000000000000use core::{ hash::{BuildHasher, Hash}, marker::PhantomData, }; use hashbrown::HashMap; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::{ArchivedHashMap, HashMapResolver}, impls::core::with::RefWrapper, ser::{Allocator, Writer}, with::{ArchiveWith, DeserializeWith, MapKV, SerializeWith}, Place, }; impl ArchiveWith> for MapKV where A: ArchiveWith, B: ArchiveWith, H: Default + BuildHasher, { type Archived = ArchivedHashMap< >::Archived, >::Archived, >; type Resolver = HashMapResolver; fn resolve_with( field: &HashMap, resolver: Self::Resolver, out: Place, ) { ArchivedHashMap::resolve_from_len(field.len(), (7, 8), resolver, out) } } impl SerializeWith, S> for MapKV where A: ArchiveWith + SerializeWith, B: ArchiveWith + SerializeWith, K: Hash + Eq, >::Archived: Eq + Hash, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, H: Default + BuildHasher, H::Hasher: Default, { fn serialize_with( field: &HashMap, serializer: &mut S, ) -> Result::Error> { ArchivedHashMap::<_, _, H::Hasher>::serialize_from_iter( field.iter().map(|(k, v)| { ( RefWrapper::<'_, A, K>(k, PhantomData::), RefWrapper::<'_, B, V>(v, PhantomData::), ) }), (7, 8), serializer, ) } } impl DeserializeWith< ArchivedHashMap< >::Archived, >::Archived, >, HashMap, D, > for MapKV where A: ArchiveWith + DeserializeWith<>::Archived, K, D>, B: ArchiveWith + DeserializeWith<>::Archived, V, D>, K: Ord + Hash + Eq, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize_with( field: &ArchivedHashMap< >::Archived, >::Archived, >, deserializer: &mut D, ) -> Result, ::Error> { let mut result = HashMap::with_capacity_and_hasher(field.len(), S::default()); for (k, v) in field.iter() { result.insert( A::deserialize_with(k, deserializer)?, B::deserialize_with(v, deserializer)?, ); } Ok(result) } } #[cfg(test)] mod tests { use core::hash::BuildHasherDefault; use 
rkyv_derive::{Archive, Deserialize, Serialize}; use super::HashMap; use crate::{ api::test::to_archived, hash::FxHasher64, with::{InlineAsBox, MapKV}, }; #[test] fn with_as_mapkv() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test<'a> { #[rkyv(with = MapKV)] a: HashMap<&'a str, &'a str, BuildHasherDefault>, } let mut a = HashMap::with_hasher(BuildHasherDefault::::default()); a.insert("foo", "bar"); a.insert("woo", "roo"); let value = Test { a }; to_archived(&value, |archived| { assert_eq!(archived.a.len(), 2); assert!(archived.a.contains_key("foo")); assert_eq!(**archived.a.get("woo").unwrap(), *"roo"); }); } } rkyv-0.8.9/src/impls/ext/indexmap_2/index_map.rs000064400000000000000000000056251046102023000177200ustar 00000000000000use core::hash::{BuildHasher, Hash}; use indexmap_2::IndexMap; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::{ArchivedIndexMap, IndexMapResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for IndexMap { type Archived = ArchivedIndexMap; type Resolver = IndexMapResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedIndexMap::resolve_from_len(self.len(), (7, 8), resolver, out); } } impl Serialize for IndexMap where K: Hash + Eq + Serialize, V: Serialize, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedIndexMap::::serialize_from_iter::< _, _, _, K, V, _, >(self.iter(), (7, 8), serializer) } } impl Deserialize, D> for ArchivedIndexMap where K: Archive + Hash + Eq, K::Archived: Deserialize, V: Archive, V::Archived: Deserialize, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = IndexMap::with_capacity_and_hasher(self.len(), S::default()); for (k, v) in self.iter() { result.insert( k.deserialize(deserializer)?, v.deserialize(deserializer)?, ); } Ok(result) } } impl PartialEq> for 
ArchivedIndexMap where K: PartialEq, V: PartialEq, S: BuildHasher, { fn eq(&self, other: &IndexMap) -> bool { self.iter() .zip(other.iter()) .all(|((ak, av), (bk, bv))| ak == bk && av == bv) } } #[cfg(test)] mod tests { use core::hash::BuildHasherDefault; use indexmap_2::IndexMap; use crate::{ alloc::string::String, api::test::roundtrip_with, hash::FxHasher64, }; #[test] fn index_map() { let mut value = IndexMap::with_hasher(BuildHasherDefault::::default()); value.insert(String::from("foo"), 10); value.insert(String::from("bar"), 20); value.insert(String::from("baz"), 40); value.insert(String::from("bat"), 80); roundtrip_with(&value, |a, b| { assert_eq!(a.len(), b.len()); for (k, v) in a.iter() { let (ak, av) = b.get_key_value(k.as_str()).unwrap(); assert_eq!(k, ak); assert_eq!(v, av); } }); } } rkyv-0.8.9/src/impls/ext/indexmap_2/index_set.rs000064400000000000000000000046701046102023000177350ustar 00000000000000use core::hash::{BuildHasher, Hash}; use indexmap_2::IndexSet; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::{ArchivedIndexSet, IndexSetResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for IndexSet { type Archived = ArchivedIndexSet; type Resolver = IndexSetResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedIndexSet::resolve_from_len(self.len(), (7, 8), resolver, out); } } impl Serialize for IndexSet where K: Hash + Eq + Serialize, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedIndexSet::::serialize_from_iter::<_, K, _>( self.iter(), (7, 8), serializer, ) } } impl Deserialize, D> for ArchivedIndexSet where K: Archive + Hash + Eq, K::Archived: Deserialize, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = IndexSet::with_capacity_and_hasher(self.len(), S::default()); for k in self.iter() { 
result.insert(k.deserialize(deserializer)?); } Ok(result) } } impl, S: BuildHasher> PartialEq> for ArchivedIndexSet { fn eq(&self, other: &IndexSet) -> bool { self.iter().eq(other.iter()) } } #[cfg(test)] mod tests { use core::hash::BuildHasherDefault; use indexmap_2::IndexSet; use crate::{ alloc::string::String, api::test::roundtrip_with, hash::FxHasher64, }; #[test] fn index_set() { let mut value = IndexSet::with_hasher(BuildHasherDefault::::default()); value.insert(String::from("foo")); value.insert(String::from("bar")); value.insert(String::from("baz")); value.insert(String::from("bat")); roundtrip_with(&value, |a, b| { assert_eq!(a.len(), b.len()); for k in a.iter() { let ak = b.get(k.as_str()).unwrap(); assert_eq!(k, ak); } }); } } rkyv-0.8.9/src/impls/ext/indexmap_2/mod.rs000064400000000000000000000000361046102023000165220ustar 00000000000000mod index_map; mod index_set; rkyv-0.8.9/src/impls/ext/mod.rs000064400000000000000000000017501046102023000145000ustar 00000000000000// Support for various common crates. These are primarily to get users off the // ground and build some momentum. // These are NOT PLANNED to remain in rkyv for the final release. Much like // serde, these implementations should be moved into their respective crates // over time. Before adding support for another crate, please consider getting // rkyv support in the crate instead. 
#[cfg(feature = "arrayvec-0_7")] mod arrayvec_0_7; #[cfg(feature = "bytes-1")] mod bytes_1; #[cfg(feature = "hashbrown-0_14")] mod hashbrown_0_14; #[cfg(feature = "hashbrown-0_15")] mod hashbrown_0_15; #[cfg(feature = "indexmap-2")] mod indexmap_2; #[cfg(feature = "smallvec-1")] mod smallvec_1; #[cfg(feature = "smol_str-0_2")] mod smolstr_0_2; #[cfg(feature = "smol_str-0_3")] mod smolstr_0_3; #[cfg(feature = "thin-vec-0_2")] mod thin_vec_0_2; #[cfg(feature = "tinyvec-1")] mod tinyvec_1; #[cfg(feature = "triomphe-0_1")] mod triomphe_0_1; #[cfg(feature = "uuid-1")] mod uuid_1; rkyv-0.8.9/src/impls/ext/smallvec_1.rs000064400000000000000000000041321046102023000157440ustar 00000000000000use rancor::Fallible; use smallvec_1::{Array, SmallVec}; use crate::{ ser::{Allocator, Writer}, vec::{ArchivedVec, VecResolver}, Archive, Archived, Deserialize, Place, Serialize, }; impl Archive for SmallVec where A::Item: Archive, { type Archived = ArchivedVec>; type Resolver = VecResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedVec::resolve_from_slice(self.as_slice(), resolver, out); } } impl Serialize for SmallVec where A: Array, A::Item: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_slice(self.as_slice(), serializer) } } impl Deserialize, D> for ArchivedVec> where A: Array, A::Item: Archive, Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = SmallVec::new(); for item in self.as_slice() { result.push(item.deserialize(deserializer)?); } Ok(result) } } impl PartialEq> for ArchivedVec where A: Array, U: PartialEq, { fn eq(&self, other: &SmallVec) -> bool { self.as_slice().eq(other.as_slice()) } } impl PartialOrd> for ArchivedVec where A: Array, T: PartialOrd, { fn partial_cmp( &self, other: &SmallVec, ) -> Option<::core::cmp::Ordering> { crate::impls::lexicographical_partial_ord( 
self.as_slice(), other.as_slice(), ) } } #[cfg(test)] mod tests { use smallvec_1::{smallvec, SmallVec}; use crate::api::test::roundtrip_with; #[test] fn roundtrip_small_vec() { let value: SmallVec<[i32; 4]> = smallvec![10, 20, 40, 80]; roundtrip_with(&value, |a, b| assert_eq!(**a, **b)); } } rkyv-0.8.9/src/impls/ext/smolstr_0_2.rs000064400000000000000000000026511046102023000160650ustar 00000000000000use rancor::{Fallible, Source}; use smol_str_0_2::SmolStr; use crate::{ ser::{Allocator, Writer}, string::{ArchivedString, StringResolver}, Archive, Deserialize, Place, Serialize, }; impl Archive for SmolStr { type Archived = ArchivedString; type Resolver = StringResolver; #[inline] fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedString::resolve_from_str(self, resolver, out); } } impl Serialize for SmolStr where S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedString::serialize_from_str(self, serializer) } } impl Deserialize for ArchivedString { fn deserialize(&self, _deserializer: &mut D) -> Result { Ok(SmolStr::new(self.as_str())) } } impl PartialEq for ArchivedString { fn eq(&self, other: &SmolStr) -> bool { other.as_str() == self.as_str() } } impl PartialOrd for ArchivedString { fn partial_cmp(&self, other: &SmolStr) -> Option<::core::cmp::Ordering> { Some(self.as_str().cmp(other.as_str())) } } #[cfg(test)] mod tests { use super::SmolStr; use crate::api::test::roundtrip; #[test] fn roundtrip_smol_str() { roundtrip(&SmolStr::new("smol_str")); } } rkyv-0.8.9/src/impls/ext/smolstr_0_3.rs000064400000000000000000000026511046102023000160660ustar 00000000000000use rancor::{Fallible, Source}; use smol_str_0_3::SmolStr; use crate::{ ser::{Allocator, Writer}, string::{ArchivedString, StringResolver}, Archive, Deserialize, Place, Serialize, }; impl Archive for SmolStr { type Archived = ArchivedString; type Resolver = StringResolver; #[inline] fn resolve(&self, resolver: 
Self::Resolver, out: Place) { ArchivedString::resolve_from_str(self, resolver, out); } } impl Serialize for SmolStr where S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedString::serialize_from_str(self, serializer) } } impl Deserialize for ArchivedString { fn deserialize(&self, _deserializer: &mut D) -> Result { Ok(SmolStr::new(self.as_str())) } } impl PartialEq for ArchivedString { fn eq(&self, other: &SmolStr) -> bool { other.as_str() == self.as_str() } } impl PartialOrd for ArchivedString { fn partial_cmp(&self, other: &SmolStr) -> Option<::core::cmp::Ordering> { Some(self.as_str().cmp(other.as_str())) } } #[cfg(test)] mod tests { use super::SmolStr; use crate::api::test::roundtrip; #[test] fn roundtrip_smol_str() { roundtrip(&SmolStr::new("smol_str")); } } rkyv-0.8.9/src/impls/ext/thin_vec_0_2.rs000064400000000000000000000044321046102023000161600ustar 00000000000000use rancor::Fallible; use thin_vec_0_2::ThinVec; use crate::{ ser::{Allocator, Writer}, vec::{ArchivedVec, VecResolver}, Archive, Archived, Deserialize, Place, Serialize, }; impl Archive for ThinVec where T: Archive, { type Archived = ArchivedVec>; type Resolver = VecResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedVec::resolve_from_slice(self.as_slice(), resolver, out); } } impl Serialize for ThinVec where T: Serialize, S: Allocator + Writer + Fallible + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_slice(self.as_slice(), serializer) } } impl Deserialize, D> for ArchivedVec> where T: Archive, Archived: Deserialize, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = ThinVec::with_capacity(self.len()); for item in self.as_slice() { result.push(item.deserialize(deserializer)?); } Ok(result) } } impl PartialEq> for ArchivedVec where T: PartialEq, { fn eq(&self, other: &ThinVec) -> bool { 
self.as_slice().eq(other.as_slice()) } } impl PartialOrd> for ArchivedVec where T: PartialOrd, { fn partial_cmp(&self, other: &ThinVec) -> Option<::core::cmp::Ordering> { crate::impls::lexicographical_partial_ord( self.as_slice(), other.as_slice(), ) } } #[cfg(test)] mod tests { use super::ThinVec; use crate::api::test::roundtrip_with; #[test] fn roundtrip_thin_vec() { roundtrip_with(&ThinVec::::from_iter([10, 20, 40, 80]), |a, b| { assert_eq!(**a, **b) }); } #[test] fn test_partial_eq() { use crate::Archive; #[derive(Archive)] #[rkyv(crate, compare(PartialEq, PartialOrd))] struct Inner { a: i32, } #[derive(Archive)] #[rkyv(crate, compare(PartialEq, PartialOrd))] struct Outer { a: ThinVec, } } } rkyv-0.8.9/src/impls/ext/tinyvec_1.rs000064400000000000000000000122151046102023000156200ustar 00000000000000use rancor::Fallible; #[cfg(feature = "alloc")] use tinyvec_1::TinyVec; use tinyvec_1::{Array, ArrayVec, SliceVec}; use crate::{ ser::{Allocator, Writer}, vec::{ArchivedVec, VecResolver}, Archive, Archived, Deserialize, Place, Serialize, }; // ArrayVec impl Archive for ArrayVec where A::Item: Archive, { type Archived = ArchivedVec>; type Resolver = VecResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedVec::resolve_from_slice(self.as_slice(), resolver, out); } } impl Serialize for ArrayVec where A: Array, A::Item: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_slice(self.as_slice(), serializer) } } impl Deserialize, D> for ArchivedVec> where A: Array, A::Item: Archive, Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = ArrayVec::new(); for item in self.as_slice() { result.push(item.deserialize(deserializer)?); } Ok(result) } } // SliceVec impl<'s, T: Archive> Archive for SliceVec<'s, T> { type Archived = ArchivedVec; type Resolver = VecResolver; fn resolve(&self, resolver: 
Self::Resolver, out: Place) { ArchivedVec::resolve_from_slice(self.as_slice(), resolver, out); } } impl<'s, T, S> Serialize for SliceVec<'s, T> where T: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_slice(self.as_slice(), serializer) } } // SliceVec cannot be deserialized because it borrows backing memory // TinyVec #[cfg(feature = "alloc")] impl Archive for TinyVec where A::Item: Archive, { type Archived = ArchivedVec>; type Resolver = VecResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedVec::resolve_from_slice(self.as_slice(), resolver, out); } } #[cfg(feature = "alloc")] impl Serialize for TinyVec where A: Array, A::Item: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_slice(self.as_slice(), serializer) } } #[cfg(feature = "alloc")] impl Deserialize, D> for ArchivedVec> where A: Array, A::Item: Archive, Archived: Deserialize, D: Fallible + ?Sized, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = TinyVec::new(); for item in self.as_slice() { result.push(item.deserialize(deserializer)?); } Ok(result) } } impl PartialEq> for ArchivedVec where A: Array, T: PartialEq, { fn eq(&self, other: &ArrayVec) -> bool { self.as_slice().eq(other.as_slice()) } } impl PartialOrd> for ArchivedVec where A: Array, T: PartialOrd, { fn partial_cmp( &self, other: &ArrayVec, ) -> Option<::core::cmp::Ordering> { crate::impls::lexicographical_partial_ord( self.as_slice(), other.as_slice(), ) } } impl PartialEq> for ArchivedVec where T: PartialEq, { fn eq(&self, other: &SliceVec<'_, U>) -> bool { self.as_slice().eq(other.as_slice()) } } impl PartialOrd> for ArchivedVec where T: PartialOrd, { fn partial_cmp( &self, other: &SliceVec<'_, U>, ) -> Option<::core::cmp::Ordering> { crate::impls::lexicographical_partial_ord( self.as_slice(), 
other.as_slice(), ) } } #[cfg(test)] mod tests { use tinyvec_1::{array_vec, Array, SliceVec}; use crate::api::test::{roundtrip_with, to_archived}; #[test] fn roundtrip_array_vec() { roundtrip_with(&array_vec!([i32; 10] => 10, 20, 40, 80), |a, b| { assert_eq!(**a, **b) }); } #[test] fn serialize_slice_vec() { let mut backing = [0i32; 10]; let mut value = SliceVec::from_slice_len(backing.as_slice_mut(), 0); value.push(10); value.push(20); value.push(40); value.push(80); to_archived(&value, |archived| { assert_eq!(archived.as_slice(), &[10, 20, 40, 80]); }); } #[cfg(feature = "alloc")] #[test] fn roundtrip_tiny_vec() { use tinyvec_1::tiny_vec; use crate::alloc::vec; roundtrip_with(&tiny_vec!([i32; 10] => 10, 20, 40, 80), |a, b| { assert_eq!(**a, **b) }); } } rkyv-0.8.9/src/impls/ext/triomphe_0_1.rs000064400000000000000000000045761046102023000162200ustar 00000000000000use core::{ alloc::LayoutError, mem::{forget, MaybeUninit}, }; use ptr_meta::Pointee; use rancor::{Fallible, Source}; use triomphe_0_1::Arc; use crate::{ de::{Metadata, Pooling, PoolingExt, SharedPointer}, rc::{ArchivedRc, Flavor, RcResolver}, ser::{Sharing, Writer}, Archive, ArchiveUnsized, Deserialize, DeserializeUnsized, Place, Serialize, SerializeUnsized, }; pub struct TriompheArcFlavor; impl Flavor for TriompheArcFlavor { const ALLOW_CYCLES: bool = false; } unsafe impl SharedPointer for Arc { fn alloc(_: ::Metadata) -> Result<*mut T, LayoutError> { Ok(Arc::into_raw(Arc::>::new_uninit()) .cast::() .cast_mut()) } unsafe fn from_value(ptr: *mut T) -> *mut T { ptr } unsafe fn drop(ptr: *mut T) { drop(unsafe { Arc::from_raw(ptr) }) } } impl Archive for Arc { type Archived = ArchivedRc; type Resolver = RcResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedRc::resolve_from_ref(self.as_ref(), resolver, out); } } impl Serialize for Arc where T: SerializeUnsized + ?Sized + 'static, S: Writer + Sharing + Fallible + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> 
Result { ArchivedRc::::serialize_from_ref( self.as_ref(), serializer, ) } } impl Deserialize, D> for ArchivedRc where T: ArchiveUnsized + 'static, T::Metadata: Into, Metadata: Into, T::Archived: DeserializeUnsized, D: Pooling + Fallible + ?Sized, D::Error: Source, { fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { let raw_shared_ptr = deserializer.deserialize_shared::<_, Arc>(self.get())?; let shared_ptr = unsafe { Arc::::from_raw(raw_shared_ptr) }; forget(shared_ptr.clone()); Ok(shared_ptr) } } #[cfg(test)] mod tests { use triomphe_0_1::Arc; use crate::api::test::roundtrip_with; #[test] fn roundtrip_arc() { roundtrip_with(&Arc::new(100u8), |a, b| assert_eq!(**a, **b)); } } rkyv-0.8.9/src/impls/ext/uuid_1.rs000064400000000000000000000023721046102023000151100ustar 00000000000000use rancor::Fallible; use uuid_1::Uuid; use crate::{ traits::CopyOptimization, Archive, Deserialize, Place, Portable, Serialize, }; // SAFETY: `Uuid` has the same ABI has `Bytes`, and so is `Portable` when // `Bytes` is. unsafe impl Portable for Uuid where uuid_1::Bytes: Portable {} impl Archive for Uuid { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable() }; type Archived = Uuid; type Resolver = (); fn resolve(&self, _: Self::Resolver, out: Place) { // SAFETY: `Uuid` is guaranteed to have the same ABI as `[u8; 16]`, // which is always fully-initialized. 
unsafe { out.write_unchecked(*self); } } } impl Serialize for Uuid { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize for Uuid { fn deserialize(&self, _: &mut D) -> Result { Ok(*self) } } #[cfg(test)] mod tests { use super::Uuid; use crate::api::test::roundtrip; #[test] fn roundtrip_uuid() { roundtrip( &Uuid::parse_str("f9168c5e-ceb2-4faa-b6bf-329bf39fa1e4").unwrap(), ) } } rkyv-0.8.9/src/impls/mod.rs000064400000000000000000000522611046102023000137030ustar 00000000000000#[cfg(feature = "alloc")] mod alloc; mod core; mod rend; #[cfg(feature = "std")] mod std; mod ext; use ::core::cmp::Ordering; #[allow(dead_code)] #[inline] pub(crate) fn lexicographical_partial_ord( a: &[T], b: &[U], ) -> Option where T: PartialOrd, { for (a, b) in a.iter().zip(b.iter()) { match (*a).partial_cmp(b) { Some(Ordering::Equal) => {} ord => return ord, } } a.len().partial_cmp(&b.len()) } #[cfg(test)] mod core_tests { use munge::munge; use rancor::{Fallible, Source}; use crate::{ api::test::{roundtrip, to_archived}, option::ArchivedOption, primitive::{ArchivedI32, ArchivedU32}, seal::Seal, Archive, Deserialize, Place, Portable, Serialize, }; #[test] fn roundtrip_unit_struct() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] struct Test; roundtrip(&Test); roundtrip(&[Test, Test]); } #[test] fn roundtrip_tuple_struct() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] struct Test((), i32, Option); roundtrip(&Test((), 42, Some(42))); roundtrip(&[Test((), 42, Some(42)), Test((), 42, Some(42))]); } #[test] fn roundtrip_struct() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] struct Test { a: (), b: i32, c: Option, } roundtrip(&Test { a: (), b: 42, c: Some(42), }); roundtrip(&[ Test { a: (), b: 42, c: Some(42), }, Test { a: (), b: 42, c: Some(42), }, ]); } #[test] fn 
roundtrip_generic_struct() { use core::fmt; pub trait TestTrait { type Associated: PartialEq; } impl TestTrait for () { type Associated = i32; } #[derive(Archive, Serialize, Deserialize, PartialEq)] #[rkyv(crate, compare(PartialEq))] struct Test { a: (), b: ::Associated, c: Option, } impl fmt::Debug for Test where T::Associated: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Test") .field("a", &self.a) .field("b", &self.b) .field("c", &self.c) .finish() } } impl fmt::Debug for ArchivedTest where T::Associated: Archive, ::Archived: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Test") .field("a", &self.a) .field("b", &self.b) .field("c", &self.c) .finish() } } roundtrip(&Test::<()> { a: (), b: 42, c: Some(42), }); roundtrip(&[ Test::<()> { a: (), b: 42, c: Some(42), }, Test::<()> { a: (), b: 42, c: Some(42), }, ]); } #[test] fn roundtrip_enum() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] enum Test { A, B(i32), C { inner: i32 }, } roundtrip(&Test::A); roundtrip(&Test::B(42)); roundtrip(&Test::C { inner: 42 }); roundtrip(&[Test::A, Test::B(42), Test::C { inner: 42 }]); } #[test] fn roundtrip_generic_enum() { use core::fmt; pub trait TestTrait { type Associated: PartialEq; } impl TestTrait for () { type Associated = i32; } #[derive(Archive, Serialize, Deserialize, PartialEq)] #[rkyv(crate, compare(PartialEq))] enum Test { A, B(::Associated), C { inner: ::Associated }, } impl fmt::Debug for Test where T::Associated: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Test::A => f.debug_tuple("Test::A").finish(), Test::B(value) => { f.debug_tuple("Test::B").field(value).finish() } Test::C { inner } => { f.debug_struct("Test::C").field("inner", inner).finish() } } } } impl fmt::Debug for ArchivedTest where T::Associated: Archive, ::Archived: fmt::Debug, { fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { match self { ArchivedTest::A => { f.debug_tuple("ArchivedTest::A").finish() } ArchivedTest::B(value) => { f.debug_tuple("ArchivedTest::B").field(value).finish() } ArchivedTest::C { inner } => f .debug_struct("ArchivedTest::C") .field("inner", inner) .finish(), } } } roundtrip(&Test::<()>::A); roundtrip(&Test::<()>::B(42)); roundtrip(&Test::<()>::C { inner: 42 }); roundtrip(&[ Test::<()>::A, Test::<()>::B(42), Test::<()>::C { inner: 42 }, ]); } #[test] fn basic_mutable_refs() { to_archived(&42i32, |mut archived| { assert_eq!(*archived, 42); *archived = 11.into(); assert_eq!(*archived, 11); }); } #[test] fn struct_mutable_refs() { #[derive(Archive, Serialize)] #[rkyv(crate)] struct Opaque(i32); impl ArchivedOpaque { fn get(&self) -> i32 { self.0.into() } fn set(this: Seal<'_, Self>, value: i32) { munge!(let Self(mut inner) = this); *inner = value.into(); } } #[derive(Archive, Serialize)] #[rkyv(crate)] struct Test { a: Opaque, } let value = Test { a: Opaque(10) }; to_archived(&value, |mut archived| { assert_eq!(archived.a.get(), 10); munge!(let ArchivedTest { a } = archived.as_mut()); ArchivedOpaque::set(a, 50); assert_eq!(archived.a.get(), 50); }) } #[test] fn enum_mutable_ref() { #[allow(dead_code)] #[derive(Archive, Serialize)] #[rkyv(crate)] enum Test { A, B(char), C(i32), } let value = Test::A; to_archived(&value, |archived| { if let ArchivedTest::A = *archived { () } else { panic!("incorrect enum after archiving"); } let inner = unsafe { archived.unseal_unchecked() }; *inner = ArchivedTest::C(42.into()); if let ArchivedTest::C(i) = *inner { assert_eq!(i, 42); } else { panic!("incorrect enum after mutation"); } }); } #[test] fn complex_bounds() { use core::marker::PhantomData; trait MyTrait {} impl MyTrait for i32 {} #[derive(Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(transparent)] struct MyStruct { _phantom: PhantomData, } impl Archive for MyStruct { type Archived = 
MyStruct; type Resolver = (); fn resolve(&self, _: Self::Resolver, _: Place) {} } impl Serialize for MyStruct where T: Archive + MyTrait, S: Fallible + MyTrait + ?Sized, { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize, D> for MyStruct where T: Archive + MyTrait, D: Fallible + MyTrait + ?Sized, { fn deserialize(&self, _: &mut D) -> Result, D::Error> { Ok(MyStruct { _phantom: PhantomData, }) } } #[derive(Archive, Serialize, Deserialize)] #[rkyv( crate, archive_bounds(T: MyTrait), serialize_bounds(__S: MyTrait), deserialize_bounds(__D: MyTrait), )] enum Node { Nil, Cons { value: T, #[rkyv(omit_bounds)] next: MyStruct, }, } impl MyTrait for Node {} } #[test] fn derive_attributes() { #[derive(Archive, Debug, PartialEq)] #[rkyv( crate, archived = ATest, resolver = RTest, compare(PartialEq), derive(Debug), )] struct Test { a: i32, b: Option, } impl Serialize for Test where S: Fallible + ?Sized, S::Error: Source, i32: Serialize, Option: Serialize, { fn serialize(&self, serializer: &mut S) -> Result { Ok(RTest { a: self.a.serialize(serializer)?, b: self.b.serialize(serializer)?, }) } } impl Deserialize for ATest where D: Fallible + ?Sized, D::Error: Source, ArchivedI32: Deserialize, ArchivedOption: Deserialize, D>, { fn deserialize( &self, deserializer: &mut D, ) -> Result { Ok(Test { a: self.a.deserialize(deserializer)?, b: self.b.deserialize(deserializer)?, }) } } let value = Test { a: 42, b: Some(12) }; roundtrip(&value); } #[test] fn compare() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate, compare(PartialEq, PartialOrd))] pub struct UnitFoo; #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate, compare(PartialEq, PartialOrd))] pub struct TupleFoo(i32); #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate, compare(PartialEq, PartialOrd))] pub struct StructFoo { t: i32, } #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate, compare(PartialEq, PartialOrd))] pub enum EnumFoo { #[allow(dead_code)] Foo(i32), } } #[test] fn 
default_type_parameters() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] pub struct TupleFoo(T); #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] pub struct StructFoo { t: T, } #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] pub enum EnumFoo { #[allow(dead_code)] T(T), } } #[test] fn const_generics() { #[derive(Archive, Deserialize, Serialize, Debug, PartialEq)] #[rkyv(crate, compare(PartialEq), derive(Debug))] pub struct Const; roundtrip(&Const::<1>); roundtrip(&Const::<2>); roundtrip(&Const::<3>); #[derive(Archive, Deserialize, Serialize)] #[rkyv(crate)] pub struct Array([T; N]); } #[test] fn repr_c_packed() { #[derive(Archive)] #[rkyv(crate, attr(repr(C, packed)))] #[allow(dead_code)] struct CPackedRepr { a: u8, b: u32, c: u8, } assert_eq!(core::mem::size_of::(), 6); #[derive(Archive)] #[rkyv(crate, attr(repr(C), repr(packed)))] #[allow(dead_code)] struct CPackedRepr2 { a: u8, b: u32, c: u8, } assert_eq!(core::mem::size_of::(), 6); } #[test] fn repr_c_align() { #[derive(Archive)] #[rkyv(crate, attr(repr(C, align(8))))] #[allow(dead_code)] struct CAlignRepr { a: u8, } assert_eq!(core::mem::align_of::(), 8); #[derive(Archive)] #[rkyv(crate, attr(repr(C), repr(align(8))))] #[allow(dead_code)] struct CAlignRepr2 { a: u8, } assert_eq!(core::mem::align_of::(), 8); } #[test] fn archive_as_unit_struct() { #[derive( Archive, Serialize, Deserialize, Debug, Portable, PartialEq, )] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate, as = ExampleUnitStruct)] #[repr(C)] struct ExampleUnitStruct; roundtrip(&ExampleUnitStruct); } #[test] fn archive_as_tuple_struct() { #[derive(Archive, Serialize, Deserialize, Debug, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate, as = ExampleTupleStruct)] #[repr(transparent)] struct ExampleTupleStruct(T); impl, U> PartialEq> for ExampleTupleStruct { fn eq(&self, other: &ExampleTupleStruct) -> bool { self.0.eq(&other.0) } } 
roundtrip(&ExampleTupleStruct(42i32)); } #[test] fn archive_as_struct() { #[derive(Archive, Serialize, Deserialize, Debug, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate, as = ExampleStruct)] #[repr(transparent)] struct ExampleStruct { value: T, } impl PartialEq> for ExampleStruct where T: PartialEq, { fn eq(&self, other: &ExampleStruct) -> bool { self.value.eq(&other.value) } } roundtrip(&ExampleStruct { value: 42i32 }); } #[test] fn archive_as_enum() { #[derive(Archive, Serialize, Deserialize, Debug, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate, as = ExampleEnum)] #[repr(u8)] enum ExampleEnum { A(T), B, } impl, U> PartialEq> for ExampleEnum { fn eq(&self, other: &ExampleEnum) -> bool { match self { ExampleEnum::A(value) => { if let ExampleEnum::A(other) = other { value.eq(other) } else { false } } ExampleEnum::B => { if let ExampleEnum::B = other { true } else { false } } } } } roundtrip(&ExampleEnum::A(42i32)); } #[test] fn archive_as_self() { #[derive( Clone, Debug, Default, Archive, Deserialize, Portable, Serialize, )] #[rkyv(crate, as = Self)] #[repr(C)] struct Example { inner: bool, } } #[test] fn archive_as_generic() { #[derive(Portable)] #[rkyv(crate)] #[repr(C)] struct Wrapper { inner: T, } #[derive( Clone, Debug, Default, Archive, Deserialize, Portable, Serialize, )] #[rkyv(crate, as = Wrapper)] #[repr(C)] struct Example { inner: bool, } } #[test] fn archive_crate_path() { use crate as alt_path; #[derive(Archive, Deserialize, Serialize)] #[rkyv(crate = alt_path)] struct Test<'a> { #[rkyv(with = alt_path::with::InlineAsBox)] value: &'a str, other: i32, } } #[test] fn pass_thru_derive_with_option() { #[derive( Clone, Copy, Debug, PartialEq, Archive, Serialize, Deserialize, )] #[rkyv(crate, compare(PartialEq), derive(Clone, Copy, Debug))] enum ExampleEnum { Foo, Bar(u64), } #[derive( Clone, Copy, Debug, PartialEq, Archive, Serialize, Deserialize, )] #[rkyv(crate, 
compare(PartialEq), derive(Clone, Copy, Debug))] struct Example { x: i32, y: Option, } let _ = Example { x: 0, y: Some(ExampleEnum::Bar(0)), }; } } #[cfg(all(test, feature = "alloc"))] mod alloc_tests { use munge::munge; use rancor::Source; use crate::{ alloc::{ boxed::Box, string::{String, ToString}, vec, vec::Vec, }, api::test::{roundtrip, to_archived}, ser::Writer, Archive, Deserialize, Serialize, }; #[test] fn struct_container_mutable_refs() { use crate::{ boxed::ArchivedBox, string::ArchivedString, vec::ArchivedVec, }; #[derive(Archive, Serialize)] #[rkyv(crate)] struct Test { a: Box, b: Vec, } let value = Test { a: Box::new(10), b: vec!["hello".to_string(), "world".to_string()], }; to_archived(&value, |archived| { assert_eq!(*archived.a, 10); assert_eq!(archived.b.len(), 2); assert_eq!(archived.b[0], "hello"); assert_eq!(archived.b[1], "world"); munge!(let ArchivedTest { mut a, mut b } = archived); *ArchivedBox::get_seal(a.as_mut()) = 50.into(); assert_eq!(**a, 50); let mut slice = ArchivedVec::as_slice_seal(b.as_mut()); ArchivedString::as_str_seal(slice.as_mut().index(0)) .make_ascii_uppercase(); ArchivedString::as_str_seal(slice.as_mut().index(1)) .make_ascii_uppercase(); assert_eq!(b[0], "HELLO"); assert_eq!(b[1], "WORLD"); }); } #[test] fn recursive_structures() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv( crate, bytecheck(bounds(__C: crate::validation::ArchiveContext)), // The derive macros don't apply the right bounds from Box so we // have to manually specify what bounds to apply serialize_bounds(__S: Writer), deserialize_bounds(__D::Error: Source), compare(PartialEq), derive(Debug), )] enum Node { Nil, Cons(#[rkyv(omit_bounds)] Box), } roundtrip(&Node::Cons(Box::new(Node::Cons(Box::new(Node::Nil))))); } #[test] fn recursive_self_types() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv( crate, bytecheck(bounds(__C: crate::validation::ArchiveContext)), archive_bounds(T::Archived: core::fmt::Debug), // The 
derive macros don't apply the right bounds from Box so we // have to manually specify what bounds to apply serialize_bounds(__S: Writer), deserialize_bounds(__D::Error: Source), compare(PartialEq), derive(Debug), )] pub enum LinkedList { Empty, Node { val: T, #[rkyv(omit_bounds)] next: Box, }, } roundtrip(&LinkedList::Node { val: 42i32, next: Box::new(LinkedList::Node { val: 100i32, next: Box::new(LinkedList::Empty), }), }); } } rkyv-0.8.9/src/impls/rend.rs000064400000000000000000000121161046102023000140470ustar 00000000000000use rancor::Fallible; use crate::{ rend::*, traits::CopyOptimization, Archive, Deserialize, Place, Serialize, }; macro_rules! impl_rend_primitive { ($type:ty) => { impl Archive for $type { const COPY_OPTIMIZATION: CopyOptimization = unsafe { CopyOptimization::enable() }; type Archived = Self; type Resolver = (); #[inline] fn resolve(&self, _: Self::Resolver, out: Place) { out.write(*self); } } impl Serialize for $type { fn serialize(&self, _: &mut S) -> Result { Ok(()) } } impl Deserialize<$type, D> for $type { fn deserialize(&self, _: &mut D) -> Result<$type, D::Error> { Ok(*self) } } }; } macro_rules! impl_rend_primitives { ($($type:ty),* $(,)?) 
=> { $(impl_rend_primitive!($type);)* }; } impl_rend_primitives!( i16_le, i32_le, i64_le, i128_le, u16_le, u32_le, u64_le, u128_le, f32_le, f64_le, char_le, NonZeroI16_le, NonZeroI32_le, NonZeroI64_le, NonZeroI128_le, NonZeroU16_le, NonZeroU32_le, NonZeroU64_le, NonZeroU128_le, i16_be, i32_be, i64_be, i128_be, u16_be, u32_be, u64_be, u128_be, f32_be, f64_be, char_be, NonZeroI16_be, NonZeroI32_be, NonZeroI64_be, NonZeroI128_be, NonZeroU16_be, NonZeroU32_be, NonZeroU64_be, NonZeroU128_be, ); #[cfg(test)] mod tests { use rend::*; use crate::api::test::{roundtrip, to_bytes}; #[test] fn roundtrip_integers() { roundtrip(&i16_be::from_native(12345i16)); roundtrip(&i32_be::from_native(1234567890i32)); roundtrip(&i64_be::from_native(1234567890123456789i64)); roundtrip(&i128_be::from_native( 123456789012345678901234567890123456789i128, )); roundtrip(&u16_be::from_native(12345u16)); roundtrip(&u32_be::from_native(1234567890u32)); roundtrip(&u64_be::from_native(12345678901234567890u64)); roundtrip(&u128_be::from_native( 123456789012345678901234567890123456789u128, )); roundtrip(&i16_le::from_native(12345i16)); roundtrip(&i32_le::from_native(1234567890i32)); roundtrip(&i64_le::from_native(1234567890123456789i64)); roundtrip(&i128_le::from_native( 123456789012345678901234567890123456789i128, )); roundtrip(&u16_le::from_native(12345u16)); roundtrip(&u32_le::from_native(1234567890u32)); roundtrip(&u64_le::from_native(12345678901234567890u64)); roundtrip(&u128_le::from_native( 123456789012345678901234567890123456789u128, )); } #[test] fn roundtrip_floats() { roundtrip(&f32_be::from_native(1234567f32)); roundtrip(&f64_be::from_native(12345678901234f64)); roundtrip(&f32_le::from_native(1234567f32)); roundtrip(&f64_le::from_native(12345678901234f64)); } #[test] fn roundtrip_chars() { roundtrip(&char_be::from_native('x')); roundtrip(&char_be::from_native('🥺')); roundtrip(&char_le::from_native('x')); roundtrip(&char_le::from_native('🥺')); } #[test] fn roundtrip_nonzero() { 
roundtrip(&NonZeroI16_be::new(12345).unwrap()); roundtrip(&NonZeroI32_be::new(1234567890).unwrap()); roundtrip(&NonZeroI64_be::new(1234567890123456789).unwrap()); roundtrip( &NonZeroI128_be::new(123456789012345678901234567890123456789) .unwrap(), ); roundtrip(&NonZeroU16_be::new(12345).unwrap()); roundtrip(&NonZeroU32_be::new(1234567890).unwrap()); roundtrip(&NonZeroU64_be::new(1234567890123456789).unwrap()); roundtrip( &NonZeroU128_be::new(123456789012345678901234567890123456789) .unwrap(), ); roundtrip(&NonZeroI16_le::new(12345).unwrap()); roundtrip(&NonZeroI32_le::new(1234567890).unwrap()); roundtrip(&NonZeroI64_le::new(1234567890123456789).unwrap()); roundtrip( &NonZeroI128_le::new(123456789012345678901234567890123456789) .unwrap(), ); roundtrip(&NonZeroU16_le::new(12345).unwrap()); roundtrip(&NonZeroU32_le::new(1234567890).unwrap()); roundtrip(&NonZeroU64_le::new(1234567890123456789).unwrap()); roundtrip( &NonZeroU128_le::new(123456789012345678901234567890123456789) .unwrap(), ); } #[test] fn verify_endianness() { // Big endian let value = i32_be::from_native(0x12345678); to_bytes(&value, |buf| { assert_eq!(&buf[0..4], &[0x12, 0x34, 0x56, 0x78]); }); // Little endian let value = i32_le::from_native(0x12345678i32); to_bytes(&value, |buf| { assert_eq!(&buf[0..4], &[0x78, 0x56, 0x34, 0x12]); }); } } rkyv-0.8.9/src/impls/std/collections/hash_map.rs000064400000000000000000000216201046102023000200070ustar 00000000000000use core::{ borrow::Borrow, hash::{BuildHasher, Hash}, }; use std::collections::HashMap; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::map::{ArchivedHashMap, HashMapResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for HashMap where K: Archive + Hash + Eq, K::Archived: Hash + Eq, { type Archived = ArchivedHashMap; type Resolver = HashMapResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedHashMap::resolve_from_len(self.len(), (7, 8), resolver, out); } } impl 
Serialize for HashMap where K: Serialize + Hash + Eq, K::Archived: Hash + Eq, V: Serialize, S: Fallible + Writer + Allocator + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedHashMap::::serialize_from_iter::< _, _, _, K, V, _, >(self.iter(), (7, 8), serializer) } } impl Deserialize, D> for ArchivedHashMap where K: Archive + Hash + Eq, K::Archived: Deserialize + Hash + Eq, V: Archive, V::Archived: Deserialize, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = HashMap::with_capacity_and_hasher(self.len(), S::default()); for (k, v) in self.iter() { result.insert( k.deserialize(deserializer)?, v.deserialize(deserializer)?, ); } Ok(result) } } impl PartialEq> for ArchivedHashMap where K: Hash + Eq + Borrow, AK: Hash + Eq, AV: PartialEq, S: BuildHasher, { fn eq(&self, other: &HashMap) -> bool { if self.len() != other.len() { false } else { self.iter() .all(|(key, value)| other.get(key).is_some_and(|v| value.eq(v))) } } } impl PartialEq> for HashMap where K: Hash + Eq + Borrow, AK: Hash + Eq, AV: PartialEq, { fn eq(&self, other: &ArchivedHashMap) -> bool { other.eq(self) } } #[cfg(test)] mod tests { use core::{fmt::Debug, hash::BuildHasher}; use std::collections::HashMap; use ahash::RandomState; use crate::{ api::test::{roundtrip, roundtrip_with, to_archived}, collections::swiss_table::ArchivedHashMap, string::ArchivedString, Archive, Archived, Deserialize, Serialize, }; fn assert_equal( a: &HashMap, b: &Archived>, ) where V: Archive + Debug + PartialEq, V::Archived: Debug + PartialEq, { assert_eq!(a.len(), b.len()); for (key, value) in a.iter() { assert!(b.contains_key(key.as_str())); assert_eq!(&b[key.as_str()], value); } for (key, value) in b.iter() { assert!(a.contains_key(key.as_str())); assert_eq!(&a[key.as_str()], value); } } #[test] fn roundtrip_empty_hash_map() { roundtrip(&HashMap::::default()); } #[test] fn 
roundtrip_hash_map_string_int() { let mut map = HashMap::new(); map.insert("Hello".to_string(), 12); map.insert("world".to_string(), 34); map.insert("foo".to_string(), 56); map.insert("bar".to_string(), 78); map.insert("baz".to_string(), 90); roundtrip_with(&map, assert_equal); } #[test] fn roundtrip_hash_map_string_string() { let mut hash_map = HashMap::new(); hash_map.insert("hello".to_string(), "world".to_string()); hash_map.insert("foo".to_string(), "bar".to_string()); hash_map.insert("baz".to_string(), "bat".to_string()); roundtrip_with(&hash_map, assert_equal); } #[test] fn roundtrip_hash_map_zsts() { let mut value = HashMap::new(); value.insert((), 10); roundtrip(&value); let mut value = HashMap::new(); value.insert((), ()); roundtrip(&value); } #[test] fn roundtrip_hash_map_with_custom_hasher_empty() { roundtrip(&HashMap::::default()); } #[test] fn roundtrip_hash_map_with_custom_hasher() { let mut hash_map: HashMap = HashMap::default(); hash_map.insert(1, 2); hash_map.insert(3, 4); hash_map.insert(5, 6); hash_map.insert(7, 8); roundtrip(&hash_map); } #[test] fn roundtrip_hash_map_with_custom_hasher_strings() { let mut hash_map: HashMap<_, _, RandomState> = HashMap::default(); hash_map.insert("hello".to_string(), "world".to_string()); hash_map.insert("foo".to_string(), "bar".to_string()); hash_map.insert("baz".to_string(), "bat".to_string()); roundtrip_with(&hash_map, assert_equal); } #[test] fn get_with() { #[derive(Archive, Serialize, Deserialize, Eq, Hash, PartialEq)] #[rkyv(crate, derive(Eq, Hash, PartialEq))] pub struct Pair(String, String); let mut hash_map = HashMap::new(); hash_map.insert( Pair("my".to_string(), "key".to_string()), "value".to_string(), ); hash_map.insert( Pair("wrong".to_string(), "key".to_string()), "wrong value".to_string(), ); to_archived(&hash_map, |archived| { let get_with = archived .get_with(&("my", "key"), |input_key, key| { &(key.0.as_str(), key.1.as_str()) == input_key }) .unwrap(); assert_eq!(get_with.as_str(), "value"); 
}); } #[test] fn get_seal() { let mut hash_map: HashMap<_, _, RandomState> = HashMap::default(); hash_map.insert("hello".to_string(), "world".to_string()); hash_map.insert("foo".to_string(), "bar".to_string()); hash_map.insert("baz".to_string(), "bat".to_string()); to_archived(&hash_map, |archived| { let mut value = ArchivedHashMap::get_seal(archived, "hello").unwrap(); assert_eq!("world", &*value); let mut string = ArchivedString::as_str_seal(value.as_mut()); string.make_ascii_uppercase(); assert_eq!("WORLD", &*value); }); } #[test] fn iter_seal() { let mut hash_map: HashMap<_, _, RandomState> = HashMap::default(); hash_map.insert("hello".to_string(), "world".to_string()); hash_map.insert("foo".to_string(), "bar".to_string()); hash_map.insert("baz".to_string(), "bat".to_string()); to_archived(&hash_map, |mut archived| { for value in ArchivedHashMap::values_seal(archived.as_mut()) { let mut string = ArchivedString::as_str_seal(value); string.make_ascii_uppercase(); } assert_eq!(archived.get("hello").unwrap(), "WORLD"); assert_eq!(archived.get("foo").unwrap(), "BAR"); assert_eq!(archived.get("baz").unwrap(), "BAT"); }); } #[test] fn large_hash_map() { let mut map = std::collections::HashMap::new(); for i in 0..100 { map.insert(i.to_string(), i); } roundtrip_with(&map, assert_equal); } #[cfg(feature = "bytecheck")] #[test] fn nested_hash_map() { use rancor::{Error, Panic}; use crate::{access, to_bytes}; #[derive( Hash, PartialEq, Eq, Archive, Serialize, Deserialize, Debug, )] #[rkyv(crate, derive(Hash, PartialEq, Eq, Debug))] struct Key(u8, u8); let mut nested_map = std::collections::HashMap::new(); nested_map.insert(1337u16, 42u16); type MyHashMap = HashMap>; let mut map: MyHashMap = std::collections::HashMap::new(); map.insert(Key(1, 2), nested_map.clone()); map.insert(Key(3, 4), nested_map.clone()); let encoded = to_bytes::(&map).unwrap(); eprintln!("{encoded:?}"); // This .unwrap() fails! 
let _decoded = access::, Panic>(&encoded).unwrap(); } } rkyv-0.8.9/src/impls/std/collections/hash_set.rs000064400000000000000000000060201046102023000200220ustar 00000000000000use core::{ borrow::Borrow, hash::{BuildHasher, Hash}, }; use std::collections::HashSet; use rancor::{Fallible, Source}; use crate::{ collections::swiss_table::set::{ArchivedHashSet, HashSetResolver}, ser::{Allocator, Writer}, Archive, Deserialize, Place, Serialize, }; impl Archive for HashSet where K: Archive + Hash + Eq, K::Archived: Hash + Eq, { type Archived = ArchivedHashSet; type Resolver = HashSetResolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { ArchivedHashSet::::resolve_from_len( self.len(), (7, 8), resolver, out, ); } } impl Serialize for HashSet where K::Archived: Hash + Eq, K: Serialize + Hash + Eq, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, { fn serialize( &self, serializer: &mut S, ) -> Result { ArchivedHashSet::::serialize_from_iter::<_, K, _>( self.iter(), (7, 8), serializer, ) } } impl Deserialize, D> for ArchivedHashSet where K: Archive + Hash + Eq, K::Archived: Deserialize + Hash + Eq, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize( &self, deserializer: &mut D, ) -> Result, D::Error> { let mut result = HashSet::with_hasher(S::default()); for k in self.iter() { result.insert(k.deserialize(deserializer)?); } Ok(result) } } impl, AK: Hash + Eq, S: BuildHasher> PartialEq> for ArchivedHashSet { fn eq(&self, other: &HashSet) -> bool { if self.len() != other.len() { false } else { self.iter().all(|key| other.get(key).is_some()) } } } impl, AK: Hash + Eq, S: BuildHasher> PartialEq> for HashSet { fn eq(&self, other: &ArchivedHashSet) -> bool { other.eq(self) } } #[cfg(test)] mod tests { use std::collections::HashSet; use crate::api::test::{roundtrip, roundtrip_with}; #[test] fn roundtrip_hash_set() { let mut hash_set = HashSet::new(); hash_set.insert("hello".to_string()); hash_set.insert("world".to_string()); 
hash_set.insert("foo".to_string()); hash_set.insert("bar".to_string()); hash_set.insert("baz".to_string()); roundtrip_with(&hash_set, |a, b| { assert_eq!(a.len(), b.len()); for key in a.iter() { assert!(b.contains(key.as_str())); } for key in b.iter() { assert!(a.contains(key.as_str())); } }); } #[test] fn roundtrip_hash_set_zst() { let mut value = HashSet::new(); value.insert(()); roundtrip(&value); } } rkyv-0.8.9/src/impls/std/collections/mod.rs000064400000000000000000000000341046102023000170020ustar 00000000000000mod hash_map; mod hash_set; rkyv-0.8.9/src/impls/std/mod.rs000064400000000000000000000000441046102023000144650ustar 00000000000000mod collections; mod net; mod with; rkyv-0.8.9/src/impls/std/net.rs000064400000000000000000000015031046102023000144750ustar 00000000000000use core::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; use std::{io, net::ToSocketAddrs}; use crate::net::{ ArchivedSocketAddr, ArchivedSocketAddrV4, ArchivedSocketAddrV6, }; impl ToSocketAddrs for ArchivedSocketAddrV4 { type Iter = ::Iter; fn to_socket_addrs(&self) -> io::Result { self.as_socket_addr_v4().to_socket_addrs() } } impl ToSocketAddrs for ArchivedSocketAddrV6 { type Iter = ::Iter; fn to_socket_addrs(&self) -> io::Result { self.as_socket_addr_v6().to_socket_addrs() } } impl ToSocketAddrs for ArchivedSocketAddr { type Iter = ::Iter; fn to_socket_addrs(&self) -> io::Result { self.as_socket_addr().to_socket_addrs() } } rkyv-0.8.9/src/impls/std/with.rs000064400000000000000000000437311046102023000146730ustar 00000000000000use core::{error::Error, fmt, hash::BuildHasher}; use std::{ borrow::Cow, collections::{HashMap, HashSet}, ffi::{CStr, OsString}, hash::Hash, marker::PhantomData, path::{Path, PathBuf}, str::FromStr, sync::{Mutex, RwLock}, time::{SystemTime, UNIX_EPOCH}, }; use rancor::{Fallible, OptionExt, ResultExt, Source}; use crate::{ collections::{ swiss_table::{ArchivedHashMap, HashMapResolver}, util::{Entry, EntryAdapter}, }, ffi::{ArchivedCString, CStringResolver}, 
hash::FxHasher64, impls::core::with::RefWrapper, ser::{Allocator, Writer}, string::{ArchivedString, StringResolver}, time::ArchivedDuration, vec::{ArchivedVec, VecResolver}, with::{ ArchiveWith, AsOwned, AsString, AsUnixTime, AsVec, DeserializeWith, Lock, MapKV, SerializeWith, }, Archive, Deserialize, Place, Serialize, SerializeUnsized, }; // MapKV impl ArchiveWith> for MapKV where A: ArchiveWith, B: ArchiveWith, H: Default + BuildHasher, { type Archived = ArchivedHashMap< >::Archived, >::Archived, >; type Resolver = HashMapResolver; fn resolve_with( field: &HashMap, resolver: Self::Resolver, out: Place, ) { ArchivedHashMap::resolve_from_len(field.len(), (7, 8), resolver, out) } } impl SerializeWith, S> for MapKV where A: ArchiveWith + SerializeWith, B: ArchiveWith + SerializeWith, K: Hash + Eq, >::Archived: Eq + Hash, S: Fallible + Allocator + Writer + ?Sized, S::Error: Source, H: Default + BuildHasher, H::Hasher: Default, { fn serialize_with( field: &HashMap, serializer: &mut S, ) -> Result::Error> { ArchivedHashMap::<_, _, FxHasher64>::serialize_from_iter( field.iter().map(|(k, v)| { ( RefWrapper::<'_, A, K>(k, PhantomData::), RefWrapper::<'_, B, V>(v, PhantomData::), ) }), (7, 8), serializer, ) } } impl DeserializeWith< ArchivedHashMap< >::Archived, >::Archived, >, HashMap, D, > for MapKV where A: ArchiveWith + DeserializeWith<>::Archived, K, D>, B: ArchiveWith + DeserializeWith<>::Archived, V, D>, K: Ord + Hash + Eq, D: Fallible + ?Sized, S: Default + BuildHasher, { fn deserialize_with( field: &ArchivedHashMap< >::Archived, >::Archived, >, deserializer: &mut D, ) -> Result, ::Error> { let mut result = HashMap::with_capacity_and_hasher(field.len(), S::default()); for (k, v) in field.iter() { result.insert( A::deserialize_with(k, deserializer)?, B::deserialize_with(v, deserializer)?, ); } Ok(result) } } // AsString #[derive(Debug)] struct InvalidUtf8; impl fmt::Display for InvalidUtf8 { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, 
"invalid UTF-8") } } impl Error for InvalidUtf8 {} impl ArchiveWith for AsString { type Archived = ArchivedString; type Resolver = StringResolver; #[inline] fn resolve_with( field: &OsString, resolver: Self::Resolver, out: Place, ) { // It's safe to unwrap here because if the OsString wasn't valid UTF-8 // it would have failed to serialize ArchivedString::resolve_from_str( field.to_str().unwrap(), resolver, out, ); } } impl SerializeWith for AsString where S: Fallible + ?Sized, S::Error: Source, str: SerializeUnsized, { fn serialize_with( field: &OsString, serializer: &mut S, ) -> Result { ArchivedString::serialize_from_str( field.to_str().into_trace(InvalidUtf8)?, serializer, ) } } impl DeserializeWith for AsString where D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedString, _: &mut D, ) -> Result { Ok(OsString::from_str(field.as_str()).unwrap()) } } impl ArchiveWith for AsString { type Archived = ArchivedString; type Resolver = StringResolver; #[inline] fn resolve_with( field: &PathBuf, resolver: Self::Resolver, out: Place, ) { // It's safe to unwrap here because if the OsString wasn't valid UTF-8 // it would have failed to serialize ArchivedString::resolve_from_str( field.to_str().unwrap(), resolver, out, ); } } impl SerializeWith for AsString where S: Fallible + ?Sized, S::Error: Source, str: SerializeUnsized, { fn serialize_with( field: &PathBuf, serializer: &mut S, ) -> Result { ArchivedString::serialize_from_str( field.to_str().into_trace(InvalidUtf8)?, serializer, ) } } impl DeserializeWith for AsString where D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedString, _: &mut D, ) -> Result { Ok(Path::new(field.as_str()).to_path_buf()) } } // Lock #[derive(Debug)] struct LockPoisoned; impl fmt::Display for LockPoisoned { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "lock poisoned") } } impl Error for LockPoisoned {} impl ArchiveWith> for Lock { type Archived = F::Archived; type Resolver = F::Resolver; fn 
resolve_with( field: &Mutex, resolver: Self::Resolver, out: Place, ) { // Unfortunately, we have to unwrap here because resolve must be // infallible // // An alternative would be to only implement ArchiveWith for // Arc>, copy an Arc into the resolver, and hold the // guard in there as well (as a reference to the internal mutex). // This unfortunately will cause a deadlock if two Arcs to the same // Mutex are serialized before the first is resolved. The // compromise is, unfortunately, to just unwrap poison // errors here and document it. field.lock().unwrap().resolve(resolver, out); } } impl SerializeWith, S> for Lock where F: Serialize, S: Fallible + ?Sized, S::Error: Source, { fn serialize_with( field: &Mutex, serializer: &mut S, ) -> Result { field .lock() .ok() .into_trace(LockPoisoned)? .serialize(serializer) } } impl DeserializeWith, D> for Lock where F: Deserialize, D: Fallible + ?Sized, { fn deserialize_with( field: &F, deserializer: &mut D, ) -> Result, D::Error> { Ok(Mutex::new(field.deserialize(deserializer)?)) } } impl ArchiveWith> for Lock { type Archived = F::Archived; type Resolver = F::Resolver; fn resolve_with( field: &RwLock, resolver: Self::Resolver, out: Place, ) { // Unfortunately, we have to unwrap here because resolve must be // infallible // // An alternative would be to only implement ArchiveWith for // Arc>, copy a an Arc into the resolver, and hold the // guard in there as well (as a reference to the internal // mutex). This unfortunately will cause a deadlock if two Arcs to the // same Mutex are serialized before the first is resolved. The // compromise is, unfortunately, to just unwrap poison errors // here and document it. field.read().unwrap().resolve(resolver, out); } } impl SerializeWith, S> for Lock where F: Serialize, S: Fallible + ?Sized, S::Error: Source, { fn serialize_with( field: &RwLock, serializer: &mut S, ) -> Result { field .read() .ok() .into_trace(LockPoisoned)? 
.serialize(serializer) } } impl DeserializeWith, D> for Lock where F: Deserialize, D: Fallible + ?Sized, { fn deserialize_with( field: &F, deserializer: &mut D, ) -> Result, D::Error> { Ok(RwLock::new(field.deserialize(deserializer)?)) } } // AsVec impl ArchiveWith> for AsVec { type Archived = ArchivedVec>; type Resolver = VecResolver; fn resolve_with( field: &HashMap, resolver: Self::Resolver, out: Place, ) { ArchivedVec::resolve_from_len(field.len(), resolver, out); } } impl SerializeWith, S> for AsVec where K: Serialize, V: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize_with( field: &HashMap, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_iter( field.iter().map(|(key, value)| { EntryAdapter::<_, _, K, V>::new(key, value) }), serializer, ) } } impl DeserializeWith< ArchivedVec>, HashMap, D, > for AsVec where K: Archive + Hash + Eq, V: Archive, K::Archived: Deserialize, V::Archived: Deserialize, H: BuildHasher + Default, D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedVec>, deserializer: &mut D, ) -> Result, D::Error> { let mut result = HashMap::with_capacity_and_hasher(field.len(), H::default()); for entry in field.iter() { result.insert( entry.key.deserialize(deserializer)?, entry.value.deserialize(deserializer)?, ); } Ok(result) } } impl ArchiveWith> for AsVec { type Archived = ArchivedVec; type Resolver = VecResolver; fn resolve_with( field: &HashSet, resolver: Self::Resolver, out: Place, ) { ArchivedVec::resolve_from_len(field.len(), resolver, out); } } impl SerializeWith, S> for AsVec where T: Serialize, S: Fallible + Allocator + Writer + ?Sized, { fn serialize_with( field: &HashSet, serializer: &mut S, ) -> Result { ArchivedVec::::serialize_from_iter::( field.iter(), serializer, ) } } impl DeserializeWith, HashSet, D> for AsVec where T: Archive + Hash + Eq, T::Archived: Deserialize, H: BuildHasher + Default, D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedVec, deserializer: &mut D, ) -> Result, 
D::Error> { let mut result = HashSet::with_capacity_and_hasher(field.len(), H::default()); for key in field.iter() { result.insert(key.deserialize(deserializer)?); } Ok(result) } } // UnixTimestamp impl ArchiveWith for AsUnixTime { type Archived = ArchivedDuration; type Resolver = (); #[inline] fn resolve_with( field: &SystemTime, resolver: Self::Resolver, out: Place, ) { // We already checked the duration during serialize_with let duration = field.duration_since(UNIX_EPOCH).unwrap(); Archive::resolve(&duration, resolver, out); } } impl SerializeWith for AsUnixTime where S: Fallible + ?Sized, S::Error: Source, { fn serialize_with( field: &SystemTime, _: &mut S, ) -> Result { field.duration_since(UNIX_EPOCH).into_error()?; Ok(()) } } impl DeserializeWith for AsUnixTime where D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedDuration, _: &mut D, ) -> Result { // `checked_add` forces correct type deduction when multiple `Duration` // are present. Ok(UNIX_EPOCH.checked_add((*field).into()).unwrap()) } } // AsOwned impl<'a> ArchiveWith> for AsOwned { type Archived = ArchivedCString; type Resolver = CStringResolver; #[inline] fn resolve_with( field: &Cow<'a, CStr>, resolver: Self::Resolver, out: Place, ) { ArchivedCString::resolve_from_c_str(field, resolver, out); } } impl<'a, S> SerializeWith, S> for AsOwned where S: Fallible + Writer + ?Sized, { fn serialize_with( field: &Cow<'a, CStr>, serializer: &mut S, ) -> Result { ArchivedCString::serialize_from_c_str(field, serializer) } } impl<'a, D> DeserializeWith, D> for AsOwned where D: Fallible + ?Sized, D::Error: Source, { fn deserialize_with( field: &ArchivedCString, deserializer: &mut D, ) -> Result, D::Error> { Ok(Cow::Owned(field.deserialize(deserializer)?)) } } #[cfg(test)] mod tests { use std::{ collections::BTreeMap, ffi::OsString, path::PathBuf, sync::{Mutex, RwLock}, }; use crate::{ alloc::collections::HashMap, api::test::{roundtrip_with, to_archived}, with::{AsString, InlineAsBox, Lock, MapKV}, 
Archive, Deserialize, Serialize, }; #[test] fn roundtrip_mutex() { #[derive(Archive, Serialize, Deserialize, Debug)] #[rkyv(crate, derive(Debug, PartialEq))] struct Test { #[rkyv(with = Lock)] value: Mutex, } impl PartialEq for Test { fn eq(&self, other: &Self) -> bool { let self_value = self.value.lock().unwrap(); let other_value = other.value.lock().unwrap(); *self_value == *other_value } } roundtrip_with( &Test { value: Mutex::new(10), }, |a, b| { let a_value = a.value.lock().unwrap(); assert_eq!(b.value, *a_value); }, ); } #[test] fn with_hash_map_mapkv() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test<'a> { #[rkyv(with = MapKV)] inner: HashMap<&'a str, &'a str>, } let mut inner = HashMap::new(); inner.insert("cat", "hat"); let value = Test { inner }; to_archived(&value, |archived| { assert_eq!(&**archived.inner.get("cat").unwrap(), "hat"); }); } #[test] fn with_btree_map_mapkv() { #[derive(Archive, Serialize, Deserialize)] #[rkyv(crate)] struct Test<'a> { #[rkyv(with = MapKV)] inner: BTreeMap<&'a str, &'a str>, } let mut inner = BTreeMap::new(); inner.insert("cat", "hat"); let value = Test { inner }; to_archived(&value, |archived| { assert_eq!(&**archived.inner.get("cat").unwrap(), "hat"); }); } #[test] fn roundtrip_rwlock() { #[derive(Archive, Serialize, Deserialize, Debug)] #[rkyv(crate, derive(Debug, PartialEq))] struct Test { #[rkyv(with = Lock)] value: RwLock, } impl PartialEq for Test { fn eq(&self, other: &Self) -> bool { let self_value = self.value.try_read().unwrap(); let other_value = other.value.try_read().unwrap(); *self_value == *other_value } } roundtrip_with( &Test { value: RwLock::new(10), }, |a, b| { let a_value = a.value.try_read().unwrap(); assert_eq!(b.value, *a_value); }, ); } #[test] fn roundtrip_os_string() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug, PartialEq))] struct Test { #[rkyv(with = AsString)] value: OsString, } roundtrip_with( &Test { value: 
OsString::from("hello world"), }, |a, b| { assert_eq!(a.value.as_os_str().to_str().unwrap(), b.value); }, ); } #[test] fn roundtrip_path_buf() { #[derive(Archive, Serialize, Deserialize, Debug, PartialEq)] #[rkyv(crate, derive(Debug, PartialEq))] struct Test { #[rkyv(with = AsString)] value: PathBuf, } roundtrip_with( &Test { value: PathBuf::from("hello world"), }, |a, b| { assert_eq!(a.value.as_os_str().to_str().unwrap(), b.value); }, ); } } rkyv-0.8.9/src/lib.rs000064400000000000000000000303671046102023000125510ustar 00000000000000//! rkyv is a zero-copy deserialization framework for Rust. //! //! ## Overview //! //! rkyv uses Rust's powerful trait system to serialize data without reflection. //! Many zero-copy deserialization frameworks use external schemas and heavily //! restrict the available data types. By contrast, rkyv allows all serialized //! types to be defined in code and can serialize a wide variety of types that //! other frameworks cannot. //! //! rkyv scales to highly-capable as well as highly-restricted environments. Not //! only does rkyv support "no-std" builds for targets without a standard //! library implementation, it also supports "no-alloc" builds for targets where //! allocations cannot be made. //! //! rkyv supports limited in-place data mutation, and so can access and update //! data without ever deserializing back to native types. When rkyv's in-place //! mutation is too limited, rkyv also provides ergonomic and performant //! deserialization back into native types. //! //! rkyv prioritizes performance, and is one of the fastest serialization //! frameworks available. All of rkyv's features can be individually enabled and //! disabled, so you only pay for what you use. Additionally, all of rkyv's //! zero-copy types are designed to have little to no overhead. In most cases, //! rkyv's types will have exactly the same performance as native types. //! //! See the [rkyv book] for guide-level documentation and usage examples. //! //! 
[rkyv book]: https://rkyv.org //! //! ## Components //! //! rkyv has [a hash map implementation] that is built for zero-copy //! deserialization, with the same lookup and iteration performance as the //! standard library hash maps. The hash map implementation is based on //! [Swiss Tables] and uses a target-independent version of FxHash to ensure //! that all targets compute the same hashes. //! //! It also has [a B-tree implementation] that has the same performance //! characteristics as the standard library B-tree maps. Its compact //! representation and localized data storage is best-suited for very large //! amounts of data. //! //! rkyv supports [shared pointers] by default, and is able to serialize and //! deserialize them without duplicating the underlying data. Shared pointers //! which point to the same data when serialized will still point to the same //! data when deserialized. By default, rkyv only supports non-cyclic data //! structures. //! //! Alongside its [unchecked API], rkyv also provides optional [validation] so //! you can ensure safety and data integrity at the cost of some overhead. //! Because checking serialized data can generally be done without allocations, //! the cost of checking and zero-copy access can be much lower than that of //! traditional deserialization. //! //! rkyv is trait-oriented from top to bottom, and is made to be extended with //! custom and specialized types. Serialization, deserialization, and //! validation traits all accept generic context types, making it easy to add //! new capabilities without degrading ergonomics. //! //! [a hash map implementation]: collections::swiss_table::ArchivedHashMap //! [Swiss Tables]: https://abseil.io/about/design/swisstables //! [a B-tree implementation]: collections::btree_map::ArchivedBTreeMap //! [shared pointers]: rc //! [unchecked API]: access_unchecked //! [validation]: access //! //! ## Features //! //! rkyv has several feature flags which can be used to modify its behavior. 
By //! default, rkyv enables the `std`, `alloc`, and `bytecheck` features. //! //! ### Format control //! //! These features control how rkyv formats its serialized data. Enabling and //! disabling these features may change rkyv's serialized format, and as such //! can cause previously-serialized data to become unreadable. Enabling format //! control features that are not the default should be considered a breaking //! change to rkyv's serialized format. //! //! Binaries should consider explicitly choosing format control options from the //! start, even though doing so is not required. This ensures that developers //! stay informed about the specific choices being made, and prevents any //! unexpected compatibility issues with libraries they depend on. //! //! Libraries should avoid enabling format control features unless they intend //! to only support rkyv when those specific format control features are //! enabled. In general, libraries should be able to support all format control //! options if they use rkyv's exported types and aliases. //! //! #### Endianness //! //! If an endianness feature is not enabled, rkyv will use little-endian byte //! ordering by default. //! //! - `little_endian`: Forces data serialization to use little-endian byte //! ordering. This optimizes serialized data for little-endian architectures. //! - `big_endian`: Forces data serialization to use big-endian byte ordering. //! This optimizes serialized data for big-endian architectures. //! //! #### Alignment //! //! If an alignment feature is not enabled, rkyv will use aligned primitives by //! default. //! //! - `aligned`: Forces data serialization to use aligned primitives. This adds //! alignment requirements for accessing data and prevents rkyv from working //! with unaligned data. //! - `unaligned`: Forces data serialization to use unaligned primitives. This //! removes alignment requirements for accessing data and allows rkyv to work //! with unaligned data more easily. //! //! 
#### Pointer width //! //! If a pointer width feature is not enabled, rkyv will serialize `isize` and //! `usize` as 32-bit integers by default. //! //! - `pointer_width_16`: Serializes `isize` and `usize` as 16-bit integers. //! This is intended to be used only for small data sizes and may not handle //! large amounts of data. //! - `pointer_width_32`: Serializes `isize` and `usize` as 32-bit integers. //! This is a good choice for most data, and balances the storage overhead //! with support for large data sizes. //! - `pointer_width_64`: Serializes `isize` and `usize` as 64-bit integers. //! This is intended to be used only for extremely large data sizes and may //! cause unnecessary data bloat for smaller amounts of data. //! //! ### Functionality //! //! These features enable more built-in functionality and provide more powerful //! and ergonomic APIs. Enabling and disabling these features does not change //! rkyv's serialized format. //! //! - `alloc`: Enables support for the `alloc` crate. Enabled by default. //! - `std`: Enables standard library support. Enabled by default. //! - `bytecheck`: Enables data validation through `bytecheck`. Enabled by //! default. //! //! ### Crates //! //! rkyv provides integrations for some common crates by default. In the future, //! crates should depend on rkyv and provide their own integration. Enabling and //! disabling these features does not change rkyv's serialized format. //! //! - [`arrayvec-0_7`](https://docs.rs/arrayvec/0.7) //! - [`bytes-1`](https://docs.rs/bytes/1) //! - [`hashbrown-0_14`](https://docs.rs/hashbrown/0.14) //! - [`hashbrown-0_15`](https://docs.rs/hashbrown/0.15) //! - [`indexmap-2`](https://docs.rs/indexmap/2) //! - [`smallvec-1`](https://docs.rs/smallvec/1) //! - [`smol_str-0_2`](https://docs.rs/smol_str/0.2) //! - [`smol_str-0_3`](https://docs.rs/smol_str/0.3) //! - [`thin-vec-0_2`](https://docs.rs/thin-vec/0.2) //! - [`tinyvec-1`](https://docs.rs/tinyvec/1) //! 
- [`triomphe-0_1`](https://docs.rs/triomphe/0.1) //! - [`uuid-1`](https://docs.rs/uuid/1) //! //! ## Compatibility //! //! Serialized data can be accessed later as long as: //! //! - The underlying schema has not changed //! - The serialized format has not been changed by format control features //! - The data was serialized by a semver-compatible version of rkyv // Crate attributes #![deny( future_incompatible, missing_docs, nonstandard_style, unsafe_op_in_unsafe_fn, unused, warnings, clippy::all, clippy::missing_safety_doc, // TODO(#114): re-enable this lint after justifying unsafe blocks // clippy::undocumented_unsafe_blocks, rustdoc::broken_intra_doc_links, rustdoc::missing_crate_level_docs )] #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(all(docsrs, not(doctest)), feature(doc_cfg, doc_auto_cfg))] #![doc(html_favicon_url = r#" data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 26.458 26.458'%3E%3Cpath d='M0 0v26.458h26.458V0zm9.175 3.772l8.107 8.106 2.702-2.702 2.702 13.512-13.512-2.702 2.703-2.702-8.107-8.107z'/%3E %3C/svg%3E "#)] #![doc(html_logo_url = r#" data:image/svg+xml,%3Csvg xmlns="http://www.w3.org/2000/svg" width="100" height="100" viewBox="0 0 26.458 26.458"%3E%3Cpath d="M0 0v26.458h26.458V0zm9.175 3.772l8.107 8.106 2.702-2.702 2.702 13.512-13.512-2.702 2.703-2.702-8.107-8.107z"/%3E%3C/svg%3E "#)] #![cfg_attr(miri, feature(alloc_layout_extra))] // Extern crates #[cfg(all(feature = "alloc", not(feature = "std")))] extern crate alloc; #[cfg(feature = "std")] use std as alloc; // Re-exports #[cfg(feature = "bytecheck")] pub use ::bytecheck; pub use ::munge; pub use ::ptr_meta; pub use ::rancor; pub use ::rend; // Modules mod alias; #[macro_use] mod _macros; pub mod api; pub mod boxed; pub mod collections; pub mod de; pub mod ffi; mod fmt; pub mod hash; mod impls; pub mod net; pub mod niche; pub mod ops; pub mod option; pub mod place; mod polyfill; pub mod primitive; pub mod rc; pub mod rel_ptr; pub mod result; pub mod 
seal; pub mod ser; mod simd; pub mod string; pub mod time; pub mod traits; pub mod tuple; pub mod util; #[cfg(feature = "bytecheck")] pub mod validation; pub mod vec; pub mod with; // Exports #[cfg(all(feature = "bytecheck", feature = "alloc"))] #[doc(inline)] pub use api::high::{access, access_mut, from_bytes}; #[cfg(feature = "alloc")] #[doc(inline)] pub use api::high::{deserialize, from_bytes_unchecked, to_bytes}; #[doc(inline)] pub use crate::{ alias::*, api::{access_unchecked, access_unchecked_mut}, place::Place, traits::{ Archive, ArchiveUnsized, Deserialize, DeserializeUnsized, Portable, Serialize, SerializeUnsized, }, }; // Check endianness feature flag settings #[cfg(all(feature = "little_endian", feature = "big_endian"))] core::compiler_error!( "\"little_endian\" and \"big_endian\" are mutually-exclusive features. \ You may need to set `default-features = false` or compile with \ `--no-default-features`." ); // Check alignment feature flag settings #[cfg(all(feature = "aligned", feature = "unaligned"))] core::compiler_error!( "\"aligned\" and \"unaligned\" are mutually-exclusive features. You may \ need to set `default-features = false` or compile with \ `--no-default-features`." ); // Check pointer width feature flag settings #[cfg(all( feature = "pointer_width_16", feature = "pointer_width_32", not(feature = "pointer_width_64") ))] core::compile_error!( "\"pointer_width_16\" and \"pointer_width_32\" are mutually-exclusive \ features. You may need to set `default-features = false` or compile with \ `--no-default-features`." ); #[cfg(all( feature = "pointer_width_16", feature = "pointer_width_64", not(feature = "pointer_width_32") ))] core::compile_error!( "\"pointer_width_16\" and \"pointer_width_64\" are mutually-exclusive \ features. You may need to set `default-features = false` or compile with \ `--no-default-features`." 
); #[cfg(all( feature = "pointer_width_32", feature = "pointer_width_64", not(feature = "pointer_width_16") ))] core::compile_error!( "\"pointer_width_32\" and \"pointer_width_64\" are mutually-exclusive \ features. You may need to set `default-features = false` or compile with \ `--no-default-features`." ); #[cfg(all( feature = "pointer_width_16", feature = "pointer_width_32", feature = "pointer_width_64" ))] core::compile_error!( "\"pointer_width_16\", \"pointer_width_32\", and \"pointer_width_64\" are \ mutually-exclusive features. You may need to set `default-features = \ false` or compile with `--no-default-features`." ); rkyv-0.8.9/src/net.rs000064400000000000000000000326661046102023000125750ustar 00000000000000//! Archived versions of network types. use core::net::{ IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, }; use munge::munge; use crate::{ primitive::{ArchivedU16, ArchivedU32}, Archive, Place, Portable, }; /// An archived [`Ipv4Addr`]. #[derive(Portable)] #[rkyv(crate)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[repr(transparent)] pub struct ArchivedIpv4Addr { octets: [u8; 4], } impl ArchivedIpv4Addr { /// Returns the four eight-bit integers that make up this address. #[inline] pub const fn octets(&self) -> [u8; 4] { self.octets } /// Returns an [`Ipv4Addr`] with the same value. #[inline] pub const fn as_ipv4(&self) -> Ipv4Addr { let octets = self.octets(); Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3]) } /// Returns `true` if this is a broadcast address (255.255.255.255). /// /// See [`Ipv4Addr::is_broadcast`] for more details. #[inline] pub const fn is_broadcast(&self) -> bool { self.as_ipv4().is_broadcast() } /// Returns `true` if this address is in a range designated for /// documentation. /// /// See [`Ipv4Addr::is_documentation`] for more details. 
#[inline] pub const fn is_documentation(&self) -> bool { self.as_ipv4().is_documentation() } /// Returns `true` if the address is link-local (169.254.0.0/16). /// /// See [`Ipv4Addr::is_link_local`] for more details. #[inline] pub const fn is_link_local(&self) -> bool { self.as_ipv4().is_link_local() } /// Returns `true` if this is a loopback address (127.0.0.0/8). /// /// See [`Ipv4Addr::is_loopback`] for more details. #[inline] pub const fn is_loopback(&self) -> bool { self.as_ipv4().is_loopback() } /// Returns `true` if this is a multicast address (224.0.0.0/4). /// /// See [`Ipv4Addr::is_multicast`] for more details. #[inline] pub const fn is_multicast(&self) -> bool { self.as_ipv4().is_multicast() } /// Returns `true` if this is a private address. /// /// See [`Ipv4Addr::is_private`] for more details. #[inline] pub const fn is_private(&self) -> bool { self.as_ipv4().is_private() } /// Returns `true` for the special 'unspecified' address (0.0.0.0). /// /// See [`Ipv4Addr::is_unspecified`] for more details. #[inline] pub const fn is_unspecified(&self) -> bool { self.as_ipv4().is_unspecified() } /// Converts this address to an IPv4-compatible /// [`IPv6` address](std::net::Ipv6Addr). /// /// See [`Ipv4Addr::to_ipv6_compatible`] for more /// details. #[inline] #[allow(clippy::wrong_self_convention)] pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { self.as_ipv4().to_ipv6_compatible() } /// Converts this address to an IPv4-mapped /// [`IPv6` address](std::net::Ipv6Addr). /// /// See [`Ipv4Addr::to_ipv6_mapped`] for more details. #[inline] #[allow(clippy::wrong_self_convention)] pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { self.as_ipv4().to_ipv6_mapped() } /// Emplaces an `ArchivedIpv4Addr` with the given octets into a place. #[inline] pub fn emplace(octets: [u8; 4], out: Place) { unsafe { out.cast_unchecked::<[u8; 4]>().write(octets); } } } /// An archived [`Ipv6Addr`]. 
#[derive(Portable)] #[rkyv(crate)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] #[repr(transparent)] pub struct ArchivedIpv6Addr { octets: [u8; 16], } impl ArchivedIpv6Addr { /// Returns the eight 16-bit segments that make up this address. #[inline] pub const fn segments(&self) -> [u16; 8] { [ u16::from_be_bytes([self.octets[0], self.octets[1]]), u16::from_be_bytes([self.octets[2], self.octets[3]]), u16::from_be_bytes([self.octets[4], self.octets[5]]), u16::from_be_bytes([self.octets[6], self.octets[7]]), u16::from_be_bytes([self.octets[8], self.octets[9]]), u16::from_be_bytes([self.octets[10], self.octets[11]]), u16::from_be_bytes([self.octets[12], self.octets[13]]), u16::from_be_bytes([self.octets[14], self.octets[15]]), ] } /// Returns an [`Ipv6Addr`] with the same value. #[inline] pub const fn as_ipv6(&self) -> Ipv6Addr { let segments = self.segments(); Ipv6Addr::new( segments[0], segments[1], segments[2], segments[3], segments[4], segments[5], segments[6], segments[7], ) } /// Returns `true` if this is a loopback address (::1). /// /// See [`Ipv6Addr::is_loopback()`](std::net::Ipv6Addr::is_loopback()) for /// more details. #[inline] pub const fn is_loopback(&self) -> bool { self.as_ipv6().is_loopback() } /// Returns `true` if this is a multicast address (ff00::/8). /// /// See [`Ipv6Addr::is_multicast()`](std::net::Ipv6Addr::is_multicast()) for /// more details. #[inline] pub const fn is_multicast(&self) -> bool { self.as_ipv6().is_multicast() } /// Returns `true` for the special 'unspecified' address (::). /// /// See [`Ipv6Addr::is_unspecified()`](std::net::Ipv6Addr::is_unspecified()) /// for more details. #[inline] pub const fn is_unspecified(&self) -> bool { self.as_ipv6().is_unspecified() } /// Returns the sixteen eight-bit integers the IPv6 address consists of. 
#[inline] pub const fn octets(&self) -> [u8; 16] { self.as_ipv6().octets() } /// Converts this address to an [`IPv4` address](std::net::Ipv4Addr). /// Returns [`None`] if this address is neither IPv4-compatible or /// IPv4-mapped. #[inline] #[allow(clippy::wrong_self_convention)] pub const fn to_ipv4(&self) -> Option { self.as_ipv6().to_ipv4() } /// Emplaces an `ArchivedIpv6Addr` with the given octets into a place. #[inline] pub fn emplace(octets: [u8; 16], out: Place) { unsafe { out.cast_unchecked::<[u8; 16]>().write(octets); } } } /// An archived [`IpAddr`]. #[derive(Portable)] #[rkyv(crate)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[repr(u8)] pub enum ArchivedIpAddr { /// An IPv4 address. V4(ArchivedIpv4Addr), /// An IPv6 address. V6(ArchivedIpv6Addr), } impl ArchivedIpAddr { /// Returns `true` if this address is an [`IPv4` /// address](std::net::IpAddr::V4), and `false` otherwise. #[inline] pub const fn is_ipv4(&self) -> bool { matches!(self, ArchivedIpAddr::V4(_)) } /// Returns `true` if this address is an [`IPv6` /// address](std::net::IpAddr::V6), and `false` otherwise. #[inline] pub const fn is_ipv6(&self) -> bool { matches!(self, ArchivedIpAddr::V6(_)) } /// Returns an [`IpAddr`] with the same value. #[inline] pub const fn as_ipaddr(&self) -> IpAddr { match self { ArchivedIpAddr::V4(ipv4) => IpAddr::V4(ipv4.as_ipv4()), ArchivedIpAddr::V6(ipv6) => IpAddr::V6(ipv6.as_ipv6()), } } /// Returns `true` if this is a loopback address. /// /// See [`IpAddr::is_loopback()`](std::net::IpAddr::is_loopback()) for more /// details. #[inline] pub const fn is_loopback(&self) -> bool { match self { ArchivedIpAddr::V4(ip) => ip.is_loopback(), ArchivedIpAddr::V6(ip) => ip.is_loopback(), } } /// Returns `true` if this is a multicast address. /// /// See [`IpAddr::is_multicast()`](std::net::IpAddr::is_multicast()) for /// more details. 
#[inline] pub const fn is_multicast(&self) -> bool { match self { ArchivedIpAddr::V4(ip) => ip.is_multicast(), ArchivedIpAddr::V6(ip) => ip.is_multicast(), } } /// Returns `true` for the special 'unspecified' address. /// /// See [`IpAddr::is_unspecified()`](std::net::IpAddr::is_unspecified()) for /// more details. #[inline] pub const fn is_unspecified(&self) -> bool { match self { ArchivedIpAddr::V4(ip) => ip.is_unspecified(), ArchivedIpAddr::V6(ip) => ip.is_unspecified(), } } } /// An archived [`SocketAddrV4`]. #[derive( Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, Portable, PartialOrd, )] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct ArchivedSocketAddrV4 { ip: ArchivedIpv4Addr, port: ArchivedU16, } impl ArchivedSocketAddrV4 { /// Returns the IP address associated with this socket address. #[inline] pub const fn ip(&self) -> &ArchivedIpv4Addr { &self.ip } /// Returns the port number associated with this socket address. #[inline] pub const fn port(&self) -> u16 { self.port.to_native() } /// Returns a [`SocketAddrV4`] with the same value. #[inline] pub fn as_socket_addr_v4(&self) -> SocketAddrV4 { SocketAddrV4::new(self.ip().as_ipv4(), self.port()) } /// Emplaces an `ArchivedSocketAddrV4` of the given `value` into a place. #[inline] pub fn emplace(value: &SocketAddrV4, out: Place) { munge!(let ArchivedSocketAddrV4 { ip, port } = out); value.ip().resolve((), ip); value.port().resolve((), port); } } /// An archived [`SocketAddrV6`]. #[derive( Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, Portable, PartialOrd, )] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct ArchivedSocketAddrV6 { ip: ArchivedIpv6Addr, port: ArchivedU16, flowinfo: ArchivedU32, scope_id: ArchivedU32, } impl ArchivedSocketAddrV6 { /// Returns the flow information associated with this address. 
/// /// See [`SocketAddrV6::flowinfo()`](std::net::SocketAddrV6::flowinfo()) for /// more details. #[inline] pub const fn flowinfo(&self) -> u32 { self.flowinfo.to_native() } /// Returns the IP address associated with this socket address. #[inline] pub const fn ip(&self) -> &ArchivedIpv6Addr { &self.ip } /// Returns the port number associated with this socket address. #[inline] pub const fn port(&self) -> u16 { self.port.to_native() } /// Returns the scope ID associated with this address. /// /// See [`SocketAddrV6::scope_id()`](std::net::SocketAddrV6::scope_id()) for /// more details. #[inline] pub const fn scope_id(&self) -> u32 { self.scope_id.to_native() } /// Returns a [`SocketAddrV6`] with the same value. #[inline] pub fn as_socket_addr_v6(&self) -> SocketAddrV6 { SocketAddrV6::new( self.ip().as_ipv6(), self.port(), self.flowinfo(), self.scope_id(), ) } /// Emplaces an `ArchivedSocketAddrV6` of the given `value` into a place. #[inline] pub fn emplace(value: &SocketAddrV6, out: Place) { munge!(let ArchivedSocketAddrV6 { ip, port, flowinfo, scope_id } = out); value.ip().resolve((), ip); value.port().resolve((), port); value.flowinfo().resolve((), flowinfo); value.scope_id().resolve((), scope_id); } } /// An archived [`SocketAddr`]. #[derive(Portable)] #[rkyv(crate)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[repr(u8)] pub enum ArchivedSocketAddr { /// An IPv4 socket address. V4(ArchivedSocketAddrV4), /// An IPv6 socket address. V6(ArchivedSocketAddrV6), } impl ArchivedSocketAddr { /// Returns the port number associated with this socket address. #[inline] pub fn port(&self) -> u16 { match self { ArchivedSocketAddr::V4(addr) => addr.port(), ArchivedSocketAddr::V6(addr) => addr.port(), } } /// Returns `true` if the [IP address](std::net::IpAddr) in this /// `ArchivedSocketAddr` is an [`IPv4` address](std::net::IpAddr::V4), /// and `false` otherwise. 
#[inline] pub fn is_ipv4(&self) -> bool { matches!(self, ArchivedSocketAddr::V4(_)) } /// Returns `true` if the [IP address](std::net::IpAddr) in this /// `ArchivedSocketAddr` is an [`IPv6` address](std::net::IpAddr::V6), /// and `false` otherwise. #[inline] pub fn is_ipv6(&self) -> bool { matches!(self, ArchivedSocketAddr::V6(_)) } /// Returns a [`SocketAddr`] with the same value. #[inline] pub fn as_socket_addr(&self) -> SocketAddr { match self { ArchivedSocketAddr::V4(addr) => { SocketAddr::V4(addr.as_socket_addr_v4()) } ArchivedSocketAddr::V6(addr) => { SocketAddr::V6(addr.as_socket_addr_v6()) } } } /// Returns the IP address associated with this socket address. #[inline] pub fn ip(&self) -> IpAddr { match self { ArchivedSocketAddr::V4(addr) => IpAddr::V4(addr.ip().as_ipv4()), ArchivedSocketAddr::V6(addr) => IpAddr::V6(addr.ip().as_ipv6()), } } } rkyv-0.8.9/src/niche/mod.rs000064400000000000000000000001741046102023000136410ustar 00000000000000//! Manually niched type replacements. pub mod niched_option; pub mod niching; pub mod option_box; pub mod option_nonzero; rkyv-0.8.9/src/niche/niched_option.rs000064400000000000000000000134251046102023000157070ustar 00000000000000//! A niched `ArchivedOption` that uses less space based on a [`Niching`]. use core::{cmp, fmt, marker::PhantomData, mem::MaybeUninit, ops::Deref}; use munge::munge; use rancor::Fallible; use super::niching::Niching; use crate::{seal::Seal, Archive, Place, Portable, Serialize}; /// A niched `ArchivedOption`. /// /// It has the same layout as `T`, and thus uses less space by storing the /// `None` variant in a custom way based on `N`. 
#[derive(Portable)] #[rkyv(crate)] #[repr(transparent)] pub struct NichedOption { repr: MaybeUninit, _niching: PhantomData, } #[cfg(feature = "bytecheck")] const _: () = { use core::ptr::addr_of; use crate::bytecheck::CheckBytes; unsafe impl CheckBytes for NichedOption where T: CheckBytes, N: Niching + ?Sized, C: Fallible + ?Sized, { unsafe fn check_bytes( value: *const Self, context: &mut C, ) -> Result<(), C::Error> { let ptr = unsafe { addr_of!((*value).repr).cast::() }; let is_niched = unsafe { N::is_niched(ptr) }; if !is_niched { unsafe { T::check_bytes(ptr, context)?; } } Ok(()) } } }; impl + ?Sized> NichedOption { /// Returns `true` if the option is a `None` value. pub fn is_none(&self) -> bool { unsafe { N::is_niched(self.repr.as_ptr()) } } /// Returns `true` if the option is a `Some` value. pub fn is_some(&self) -> bool { !self.is_none() } /// Converts to an `Option<&T>`. pub fn as_ref(&self) -> Option<&T> { if self.is_none() { None } else { Some(unsafe { self.repr.assume_init_ref() }) } } /// Converts to an `Option<&mut T>`. pub fn as_mut(&mut self) -> Option<&mut T> { if self.is_none() { None } else { Some(unsafe { self.repr.assume_init_mut() }) } } /// Converts from `Seal<'_, NichedOption>` to `Option>`. pub fn as_seal(this: Seal<'_, Self>) -> Option> { let this = unsafe { Seal::unseal_unchecked(this) }; this.as_mut().map(Seal::new) } /// Returns an iterator over the possibly-contained value. pub fn iter(&self) -> Iter<&'_ T> { Iter::new(self.as_ref()) } /// Returns an iterator over the mutable possibly-contained value. pub fn iter_mut(&mut self) -> Iter<&'_ mut T> { Iter::new(self.as_mut()) } /// Returns an iterator over the sealed possibly-contained value. pub fn iter_seal(this: Seal<'_, Self>) -> Iter> { Iter::new(Self::as_seal(this)) } /// Resolves a `NichedOption` from an `Option<&U>`. 
pub fn resolve_from_option( option: Option<&U>, resolver: Option, out: Place, ) where U: Archive, { let out = Self::munge_place(out); match option { Some(value) => { let resolver = resolver.expect("non-niched resolver"); value.resolve(resolver, out); } None => N::resolve_niched(out), } } /// Serializes a `NichedOption` from an `Option<&U>`. pub fn serialize_from_option( option: Option<&U>, serializer: &mut S, ) -> Result, S::Error> where U: Serialize, S: Fallible + ?Sized, { match option { Some(value) => value.serialize(serializer).map(Some), None => Ok(None), } } pub(crate) fn munge_place(out: Place) -> Place { munge!(let Self { repr, .. } = out); unsafe { repr.cast_unchecked::() } } } impl NichedOption where T: Deref, N: Niching + ?Sized, { /// Converts from `&NichedOption` to `Option<&T::Target>`. pub fn as_deref(&self) -> Option<&::Target> { self.as_ref().map(Deref::deref) } } impl fmt::Debug for NichedOption where T: fmt::Debug, N: Niching + ?Sized, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.as_ref().fmt(f) } } impl Eq for NichedOption where T: Eq, N: Niching + ?Sized, { } impl PartialEq for NichedOption where T: PartialEq, N: Niching + ?Sized, { fn eq(&self, other: &Self) -> bool { self.as_ref().eq(&other.as_ref()) } } impl PartialEq> for NichedOption where T: PartialEq, N: Niching + ?Sized, { fn eq(&self, other: &Option) -> bool { match (self.as_ref(), other) { (Some(self_value), Some(other_value)) => self_value.eq(other_value), (None, None) => true, _ => false, } } } impl Ord for NichedOption where T: Ord, N: Niching + ?Sized, { fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_ref().cmp(&other.as_ref()) } } impl PartialOrd for NichedOption where T: PartialOrd, N: Niching + ?Sized, { fn partial_cmp(&self, other: &Self) -> Option { self.as_ref().partial_cmp(&other.as_ref()) } } /// An iterator over a reference to the `Some` variant of a `NichedOption`. 
/// /// This iterator yields one value if the `NichedOption` is a `Some`, otherwise /// none. pub type Iter

= crate::option::Iter

; rkyv-0.8.9/src/niche/niching.rs000064400000000000000000000057641046102023000145130ustar 00000000000000//! [`Niching`] implementors for [`NicheInto`]. //! //! [`NicheInto`]: crate::with::NicheInto use crate::Place; /// A type that can be used to niche a value with [`NicheInto`]. /// /// # Example /// /// ``` /// use rkyv::{ /// niche::niching::Niching, primitive::ArchivedU32, with::NicheInto, /// Archive, Archived, Place, Serialize, /// }; /// /// // Let's niche `Option` by using odd values /// struct NeverOdd; /// /// impl Niching for NeverOdd { /// unsafe fn is_niched(niched: *const ArchivedU32) -> bool { /// // Interprete odd values as "niched" /// unsafe { *niched % 2 == 1 } /// } /// /// fn resolve_niched(out: Place) { /// // To niche, we use the value `1` /// out.write(ArchivedU32::from_native(1)) /// } /// } /// /// #[derive(Archive)] /// struct Basic { /// field: Option, /// } /// /// #[derive(Archive, Serialize)] /// struct Niched { /// #[rkyv(with = NicheInto)] /// field: Option, /// } /// /// # fn main() -> Result<(), rkyv::rancor::Error> { /// // Indeed, we have a smaller archived representation /// assert!(size_of::() < size_of::()); /// /// let values: Vec = /// (0..4).map(|n| Niched { field: Some(n) }).collect(); /// /// let bytes = rkyv::to_bytes(&values)?; /// let archived = rkyv::access::>, _>(&bytes)?; /// assert_eq!(archived[0].field.as_ref(), Some(&0.into())); /// assert_eq!(archived[1].field.as_ref(), None); /// assert_eq!(archived[2].field.as_ref(), Some(&2.into())); /// assert_eq!(archived[3].field.as_ref(), None); /// # Ok(()) } /// ``` /// /// [`NicheInto`]: crate::with::NicheInto pub trait Niching { /// Returns whether the given value has been niched. /// /// While `niched` is guaranteed to point to bytes which are all valid to /// read, the value it points to is not guaranteed to be a valid instance of /// `T`. /// /// # Safety /// /// `niched` must be non-null, properly-aligned, and safe for reads. 
It does /// not have to point to a valid `T`. unsafe fn is_niched(niched: *const T) -> bool; /// Writes data to `out` indicating that a `T` is niched. fn resolve_niched(out: Place); } /// Trait to allow `NichedOption` to be niched further by `N2`. /// /// # Safety /// /// Implementors must ensure that the memory regions within `Self` that are used /// for [`Niching`] impls of `N1` and `N2` are mutually exclusive. pub unsafe trait SharedNiching {} /// Default [`Niching`] for various types. /// /// Also serves as with-wrapper by being shorthand for /// `NicheInto`. pub struct DefaultNiche; /// [`Niching`] for zero-niched values. pub struct Zero; /// [`Niching`] for NaN-niched values. pub struct NaN; /// [`Niching`] for null-pointer-niched values. pub struct Null; /// [`Niching`] for booleans. pub struct Bool; rkyv-0.8.9/src/niche/option_box.rs000064400000000000000000000165601046102023000152500ustar 00000000000000//! A niched archived `Option>` that uses less space. use core::{ cmp, fmt, hash, hint::unreachable_unchecked, mem::ManuallyDrop, ops::Deref, }; use munge::munge; use rancor::Fallible; use crate::{ boxed::{ArchivedBox, BoxResolver}, seal::Seal, ser::Writer, traits::ArchivePointee, ArchiveUnsized, Place, Portable, RelPtr, SerializeUnsized, }; /// A niched archived `Option>`. /// /// It uses less space by storing the `None` variant as a null pointer. 
#[derive(Portable)] #[rkyv(crate)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[repr(transparent)] pub struct ArchivedOptionBox { repr: Repr, } #[derive(Portable)] #[rkyv(crate)] #[repr(C)] union Repr { boxed: ManuallyDrop>, ptr: ManuallyDrop>, } impl Repr { fn is_invalid(&self) -> bool { unsafe { self.ptr.is_invalid() } } } #[cfg(feature = "bytecheck")] const _: () = { use crate::{ bytecheck::{CheckBytes, Verify}, rancor::Source, traits::LayoutRaw, validation::ArchiveContext, }; unsafe impl CheckBytes for Repr where T: ArchivePointee + ?Sized, C: Fallible + ?Sized, RelPtr: CheckBytes, Self: Verify, { unsafe fn check_bytes( value: *const Self, context: &mut C, ) -> Result<(), C::Error> { // SAFETY: `Repr` is a `#[repr(C)]` union of an `ArchivedBox` // and a `RelPtr`, and so is guaranteed to be aligned and point // to enough bytes for a `RelPtr`. unsafe { RelPtr::check_bytes(value.cast::>(), context)?; } // verify with null check Self::verify(unsafe { &*value }, context) } } unsafe impl Verify for Repr where T: ArchivePointee + CheckBytes + LayoutRaw + ?Sized, T::ArchivedMetadata: CheckBytes, C: Fallible + ArchiveContext + ?Sized, C::Error: Source, { fn verify(&self, context: &mut C) -> Result<(), C::Error> { let is_invalid = unsafe { self.ptr.is_invalid() }; if is_invalid { // This is a `None` and doesn't need to be checked further Ok(()) } else { unsafe { self.boxed.verify(context) } } } } }; impl ArchivedOptionBox { /// Returns `true` if the option box is a `None` value. pub fn is_none(&self) -> bool { self.as_ref().is_none() } /// Returns `true` if the option box is a `Some` value. pub fn is_some(&self) -> bool { self.as_ref().is_some() } /// Converts to an `Option<&ArchivedBox>`. pub fn as_ref(&self) -> Option<&ArchivedBox> { if self.repr.is_invalid() { None } else { unsafe { Some(&self.repr.boxed) } } } /// Converts to an `Option<&mut ArchivedBox>`. 
pub fn as_mut(&mut self) -> Option<&mut ArchivedBox> { if self.repr.is_invalid() { None } else { unsafe { Some(&mut self.repr.boxed) } } } /// Converts from `Seal<'_, ArchivedOption>` to `Option>>`. pub fn as_seal(this: Seal<'_, Self>) -> Option>> { let this = unsafe { Seal::unseal_unchecked(this) }; this.as_mut().map(Seal::new) } /// Returns an iterator over the possibly-contained value. pub fn iter(&self) -> Iter<&'_ ArchivedBox> { Iter::new(self.as_ref()) } /// Returns an iterator over the mutable possibly-contained value. pub fn iter_mut(&mut self) -> Iter<&'_ mut ArchivedBox> { Iter::new(self.as_mut()) } /// Returns an iterator over the sealed possibly-contained value. pub fn iter_seal(this: Seal<'_, Self>) -> Iter>> { Iter::new(Self::as_seal(this)) } /// Converts from `&ArchivedOptionBox` to `Option<&T>`. /// /// Leaves the original `ArchivedOptionBox` in-place, creating a new one /// with a reference to the original one. pub fn as_deref(&self) -> Option<&T> { self.as_ref().map(|x| (*x).deref()) } } impl ArchivedOptionBox { /// Resolves an `ArchivedOptionBox` from an `Option<&T>`. pub fn resolve_from_option + ?Sized>( field: Option<&U>, resolver: OptionBoxResolver, out: Place, ) { munge!(let Self { repr } = out); if let Some(value) = field { let resolver = if let OptionBoxResolver::Some(metadata_resolver) = resolver { metadata_resolver } else { unsafe { unreachable_unchecked(); } }; let out = unsafe { repr.cast_unchecked::>() }; ArchivedBox::resolve_from_ref(value, resolver, out) } else { let out = unsafe { repr.cast_unchecked::>() }; RelPtr::emplace_invalid(out); } } /// Serializes an `ArchivedOptionBox` from an `Option<&T>`. 
pub fn serialize_from_option( field: Option<&U>, serializer: &mut S, ) -> Result where U: SerializeUnsized + ?Sized, S: Fallible + Writer + ?Sized, { if let Some(value) = field { Ok(OptionBoxResolver::Some(ArchivedBox::serialize_from_ref( value, serializer, )?)) } else { Ok(OptionBoxResolver::None) } } } impl fmt::Debug for ArchivedOptionBox where T::ArchivedMetadata: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.as_ref() { Some(inner) => inner.fmt(f), None => f.debug_tuple("None").finish(), } } } impl Eq for ArchivedOptionBox {} impl hash::Hash for ArchivedOptionBox { fn hash(&self, state: &mut H) { self.as_ref().hash(state) } } impl Ord for ArchivedOptionBox { fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_ref().cmp(&other.as_ref()) } } impl PartialEq for ArchivedOptionBox { fn eq(&self, other: &Self) -> bool { self.as_ref().eq(&other.as_ref()) } } impl PartialOrd for ArchivedOptionBox { fn partial_cmp(&self, other: &Self) -> Option { self.as_ref().partial_cmp(&other.as_ref()) } } /// An iterator over a reference to the `Some` variant of an /// `ArchivedOptionBox`. /// /// This iterator yields one value if the `ArchivedOptionBox` is a `Some`, /// otherwise none. pub type Iter

= crate::option::Iter

; /// The resolver for [`ArchivedOptionBox`]. pub enum OptionBoxResolver { /// The `ArchivedOptionBox` was `None` None, /// The resolver for the `ArchivedBox` Some(BoxResolver), } rkyv-0.8.9/src/niche/option_nonzero.rs000064400000000000000000000201411046102023000161400ustar 00000000000000//! Niched archived `Option` integers that use less space. use core::{ cmp, fmt, hash, num::{ NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, }, }; use munge::munge; use crate::{seal::Seal, traits::NoUndef, Archived, Place, Portable}; macro_rules! impl_archived_option_nonzero { ($ar:ident, $nz:ty, $ne:ty) => { #[doc = concat!("A niched archived `Option<", stringify!($nz), ">`")] #[derive(Copy, Clone, Portable)] #[rkyv(crate)] #[repr(transparent)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] pub struct $ar { inner: Archived<$ne>, } impl $ar { /// Returns `true` if the option is a `None` value. #[inline] pub fn is_none(&self) -> bool { self.inner == 0 } /// Returns `true` if the option is a `Some` value. #[inline] pub fn is_some(&self) -> bool { self.inner != 0 } #[rustfmt::skip] #[doc = concat!( "Converts to an `Option<&Archived<", stringify!($nz), ">>`" )] #[inline] pub fn as_ref(&self) -> Option<&Archived<$nz>> { if self.inner != 0 { let as_nonzero = unsafe { // SAFETY: NonZero types have the same memory layout and // bit patterns as their integer counterparts, // regardless of endianness. &*(&self.inner as *const _ as *const Archived<$nz>) }; Some(as_nonzero) } else { None } } #[rustfmt::skip] #[doc = concat!( "Converts to an `Option<&mut Archived<", stringify!($nz), ">>`", )] #[inline] pub fn as_mut(&mut self) -> Option<&mut Archived<$nz>> { if self.inner != 0 { let as_nonzero = unsafe { // SAFETY: NonZero types have the same memory layout and // bit patterns as their integer counterparts, // regardless of endianness. 
&mut *(&mut self.inner as *mut _ as *mut Archived<$nz>) }; Some(as_nonzero) } else { None } } #[rustfmt::skip] #[doc = concat!( "Converts from `Seal<'_, ArchivedOption", stringify!($nz), ">` to `Option>>`.", )] #[inline] pub fn as_seal( this: Seal<'_, Self>, ) -> Option>> { let this = unsafe { Seal::unseal_unchecked(this) }; this.as_mut().map(Seal::new) } /// Takes the value out of the option, leaving a `None` in its /// place. #[inline] pub fn take(&mut self) -> Option> { if self.inner != 0 { // SAFETY: self.inner is nonzero let result = unsafe { Archived::<$nz>::new_unchecked(self.inner.into()) }; self.inner = 0.into(); Some(result) } else { None } } /// Returns an iterator over the possibly-contained value. #[inline] pub fn iter(&self) -> Iter<&'_ Archived<$nz>> { Iter::new(self.as_ref()) } /// Returns an iterator over the mutable possibly-contained value. #[inline] pub fn iter_mut(&mut self) -> Iter<&'_ mut Archived<$nz>> { Iter::new(self.as_mut()) } /// Returns an iterator over the sealed mutable possibly-contained /// value. #[inline] pub fn iter_seal( this: Seal<'_, Self>, ) -> Iter>> { Iter::new(Self::as_seal(this)) } /// Inserts `v` into the option if it is `None`, then returns a /// mutable reference to the contained value. #[inline] pub fn get_or_insert(&mut self, v: $nz) -> &mut Archived<$nz> { self.get_or_insert_with(move || v) } /// Inserts a value computed from `f` into the option if it is /// `None`, then returns a mutable reference to the contained value. pub fn get_or_insert_with(&mut self, f: F) -> &mut Archived<$nz> where F: FnOnce() -> $nz, { if self.inner == 0 { self.inner = f().get().into(); } unsafe { // SAFETY: self.inner is nonzero &mut *(&mut self.inner as *mut _ as *mut Archived<$nz>) } } /// Resolves an `ArchivedOptionNonZero` from an `Option`. 
#[inline] pub fn resolve_from_option( field: Option<$nz>, out: Place, ) { munge!(let Self { inner } = out); if let Some(nz) = field { inner.write(nz.get().into()); } else { inner.write((0 as $ne).into()); } } } impl fmt::Debug for $ar { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.as_ref().fmt(f) } } impl Eq for $ar {} impl hash::Hash for $ar { fn hash(&self, state: &mut H) { self.as_ref().hash(state) } } impl Ord for $ar { #[inline] fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_ref().cmp(&other.as_ref()) } } impl PartialEq for $ar { #[inline] fn eq(&self, other: &Self) -> bool { self.as_ref().eq(&other.as_ref()) } } impl PartialOrd for $ar { #[inline] fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } unsafe impl NoUndef for $ar {} }; } impl_archived_option_nonzero!(ArchivedOptionNonZeroI8, NonZeroI8, i8); impl_archived_option_nonzero!(ArchivedOptionNonZeroI16, NonZeroI16, i16); impl_archived_option_nonzero!(ArchivedOptionNonZeroI32, NonZeroI32, i32); impl_archived_option_nonzero!(ArchivedOptionNonZeroI64, NonZeroI64, i64); impl_archived_option_nonzero!(ArchivedOptionNonZeroI128, NonZeroI128, i128); /// A niched archived `Option` pub type ArchivedOptionNonZeroIsize = match_pointer_width!( ArchivedOptionNonZeroI16, ArchivedOptionNonZeroI32, ArchivedOptionNonZeroI64, ); impl_archived_option_nonzero!(ArchivedOptionNonZeroU8, NonZeroU8, u8); impl_archived_option_nonzero!(ArchivedOptionNonZeroU16, NonZeroU16, u16); impl_archived_option_nonzero!(ArchivedOptionNonZeroU32, NonZeroU32, u32); impl_archived_option_nonzero!(ArchivedOptionNonZeroU64, NonZeroU64, u64); impl_archived_option_nonzero!(ArchivedOptionNonZeroU128, NonZeroU128, u128); /// A niched archived `Option` pub type ArchivedOptionNonZeroUsize = match_pointer_width!( ArchivedOptionNonZeroU16, ArchivedOptionNonZeroU32, ArchivedOptionNonZeroU64, ); /// An iterator over a reference to the `Some` variant of an /// `ArchivedOptionNonZero` integer. 
/// /// This iterator yields one value if the `ArchivedOptionNonZero` integer is a /// `Some`, otherwise none. pub type Iter

= crate::option::Iter

; rkyv-0.8.9/src/ops.rs000064400000000000000000000174601046102023000126030ustar 00000000000000//! Archived versions of `ops` types. use core::{ cmp, fmt, ops::{Bound, RangeBounds}, }; use crate::{seal::Seal, Portable}; /// An archived [`RangeFull`](::core::ops::RangeFull). #[derive(Clone, Copy, Default, PartialEq, Eq, Hash, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct ArchivedRangeFull; impl fmt::Debug for ArchivedRangeFull { #[inline] fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "..") } } /// An archived [`Range`](::core::ops::Range). #[derive(Clone, Default, PartialEq, Eq, Hash, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct ArchivedRange { /// The lower bound of the range (inclusive). pub start: T, /// The upper bound of the range (inclusive). pub end: T, } impl fmt::Debug for ArchivedRange { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { self.start.fmt(fmt)?; write!(fmt, "..")?; self.end.fmt(fmt)?; Ok(()) } } impl> ArchivedRange { /// Returns `true` if `item` is contained in the range. pub fn contains(&self, item: &U) -> bool where T: PartialOrd, U: PartialOrd + ?Sized, { >::contains(self, item) } /// Returns `true` if the range contains no items. pub fn is_empty(&self) -> bool { match self.start.partial_cmp(&self.end) { None | Some(cmp::Ordering::Greater) | Some(cmp::Ordering::Equal) => true, Some(cmp::Ordering::Less) => false, } } } impl RangeBounds for ArchivedRange { fn start_bound(&self) -> Bound<&T> { Bound::Included(&self.start) } fn end_bound(&self) -> Bound<&T> { Bound::Excluded(&self.end) } } /// An archived [`RangeInclusive`](::core::ops::RangeInclusive). 
#[derive(Clone, Default, PartialEq, Eq, Hash, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct ArchivedRangeInclusive { /// The lower bound of the range (inclusive). pub start: T, /// The upper bound of the range (inclusive). pub end: T, } impl fmt::Debug for ArchivedRangeInclusive { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { self.start.fmt(fmt)?; write!(fmt, "..=")?; self.end.fmt(fmt)?; Ok(()) } } impl> ArchivedRangeInclusive { /// Returns `true` if `item` is contained in the range. pub fn contains(&self, item: &U) -> bool where T: PartialOrd, U: PartialOrd + ?Sized, { >::contains(self, item) } /// Returns `true` if the range contains no items. pub fn is_empty(&self) -> bool { match self.start.partial_cmp(&self.end) { None | Some(cmp::Ordering::Greater) => true, Some(cmp::Ordering::Less) | Some(cmp::Ordering::Equal) => false, } } } impl RangeBounds for ArchivedRangeInclusive { fn start_bound(&self) -> Bound<&T> { Bound::Included(&self.start) } fn end_bound(&self) -> Bound<&T> { Bound::Included(&self.end) } } /// An archived [`RangeFrom`](::core::ops::RangeFrom). #[derive(Clone, Default, PartialEq, Eq, Hash, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct ArchivedRangeFrom { /// The lower bound of the range (inclusive). pub start: T, } impl fmt::Debug for ArchivedRangeFrom { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { self.start.fmt(fmt)?; write!(fmt, "..")?; Ok(()) } } impl> ArchivedRangeFrom { /// Returns `true` if `item` is contained in the range. pub fn contains(&self, item: &U) -> bool where T: PartialOrd, U: ?Sized + PartialOrd, { >::contains(self, item) } } impl RangeBounds for ArchivedRangeFrom { fn start_bound(&self) -> Bound<&T> { Bound::Included(&self.start) } fn end_bound(&self) -> Bound<&T> { Bound::Unbounded } } /// An archived [`RangeTo`](::core::ops::RangeTo). 
#[derive(Clone, Default, PartialEq, Eq, Hash, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct ArchivedRangeTo { /// The upper bound of the range (exclusive). pub end: T, } impl fmt::Debug for ArchivedRangeTo { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "..")?; self.end.fmt(fmt)?; Ok(()) } } impl> ArchivedRangeTo { /// Returns `true` if `item` is contained in the range. pub fn contains(&self, item: &U) -> bool where T: PartialOrd, U: ?Sized + PartialOrd, { >::contains(self, item) } } impl RangeBounds for ArchivedRangeTo { fn start_bound(&self) -> Bound<&T> { Bound::Unbounded } fn end_bound(&self) -> Bound<&T> { Bound::Excluded(&self.end) } } /// An archived [`RangeToInclusive`](::core::ops::RangeToInclusive). #[derive(Clone, Default, PartialEq, Eq, Hash, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct ArchivedRangeToInclusive { /// The upper bound of the range (inclusive). pub end: T, } impl fmt::Debug for ArchivedRangeToInclusive { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "..=")?; self.end.fmt(fmt)?; Ok(()) } } impl> ArchivedRangeToInclusive { /// Returns `true` if `item` is contained in the range. pub fn contains(&self, item: &U) -> bool where T: PartialOrd, U: ?Sized + PartialOrd, { >::contains(self, item) } } impl RangeBounds for ArchivedRangeToInclusive { fn start_bound(&self) -> Bound<&T> { Bound::Unbounded } fn end_bound(&self) -> Bound<&T> { Bound::Included(&self.end) } } /// An archived [`Bound`]. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[repr(u8)] #[rkyv(crate)] pub enum ArchivedBound { /// An inclusive bound. Included(T), /// An exclusive bound. Excluded(T), /// An infinite endpoint. Indicates that there is no bound in this /// direction. 
Unbounded, } impl ArchivedBound { /// Converts from `&ArchivedBound` to `Bound<&T>`. pub fn as_ref(&self) -> Bound<&T> { match self { ArchivedBound::Included(x) => Bound::Included(x), ArchivedBound::Excluded(x) => Bound::Excluded(x), ArchivedBound::Unbounded => Bound::Unbounded, } } /// Converts from `&mut ArchivedBound` to `Bound<&mut T>`. pub fn as_mut(&mut self) -> Bound<&mut T> { match self { ArchivedBound::Included(x) => Bound::Included(x), ArchivedBound::Excluded(x) => Bound::Excluded(x), ArchivedBound::Unbounded => Bound::Unbounded, } } /// Converts from `Seal<&ArchivedBound>` to `Bound>`. pub fn as_seal(this: Seal<'_, Self>) -> Bound> { let this = unsafe { Seal::unseal_unchecked(this) }; match this { ArchivedBound::Included(x) => Bound::Included(Seal::new(x)), ArchivedBound::Excluded(x) => Bound::Excluded(Seal::new(x)), ArchivedBound::Unbounded => Bound::Unbounded, } } } rkyv-0.8.9/src/option.rs000064400000000000000000000216321046102023000133060ustar 00000000000000//! An archived version of `Option`. use core::{ cmp, hash, mem, ops::{Deref, DerefMut}, }; use crate::{seal::Seal, Portable}; /// An archived [`Option`]. /// /// It functions identically to [`Option`] but has a different internal /// representation to allow for archiving. #[derive(Clone, Copy, Debug, Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[repr(u8)] #[rkyv(crate)] pub enum ArchivedOption { /// No value None, /// Some value `T` Some(T), } impl ArchivedOption { /// Transforms the `ArchivedOption` into a `Result`, mapping /// `Some(v)` to `Ok(v)` and `None` to `Err(err)`. pub fn ok_or(self, err: E) -> Result { match self { ArchivedOption::None => Err(err), ArchivedOption::Some(x) => Ok(x), } } /// Returns the contained [`Some`] value, consuming the `self` value. 
pub fn unwrap(self) -> T { match self { ArchivedOption::None => { panic!("called `ArchivedOption::unwrap()` on a `None` value") } ArchivedOption::Some(value) => value, } } /// Returns the contained [`Some`] value or a provided default. pub fn unwrap_or(self, default: T) -> T { match self { ArchivedOption::None => default, ArchivedOption::Some(value) => value, } } /// Returns the contained [`Some`] value or computes it from a closure. pub fn unwrap_or_else T>(self, f: F) -> T { match self { ArchivedOption::None => f(), ArchivedOption::Some(value) => value, } } /// Returns `true` if the option is a `None` value. pub fn is_none(&self) -> bool { match self { ArchivedOption::None => true, ArchivedOption::Some(_) => false, } } /// Returns `true` if the option is a `Some` value. pub fn is_some(&self) -> bool { match self { ArchivedOption::None => false, ArchivedOption::Some(_) => true, } } /// Converts to an `Option<&T>`. pub const fn as_ref(&self) -> Option<&T> { match self { ArchivedOption::None => None, ArchivedOption::Some(value) => Some(value), } } /// Converts to an `Option<&mut T>`. pub fn as_mut(&mut self) -> Option<&mut T> { match self { ArchivedOption::None => None, ArchivedOption::Some(value) => Some(value), } } /// Converts from `Seal<'_, ArchivedOption>` to `Option>`. pub fn as_seal(this: Seal<'_, Self>) -> Option> { let inner = unsafe { Seal::unseal_unchecked(this) }; inner.as_mut().map(Seal::new) } /// Returns an iterator over the possibly-contained value. pub const fn iter(&self) -> Iter<&'_ T> { Iter { inner: self.as_ref(), } } /// Returns an iterator over the mutable possibly-contained value. pub fn iter_mut(&mut self) -> Iter<&'_ mut T> { Iter { inner: self.as_mut(), } } /// Returns an iterator over the sealed possibly-contained value. pub fn iter_seal(this: Seal<'_, Self>) -> Iter> { Iter { inner: Self::as_seal(this), } } /// Inserts `v` into the option if it is `None`, then returns a mutable /// reference to the contained value. 
pub fn get_or_insert(&mut self, v: T) -> &mut T { self.get_or_insert_with(move || v) } /// Inserts a value computed from `f` into the option if it is `None`, then /// returns a mutable reference to the contained value. pub fn get_or_insert_with T>(&mut self, f: F) -> &mut T { if let ArchivedOption::Some(ref mut value) = self { value } else { *self = ArchivedOption::Some(f()); self.as_mut().unwrap() } } } impl ArchivedOption { /// Converts from `&ArchivedOption` to `Option<&T::Target>`. /// /// Leaves the original `ArchivedOption` in-place, creating a new one with a /// reference to the original one, additionally coercing the contents /// via `Deref`. pub fn as_deref(&self) -> Option<&::Target> { self.as_ref().map(|x| x.deref()) } } impl ArchivedOption { /// Converts from `&mut ArchivedOption` to `Option<&mut T::Target>`. /// /// Leaves the original `ArchivedOption` in-place, creating a new `Option` /// with a mutable reference to the inner type's `Deref::Target` type. pub fn as_deref_mut(&mut self) -> Option<&mut ::Target> { self.as_mut().map(|x| x.deref_mut()) } } impl Eq for ArchivedOption {} impl hash::Hash for ArchivedOption { fn hash(&self, state: &mut H) { self.as_ref().hash(state) } } impl Ord for ArchivedOption { fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_ref().cmp(&other.as_ref()) } } impl PartialEq for ArchivedOption { fn eq(&self, other: &Self) -> bool { self.as_ref().eq(&other.as_ref()) } } impl PartialOrd for ArchivedOption { fn partial_cmp(&self, other: &Self) -> Option { self.as_ref().partial_cmp(&other.as_ref()) } } impl> PartialOrd> for ArchivedOption { fn partial_cmp(&self, other: &Option) -> Option { match (self, other) { (ArchivedOption::None, None) => Some(cmp::Ordering::Equal), (ArchivedOption::None, Some(_)) => Some(cmp::Ordering::Less), (ArchivedOption::Some(_), None) => Some(cmp::Ordering::Greater), (ArchivedOption::Some(self_value), Some(other_value)) => { self_value.partial_cmp(other_value) } } } } impl> PartialEq> for 
ArchivedOption { fn eq(&self, other: &Option) -> bool { if let ArchivedOption::Some(self_value) = self { if let Some(other_value) = other { self_value.eq(other_value) } else { false } } else { other.is_none() } } } impl From for ArchivedOption { /// Moves `val` into a new [`Some`]. /// /// # Examples /// /// ``` /// # use rkyv::option::ArchivedOption; /// let o: ArchivedOption = ArchivedOption::from(67); /// /// assert!(matches!(o, ArchivedOption::Some(67))); /// ``` fn from(val: T) -> ArchivedOption { ArchivedOption::Some(val) } } /// An iterator over a reference to the `Some` variant of an `ArchivedOption`. /// /// This iterator yields one value if the `ArchivedOption` is a `Some`, /// otherwise none. pub struct Iter

{ inner: Option

, } impl

Iter

{ /// Returns an iterator over the given `Option`. pub fn new(inner: Option

) -> Self { Self { inner } } } impl

Iterator for Iter

{ type Item = P; fn next(&mut self) -> Option { let mut result = None; mem::swap(&mut self.inner, &mut result); result } } impl

DoubleEndedIterator for Iter

{ fn next_back(&mut self) -> Option { self.next() } } impl<'a, T> IntoIterator for &'a ArchivedOption { type Item = &'a T; type IntoIter = Iter<&'a T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, T> IntoIterator for &'a mut ArchivedOption { type Item = &'a mut T; type IntoIter = Iter<&'a mut T>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl<'a, T> IntoIterator for Seal<'a, ArchivedOption> { type Item = Seal<'a, T>; type IntoIter = Iter>; fn into_iter(self) -> Self::IntoIter { ArchivedOption::iter_seal(self) } } #[cfg(test)] mod tests { use super::*; #[test] fn partial_ord_option() { use core::cmp::Ordering; use super::ArchivedOption; let a: ArchivedOption = ArchivedOption::Some(42); let b = Some(42); assert_eq!(Some(Ordering::Equal), a.partial_cmp(&b)); let a: ArchivedOption = ArchivedOption::Some(1); let b = Some(2); assert_eq!(Some(Ordering::Less), a.partial_cmp(&b)); let a: ArchivedOption = ArchivedOption::Some(2); let b = Some(1); assert_eq!(Some(Ordering::Greater), a.partial_cmp(&b)); } #[test] fn into_iter() { let x: ArchivedOption = ArchivedOption::Some(1); let mut iter = IntoIterator::into_iter(&x); assert_eq!(iter.next(), Some(&1)); assert_eq!(iter.next(), None); let x: ArchivedOption = ArchivedOption::None; let mut iter = IntoIterator::into_iter(&x); assert_eq!(iter.next(), None); } } rkyv-0.8.9/src/place.rs000064400000000000000000000124151046102023000130610ustar 00000000000000//! An initialized, writeable location in memory. use core::{mem::size_of, ptr::NonNull}; use munge::{Borrow, Destructure, Restructure}; use crate::traits::{LayoutRaw, NoUndef}; /// A place to write a `T` paired with its position in the output buffer. pub struct Place { pos: usize, ptr: NonNull, } impl Clone for Place { fn clone(&self) -> Self { *self } } impl Copy for Place {} impl Place { /// Creates a new `Place` from an output pointer. 
/// /// # Safety /// /// `ptr` must be properly aligned, dereferenceable, and all of its bytes /// must be initialized. pub unsafe fn new_unchecked(pos: usize, ptr: *mut T) -> Self { unsafe { Self { pos, ptr: NonNull::new_unchecked(ptr), } } } /// Creates a new `Place` from a parent pointer and the field the place /// points to. /// /// # Safety /// /// - `ptr` must point to a field of `parent` /// - `ptr` must be properly aligned, dereferenceable, and all of its bytes /// must be initialized pub unsafe fn from_field_unchecked( parent: Place, ptr: *mut T, ) -> Self { // SAFETY: We won't write anything to the parent pointer, so we // definitely won't write any uninitialized bytes. let parent_ptr = unsafe { parent.ptr() }; let offset = ptr as *mut () as usize - parent_ptr as *mut () as usize; // SAFETY: The caller has guaranteed that `ptr` is properly aligned, // dereferenceable, and all of its bytes are initialized. unsafe { Self::new_unchecked(parent.pos() + offset, ptr) } } /// Returns the position of the place. pub fn pos(&self) -> usize { self.pos } /// Returns the pointer associated with this place. /// /// # Safety /// /// Uninitialized bytes must not be written to the returned pointer. pub unsafe fn ptr(&self) -> *mut T { self.ptr.as_ptr() } /// Writes the provided value to this place. /// /// # Safety /// /// `value` must not have any uninitialized bytes (e.g. padding). pub unsafe fn write_unchecked(&self, value: T) where T: Sized, { unsafe { self.ptr().write(value); } } /// Writes the provided value to this place. pub fn write(&self, value: T) where T: NoUndef + Sized, { unsafe { self.write_unchecked(value); } } /// Returns this place casted to the given type. /// /// # Safety /// /// This place must point to a valid `U`. pub unsafe fn cast_unchecked(&self) -> Place where T: Sized, { Place { pos: self.pos, ptr: self.ptr.cast(), } } /// Returns a slice of the bytes this place points to. 
pub fn as_slice(&self) -> &[u8] where T: LayoutRaw, { let ptr = self.ptr.as_ptr(); let len = T::layout_raw(ptr_meta::metadata(ptr)).unwrap().size(); // SAFETY: The pointers of places are always properly aligned and // dereferenceable. All of the bytes this place points to are guaranteed // to be initialized at all times. unsafe { core::slice::from_raw_parts(ptr.cast::(), len) } } } impl Place<[T]> { /// Gets a `Place` to the `i`-th element of the slice. /// /// # Safety /// /// `i` must be in-bounds for the slice pointed to by this place. pub unsafe fn index(&self, i: usize) -> Place { // SAFETY: The caller has guaranteed that `i` is in-bounds for the slice // pointed to by this place. let ptr = unsafe { self.ptr().cast::().add(i) }; // SAFETY: `ptr` is an element of `self`, and so is also properly // aligned, dereferenceable, and all of its bytes are initialized. unsafe { Place::new_unchecked(self.pos() + i * size_of::(), ptr) } } } impl Place<[T; N]> { /// Gets a `Place` to the `i`-th element of the array. /// /// # Safety /// /// `i` must be in-bounds for the array pointed to by this place. pub unsafe fn index(&self, i: usize) -> Place { // SAFETY: The caller has guaranteed that `i` is in-bounds for the array // pointed to by this place. let ptr = unsafe { self.ptr().cast::().add(i) }; // SAFETY: `ptr` is an element of `self`, and so is also properly // aligned, dereferenceable, and all of its bytes are initialized. unsafe { Place::new_unchecked(self.pos() + i * size_of::(), ptr) } } } unsafe impl Destructure for Place { type Underlying = T; type Destructuring = Borrow; fn underlying(&mut self) -> *mut Self::Underlying { self.ptr.as_ptr() } } unsafe impl Restructure for Place { type Restructured = Place; unsafe fn restructure(&self, ptr: *mut U) -> Self::Restructured { // SAFETY: `ptr` is a pointer to a subfield of the underlying pointer, // and so is also properly aligned, dereferenceable, and all of its // bytes are initialized. 
unsafe { Place::from_field_unchecked(*self, ptr) } } } rkyv-0.8.9/src/polyfill.rs000064400000000000000000000004541046102023000136270ustar 00000000000000#[cfg(feature = "alloc")] use core::{alloc::Layout, ptr::NonNull}; #[cfg(feature = "alloc")] pub fn dangling(layout: &Layout) -> NonNull { #[cfg(miri)] { layout.dangling() } #[cfg(not(miri))] unsafe { NonNull::new_unchecked(layout.align() as *mut u8) } } rkyv-0.8.9/src/primitive.rs000064400000000000000000000155341046102023000140120ustar 00000000000000//! Definitions of archived primitives and type aliases based on enabled //! features. // Unaligned big-endian #[cfg(all(feature = "unaligned", feature = "big_endian"))] use crate::rend::unaligned::{ char_ube, f32_ube, f64_ube, i128_ube, i16_ube, i32_ube, i64_ube, u128_ube, u16_ube, u32_ube, u64_ube, NonZeroI128_ube, NonZeroI16_ube, NonZeroI32_ube, NonZeroI64_ube, NonZeroU128_ube, NonZeroU16_ube, NonZeroU32_ube, NonZeroU64_ube, }; // Unaligned little-endian #[cfg(all(feature = "unaligned", not(feature = "big_endian")))] use crate::rend::unaligned::{ char_ule, f32_ule, f64_ule, i128_ule, i16_ule, i32_ule, i64_ule, u128_ule, u16_ule, u32_ule, u64_ule, NonZeroI128_ule, NonZeroI16_ule, NonZeroI32_ule, NonZeroI64_ule, NonZeroU128_ule, NonZeroU16_ule, NonZeroU32_ule, NonZeroU64_ule, }; // Aligned big-endian #[cfg(all(not(feature = "unaligned"), feature = "big_endian"))] use crate::rend::{ char_be, f32_be, f64_be, i128_be, i16_be, i32_be, i64_be, u128_be, u16_be, u32_be, u64_be, NonZeroI128_be, NonZeroI16_be, NonZeroI32_be, NonZeroI64_be, NonZeroU128_be, NonZeroU16_be, NonZeroU32_be, NonZeroU64_be, }; // Aligned little-endian #[cfg(all(not(feature = "unaligned"), not(feature = "big_endian")))] use crate::rend::{ char_le, f32_le, f64_le, i128_le, i16_le, i32_le, i64_le, u128_le, u16_le, u32_le, u64_le, NonZeroI128_le, NonZeroI16_le, NonZeroI32_le, NonZeroI64_le, NonZeroU128_le, NonZeroU16_le, NonZeroU32_le, NonZeroU64_le, }; #[rustfmt::skip] macro_rules! 
define_archived_type_alias { ($archived:ident: $name:ident, $ty:ty) => { #[doc = concat!( "The archived version of `", stringify!($name), "`.", )] pub type $archived = $ty; }; } macro_rules! define_archived_primitive { ($archived:ident: $name:ident, $le:ty, $be:ty) => { #[cfg(not(feature = "big_endian"))] define_archived_type_alias!($archived: $name, $le); #[cfg(feature = "big_endian")] define_archived_type_alias!($archived: $name, $be); } } macro_rules! define_multibyte_primitive { ($archived:ident: $name:ident, $le:ty, $ule:ty, $be:ty, $ube:ty) => { #[cfg(not(feature = "unaligned"))] define_archived_primitive!($archived: $name, $le, $be); #[cfg(feature = "unaligned")] define_archived_primitive!($archived: $name, $ule, $ube); }; } macro_rules! define_multibyte_primitives { ( $($archived:ident: $name:ident, $le:ty, $ule:ty, $be:ty, $ube:ty);* $(;)? ) => { $( define_multibyte_primitive!($archived: $name, $le, $ule, $be, $ube); )* } } define_multibyte_primitives! { ArchivedI16: i16, i16_le, i16_ule, i16_be, i16_ube; ArchivedI32: i32, i32_le, i32_ule, i32_be, i32_ube; ArchivedI64: i64, i64_le, i64_ule, i64_be, i64_ube; ArchivedI128: i128, i128_le, i128_ule, i128_be, i128_ube; ArchivedU16: u16, u16_le, u16_ule, u16_be, u16_ube; ArchivedU32: u32, u32_le, u32_ule, u32_be, u32_ube; ArchivedU64: u64, u64_le, u64_ule, u64_be, u64_ube; ArchivedU128: u128, u128_le, u128_ule, u128_be, u128_ube; ArchivedF32: f32, f32_le, f32_ule, f32_be, f32_ube; ArchivedF64: f64, f64_le, f64_ule, f64_be, f64_ube; ArchivedChar: char, char_le, char_ule, char_be, char_ube; } /// The native type that `isize` is converted to for archiving. /// /// This will be `i16`, `i32`, or `i64` when the `pointer_width_16`, /// `pointer_width_32`, or `pointer_width_64` features are enabled, /// respectively. With no pointer width features enabled, it defaults to `i32`. 
pub type FixedIsize = match_pointer_width!(i16, i32, i64); /// The archived version of `isize` chosen based on the currently-enabled /// `pointer_width_*` feature. pub type ArchivedIsize = match_pointer_width!(ArchivedI16, ArchivedI32, ArchivedI64); /// The native type that `usize` is converted to for archiving. /// /// This will be `u16`, `u32`, or `u64` when the `pointer_width_16`, /// `pointer_width_32`, or `pointer_width_64` features are enabled, /// respectively. With no pointer width features enabled, it defaults to `u32`. pub type FixedUsize = match_pointer_width!(u16, u32, u64); /// The archived version of `isize` chosen based on the currently-enabled /// `pointer_width_*` feature. pub type ArchivedUsize = match_pointer_width!(ArchivedU16, ArchivedU32, ArchivedU64); define_multibyte_primitives! { ArchivedNonZeroI16: NonZeroI16, NonZeroI16_le, NonZeroI16_ule, NonZeroI16_be, NonZeroI16_ube; ArchivedNonZeroI32: NonZeroI32, NonZeroI32_le, NonZeroI32_ule, NonZeroI32_be, NonZeroI32_ube; ArchivedNonZeroI64: NonZeroI64, NonZeroI64_le, NonZeroI64_ule, NonZeroI64_be, NonZeroI64_ube; ArchivedNonZeroI128: NonZeroI128, NonZeroI128_le, NonZeroI128_ule, NonZeroI128_be, NonZeroI128_ube; ArchivedNonZeroU16: NonZeroU16, NonZeroU16_le, NonZeroU16_ule, NonZeroU16_be, NonZeroU16_ube; ArchivedNonZeroU32: NonZeroU32, NonZeroU32_le, NonZeroU32_ule, NonZeroU32_be, NonZeroU32_ube; ArchivedNonZeroU64: NonZeroU64, NonZeroU64_le, NonZeroU64_ule, NonZeroU64_be, NonZeroU64_ube; ArchivedNonZeroU128: NonZeroU128, NonZeroU128_le, NonZeroU128_ule, NonZeroU128_be, NonZeroU128_ube; } /// The native type that `NonZeroIsize` is converted to for archiving. /// /// This will be `NonZeroI16`, `NonZeroI32`, or `NonZeroI64` when the /// `pointer_width_16`, `pointer_width_32`, or `pointer_width_64` features are /// enabled, respectively. With no pointer width features enabled, it defaults /// to `NonZeroI32`. 
pub type FixedNonZeroIsize = match_pointer_width!( ::core::num::NonZeroI16, ::core::num::NonZeroI32, ::core::num::NonZeroI64, ); /// The archived version of `NonZeroIsize` chosen based on the currently-enabled /// `pointer_width_*` feature. pub type ArchivedNonZeroIsize = match_pointer_width!( ArchivedNonZeroI16, ArchivedNonZeroI32, ArchivedNonZeroI64 ); /// The native type that `NonZeroUsize` is converted to for archiving. /// /// This will be `NonZeroU16`, `NonZeroU32`, or `NonZeroU64` when the /// `pointer_width_16`, `pointer_width_32`, or `pointer_width_64` features are /// enabled, respectively. With no pointer width features enabled, it defaults /// to `NonZeroU32`. pub type FixedNonZeroUsize = match_pointer_width!( ::core::num::NonZeroU16, ::core::num::NonZeroU32, ::core::num::NonZeroU64, ); /// The archived version of `NonZeroUsize` chosen based on the currently-enabled /// `pointer_width_*` feature. pub type ArchivedNonZeroUsize = match_pointer_width!( ArchivedNonZeroU16, ArchivedNonZeroU32, ArchivedNonZeroU64 ); rkyv-0.8.9/src/rc.rs000064400000000000000000000254451046102023000124100ustar 00000000000000//! Archived versions of shared pointers. use core::{borrow::Borrow, cmp, fmt, hash, marker::PhantomData, ops::Deref}; use munge::munge; use rancor::{Fallible, Source}; use crate::{ primitive::FixedUsize, seal::Seal, ser::{Sharing, SharingExt, Writer, WriterExt as _}, traits::ArchivePointee, ArchiveUnsized, Place, Portable, RelPtr, SerializeUnsized, }; /// A type marker for `ArchivedRc`. pub trait Flavor: 'static { /// If `true`, cyclic `ArchivedRc`s with this flavor will not fail /// validation. If `false`, cyclic `ArchivedRc`s with this flavor will fail /// validation. const ALLOW_CYCLES: bool; } /// The flavor type for [`Rc`](crate::alloc::rc::Rc). pub struct RcFlavor; impl Flavor for RcFlavor { const ALLOW_CYCLES: bool = false; } /// The flavor type for [`Arc`](crate::alloc::sync::Arc). 
pub struct ArcFlavor; impl Flavor for ArcFlavor { const ALLOW_CYCLES: bool = false; } /// An archived `Rc`. /// /// This is a thin wrapper around a [`RelPtr`] to the archived type paired with /// a "flavor" type. Because there may be many varieties of shared pointers and /// they may not be used together, the flavor helps check that memory is not /// being shared incorrectly during validation. #[derive(Portable)] #[rkyv(crate)] #[repr(transparent)] #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] pub struct ArchivedRc { ptr: RelPtr, _phantom: PhantomData, } impl ArchivedRc { /// Gets the value of the `ArchivedRc`. pub fn get(&self) -> &T { unsafe { &*self.ptr.as_ptr() } } /// Gets the sealed value of this `ArchivedRc`. /// /// # Safety /// /// Any other pointers to the same value must not be dereferenced for the /// duration of the returned borrow. pub unsafe fn get_seal_unchecked(this: Seal<'_, Self>) -> Seal<'_, T> { munge!(let Self { ptr, _phantom: _ } = this); Seal::new(unsafe { &mut *RelPtr::as_mut_ptr(ptr) }) } /// Resolves an archived `Rc` from a given reference. pub fn resolve_from_ref + ?Sized>( value: &U, resolver: RcResolver, out: Place, ) { munge!(let ArchivedRc { ptr, .. } = out); RelPtr::emplace_unsized( resolver.pos as usize, value.archived_metadata(), ptr, ); } /// Serializes an archived `Rc` from a given reference. pub fn serialize_from_ref( value: &U, serializer: &mut S, ) -> Result where U: SerializeUnsized + ?Sized, S: Fallible + Writer + Sharing + ?Sized, S::Error: Source, { let pos = serializer.serialize_shared(value)?; // The positions of serialized `Rc` values must be unique. If we didn't // write any data by serializing `value`, pad the serializer by a byte // to ensure that our position will be unique. 
if serializer.pos() == pos { serializer.pad(1)?; } Ok(RcResolver { pos: pos as FixedUsize, }) } } impl AsRef for ArchivedRc where T: ArchivePointee + ?Sized, { fn as_ref(&self) -> &T { self.get() } } impl Borrow for ArchivedRc where T: ArchivePointee + ?Sized, { fn borrow(&self) -> &T { self.get() } } impl fmt::Debug for ArchivedRc where T: ArchivePointee + fmt::Debug + ?Sized, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.get().fmt(f) } } impl Deref for ArchivedRc where T: ArchivePointee + ?Sized, { type Target = T; fn deref(&self) -> &Self::Target { self.get() } } impl fmt::Display for ArchivedRc where T: ArchivePointee + fmt::Display + ?Sized, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.get().fmt(f) } } impl Eq for ArchivedRc where T: ArchivePointee + Eq + ?Sized {} impl hash::Hash for ArchivedRc where T: ArchivePointee + hash::Hash + ?Sized, { fn hash(&self, state: &mut H) { self.get().hash(state) } } impl Ord for ArchivedRc where T: ArchivePointee + Ord + ?Sized, { fn cmp(&self, other: &Self) -> cmp::Ordering { self.get().cmp(other.get()) } } impl PartialEq> for ArchivedRc where T: ArchivePointee + PartialEq + ?Sized, U: ArchivePointee + ?Sized, { fn eq(&self, other: &ArchivedRc) -> bool { self.get().eq(other.get()) } } impl PartialOrd> for ArchivedRc where T: ArchivePointee + PartialOrd + ?Sized, U: ArchivePointee + ?Sized, { fn partial_cmp(&self, other: &ArchivedRc) -> Option { self.get().partial_cmp(other.get()) } } impl fmt::Pointer for ArchivedRc { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Pointer::fmt(&self.ptr.base(), f) } } /// The resolver for `Rc`. pub struct RcResolver { pos: FixedUsize, } impl RcResolver { /// Creates a new [`RcResolver`] from the position of a serialized value. /// /// In most cases, you won't need to create a [`RcResolver`] yourself and /// can instead obtain it through [`ArchivedRc::serialize_from_ref`]. 
pub fn from_pos(pos: usize) -> Self { Self { pos: pos as FixedUsize, } } } /// An archived `rc::Weak`. /// /// This is essentially just an optional [`ArchivedRc`]. #[derive(Portable)] #[rkyv(crate)] #[repr(transparent)] #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] pub struct ArchivedRcWeak { ptr: RelPtr, _phantom: PhantomData, } impl ArchivedRcWeak { /// Attempts to upgrade the weak pointer to an `ArchivedArc`. /// /// Returns `None` if a null weak pointer was serialized. pub fn upgrade(&self) -> Option<&ArchivedRc> { if self.ptr.is_invalid() { None } else { Some(unsafe { &*(self as *const Self).cast() }) } } /// Attempts to upgrade a sealed weak pointer. pub fn upgrade_seal( this: Seal<'_, Self>, ) -> Option>> { let this = unsafe { this.unseal_unchecked() }; if this.ptr.is_invalid() { None } else { Some(Seal::new(unsafe { &mut *(this as *mut Self).cast() })) } } /// Resolves an archived `Weak` from a given optional reference. pub fn resolve_from_ref + ?Sized>( value: Option<&U>, resolver: RcWeakResolver, out: Place, ) { match value { None => { munge!(let ArchivedRcWeak { ptr, _phantom: _ } = out); RelPtr::emplace_invalid(ptr); } Some(value) => { let out = unsafe { out.cast_unchecked::>() }; ArchivedRc::resolve_from_ref(value, resolver.inner, out); } } } /// Serializes an archived `Weak` from a given optional reference. pub fn serialize_from_ref( value: Option<&U>, serializer: &mut S, ) -> Result where U: SerializeUnsized + ?Sized, S: Fallible + Writer + Sharing + ?Sized, S::Error: Source, { Ok(match value { None => RcWeakResolver { inner: RcResolver { pos: 0 }, }, Some(r) => RcWeakResolver { inner: ArchivedRc::::serialize_from_ref(r, serializer)?, }, }) } } impl fmt::Debug for ArchivedRcWeak { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "(Weak)") } } /// The resolver for `rc::Weak`. 
pub struct RcWeakResolver { inner: RcResolver, } #[cfg(feature = "bytecheck")] mod verify { use core::{any::TypeId, error::Error, fmt}; use bytecheck::{ rancor::{Fallible, Source}, CheckBytes, Verify, }; use rancor::fail; use crate::{ rc::{ArchivedRc, ArchivedRcWeak, Flavor}, traits::{ArchivePointee, LayoutRaw}, validation::{ shared::ValidationState, ArchiveContext, ArchiveContextExt, SharedContext, }, }; #[derive(Debug)] struct CyclicSharedPointerError; impl fmt::Display for CyclicSharedPointerError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "encountered cyclic shared pointers while validating") } } impl Error for CyclicSharedPointerError {} unsafe impl Verify for ArchivedRc where T: ArchivePointee + CheckBytes + LayoutRaw + ?Sized + 'static, T::ArchivedMetadata: CheckBytes, F: Flavor, C: Fallible + ArchiveContext + SharedContext + ?Sized, C::Error: Source, { fn verify(&self, context: &mut C) -> Result<(), C::Error> { let ptr = self.ptr.as_ptr_wrapping(); let type_id = TypeId::of::>(); let addr = ptr as *const u8 as usize; match context.start_shared(addr, type_id)? { ValidationState::Started => { context.in_subtree(ptr, |context| unsafe { T::check_bytes(ptr, context) })?; context.finish_shared(addr, type_id)?; } ValidationState::Pending => { if !F::ALLOW_CYCLES { fail!(CyclicSharedPointerError) } } ValidationState::Finished => (), } Ok(()) } } unsafe impl Verify for ArchivedRcWeak where T: ArchivePointee + CheckBytes + LayoutRaw + ?Sized + 'static, T::ArchivedMetadata: CheckBytes, F: Flavor, C: Fallible + ArchiveContext + SharedContext + ?Sized, C::Error: Source, { fn verify(&self, context: &mut C) -> Result<(), C::Error> { if self.ptr.is_invalid() { Ok(()) } else { // SAFETY: `ArchivedRc` and `ArchivedRcWeak` are // `repr(transparent)` and so have the same layout as each // other. 
let rc = unsafe { &*(self as *const Self).cast::>() }; rc.verify(context) } } } } rkyv-0.8.9/src/rel_ptr.rs000064400000000000000000000576701046102023000134600ustar 00000000000000//! Relative pointer implementations and options. use core::{ error::Error, fmt, marker::{PhantomData, PhantomPinned}, ptr::addr_of_mut, }; use munge::munge; use rancor::{fail, Panic, ResultExt as _, Source}; use crate::{ primitive::{ ArchivedI16, ArchivedI32, ArchivedI64, ArchivedU16, ArchivedU32, ArchivedU64, }, seal::Seal, traits::{ArchivePointee, NoUndef}, Place, Portable, }; #[derive(Clone, Copy, Debug, PartialEq, Eq)] struct IsizeOverflow; impl fmt::Display for IsizeOverflow { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "the offset overflowed the range of `isize`") } } impl Error for IsizeOverflow {} /// A offset that can be used with [`RawRelPtr`]. pub trait Offset: Copy + NoUndef { /// Creates a new offset between a `from` position and a `to` position. fn from_isize(value: isize) -> Result; /// Gets the offset as an `isize`. fn to_isize(self) -> isize; } macro_rules! impl_offset_single_byte { ($ty:ty) => { impl Offset for $ty { fn from_isize(value: isize) -> Result { // `pointer::add`` and `pointer::offset` require that the // computed offsets cannot overflow an isize, which is why we're // using signed_offset instead of `checked_sub` for unsized // types. Self::try_from(value).into_error() } #[inline] fn to_isize(self) -> isize { // We're guaranteed that our offset will not exceed the // capacity of an `isize` self as isize } } }; } impl_offset_single_byte!(i8); impl_offset_single_byte!(u8); macro_rules! impl_offset_multi_byte { ($ty:ty, $archived:ty) => { impl Offset for $archived { fn from_isize(value: isize) -> Result { // `pointer::add`` and `pointer::offset` require that the // computed offsets cannot overflow an isize, which is why we're // using signed_offset instead of `checked_sub` for unsized // types. 
Ok(<$archived>::from_native( <$ty>::try_from(value).into_error()?, )) } #[inline] fn to_isize(self) -> isize { // We're guaranteed that our offset will not exceed the // capacity of an `isize`. self.to_native() as isize } } }; } impl_offset_multi_byte!(i16, ArchivedI16); impl_offset_multi_byte!(i32, ArchivedI32); impl_offset_multi_byte!(i64, ArchivedI64); impl_offset_multi_byte!(u16, ArchivedU16); impl_offset_multi_byte!(u32, ArchivedU32); impl_offset_multi_byte!(u64, ArchivedU64); /// An untyped pointer which resolves relative to its position in memory. /// /// This is the most fundamental building block in rkyv. It allows the /// construction and use of pointers that can be safely relocated as long as the /// source and target are moved together. This is what allows memory to be moved /// from disk into memory and accessed without decoding. /// /// Regular pointers are *absolute*, meaning that the pointer can be moved /// without being invalidated. However, the pointee **cannot** be moved, /// otherwise the pointer is invalidated. /// /// Relative pointers are *relative*, meaning that the **pointer** can be moved /// with the **pointee** without invalidating the pointer. However, if either /// the **pointer** or the **pointee** move independently, the pointer will be /// invalidated. #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[derive(Portable)] #[rkyv(crate)] #[repr(transparent)] pub struct RawRelPtr { offset: O, _phantom: PhantomPinned, } /// Calculates the offset between two positions as an `isize`. /// /// This function exists solely to get the distance between two `usizes` as an /// `isize` with a full range of values. 
/// /// # Examples /// /// ``` /// # use rkyv::rel_ptr::signed_offset; /// # use rancor::Error; /// assert!(signed_offset::(0, 1).is_ok_and(|x| x == 1)); /// assert!(signed_offset::(1, 0).is_ok_and(|x| x == -1)); /// assert!(signed_offset::(0, isize::MAX as usize) /// .is_ok_and(|x| x == isize::MAX)); /// assert!(signed_offset::(isize::MAX as usize, 0) /// .is_ok_and(|x| x == -isize::MAX)); /// assert!(signed_offset::(0, isize::MAX as usize + 1).is_err()); /// assert!(signed_offset::(isize::MAX as usize + 1, 0) /// .is_ok_and(|x| x == isize::MIN)); /// assert!(signed_offset::(0, isize::MAX as usize + 2).is_err()); /// assert!(signed_offset::(isize::MAX as usize + 2, 0).is_err()); /// ``` pub fn signed_offset(from: usize, to: usize) -> Result { let (result, overflow) = to.overflowing_sub(from); if (!overflow && result <= (isize::MAX as usize)) || (overflow && result >= (isize::MIN as usize)) { Ok(result as isize) } else { fail!(IsizeOverflow); } } impl RawRelPtr { /// Attempts to create an invalid `RawRelPtr` in-place. pub fn try_emplace_invalid(out: Place) -> Result<(), E> { Self::try_emplace::(out.pos() + 1, out) } /// Creates an invalid `RawRelPtr` in-place. /// /// # Panics /// /// - If an offset of `1` does not fit in an `isize` /// - If an offset of `1` exceeds the offset storage pub fn emplace_invalid(out: Place) { Self::try_emplace_invalid::(out).always_ok(); } /// Attempts to create a new `RawRelPtr` in-place between the given `from` /// and `to` positions. pub fn try_emplace( to: usize, out: Place, ) -> Result<(), E> { let offset = O::from_isize(signed_offset(out.pos(), to)?)?; munge!(let Self { offset: out_offset, _phantom: _ } = out); out_offset.write(offset); Ok(()) } /// Creates a new `RawRelPtr` in-place between the given `from` and `to` /// positions. 
/// /// # Panics /// /// - If the offset between `out` and `to` does not fit in an `isize` /// - If the offset between `out` and `to` exceeds the offset storage pub fn emplace(to: usize, out: Place) { Self::try_emplace::(to, out).always_ok() } /// Gets the base pointer for the pointed-to relative pointer. pub fn base_raw(this: *mut Self) -> *mut u8 { this.cast() } /// Gets the offset of the pointed-to relative pointer from its base. /// /// # Safety /// /// `this` must be non-null, properly-aligned, and point to a valid /// `RawRelPtr`. pub unsafe fn offset_raw(this: *mut Self) -> isize { // SAFETY: The caller has guaranteed that `this` is safe to dereference unsafe { addr_of_mut!((*this).offset).read().to_isize() } } /// Calculates the memory address being pointed to by the pointed-to /// relative pointer. /// /// # Safety /// /// - `this` must be non-null, properly-aligned, and point to a valid /// `RawRelPtr`. /// - The offset of this relative pointer, when added to its base, must be /// located in the same allocated object as it. pub unsafe fn as_ptr_raw(this: *mut Self) -> *mut () { // SAFETY: // - The caller has guaranteed that `this` is safe to dereference. // - The caller has guaranteed that offsetting the base pointer by its // offset will yield a pointer in the same allocated object. unsafe { Self::base_raw(this).offset(Self::offset_raw(this)).cast() } } /// Calculates the memory address being pointed to by the pointed-to /// relative pointer using wrapping methods. /// /// This method is a safer but potentially slower version of `as_ptr_raw`. /// /// # Safety /// /// `this` must be non-null, properly-aligned, and point to a valid /// `RawRelPtr`. pub unsafe fn as_ptr_wrapping_raw(this: *mut Self) -> *mut () { // SAFETY: The safety requirements of `offset_raw` are the same as the // safety requirements for `as_ptr_wrapping_raw`. 
let offset = unsafe { Self::offset_raw(this) }; Self::base_raw(this).wrapping_offset(offset).cast() } /// Gets whether the offset of the pointed-to relative pointer is invalid. /// /// # Safety /// /// `this` must be non-null, properly-aligned, and point to a valid /// `RawRelPtr`. pub unsafe fn is_invalid_raw(this: *mut Self) -> bool { // SAFETY: The safety requirements of `offset_raw` are the same as the // safety requirements for `is_invalid_raw`. unsafe { Self::offset_raw(this) == 1 } } /// Gets the base pointer for the relative pointer. pub fn base(&self) -> *const u8 { Self::base_raw((self as *const Self).cast_mut()).cast_const() } /// Gets the mutable base pointer for the relative pointer. pub fn base_mut(this: Seal<'_, Self>) -> *mut u8 { // SAFETY: The value pointed to by `this` is not moved and no bytes are // written through it. let this = unsafe { Seal::unseal_unchecked(this) }; Self::base_raw(this as *mut Self) } /// Gets the offset of the relative pointer from its base. pub fn offset(&self) -> isize { let this = self as *const Self; // SAFETY: `self` is a reference, so it's guaranteed to be non-null, // properly-aligned, and point to a valid `RawRelPtr`. unsafe { Self::offset_raw(this.cast_mut()) } } /// Gets whether the offset of the relative pointer is invalid. pub fn is_invalid(&self) -> bool { let this = self as *const Self; // SAFETY: `self` is a reference, so it's guaranteed to be non-null, // properly-aligned, and point to a valid `RawRelPtr`. unsafe { Self::is_invalid_raw(this.cast_mut()) } } /// Calculates the memory address being pointed to by this relative pointer. /// /// # Safety /// /// The offset of this relative pointer, when added to its base, must be /// located in the same allocated object as it. pub unsafe fn as_ptr(&self) -> *const () { let this = self as *const Self; // SAFETY: // - `self` is a reference, so it's guaranteed to be non-null, // properly-aligned, and point to a valid `RawRelPtr`. 
// - The caller has guaranteed that the offset of this relative pointer, // when added to its base, is located in the same allocated object as // it. unsafe { Self::as_ptr_raw(this.cast_mut()).cast_const() } } /// Calculates the mutable memory address being pointed to by this relative /// pointer. /// /// # Safety /// /// The offset of this relative pointer, when added to its base, must be /// located in the same allocated object as it. pub unsafe fn as_mut_ptr(this: Seal<'_, Self>) -> *mut () { // SAFETY: The value pointed to by `this` is not moved and no bytes are // written through it. let this = unsafe { Seal::unseal_unchecked(this) }; // SAFETY: // - `this` is a reference, so it's guaranteed to be non-null, // properly-aligned, and point to a valid `RawRelPtr`. // - The caller has guaranteed that the offset of this relative pointer, // when added to its base, is located in the same allocated object as // it. unsafe { Self::as_ptr_raw(this as *mut Self) } } /// Calculates the memory address being pointed to by this relative pointer /// using wrapping methods. /// /// This method is a safer but potentially slower version of `as_ptr`. pub fn as_ptr_wrapping(&self) -> *const () { let this = self as *const Self; // SAFETY: `self` is a reference, so it's guaranteed to be non-null, // properly-aligned, and point to a valid `RawRelPtr`. unsafe { Self::as_ptr_wrapping_raw(this.cast_mut()).cast_const() } } /// Calculates the mutable memory address being pointed to by this relative /// pointer using wrapping methods. /// /// This method is a safer but potentially slower version of `as_mut_ptr`. pub fn as_mut_ptr_wrapping(this: Seal<'_, Self>) -> *mut () { // SAFETY: The value pointed to by `this` is not moved and no bytes are // written through it. let this = unsafe { Seal::unseal_unchecked(this) }; // SAFETY: `this` is a reference, so it's guaranteed to be non-null, // properly-aligned, and point to a valid `RawRelPtr`. 
unsafe { Self::as_ptr_wrapping_raw(this as *mut Self) } } } impl fmt::Debug for RawRelPtr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawRelPtr") .field("offset", &self.offset) .finish() } } impl fmt::Pointer for RawRelPtr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr_wrapping(), f) } } /// A raw relative pointer that uses an archived `i8` as the underlying offset. pub type RawRelPtrI8 = RawRelPtr; /// A raw relative pointer that uses an archived `i16` as the underlying offset. pub type RawRelPtrI16 = RawRelPtr; /// A raw relative pointer that uses an archived `i32` as the underlying offset. pub type RawRelPtrI32 = RawRelPtr; /// A raw relative pointer that uses an archived `i64` as the underlying offset. pub type RawRelPtrI64 = RawRelPtr; /// A raw relative pointer that uses an archived `u8` as the underlying offset. pub type RawRelPtrU8 = RawRelPtr; /// A raw relative pointer that uses an archived `u16` as the underlying offset. pub type RawRelPtrU16 = RawRelPtr; /// A raw relative pointer that uses an archived `u32` as the underlying offset. pub type RawRelPtrU32 = RawRelPtr; /// A raw relative pointer that uses an archived `u64` as the underlying offset. pub type RawRelPtrU64 = RawRelPtr; /// A pointer which resolves to relative to its position in memory. /// /// This is a strongly-typed version of [`RawRelPtr`]. /// /// See [`Archive`](crate::Archive) for an example of creating one. #[derive(Portable)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct RelPtr { raw_ptr: RawRelPtr, metadata: T::ArchivedMetadata, _phantom: PhantomData, } impl RelPtr { /// Attempts to create a relative pointer from one position to another. 
pub fn try_emplace( to: usize, out: Place, ) -> Result<(), E> { munge!(let RelPtr { raw_ptr, metadata: _, _phantom: _ } = out); // Skip metadata since sized T is guaranteed to be () RawRelPtr::try_emplace(to, raw_ptr) } /// Creates a relative pointer from one position to another. /// /// # Panics /// /// - If the offset between `from` and `to` does not fit in an `isize` /// - If the offset between `from` and `to` exceeds the offset storage pub fn emplace(to: usize, out: Place) { Self::try_emplace::(to, out).always_ok() } } impl RelPtr { /// Attempts to create an invalid relative pointer with default metadata. pub fn try_emplace_invalid(out: Place) -> Result<(), E> { munge!(let RelPtr { raw_ptr, metadata, _phantom: _ } = out); RawRelPtr::try_emplace_invalid(raw_ptr)?; metadata.write(Default::default()); Ok(()) } /// Creates an invalid relative pointer with default metadata. /// /// # Panics /// /// - If an offset of `1` does not fit in an `isize` /// - If an offset of `1` exceeds the offset storage pub fn emplace_invalid(out: Place) { Self::try_emplace_invalid::(out).always_ok() } /// Attempts to create a relative pointer from one position to another. pub fn try_emplace_unsized( to: usize, metadata: T::ArchivedMetadata, out: Place, ) -> Result<(), E> { munge!(let RelPtr { raw_ptr, metadata: out_meta, _phantom: _ } = out); RawRelPtr::try_emplace(to, raw_ptr)?; out_meta.write(metadata); Ok(()) } /// Creates a relative pointer from one position to another. /// /// # Panics /// /// - If the offset between `from` and `to` does not fit in an `isize` /// - If the offset between `from` and `to` exceeds the offset storage pub fn emplace_unsized( to: usize, metadata: T::ArchivedMetadata, out: Place, ) { Self::try_emplace_unsized::(to, metadata, out).always_ok() } /// Gets the base pointer for the pointed-to relative pointer. pub fn base_raw(this: *mut Self) -> *mut u8 { RawRelPtr::::base_raw(this.cast()) } /// Gets the offset of the pointed-to relative pointer from its base. 
/// /// # Safety /// /// `this` must be non-null, properly-aligned, and point to a valid /// `RelPtr`. pub unsafe fn offset_raw(this: *mut Self) -> isize { // SAFETY: `RelPtr` is `#[repr(C)]`, so the `RawRelPtr` member of the // `RelPtr` will have the same address as the `RelPtr`. Because `this` // is non-null, properly-aligned, and points to a valid `RelPtr`, a // pointer to its first field will also be non-null, properly-aligned, // and point to a valid `RawRelPtr`. unsafe { RawRelPtr::::offset_raw(this.cast()) } } /// Calculates the memory address being pointed to by the pointed-to /// relative pointer. /// /// # Safety /// /// - `this` must be non-null, properly-aligned, and point to a valid /// `RelPtr`. /// - The offset of this relative pointer, when added to its base, must be /// located in the same allocated object as it. pub unsafe fn as_ptr_raw(this: *mut Self) -> *mut T { // SAFETY: // - `RelPtr` is `#[repr(C)]`, so the `RawRelPtr` member of the `RelPtr` // will have the same address as the `RelPtr`. Because `this` is // non-null, properly-aligned, and points to a valid `RelPtr`, a // pointer to its first field will also be non-null, properly-aligned, // and point to a valid `RawRelPtr`. // - The base and offset of the `RawRelPtr` are guaranteed to be the // same as the base and offset of the `RelPtr`. let data_address = unsafe { RawRelPtr::::as_ptr_raw(this.cast()) }; // SAFETY: The caller has guaranteed that `this` points to a valid // `RelPtr`. let metadata = unsafe { T::pointer_metadata(&(*this).metadata) }; ptr_meta::from_raw_parts_mut(data_address, metadata) } /// Calculates the memory address being pointed to by the pointed-to /// relative pointer using wrapping methods. /// /// This method is a safer but potentially slower version of `as_ptr_raw`. /// /// # Safety /// /// `this` must be non-null, properly-aligned, and point to a valid /// `RelPtr`. 
pub unsafe fn as_ptr_wrapping_raw(this: *mut Self) -> *mut T { // SAFETY: `RelPtr` is `#[repr(C)]`, so the `RawRelPtr` member of the // `RelPtr` will have the same address as the `RelPtr`. Because `this` // is non-null, properly-aligned, and points to a valid `RelPtr`, a // pointer to its first field will also be non-null, properly-aligned, // and point to a valid `RawRelPtr`. let data_address = unsafe { RawRelPtr::::as_ptr_wrapping_raw(this.cast()) }; // SAFETY: The caller has guaranteed that `this` points to a valid // `RelPtr`. let metadata = unsafe { T::pointer_metadata(&(*this).metadata) }; ptr_meta::from_raw_parts_mut(data_address, metadata) } /// Gets whether the offset of the pointed-to relative pointer is invalid. /// /// # Safety /// /// `this` must be non-null, properly-aligned, and point to a valid /// `RawRelPtr`. pub unsafe fn is_invalid_raw(this: *mut Self) -> bool { // SAFETY: `RelPtr` is `#[repr(C)]`, so the `RawRelPtr` member of the // `RelPtr` will have the same address as the `RelPtr`. Because `this` // is non-null, properly-aligned, and points to a valid `RelPtr`, a // pointer to its first field will also be non-null, properly-aligned, // and point to a valid `RawRelPtr`. unsafe { RawRelPtr::::is_invalid_raw(this.cast()) } } /// Gets the base pointer for the relative pointer. pub fn base(&self) -> *const u8 { self.raw_ptr.base() } /// Gets the mutable base pointer for this relative pointer. pub fn base_mut(this: Seal<'_, Self>) -> *mut u8 { munge!(let Self { raw_ptr, .. } = this); RawRelPtr::base_mut(raw_ptr) } /// Gets the offset of the relative pointer from its base. pub fn offset(&self) -> isize { self.raw_ptr.offset() } /// Gets whether the offset of the relative pointer is 0. pub fn is_invalid(&self) -> bool { self.raw_ptr.is_invalid() } /// Gets the metadata of the relative pointer. pub fn metadata(&self) -> &T::ArchivedMetadata { &self.metadata } /// Calculates the memory address being pointed to by this relative pointer. 
/// /// # Safety /// /// The offset of this relative pointer, when added to its base, must be /// located in the same allocated object as it. pub unsafe fn as_ptr(&self) -> *const T { ptr_meta::from_raw_parts( // SAFETY: The safety requirements for `RawRelPtr::as_ptr` are the // same as those for `RelPtr::as_ptr``. unsafe { self.raw_ptr.as_ptr() }, T::pointer_metadata(&self.metadata), ) } /// Calculates the mutable memory address being pointed to by this relative /// pointer. /// /// # Safety /// /// The offset of this relative pointer, when added to its base, must be /// located in the same allocated object as it. pub unsafe fn as_mut_ptr(this: Seal<'_, Self>) -> *mut T { munge!(let Self { raw_ptr, metadata, _phantom: _ } = this); let metadata = T::pointer_metadata(&*metadata); ptr_meta::from_raw_parts_mut( // SAFETY: The safety requirements for `RawRelPtr::as_mut_ptr` are // the same as those for `RelPtr::as_mut_ptr``. unsafe { RawRelPtr::as_mut_ptr(raw_ptr) }, metadata, ) } /// Calculates the memory address being pointed to by this relative pointer /// using wrapping methods. /// /// This method is a safer but potentially slower version of `as_ptr`. pub fn as_ptr_wrapping(&self) -> *const T { ptr_meta::from_raw_parts( self.raw_ptr.as_ptr_wrapping(), T::pointer_metadata(&self.metadata), ) } /// Calculates the mutable memory address being pointed to by this relative /// pointer using wrapping methods. /// /// This method is a safer but potentially slower version of `as_ptr`. 
pub fn as_mut_ptr_wrapping(this: Seal<'_, Self>) -> *mut T { munge!(let Self { raw_ptr, metadata, _phantom: _ } = this); let metadata = T::pointer_metadata(&*metadata); ptr_meta::from_raw_parts_mut( RawRelPtr::as_mut_ptr_wrapping(raw_ptr), metadata, ) } } impl fmt::Debug for RelPtr where T::ArchivedMetadata: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RelPtr") .field("raw_ptr", &self.raw_ptr) .field("metadata", &self.metadata) .finish() } } impl fmt::Pointer for RelPtr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr_wrapping(), f) } } rkyv-0.8.9/src/result.rs000064400000000000000000000140361046102023000133140ustar 00000000000000//! An archived version of `Result`. use core::{ cmp::Ordering, hash, ops::{Deref, DerefMut}, }; use crate::{seal::Seal, Portable}; /// An archived [`Result`] that represents either success /// ([`Ok`](ArchivedResult::Ok)) or failure ([`Err`](ArchivedResult::Err)). #[derive(Debug, Portable)] #[rkyv(crate)] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[repr(u8)] pub enum ArchivedResult { /// Contains the success value Ok(T), /// Contains the error value Err(E), } impl ArchivedResult { /// Converts from `ArchivedResult` to `Option`. pub fn ok(self) -> Option { match self { ArchivedResult::Ok(value) => Some(value), ArchivedResult::Err(_) => None, } } /// Returns the contained [`Ok`](ArchivedResult::Ok) value, consuming the /// `self` value. pub fn unwrap(self) -> T { match self { ArchivedResult::Ok(value) => value, ArchivedResult::Err(_) => { panic!("called `ArchivedResult::unwrap()` on an `Err` value") } } } /// Returns the contained `Ok` value or computes it from a closure. pub fn unwrap_or_else(self, op: F) -> T where F: FnOnce(E) -> T, { match self { ArchivedResult::Ok(t) => t, ArchivedResult::Err(e) => op(e), } } /// Returns `true` if the result is [`Ok`](ArchivedResult::Ok). 
pub const fn is_ok(&self) -> bool { matches!(self, ArchivedResult::Ok(_)) } /// Returns `true` if the result is [`Err`](ArchivedResult::Err). pub const fn is_err(&self) -> bool { matches!(self, ArchivedResult::Err(_)) } /// Returns a `Result` containing the success and error values of this /// `ArchivedResult`. pub fn as_ref(&self) -> Result<&T, &E> { match self { ArchivedResult::Ok(value) => Ok(value), ArchivedResult::Err(err) => Err(err), } } /// Converts from `&mut ArchivedResult` to `Result<&mut T, &mut E>`. pub fn as_mut(&mut self) -> Result<&mut T, &mut E> { match self { ArchivedResult::Ok(value) => Ok(value), ArchivedResult::Err(err) => Err(err), } } /// Converts from `Seal<'_, ArchivedResult>` to /// `Result, Seal<'_, E>>`. pub fn as_seal(this: Seal<'_, Self>) -> Result, Seal<'_, E>> { let this = unsafe { Seal::unseal_unchecked(this) }; match this { ArchivedResult::Ok(value) => Ok(Seal::new(value)), ArchivedResult::Err(err) => Err(Seal::new(err)), } } /// Returns an iterator over the possibly-contained value. /// /// The iterator yields one value if the result is `ArchivedResult::Ok`, /// otherwise none. pub fn iter(&self) -> Iter<&'_ T> { Iter::new(self.as_ref().ok()) } /// Returns an iterator over the mutable possibly-contained value. /// /// The iterator yields one value if the result is `ArchivedResult::Ok`, /// otherwise none. pub fn iter_mut(&mut self) -> Iter<&'_ mut T> { Iter::new(self.as_mut().ok()) } /// Returns an iterator over the sealed possibly-contained value. /// /// The iterator yields one value if the result is `ArchivedResult::Ok`, /// otherwise none. pub fn iter_seal(this: Seal<'_, Self>) -> Iter> { Iter::new(Self::as_seal(this).ok()) } } impl ArchivedResult { /// Converts from `&ArchivedResult` to `Result<&::Target, /// &E>`. /// /// Coerces the `Ok` variant of the original `ArchivedResult` via `Deref` /// and returns the new `Result`. 
pub fn as_deref(&self) -> Result<&::Target, &E> { match self { ArchivedResult::Ok(value) => Ok(value.deref()), ArchivedResult::Err(err) => Err(err), } } } impl ArchivedResult { /// Converts from `&mut ArchivedResult` to `Result<&mut ::Target, &mut E>`. /// /// Coerces the `Ok` variant of the original `ArchivedResult` via `DerefMut` /// and returns the new `Result`. pub fn as_deref_mut( &mut self, ) -> Result<&mut ::Target, &mut E> { match self { ArchivedResult::Ok(value) => Ok(value.deref_mut()), ArchivedResult::Err(err) => Err(err), } } } /// An iterator over a reference to the `Ok` variant of an [`ArchivedResult`]. /// /// The iterator yields one value if the result is `Ok`, otherwise none. /// /// Created by [`ArchivedResult::iter`]. pub type Iter

= crate::option::Iter

; impl Eq for ArchivedResult {} impl hash::Hash for ArchivedResult { fn hash(&self, state: &mut H) { self.as_ref().hash(state) } } impl Ord for ArchivedResult { fn cmp(&self, other: &Self) -> Ordering { self.as_ref().cmp(&other.as_ref()) } } impl PartialEq for ArchivedResult { fn eq(&self, other: &Self) -> bool { self.as_ref().eq(&other.as_ref()) } } impl PartialOrd for ArchivedResult { fn partial_cmp(&self, other: &Self) -> Option { self.as_ref().partial_cmp(&other.as_ref()) } } impl PartialEq> for ArchivedResult where U: PartialEq, F: PartialEq, { fn eq(&self, other: &Result) -> bool { match self { ArchivedResult::Ok(self_value) => { if let Ok(other_value) = other { self_value.eq(other_value) } else { false } } ArchivedResult::Err(self_err) => { if let Err(other_err) = other { self_err.eq(other_err) } else { false } } } } } rkyv-0.8.9/src/seal.rs000064400000000000000000000064701046102023000127250ustar 00000000000000//! Mutable references to values which may not be moved or de-initialized. use core::{ ops::{Deref, DerefMut}, slice::SliceIndex, }; use munge::{Borrow, Destructure, Restructure}; use crate::traits::NoUndef; /// A mutable reference which may not be moved or assigned. /// /// A `Seal` restricts a mutable reference so that the referenced value cannot /// be moved or assigned unless it is `Unpin` and `NoUndef`. These properties /// allow the safe use of mutable archived values. /// /// Unlike `Pin`, all fields of `Seal`ed values are also sealed. There is no /// notion of "structural sealing" as there is structural pinning. This has the /// upside that a `Seal` can be uniformly destructured with `munge`, which is /// the recommended replacement for `Pin`'s `map_unchecked_mut` function. Also /// unlike `Pin`, `Seal`ing a reference does not require upholding the invariant /// that the sealed value is dropped before its backing memory is reused. This /// means that creating a `Seal` from a mutable reference is completely safe to /// do. 
pub struct Seal<'a, T: ?Sized> { inner: &'a mut T, } impl<'a, T: ?Sized> Seal<'a, T> { /// Returns a new `Seal` wrapping the given reference. pub fn new(inner: &'a mut T) -> Self { Self { inner } } /// Returns the underlying reference for types that implement `NoUndef` /// and `Unpin`. pub fn unseal(self) -> &'a mut T where T: NoUndef + Unpin, { self.inner } /// Returns the underlying reference as shared for types that implement /// `Portable`. pub fn unseal_ref(self) -> &'a T { self.inner } /// Returns the underlying reference. /// /// # Safety /// /// The returned reference may not be moved unless `T` is `Unpin`. /// Uninitialized bytes may not be written through the `Seal`. pub unsafe fn unseal_unchecked(self) -> &'a mut T { self.inner } /// Mutably reborrows the `Seal`. pub fn as_mut(&mut self) -> Seal<'_, T> { Seal::new(self.inner) } } impl AsRef for Seal<'_, T> { fn as_ref(&self) -> &T { self.inner } } impl Deref for Seal<'_, T> { type Target = T; fn deref(&self) -> &Self::Target { self.as_ref() } } impl DerefMut for Seal<'_, T> { fn deref_mut(&mut self) -> &mut Self::Target { self.as_mut().unseal() } } unsafe impl Destructure for Seal<'_, T> { type Underlying = T; type Destructuring = Borrow; fn underlying(&mut self) -> *mut Self::Underlying { self.inner } } unsafe impl<'a, T: ?Sized, U: 'a + ?Sized> Restructure for Seal<'a, T> { type Restructured = Seal<'a, U>; unsafe fn restructure(&self, ptr: *mut U) -> Self::Restructured { // SAFETY: `ptr` is a pointer to a subfield of the underlying pointer, // and so is also properly aligned, and dereferenceable. Seal::new(unsafe { &mut *ptr }) } } impl<'a, T> Seal<'a, [T]> { /// Indexes the `Seal`. /// /// # Panics /// /// May panic if the index is out of bounds. 
pub fn index>( self, index: I, ) -> Seal<'a, >::Output> { let ptr = unsafe { Seal::unseal_unchecked(self) }; Seal::new(&mut ptr[index]) } } rkyv-0.8.9/src/ser/allocator/alloc.rs000064400000000000000000000222761046102023000156460ustar 00000000000000use core::{ alloc::Layout, marker::PhantomData, mem::{align_of, size_of, ManuallyDrop}, ptr::{slice_from_raw_parts_mut, NonNull}, }; use crate::{ alloc::alloc::{alloc, dealloc, handle_alloc_error}, ser::Allocator, }; struct Block { next_ptr: NonNull, next_size: usize, } impl Block { fn alloc(size: usize) -> NonNull { debug_assert!(size >= size_of::()); let layout = Layout::from_size_align(size, align_of::()).unwrap(); let ptr = unsafe { alloc(layout).cast::() }; let Some(ptr) = NonNull::new(ptr) else { handle_alloc_error(layout) }; unsafe { ptr.as_ptr().write(Self { next_ptr: ptr, next_size: layout.size(), }); } ptr } unsafe fn dealloc(ptr: NonNull, size: usize) { let layout = unsafe { Layout::from_size_align(size, align_of::()).unwrap_unchecked() }; unsafe { dealloc(ptr.as_ptr().cast(), layout); } } /// # Safety /// /// `tail_ptr` and `new_ptr` must point to valid `Block`s and `new_ptr` must /// be the only block in its loop. unsafe fn push_next( mut tail_ptr: NonNull, mut new_ptr: NonNull, ) { let tail = unsafe { tail_ptr.as_mut() }; let new = unsafe { new_ptr.as_mut() }; debug_assert!(new.next_ptr == new_ptr); let head = tail.next_ptr; let head_cap = tail.next_size; tail.next_ptr = new_ptr; tail.next_size = new.next_size; new.next_ptr = head; new.next_size = head_cap; } } /// An arena allocator for allocations. /// /// Reusing the same arena for multiple serializations will reduce the number of /// global allocations, which can save a considerable amount of time. 
pub struct Arena { head_ptr: NonNull, } // SAFETY: Arena is safe to send to other threads unsafe impl Send for Arena {} impl Drop for Arena { fn drop(&mut self) { self.shrink(); let head_size = unsafe { self.head_ptr.as_ref().next_size }; unsafe { Block::dealloc(self.head_ptr, head_size); } } } impl Arena { /// The default capacity for arenas. pub const DEFAULT_CAPACITY: usize = 1024; /// Creates a new `Arena` with the default capacity. pub fn new() -> Self { Self::with_capacity(Self::DEFAULT_CAPACITY) } /// Creates a new `Arena` with at least the requested capacity. pub fn with_capacity(cap: usize) -> Self { let head_size = (cap + size_of::()).next_power_of_two(); let head_ptr = Block::alloc(head_size); Self { head_ptr } } /// Cleans up allocated blocks which are no longer in use. /// /// The arena is automatically shrunk by [`acquire`](Self::acquire). pub fn shrink(&mut self) -> usize { let (mut current_ptr, mut current_size) = { let head = unsafe { self.head_ptr.as_ref() }; (head.next_ptr, head.next_size) }; loop { let current = unsafe { current_ptr.as_mut() }; if current.next_ptr == current_ptr { // There was only one block in the loop. No deallocating needed. break; } let next_ptr = current.next_ptr; let next_size = current.next_size; if next_ptr == self.head_ptr { // End of the loop. Free the head block. unsafe { Block::dealloc(next_ptr, next_size); } // Loop the head back on itself. current.next_ptr = current_ptr; current.next_size = current_size; self.head_ptr = current_ptr; break; } unsafe { Block::dealloc(current_ptr, current_size); } current_ptr = next_ptr; current_size = next_size; } current_size - size_of::() } /// Returns the available capacity of the arena. pub fn capacity(&self) -> usize { let mut current_ptr = self.head_ptr; loop { let current = unsafe { current_ptr.as_ref() }; if current.next_ptr == self.head_ptr { break current.next_size - size_of::(); } current_ptr = current.next_ptr; } } /// Acquires a handle to the arena. 
/// /// The returned handle has exclusive allocation rights in the arena. pub fn acquire(&mut self) -> ArenaHandle<'_> { self.shrink(); ArenaHandle { tail_ptr: self.head_ptr, tail_size: unsafe { self.head_ptr.as_ref().next_size }, used: size_of::(), _phantom: PhantomData, } } /// Consumes the `Arena`, returning a raw pointer. pub fn into_raw(self) -> NonNull<()> { let this = ManuallyDrop::new(self); this.head_ptr.cast() } /// Constructs an arena from a raw pointer. /// /// # Safety /// /// `raw` must have been returned from `into_raw`. `from_raw` takes /// ownership over the pointer, and so `from_raw` must not be called on the /// same pointer more than once. pub unsafe fn from_raw(raw: NonNull<()>) -> Self { Self { head_ptr: raw.cast(), } } } impl Default for Arena { fn default() -> Self { Self::new() } } /// A handle which can allocate within an arena. pub struct ArenaHandle<'a> { tail_ptr: NonNull, tail_size: usize, used: usize, _phantom: PhantomData<&'a mut Arena>, } // SAFETY: ArenaHandle is safe to send to other threads unsafe impl Send for ArenaHandle<'_> {} unsafe impl Allocator for ArenaHandle<'_> { unsafe fn push_alloc( &mut self, layout: Layout, ) -> Result, E> { let pos = self.tail_ptr.as_ptr() as usize + self.used; let pad = 0usize.wrapping_sub(pos) % layout.align(); if pad + layout.size() <= self.tail_size - self.used { self.used += pad; } else { // Allocation request is too large, allocate a new block let size = usize::max( 2 * self.tail_size, (size_of::() + layout.size() + layout.align()) .next_power_of_two(), ); let next = Block::alloc(size); unsafe { Block::push_next(self.tail_ptr, next); } self.tail_ptr = next; self.tail_size = size; let pos = self.tail_ptr.as_ptr() as usize + size_of::(); let pad = 0usize.wrapping_sub(pos) % layout.align(); self.used = size_of::() + pad; } // SAFETY: `self.used` is always less than the length of the allocated // block that `tail_ptr` points to. 
let ptr = unsafe { self.tail_ptr.as_ptr().cast::().add(self.used) }; let slice_ptr = slice_from_raw_parts_mut(ptr, layout.size()); // SAFETY: `slice_ptr` is guaranteed not to be null because it is offset // from `self.tail_ptr` which is always non-null. let result = unsafe { NonNull::new_unchecked(slice_ptr) }; self.used += layout.size(); Ok(result) } unsafe fn pop_alloc( &mut self, ptr: NonNull, _: Layout, ) -> Result<(), E> { // If the popped allocation was in the current tail block, then we can // reduce the amount of used space. let start = self.tail_ptr.as_ptr() as usize; let end = start + self.tail_size; let pos = ptr.as_ptr() as usize; if (start..end).contains(&pos) { self.used = pos - start; } Ok(()) } } #[cfg(test)] mod tests { use core::alloc::Layout; use rancor::{Panic, ResultExt}; use crate::{ alloc::{string::ToString, vec}, api::high::to_bytes_in_with_alloc, ser::{allocator::Arena, Allocator}, util::AlignedVec, }; #[test] fn reuse_arena() { let mut arena = Arena::with_capacity(2); let value = vec![ "hello".to_string(), "world".to_string(), "foo".to_string(), "bar".to_string(), "baz".to_string(), ]; for _ in 0..10 { to_bytes_in_with_alloc::<_, _, Panic>( &value, AlignedVec::<16>::new(), arena.acquire(), ) .unwrap(); } } #[test] fn pop_non_tail() { let mut arena = Arena::new(); let mut handle = arena.acquire(); let layout = Layout::from_size_align(Arena::DEFAULT_CAPACITY, 1).unwrap(); unsafe { let a = Allocator::::push_alloc(&mut handle, layout).always_ok(); let b = Allocator::::push_alloc(&mut handle, layout).always_ok(); Allocator::::pop_alloc(&mut handle, b.cast(), layout) .always_ok(); Allocator::::pop_alloc(&mut handle, a.cast(), layout) .always_ok(); } } } rkyv-0.8.9/src/ser/allocator/core.rs000064400000000000000000000051001046102023000154670ustar 00000000000000use core::{ alloc::Layout, error::Error, fmt, marker::PhantomData, mem::MaybeUninit, ptr::{slice_from_raw_parts_mut, NonNull}, }; use rancor::{fail, Source}; use crate::ser::Allocator; 
#[derive(Debug)] struct OutOfSpaceError { layout: Layout, } impl fmt::Display for OutOfSpaceError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "not enough space to allocate request of size {} and align {}", self.layout.size(), self.layout.align() ) } } impl Error for OutOfSpaceError {} /// An allocator that sub-allocates a fixed-size memory space. #[derive(Debug)] pub struct SubAllocator<'a> { bytes: NonNull, used: usize, size: usize, _phantom: PhantomData<&'a mut [MaybeUninit]>, } impl<'a> SubAllocator<'a> { /// Creates an empty suballocator. pub fn empty() -> Self { Self { bytes: NonNull::dangling(), used: 0, size: 0, _phantom: PhantomData, } } /// Creates a new sub-allocator from the given byte slice. pub fn new(bytes: &'a mut [MaybeUninit]) -> Self { Self { bytes: unsafe { NonNull::new_unchecked(bytes.as_mut_ptr().cast()) }, used: 0, size: bytes.len(), _phantom: PhantomData, } } } unsafe impl Allocator for SubAllocator<'_> where E: Source, { unsafe fn push_alloc( &mut self, layout: Layout, ) -> Result, E> { let pos = self.bytes.as_ptr() as usize + self.used; let pad = 0usize.wrapping_sub(pos) % layout.align(); if pad + layout.size() <= self.size - self.used { self.used += pad; } else { fail!(OutOfSpaceError { layout }); } // SAFETY: `self.used` is always less than the length of the allocated // block that `self.bytes` points to. let ptr = unsafe { self.bytes.as_ptr().add(self.used) }; let slice_ptr = slice_from_raw_parts_mut(ptr, layout.size()); // SAFETY: `slice_ptr` is guaranteed not to be null because it is // offset from `self.bytes` which is always non-null. 
let result = unsafe { NonNull::new_unchecked(slice_ptr) }; self.used += layout.size(); Ok(result) } unsafe fn pop_alloc( &mut self, ptr: NonNull, _: Layout, ) -> Result<(), E> { let bytes = self.bytes.as_ptr(); self.used = ptr.as_ptr() as usize - bytes as usize; Ok(()) } } rkyv-0.8.9/src/ser/allocator/mod.rs000064400000000000000000000161521046102023000153270ustar 00000000000000//! Allocators for serializers to use during serialization. #[cfg(feature = "alloc")] mod alloc; mod core; use ::core::{alloc::Layout, ptr::NonNull}; use rancor::{Fallible, Strategy}; #[cfg(feature = "alloc")] pub use self::alloc::*; pub use self::core::*; /// A serializer that can allocate scratch space. /// /// # Safety /// /// `push_alloc` must return a pointer to unaliased memory which fits the /// provided layout. pub unsafe trait Allocator::Error> { /// Allocates scratch space of the requested size. /// /// # Safety /// /// `layout` must have non-zero size. unsafe fn push_alloc(&mut self, layout: Layout) -> Result, E>; /// Deallocates previously allocated scratch space. /// /// # Safety /// /// - The allocations pushed on top of the given allocation must not be /// popped after calling `pop_alloc`. /// - `layout` must be the same layout that was used to allocate the block /// of memory for the given pointer. unsafe fn pop_alloc( &mut self, ptr: NonNull, layout: Layout, ) -> Result<(), E>; } unsafe impl, E> Allocator for Strategy { unsafe fn push_alloc( &mut self, layout: Layout, ) -> Result, E> { // SAFETY: The safety requirements for `push_alloc()` are the same as // the requirements for `T::push_alloc`. unsafe { T::push_alloc(self, layout) } } unsafe fn pop_alloc( &mut self, ptr: NonNull, layout: Layout, ) -> Result<(), E> { // SAFETY: The safety requirements for `pop_alloc()` are the same as // the requirements for `T::pop_alloc`. unsafe { T::pop_alloc(self, ptr, layout) } } } /// Statistics for the allocations which occurred during serialization. 
#[derive(Debug)] pub struct AllocationStats { bytes_allocated: usize, allocations: usize, /// Returns the maximum number of bytes that were concurrently allocated. pub max_bytes_allocated: usize, /// Returns the maximum number of concurrent allocations. pub max_allocations: usize, /// Returns the maximum alignment of requested allocations. pub max_alignment: usize, } impl AllocationStats { /// Returns the minimum arena capacity required to serialize the same data. /// /// This calculation takes into account packing efficiency for slab /// allocated space. It is not exact, and has an error bound of /// `max_allocations * (max_alignment - 1)` bytes. This should be suitably /// small for most use cases. #[inline] pub fn min_arena_capacity(&self) -> usize { self.max_bytes_allocated + self.min_arena_capacity_max_error() } /// Returns the maximum error term for the minimum arena capacity /// calculation. #[inline] pub fn min_arena_capacity_max_error(&self) -> usize { self.max_allocations * (self.max_alignment - 1) } } impl AllocationStats { #[inline] fn push(&mut self, layout: Layout) { self.bytes_allocated += layout.size(); self.allocations += 1; self.max_bytes_allocated = usize::max(self.bytes_allocated, self.max_bytes_allocated); self.max_allocations = usize::max(self.allocations, self.max_allocations); self.max_alignment = usize::max(self.max_alignment, layout.align()); } #[inline] fn pop(&mut self, layout: Layout) { self.bytes_allocated -= layout.size(); self.allocations -= 1; } } /// A passthrough allocator that tracks usage. pub struct AllocationTracker { inner: T, stats: AllocationStats, } impl AllocationTracker { /// Returns a new allocation tracker wrapping the given allocator. pub fn new(inner: T) -> Self { Self { inner, stats: AllocationStats { bytes_allocated: 0, allocations: 0, max_bytes_allocated: 0, max_allocations: 0, max_alignment: 1, }, } } /// Returns the allocation stats accumulated during serialization. 
pub fn into_stats(self) -> AllocationStats { self.stats } } unsafe impl, E> Allocator for AllocationTracker { unsafe fn push_alloc( &mut self, layout: Layout, ) -> Result, E> { self.stats.push(layout); // SAFETY: The safety requirements for `push_alloc` are the same as the // requirements for `inner.push_alloc`. unsafe { self.inner.push_alloc(layout) } } unsafe fn pop_alloc( &mut self, ptr: NonNull, layout: Layout, ) -> Result<(), E> { self.stats.pop(layout); // SAFETY: The safety requirements for `pop_alloc` are the same as the // requirements for `inner.pop_alloc`. unsafe { self.inner.pop_alloc(ptr, layout) } } } impl From for AllocationTracker { fn from(inner: T) -> Self { Self::new(inner) } } #[cfg(test)] mod tests { use core::mem::MaybeUninit; use rancor::{Panic, Strategy}; use crate::{ api::serialize_using, ser::{ allocator::{AllocationStats, AllocationTracker, SubAllocator}, sharing::Unshare, writer::Buffer, Serializer, }, util::Align, Serialize, }; type TrackerSerializer<'a> = Strategy< Serializer, AllocationTracker>, Unshare>, Panic, >; fn track_serialize(value: &T) -> AllocationStats where T: for<'a> Serialize>, { let mut output = Align([MaybeUninit::::uninit(); 256]); let mut scratch = [MaybeUninit::::uninit(); 256]; let mut serializer = Serializer::new( Buffer::from(&mut *output), AllocationTracker::new(SubAllocator::new(&mut scratch)), Unshare, ); serialize_using(value, &mut serializer).unwrap(); serializer.into_raw_parts().1.into_stats() } #[test] fn simple() { let stats = track_serialize(&42); assert_eq!(stats.max_bytes_allocated, 0); assert_eq!(stats.max_allocations, 0); assert_eq!(stats.max_alignment, 1); assert_eq!(stats.min_arena_capacity(), 0); assert_eq!(stats.min_arena_capacity_max_error(), 0); } #[cfg(feature = "alloc")] #[test] fn nested() { use crate::alloc::vec; let stats = track_serialize(&vec![1, 2, 3, 4]); assert_eq!(stats.max_bytes_allocated, 0); assert_eq!(stats.max_allocations, 0); assert_eq!(stats.max_alignment, 1); 
assert_eq!(stats.min_arena_capacity(), 0); assert_eq!(stats.min_arena_capacity_max_error(), 0); } #[cfg(feature = "alloc")] #[test] fn doubly_nested() { use crate::alloc::vec; let stats = track_serialize(&vec![vec![1, 2], vec![3, 4]]); assert_ne!(stats.max_bytes_allocated, 0); assert_eq!(stats.max_allocations, 1); assert_ne!(stats.min_arena_capacity(), 0); } } rkyv-0.8.9/src/ser/mod.rs000064400000000000000000000047271046102023000133540ustar 00000000000000//! Serialization traits and adapters. pub mod allocator; pub mod sharing; pub mod writer; use ::core::{alloc::Layout, ptr::NonNull}; #[doc(inline)] pub use self::{ allocator::Allocator, sharing::{Sharing, SharingExt}, writer::{Positional, Writer, WriterExt}, }; /// A serializer built from composeable pieces. #[derive(Debug, Default)] pub struct Serializer { /// The writer of the serializer. pub writer: W, /// The allocator of the serializer. pub allocator: A, /// The pointer sharing of the serializer. pub sharing: S, } impl Serializer { /// Creates a new serializer from a writer, allocator, and pointer sharing. pub fn new(writer: W, allocator: A, sharing: S) -> Self { Self { writer, allocator, sharing, } } /// Consumes the serializer and returns the components. pub fn into_raw_parts(self) -> (W, A, S) { (self.writer, self.allocator, self.sharing) } /// Consumes the serializer and returns the writer. /// /// The allocator and pointer sharing are discarded. pub fn into_writer(self) -> W { self.writer } } impl Positional for Serializer { fn pos(&self) -> usize { self.writer.pos() } } impl, A, S, E> Writer for Serializer { fn write(&mut self, bytes: &[u8]) -> Result<(), E> { self.writer.write(bytes) } } unsafe impl, S, E> Allocator for Serializer { unsafe fn push_alloc( &mut self, layout: Layout, ) -> Result, E> { // SAFETY: The safety requirements for `A::push_alloc()` are the same as // the safety requirements for `push_alloc()`. 
unsafe { self.allocator.push_alloc(layout) } } unsafe fn pop_alloc( &mut self, ptr: NonNull, layout: Layout, ) -> Result<(), E> { // SAFETY: The safety requirements for `A::pop_alloc()` are the same as // the safety requirements for `pop_alloc()`. unsafe { self.allocator.pop_alloc(ptr, layout) } } } impl, E> Sharing for Serializer { fn start_sharing(&mut self, address: usize) -> sharing::SharingState { self.sharing.start_sharing(address) } fn finish_sharing(&mut self, address: usize, pos: usize) -> Result<(), E> { self.sharing.finish_sharing(address, pos) } } rkyv-0.8.9/src/ser/sharing/alloc.rs000064400000000000000000000050151046102023000153110ustar 00000000000000use core::{error::Error, fmt, hash::BuildHasherDefault}; use hashbrown::hash_map::{Entry, HashMap}; use rancor::{fail, Source}; use crate::{ hash::FxHasher64, ser::{sharing::SharingState, Sharing}, }; /// A shared pointer strategy that shares serializations of the same shared /// pointer. #[derive(Debug, Default)] pub struct Share { shared_address_to_pos: HashMap, BuildHasherDefault>, } impl Share { /// Creates a new shared pointer unifier. #[inline] pub fn new() -> Self { Self::default() } /// Creates a new shared pointer unifier with initial capacity. #[inline] pub fn with_capacity(capacity: usize) -> Self { Self { shared_address_to_pos: HashMap::with_capacity_and_hasher( capacity, Default::default(), ), } } /// Clears the shared pointer unifier for reuse. 
pub fn clear(&mut self) { self.shared_address_to_pos.clear(); } } #[derive(Debug)] struct NotStarted; impl fmt::Display for NotStarted { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "shared pointer was not started sharing") } } impl Error for NotStarted {} #[derive(Debug)] struct AlreadyFinished; impl fmt::Display for AlreadyFinished { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "shared pointer was already finished sharing") } } impl Error for AlreadyFinished {} impl Sharing for Share { fn start_sharing(&mut self, address: usize) -> SharingState { match self.shared_address_to_pos.entry(address) { Entry::Vacant(vacant) => { vacant.insert(None); SharingState::Started } Entry::Occupied(occupied) => { if let Some(pos) = occupied.get() { SharingState::Finished(*pos) } else { SharingState::Pending } } } } fn finish_sharing(&mut self, address: usize, pos: usize) -> Result<(), E> { match self.shared_address_to_pos.entry(address) { Entry::Vacant(_) => fail!(NotStarted), Entry::Occupied(mut occupied) => { let inner = occupied.get_mut(); if inner.is_some() { fail!(AlreadyFinished); } else { *inner = Some(pos); Ok(()) } } } } } rkyv-0.8.9/src/ser/sharing/core.rs000064400000000000000000000006401046102023000151460ustar 00000000000000use crate::ser::{sharing::SharingState, Sharing}; /// A shared pointer strategy that duplicates serializations of the same shared /// pointer. #[derive(Debug, Default)] pub struct Unshare; impl Sharing for Unshare { fn start_sharing(&mut self, _: usize) -> SharingState { SharingState::Started } fn finish_sharing(&mut self, _: usize, _: usize) -> Result<(), E> { Ok(()) } } rkyv-0.8.9/src/ser/sharing/mod.rs000064400000000000000000000066661046102023000150130ustar 00000000000000//! Shared pointer serialization. 
#[cfg(feature = "alloc")] mod alloc; mod core; use ::core::{error::Error, fmt}; use rancor::{fail, Fallible, Source, Strategy}; #[cfg(feature = "alloc")] pub use self::alloc::*; pub use self::core::*; use crate::SerializeUnsized; /// The result of starting to serialize a shared pointer. pub enum SharingState { /// The caller started sharing this value. They should proceed to serialize /// the shared value and call `finish_sharing`. Started, /// Another caller started sharing this value, but has not finished yet. /// This can only occur with cyclic shared pointer structures, and so rkyv /// treats this as an error by default. Pending, /// This value has already been shared. The caller should use the returned /// address to share its value. Finished(usize), } /// A shared pointer serialization strategy. /// /// This trait is required to serialize `Rc` and `Arc`. pub trait Sharing::Error> { /// Starts sharing the value associated with the given address. fn start_sharing(&mut self, address: usize) -> SharingState; /// Finishes sharing the value associated with the given address. /// /// Returns an error if the given address was not pending. 
fn finish_sharing(&mut self, address: usize, pos: usize) -> Result<(), E>; } impl Sharing for &mut T where T: Sharing + ?Sized, { fn start_sharing(&mut self, address: usize) -> SharingState { T::start_sharing(*self, address) } fn finish_sharing(&mut self, address: usize, pos: usize) -> Result<(), E> { T::finish_sharing(*self, address, pos) } } impl Sharing for Strategy where T: Sharing + ?Sized, { fn start_sharing(&mut self, address: usize) -> SharingState { T::start_sharing(self, address) } fn finish_sharing(&mut self, address: usize, pos: usize) -> Result<(), E> { T::finish_sharing(self, address, pos) } } #[derive(Debug)] struct CyclicSharedPointerError; impl fmt::Display for CyclicSharedPointerError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "encountered cyclic shared pointers while serializing\nhelp: \ change your serialization strategy to `Unshare` or use the \ `Unshare` wrapper type to break the cycle", ) } } impl Error for CyclicSharedPointerError {} /// Helper methods for [`Sharing`]. pub trait SharingExt: Sharing { /// Serializes the given shared value and returns its position. If the value /// has already been serialized then it returns the position of the /// previously added value. /// /// Returns an error if cyclic shared pointers are encountered. 
fn serialize_shared + ?Sized>( &mut self, value: &T, ) -> Result::Error> where Self: Fallible, E: Source, { let addr = value as *const T as *const () as usize; match self.start_sharing(addr) { SharingState::Started => { let pos = value.serialize_unsized(self)?; self.finish_sharing(addr, pos)?; Ok(pos) } SharingState::Pending => fail!(CyclicSharedPointerError), SharingState::Finished(pos) => Ok(pos), } } } impl SharingExt for S where S: Sharing + ?Sized {} rkyv-0.8.9/src/ser/writer/alloc.rs000064400000000000000000000012001046102023000151620ustar 00000000000000use crate::{ alloc::vec::Vec, ser::{Positional, Writer}, util::AlignedVec, }; impl Positional for Vec { #[inline] fn pos(&self) -> usize { self.len() } } impl Writer for Vec { fn write(&mut self, bytes: &[u8]) -> Result<(), E> { self.extend_from_slice(bytes); Ok(()) } } impl Positional for AlignedVec { #[inline] fn pos(&self) -> usize { self.len() } } impl Writer for AlignedVec { fn write(&mut self, bytes: &[u8]) -> Result<(), E> { self.extend_from_slice(bytes); Ok(()) } } rkyv-0.8.9/src/ser/writer/core.rs000064400000000000000000000115461046102023000150360ustar 00000000000000use core::{ error::Error, fmt, marker::PhantomData, mem::MaybeUninit, ops::{Deref, DerefMut}, ptr::{copy_nonoverlapping, NonNull}, slice, }; use rancor::{fail, Source}; use crate::ser::{Positional, Writer}; #[derive(Debug)] struct BufferOverflow { write_len: usize, cap: usize, len: usize, } impl fmt::Display for BufferOverflow { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "overflowed buffer while writing {} bytes into buffer of length \ {} (capacity is {})", self.write_len, self.len, self.cap, ) } } impl Error for BufferOverflow {} /// Wraps a byte buffer and equips it with [`Writer`]. /// /// Common uses include archiving in `#![no_std]` environments and archiving /// small objects without allocating. 
/// /// # Examples /// /// ``` /// use core::mem::MaybeUninit; /// /// use rkyv::{ /// access_unchecked, /// api::high::to_bytes_in, /// rancor::{Error, Strategy}, /// ser::{writer::Buffer, Writer}, /// util::Align, /// Archive, Archived, Serialize, /// }; /// /// #[derive(Archive, Serialize)] /// enum Event { /// Spawn, /// Speak(String), /// Die, /// } /// /// let event = Event::Speak("Help me!".to_string()); /// let mut bytes = Align([MaybeUninit::uninit(); 256]); /// let buffer = to_bytes_in::<_, Error>(&event, Buffer::from(&mut *bytes)) /// .expect("failed to serialize event"); /// let archived = unsafe { access_unchecked::>(&buffer) }; /// if let Archived::::Speak(message) = archived { /// assert_eq!(message.as_str(), "Help me!"); /// } else { /// panic!("archived event was of the wrong type"); /// } /// ``` #[derive(Debug)] pub struct Buffer<'a> { ptr: NonNull, cap: usize, len: usize, _phantom: PhantomData<&'a mut [u8]>, } impl<'a, const N: usize> From<&'a mut [u8; N]> for Buffer<'a> { fn from(bytes: &'a mut [u8; N]) -> Self { Self { ptr: NonNull::from(bytes).cast(), cap: N, len: 0, _phantom: PhantomData, } } } impl<'a> From<&'a mut [u8]> for Buffer<'a> { fn from(bytes: &'a mut [u8]) -> Self { let size = bytes.len(); Self { ptr: NonNull::from(bytes).cast(), cap: size, len: 0, _phantom: PhantomData, } } } impl<'a, const N: usize> From<&'a mut [MaybeUninit; N]> for Buffer<'a> { fn from(bytes: &'a mut [MaybeUninit; N]) -> Self { Self { ptr: NonNull::from(bytes).cast(), cap: N, len: 0, _phantom: PhantomData, } } } impl<'a> From<&'a mut [MaybeUninit]> for Buffer<'a> { fn from(bytes: &'a mut [MaybeUninit]) -> Self { let size = bytes.len(); Self { ptr: NonNull::from(bytes).cast(), cap: size, len: 0, _phantom: PhantomData, } } } impl Deref for Buffer<'_> { type Target = [u8]; fn deref(&self) -> &Self::Target { unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } } impl DerefMut for Buffer<'_> { fn deref_mut(&mut self) -> &mut Self::Target { unsafe { 
slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } } } impl Positional for Buffer<'_> { #[inline] fn pos(&self) -> usize { self.len } } impl Writer for Buffer<'_> { fn write(&mut self, bytes: &[u8]) -> Result<(), E> { if bytes.len() > self.cap - self.len { fail!(BufferOverflow { write_len: bytes.len(), cap: self.cap, len: self.len, }); } else { unsafe { copy_nonoverlapping( bytes.as_ptr(), self.ptr.as_ptr().add(self.len), bytes.len(), ); } self.len += bytes.len(); Ok(()) } } } #[cfg(test)] mod tests { use core::mem::MaybeUninit; use rancor::Panic; use crate::{ api::serialize_using, ser::{writer::Buffer, Serializer}, }; #[test] fn zeros_padding() { use core::mem::size_of; use crate::{Archive, Serialize}; #[derive(Archive, Serialize)] #[rkyv(crate)] pub struct PaddedExample { a: u8, b: u64, } let mut bytes = [MaybeUninit::::new(0xcc); 256]; let mut serializer = Serializer::new(Buffer::from(&mut bytes), (), ()); serialize_using::<_, Panic>( &PaddedExample { a: 0u8, b: 0u64 }, &mut serializer, ) .unwrap(); let buffer = serializer.into_writer(); assert!(&buffer[0..size_of::()] .iter() .all(|&b| b == 0)); } } rkyv-0.8.9/src/ser/writer/mod.rs000064400000000000000000000132701046102023000146610ustar 00000000000000//! Writing backends for serializers. #[cfg(feature = "alloc")] mod alloc; mod core; #[cfg(feature = "std")] mod std; use ::core::mem; use rancor::{Fallible, Strategy}; pub use self::core::*; #[cfg(feature = "std")] pub use self::std::*; use crate::{Archive, ArchiveUnsized, Place, RelPtr}; /// A writer that knows its current position. pub trait Positional { /// Returns the current position of the writer. 
fn pos(&self) -> usize; } impl Positional for &T where T: Positional + ?Sized, { fn pos(&self) -> usize { T::pos(*self) } } impl Positional for &mut T where T: Positional + ?Sized, { fn pos(&self) -> usize { T::pos(*self) } } impl Positional for Strategy where T: Positional + ?Sized, { fn pos(&self) -> usize { T::pos(self) } } /// A type that writes bytes to some output. /// /// A type that is [`Write`](::std::io::Write) can be wrapped in an [`IoWriter`] /// to equip it with `Write`. /// /// It's important that the memory for archived objects is properly aligned /// before attempting to read objects out of it; use an /// [`AlignedVec`](crate::util::AlignedVec) or the [`Align`](crate::util::Align) /// wrapper as appropriate. pub trait Writer::Error>: Positional { /// Attempts to write the given bytes to the serializer. fn write(&mut self, bytes: &[u8]) -> Result<(), E>; } impl Writer for &mut T where T: Writer + ?Sized, { fn write(&mut self, bytes: &[u8]) -> Result<(), E> { T::write(*self, bytes) } } impl Writer for Strategy where T: Writer + ?Sized, { fn write(&mut self, bytes: &[u8]) -> Result<(), E> { T::write(self, bytes) } } /// Helper methods for [`Writer`]. pub trait WriterExt: Writer { /// Advances the given number of bytes as padding. fn pad(&mut self, padding: usize) -> Result<(), E> { const MAX_ZEROS: usize = 32; const ZEROS: [u8; MAX_ZEROS] = [0; MAX_ZEROS]; debug_assert!(padding < MAX_ZEROS); self.write(&ZEROS[0..padding]) } /// Aligns the position of the serializer to the given alignment. fn align(&mut self, align: usize) -> Result { let mask = align - 1; debug_assert_eq!(align & mask, 0); self.pad((align - (self.pos() & mask)) & mask)?; Ok(self.pos()) } /// Aligns the position of the serializer to be suitable to write the given /// type. fn align_for(&mut self) -> Result { self.align(mem::align_of::()) } /// Resolves the given value with its resolver and writes the archived type. /// /// Returns the position of the written archived type. 
/// /// # Safety /// /// - `resolver` must be the result of serializing `value` /// - The serializer must be aligned for a `T::Archived` unsafe fn resolve_aligned( &mut self, value: &T, resolver: T::Resolver, ) -> Result { let pos = self.pos(); debug_assert_eq!(pos & (mem::align_of::() - 1), 0); let mut resolved = mem::MaybeUninit::::uninit(); // SAFETY: `resolved` is properly aligned and valid for writes of // `size_of::()` bytes. unsafe { resolved.as_mut_ptr().write_bytes(0, 1); } // SAFETY: `resolved.as_mut_ptr()` points to a local zeroed // `MaybeUninit`, and so is properly aligned, dereferenceable, and all // of its bytes are initialized. let out = unsafe { Place::new_unchecked(pos, resolved.as_mut_ptr()) }; value.resolve(resolver, out); self.write(out.as_slice())?; Ok(pos) } /// Resolves the given reference with its resolver and writes the archived /// reference. /// /// Returns the position of the written archived `RelPtr`. /// /// # Safety /// /// The serializer must be aligned for a `RelPtr`. unsafe fn resolve_unsized_aligned( &mut self, value: &T, to: usize, ) -> Result { let from = self.pos(); debug_assert_eq!( from & (mem::align_of::>() - 1), 0 ); let mut resolved = mem::MaybeUninit::>::uninit(); // SAFETY: `resolved` is properly aligned and valid for writes of // `size_of::>()` bytes. unsafe { resolved.as_mut_ptr().write_bytes(0, 1); } // SAFETY: `resolved.as_mut_ptr()` points to a local zeroed // `MaybeUninit`, and so is properly aligned, dereferenceable, and all // of its bytes are initialized. 
let out = unsafe { Place::new_unchecked(from, resolved.as_mut_ptr()) }; RelPtr::emplace_unsized(to, value.archived_metadata(), out); self.write(out.as_slice())?; Ok(from) } } impl WriterExt for T where T: Writer + ?Sized {} #[cfg(test)] mod tests { #[cfg(feature = "alloc")] #[test] fn reusable_writer() { use rend::{u16_le, u32_le}; use crate::{api::high::to_bytes_in, util::AlignedVec}; let mut writer = AlignedVec::<16>::new(); _ = to_bytes_in::<_, rancor::Error>( &u32_le::from_native(42), &mut writer, ); assert_eq!(&writer[..], &[42, 0, 0, 0]); writer.clear(); // keeps capacity of 4 _ = to_bytes_in::<_, rancor::Error>( &u16_le::from_native(1337), &mut writer, ); assert_eq!(&writer[..], &[57, 5]); writer.clear(); assert_eq!(writer.capacity(), 4); } } rkyv-0.8.9/src/ser/writer/std.rs000064400000000000000000000043531046102023000146760ustar 00000000000000use std::io; use rancor::{ResultExt as _, Source}; use crate::ser::{Positional, Writer}; /// Wraps a type that implements [`io::Write`](std::io::Write) and equips it /// with [`Writer`]. /// /// # Examples /// ``` /// # use rkyv::ser::{Writer, Positional, writer::IoWriter}; /// use rkyv::rancor::{Error, Strategy}; /// let mut io_writer = IoWriter::new(Vec::new()); /// // In most cases, calling a method like `serialize` will wrap the writer in /// // a Strategy for us. /// let mut writer = Strategy::<_, Error>::wrap(&mut io_writer); /// assert_eq!(writer.pos(), 0); /// writer.write(&[0u8, 1u8, 2u8, 3u8]); /// assert_eq!(writer.pos(), 4); /// let buf = io_writer.into_inner(); /// assert_eq!(buf.len(), 4); /// assert_eq!(buf, vec![0u8, 1u8, 2u8, 3u8]); /// ``` #[derive(Debug)] pub struct IoWriter { inner: W, pos: usize, } impl IoWriter { /// Creates a new serializer from a writer. pub fn new(inner: W) -> Self { Self::with_pos(inner, 0) } /// Creates a new serializer from a writer, and assumes that the underlying /// writer is currently at the given position. 
pub fn with_pos(inner: W, pos: usize) -> Self { Self { inner, pos } } /// Consumes the serializer and returns the internal writer used to create /// it. pub fn into_inner(self) -> W { self.inner } } impl Positional for IoWriter { fn pos(&self) -> usize { self.pos } } impl Writer for IoWriter { fn write(&mut self, bytes: &[u8]) -> Result<(), E> { self.inner.write_all(bytes).into_error()?; self.pos += bytes.len(); Ok(()) } } #[cfg(test)] mod tests { use rancor::Failure; use crate::{ api::serialize_using, ser::writer::IoWriter, util::Align, Archive, Serialize, }; #[test] fn write_serializer() { #[derive(Archive, Serialize)] #[rkyv(crate, attr(repr(C)))] struct Example { x: i32, } let mut buf = Align([0u8; 3]); let mut ser = IoWriter::new(&mut buf[..]); let foo = Example { x: 100 }; serialize_using::<_, Failure>(&foo, &mut ser) .expect_err("serialized to an undersized buffer must fail"); } } rkyv-0.8.9/src/simd/generic.rs000064400000000000000000000046511046102023000143500ustar 00000000000000use core::mem::size_of; #[cfg(any( target_pointer_width = "64", target_arch = "aarch64", target_arch = "x86_64", target_arch = "wasm32", ))] mod detail { pub type Word = u64; pub type NonZeroWord = core::num::NonZeroU64; } #[cfg(not(any( target_pointer_width = "64", target_arch = "aarch64", target_arch = "x86_64", target_arch = "wasm32", )))] mod detail { pub type Word = u32; pub type NonZeroWord = core::num::NonZeroU32; } use detail::*; #[derive(Clone, Copy)] pub struct Bitmask(Word); impl Bitmask { pub const EMPTY: Self = Bitmask(0); #[inline] pub fn any_bit_set(self) -> bool { self.0 != 0 } #[inline] pub fn remove_lowest_bit(self) -> Self { Self(self.0 & (self.0 - 1)) } #[inline] pub fn lowest_set_bit(self) -> Option { let nonzero = NonZeroWord::new(self.0)?; Some(nonzero.trailing_zeros() as usize / 8) } } impl Iterator for Bitmask { type Item = usize; fn next(&mut self) -> Option { let bit = self.lowest_set_bit()?; *self = self.remove_lowest_bit(); Some(bit) } } #[derive(Clone, 
Copy)] pub struct Group(Word); impl Group { pub const WIDTH: usize = size_of::(); const fn repeat(byte: u8) -> Word { Word::from_ne_bytes([byte; Self::WIDTH]) } /// # Safety /// /// `ptr` must be valid for reads and point to enough bytes for a `Word`. #[inline] pub unsafe fn read(ptr: *const u8) -> Self { // SAFETY: The caller has guaranteed that `ptr` is valid for reads and // points to enough bytes for a `Word`. #[cfg(target_endian = "little")] unsafe { Self(core::ptr::read_unaligned(ptr.cast())) } #[cfg(target_endian = "big")] unsafe { Self(core::ptr::read_unaligned(ptr.cast::()).swap_bytes()) } } #[inline] pub fn match_byte(self, byte: u8) -> Bitmask { let zero_mask = self.0 ^ Self::repeat(byte); let bits = zero_mask.wrapping_sub(Self::repeat(0x01)) & !zero_mask & Self::repeat(0x80); Bitmask(bits) } #[inline] pub fn match_empty(self) -> Bitmask { let bits = self.0 & Self::repeat(0x80); Bitmask(bits) } #[inline] pub fn match_full(self) -> Bitmask { let bits = !self.0 & Self::repeat(0x80); Bitmask(bits) } } rkyv-0.8.9/src/simd/mod.rs000064400000000000000000000032601046102023000135060ustar 00000000000000#[cfg(all( target_feature = "sse2", any(target_arch = "x86", target_arch = "x86_64"), not(miri), ))] mod sse2; #[cfg(all( target_feature = "sse2", any(target_arch = "x86", target_arch = "x86_64"), not(miri), ))] pub use self::sse2::*; #[cfg(all( target_feature = "neon", target_arch = "aarch64", // NEON intrinsics are currently broken on big-endian targets. // See https://github.com/rust-lang/stdarch/issues/1484. target_endian = "little", not(miri), ))] mod neon; #[cfg(all( target_feature = "neon", target_arch = "aarch64", // NEON intrinsics are currently broken on big-endian targets. // See https://github.com/rust-lang/stdarch/issues/1484. 
target_endian = "little", not(miri), ))] pub use self::neon::*; #[cfg(all( not(all( target_feature = "sse2", any(target_arch = "x86", target_arch = "x86_64"), not(miri), )), not(all( target_feature = "neon", target_arch = "aarch64", // NEON intrinsics are currently broken on big-endian targets. // See https://github.com/rust-lang/stdarch/issues/1484. target_endian = "little", not(miri), )), ))] mod generic; #[cfg(all( not(all( target_feature = "sse2", any(target_arch = "x86", target_arch = "x86_64"), not(miri), )), not(all( target_feature = "neon", target_arch = "aarch64", // NEON intrinsics are currently broken on big-endian targets. // See https://github.com/rust-lang/stdarch/issues/1484. target_endian = "little", not(miri), )), ))] pub use self::generic::*; pub const MAX_GROUP_WIDTH: usize = 16; rkyv-0.8.9/src/simd/neon.rs000064400000000000000000000044531046102023000136730ustar 00000000000000use core::{arch::aarch64, mem::size_of, num::NonZeroU64}; type Word = aarch64::uint8x16_t; #[derive(Clone, Copy)] pub struct Bitmask(u64); impl Bitmask { pub const EMPTY: Self = Self(0); #[inline] pub fn any_bit_set(self) -> bool { self.0 != 0 } #[inline] pub fn remove_lowest_bit(self) -> Self { Self(self.0 & (self.0 - 1)) } #[inline] pub fn lowest_set_bit(self) -> Option { let nonzero = NonZeroU64::new(self.0)?; Some(nonzero.trailing_zeros() as usize / 4) } } impl Iterator for Bitmask { type Item = usize; fn next(&mut self) -> Option { let bit = self.lowest_set_bit()?; *self = self.remove_lowest_bit(); Some(bit) } } #[derive(Clone, Copy)] pub struct Group(Word); impl Group { pub const WIDTH: usize = size_of::(); /// # Safety /// /// `ptr` must be valid for reads and point to enough bytes for a `Word`. #[inline] pub unsafe fn read(ptr: *const u8) -> Self { // SAFETY: The caller has guaranteed that `ptr` is valid for reads and // points to enough bytes for a `Word`. 
unsafe { Self(aarch64::vld1q_u8(ptr)) } } #[inline] fn unpack(cmp: Word) -> Bitmask { // 0xFF_FF_FF_00_00_FF_00_00 => 0xFF_F0_0F_00 let nibbles = unsafe { aarch64::vshrn_n_u16(aarch64::vreinterpretq_u16_u8(cmp), 4) }; // 0xFF_F0_0F_00 => 0x88_80_08_00 let bits = unsafe { aarch64::vand_u8(nibbles, aarch64::vdup_n_u8(0x88)) }; // 0x88_80_08_00 => 0x88800800 let result = unsafe { aarch64::vget_lane_u64(aarch64::vreinterpret_u64_u8(bits), 0) }; Bitmask(result) } #[inline] pub fn match_byte(self, byte: u8) -> Bitmask { unsafe { Self::unpack(aarch64::vceqq_u8(self.0, aarch64::vdupq_n_u8(byte))) } } #[inline] pub fn match_empty(self) -> Bitmask { unsafe { Self::unpack(aarch64::vcltzq_s8(aarch64::vreinterpretq_s8_u8( self.0, ))) } } #[inline] pub fn match_full(self) -> Bitmask { unsafe { Self::unpack(aarch64::vcgezq_s8(aarch64::vreinterpretq_s8_u8( self.0, ))) } } } rkyv-0.8.9/src/simd/sse2.rs000064400000000000000000000034431046102023000136060ustar 00000000000000#[cfg(target_arch = "x86")] use core::arch::x86; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as x86; use core::{mem::size_of, num::NonZeroU16}; type Word = x86::__m128i; #[derive(Clone, Copy)] pub struct Bitmask(u16); impl Bitmask { pub const EMPTY: Self = Self(0); #[inline] pub fn any_bit_set(self) -> bool { self.0 != 0 } #[inline] pub fn remove_lowest_bit(self) -> Self { Self(self.0 & (self.0 - 1)) } #[inline] pub fn lowest_set_bit(self) -> Option { let nonzero = NonZeroU16::new(self.0)?; Some(nonzero.trailing_zeros() as usize) } } impl Iterator for Bitmask { type Item = usize; fn next(&mut self) -> Option { let bit = self.lowest_set_bit()?; *self = self.remove_lowest_bit(); Some(bit) } } #[derive(Clone, Copy)] pub struct Group(Word); impl Group { pub const WIDTH: usize = size_of::(); /// # Safety /// /// `ptr` must be valid for reads and point to enough bytes for a `Word`. 
#[inline] pub unsafe fn read(ptr: *const u8) -> Self { // SAFETY: The caller has guaranteed that `ptr` is valid for reads and // points to enough bytes for a `Word`. unsafe { Self(x86::_mm_loadu_si128(ptr.cast())) } } #[inline] pub fn match_byte(self, byte: u8) -> Bitmask { unsafe { let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(byte as i8)); Bitmask(x86::_mm_movemask_epi8(cmp) as u16) } } #[inline] pub fn match_empty(self) -> Bitmask { unsafe { Bitmask(x86::_mm_movemask_epi8(self.0) as u16) } } #[inline] pub fn match_full(self) -> Bitmask { unsafe { Bitmask(!x86::_mm_movemask_epi8(self.0) as u16) } } } rkyv-0.8.9/src/string/mod.rs000064400000000000000000000167771046102023000141010ustar 00000000000000//! Archived versions of string types. pub mod repr; use core::{ borrow::Borrow, cmp, error::Error, fmt, hash, ops::{ Deref, Index, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive, }, str, }; use munge::munge; use rancor::{fail, Fallible, Source}; use repr::{ArchivedStringRepr, INLINE_CAPACITY}; use crate::{ primitive::FixedUsize, seal::Seal, Place, Portable, SerializeUnsized, }; /// An archived [`String`]. /// /// This has inline and out-of-line representations. Short strings will use the /// available space inside the structure to store the string, and long strings /// will store a [`RelPtr`](crate::RelPtr) to a `str` instead. #[repr(transparent)] #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] #[derive(Portable)] #[rkyv(crate)] pub struct ArchivedString { repr: ArchivedStringRepr, } impl ArchivedString { /// Extracts a string slice containing the entire `ArchivedString`. #[inline] pub fn as_str(&self) -> &str { self.repr.as_str() } /// Extracts a sealed mutable string slice containing the entire /// `ArchivedString`. 
#[inline] pub fn as_str_seal(this: Seal<'_, Self>) -> Seal<'_, str> { munge!(let Self { repr } = this); ArchivedStringRepr::as_str_seal(repr) } /// Resolves an archived string from a given `str`. #[inline] pub fn resolve_from_str( value: &str, resolver: StringResolver, out: Place, ) { munge!(let ArchivedString { repr } = out); if value.len() <= repr::INLINE_CAPACITY { unsafe { ArchivedStringRepr::emplace_inline(value, repr.ptr()); } } else { unsafe { ArchivedStringRepr::emplace_out_of_line( value, resolver.pos as usize, repr, ); } } } /// Serializes an archived string from a given `str`. pub fn serialize_from_str( value: &str, serializer: &mut S, ) -> Result where S::Error: Source, str: SerializeUnsized, { if value.len() <= INLINE_CAPACITY { Ok(StringResolver { pos: 0 }) } else if value.len() > repr::OUT_OF_LINE_CAPACITY { #[derive(Debug)] struct StringTooLongError; impl fmt::Display for StringTooLongError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "String was too long for the archived representation", ) } } impl Error for StringTooLongError {} fail!(StringTooLongError); } else { Ok(StringResolver { pos: value.serialize_unsized(serializer)? as FixedUsize, }) } } } impl AsRef for ArchivedString { #[inline] fn as_ref(&self) -> &str { self.as_str() } } impl Borrow for ArchivedString { #[inline] fn borrow(&self) -> &str { self.as_str() } } impl fmt::Debug for ArchivedString { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(self.as_str(), f) } } impl Deref for ArchivedString { type Target = str; #[inline] fn deref(&self) -> &Self::Target { self.as_str() } } impl fmt::Display for ArchivedString { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self.as_str(), f) } } impl Eq for ArchivedString {} impl hash::Hash for ArchivedString { fn hash(&self, state: &mut H) { self.as_str().hash(state) } } macro_rules! 
impl_index { ($index:ty) => { impl Index<$index> for ArchivedString { type Output = str; #[inline] fn index(&self, index: $index) -> &Self::Output { self.as_str().index(index) } } }; } impl_index!(Range); impl_index!(RangeFrom); impl_index!(RangeFull); impl_index!(RangeInclusive); impl_index!(RangeTo); impl_index!(RangeToInclusive); impl Ord for ArchivedString { #[inline] fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_str().cmp(other.as_str()) } } impl PartialEq for ArchivedString { #[inline] fn eq(&self, other: &Self) -> bool { self.as_str() == other.as_str() } } impl PartialOrd for ArchivedString { #[inline] fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } impl PartialEq<&str> for ArchivedString { #[inline] fn eq(&self, other: &&str) -> bool { PartialEq::eq(self.as_str(), *other) } } impl PartialEq for ArchivedString { #[inline] fn eq(&self, other: &str) -> bool { PartialEq::eq(self.as_str(), other) } } impl PartialEq for &str { #[inline] fn eq(&self, other: &ArchivedString) -> bool { PartialEq::eq(other.as_str(), *self) } } impl PartialEq for str { #[inline] fn eq(&self, other: &ArchivedString) -> bool { PartialEq::eq(other.as_str(), self) } } impl PartialOrd<&str> for ArchivedString { #[inline] fn partial_cmp(&self, other: &&str) -> Option { self.as_str().partial_cmp(*other) } } impl PartialOrd for ArchivedString { #[inline] fn partial_cmp(&self, other: &str) -> Option { self.as_str().partial_cmp(other) } } impl PartialOrd for &str { #[inline] fn partial_cmp(&self, other: &ArchivedString) -> Option { self.partial_cmp(&other.as_str()) } } impl PartialOrd for str { #[inline] fn partial_cmp(&self, other: &ArchivedString) -> Option { self.partial_cmp(other.as_str()) } } /// The resolver for `String`. 
pub struct StringResolver { pos: FixedUsize, } #[cfg(feature = "bytecheck")] mod verify { use bytecheck::{ rancor::{Fallible, Source}, CheckBytes, Verify, }; use crate::{ string::{repr::ArchivedStringRepr, ArchivedString}, validation::{ArchiveContext, ArchiveContextExt}, }; unsafe impl Verify for ArchivedString where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, { fn verify(&self, context: &mut C) -> Result<(), C::Error> { if self.repr.is_inline() { unsafe { str::check_bytes(self.repr.as_str_ptr(), context)?; } } else { let base = (&self.repr as *const ArchivedStringRepr).cast::(); let offset = unsafe { self.repr.out_of_line_offset() }; let metadata = self.repr.len(); let address = base.wrapping_offset(offset).cast::<()>(); let ptr = ptr_meta::from_raw_parts(address, metadata); context.in_subtree(ptr, |context| { // SAFETY: `in_subtree` has guaranteed that `ptr` is // properly aligned and points to enough bytes to represent // the pointed-to `str`. unsafe { str::check_bytes(ptr, context) } })?; } Ok(()) } } } rkyv-0.8.9/src/string/repr.rs000064400000000000000000000212541046102023000142540ustar 00000000000000//! An archived string representation that supports inlining short strings. use core::{ marker::PhantomPinned, mem, ptr::{self, copy_nonoverlapping, write_bytes}, slice, str, }; use munge::munge; use rancor::{Panic, ResultExt as _, Source}; use crate::{ primitive::{ArchivedIsize, ArchivedUsize, FixedIsize, FixedUsize}, seal::Seal, Place, Portable, }; #[derive(Clone, Copy, Portable)] #[rkyv(crate)] #[repr(C)] struct OutOfLineRepr { len: ArchivedUsize, offset: ArchivedIsize, _phantom: PhantomPinned, } /// The maximum number of bytes that can be inlined. pub const INLINE_CAPACITY: usize = mem::size_of::(); /// The maximum number of bytes that can be out-of-line. 
pub const OUT_OF_LINE_CAPACITY: usize = !(0b11 << (FixedUsize::BITS - 2)); #[derive(Clone, Copy, Portable)] #[rkyv(crate)] #[repr(C)] struct InlineRepr { bytes: [u8; INLINE_CAPACITY], } /// An archived string representation that can inline short strings. #[derive(Portable)] #[rkyv(crate)] #[repr(C)] pub union ArchivedStringRepr { out_of_line: OutOfLineRepr, inline: InlineRepr, } impl ArchivedStringRepr { /// Returns whether the representation is inline. #[inline] pub fn is_inline(&self) -> bool { unsafe { self.inline.bytes[0] & 0xc0 != 0x80 } } /// Returns the offset of the representation. /// /// # Safety /// /// The internal representation must be out-of-line. #[inline] pub unsafe fn out_of_line_offset(&self) -> isize { // SAFETY: The caller has guaranteed that the internal representation is // out-of-line unsafe { self.out_of_line.offset.to_native() as isize } } /// Returns a pointer to the bytes of the string. #[inline] pub fn as_ptr(&self) -> *const u8 { if self.is_inline() { unsafe { self.inline.bytes.as_ptr() } } else { unsafe { (self as *const Self) .cast::() .offset(self.out_of_line_offset()) } } } /// Returns a mutable pointer to the bytes of the string. #[inline] pub fn as_mut_ptr(this: Seal<'_, Self>) -> *mut u8 { let this = unsafe { this.unseal_unchecked() }; if this.is_inline() { unsafe { this.inline.bytes.as_mut_ptr() } } else { unsafe { (this as *mut Self) .cast::() .offset(this.out_of_line_offset()) } } } /// Returns the length of the string. #[inline] pub fn len(&self) -> usize { if self.is_inline() { unsafe { self.inline .bytes .iter() .position(|b| *b == 0xff) .unwrap_or(INLINE_CAPACITY) } } else { let len = unsafe { self.out_of_line.len.to_native() }; #[cfg(not(feature = "big_endian"))] let len = (len & 0b0011_1111) | (len & !0xff) >> 2; #[cfg(feature = "big_endian")] let len = len & (FixedUsize::MAX >> 2); len as usize } } /// Returns whether the string is empty. 
#[inline] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns a pointer to the string as a `str`. #[inline] pub fn as_str_ptr(&self) -> *const str { ptr_meta::from_raw_parts(self.as_ptr().cast(), self.len()) } /// Returns a slice of the bytes of the string. #[inline] pub fn as_bytes(&self) -> &[u8] { unsafe { slice::from_raw_parts(self.as_ptr(), self.len()) } } /// Returns a mutable slice of the bytes of the string. #[inline] pub fn as_bytes_seal(this: Seal<'_, Self>) -> Seal<'_, [u8]> { let len = this.len(); let slice = unsafe { slice::from_raw_parts_mut(Self::as_mut_ptr(this), len) }; Seal::new(slice) } /// Returns a reference to the string as a `str`. #[inline] pub fn as_str(&self) -> &str { unsafe { str::from_utf8_unchecked(self.as_bytes()) } } /// Returns a mutable reference to the string as a `str`. #[inline] pub fn as_str_seal(this: Seal<'_, Self>) -> Seal<'_, str> { let bytes = unsafe { Seal::unseal_unchecked(Self::as_bytes_seal(this)) }; Seal::new(unsafe { str::from_utf8_unchecked_mut(bytes) }) } /// Emplaces a new inline representation for the given `str`. /// /// This function is guaranteed not to write any uninitialized bytes to /// `out`. /// /// # Safety /// /// - The length of `value` must be less than or equal to /// [`INLINE_CAPACITY`]. /// - `out` must point to a valid location to write the inline /// representation. #[inline] pub unsafe fn emplace_inline(value: &str, out: *mut Self) { debug_assert!(value.len() <= INLINE_CAPACITY); // SAFETY: The caller has guaranteed that `out` points to a // dereferenceable location. let out_bytes = unsafe { ptr::addr_of_mut!((*out).inline.bytes) }; // SAFETY: The caller has guaranteed that the length of `value` is less // than or equal to `INLINE_CAPACITY`. We know that `out_bytes` is a // valid pointer to bytes because it is a subfield of `out` which the // caller has guaranteed points to a valid location. 
unsafe { write_bytes(out_bytes, 0xff, 1); copy_nonoverlapping( value.as_bytes().as_ptr(), out_bytes.cast(), value.len(), ); } } /// Emplaces a new out-of-line representation for the given `str`. /// /// # Safety /// /// The length of `str` must be greater than [`INLINE_CAPACITY`] and less /// than or equal to [`OUT_OF_LINE_CAPACITY`]. pub unsafe fn try_emplace_out_of_line( value: &str, target: usize, out: Place, ) -> Result<(), E> { munge! { let ArchivedStringRepr { out_of_line: OutOfLineRepr { len, offset, _phantom: _ } } = out; } let l = value.len() as FixedUsize; #[cfg(not(feature = "big_endian"))] let l = (l & 0x3f) | 0b1000_0000 | (l & !0b0011_1111) << 2; #[cfg(feature = "big_endian")] let l = l & (FixedUsize::MAX >> 2) | (1 << FixedUsize::BITS - 1); len.write(ArchivedUsize::from_native(l)); let off = crate::rel_ptr::signed_offset(out.pos(), target)?; offset.write(ArchivedIsize::from_native(off as FixedIsize)); Ok(()) } /// Emplaces a new out-of-line representation for the given `str`. /// /// # Panics /// /// - The offset calculated for the repr does not fit in an `isize` /// - The offset calculated for the repr exceeds the offset storage /// /// # Safety /// /// The length of `str` must be greater than [`INLINE_CAPACITY`] and less /// than or equal to [`OUT_OF_LINE_CAPACITY`]. #[inline] pub unsafe fn emplace_out_of_line( value: &str, target: usize, out: Place, ) { // SAFETY: The safety conditions for `emplace_out_of_line()` are the // same as the safety conditions for `try_emplace_out_of_line()`. unsafe { Self::try_emplace_out_of_line::(value, target, out) .always_ok() } } } #[cfg(feature = "bytecheck")] const _: () = { use core::{error::Error, fmt}; use bytecheck::{rancor::Fallible, CheckBytes}; use rancor::fail; /// An error resulting from an invalid string representation. /// /// Strings that are inline must have a length of at most /// [`INLINE_CAPACITY`]. 
#[derive(Debug)] pub struct CheckStringReprError; impl fmt::Display for CheckStringReprError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "String representation was out-of-line but the length was too \ short", ) } } impl Error for CheckStringReprError {} unsafe impl CheckBytes for ArchivedStringRepr where C: Fallible + ?Sized, C::Error: Source, { unsafe fn check_bytes( value: *const Self, _: &mut C, ) -> Result<(), C::Error> { // SAFETY: The fields of `ArchivedStringRepr` are always valid for // every bit pattern. let repr = unsafe { &*value }; if !repr.is_inline() && repr.len() <= INLINE_CAPACITY { fail!(CheckStringReprError); } else { Ok(()) } } } }; rkyv-0.8.9/src/time.rs000064400000000000000000000132261046102023000127340ustar 00000000000000//! Archived versions of `time` types. use crate::{ primitive::{ArchivedU32, ArchivedU64}, Portable, }; /// An archived [`Duration`](core::time::Duration). #[derive( Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd, Portable, )] #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] #[rkyv(crate)] #[repr(C)] pub struct ArchivedDuration { secs: ArchivedU64, nanos: ArchivedU32, } const NANOS_PER_SEC: u32 = 1_000_000_000; const NANOS_PER_MILLI: u32 = 1_000_000; const NANOS_PER_MICRO: u32 = 1_000; const MILLIS_PER_SEC: u64 = 1_000; const MICROS_PER_SEC: u64 = 1_000_000; impl ArchivedDuration { /// Returns the number of _whole_ seconds contained by this /// `ArchivedDuration`. /// /// The returned value does not include the fractional (nanosecond) part of /// the duration, which can be obtained using [`subsec_nanos`]. /// /// [`subsec_nanos`]: ArchivedDuration::subsec_nanos #[inline] pub const fn as_secs(&self) -> u64 { self.secs.to_native() } /// Returns the fractional part of this `ArchivedDuration`, in whole /// milliseconds. /// /// This method does **not** return the length of the duration when /// represented by milliseconds. 
The returned number always represents a /// fractional portion of a second (i.e., it is less than one thousand). #[inline] pub const fn subsec_millis(&self) -> u32 { self.nanos.to_native() / NANOS_PER_MILLI } /// Returns the fractional part of this `ArchivedDuration`, in whole /// microseconds. /// /// This method does **not** return the length of the duration when /// represented by microseconds. The returned number always represents a /// fractional portion of a second (i.e., it is less than one million). #[inline] pub const fn subsec_micros(&self) -> u32 { self.nanos.to_native() / NANOS_PER_MICRO } /// Returns the fractional part of this `Duration`, in nanoseconds. /// /// This method does **not** return the length of the duration when /// represented by nanoseconds. The returned number always represents a /// fractional portion of a second (i.e., it is less than one billion). #[inline] pub const fn subsec_nanos(&self) -> u32 { self.nanos.to_native() } /// Returns the total number of whole milliseconds contained by this /// `ArchivedDuration`. #[inline] pub const fn as_millis(&self) -> u128 { self.as_secs() as u128 * MILLIS_PER_SEC as u128 + (self.subsec_nanos() / NANOS_PER_MILLI) as u128 } /// Returns the total number of whole microseconds contained by this /// `ArchivedDuration`. #[inline] pub const fn as_micros(&self) -> u128 { self.as_secs() as u128 * MICROS_PER_SEC as u128 + (self.subsec_nanos() / NANOS_PER_MICRO) as u128 } /// Returns the total number of nanoseconds contained by this /// `ArchivedDuration`. #[inline] pub const fn as_nanos(&self) -> u128 { self.as_secs() as u128 * NANOS_PER_SEC as u128 + self.subsec_nanos() as u128 } /// Returns the number of seconds contained by this `ArchivedDuration` as /// `f64`. /// /// The returned value does include the fractional (nanosecond) part of the /// duration. 
#[inline] pub fn as_secs_f64(&self) -> f64 { (self.as_secs() as f64) + (self.subsec_nanos() as f64) / (NANOS_PER_SEC as f64) } /// Returns the number of seconds contained by this `ArchivedDuration` as /// `f32`. /// /// The returned value does include the fractional (nanosecond) part of the /// duration. #[inline] pub fn as_secs_f32(&self) -> f32 { (self.as_secs() as f32) + (self.subsec_nanos() as f32) / (NANOS_PER_SEC as f32) } /// Constructs an archived duration at the given position. /// /// This function is guaranteed not to write any uninitialized bytes to /// `out`. /// /// # Safety /// /// `out` must point to memory suitable for holding an `ArchivedDuration`. #[inline] pub unsafe fn emplace(secs: u64, nanos: u32, out: *mut ArchivedDuration) { use core::ptr::addr_of_mut; let out_secs = unsafe { addr_of_mut!((*out).secs) }; unsafe { out_secs.write(ArchivedU64::from_native(secs)); } let out_nanos = unsafe { addr_of_mut!((*out).nanos) }; unsafe { out_nanos.write(ArchivedU32::from_native(nanos)); } } } #[cfg(feature = "bytecheck")] mod verify { use core::{error::Error, fmt}; use bytecheck::{ rancor::{Fallible, Source}, Verify, }; use rancor::fail; use super::ArchivedDuration; /// An error resulting from an invalid duration. /// /// Durations must have a `nanos` field that is less than one billion. #[derive(Debug)] pub struct DurationError { nanos: u32, } impl fmt::Display for DurationError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "`nanos` field of `Duration` is greater than 1 billion: {}", self.nanos, ) } } impl Error for DurationError {} unsafe impl Verify for ArchivedDuration where C: Fallible + ?Sized, C::Error: Source, { fn verify(&self, _: &mut C) -> Result<(), C::Error> { let nanos = self.nanos.to_native(); if nanos >= 1_000_000_000 { fail!(DurationError { nanos }); } else { Ok(()) } } } } rkyv-0.8.9/src/traits.rs000064400000000000000000000442511046102023000133060ustar 00000000000000//! The core traits provided by rkyv. 
use core::{ alloc::{Layout, LayoutError}, hash::Hash, marker::PhantomData, }; pub use ::rkyv_derive::{Archive, Deserialize, Portable, Serialize}; use crate::{ptr_meta::Pointee, rancor::Fallible, ArchivedMetadata, Place}; /// A type with a stable, well-defined layout that is the same on all targets. /// /// # Safety /// /// The implementing type must have a stable, well-defined layout that is the /// same on all targets. Structs and unions must be `#[repr(transparent)]` or /// `#[repr(C)]`. Enums must be `#[repr(C)]`, `#[repr(int)]`, or `#[repr(C, /// int)]`. /// /// The implementing type must not have interior mutability (i.e. no /// `UnsafeCell`s). pub unsafe trait Portable {} /// A type with no undefined bytes. /// /// # Safety /// /// The bytes of types implementing `NoUndef` must always be well-defined. Among /// other things, this means that `NoUndef` types may not contain padding or /// uninitialized `MaybeUninit`s. pub unsafe trait NoUndef {} // SAFETY: An array of values which are all fully-initialized is also // fully-initalized. unsafe impl NoUndef for [T; N] {} /// Returns the layout of a type from its metadata. pub trait LayoutRaw where Self: Pointee, { /// Returns the layout of the type. fn layout_raw( metadata: ::Metadata, ) -> Result; } /// An optimization hint about whether `T` is trivially copyable. pub struct CopyOptimization(bool, PhantomData); impl CopyOptimization { /// Returns a `CopyOptimization` hint with the optimization enabled for `T`. /// /// # Safety /// /// `T` must not have any uninit bytes (e.g. padding). pub const unsafe fn enable() -> Self { Self(true, PhantomData) } /// Returns a `CopyOptimization` hint with the optimization enabled for `T` /// if `value` is `true`. /// /// # Safety /// /// `T` must not have any uninit bytes (e.g. padding) if `value` is `true`. pub const unsafe fn enable_if(value: bool) -> Self { Self(value, PhantomData) } /// Returns a `CopyOptimization` hint with the optimization disabled for /// `T`. 
pub const fn disable() -> Self { Self(false, PhantomData) } /// Returns whether the optimization is enabled for `T`. pub const fn is_enabled(&self) -> bool { self.0 } } /// A type that can be used without deserializing. /// /// `Archive` is one of three basic traits used to work with zero-copy data and /// controls the layout of the data in its archived zero-copy representation. /// The [`Serialize`] trait helps transform types into that representation, and /// the [`Deserialize`] trait helps transform types back out. /// /// Types that implement `Archive` must have a well-defined archived size. /// Unsized types can be supported using the [`ArchiveUnsized`] trait, along /// with [`SerializeUnsized`] and [`DeserializeUnsized`]. /// /// Archiving is done depth-first, writing any data owned by a type before /// writing the data for the type itself. The type must be able to create the /// archived type from only its own data and its resolver. /// /// Archived data is always treated as if it is tree-shaped, with the root /// owning its direct descendents and so on. Data that is not tree-shaped can be /// supported using special serializer and deserializer bounds (see /// [`ArchivedRc`](crate::rc::ArchivedRc) for example). In a buffer of /// serialized data, objects are laid out in *reverse order*. This means that /// the root object is located near the end of the buffer and leaf objects are /// located near the beginning. /// /// # Examples /// /// Most of the time, `#[derive(Archive)]` will create an acceptable /// implementation. You can use the `#[rkyv(...)]` attribute to control how the /// implementation is generated. See the [`Archive`](macro@crate::Archive) /// derive macro for more details. 
#[doc = concat!("```\n", include_str!("../examples/readme.rs"), "```\n")]
/// _Note: the safe API requires the `bytecheck` feature._
///
/// Many of the core and standard library types already have `Archive`
/// implementations available, but you may need to implement `Archive` for your
/// own types in some cases the derive macro cannot handle.
///
/// In this example, we add our own wrapper that serializes a `&'static str` as
/// if it's owned. Normally you can lean on the archived version of `String` to
/// do most of the work, or use the [`Inline`](crate::with::Inline) to do
/// exactly this. This example does everything to demonstrate how to implement
/// `Archive` for your own types.
/// ```
/// use core::{slice, str};
///
/// use rkyv::{
///     access_unchecked,
///     rancor::{Error, Fallible},
///     ser::Writer,
///     to_bytes,
///     Archive, ArchiveUnsized, Archived, Portable, RelPtr, Serialize,
///     SerializeUnsized, munge::munge, Place,
/// };
///
/// struct OwnedStr {
///     inner: &'static str,
/// }
///
/// #[derive(Portable)]
/// #[repr(transparent)]
/// struct ArchivedOwnedStr {
///     // This will be a relative pointer to our string
///     ptr: RelPtr<str>,
/// }
///
/// impl ArchivedOwnedStr {
///     // This will help us get the bytes of our type as a str again.
///     fn as_str(&self) -> &str {
///         unsafe {
///             // The as_ptr() function of RelPtr will get a pointer the str
///             &*self.ptr.as_ptr()
///         }
///     }
/// }
///
/// struct OwnedStrResolver {
///     // This will be the position that the bytes of our string are stored at.
///     // We'll use this to resolve the relative pointer of our
///     // ArchivedOwnedStr.
///     pos: usize,
/// }
///
/// // The Archive implementation defines the archived version of our type and
/// // determines how to turn the resolver into the archived form. The Serialize
/// // implementations determine how to make a resolver from the original value.
/// impl Archive for OwnedStr {
///     type Archived = ArchivedOwnedStr;
///     // This is the resolver we can create our Archived version from.
///     type Resolver = OwnedStrResolver;
///
///     // The resolve function consumes the resolver and produces the archived
///     // value at the given position.
///     fn resolve(
///         &self,
///         resolver: Self::Resolver,
///         out: Place<Self::Archived>,
///     ) {
///         munge!(let ArchivedOwnedStr { ptr } = out);
///         RelPtr::emplace_unsized(
///             resolver.pos,
///             self.inner.archived_metadata(),
///             ptr,
///         );
///     }
/// }
///
/// // We restrict our serializer types with Writer because we need its
/// // capabilities to serialize the inner string. For other types, we might
/// // need more or less restrictive bounds on the type of S.
/// impl<S: Fallible + Writer + ?Sized> Serialize<S> for OwnedStr {
///     fn serialize(
///         &self,
///         serializer: &mut S,
///     ) -> Result<Self::Resolver, S::Error> {
///         // This is where we want to write the bytes of our string and return
///         // a resolver that knows where those bytes were written.
///         // We also need to serialize the metadata for our str.
///         Ok(OwnedStrResolver {
///             pos: self.inner.serialize_unsized(serializer)?,
///         })
///     }
/// }
///
/// const STR_VAL: &'static str = "I'm in an OwnedStr!";
/// let value = OwnedStr { inner: STR_VAL };
/// // It works!
/// let buf = to_bytes::<Error>(&value).expect("failed to serialize");
/// let archived =
///     unsafe { access_unchecked::<ArchivedOwnedStr>(buf.as_ref()) };
/// // Let's make sure our data got written correctly
/// assert_eq!(archived.as_str(), STR_VAL);
/// ```
pub trait Archive {
    /// An optimization flag that allows the bytes of this type to be copied
    /// directly to a writer instead of calling `serialize`.
    ///
    /// This optimization is disabled by default. To enable this optimization,
    /// you must unsafely attest that `Self` is trivially copyable using
    /// [`CopyOptimization::enable`] or [`CopyOptimization::enable_if`].
    const COPY_OPTIMIZATION: CopyOptimization<Self> =
        CopyOptimization::disable();

    /// The archived representation of this type.
    ///
    /// In this form, the data can be used with zero-copy deserialization.
    type Archived: Portable;

    /// The resolver for this type. It must contain all the additional
    /// information from serializing needed to make the archived type from
    /// the normal type.
    type Resolver;

    /// Creates the archived version of this value at the given position and
    /// writes it to the given output.
    ///
    /// The output should be initialized field-by-field rather than by writing a
    /// whole struct. Performing a typed copy will mark all of the padding
    /// bytes as uninitialized, but they must remain set to the value they
    /// currently have. This prevents leaking uninitialized memory to
    /// the final archive.
    fn resolve(&self, resolver: Self::Resolver, out: Place<Self::Archived>);
}

/// Converts a type to its archived form.
///
/// Objects perform any supportive serialization during
/// [`serialize`](Serialize::serialize). For types that reference nonlocal
/// (pointed-to) data, this is when that data must be serialized to the output.
/// These types will need to bound `S` to implement
/// [`Writer`](crate::ser::Writer) and any other required traits (e.g.
/// [`Sharing`](crate::ser::Sharing)). They should then serialize their
/// dependencies during `serialize`.
///
/// See [`Archive`] for examples of implementing `Serialize`.
pub trait Serialize<S: Fallible + ?Sized>: Archive {
    /// Writes the dependencies for the object and returns a resolver that can
    /// create the archived type.
    fn serialize(
        &self,
        serializer: &mut S,
    ) -> Result<Self::Resolver, S::Error>;
}

/// Converts a type back from its archived form.
///
/// Some types may require specific deserializer capabilities, such as `Rc` and
/// `Arc`. In these cases, the deserializer type `D` should be bound so that it
/// implements traits that provide those capabilities (e.g.
/// [`Pooling`](crate::de::Pooling)).
///
/// This can be derived with [`Deserialize`](macro@crate::Deserialize).
pub trait Deserialize { /// Deserializes using the given deserializer fn deserialize(&self, deserializer: &mut D) -> Result; } /// A counterpart of [`Archive`] that's suitable for unsized types. /// /// Unlike `Archive`, types that implement `ArchiveUnsized` must be serialized /// separately from their owning object. For example, whereas an `i32` might be /// laid out as part of a larger struct, a `Box` would serialize the `i32` /// somewhere in the archive and the `Box` would point to it as part of the /// larger struct. Because of this, the equivalent /// [`Resolver`](Archive::Resolver) type for `ArchiveUnsized` is always a /// `usize` representing the position of the serialized value. /// /// `ArchiveUnsized` is automatically implemented for all types that implement /// [`Archive`]. Nothing special needs to be done to use them with types like /// `Box`, `Rc`, and `Arc`. It is also already implemented for slices and string /// slices, and the `rkyv_dyn` crate can be used to archive trait objects. Other /// unsized types must manually implement `ArchiveUnsized`. /// /// # Examples /// /// This example shows how to manually implement `ArchiveUnsized` for an unsized /// type. Special care must be taken to ensure that the types are laid out /// correctly. 
/// /// ``` /// use core::ops::{Deref, DerefMut}; /// /// use ptr_meta::Pointee; /// use rkyv::{ /// access_unchecked, /// primitive::ArchivedUsize, /// rancor::{Error, Fallible}, /// ser::{Positional, Writer, WriterExt as _}, /// to_bytes, /// traits::ArchivePointee, /// Archive, ArchiveUnsized, Archived, ArchivedMetadata, Portable, RelPtr, /// Serialize, SerializeUnsized, /// }; /// /// // We're going to be dealing mostly with blocks that have a trailing slice /// #[derive(Portable)] /// #[repr(C)] /// pub struct Block { /// head: H, /// tail: T, /// } /// /// unsafe impl Pointee for Block { /// type Metadata = <[T] as Pointee>::Metadata; /// } /// /// // ArchivePointee is automatically derived for sized types because pointers /// // to sized types don't need to store any extra information. Because we're /// // making an unsized block, we need to define what metadata gets stored with /// // our data pointer. /// impl ArchivePointee for Block { /// // This is the extra data that needs to get stored for blocks with /// // trailing slices /// type ArchivedMetadata = <[T] as ArchivePointee>::ArchivedMetadata; /// /// // We need to be able to turn our archived metadata into regular /// // metadata for our type /// fn pointer_metadata( /// metadata: &Self::ArchivedMetadata, /// ) -> ::Metadata { /// metadata.to_native() as usize /// } /// } /// /// // We're implementing ArchiveUnsized for just Block. We can still /// // implement Archive for blocks with sized tails and they won't conflict. /// impl ArchiveUnsized for Block { /// // We'll reuse our block type as our archived type. /// type Archived = Block, [Archived]>; /// /// // Here's where we make the metadata for our archived type. /// fn archived_metadata(&self) -> ArchivedMetadata { /// // Because the metadata for our `ArchivedBlock` is the metadata of /// // the trailing slice, we just need to return that archived /// // metadata. 
/// self.tail.archived_metadata() /// } /// } /// /// // The bounds we use on our serializer type indicate that we need basic /// // serializer capabilities, and then whatever capabilities our head and tail /// // types need to serialize themselves. /// impl SerializeUnsized for Block /// where /// H: Serialize, /// T: Serialize, /// S: Fallible + Writer + ?Sized, /// { /// // This is where we construct our unsized type in the serializer /// fn serialize_unsized( /// &self, /// serializer: &mut S, /// ) -> Result { /// // First, we serialize the head and all the tails. This will make /// // sure that when we finally build our block, we don't accidentally /// // mess up the structure with serialized dependencies. /// let head_resolver = self.head.serialize(serializer)?; /// let mut resolvers = Vec::new(); /// for tail in self.tail.iter() { /// resolvers.push(tail.serialize(serializer)?); /// } /// // Now we align our serializer for our archived type and resolve it. /// // We can't align for unsized types so we treat the trailing slice /// // like an array of 0 length for now. /// let result = serializer /// .align_for::, [Archived; 0]>>()?; /// unsafe { /// serializer.resolve_aligned(&self.head, head_resolver)?; /// } /// serializer.align_for::>()?; /// for (item, resolver) in self.tail.iter().zip(resolvers.drain(..)) { /// unsafe { /// serializer.resolve_aligned(item, resolver)?; /// } /// } /// Ok(result) /// } /// } /// /// let value = Box::new(Block { /// head: "Numbers 1-4".to_string(), /// tail: [1, 2, 3, 4], /// }); /// /// // We have a Box> but we want to it to be a /// // Box>, so we need manually "unsize" the pointer. 
/// let ptr = Box::into_raw(value); /// let unsized_ptr = ptr_meta::from_raw_parts_mut::>( /// ptr.cast::<()>(), /// 4, /// ); /// let unsized_value = unsafe { Box::from_raw(unsized_ptr) }; /// /// let bytes = to_bytes::(&unsized_value).unwrap(); /// /// let archived = unsafe { /// access_unchecked::>>>(&bytes) /// }; /// assert_eq!(archived.head, "Numbers 1-4"); /// assert_eq!(archived.tail.len(), 4); /// assert_eq!(archived.tail, [1, 2, 3, 4]); /// ``` pub trait ArchiveUnsized: Pointee { /// The archived counterpart of this type. Unlike `Archive`, it may be /// unsized. /// /// This type must implement [`ArchivePointee`], a trait that helps make /// valid pointers using archived pointer metadata. type Archived: ArchivePointee + Portable + ?Sized; /// Creates the archived version of the metadata for this value. fn archived_metadata(&self) -> ArchivedMetadata; } /// An archived type with associated metadata for its relative pointer. /// /// This is mostly used in the context of smart pointers and unsized types, and /// is implemented for all sized types by default. pub trait ArchivePointee: Pointee { /// The archived version of the pointer metadata for this type. type ArchivedMetadata: Copy + Send + Sync + Ord + Hash + Unpin + Portable + NoUndef + Default; /// Converts some archived metadata to the pointer metadata for itself. fn pointer_metadata( archived: &Self::ArchivedMetadata, ) -> ::Metadata; } /// A counterpart of [`Serialize`] that's suitable for unsized types. /// /// See [`ArchiveUnsized`] for examples of implementing `SerializeUnsized`. pub trait SerializeUnsized: ArchiveUnsized { /// Writes the object and returns the position of the archived type. fn serialize_unsized(&self, serializer: &mut S) -> Result; } /// A counterpart of [`Deserialize`] that's suitable for unsized types. pub trait DeserializeUnsized: ArchivePointee { /// Deserializes a reference to the given value. 
/// /// # Safety /// /// `out` must be non-null, properly-aligned, and valid for writes. It must /// be allocated according to the layout of the deserialized metadata. unsafe fn deserialize_unsized( &self, deserializer: &mut D, out: *mut T, ) -> Result<(), D::Error>; /// Deserializes the metadata for the given type. fn deserialize_metadata(&self) -> T::Metadata; } rkyv-0.8.9/src/tuple.rs000064400000000000000000000052711046102023000131300ustar 00000000000000//! Archived versions of tuple types. use crate::Portable; macro_rules! impl_tuple { ($name:ident $n:tt, $($t:ident $u:ident $index:tt),* $(,)?) => { #[doc = concat!("An archived tuple with ", stringify!($n), " elements")] #[derive( Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd, Portable, )] #[cfg_attr(feature = "bytecheck", derive(bytecheck::CheckBytes))] #[rkyv(crate)] #[repr(C)] pub struct $name<$($t),*>($(pub $t),*); impl<$($t,)* $($u),*> PartialEq<($($u,)*)> for $name<$($t),*> where $($t: PartialEq<$u>,)* { fn eq(&self, other: &($($u,)*)) -> bool { $(self.$index == other.$index)&&* } } }; } impl_tuple!(ArchivedTuple1 1, T0 U0 0); impl_tuple!(ArchivedTuple2 2, T0 U0 0, T1 U1 1); impl_tuple!(ArchivedTuple3 3, T0 U0 0, T1 U1 1, T2 U2 2); impl_tuple!(ArchivedTuple4 4, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3); impl_tuple!(ArchivedTuple5 5, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3, T4 U4 4); impl_tuple!( ArchivedTuple6 6, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3, T4 U4 4, T5 U5 5 ); impl_tuple!( ArchivedTuple7 7, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3, T4 U4 4, T5 U5 5, T6 U6 6, ); impl_tuple!( ArchivedTuple8 8, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3, T4 U4 4, T5 U5 5, T6 U6 6, T7 U7 7, ); impl_tuple!( ArchivedTuple9 9, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3, T4 U4 4, T5 U5 5, T6 U6 6, T7 U7 7, T8 U8 8, ); impl_tuple!( ArchivedTuple10 10, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3, T4 U4 4, T5 U5 5, T6 U6 6, T7 U7 7, T8 U8 8, T9 U9 9 ); impl_tuple!( ArchivedTuple11 11, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3, T4 U4 4, T5 U5 5, T6 U6 6, T7 U7 7, T8 
U8 8, T9 U9 9, T10 U10 10, ); impl_tuple!( ArchivedTuple12 12, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3, T4 U4 4, T5 U5 5, T6 U6 6, T7 U7 7, T8 U8 8, T9 U9 9, T10 U10 10, T11 U11 11, ); impl_tuple!( ArchivedTuple13 13, T0 U0 0, T1 U1 1, T2 U2 2, T3 U3 3, T4 U4 4, T5 U5 5, T6 U6 6, T7 U7 7, T8 U8 8, T9 U9 9, T10 U10 10, T11 U11 11, T12 U12 12, ); #[cfg(test)] mod tests { use crate::tuple::ArchivedTuple3; #[test] fn partial_eq() { assert_eq!(ArchivedTuple3(1, 2, 3), (1, 2, 3)); } } rkyv-0.8.9/src/util/alloc/aligned_vec.rs000064400000000000000000001004411046102023000163010ustar 00000000000000use core::{ alloc::Layout, borrow::{Borrow, BorrowMut}, fmt, ops::{Deref, DerefMut, Index, IndexMut}, ptr::NonNull, slice, }; use rancor::Fallible; use crate::{ alloc::{ alloc::{alloc, dealloc, handle_alloc_error, realloc}, boxed::Box, vec::Vec, }, ser::{Allocator, Writer}, vec::{ArchivedVec, VecResolver}, with::{ArchiveWith, AsVec, DeserializeWith, SerializeWith}, Place, }; /// A vector of bytes that aligns its memory to the specified alignment. /// /// ``` /// # use rkyv::util::AlignedVec; /// let bytes = AlignedVec::<4096>::with_capacity(1); /// assert_eq!(bytes.as_ptr() as usize % 4096, 0); /// ``` pub struct AlignedVec { ptr: NonNull, cap: usize, len: usize, } impl Drop for AlignedVec { fn drop(&mut self) { if self.cap != 0 { unsafe { dealloc(self.ptr.as_ptr(), self.layout()); } } } } impl AlignedVec { /// The alignment of the vector pub const ALIGNMENT: usize = ALIGNMENT; /// Maximum capacity of the vector. /// /// Dictated by the requirements of [`Layout`]. "`size`, when rounded up to /// the nearest multiple of `align`, must not overflow `isize` (i.e. the /// rounded value must be less than or equal to `isize::MAX`)". pub const MAX_CAPACITY: usize = isize::MAX as usize - (Self::ALIGNMENT - 1); /// Constructs a new, empty `AlignedVec`. /// /// The vector will not allocate until elements are pushed into it. 
/// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut vec = AlignedVec::<16>::new(); /// ``` pub fn new() -> Self { Self::with_capacity(0) } /// Constructs a new, empty `AlignedVec` with the specified capacity. /// /// The vector will be able to hold exactly `capacity` bytes without /// reallocating. If `capacity` is 0, the vector will not allocate. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut vec = AlignedVec::<16>::with_capacity(10); /// /// // The vector contains no items, even though it has capacity for more /// assert_eq!(vec.len(), 0); /// assert_eq!(vec.capacity(), 10); /// /// // These are all done without reallocating... /// for i in 0..10 { /// vec.push(i); /// } /// assert_eq!(vec.len(), 10); /// assert_eq!(vec.capacity(), 10); /// /// // ...but this may make the vector reallocate /// vec.push(11); /// assert_eq!(vec.len(), 11); /// assert!(vec.capacity() >= 11); /// ``` pub fn with_capacity(capacity: usize) -> Self { assert!(ALIGNMENT > 0, "ALIGNMENT must be 1 or more"); assert!( ALIGNMENT.is_power_of_two(), "ALIGNMENT must be a power of 2" ); // As `ALIGNMENT` has to be a power of 2, this caps `ALIGNMENT` at a max // of `(isize::MAX + 1) / 2` (1 GiB on 32-bit systems). assert!( ALIGNMENT < isize::MAX as usize, "ALIGNMENT must be less than isize::MAX" ); if capacity == 0 { Self { ptr: NonNull::dangling(), cap: 0, len: 0, } } else { assert!( capacity <= Self::MAX_CAPACITY, "`capacity` cannot exceed `Self::MAX_CAPACITY`" ); let ptr = unsafe { let layout = Layout::from_size_align_unchecked( capacity, Self::ALIGNMENT, ); let ptr = alloc(layout); if ptr.is_null() { handle_alloc_error(layout); } NonNull::new_unchecked(ptr) }; Self { ptr, cap: capacity, len: 0, } } } fn layout(&self) -> Layout { unsafe { Layout::from_size_align_unchecked(self.cap, Self::ALIGNMENT) } } /// Clears the vector, removing all values. /// /// Note that this method has no effect on the allocated capacity of the /// vector. 
/// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut v = AlignedVec::<16>::new(); /// v.extend_from_slice(&[1, 2, 3, 4]); /// /// v.clear(); /// /// assert!(v.is_empty()); /// ``` pub fn clear(&mut self) { self.len = 0; } /// Change capacity of vector. /// /// Will set capacity to exactly `new_cap`. /// Can be used to either grow or shrink capacity. /// Backing memory will be reallocated. /// /// Usually the safe methods `reserve` or `reserve_exact` are a better /// choice. This method only exists as a micro-optimization for very /// performance-sensitive code where where the calculation of capacity /// required has already been performed, and you want to avoid doing it /// again, or if you want to implement a different growth strategy. /// /// # Safety /// /// - `new_cap` must be less than or equal to /// [`MAX_CAPACITY`](AlignedVec::MAX_CAPACITY) /// - `new_cap` must be greater than or equal to [`len()`](AlignedVec::len) pub unsafe fn change_capacity(&mut self, new_cap: usize) { debug_assert!(new_cap <= Self::MAX_CAPACITY); debug_assert!(new_cap >= self.len); if new_cap > 0 { let new_ptr = if self.cap > 0 { // SAFETY: // - `self.ptr` is currently allocated because `self.cap` is // greater than zero. // - `self.layout()` always matches the layout used to allocate // the current block of memory. // - We checked that `new_cap` is greater than zero. let new_ptr = unsafe { realloc(self.ptr.as_ptr(), self.layout(), new_cap) }; if new_ptr.is_null() { // SAFETY: // - `ALIGNMENT` is always guaranteed to be a nonzero power // of two. // - We checked that `new_cap` doesn't overflow `isize` when // rounded up to the nearest power of two. let layout = unsafe { Layout::from_size_align_unchecked( new_cap, Self::ALIGNMENT, ) }; handle_alloc_error(layout); } new_ptr } else { // SAFETY: // - `ALIGNMENT` is always guaranteed to be a nonzero power of // two. // - We checked that `new_cap` doesn't overflow `isize` when // rounded up to the nearest power of two. 
let layout = unsafe { Layout::from_size_align_unchecked(new_cap, Self::ALIGNMENT) }; // SAFETY: We checked that `new_cap` has non-zero size. let new_ptr = unsafe { alloc(layout) }; if new_ptr.is_null() { handle_alloc_error(layout); } new_ptr }; // SAFETY: We checked that `new_ptr` is non-null in each of the // branches. self.ptr = unsafe { NonNull::new_unchecked(new_ptr) }; self.cap = new_cap; } else if self.cap > 0 { // SAFETY: Because the capacity is nonzero, `self.ptr` points to a // currently-allocated memory block. All memory blocks are allocated // with a layout of `self.layout()`. unsafe { dealloc(self.ptr.as_ptr(), self.layout()); } self.ptr = NonNull::dangling(); self.cap = 0; } } /// Shrinks the capacity of the vector as much as possible. /// /// It will drop down as close as possible to the length but the allocator /// may still inform the vector that there is space for a few more /// elements. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut vec = AlignedVec::<16>::with_capacity(10); /// vec.extend_from_slice(&[1, 2, 3]); /// assert_eq!(vec.capacity(), 10); /// vec.shrink_to_fit(); /// assert!(vec.capacity() >= 3); /// /// vec.clear(); /// vec.shrink_to_fit(); /// assert!(vec.capacity() == 0); /// ``` pub fn shrink_to_fit(&mut self) { if self.cap != self.len { // New capacity cannot exceed max as it's shrinking unsafe { self.change_capacity(self.len) }; } } /// Returns an unsafe mutable pointer to the vector's buffer. /// /// The caller must ensure that the vector outlives the pointer this /// function returns, or else it will end up pointing to garbage. /// Modifying the vector may cause its buffer to be reallocated, which /// would also make any pointers to it invalid. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// // Allocate 1-aligned vector big enough for 4 bytes. 
/// let size = 4; /// let mut x = AlignedVec::<1>::with_capacity(size); /// let x_ptr = x.as_mut_ptr(); /// /// // Initialize elements via raw pointer writes, then set length. /// unsafe { /// for i in 0..size { /// *x_ptr.add(i) = i as u8; /// } /// x.set_len(size); /// } /// assert_eq!(&*x, &[0, 1, 2, 3]); /// ``` pub fn as_mut_ptr(&mut self) -> *mut u8 { self.ptr.as_ptr() } /// Extracts a mutable slice of the entire vector. /// /// Equivalent to `&mut s[..]`. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut vec = AlignedVec::<16>::new(); /// vec.extend_from_slice(&[1, 2, 3, 4, 5]); /// assert_eq!(vec.as_mut_slice().len(), 5); /// for i in 0..5 { /// assert_eq!(vec.as_mut_slice()[i], i as u8 + 1); /// vec.as_mut_slice()[i] = i as u8; /// assert_eq!(vec.as_mut_slice()[i], i as u8); /// } /// ``` pub fn as_mut_slice(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } } /// Returns a raw pointer to the vector's buffer. /// /// The caller must ensure that the vector outlives the pointer this /// function returns, or else it will end up pointing to garbage. /// Modifying the vector may cause its buffer to be reallocated, which /// would also make any pointers to it invalid. /// /// The caller must also ensure that the memory the pointer /// (non-transitively) points to is never written to (except inside an /// `UnsafeCell`) using this pointer or any pointer derived from it. If /// you need to mutate the contents of the slice, use /// [`as_mut_ptr`](AlignedVec::as_mut_ptr). /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut x = AlignedVec::<16>::new(); /// x.extend_from_slice(&[1, 2, 4]); /// let x_ptr = x.as_ptr(); /// /// unsafe { /// for i in 0..x.len() { /// assert_eq!(*x_ptr.add(i), 1 << i); /// } /// } /// ``` pub fn as_ptr(&self) -> *const u8 { self.ptr.as_ptr() } /// Extracts a slice containing the entire vector. /// /// Equivalent to `&s[..]`. 
/// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut vec = AlignedVec::<16>::new(); /// vec.extend_from_slice(&[1, 2, 3, 4, 5]); /// assert_eq!(vec.as_slice().len(), 5); /// for i in 0..5 { /// assert_eq!(vec.as_slice()[i], i as u8 + 1); /// } /// ``` pub fn as_slice(&self) -> &[u8] { unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } /// Returns the number of elements the vector can hold without reallocating. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let vec = AlignedVec::<16>::with_capacity(10); /// assert_eq!(vec.capacity(), 10); /// ``` pub fn capacity(&self) -> usize { self.cap } /// Reserves capacity for at least `additional` more bytes to be inserted /// into the given `AlignedVec`. The collection may reserve more space /// to avoid frequent reallocations. After calling `reserve`, capacity /// will be greater than or equal to `self.len() + additional`. Does /// nothing if capacity is already sufficient. /// /// # Panics /// /// Panics if the new capacity exceeds `Self::MAX_CAPACITY` bytes. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let mut vec = AlignedVec::<16>::new(); /// vec.push(1); /// vec.reserve(10); /// assert!(vec.capacity() >= 11); /// ``` pub fn reserve(&mut self, additional: usize) { // Cannot wrap because capacity always exceeds len, // but avoids having to handle potential overflow here let remaining = self.cap.wrapping_sub(self.len); if additional > remaining { self.do_reserve(additional); } } /// Extend capacity after `reserve` has found it's necessary. /// /// Actually performing the extension is in this separate function marked /// `#[cold]` to hint to compiler that this branch is not often taken. /// This keeps the path for common case where capacity is already sufficient /// as fast as possible, and makes `reserve` more likely to be inlined. /// This is the same trick that Rust's `Vec::reserve` uses. 
#[cold] fn do_reserve(&mut self, additional: usize) { let new_cap = self .len .checked_add(additional) .expect("cannot reserve a larger AlignedVec"); unsafe { self.grow_capacity_to(new_cap) }; } /// Grows total capacity of vector to `new_cap` or more. /// /// Capacity after this call will be `new_cap` rounded up to next power of /// 2, unless that would exceed maximum capacity, in which case capacity /// is capped at the maximum. /// /// This is same growth strategy used by `reserve`, `push` and /// `extend_from_slice`. /// /// Usually the safe methods `reserve` or `reserve_exact` are a better /// choice. This method only exists as a micro-optimization for very /// performance-sensitive code where where the calculation of capacity /// required has already been performed, and you want to avoid doing it /// again. /// /// Maximum capacity is `isize::MAX + 1 - Self::ALIGNMENT` bytes. /// /// # Panics /// /// Panics if `new_cap` exceeds `Self::MAX_CAPACITY` bytes. /// /// # Safety /// /// - `new_cap` must be greater than current /// [`capacity()`](AlignedVec::capacity) /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let mut vec = AlignedVec::<16>::new(); /// vec.push(1); /// unsafe { vec.grow_capacity_to(50) }; /// assert_eq!(vec.len(), 1); /// assert_eq!(vec.capacity(), 64); /// ``` pub unsafe fn grow_capacity_to(&mut self, new_cap: usize) { debug_assert!(new_cap > self.cap); let new_cap = if new_cap > (isize::MAX as usize + 1) >> 1 { // Rounding up to next power of 2 would result in `isize::MAX + 1` // or higher, which exceeds max capacity. So cap at max // instead. assert!( new_cap <= Self::MAX_CAPACITY, "cannot reserve a larger AlignedVec" ); Self::MAX_CAPACITY } else { // Cannot overflow due to check above new_cap.next_power_of_two() }; // SAFETY: We just checked that `new_cap` is greater than or equal to // `len` and less than or equal to `MAX_CAPACITY`. 
unsafe { self.change_capacity(new_cap); } } /// Resizes the Vec in-place so that len is equal to new_len. /// /// If new_len is greater than len, the Vec is extended by the difference, /// with each additional slot filled with value. If new_len is less than /// len, the Vec is simply truncated. /// /// # Panics /// /// Panics if the new length exceeds `Self::MAX_CAPACITY` bytes. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let mut vec = AlignedVec::<16>::new(); /// vec.push(3); /// vec.resize(3, 2); /// assert_eq!(vec.as_slice(), &[3, 2, 2]); /// /// let mut vec = AlignedVec::<16>::new(); /// vec.extend_from_slice(&[1, 2, 3, 4]); /// vec.resize(2, 0); /// assert_eq!(vec.as_slice(), &[1, 2]); /// ``` pub fn resize(&mut self, new_len: usize, value: u8) { if new_len > self.len { let additional = new_len - self.len; self.reserve(additional); unsafe { core::ptr::write_bytes( self.ptr.as_ptr().add(self.len), value, additional, ); } } unsafe { self.set_len(new_len); } } /// Returns `true` if the vector contains no elements. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let mut v = Vec::new(); /// assert!(v.is_empty()); /// /// v.push(1); /// assert!(!v.is_empty()); /// ``` pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns the number of elements in the vector, also referred to as its /// 'length'. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let mut a = AlignedVec::<16>::new(); /// a.extend_from_slice(&[1, 2, 3]); /// assert_eq!(a.len(), 3); /// ``` pub fn len(&self) -> usize { self.len } /// Copies and appends all bytes in a slice to the `AlignedVec`. /// /// The elements of the slice are appended in-order. 
/// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let mut vec = AlignedVec::<16>::new(); /// vec.push(1); /// vec.extend_from_slice(&[2, 3, 4]); /// assert_eq!(vec.as_slice(), &[1, 2, 3, 4]); /// ``` pub fn extend_from_slice(&mut self, other: &[u8]) { self.reserve(other.len()); unsafe { core::ptr::copy_nonoverlapping( other.as_ptr(), self.as_mut_ptr().add(self.len()), other.len(), ); } self.len += other.len(); } /// Removes the last element from a vector and returns it, or `None` if it /// is empty. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let mut vec = AlignedVec::<16>::new(); /// vec.extend_from_slice(&[1, 2, 3]); /// assert_eq!(vec.pop(), Some(3)); /// assert_eq!(vec.as_slice(), &[1, 2]); /// ``` pub fn pop(&mut self) -> Option { if self.len == 0 { None } else { let result = self[self.len - 1]; self.len -= 1; Some(result) } } /// Appends an element to the back of a collection. /// /// # Panics /// /// Panics if the new capacity exceeds `Self::MAX_CAPACITY` bytes. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let mut vec = AlignedVec::<16>::new(); /// vec.extend_from_slice(&[1, 2]); /// vec.push(3); /// assert_eq!(vec.as_slice(), &[1, 2, 3]); /// ``` pub fn push(&mut self, value: u8) { if self.len == self.cap { self.reserve_for_push(); } unsafe { self.as_mut_ptr().add(self.len).write(value); self.len += 1; } } /// Extend capacity by at least 1 byte after `push` has found it's /// necessary. /// /// Actually performing the extension is in this separate function marked /// `#[cold]` to hint to compiler that this branch is not often taken. /// This keeps the path for common case where capacity is already sufficient /// as fast as possible, and makes `push` more likely to be inlined. /// This is the same trick that Rust's `Vec::push` uses. 
#[cold] fn reserve_for_push(&mut self) { // `len` is always less than `isize::MAX`, so no possibility of overflow // here let new_cap = self.len + 1; unsafe { self.grow_capacity_to(new_cap) }; } /// Reserves the minimum capacity for exactly `additional` more elements to /// be inserted in the given `AlignedVec`. After calling /// `reserve_exact`, capacity will be greater than or equal /// to `self.len() + additional`. Does nothing if the capacity is already /// sufficient. /// /// Note that the allocator may give the collection more space than it /// requests. Therefore, capacity can not be relied upon to be precisely /// minimal. Prefer reserve if future insertions are expected. /// /// # Panics /// /// Panics if the new capacity exceeds `Self::MAX_CAPACITY`. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let mut vec = AlignedVec::<16>::new(); /// vec.push(1); /// vec.reserve_exact(10); /// assert!(vec.capacity() >= 11); /// ``` pub fn reserve_exact(&mut self, additional: usize) { // This function does not use the hot/cold paths trick that `reserve` // and `push` do, on assumption that user probably knows this will // require an increase in capacity. Otherwise, they'd likely use // `reserve`. let new_cap = self .len .checked_add(additional) .expect("cannot reserve a larger AlignedVec"); if new_cap > self.cap { assert!( new_cap <= Self::MAX_CAPACITY, "cannot reserve a larger AlignedVec" ); unsafe { self.change_capacity(new_cap) }; } } /// Forces the length of the vector to `new_len`. /// /// This is a low-level operation that maintains none of the normal /// invariants of the type. /// /// # Safety /// /// - `new_len` must be less than or equal to /// [`capacity()`](AlignedVec::capacity) /// - The elements at `old_len..new_len` must be initialized /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut vec = AlignedVec::<16>::with_capacity(3); /// vec.extend_from_slice(&[1, 2, 3]); /// /// // SAFETY: /// // 1. 
`old_len..0` is empty to no elements need to be initialized. /// // 2. `0 <= capacity` always holds whatever capacity is. /// unsafe { /// vec.set_len(0); /// } /// ``` pub unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity()); self.len = new_len; } /// Converts the vector into `Box<[u8]>`. The returned slice is 1-aligned. /// /// This method reallocates and copies the underlying bytes. Any excess /// capacity is dropped. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut v = AlignedVec::<16>::new(); /// v.extend_from_slice(&[1, 2, 3]); /// /// let slice = v.into_boxed_slice(); /// ``` /// /// Any excess capacity is removed: /// /// ``` /// # use rkyv::util::AlignedVec; /// let mut vec = AlignedVec::<16>::with_capacity(10); /// vec.extend_from_slice(&[1, 2, 3]); /// /// assert_eq!(vec.capacity(), 10); /// let slice = vec.into_boxed_slice(); /// assert_eq!(slice.len(), 3); /// ``` pub fn into_boxed_slice(self) -> Box<[u8]> { self.into_vec().into_boxed_slice() } /// Converts the vector into `Vec`. /// /// This method reallocates and copies the underlying bytes. Any excess /// capacity is dropped. /// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// let mut v = AlignedVec::<16>::new(); /// v.extend_from_slice(&[1, 2, 3]); /// /// let vec = v.into_vec(); /// assert_eq!(vec.len(), 3); /// assert_eq!(vec.as_slice(), &[1, 2, 3]); /// ``` pub fn into_vec(self) -> Vec { Vec::from(self.as_ref()) } } #[cfg(feature = "std")] const _: () = { use std::io; impl AlignedVec { /// Reads all bytes until EOF from `r` and appends them to this /// `AlignedVec`. /// /// If successful, this function will return the total number of bytes /// read. 
/// /// # Examples /// ``` /// # use rkyv::util::AlignedVec; /// /// let source = (0..4096).map(|x| (x % 256) as u8).collect::>(); /// let mut bytes = AlignedVec::<16>::new(); /// bytes.extend_from_reader(&mut source.as_slice()).unwrap(); /// /// assert_eq!(bytes.len(), 4096); /// assert_eq!(bytes[0], 0); /// assert_eq!(bytes[100], 100); /// assert_eq!(bytes[2945], 129); /// ``` pub fn extend_from_reader( &mut self, r: &mut R, ) -> io::Result { let start_len = self.len(); let start_cap = self.capacity(); // Extra initialized bytes from previous loop iteration. let mut initialized = 0; loop { if self.len() == self.capacity() { // No available capacity, reserve some space. self.reserve(32); } let read_buf_start = unsafe { self.as_mut_ptr().add(self.len) }; let read_buf_len = self.capacity() - self.len(); // Initialize the uninitialized portion of the available space. unsafe { // The first `initialized` bytes don't need to be zeroed. // This leaves us `read_buf_len - initialized` bytes to zero // starting at `initialized`. core::ptr::write_bytes( read_buf_start.add(initialized), 0, read_buf_len - initialized, ); } // The entire read buffer is now initialized, so we can create a // mutable slice of it. let read_buf = unsafe { core::slice::from_raw_parts_mut( read_buf_start, read_buf_len, ) }; match r.read(read_buf) { Ok(read) => { // We filled `read` additional bytes. unsafe { self.set_len(self.len() + read); } initialized = read_buf_len - read; if read == 0 { return Ok(self.len() - start_len); } } Err(e) if e.kind() == io::ErrorKind::Interrupted => { continue } Err(e) => return Err(e), } if self.len() == self.capacity() && self.capacity() == start_cap { // The buffer might be an exact fit. Let's read into a probe // buffer and see if it returns `Ok(0)`. // If so, we've avoided an unnecessary // doubling of the capacity. But if not, append the // probe buffer to the primary buffer and let its capacity // grow. 
let mut probe = [0u8; 32]; loop { match r.read(&mut probe) { Ok(0) => return Ok(self.len() - start_len), Ok(n) => { self.extend_from_slice(&probe[..n]); break; } Err(ref e) if e.kind() == io::ErrorKind::Interrupted => { continue } Err(e) => return Err(e), } } } } } } impl io::Write for AlignedVec { fn write(&mut self, buf: &[u8]) -> io::Result { self.extend_from_slice(buf); Ok(buf.len()) } fn write_vectored( &mut self, bufs: &[io::IoSlice<'_>], ) -> io::Result { let len = bufs.iter().map(|b| b.len()).sum(); self.reserve(len); for buf in bufs { self.extend_from_slice(buf); } Ok(len) } fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.extend_from_slice(buf); Ok(()) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } }; impl From> for Vec { fn from(aligned: AlignedVec) -> Self { aligned.to_vec() } } impl AsMut<[u8]> for AlignedVec { fn as_mut(&mut self) -> &mut [u8] { self.as_mut_slice() } } impl AsRef<[u8]> for AlignedVec { fn as_ref(&self) -> &[u8] { self.as_slice() } } impl Borrow<[u8]> for AlignedVec { fn borrow(&self) -> &[u8] { self.as_slice() } } impl BorrowMut<[u8]> for AlignedVec { fn borrow_mut(&mut self) -> &mut [u8] { self.as_mut_slice() } } impl Clone for AlignedVec { fn clone(&self) -> Self { unsafe { let mut result = Self::with_capacity(self.len); result.len = self.len; core::ptr::copy_nonoverlapping( self.as_ptr(), result.as_mut_ptr(), self.len, ); result } } } impl fmt::Debug for AlignedVec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.as_slice().fmt(f) } } impl Default for AlignedVec { fn default() -> Self { Self::new() } } impl Deref for AlignedVec { type Target = [u8]; fn deref(&self) -> &Self::Target { self.as_slice() } } impl DerefMut for AlignedVec { fn deref_mut(&mut self) -> &mut Self::Target { self.as_mut_slice() } } impl> Index for AlignedVec { type Output = >::Output; fn index(&self, index: I) -> &Self::Output { &self.as_slice()[index] } } impl> IndexMut for AlignedVec { fn index_mut(&mut self, index: I) -> 
&mut Self::Output { &mut self.as_mut_slice()[index] } } // SAFETY: AlignedVec is safe to send to another thread unsafe impl Send for AlignedVec {} // SAFETY: AlignedVec is safe to share between threads unsafe impl Sync for AlignedVec {} impl Unpin for AlignedVec {} impl ArchiveWith> for AsVec { type Archived = ArchivedVec; type Resolver = VecResolver; fn resolve_with( field: &AlignedVec, resolver: Self::Resolver, out: Place, ) { ArchivedVec::resolve_from_len(field.len(), resolver, out) } } impl SerializeWith, S> for AsVec where S: Allocator + Fallible + Writer + ?Sized, { fn serialize_with( field: &AlignedVec, serializer: &mut S, ) -> Result { ArchivedVec::serialize_from_slice(field.as_slice(), serializer) } } impl DeserializeWith, AlignedVec, D> for AsVec where D: Fallible + ?Sized, { fn deserialize_with( field: &ArchivedVec, _: &mut D, ) -> Result, D::Error> { let mut result = AlignedVec::with_capacity(field.len()); result.extend_from_slice(field.as_slice()); Ok(result) } } rkyv-0.8.9/src/util/alloc/arena.rs000064400000000000000000000063541046102023000151370ustar 00000000000000use crate::ser::allocator::Arena; #[cfg(feature = "std")] mod detail { use core::cell::Cell; use crate::ser::allocator::Arena; thread_local! 
{ static THREAD_ARENA: Cell> = const { Cell::new(None) }; } pub fn with_arena(f: impl FnOnce(&mut Arena) -> T) -> T { THREAD_ARENA.with(|thread_arena| { let mut arena = thread_arena.take().unwrap_or_default(); let result = f(&mut arena); let capacity = arena.shrink(); if let Some(other) = thread_arena.take() { if other.capacity() > capacity { arena = other; } } thread_arena.set(Some(arena)); result }) } #[inline] pub fn clear_arena() { THREAD_ARENA.take(); } } #[cfg(all(not(feature = "std"), target_has_atomic = "ptr",))] mod detail { use core::{ ptr::{self, NonNull}, sync::atomic::{AtomicPtr, Ordering}, }; use crate::ser::allocator::Arena; static GLOBAL_ARENA: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut()); pub fn with_arena(f: impl FnOnce(&mut Arena) -> T) -> T { let ptr = GLOBAL_ARENA.swap(ptr::null_mut(), Ordering::AcqRel); let mut arena = if let Some(raw) = NonNull::new(ptr) { unsafe { Arena::from_raw(raw) } } else { Arena::new() }; let result = f(&mut arena); arena.shrink(); let raw = arena.into_raw(); let swap = GLOBAL_ARENA.compare_exchange( ptr::null_mut(), raw.as_ptr(), Ordering::AcqRel, Ordering::Relaxed, ); if swap.is_err() { // Another arena was swapped in while we were executing `f`. We need // to free the current arena. unsafe { drop(Arena::from_raw(raw)); } } result } #[inline] pub fn clear_arena() { let ptr = GLOBAL_ARENA.swap(ptr::null_mut(), Ordering::AcqRel); if let Some(raw) = NonNull::new(ptr) { unsafe { drop(Arena::from_raw(raw)); } } } } #[cfg(all(not(feature = "std"), not(target_has_atomic = "ptr"),))] mod detail { use crate::ser::allocator::Arena; pub fn with_arena(f: impl FnOnce(&mut Arena) -> T) -> T { let mut arena = Arena::new(); f(&mut arena) } #[inline] pub fn clear_arena() {} } /// Calls the given function with the builtin arena allocator. /// /// When the `std` feature is enabled, the builtin arena allocator is a /// thread-local variable, with one allocator per thread. 
When atomic pointers /// are supported, it is a global static and all threads share the same arena. /// Otherwise, this will create and drop a new arena each time it is called. pub fn with_arena(f: impl FnOnce(&mut Arena) -> T) -> T { detail::with_arena(f) } /// Clears the builtin arena allocator. /// /// When the `std` feature is enabled, this only clears the allocator for the /// current thread. When atomic pointers are supported, this will clear the /// allocator for all threads. Otherwise, this function does nothing. #[inline] pub fn clear_arena() { detail::clear_arena() } rkyv-0.8.9/src/util/alloc/mod.rs000064400000000000000000000001071046102023000146160ustar 00000000000000mod aligned_vec; mod arena; pub use self::{aligned_vec::*, arena::*}; rkyv-0.8.9/src/util/inline_vec.rs000064400000000000000000000261221046102023000150650ustar 00000000000000use core::{ borrow::{Borrow, BorrowMut}, fmt, marker::PhantomData, mem::MaybeUninit, ops, ptr::{self, NonNull}, slice::{self, from_raw_parts_mut}, }; /// A vector that uses inline-allocated memory. pub struct InlineVec { elements: [MaybeUninit; N], len: usize, } impl Drop for InlineVec { fn drop(&mut self) { self.clear() } } // SAFETY: InlineVec is safe to send to another thread is T is safe to send to // another thread unsafe impl Send for InlineVec {} // SAFETY: InlineVec is safe to share between threads if T is safe to share // between threads unsafe impl Sync for InlineVec {} impl InlineVec { /// Constructs a new, empty `InlineVec`. /// /// The vector will be able to hold exactly `N` elements. pub fn new() -> Self { Self { elements: unsafe { MaybeUninit::uninit().assume_init() }, len: 0, } } /// Clears the vector, removing all values. pub fn clear(&mut self) { for i in 0..self.len { unsafe { self.elements[i].as_mut_ptr().drop_in_place(); } } self.len = 0; } /// Returns an unsafe mutable pointer to the vector's buffer. 
/// /// The caller must ensure that the vector outlives the pointer this /// function returns, or else it will end up pointing to garbage. pub fn as_mut_ptr(&mut self) -> *mut T { self.elements.as_mut_ptr().cast() } /// Extracts a mutable slice of the entire vector. /// /// Equivalent to `&mut s[..]`. pub fn as_mut_slice(&mut self) -> &mut [T] { unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) } } /// Returns a raw pointer to the vector's buffer. /// /// The caller must ensure that the vector outlives the pointer this /// functions returns, or else it will end up pointing to garbage. /// /// The caller must also ensure that the memory the pointer /// (non-transitively) points to is never written to (except inside an /// `UnsafeCell`) using this pointer or any pointer derived from it. If /// you need to mutate the contents of the slice, use /// [`as_mut_ptr`](Self::as_mut_ptr). pub fn as_ptr(&self) -> *const T { self.elements.as_ptr().cast() } /// Extracts a slice containing the entire vector. /// /// Equivalent to `&s[..]`. pub fn as_slice(&self) -> &[T] { unsafe { slice::from_raw_parts(self.as_ptr(), self.len) } } /// Returns the number of elements the vector can hole without reallocating. pub const fn capacity(&self) -> usize { N } /// Ensures that there is capacity for at least `additional` more elements /// to be inserted into the `ScratchVec`. /// /// # Panics /// /// Panics if the required capacity exceeds the available capacity. pub fn reserve(&mut self, additional: usize) { if N - self.len < additional { Self::out_of_space(); } } #[cold] fn out_of_space() -> ! { panic!( "reserve requested more capacity than the InlineVec has available" ); } /// Returns `true` if the vector contains no elements. pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns the number of elements in the vector, also referred to as its /// `length`. pub fn len(&self) -> usize { self.len } /// Copies and appends all elements in a slice to the `ScratchVec`. 
/// /// The elements of the slice are appended in-order. pub fn extend_from_slice(&mut self, other: &[T]) { if !other.is_empty() { self.reserve(other.len()); unsafe { core::ptr::copy_nonoverlapping( other.as_ptr(), self.as_mut_ptr().add(self.len()), other.len(), ); } self.len += other.len(); } } /// Removes the last element from a vector and returns it, or `None` if it /// is empty. pub fn pop(&mut self) -> Option { if self.len == 0 { None } else { unsafe { self.len -= 1; Some(self.as_ptr().add(self.len()).read()) } } } /// Appends an element to the back of a collection without performing bounds /// checking. /// /// # Safety /// /// The vector must have enough space reserved for the pushed element. pub unsafe fn push_unchecked(&mut self, value: T) { unsafe { self.as_mut_ptr().add(self.len).write(value); self.len += 1; } } /// Appends an element to the back of a collection. pub fn push(&mut self, value: T) { if self.len == N { Self::out_of_space() } else { unsafe { self.push_unchecked(value); } } } /// Reserves the minimum capacity for exactly `additional` more elements to /// be inserted in the given `AlignedVec`. After calling /// `reserve_exact`, capacity will be greater than or equal /// to `self.len() + additional`. Does nothing if the capacity is already /// sufficient. /// /// # Panics /// /// Panics if the required capacity exceeds the available capacity. pub fn reserve_exact(&mut self, additional: usize) { self.reserve(additional); } /// Forces the length of the vector to `new_len`. /// /// This is a low-level operation that maintains none of the normal /// invariants of the type. /// /// # Safety /// /// - `new_len` must be less than or equal to [`capacity()`](Self::capacity) /// - The elements at `old_len..new_len` must be initialized pub unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity()); self.len = new_len; } /// Creates a draining iterator that removes all of the elements from the /// vector. 
pub fn drain(&mut self) -> Drain<'_, T, N> { let remaining = self.len(); unsafe { self.set_len(0); } Drain { current: unsafe { NonNull::new_unchecked(self.as_mut_ptr()) }, remaining, _phantom: PhantomData, } } } impl InlineVec, N> { /// Assuming that all the elements are initialized, removes the /// `MaybeUninit` wrapper from the vector. /// /// # Safety /// /// It is up to the caller to guarantee that the `MaybeUninit` elements /// really are in an initialized state. Calling this when the content is /// not yet fully initialized causes undefined behavior. pub fn assume_init(self) -> InlineVec { let mut elements = unsafe { MaybeUninit::<[MaybeUninit; N]>::uninit().assume_init() }; unsafe { ptr::copy_nonoverlapping( self.elements.as_ptr().cast(), elements.as_mut_ptr(), N, ); } InlineVec { elements, len: self.len, } } } impl AsMut<[T]> for InlineVec { fn as_mut(&mut self) -> &mut [T] { self.as_mut_slice() } } impl AsRef<[T]> for InlineVec { fn as_ref(&self) -> &[T] { self.as_slice() } } impl Borrow<[T]> for InlineVec { fn borrow(&self) -> &[T] { self.as_slice() } } impl BorrowMut<[T]> for InlineVec { fn borrow_mut(&mut self) -> &mut [T] { self.as_mut_slice() } } impl fmt::Debug for InlineVec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.as_slice().fmt(f) } } impl Default for InlineVec { fn default() -> Self { Self::new() } } impl ops::Deref for InlineVec { type Target = [T]; fn deref(&self) -> &Self::Target { self.as_slice() } } impl ops::DerefMut for InlineVec { fn deref_mut(&mut self) -> &mut Self::Target { self.as_mut_slice() } } impl, const N: usize> ops::Index for InlineVec { type Output = >::Output; fn index(&self, index: I) -> &Self::Output { &self.as_slice()[index] } } impl, const N: usize> ops::IndexMut for InlineVec { fn index_mut(&mut self, index: I) -> &mut Self::Output { &mut self.as_mut_slice()[index] } } /// A draining iterator for `InlineVec`. /// /// This `struct` is created by [`InlineVec::drain`]. 
See its documentation for /// more. pub struct Drain<'a, T: 'a, const N: usize> { current: NonNull, remaining: usize, _phantom: PhantomData<&'a mut InlineVec>, } impl fmt::Debug for Drain<'_, T, N> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Drain").field(&self.as_slice()).finish() } } impl Drain<'_, T, N> { /// Returns the remaining items of this iterator as a slice. pub fn as_slice(&self) -> &[T] { unsafe { from_raw_parts_mut(self.current.as_ptr(), self.remaining) } } } impl AsRef<[T]> for Drain<'_, T, N> { fn as_ref(&self) -> &[T] { self.as_slice() } } impl Iterator for Drain<'_, T, N> { type Item = T; fn next(&mut self) -> Option { if self.remaining > 0 { self.remaining -= 1; let result = unsafe { self.current.as_ptr().read() }; self.current = unsafe { NonNull::new_unchecked(self.current.as_ptr().add(1)) }; Some(result) } else { None } } fn size_hint(&self) -> (usize, Option) { (self.remaining, Some(self.remaining)) } } impl DoubleEndedIterator for Drain<'_, T, N> { fn next_back(&mut self) -> Option { if self.remaining > 0 { self.remaining -= 1; unsafe { Some(self.current.as_ptr().add(self.remaining).read()) } } else { None } } } impl Drop for Drain<'_, T, N> { fn drop(&mut self) { for i in 0..self.remaining { unsafe { self.current.as_ptr().add(i).drop_in_place(); } } } } impl ExactSizeIterator for Drain<'_, T, N> {} impl core::iter::FusedIterator for Drain<'_, T, N> {} #[cfg(test)] mod tests { use crate::util::InlineVec; #[test] fn drain() { let mut vec = InlineVec::<_, 8>::new(); for i in 0..100 { vec.push(i); if vec.len() == vec.capacity() { for j in vec.drain() { let _ = j; } } } } } rkyv-0.8.9/src/util/mod.rs000064400000000000000000000012401046102023000135230ustar 00000000000000//! Utilities for common operations. 
#[cfg(feature = "alloc")] mod alloc; mod inline_vec; mod ser_vec; use core::ops::{Deref, DerefMut}; #[doc(inline)] #[cfg(feature = "alloc")] pub use self::alloc::*; #[doc(inline)] pub use self::{inline_vec::InlineVec, ser_vec::SerVec}; /// A wrapper which aligns its inner value to 16 bytes. #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] pub struct Align( /// The inner value. pub T, ); impl Deref for Align { type Target = T; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for Align { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } rkyv-0.8.9/src/util/ser_vec.rs000064400000000000000000000251111046102023000143750ustar 00000000000000use core::{ alloc::Layout, borrow::{Borrow, BorrowMut}, fmt, marker::PhantomData, mem::MaybeUninit, ops, ptr::NonNull, slice::{self, from_raw_parts_mut}, }; use rancor::Fallible; use crate::ser::Allocator; /// A vector that uses serializer-allocated memory. pub struct SerVec { ptr: NonNull, cap: usize, len: usize, } // SAFETY: SerVec is safe to send to another thread is T is safe to send to // another thread unsafe impl Send for SerVec {} // SAFETY: SerVec is safe to share between threads if T is safe to share // between threads unsafe impl Sync for SerVec {} impl SerVec { /// Constructs a new, empty `SerVec` with the specified capacity. /// /// The vector will be able to hold exactly `capacity` elements. If /// `capacity` is 0, the vector will not allocate. pub fn with_capacity( serializer: &mut S, cap: usize, f: impl FnOnce(&mut Self, &mut S) -> R, ) -> Result where S: Fallible + Allocator + ?Sized, { let layout = Layout::array::(cap).unwrap(); let mut vec = Self { ptr: if layout.size() != 0 { unsafe { serializer.push_alloc(layout)?.cast() } } else { NonNull::dangling() }, cap, len: 0, }; let result = f(&mut vec, serializer); vec.clear(); if layout.size() != 0 { unsafe { serializer.pop_alloc(vec.ptr.cast(), layout)?; } } Ok(result) } /// Clears the vector, removing all values. 
/// /// Note that this method has no effect on the allocated capacity of the /// vector. pub fn clear(&mut self) { for i in 0..self.len { unsafe { core::ptr::drop_in_place(self.ptr.as_ptr().add(i)); } } self.len = 0; } /// Returns an unsafe mutable pointer to the vector's buffer. /// /// The caller must ensure that the vector outlives the pointer this /// function returns, or else it will end up pointing to garbage. pub fn as_mut_ptr(&mut self) -> *mut T { self.ptr.as_ptr() } /// Extracts a mutable slice of the entire vector. /// /// Equivalent to `&mut s[..]`. pub fn as_mut_slice(&mut self) -> &mut [T] { unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) } } /// Returns a raw pointer to the vector's buffer. /// /// The caller must ensure that the vector outlives the pointer this /// functions returns, or else it will end up pointing to garbage. /// /// The caller must also ensure that the memory the pointer /// (non-transitively) points to is never written to (except inside an /// `UnsafeCell`) using this pointer or any pointer derived from it. If /// you need to mutate the contents of the slice, use /// [`as_mut_ptr`](Self::as_mut_ptr). pub fn as_ptr(&self) -> *const T { self.ptr.as_ptr() } /// Extracts a slice containing the entire vector. /// /// Equivalent to `&s[..]`. pub fn as_slice(&self) -> &[T] { unsafe { slice::from_raw_parts(self.as_ptr(), self.len) } } /// Returns the number of elements the vector can hole without reallocating. pub fn capacity(&self) -> usize { self.cap } /// Ensures that there is capacity for at least `additional` more elements /// to be inserted into the `ScratchVec`. /// /// # Panics /// /// Panics if the required capacity exceeds the available capacity. pub fn reserve(&mut self, additional: usize) { if self.cap - self.len < additional { Self::out_of_space(); } } #[cold] fn out_of_space() -> ! 
{ panic!("reserve requested more capacity than the SerVec has available"); } /// Returns `true` if the vector contains no elements. pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns the number of elements in the vector, also referred to as its /// `length`. pub fn len(&self) -> usize { self.len } /// Copies and appends all elements in a slice to the `ScratchVec`. /// /// The elements of the slice are appended in-order. pub fn extend_from_slice(&mut self, other: &[T]) where T: Copy, { if !other.is_empty() { self.reserve(other.len()); unsafe { core::ptr::copy_nonoverlapping( other.as_ptr(), self.as_mut_ptr().add(self.len()), other.len(), ); } self.len += other.len(); } } /// Removes the last element from a vector and returns it, or `None` if it /// is empty. pub fn pop(&mut self) -> Option { if self.len == 0 { None } else { unsafe { self.len -= 1; Some(self.as_ptr().add(self.len()).read()) } } } /// Appends an element to the back of a collection without performing bounds /// checking. /// /// # Safety /// /// The vector must have enough space reserved for the pushed element. pub unsafe fn push_unchecked(&mut self, value: T) { unsafe { self.as_mut_ptr().add(self.len).write(value); } self.len += 1; } /// Appends an element to the back of a collection. pub fn push(&mut self, value: T) { if self.len == self.cap { Self::out_of_space() } else { unsafe { self.push_unchecked(value); } } } /// Reserves the minimum capacity for exactly `additional` more elements to /// be inserted in the given `AlignedVec`. After calling /// `reserve_exact`, capacity will be greater than or equal /// to `self.len() + additional`. Does nothing if the capacity is already /// sufficient. /// /// # Panics /// /// Panics if the required capacity exceeds the available capacity. pub fn reserve_exact(&mut self, additional: usize) { self.reserve(additional); } /// Forces the length of the vector to `new_len`. 
/// /// This is a low-level operation that maintains none of the normal /// invariants of the type. /// /// # Safety /// /// - `new_len` must be less than or equal to [`capacity()`](Self::capacity) /// - The elements at `old_len..new_len` must be initialized pub unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity()); self.len = new_len; } /// Creates a draining iterator that removes all of the elements from the /// vector. pub fn drain(&mut self) -> Drain<'_, T> { let remaining = self.len(); unsafe { self.set_len(0); } Drain { current: self.ptr, remaining, _phantom: PhantomData, } } } impl SerVec> { /// Assuming that all the elements are initialized, removes the /// `MaybeUninit` wrapper from the vector. /// /// # Safety /// /// It is up to the caller to guarantee that the `MaybeUninit` elements /// really are in an initialized state. Calling this when the content is /// not yet fully initialized causes undefined behavior. pub fn assume_init(self) -> SerVec { SerVec { ptr: self.ptr.cast(), cap: self.cap, len: self.len, } } } impl AsMut<[T]> for SerVec { fn as_mut(&mut self) -> &mut [T] { self.as_mut_slice() } } impl AsRef<[T]> for SerVec { fn as_ref(&self) -> &[T] { self.as_slice() } } impl Borrow<[T]> for SerVec { fn borrow(&self) -> &[T] { self.as_slice() } } impl BorrowMut<[T]> for SerVec { fn borrow_mut(&mut self) -> &mut [T] { self.as_mut_slice() } } impl fmt::Debug for SerVec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.as_slice().fmt(f) } } impl ops::Deref for SerVec { type Target = [T]; fn deref(&self) -> &Self::Target { self.as_slice() } } impl ops::DerefMut for SerVec { fn deref_mut(&mut self) -> &mut Self::Target { self.as_mut_slice() } } impl> ops::Index for SerVec { type Output = >::Output; fn index(&self, index: I) -> &Self::Output { &self.as_slice()[index] } } impl> ops::IndexMut for SerVec { fn index_mut(&mut self, index: I) -> &mut Self::Output { &mut self.as_mut_slice()[index] } } /// A 
draining iterator for `SerVec`. /// /// This `struct` is created by [`SerVec::drain`]. See its documentation for /// more. pub struct Drain<'a, T: 'a> { current: NonNull, remaining: usize, _phantom: PhantomData<&'a mut SerVec>, } impl fmt::Debug for Drain<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Drain").field(&self.as_slice()).finish() } } impl Drain<'_, T> { /// Returns the remaining items of this iterator as a slice. pub fn as_slice(&self) -> &[T] { unsafe { from_raw_parts_mut(self.current.as_ptr(), self.remaining) } } } impl AsRef<[T]> for Drain<'_, T> { fn as_ref(&self) -> &[T] { self.as_slice() } } impl Iterator for Drain<'_, T> { type Item = T; fn next(&mut self) -> Option { if self.remaining > 0 { self.remaining -= 1; let result = unsafe { self.current.as_ptr().read() }; self.current = unsafe { NonNull::new_unchecked(self.current.as_ptr().add(1)) }; Some(result) } else { None } } fn size_hint(&self) -> (usize, Option) { (self.remaining, Some(self.remaining)) } } impl DoubleEndedIterator for Drain<'_, T> { fn next_back(&mut self) -> Option { if self.remaining > 0 { self.remaining -= 1; unsafe { Some(self.current.as_ptr().add(self.remaining).read()) } } else { None } } } impl Drop for Drain<'_, T> { fn drop(&mut self) { for i in 0..self.remaining { unsafe { self.current.as_ptr().add(i).drop_in_place(); } } } } impl ExactSizeIterator for Drain<'_, T> {} impl core::iter::FusedIterator for Drain<'_, T> {} rkyv-0.8.9/src/validation/archive/mod.rs000064400000000000000000000107761046102023000163370ustar 00000000000000//! Basic archive buffer validation. mod validator; use core::{alloc::Layout, ops::Range}; use bytecheck::rancor::{Fallible, Source, Strategy}; use rancor::ResultExt as _; pub use self::validator::*; use crate::traits::LayoutRaw; /// A context that can validate nonlocal archive memory. 
/// /// # Safety /// /// `check_subtree_ptr` must only return true if `ptr` is located entirely /// within the subtree range and is safe to dereference. pub unsafe trait ArchiveContext::Error> { /// Checks that the given data address and layout is located completely /// within the subtree range. fn check_subtree_ptr( &mut self, ptr: *const u8, layout: &Layout, ) -> Result<(), E>; /// Pushes a new subtree range onto the validator and starts validating it. /// /// After calling `push_subtree_range`, the validator will have a subtree /// range starting at the original start and ending at `root`. After popping /// the returned range, the validator will have a subtree range starting at /// `end` and ending at the original end. /// /// # Safety /// /// `root` and `end` must be located inside the archive. unsafe fn push_subtree_range( &mut self, root: *const u8, end: *const u8, ) -> Result, E>; /// Pops the given range, restoring the original state with the pushed range /// removed. /// /// If the range was not popped in reverse order, an error is returned. /// /// # Safety /// /// `range` must be a range returned from this validator. unsafe fn pop_subtree_range( &mut self, range: Range, ) -> Result<(), E>; } unsafe impl ArchiveContext for Strategy where T: ArchiveContext + ?Sized, { fn check_subtree_ptr( &mut self, ptr: *const u8, layout: &Layout, ) -> Result<(), E> { T::check_subtree_ptr(self, ptr, layout) } unsafe fn push_subtree_range( &mut self, root: *const u8, end: *const u8, ) -> Result, E> { // SAFETY: This just forwards the call to the underlying context, which // has the same safety requirements. unsafe { T::push_subtree_range(self, root, end) } } unsafe fn pop_subtree_range( &mut self, range: Range, ) -> Result<(), E> { // SAFETY: This just forwards the call to the underlying context, which // has the same safety requirements. unsafe { T::pop_subtree_range(self, range) } } } /// Helper methods for [`ArchiveContext`]. 
pub trait ArchiveContextExt: ArchiveContext { /// Checks that the given pointer and layout are within the current subtree /// range of the context, then pushes a new subtree range onto the validator /// for it and calls the given function. fn in_subtree_raw( &mut self, ptr: *const u8, layout: Layout, f: impl FnOnce(&mut Self) -> Result, ) -> Result; /// Checks that the value the given pointer points to is within the current /// subtree range of the context, then pushes a new subtree range onto the /// validator for it and calls the given function. fn in_subtree( &mut self, ptr: *const T, f: impl FnOnce(&mut Self) -> Result, ) -> Result; } impl + ?Sized, E: Source> ArchiveContextExt for C { #[allow(clippy::not_unsafe_ptr_arg_deref)] fn in_subtree_raw( &mut self, ptr: *const u8, layout: Layout, f: impl FnOnce(&mut Self) -> Result, ) -> Result { self.check_subtree_ptr(ptr, &layout)?; // SAFETY: We checked that the entire range from `ptr` to // `ptr + layout.size()` is located within the buffer. let range = unsafe { self.push_subtree_range(ptr, ptr.add(layout.size()))? }; let result = f(self)?; // SAFETY: `range` was returned from `push_subtree_range`. 
unsafe { self.pop_subtree_range(range)?; } Ok(result) } #[allow(clippy::not_unsafe_ptr_arg_deref)] fn in_subtree( &mut self, ptr: *const T, f: impl FnOnce(&mut Self) -> Result, ) -> Result { let layout = T::layout_raw(ptr_meta::metadata(ptr)).into_error()?; let root = ptr as *const u8; self.in_subtree_raw(root, layout, f) } } rkyv-0.8.9/src/validation/archive/validator.rs000064400000000000000000000112621046102023000175340ustar 00000000000000use core::{ alloc::Layout, error::Error, fmt, marker::PhantomData, num::NonZeroUsize, ops::Range, }; use rancor::{fail, OptionExt, Source}; use crate::{fmt::Pointer, validation::ArchiveContext}; #[derive(Debug)] struct UnalignedPointer { address: usize, align: usize, } impl fmt::Display for UnalignedPointer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "unaligned pointer: ptr {} unaligned for alignment {}", Pointer(self.address), self.align, ) } } impl Error for UnalignedPointer {} #[derive(Debug)] struct InvalidSubtreePointer { address: usize, size: usize, subtree_range: Range, } impl fmt::Display for InvalidSubtreePointer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "subtree pointer overran range: ptr {} size {} in range {}..{}", Pointer(self.address), self.size, Pointer(self.subtree_range.start), Pointer(self.subtree_range.end), ) } } impl Error for InvalidSubtreePointer {} #[derive(Debug)] struct ExceededMaximumSubtreeDepth; impl fmt::Display for ExceededMaximumSubtreeDepth { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "pushed a subtree range that exceeded the maximum subtree depth", ) } } impl Error for ExceededMaximumSubtreeDepth {} #[derive(Debug)] struct RangePoppedTooManyTimes; impl fmt::Display for RangePoppedTooManyTimes { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "subtree range popped too many times") } } impl Error for RangePoppedTooManyTimes {} #[derive(Debug)] struct RangePoppedOutOfOrder; impl fmt::Display 
for RangePoppedOutOfOrder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "subtree range popped out of order") } } impl Error for RangePoppedOutOfOrder {} /// A validator that can verify archives with nonlocal memory. #[derive(Debug)] pub struct ArchiveValidator<'a> { subtree_range: Range, max_subtree_depth: Option, _phantom: PhantomData<&'a [u8]>, } impl<'a> ArchiveValidator<'a> { /// Creates a new bounds validator for the given bytes. #[inline] pub fn new(bytes: &'a [u8]) -> Self { Self::with_max_depth(bytes, None) } /// Crates a new bounds validator for the given bytes with a maximum /// validation depth. #[inline] pub fn with_max_depth( bytes: &'a [u8], max_subtree_depth: Option, ) -> Self { let Range { start, end } = bytes.as_ptr_range(); Self { subtree_range: Range { start: start as usize, end: end as usize, }, max_subtree_depth, _phantom: PhantomData, } } } unsafe impl ArchiveContext for ArchiveValidator<'_> { fn check_subtree_ptr( &mut self, ptr: *const u8, layout: &Layout, ) -> Result<(), E> { let start = ptr as usize; let end = ptr.wrapping_add(layout.size()) as usize; if start < self.subtree_range.start || end > self.subtree_range.end { fail!(InvalidSubtreePointer { address: start, size: layout.size(), subtree_range: self.subtree_range.clone(), }); } else if start & (layout.align() - 1) != 0 { fail!(UnalignedPointer { address: ptr as usize, align: layout.align(), }); } else { Ok(()) } } unsafe fn push_subtree_range( &mut self, root: *const u8, end: *const u8, ) -> Result, E> { if let Some(max_subtree_depth) = &mut self.max_subtree_depth { *max_subtree_depth = NonZeroUsize::new(max_subtree_depth.get() - 1) .into_trace(ExceededMaximumSubtreeDepth)?; } let result = Range { start: end as usize, end: self.subtree_range.end, }; self.subtree_range.end = root as usize; Ok(result) } unsafe fn pop_subtree_range( &mut self, range: Range, ) -> Result<(), E> { if range.start < self.subtree_range.end { fail!(RangePoppedOutOfOrder); } 
self.subtree_range = range; if let Some(max_subtree_depth) = &mut self.max_subtree_depth { *max_subtree_depth = max_subtree_depth .checked_add(1) .into_trace(RangePoppedTooManyTimes)?; } Ok(()) } } rkyv-0.8.9/src/validation/mod.rs000064400000000000000000000233421046102023000147070ustar 00000000000000//! Validation implementations and helper types. pub mod archive; pub mod shared; use core::{any::TypeId, ops::Range}; pub use self::{ archive::{ArchiveContext, ArchiveContextExt}, shared::SharedContext, }; /// The default validator. #[derive(Debug)] pub struct Validator { archive: A, shared: S, } impl Validator { /// Creates a new validator from a byte range. #[inline] pub fn new(archive: A, shared: S) -> Self { Self { archive, shared } } } unsafe impl ArchiveContext for Validator where A: ArchiveContext, { fn check_subtree_ptr( &mut self, ptr: *const u8, layout: &core::alloc::Layout, ) -> Result<(), E> { self.archive.check_subtree_ptr(ptr, layout) } unsafe fn push_subtree_range( &mut self, root: *const u8, end: *const u8, ) -> Result, E> { // SAFETY: This just forwards the call to the underlying `CoreValidator` // which has the same safety requirements. unsafe { self.archive.push_subtree_range(root, end) } } unsafe fn pop_subtree_range( &mut self, range: Range, ) -> Result<(), E> { // SAFETY: This just forwards the call to the underlying `CoreValidator` // which has the same safety requirements. 
unsafe { self.archive.pop_subtree_range(range) } } } impl SharedContext for Validator where S: SharedContext, { fn start_shared( &mut self, address: usize, type_id: TypeId, ) -> Result { self.shared.start_shared(address, type_id) } fn finish_shared( &mut self, address: usize, type_id: TypeId, ) -> Result<(), E> { self.shared.finish_shared(address, type_id) } } #[cfg(test)] mod tests { use rancor::Failure; use crate::{ api::low::{access, access_pos}, boxed::ArchivedBox, option::ArchivedOption, util::Align, Archived, }; #[test] fn basic_functionality() { #[cfg(all(feature = "pointer_width_16", not(feature = "big_endian")))] // Synthetic archive (correct) let synthetic_buf = Align([ // "Hello world" 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0u8, // padding to 2-alignment 1u8, 0u8, // Some + padding 0xf2u8, 0xffu8, // points 14 bytes backwards 11u8, 0u8, // string is 11 characters long ]); #[cfg(all(feature = "pointer_width_16", feature = "big_endian"))] // Synthetic archive (correct) let synthetic_buf = Align([ // "Hello world" 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0u8, // padding to 2-alignment 1u8, 0u8, // Some + padding 0xffu8, 0xf2u8, // points 14 bytes backwards 0u8, 11u8, // string is 11 characters long ]); #[cfg(all( not(any( feature = "pointer_width_16", feature = "pointer_width_64", )), not(feature = "big_endian"), ))] // Synthetic archive (correct) let synthetic_buf = Align([ // "Hello world" 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0u8, // padding to 4-alignment 1u8, 0u8, 0u8, 0u8, // Some + padding 0xf0u8, 0xffu8, 0xffu8, 0xffu8, // points 16 bytes backward 11u8, 0u8, 0u8, 0u8, // string is 11 characters long ]); #[cfg(all( not(any( feature = "pointer_width_16", feature = "pointer_width_64", )), feature = "big_endian", ))] // Synthetic archive (correct) let synthetic_buf = Align([ // "Hello world" 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0u8, // padding to 
4-alignment 1u8, 0u8, 0u8, 0u8, // Some + padding 0xffu8, 0xffu8, 0xffu8, 0xf0u8, // points 16 bytes backward 0u8, 0u8, 0u8, 11u8, // string is 11 characters long ]); #[cfg(all(feature = "pointer_width_64", not(feature = "big_endian")))] // Synthetic archive (correct) let synthetic_buf = Align([ // "Hello world" 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0u8, 0u8, 0u8, 0u8, 0u8, // padding to 8-alignment 1u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, // Some + padding // points 24 bytes backward 0xe8u8, 0xffu8, 0xffu8, 0xffu8, 0xffu8, 0xffu8, 0xffu8, 0xffu8, 11u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, // string is 11 characters long ]); #[cfg(all(feature = "pointer_width_64", feature = "big_endian"))] // Synthetic archive (correct) let synthetic_buf = Align([ // "Hello world!!!!!" because otherwise the string will get inlined 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x21, 0x21, 0x21, 0x21, 0x21, 1u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, // Some + padding // points 24 bytes backward 0xffu8, 0xffu8, 0xffu8, 0xffu8, 0xffu8, 0xffu8, 0xffu8, 0xe8u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 11u8, // string is 11 characters long ]); let result = access::>, Failure>( &*synthetic_buf, ); result.unwrap(); // Out of bounds access_pos::, Failure>(&*Align([0, 1, 2, 3, 4]), 8) .expect_err("expected out of bounds error"); // Overrun access_pos::, Failure>(&*Align([0, 1, 2, 3, 4]), 4) .expect_err("expected overrun error"); // Unaligned access_pos::, Failure>(&*Align([0, 1, 2, 3, 4]), 1) .expect_err("expected unaligned error"); // Underaligned access_pos::, Failure>(&Align([0, 1, 2, 3, 4])[1..], 0) .expect_err("expected underaligned error"); // Undersized access::, Failure>(&*Align([])) .expect_err("expected out of bounds error"); } #[cfg(feature = "pointer_width_32")] #[test] fn invalid_tags() { // Invalid archive (invalid tag) let synthetic_buf = Align([ 2u8, 0u8, 0u8, 0u8, // invalid tag + padding 8u8, 0u8, 0u8, 0u8, // points 8 bytes forward 11u8, 
0u8, 0u8, 0u8, // string is 11 characters long // "Hello world" 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, ]); let result = access_pos::>>, Failure>( &*synthetic_buf, 0, ); result.unwrap_err(); } #[cfg(feature = "pointer_width_32")] #[test] fn overlapping_claims() { // Invalid archive (overlapping claims) let synthetic_buf = Align([ // First string 16u8, 0u8, 0u8, 0u8, // points 16 bytes forward 11u8, 0u8, 0u8, 0u8, // string is 11 characters long // Second string 8u8, 0u8, 0u8, 0u8, // points 8 bytes forward 11u8, 0u8, 0u8, 0u8, // string is 11 characters long // "Hello world" 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, ]); access_pos::; 2]>, Failure>(&*synthetic_buf, 0) .unwrap_err(); } #[cfg(feature = "pointer_width_32")] #[test] fn cycle_detection() { use bytecheck::CheckBytes; use rancor::{Fallible, Source}; use crate::{ ser::Writer, validation::ArchiveContext, Archive, Serialize, }; #[allow(dead_code)] #[derive(Archive)] #[rkyv(crate, derive(Debug))] enum Node { Nil, Cons(#[omit_bounds] Box), } impl Serialize for Node { fn serialize( &self, serializer: &mut S, ) -> Result { Ok(match self { Node::Nil => NodeResolver::Nil, Node::Cons(inner) => { NodeResolver::Cons(inner.serialize(serializer)?) 
} }) } } unsafe impl CheckBytes for ArchivedNode where C: Fallible + ArchiveContext + ?Sized, C::Error: Source, { unsafe fn check_bytes( value: *const Self, context: &mut C, ) -> Result<(), C::Error> { let bytes = value.cast::(); let tag = unsafe { *bytes }; match tag { 0 => (), 1 => unsafe { > as CheckBytes>::check_bytes( bytes.add(4).cast(), context, )?; }, _ => panic!(), } Ok(()) } } // Invalid archive (cyclic claims) let synthetic_buf = Align([ // First node 1u8, 0u8, 0u8, 0u8, // Cons 4u8, 0u8, 0u8, 0u8, // Node is 4 bytes forward // Second string 1u8, 0u8, 0u8, 0u8, // Cons 244u8, 255u8, 255u8, 255u8, // Node is 12 bytes back ]); access_pos::(&*synthetic_buf, 0).unwrap_err(); } } rkyv-0.8.9/src/validation/shared/mod.rs000064400000000000000000000036511046102023000161560ustar 00000000000000//! Shared pointer validation. #[cfg(feature = "alloc")] mod validator; use core::any::TypeId; use rancor::{Fallible, Strategy}; #[cfg(feature = "alloc")] pub use self::validator::*; /// The result of starting to validate a shared pointer. pub enum ValidationState { /// The caller started validating this value. They should proceed to check /// the shared value and call `finish_shared`. Started, /// Another caller started validating this value, but has not finished yet. /// This can only occur with cyclic shared pointer structures, and so rkyv /// treats this as an error by default. Pending, /// This value has already been validated. Finished, } /// A context that can validate shared archive memory. /// /// Shared pointers require this kind of context to validate. pub trait SharedContext::Error> { /// Starts validating the value associated with the given address. /// /// Returns an error if the value associated with the given address was /// started with a different type ID. fn start_shared( &mut self, address: usize, type_id: TypeId, ) -> Result; /// Finishes validating the value associated with the given address. /// /// Returns an error if the given address was not pending. 
fn finish_shared( &mut self, address: usize, type_id: TypeId, ) -> Result<(), E>; } impl SharedContext for Strategy where T: SharedContext, { fn start_shared( &mut self, address: usize, type_id: TypeId, ) -> Result { T::start_shared(self, address, type_id) } fn finish_shared( &mut self, address: usize, type_id: TypeId, ) -> Result<(), E> { T::finish_shared(self, address, type_id) } } rkyv-0.8.9/src/validation/shared/validator.rs000064400000000000000000000071521046102023000173640ustar 00000000000000//! Validators add validation capabilities by wrapping and extending basic //! validators. use core::{any::TypeId, error::Error, fmt, hash::BuildHasherDefault}; #[cfg(feature = "std")] use std::collections::hash_map; #[cfg(not(feature = "std"))] use hashbrown::hash_map; use rancor::{fail, Source}; use crate::{ hash::FxHasher64, validation::{shared::ValidationState, SharedContext}, }; /// A validator that can verify shared pointers. #[derive(Debug, Default)] pub struct SharedValidator { shared: hash_map::HashMap< usize, (TypeId, bool), BuildHasherDefault, >, } impl SharedValidator { /// Creates a new shared pointer validator. #[inline] pub fn new() -> Self { Self::default() } /// Creates a new shared pointer validator with specific capacity. 
#[inline] pub fn with_capacity(capacity: usize) -> Self { Self { shared: hash_map::HashMap::with_capacity_and_hasher( capacity, Default::default(), ), } } } #[derive(Debug)] struct TypeMismatch { previous: TypeId, current: TypeId, } impl fmt::Display for TypeMismatch { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "the same memory region has been claimed as two different types: \ {:?} and {:?}", self.previous, self.current, ) } } impl Error for TypeMismatch {} #[derive(Debug)] struct NotStarted; impl fmt::Display for NotStarted { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "shared pointer was not started validation") } } impl Error for NotStarted {} #[derive(Debug)] struct AlreadyFinished; impl fmt::Display for AlreadyFinished { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "shared pointer was already finished validation") } } impl Error for AlreadyFinished {} impl SharedContext for SharedValidator { fn start_shared( &mut self, address: usize, type_id: TypeId, ) -> Result { match self.shared.entry(address) { hash_map::Entry::Vacant(vacant) => { vacant.insert((type_id, false)); Ok(ValidationState::Started) } hash_map::Entry::Occupied(occupied) => { let (previous_type_id, finished) = occupied.get(); if previous_type_id != &type_id { fail!(TypeMismatch { previous: *previous_type_id, current: type_id, }) } else if !finished { Ok(ValidationState::Pending) } else { Ok(ValidationState::Finished) } } } } fn finish_shared( &mut self, address: usize, type_id: TypeId, ) -> Result<(), E> { match self.shared.entry(address) { hash_map::Entry::Vacant(_) => fail!(NotStarted), hash_map::Entry::Occupied(mut occupied) => { let (previous_type_id, finished) = occupied.get_mut(); if previous_type_id != &type_id { fail!(TypeMismatch { previous: *previous_type_id, current: type_id, }); } else if *finished { fail!(AlreadyFinished); } else { *finished = true; Ok(()) } } } } } 
rkyv-0.8.9/src/vec.rs000064400000000000000000000207621046102023000125560ustar 00000000000000//! An archived version of `Vec`. use core::{ borrow::Borrow, cmp, fmt, hash, ops::{Deref, Index}, slice::SliceIndex, }; use munge::munge; use rancor::Fallible; use crate::{ primitive::{ArchivedUsize, FixedUsize}, seal::Seal, ser::{Allocator, Writer, WriterExt as _}, Archive, Place, Portable, RelPtr, Serialize, SerializeUnsized, }; /// An archived [`Vec`]. /// /// This uses a [`RelPtr`] to a `[T]` under the hood. Unlike /// [`ArchivedString`](crate::string::ArchivedString), it does not have an /// inline representation. #[derive(Portable)] #[cfg_attr( feature = "bytecheck", derive(bytecheck::CheckBytes), bytecheck(verify) )] #[rkyv(crate)] #[repr(C)] pub struct ArchivedVec { ptr: RelPtr, len: ArchivedUsize, } impl ArchivedVec { /// Returns a pointer to the first element of the archived vec. pub fn as_ptr(&self) -> *const T { unsafe { self.ptr.as_ptr() } } /// Returns the number of elements in the archived vec. pub fn len(&self) -> usize { self.len.to_native() as usize } /// Returns whether the archived vec is empty. pub fn is_empty(&self) -> bool { self.len() == 0 } /// Gets the elements of the archived vec as a slice. pub fn as_slice(&self) -> &[T] { unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len()) } } /// Gets the elements of the archived vec as a sealed mutable slice. pub fn as_slice_seal(this: Seal<'_, Self>) -> Seal<'_, [T]> { let len = this.len(); munge!(let Self { ptr, .. } = this); let slice = unsafe { core::slice::from_raw_parts_mut(RelPtr::as_mut_ptr(ptr), len) }; Seal::new(slice) } /// Resolves an archived `Vec` from a given slice. pub fn resolve_from_slice>( slice: &[U], resolver: VecResolver, out: Place, ) { Self::resolve_from_len(slice.len(), resolver, out); } /// Resolves an archived `Vec` from a given length. 
pub fn resolve_from_len( len: usize, resolver: VecResolver, out: Place, ) { munge!(let ArchivedVec { ptr, len: out_len } = out); RelPtr::emplace(resolver.pos as usize, ptr); usize::resolve(&len, (), out_len); } /// Serializes an archived `Vec` from a given slice. pub fn serialize_from_slice< U: Serialize, S: Fallible + Allocator + Writer + ?Sized, >( slice: &[U], serializer: &mut S, ) -> Result { Ok(VecResolver { pos: slice.serialize_unsized(serializer)? as FixedUsize, }) } /// Serializes an archived `Vec` from a given iterator. /// /// This method is unable to perform copy optimizations; prefer /// [`serialize_from_slice`](ArchivedVec::serialize_from_slice) when /// possible. pub fn serialize_from_iter( iter: I, serializer: &mut S, ) -> Result where U: Serialize, I: ExactSizeIterator + Clone, I::Item: Borrow, S: Fallible + Allocator + Writer + ?Sized, { use crate::util::SerVec; SerVec::with_capacity( serializer, iter.len(), |resolvers, serializer| { for value in iter.clone() { let resolver = value.borrow().serialize(serializer)?; resolvers.push(resolver); } let pos = serializer.align_for::()?; for (value, resolver) in iter.zip(resolvers.drain()) { unsafe { serializer.resolve_aligned(value.borrow(), resolver)?; } } Ok(VecResolver { pos: pos as FixedUsize, }) }, )? } /// Serializes an archived `Vec` from a given iterator. Compared to /// `serialize_from_iter()`, this function: /// - supports iterators whose length is not known in advance, and /// - does not collect the data in memory before serializing. /// /// This method will panic if any item writes during `serialize` (i.e no /// additional data written per item). 
pub fn serialize_from_unknown_length_iter( iter: &mut I, serializer: &mut S, ) -> Result where B: Serialize, I: Iterator, S: Fallible + Allocator + Writer + ?Sized, { unsafe { let pos = serializer.align_for::()?; for value in iter { let pos_cached = serializer.pos(); let resolver = value.serialize(serializer)?; assert!(serializer.pos() == pos_cached); serializer.resolve_aligned(value.borrow(), resolver)?; } Ok(VecResolver { pos: pos as FixedUsize, }) } } } impl AsRef<[T]> for ArchivedVec { fn as_ref(&self) -> &[T] { self.as_slice() } } impl Borrow<[T]> for ArchivedVec { fn borrow(&self) -> &[T] { self.as_slice() } } impl fmt::Debug for ArchivedVec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.as_slice()).finish() } } impl Deref for ArchivedVec { type Target = [T]; fn deref(&self) -> &Self::Target { self.as_slice() } } impl Eq for ArchivedVec {} impl hash::Hash for ArchivedVec { fn hash(&self, state: &mut H) { self.as_slice().hash(state) } } impl> Index for ArchivedVec { type Output = <[T] as Index>::Output; fn index(&self, index: I) -> &Self::Output { self.as_slice().index(index) } } impl Ord for ArchivedVec { fn cmp(&self, other: &Self) -> cmp::Ordering { self.as_slice().cmp(other.as_slice()) } } impl, U> PartialEq> for ArchivedVec { fn eq(&self, other: &ArchivedVec) -> bool { self.as_slice().eq(other.as_slice()) } } impl, U, const N: usize> PartialEq<[U; N]> for ArchivedVec { fn eq(&self, other: &[U; N]) -> bool { self.as_slice().eq(&other[..]) } } impl, U, const N: usize> PartialEq> for [U; N] { fn eq(&self, other: &ArchivedVec) -> bool { other.eq(self) } } impl, U> PartialEq<[U]> for ArchivedVec { fn eq(&self, other: &[U]) -> bool { self.as_slice().eq(other) } } impl, U> PartialEq> for [T] { fn eq(&self, other: &ArchivedVec) -> bool { self.eq(other.as_slice()) } } impl PartialOrd> for ArchivedVec { fn partial_cmp(&self, other: &ArchivedVec) -> Option { self.as_slice().partial_cmp(other.as_slice()) } } impl 
PartialOrd<[T]> for ArchivedVec { fn partial_cmp(&self, other: &[T]) -> Option { self.as_slice().partial_cmp(other) } } impl PartialOrd> for [T] { fn partial_cmp(&self, other: &ArchivedVec) -> Option { self.partial_cmp(other.as_slice()) } } /// The resolver for [`ArchivedVec`]. pub struct VecResolver { pos: FixedUsize, } impl VecResolver { /// Creates a new `VecResolver` from a position in the output buffer where /// the elements of the archived vector are stored. pub fn from_pos(pos: usize) -> Self { Self { pos: pos as FixedUsize, } } } #[cfg(feature = "bytecheck")] mod verify { use bytecheck::{ rancor::{Fallible, Source}, CheckBytes, Verify, }; use crate::{ validation::{ArchiveContext, ArchiveContextExt}, vec::ArchivedVec, }; unsafe impl Verify for ArchivedVec where T: CheckBytes, C: Fallible + ArchiveContext + ?Sized, C::Error: Source, { fn verify(&self, context: &mut C) -> Result<(), C::Error> { let ptr = core::ptr::slice_from_raw_parts( self.ptr.as_ptr_wrapping(), self.len.to_native() as usize, ); context.in_subtree(ptr, |context| unsafe { <[T]>::check_bytes(ptr, context) }) } } } rkyv-0.8.9/src/with.rs000064400000000000000000000443721046102023000127570ustar 00000000000000//! Wrapper type support and commonly used wrappers. //! //! Wrappers can be applied with the `#[rkyv(with = ..)]` attribute in the //! [`Archive`](macro@crate::Archive) macro. // mod impls; use core::{fmt, marker::PhantomData}; use rancor::Fallible; #[doc(inline)] pub use crate::niche::niching::DefaultNiche; use crate::{Archive, Deserialize, Place, Portable, Serialize}; /// A variant of [`Archive`] that works with wrappers. /// /// Creating a wrapper allows users to customize how fields are archived easily /// without changing the unarchived type. /// /// This trait allows wrapper types to transparently change the archive /// behaviors for struct and enum fields. 
When a field is serialized, it may use /// the implementations for the wrapper type and the given field instead of the /// implementation for the type itself. /// /// Only a single implementation of [`Archive`] may be written /// for each type, but multiple implementations of ArchiveWith can be written /// for the same type because it is parametric over the wrapper type. This is /// used with the `#[rkyv(with = ..)]` macro attribute to provide a more /// flexible interface for serialization. /// /// # Example /// /// ``` /// use rkyv::{ /// access_unchecked, deserialize, /// rancor::{Error, Fallible, Infallible, ResultExt as _}, /// to_bytes, /// with::{ArchiveWith, DeserializeWith, SerializeWith}, /// Archive, Archived, Deserialize, Place, Resolver, Serialize, /// }; /// /// struct Incremented; /// /// impl ArchiveWith for Incremented { /// type Archived = Archived; /// type Resolver = Resolver; /// /// fn resolve_with(field: &i32, _: (), out: Place) { /// let incremented = field + 1; /// incremented.resolve((), out); /// } /// } /// /// impl SerializeWith for Incremented /// where /// S: Fallible + ?Sized, /// i32: Serialize, /// { /// fn serialize_with( /// field: &i32, /// serializer: &mut S, /// ) -> Result { /// let incremented = field + 1; /// incremented.serialize(serializer) /// } /// } /// /// impl DeserializeWith, i32, D> for Incremented /// where /// D: Fallible + ?Sized, /// Archived: Deserialize, /// { /// fn deserialize_with( /// field: &Archived, /// deserializer: &mut D, /// ) -> Result { /// Ok(field.deserialize(deserializer)? 
- 1) /// } /// } /// /// #[derive(Archive, Deserialize, Serialize)] /// struct Example { /// #[rkyv(with = Incremented)] /// a: i32, /// // Another i32 field, but not incremented this time /// b: i32, /// } /// /// let value = Example { a: 4, b: 9 }; /// /// let buf = to_bytes::(&value).unwrap(); /// /// let archived = /// unsafe { access_unchecked::>(buf.as_ref()) }; /// // The wrapped field has been incremented /// assert_eq!(archived.a, 5); /// // ... and the unwrapped field has not /// assert_eq!(archived.b, 9); /// /// let deserialized = deserialize::(archived).always_ok(); /// // The wrapped field is back to normal /// assert_eq!(deserialized.a, 4); /// // ... and the unwrapped field is unchanged /// assert_eq!(deserialized.b, 9); /// ``` pub trait ArchiveWith { /// The archived type of `Self` with `F`. type Archived: Portable; /// The resolver of a `Self` with `F`. type Resolver; /// Resolves the archived type using a reference to the field type `F`. fn resolve_with( field: &F, resolver: Self::Resolver, out: Place, ); } /// A variant of `Serialize` for "with" types. /// /// See [ArchiveWith] for more details. pub trait SerializeWith: ArchiveWith { /// Serializes the field type `F` using the given serializer. fn serialize_with( field: &F, serializer: &mut S, ) -> Result; } /// A variant of `Deserialize` for "with" types. /// /// See [ArchiveWith] for more details. pub trait DeserializeWith { /// Deserializes the field type `F` using the given deserializer. fn deserialize_with(field: &F, deserializer: &mut D) -> Result; } /// A transparent wrapper which applies a "with" type. /// /// `With` wraps a reference to a type and applies the specified wrapper type /// when serializing and deserializing. #[repr(transparent)] pub struct With { _phantom: PhantomData, field: F, } impl With { /// Casts a `With` reference from a reference to the underlying field. 
pub fn cast(field: &F) -> &Self { // SAFETY: `With` is `repr(transparent)` and so a reference to `F` can // always be transmuted into a reference to `With`. unsafe { ::core::mem::transmute::<&F, &Self>(field) } } } impl> Archive for With { type Archived = >::Archived; type Resolver = >::Resolver; fn resolve(&self, resolver: Self::Resolver, out: Place) { W::resolve_with(&self.field, resolver, out); } } impl Serialize for With where S: Fallible + ?Sized, F: ?Sized, W: SerializeWith, { fn serialize( &self, serializer: &mut S, ) -> Result::Error> { W::serialize_with(&self.field, serializer) } } impl Deserialize for With where D: Fallible + ?Sized, F: ?Sized, W: DeserializeWith, { fn deserialize( &self, deserializer: &mut D, ) -> Result::Error> { W::deserialize_with(&self.field, deserializer) } } /// A wrapper that applies another wrapper to the values contained in a type. /// This can be applied to a vector to map each element, or an option to map any /// contained value. /// /// See [ArchiveWith] for more details. /// /// # Example /// /// ``` /// use rkyv::{ /// with::{InlineAsBox, Map}, /// Archive, /// }; /// /// #[derive(Archive)] /// struct Example<'a> { /// // This will apply `InlineAsBox` to the `&i32` contained in this option /// #[rkyv(with = Map)] /// option: Option<&'a i32>, /// // This will apply `InlineAsBox` to each `&i32` contained in this vector /// #[rkyv(with = Map)] /// vec: Vec<&'a i32>, /// } /// ``` pub struct Map { _phantom: PhantomData, } /// A wrapper that applies key and value wrappers to the key-value pairs /// contained in a type. This can be applied to a hash map or B-tree map to map /// the key-value pairs. /// /// # Example /// ``` /// use std::collections::HashMap; /// /// use rkyv::{ /// with::{Inline, InlineAsBox, MapKV}, /// Archive, /// }; /// /// #[derive(Archive)] /// struct Example<'a> { /// // This will apply `InlineAsBox` to the `&str` key, and `Inline` to the /// // `&str` value. 
/// #[rkyv(with = MapKV)] /// hash_map: HashMap<&'a str, &'a str>, /// } /// ``` pub struct MapKV { _phantom: PhantomData<(K, V)>, } /// A type indicating relaxed atomic loads. pub struct Relaxed; /// A type indicating acquire atomic loads. pub struct Acquire; /// A type indicating sequentially-consistent atomic loads. pub struct SeqCst; /// A wrapper that archives an atomic by loading its value with a particular /// ordering. /// /// When serializing, the specified ordering will be used to load the value from /// the source atomic. The underlying archived type is still a non-atomic value. /// /// # Example /// /// ``` /// # #[cfg(target_has_atomic = "32")] /// use core::sync::atomic::AtomicU32; /// /// use rkyv::{ /// with::{AtomicLoad, Relaxed}, /// Archive, /// }; /// /// # #[cfg(target_has_atomic = "32")] /// #[derive(Archive)] /// struct Example { /// #[rkyv(with = AtomicLoad)] /// a: AtomicU32, /// } /// ``` #[derive(Debug)] pub struct AtomicLoad { _phantom: PhantomData, } /// A wrapper that serializes a reference inline. /// /// References serialized with `Inline` cannot be deserialized because the /// struct cannot own the deserialized value. /// /// # Example /// /// ``` /// use rkyv::{with::Inline, Archive}; /// /// #[derive(Archive)] /// struct Example<'a> { /// #[rkyv(with = Inline)] /// a: &'a i32, /// } /// ``` #[derive(Debug)] pub struct Inline; /// A wrapper that serializes a field into a box. /// /// This functions similarly to [`InlineAsBox`], but is for regular fields /// instead of references. /// /// # Example /// /// ``` /// use rkyv::{with::AsBox, Archive}; /// /// #[derive(Archive)] /// struct Example { /// #[rkyv(with = AsBox)] /// a: i32, /// #[rkyv(with = AsBox)] /// b: str, /// } /// ``` #[derive(Debug)] pub struct AsBox; /// A wrapper that serializes a reference as if it were boxed. /// /// Unlike [`Inline`], unsized references can be serialized with `InlineAsBox`. 
/// /// References serialized with `InlineAsBox` cannot be deserialized because the /// struct cannot own the deserialized value. /// /// # Example /// /// ``` /// use rkyv::{with::InlineAsBox, Archive}; /// /// #[derive(Archive)] /// struct Example<'a> { /// #[rkyv(with = InlineAsBox)] /// a: &'a i32, /// #[rkyv(with = InlineAsBox)] /// b: &'a str, /// } /// ``` #[derive(Debug)] pub struct InlineAsBox; /// A wrapper that attempts to convert a type to and from UTF-8. /// /// Types like `OsString` and `PathBuf` aren't guaranteed to be encoded as /// UTF-8, but they usually are anyway. Using this wrapper will archive them as /// if they were regular `String`s. /// /// # Example /// /// ``` /// use std::{ffi::OsString, path::PathBuf}; /// /// use rkyv::{with::AsString, Archive}; /// /// #[derive(Archive)] /// struct Example { /// #[rkyv(with = AsString)] /// os_string: OsString, /// #[rkyv(with = AsString)] /// path: PathBuf, /// } /// ``` #[derive(Debug)] pub struct AsString; /// A wrapper that locks a lock and serializes the value immutably. /// /// This wrapper can panic under very specific circumstances when: /// /// 1. `serialize_with` is called and succeeds in locking the value to serialize /// it. /// 2. Another thread locks the value and panics, poisoning the lock /// 3. `resolve_with` is called and gets a poisoned value. /// /// Unfortunately, it's not possible to work around this issue internally. Users /// must ensure this doesn't happen on their own through manual synchronization /// or guaranteeing that panics do not occur while holding locks. /// /// # Example /// /// ``` /// use std::sync::Mutex; /// /// use rkyv::{with::Lock, Archive}; /// /// #[derive(Archive)] /// struct Example { /// #[rkyv(with = Lock)] /// a: Mutex, /// } /// ``` #[derive(Debug)] pub struct Lock; /// A wrapper that serializes a `Cow` as if it were owned. 
/// /// # Example /// /// ``` /// use std::borrow::Cow; /// /// use rkyv::{with::AsOwned, Archive}; /// /// #[derive(Archive)] /// struct Example<'a> { /// #[rkyv(with = AsOwned)] /// a: Cow<'a, str>, /// } /// ``` #[derive(Debug)] pub struct AsOwned; /// A wrapper that serializes associative containers as a `Vec` of key-value /// pairs. /// /// This provides faster serialization for containers like `HashMap` and /// `BTreeMap` by serializing the key-value pairs directly instead of building a /// data structure in the buffer. /// /// # Example /// /// ``` /// use std::collections::HashMap; /// /// use rkyv::{with::AsVec, Archive}; /// /// #[derive(Archive)] /// struct Example { /// #[rkyv(with = AsVec)] /// values: HashMap, /// } /// ``` #[derive(Debug)] pub struct AsVec; /// A wrapper that niches some type combinations. /// /// A common type combination is `Option>`. By using a null pointer, the /// archived version can save some space on-disk. /// /// # Example /// /// ``` /// use core::mem::size_of; /// /// use rkyv::{with::Niche, Archive, Archived}; /// /// #[derive(Archive)] /// struct BasicExample { /// value: Option>, /// } /// /// #[derive(Archive)] /// struct NichedExample { /// #[rkyv(with = Niche)] /// value: Option>, /// } /// /// assert!( /// size_of::>() /// > size_of::>() /// ); /// ``` #[derive(Debug)] pub struct Niche; /// A wrapper that niches based on a generic [`Niching`]. /// /// A common type combination is `Option>`. By niching `None` into the /// null pointer, the archived version can save some space on-disk. 
/// /// # Example /// /// ``` /// use core::mem::size_of; /// /// use rkyv::{ /// niche::niching::{NaN, Null}, /// with::NicheInto, /// Archive, Archived, /// }; /// /// #[derive(Archive)] /// struct BasicExample { /// maybe_box: Option>, /// maybe_non_nan: Option, /// } /// /// #[derive(Archive)] /// struct NichedExample { /// #[rkyv(with = NicheInto)] /// maybe_box: Option>, /// #[rkyv(with = NicheInto)] /// maybe_non_nan: Option, /// } /// /// assert!( /// size_of::>() /// > size_of::>() /// ); /// ``` /// /// [`Niching`]: crate::niche::niching::Niching pub struct NicheInto(PhantomData); impl Default for NicheInto { fn default() -> Self { Self(PhantomData) } } impl fmt::Debug for NicheInto { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("NicheInto") } } /// A wrapper that first applies another wrapper `W` to the value inside an /// `Option` and then niches the result based on the [`Niching`] `N`. /// /// # Example /// /// ``` /// use rkyv::{ /// with::{AsBox, MapNiche}, /// Archive, Serialize, /// }; /// /// #[derive(Archive, Serialize)] /// struct BasicExample { /// option: Option, /// } /// /// #[derive(Archive, Serialize)] /// struct NichedExample { /// #[rkyv(with = MapNiche)] /// option: Option, /// } /// /// #[derive(Archive, Serialize)] /// struct HugeType([u8; 1024]); /// /// # fn main() -> Result<(), rkyv::rancor::Error> { /// let basic_value = BasicExample { option: None }; /// let basic_bytes = rkyv::to_bytes(&basic_value)?; /// assert_eq!(basic_bytes.len(), 1 + 1024); /// /// let niched_value = NichedExample { option: None }; /// let niched_bytes = rkyv::to_bytes(&niched_value)?; /// assert_eq!(niched_bytes.len(), 4); // size_of::>() /// # Ok(()) } /// ``` /// /// [`Niching`]: crate::niche::niching::Niching pub struct MapNiche { _map: PhantomData, _niching: PhantomData, } impl Default for MapNiche { fn default() -> Self { Self { _map: PhantomData, _niching: PhantomData, } } } impl fmt::Debug for MapNiche { fn fmt(&self, f: 
&mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("MapNiche") } } /// A wrapper that converts a [`SystemTime`](std::time::SystemTime) to a /// [`Duration`](std::time::Duration) since /// [`UNIX_EPOCH`](std::time::UNIX_EPOCH). /// /// If the serialized time occurs before the UNIX epoch, serialization will /// panic during `resolve`. The resulting archived time will be an /// [`ArchivedDuration`](crate::time::ArchivedDuration) relative to the UNIX /// epoch. /// /// # Example /// /// ``` /// use rkyv::{Archive, with::AsUnixTime}; /// use std::time::SystemTime; /// /// #[derive(Archive)] /// struct Example { /// #[rkyv(with = AsUnixTime)] /// time: SystemTime, /// } #[derive(Debug)] pub struct AsUnixTime; /// A wrapper that allows serialize-unsafe types to be serialized. /// /// Types like `Cell` and `UnsafeCell` may contain serializable types, but have /// unsafe access semantics due to interior mutability. They may be safe to /// serialize, but only under conditions that rkyv is unable to guarantee. /// /// This wrapper enables serializing these types, and places the burden of /// verifying that their access semantics are used safely on the user. /// /// # Safety /// /// Using this wrapper on types with interior mutability can create races /// conditions or allow access to data in an invalid state if access semantics /// are not followed properly. During serialization, the data must not be /// modified. /// /// # Example /// /// ``` /// use core::cell::{Cell, UnsafeCell}; /// /// use rkyv::{with::Unsafe, Archive}; /// /// #[derive(Archive)] /// struct Example { /// #[rkyv(with = Unsafe)] /// cell: Cell, /// #[rkyv(with = Unsafe)] /// unsafe_cell: UnsafeCell, /// } /// ``` #[derive(Debug)] pub struct Unsafe; /// A wrapper that skips serializing a field. /// /// Skipped fields must implement `Default` to be deserialized. 
/// /// # Example /// /// ``` /// use rkyv::{with::Skip, Archive}; /// /// #[derive(Archive)] /// struct Example { /// #[rkyv(with = Skip)] /// a: u32, /// } /// ``` #[derive(Debug)] pub struct Skip; /// A wrapper that clones the contents of `Arc` and `Rc` pointers. #[derive(Debug)] pub struct Unshare; /// A no-op wrapper which uses the default impls for the type. /// /// This is most useful for wrappers like [`MapKV`] when you only want to apply /// a wrapper to either the key or the value. /// /// # Example /// /// ``` /// use std::collections::HashMap; /// /// use rkyv::{ /// with::{Identity, Inline, MapKV}, /// Archive, /// }; /// /// #[derive(Archive)] /// struct Example<'a> { /// #[rkyv(with = MapKV)] /// a: HashMap, /// } /// ``` #[derive(Debug)] pub struct Identity; rkyv-0.8.9/tests/derive.rs000064400000000000000000000276631046102023000136410ustar 00000000000000use std::{fmt::Debug, marker::PhantomData, mem::MaybeUninit}; use rancor::{Fallible, Panic, ResultExt, Source, Strategy}; use rkyv::{ api::low::LowSerializer, ser::{allocator::SubAllocator, writer::Buffer, Writer}, with::{ArchiveWith, DeserializeWith, Map, SerializeWith}, Archive, Archived, Deserialize, Place, Resolver, Serialize, }; type ArchivedWith = >::Archived; fn roundtrip(remote: &T) where F: ArchiveWith + for<'a, 'b> SerializeWith> + DeserializeWith, T, Strategy<(), Panic>>, T: Debug + PartialEq, { let mut bytes = [0_u8; 128]; let buf = serialize::(remote, &mut bytes); let archived = access::(&buf); let deserialized: T = F::deserialize_with(archived, Strategy::wrap(&mut ())).always_ok(); assert_eq!(remote, &deserialized); } #[test] fn named_struct() { #[derive(Debug, PartialEq)] struct Remote<'a, A> { a: u8, b: PhantomData<&'a A>, c: Option, } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = Remote<'a, A>)] struct Example<'a, A> { a: u8, #[rkyv(with = Identity)] b: PhantomData<&'a A>, #[rkyv(with = Map)] c: Option, } impl<'a, A> From> for Remote<'a, A> { fn from(value: Example<'a, 
A>) -> Self { Remote { a: value.a, b: value.b, c: value.c, } } } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = Remote<'a, A>)] struct Partial<'a, A> { b: PhantomData<&'a A>, #[rkyv(with = Map)] c: Option, } impl<'a, A> From> for Remote<'a, A> { fn from(archived: Partial<'a, A>) -> Self { Self { a: 42, b: archived.b, c: archived.c, } } } let remote = Remote { a: 42, b: PhantomData, c: Some(Foo::default()), }; roundtrip::, _>(&remote); roundtrip::, _>(&remote); } #[test] fn unnamed_struct() { #[derive(Debug, PartialEq)] struct Remote<'a, A>(u8, PhantomData<&'a A>, Option); #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = Remote::<'a, A>)] struct Example<'a, A>( u8, #[rkyv(with = Identity)] PhantomData<&'a A>, #[rkyv(with = Map)] Option, ); impl<'a, A> From> for Remote<'a, A> { fn from(value: Example<'a, A>) -> Self { Remote(value.0, value.1, value.2) } } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = Remote::<'a, A>)] struct Partial<'a, A>( u8, #[rkyv(with = Identity)] PhantomData<&'a A>, // Only trailing fields may be omitted for unnamed structs ); impl<'a, A> From> for Remote<'a, A> { fn from(archived: Partial<'a, A>) -> Self { Remote(archived.0, archived.1, Some(Foo::default())) } } let remote = Remote(42, PhantomData, Some(Foo::default())); roundtrip::, _>(&remote); roundtrip::, _>(&remote); } #[test] fn unit_struct() { #[derive(Debug, PartialEq)] struct Remote; #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = Remote)] struct Example; impl From for Remote { fn from(_: Example) -> Self { Self } } let remote = Remote; roundtrip::(&remote); } #[test] fn full_enum() { #[derive(Debug, PartialEq)] enum Remote<'a, A> { A, B(u8), C { a: PhantomData<&'a A>, b: Option, }, } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = Remote::<'a, A>)] enum Example<'a, A> { A, B(u8), C { #[rkyv(with = Identity)] a: PhantomData<&'a A>, #[rkyv(with = Map)] b: Option, }, } impl<'a, A> From> for Remote<'a, A> { fn from(value: 
Example<'a, A>) -> Self { match value { Example::A => Remote::A, Example::B(value) => Remote::B(value), Example::C { a, b } => Remote::C { a, b }, } } } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = Remote::<'a, A>)] // If a variant is missing (or the remote type is `#[non_exhaustive]`), one // *unit* variant must be denoted with `#[rkyv(other)]`. enum Partial<'a, A> { A, C { a: PhantomData<&'a A>, }, #[rkyv(other)] Other, } impl<'a, A> From> for Remote<'a, A> { fn from(archived: Partial<'a, A>) -> Self { match archived { Partial::A => Remote::A, Partial::C { a } => Remote::C { a, b: Some(Foo::default()), }, Partial::Other => Remote::B(42), } } } for remote in [ Remote::A, Remote::B(42), Remote::C { a: PhantomData, b: Some(Foo::default()), }, ] { roundtrip::, _>(&remote); roundtrip::, _>(&remote); } } #[test] fn named_struct_private() { mod remote { #[derive(Copy, Clone, Debug, Default, PartialEq)] pub struct Remote { inner: [u8; 4], } impl Remote { pub fn new(inner: [u8; 4]) -> Self { Self { inner } } pub fn inner(&self) -> [u8; 4] { self.inner } pub fn inner_ref(&self) -> &[u8; 4] { &self.inner } } } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = remote::Remote)] struct ExampleByRef { #[rkyv(getter = remote::Remote::inner)] inner: [u8; 4], } impl From for remote::Remote { fn from(value: ExampleByRef) -> Self { remote::Remote::new(value.inner) } } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = remote::Remote)] struct ExampleThroughRef { #[rkyv(getter = remote::Remote::inner_ref)] inner: [u8; 4], } impl From for remote::Remote { fn from(value: ExampleThroughRef) -> Self { remote::Remote::new(value.inner) } } let remote = remote::Remote::default(); roundtrip::(&remote); roundtrip::(&remote); } #[test] fn unnamed_struct_private() { mod remote { #[derive(Copy, Clone, Debug, Default, PartialEq)] pub struct Remote([u8; 4]); impl Remote { pub fn new(inner: [u8; 4]) -> Self { Self(inner) } pub fn inner(&self) -> [u8; 4] { self.0 } pub 
fn inner_ref(&self) -> &[u8; 4] { &self.0 } } } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = remote::Remote)] struct ExampleByRef(#[rkyv(getter = remote::Remote::inner)] [u8; 4]); impl From for remote::Remote { fn from(value: ExampleByRef) -> Self { remote::Remote::new(value.0) } } #[derive(Archive, Serialize, Deserialize)] #[rkyv(remote = remote::Remote)] struct ExampleThroughRef( #[rkyv(getter = remote::Remote::inner_ref)] [u8; 4], ); impl From for remote::Remote { fn from(value: ExampleThroughRef) -> Self { remote::Remote::new(value.0) } } let remote = remote::Remote::default(); roundtrip::(&remote); roundtrip::(&remote); } #[cfg(feature = "bytecheck")] pub trait CheckedArchived: for<'a> rkyv::bytecheck::CheckBytes> { } #[cfg(feature = "bytecheck")] impl< Archived: for<'a> rkyv::bytecheck::CheckBytes< rkyv::api::low::LowValidator<'a, Panic>, >, > CheckedArchived for Archived { } #[cfg(not(feature = "bytecheck"))] pub trait CheckedArchived {} #[cfg(not(feature = "bytecheck"))] impl CheckedArchived for Archived {} type Serializer<'a, 'b> = LowSerializer, SubAllocator<'a>, Panic>; fn serialize<'buf, F, T>(remote: &T, buf: &'buf mut [u8; 128]) -> Buffer<'buf> where F: for<'a, 'b> SerializeWith>, { struct Wrap<'a, F, T>(&'a T, PhantomData); impl Archive for Wrap<'_, F, T> where F: ArchiveWith, { type Archived = >::Archived; type Resolver = >::Resolver; fn resolve( &self, resolver: Self::Resolver, out: Place, ) { F::resolve_with(self.0, resolver, out) } } impl<'a, 'b, F, T> Serialize> for Wrap<'_, F, T> where F: SerializeWith>, { fn serialize( &self, serializer: &mut Serializer<'a, 'b>, ) -> Result { F::serialize_with(self.0, serializer) } } let wrap = Wrap(remote, PhantomData::); let writer = Buffer::from(buf); let mut scratch = [MaybeUninit::uninit(); 128]; let alloc = SubAllocator::new(&mut scratch); rkyv::api::low::to_bytes_in_with_alloc::<_, _, Panic>(&wrap, writer, alloc) .always_ok() } fn access(bytes: &[u8]) -> &>::Archived where F: ArchiveWith, 
{ #[cfg(feature = "bytecheck")] { rkyv::api::low::access::<>::Archived, Panic>(bytes) .always_ok() } #[cfg(not(feature = "bytecheck"))] unsafe { rkyv::access_unchecked::<>::Archived>(bytes) } } #[derive(Debug, PartialEq)] struct Foo([u8; 4]); impl Default for Foo { fn default() -> Self { Self([2, 3, 5, 7]) } } struct FooWrap; impl ArchiveWith for FooWrap { type Archived = Archived<[u8; 4]>; type Resolver = Resolver<[u8; 4]>; fn resolve_with( field: &Foo, resolver: Self::Resolver, out: Place, ) { field.0.resolve(resolver, out); } } impl SerializeWith for FooWrap where S: Fallible + Writer + ?Sized, { fn serialize_with( field: &Foo, serializer: &mut S, ) -> Result { field.0.serialize(serializer) } } impl DeserializeWith, Foo, D> for FooWrap { fn deserialize_with( archived: &Archived<[u8; 4]>, deserializer: &mut D, ) -> Result { archived.deserialize(deserializer).map(Foo) } } struct Identity; impl ArchiveWith for Identity { type Archived = Archived; type Resolver = Resolver; fn resolve_with( this: &T, resolver: Self::Resolver, out: Place, ) { this.resolve(resolver, out); } } impl> SerializeWith for Identity { fn serialize_with( this: &T, serializer: &mut S, ) -> Result::Error> { this.serialize(serializer) } } impl DeserializeWith, T, D> for Identity where D: Fallible + ?Sized, T: Archive, Archived: Deserialize, { fn deserialize_with( archived: &Archived, deserializer: &mut D, ) -> Result::Error> { archived.deserialize(deserializer) } } rkyv-0.8.9/tests/ui/derive_visibility.rs000064400000000000000000000014051046102023000165070ustar 00000000000000mod inner { use rkyv::{Archive, Serialize}; #[derive(Archive, Serialize)] pub struct TestTuple(pub i32); #[derive(Archive, Serialize)] pub struct TestStruct { pub value: i32, } #[derive(Archive, Serialize)] pub enum TestEnum { B(i32), C { value: i32 }, } } use inner::{ ArchivedTestEnum, ArchivedTestStruct, ArchivedTestTuple, TestEnum, TestStruct, TestTuple, }; fn main() { TestTuple(42.into()); ArchivedTestTuple(42.into()); 
TestStruct { value: 42.into() }; ArchivedTestStruct { value: 42.into() }; TestEnum::B(42.into()); TestEnum::C { value: 42.into() }; ArchivedTestEnum::B(42.into()); ArchivedTestEnum::C { value: 42.into() }; } rkyv-0.8.9/tests/ui/raw_identifiers.rs000064400000000000000000000006211046102023000161370ustar 00000000000000#![allow(non_camel_case_types)] use rkyv::{Archive, Deserialize, Serialize}; #[derive(Archive, Deserialize, Serialize, Debug, PartialEq)] #[rkyv(compare(PartialEq), derive(Debug))] struct r#virtual { r#virtual: i32, } #[derive(Archive, Deserialize, Serialize, Debug, PartialEq)] #[rkyv(compare(PartialEq), derive(Debug))] enum r#try { r#try { r#try: i32 }, } fn main() {} rkyv-0.8.9/tests/ui/the_most_unhelpful_error.rs000064400000000000000000000010251046102023000200750ustar 00000000000000use rancor::{Failure, Strategy}; use rkyv::{access_unchecked, Archive, Serialize, Deserialize}; pub trait MyTrait {} struct Serializer; impl MyTrait for Serializer {} struct NotSerializer; #[derive(Archive, Serialize, Deserialize)] #[rkyv(deserialize_bounds(__D: MyTrait))] pub struct MyStruct; fn main() { let bytes = &[]; let archived = unsafe { access_unchecked::(bytes) }; let state = archived.deserialize(Strategy::<_, Failure>::wrap(&mut NotSerializer)); } rkyv-0.8.9/tests/ui/the_most_unhelpful_error.stderr000064400000000000000000000017021046102023000207560ustar 00000000000000error[E0277]: the trait bound `Strategy: MyTrait` is not satisfied --> tests/ui/the_most_unhelpful_error.rs:21:26 | 21 | let state = archived.deserialize(Strategy::<_, Failure>::wrap(&mut NotSerializer)); | ^^^^^^^^^^^ the trait `MyTrait` is not implemented for `Strategy` | = help: the trait `MyTrait` is implemented for `Serializer` note: required for `ArchivedMyStruct` to implement `Deserialize>` --> tests/ui/the_most_unhelpful_error.rs:12:30 | 12 | #[derive(Archive, Serialize, Deserialize)] | ^^^^^^^^^^^ 13 | #[rkyv(deserialize_bounds(__D: MyTrait))] | ------- unsatisfied trait bound introduced in 
this `derive` macro = note: this error originates in the derive macro `Deserialize` (in Nightly builds, run with -Z macro-backtrace for more info) rkyv-0.8.9/tests/ui.rs000064400000000000000000000004261046102023000127640ustar 00000000000000#[rustversion::attr(not(nightly), ignore)] #[test] #[cfg(not(miri))] fn ui() { let t = trybuild::TestCases::new(); t.pass("tests/ui/derive_visibility.rs"); t.pass("tests/ui/raw_identifiers.rs"); t.compile_fail("tests/ui/the_most_unhelpful_error.rs"); }