ogg-0.9.0/.cargo_vcs_info.json0000644000000001360000000000100116160ustar { "git": { "sha1": "0910d8d57645eccc1a1400731fefef376859c661" }, "path_in_vcs": "" }ogg-0.9.0/.editorconfig000064400000000000000000000002441046102023000130630ustar 00000000000000# top-most EditorConfig file root = true [*] indent_style = tab tab_width = 4 end_of_line=lf charset=utf-8 trim_trailing_whitespace=true insert_final_newline=true ogg-0.9.0/.github/workflows/ogg.yml000064400000000000000000000020061046102023000153000ustar 00000000000000name: ogg on: [push, pull_request] jobs: build: strategy: matrix: os: [macOS-latest, ubuntu-latest, windows-latest] toolchain: [stable, beta, 1.56.1] runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@master - name: Install Rust uses: actions-rs/toolchain@v1 with: toolchain: ${{ matrix.toolchain }} override: true - name: Run no-default-features builds if: matrix.toolchain != '1.56.1' run: | cargo test --verbose --no-default-features cargo doc --verbose --no-default-features - name: Run no-default-features builds (forbidding warnings) if: matrix.toolchain == '1.56.1' env: RUSTFLAGS: -D warnings run: | cargo test --verbose --no-default-features cargo doc --verbose --no-default-features - name: Run all-features builds if: matrix.toolchain != '1.56.1' run: | cargo test --verbose --all-features cargo doc --verbose --all-features ogg-0.9.0/.gitignore000064400000000000000000000000221046102023000123700ustar 00000000000000target Cargo.lock ogg-0.9.0/Cargo.lock0000644000000277540000000000100076100ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "byteorder" version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "futures-core" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" [[package]] name = "futures-io" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" [[package]] name = "futures-macro" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "futures-sink" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" [[package]] name = "futures-task" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" [[package]] name = "futures-util" version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ "futures-core", "futures-macro", "futures-task", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "getrandom" version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "hermit-abi" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] [[package]] name = "libc" version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "lock_api" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "mio" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", "wasi", "windows-sys", ] [[package]] name = "num_cpus" version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", ] [[package]] name = "ogg" version = "0.9.0" dependencies = [ "byteorder", "bytes", "futures-core", "futures-io", "futures-util", "pin-project", "rand", "tokio", "tokio-util", ] [[package]] name = "once_cell" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" [[package]] name = "parking_lot" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-sys", ] [[package]] name = "pin-project" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = 
"pin-project-lite" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "ppv-lite86" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro2" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ "getrandom", ] [[package]] name = "redox_syscall" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" dependencies = [ "bitflags", ] [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "signal-hook-registry" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] name = "smallvec" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" [[package]] name = "socket2" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" dependencies = [ "libc", "winapi", ] [[package]] name = "syn" version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tokio" version = "1.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" dependencies = [ "bytes", "libc", "memchr", "mio", "num_cpus", "once_cell", "parking_lot", "pin-project-lite", "signal-hook-registry", 
"socket2", "tokio-macros", "winapi", ] [[package]] name = "tokio-macros" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tokio-util" version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", "log", "pin-project-lite", "tokio", ] [[package]] name = "unicode-ident" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_i686_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_x86_64_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" ogg-0.9.0/Cargo.toml0000644000000032410000000000100076140ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.56.0" name = "ogg" version = "0.9.0" authors = ["est31 "] description = "Ogg container decoder and encoder written in pure Rust" documentation = "https://docs.rs/ogg/0.8.0" readme = "README.md" keywords = [ "ogg", "decoder", "encoder", "xiph", ] license = "BSD-3-Clause" repository = "https://github.com/RustAudio/ogg" resolver = "2" [package.metadata.docs.rs] all-features = true rustdoc-args = [ "--cfg", "docsrs", ] [lib] name = "ogg" [dependencies.byteorder] version = "1.0" [dependencies.bytes] version = "1" optional = true [dependencies.futures-core] version = "0.3" optional = true [dependencies.futures-io] version = "0.3" optional = true [dependencies.pin-project] version = "1" optional = true [dependencies.tokio] version = "1" optional = true [dependencies.tokio-util] version = "0.6" features = [ "codec", "compat", ] optional = true [dev-dependencies.futures-util] version = "0.3" [dev-dependencies.rand] version = "0.8" [dev-dependencies.tokio] version = "1" features = ["full"] [features] async = [ "futures-core", "futures-io", "tokio", "tokio-util", "bytes", "pin-project", ] ogg-0.9.0/Cargo.toml.orig000064400000000000000000000020151046102023000132730ustar 00000000000000[package] name = "ogg" edition = "2021" version = "0.9.0" authors = ["est31 "] description = "Ogg container decoder and encoder written in pure Rust" license = "BSD-3-Clause" keywords = ["ogg", "decoder", "encoder", "xiph"] documentation = "https://docs.rs/ogg/0.8.0" repository = "https://github.com/RustAudio/ogg" readme = "README.md" rust-version = "1.56.0" [lib] name = "ogg" [dependencies] byteorder = "1.0" futures-core = { version = "0.3", optional = true } futures-io = { version = "0.3", optional = true } tokio = { version = "1", optional = true } tokio-util = { version = "0.6", features = ["codec", "compat"], optional = true } bytes = { version = "1", optional = true } pin-project = { version = "1", optional = true } [dev-dependencies] rand = "0.8" tokio = { version = "1", features = ["full"] } futures-util = "0.3" [features] async = ["futures-core", "futures-io", "tokio", "tokio-util", "bytes", "pin-project"] [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] ogg-0.9.0/LICENSE000064400000000000000000000033131046102023000114130ustar 00000000000000NOTE: The full list of contributors can be obtained by looking at the VCS log (originally, this crate was git versioned, there you can do "git shortlog -sn" for this task). License text ------------ Copyright (c) 2016-2017 est31 and contributors Copyright (c) 2002-2015 Xiph.org Foundation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of the Xiph.org Foundation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ogg-0.9.0/README.md000064400000000000000000000016221046102023000116660ustar 00000000000000## Ogg [![docs.rs documentation](https://img.shields.io/docsrs/ogg?label=docs.rs)](https://docs.rs/ogg/latest) An Ogg decoder and encoder. Implements the [xiph.org Ogg spec](https://www.xiph.org/vorbis/doc/framing.html) in pure Rust. If the `async` feature is disabled, Version 1.56.1 of Rust is the minimum supported one. Note: `.ogg` files are vorbis encoded audio files embedded into an Ogg transport stream. There is no extra support for vorbis codec decoding or encoding in this crate, so you need additional functionality in order to decode them. For example, you can use the [lewton](https://github.com/RustAudio/lewton) crate. Also note that the encoder part of the Crate isn't as well tested as the decoder part, in fact it was only written in order to write compact testing code for the decoder. ## License Licensed under the three clause BSD license. For details, see the [LICENSE](LICENSE) file. ogg-0.9.0/examples/dump-all.rs000064400000000000000000000046741046102023000143200ustar 00000000000000// Ogg decoder and encoder written in Rust // // Copyright (c) 2018 est31 // and contributors. All rights reserved. // Licensed under MIT license, or Apache 2 license, // at your option. Please see the LICENSE file // attached to this source distribution for details. extern crate ogg; use std::env; use ogg::{PacketReader, Packet}; use std::fs::File; fn main() { match run() { Ok(_) =>(), Err(err) => println!("Error: {}", err), } } #[allow(dead_code)] fn print_u8_slice(arr :&[u8]) { if arr.len() <= 4 { for a in arr { print!("0x{:02x} ", a); } println!(); return; } println!("["); let mut i :usize = 0; while i * 4 < arr.len() - 4 { println!("\t0x{:02x}, 0x{:02x}, 0x{:02x}, 0x{:02x},", arr[i * 4], arr[i * 4 + 1], arr[i * 4 + 2], arr[i * 4 + 3]); i += 1; } match arr.len() as i64 - i as i64 * 4 { 1 => println!("\t0x{:02x}];", arr[i * 4]), 2 => println!("\t0x{:02x}, 0x{:02x}];", arr[i * 4], arr[i * 4 + 1]), 3 => println!("\t0x{:02x}, 0x{:02x}, 0x{:02x}];", arr[i * 4], arr[i * 4 + 1], arr[i * 4 + 2]), 4 => println!("\t0x{:02x}, 0x{:02x}, 0x{:02x}, 0x{:02x}];", arr[i * 4], arr[i * 4 + 1], arr[i * 4 + 2], arr[i * 4 + 3]), de => panic!("impossible value {}", de), } } fn dump_pck_info(p :&Packet, ctr :usize) { println!("Packet: serial 0x{:08x}, data {:08} large, first {: >5}, last {: >5}, absgp 0x{:016x} nth {}", p.stream_serial(), p.data.len(), p.first_in_page(), p.last_in_page(), p.absgp_page(), ctr); print_u8_slice(&p.data); } fn run() -> Result<(), std::io::Error> { let file_path = env::args().nth(1).expect("No arg found. Please specify a file to open."); println!("Opening file: {}", file_path); let mut f = File::open(file_path)?; let mut pck_rdr = PacketReader::new(&mut f); let mut ctr = 0; loop { let r = pck_rdr.read_packet(); match r { Ok(Some(p)) => { dump_pck_info(&p, ctr); // Why do we not check p.last_packet here, and break the loop if false? // Well, first, this is only an example. 
// Second, the codecs may end streams in the middle of the file, // while still continuing other streams. // Therefore, don't do a probably too-early break. // Applications which know the codec may know after which // ended stream to stop decoding the file and thus not // encounter an error. }, // End of stream Ok(None) => break, Err(e) => { println!("Encountered Error: {:?}", e); break; } } ctr+=1; } Ok(()) } ogg-0.9.0/examples/format-info.rs000064400000000000000000000037111046102023000150150ustar 00000000000000// Ogg decoder and encoder written in Rust // // Copyright (c) 2016 est31 // and contributors. All rights reserved. // Licensed under MIT license, or Apache 2 license, // at your option. Please see the LICENSE file // attached to this source distribution for details. extern crate ogg; use std::env; use ogg::{PacketReader, Packet}; use std::fs::File; use std::time::Instant; fn main() { match run() { Ok(_) =>(), Err(err) => println!("Error: {}", err), } } fn dump_pck_info(p :&Packet) { println!("Packet: serial 0x{:08x}, data {:08} large, first {: >5}, last {: >5}, absgp 0x{:016x}", p.stream_serial(), p.data.len(), p.first_in_page(), p.last_in_page(), p.absgp_page()); } fn run() -> Result<(), std::io::Error> { let file_path = env::args().nth(1).expect("No arg found. Please specify a file to open."); println!("Opening file: {}", file_path); let mut f = File::open(file_path)?; let mut pck_rdr = PacketReader::new(&mut f); let mut byte_ctr :u64 = 0; let begin = Instant::now(); loop { let r = pck_rdr.read_packet(); match r { Ok(Some(p)) => { byte_ctr += p.data.len() as u64; dump_pck_info(&p); let elapsed = begin.elapsed(); let elapsed_ms = 1000.0 * elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000.0; println!("speed: {:.3} kb per ms ({} read)", byte_ctr as f64 / elapsed_ms / 1000.0, byte_ctr); // Why do we not check p.last_packet here, and break the loop if false? // Well, first, this is only an example. // Second, the codecs may end streams in the middle of the file, // while still continuing other streams. // Therefore, don't do a probably too-early break. // Applications which know the codec may know after which // ended stream to stop decoding the file and thus not // encounter an error. }, // End of stream Ok(None) => break, Err(e) => { println!("Encountered Error: {:?}", e); break; } } } Ok(()) } ogg-0.9.0/examples/repack.rs000064400000000000000000000035561046102023000140500ustar 00000000000000// Ogg decoder and encoder written in Rust // // Copyright (c) 2018 est31 // and contributors. All rights reserved. // Licensed under MIT license, or Apache 2 license, // at your option. Please see the LICENSE file // attached to this source distribution for details. extern crate ogg; use std::env; use ogg::{PacketReader, PacketWriter}; use ogg::writing::PacketWriteEndInfo; use std::fs::File; fn main() { match run() { Ok(_) =>(), Err(err) => println!("Error: {}", err), } } macro_rules! btry { ($e:expr) => { match $e { Ok(v) => v, Err(e) => { println!("Encountered Error: {:?}", e); break; }, } }; } fn run() -> Result<(), std::io::Error> { let input_path = env::args().nth(1).expect("No arg for input path found. Please specify a file to open."); let output_path = env::args().nth(2).expect("No arg for output path found. 
Please specify a file to save to."); println!("Opening file: {}", input_path); println!("Writing to: {}", output_path); let mut f_i = File::open(input_path)?; let mut f_o = File::create(output_path)?; let mut pck_rdr = PacketReader::new(&mut f_i); // This call doesn't discard anything as nothing has // been stored yet, but it does set bits that // make reading logic a bit more tolerant towards // errors. pck_rdr.delete_unread_packets(); let mut pck_wtr = PacketWriter::new(&mut f_o); loop { let r = btry!(pck_rdr.read_packet()); match r { Some(pck) => { let inf = if pck.last_in_stream() { PacketWriteEndInfo::EndStream } else if pck.last_in_page() { PacketWriteEndInfo::EndPage } else { PacketWriteEndInfo::NormalPacket }; let stream_serial = pck.stream_serial(); let absgp_page = pck.absgp_page(); btry!(pck_wtr.write_packet(pck.data, stream_serial, inf, absgp_page)); }, // End of stream None => break, } } Ok(()) } ogg-0.9.0/src/crc.rs000064400000000000000000000037561046102023000123250ustar 00000000000000// Ogg decoder and encoder written in Rust // // Copyright (c) 2016-2017 est31 // and contributors. All rights reserved. // Redistribution or use only under the terms // specified in the LICENSE file attached to this // source distribution. /*! Implementation of the CRC algorithm with the vorbis specific parameters and setup */ // Lookup table to enable bytewise CRC32 calculation static CRC_LOOKUP_ARRAY :&[u32] = &lookup_array(); const fn get_tbl_elem(idx :u32) -> u32 { let mut r :u32 = idx << 24; let mut i = 0; while i < 8 { r = (r << 1) ^ (-(((r >> 31) & 1) as i32) as u32 & 0x04c11db7); i += 1; } return r; } const fn lookup_array() -> [u32; 0x100] { let mut lup_arr :[u32; 0x100] = [0; 0x100]; let mut i = 0; while i < 0x100 { lup_arr[i] = get_tbl_elem(i as u32); i += 1; } lup_arr } #[cfg(test)] pub fn vorbis_crc32(array :&[u8]) -> u32 { return vorbis_crc32_update(0, array); } pub fn vorbis_crc32_update(cur :u32, array :&[u8]) -> u32 { let mut ret :u32 = cur; for av in array { ret = (ret << 8) ^ CRC_LOOKUP_ARRAY[(*av as u32 ^ (ret >> 24)) as usize]; } return ret; } #[test] fn test_crc32() { // Test page taken from real Ogg file let test_arr = &[ 0x4f, 0x67, 0x67, 0x53, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0xa3, 0x90, 0x5b, 0x00, 0x00, 0x00, 0x00, // The spec requires us to zero out the CRC field /*0x6d, 0x94, 0x4e, 0x3d,*/ 0x00, 0x00, 0x00, 0x00, 0x01, 0x1e, 0x01, 0x76, 0x6f, 0x72, 0x62, 0x69, 0x73, 0x00, 0x00, 0x00, 0x00, 0x02, 0x44, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xb5, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x01]; println!(); println!("CRC of \"==!\" calculated as 0x{:08x} (expected 0x9f858776)", vorbis_crc32(&[61,61,33])); println!("Test page CRC calculated as 0x{:08x} (expected 0x3d4e946d)", vorbis_crc32(test_arr)); assert_eq!(vorbis_crc32(&[61,61,33]), 0x9f858776); assert_eq!(vorbis_crc32(test_arr), 0x3d4e946d); assert_eq!(vorbis_crc32(&test_arr[0 .. 27]), 0x7b374db8); } ogg-0.9.0/src/lib.rs000064400000000000000000000062151046102023000123150ustar 00000000000000// Ogg decoder and encoder written in Rust // // Copyright (c) 2016 est31 // and contributors. All rights reserved. // Redistribution or use only under the terms // specified in the LICENSE file attached to this // source distribution. #![allow(unknown_lints)] #![forbid(unsafe_code)] #![allow(clippy::needless_return)] #![allow(clippy::nonminimal_bool)] #![allow(clippy::tabs_in_doc_comments)] #![allow(clippy::new_without_default)] #![cfg_attr(docsrs, feature(doc_auto_cfg))] /*! 
Ogg container decoder and encoder

The most interesting structures in this mod are `PacketReader` and `PacketWriter`.
*/

#[cfg(test)]
mod test;

macro_rules! tri {
	($e:expr) => {
		match $e {
			Ok(val) => val,
			Err(err) => return Err(err.into()),
		}
	};
}

mod crc;
pub mod reading;
pub mod writing;

pub use crate::writing::{PacketWriter, PacketWriteEndInfo};
pub use crate::reading::{PacketReader, OggReadError};

/**
Ogg packet representation.

For the Ogg format, packets are the logically smallest subdivision it handles.

Every packet belongs to a *logical* bitstream. The *logical* bitstreams then form a *physical* bitstream, with the data combined in multiple different ways.

Every logical bitstream is identified by the serial number its pages have stored. The Packet struct contains a field for that number as well, so that one can find out which logical bitstream the Packet belongs to.
*/
pub struct Packet {
	/// The data the `Packet` contains
	pub data :Vec<u8>,
	/// `true` iff this packet is the first one in the page.
	first_packet_pg :bool,
	/// `true` iff this packet is the first one in the logical bitstream.
	first_packet_stream :bool,
	/// `true` iff this packet is the last one in the page.
	last_packet_pg :bool,
	/// `true` iff this packet is the last one in the logical bitstream
	last_packet_stream :bool,
	/// Absolute granule position of the last page the packet was in.
	/// The meaning of the absolute granule position is defined by the codec.
	absgp_page :u64,
	/// Serial number. Uniquely identifying the logical bitstream.
	stream_serial :u32,
	/*/// Packet counter
	/// Why u64? There are MAX_U32 pages, and every page has up to 128 packets. u32 wouldn't be sufficient here...
	pub sequence_num :u64,*/ // TODO perhaps add this later on...
}

impl Packet {
	/// Returns whether the packet is the first one starting in the page
	pub fn first_in_page(&self) -> bool {
		self.first_packet_pg
	}
	/// Returns whether the packet is the first one of the entire stream
	pub fn first_in_stream(&self) -> bool {
		self.first_packet_stream
	}
	/// Returns whether the packet is the last one starting in the page
	pub fn last_in_page(&self) -> bool {
		self.last_packet_pg
	}
	/// Returns whether the packet is the last one of the entire stream
	pub fn last_in_stream(&self) -> bool {
		self.last_packet_stream
	}
	/// Returns the absolute granule position of the page the packet ended in.
	///
	/// The meaning of the absolute granule position is defined by the codec.
	pub fn absgp_page(&self) -> u64 {
		self.absgp_page
	}
	/// Returns the serial number that uniquely identifies the logical bitstream.
	pub fn stream_serial(&self) -> u32 {
		self.stream_serial
	}
}
ogg-0.9.0/src/reading.rs
// Ogg decoder and encoder written in Rust
//
// Copyright (c) 2016-2017 est31 <MTest31@outlook.com>
// and contributors. All rights reserved.
// Redistribution or use only under the terms
// specified in the LICENSE file attached to this
// source distribution.

/*!
Reading logic
*/

use std::error;
use std::io;
use std::io::{Cursor, Read, Write, SeekFrom, Error, ErrorKind};
use byteorder::{ReadBytesExt, LittleEndian};
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::fmt::{Display, Formatter, Error as FmtError};
use std::mem::replace;
use crate::crc::vorbis_crc32_update;
use crate::Packet;
use std::io::Seek;

/// Error that can be raised when decoding an Ogg transport.
#[derive(Debug)]
pub enum OggReadError {
	/// The capture pattern for a new page was not found
	/// where one was expected.
	NoCapturePatternFound,
	/// Invalid stream structure version, with the given one
	/// attached.
	InvalidStreamStructVer(u8),
	/// Mismatch of the hash value with (expected, calculated) value.
	HashMismatch(u32, u32),
	/// I/O error occurred.
	ReadError(io::Error),
	/// Some constraint required by the spec was not met.
	InvalidData,
}

impl OggReadError {
	fn description_str(&self) -> &str {
		match *self {
			OggReadError::NoCapturePatternFound => "No Ogg capture pattern found",
			OggReadError::InvalidStreamStructVer(_) =>
				"A non zero stream structure version was passed",
			OggReadError::HashMismatch(_, _) => "CRC32 hash mismatch",
			OggReadError::ReadError(_) => "I/O error",
			OggReadError::InvalidData => "Constraint violated",
		}
	}
}

impl error::Error for OggReadError {
	fn description(&self) -> &str {
		self.description_str()
	}

	fn cause(&self) -> Option<&dyn error::Error> {
		match *self {
			OggReadError::ReadError(ref err) => Some(err as &dyn error::Error),
			_ => None,
		}
	}
}

impl Display for OggReadError {
	fn fmt(&self, fmt :&mut Formatter) -> Result<(), FmtError> {
		write!(fmt, "{}", Self::description_str(self))
	}
}

impl From<io::Error> for OggReadError {
	fn from(err :io::Error) -> OggReadError {
		return OggReadError::ReadError(err);
	}
}

/// Containing information about an OGG page that is shared between multiple places
struct PageBaseInfo {
	/// `true`: the first packet is continued from the page before. `false`: if it's a "fresh" one
	starts_with_continued :bool,
	/// `true` if this page is the first one in the logical bitstream
	first_page :bool,
	/// `true` if this page is the last one in the logical bitstream
	last_page :bool,
	/// Absolute granule position. The codec defines further meaning.
	absgp :u64,
	/// Page counter
	sequence_num :u32,
	/// Packet information:
	/// index is number of packet,
	/// tuple is (offset, length) of packet
	/// if ends_with_continued is true, the last element will contain information
	/// about the continued packet
	packet_positions :Vec<(u16,u16)>,
	/// `true` if the packet is continued in subsequent page(s)
	/// `false` if the packet has a segment of length < 255 inside this page
	ends_with_continued :bool,
}

/// Internal helper struct for PacketReader state
struct PageInfo {
	/// Basic information about the last read page
	bi :PageBaseInfo,
	/// The index of the first "unread" packet
	packet_idx :u8,
	/// Contains the package data
	page_body :Vec<u8>,

	/// If there is a residue from previous pages in terms of a package spanning multiple
	/// pages, this field contains it. Having this `Vec<Vec<u8>>` and
	/// not `Vec<u8>` ensures to give us O(n) complexity, not O(n^2)
	/// for `n` as number of pages that the packet is contained in.
	last_overlap_pck :Vec<Vec<u8>>,
}

impl PageInfo {
	/// Returns `true` if the first "unread" packet is the first one
	/// in the page, `false` otherwise.
	fn is_first_pck_in_pg(&self) -> bool {
		return self.packet_idx == 0;
	}
	/// Returns `true` if the first "unread" packet is the last one
	/// in the page, `false` otherwise.
	/// If the first "unread" packet isn't completed in this page
	/// (spans page borders), this returns `false`.
	fn is_last_pck_in_pg(&self) -> bool {
		return (self.packet_idx + 1 + (self.bi.ends_with_continued as u8)) as usize
			== self.bi.packet_positions.len();
	}
}

/// Contains a fully parsed OGG page.
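///
/// A minimal sketch of how a page is parsed into an `OggPage` (the file
/// name, and the assumption that the reader already sits exactly on a
/// page boundary, are illustrative only):
///
/// ```no_run
/// use std::io::Read;
/// use ogg::reading::PageParser;
///
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let mut f = std::fs::File::open("example.ogg")?;
/// let mut header = [0u8; 27];
/// f.read_exact(&mut header)?;
/// let (mut parser, seg_len) = PageParser::new(header)?;
/// let mut segments = vec![0u8; seg_len];
/// f.read_exact(&mut segments)?;
/// let body_len = parser.parse_segments(segments);
/// let mut body = vec![0u8; body_len];
/// f.read_exact(&mut body)?;
/// let page = parser.parse_packet_data(body)?; // CRC is verified here
/// # Ok(()) }
/// ```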
pub struct OggPage(PageParser);

impl OggPage {
	/// Returns whether there is an ending packet in the page
	fn has_packet_end(&self) -> bool {
		(self.0.bi.packet_positions.len()
			- self.0.bi.ends_with_continued as usize) > 0
	}
	/// Returns whether there is a packet that both
	/// starts and ends inside the page
	fn has_whole_packet(&self) -> bool {
		self.0.bi.packet_positions.len().saturating_sub(
			self.0.bi.ends_with_continued as usize +
			self.0.bi.starts_with_continued as usize) > 0
	}
	/// Returns whether there is a starting packet in the page
	fn has_packet_start(&self) -> bool {
		(self.0.bi.packet_positions.len()
			- self.0.bi.starts_with_continued as usize) > 0
	}
}

/**
Helper struct for parsing pages

It's created using the `new` function and then it's fed more data via the `parse_segments` and `parse_packet_data` functions, each called exactly once and in that precise order.

Then later code uses the `OggPage` returned by the `parse_packet_data` function.
*/
pub struct PageParser {
	// Members packet_positions, ends_with_continued and packet_count
	// get populated after segments have been parsed
	bi :PageBaseInfo,

	stream_serial :u32,
	checksum :u32,
	header_buf: [u8; 27],
	/// Number of packet ending segments
	packet_count :u16, // Gets populated after segments have been parsed
	/// after segments have been parsed, this contains the segments buffer,
	/// after the packet data have been read, this contains the packets buffer.
	segments_or_packets_buf :Vec<u8>,
}

impl PageParser {
	/// Creates a new Page parser
	///
	/// The `header_buf` param contains the first 27 bytes of a new OGG page.
	/// Determining when one begins is your responsibility. Usually they
	/// begin directly after the end of a previous OGG page, but
	/// after you've performed a seek you might end up within the middle of a page
	/// and need to recapture.
	///
	/// Returns a page parser, and the requested size of the segments array.
	/// You should allocate and fill such an array, in order to pass it to the `parse_segments`
	/// function.
	pub fn new(header_buf :[u8; 27]) -> Result<(PageParser, usize), OggReadError> {
		let mut header_rdr = Cursor::new(header_buf);
		header_rdr.set_position(4);
		let stream_structure_version = tri!(header_rdr.read_u8());
		if stream_structure_version != 0 {
			tri!(Err(OggReadError::InvalidStreamStructVer(stream_structure_version)));
		}
		let header_type_flag = header_rdr.read_u8().unwrap();
		let absgp = header_rdr.read_u64::<LittleEndian>().unwrap();
		let stream_serial = header_rdr.read_u32::<LittleEndian>().unwrap();
		let sequence_num = header_rdr.read_u32::<LittleEndian>().unwrap();
		let checksum = header_rdr.read_u32::<LittleEndian>().unwrap();

		Ok((PageParser {
			bi : PageBaseInfo {
				starts_with_continued : header_type_flag & 0x01u8 != 0,
				first_page : header_type_flag & 0x02u8 != 0,
				last_page : header_type_flag & 0x04u8 != 0,
				absgp,
				sequence_num,
				packet_positions : Vec::new(),
				ends_with_continued : false,
			},
			stream_serial,
			checksum,
			header_buf,
			packet_count : 0,
			segments_or_packets_buf :Vec::new(),
		},
			// Number of page segments
			header_rdr.read_u8().unwrap() as usize
		))
	}

	/// Parses the segments buffer, and returns the requested size
	/// of the packets content array.
	///
	/// You should allocate and fill such an array, in order to pass it to the `parse_packet_data`
	/// function.
	pub fn parse_segments(&mut self, segments_buf :Vec<u8>) -> usize {
		let mut page_siz :u16 = 0; // Size of the page's body
		// Whether our page ends with a continued packet
		self.bi.ends_with_continued = self.bi.starts_with_continued;

		// First run: get the number of packets,
		// whether the page ends with a continued packet,
		// and the size of the page's body
		for val in &segments_buf {
			page_siz += *val as u16;
			// Increment by 1 if val < 255, otherwise by 0
			self.packet_count += (*val < 255) as u16;
			self.bi.ends_with_continued = !(*val < 255);
		}

		let mut packets = Vec::with_capacity(self.packet_count as usize
			+ self.bi.ends_with_continued as usize);
		let mut cur_packet_siz :u16 = 0;
		let mut cur_packet_offs :u16 = 0;

		// Second run: get the offsets of the packets
		// Not that we need it right now, but it's much more fun this way, am I right
		for val in &segments_buf {
			cur_packet_siz += *val as u16;
			if *val < 255 {
				packets.push((cur_packet_offs, cur_packet_siz));
				cur_packet_offs += cur_packet_siz;
				cur_packet_siz = 0;
			}
		}
		if self.bi.ends_with_continued {
			packets.push((cur_packet_offs, cur_packet_siz));
		}

		self.bi.packet_positions = packets;
		self.segments_or_packets_buf = segments_buf;
		page_siz as usize
	}

	/// Parses the packets data and verifies the checksum.
	///
	/// Returns an `OggPage` to be used by later code.
	pub fn parse_packet_data(mut self, packet_data :Vec<u8>)
			-> Result<OggPage, OggReadError> {
		// Now to hash calculation.
		// 1. Clear the header buffer
		self.header_buf[22] = 0;
		self.header_buf[23] = 0;
		self.header_buf[24] = 0;
		self.header_buf[25] = 0;

		// 2. Calculate the hash
		let mut hash_calculated :u32;
		hash_calculated = vorbis_crc32_update(0, &self.header_buf);
		hash_calculated = vorbis_crc32_update(hash_calculated,
			&self.segments_or_packets_buf);
		hash_calculated = vorbis_crc32_update(hash_calculated, &packet_data);

		// 3. Compare to the extracted one
		if self.checksum != hash_calculated {
			// Do not verify checksum when the decoder is being fuzzed.
			// This allows random input from fuzzers to reach decoding code
			// that's actually interesting, instead of being rejected early
			// due to checksum mismatch.
			if !cfg!(fuzzing) {
				tri!(Err(OggReadError::HashMismatch(self.checksum, hash_calculated)));
			}
		}
		self.segments_or_packets_buf = packet_data;
		Ok(OggPage(self))
	}
}

/**
Low level struct for reading from an Ogg stream.

Note that most times you'll want the higher level `PacketReader` struct.

It takes care of most of the internal parsing and logic, you will only have to take care of handing over your data.

Essentially, it manages a cache of packet data for each logical bitstream, and when the cache of every logical bitstream is empty, it asks for a fresh page. You will then need to feed the struct one via the `push_page` function.

All functions on this struct are async ready. They get their data fed, instead of calling and blocking in order to get it.
*/
pub struct BasePacketReader {
	// TODO the hashmap plus the set is perhaps smart ass perfect design but could be made more performant I guess...
	// I mean: in > 99% of all cases we'll just have one or two streams.
	// AND: their setup changes only very rarely.

	/// Contains info about all logical streams that have been encountered so far
	page_infos :HashMap<u32, PageInfo>,

	/// Contains the stream_serial of the stream that contains some unprocessed packet data.
	/// There is always <= 1, because if there is one, no new pages will be read,
	/// so there is no chance for a second to be added.
	/// None if there is no such stream and one has to read a new page.
	stream_with_stuff :Option<u32>,

	// Bool that is set to true when a seek of the stream has occurred.
	// This helps validator code to decide whether to accept certain strange data.
	has_seeked :bool,
}
impl BasePacketReader {
	/// Constructs a new blank `BasePacketReader`.
	///
	/// You can feed it data using the `push_page` function, and
	/// obtain data using the `read_packet` function.
	pub fn new() -> Self {
		BasePacketReader { page_infos: HashMap::new(),
			stream_with_stuff: None, has_seeked: false }
	}
	/// Extracts a packet from the cache, if the cache contains valid packet data,
	/// otherwise it returns `None`.
	///
	/// If this function returns `None`, you'll need to add a page to the cache
	/// by using the `push_page` function.
	pub fn read_packet(&mut self) -> Option<Packet> {
		if self.stream_with_stuff == None {
			return None;
		}
		let str_serial :u32 = self.stream_with_stuff.unwrap();
		let pg_info = self.page_infos.get_mut(&str_serial).unwrap();
		let (offs, len) = pg_info.bi.packet_positions[pg_info.packet_idx as usize];
		// If there is a continued packet, and we are at the start right now,
		// and we actually have its end in the current page, glue it together.
		let need_to_glue = pg_info.packet_idx == 0 &&
			pg_info.bi.starts_with_continued &&
			!(pg_info.bi.ends_with_continued && pg_info.bi.packet_positions.len() == 1);

		let packet_content :Vec<u8> = if need_to_glue {
			// First find out the size of our spanning packet
			let mut siz :usize = 0;
			for pck in pg_info.last_overlap_pck.iter() {
				siz += pck.len();
			}
			siz += len as usize;
			let mut cont :Vec<u8> = Vec::with_capacity(siz);
			// Then do the copying
			for pck in pg_info.last_overlap_pck.iter() {
				cont.write_all(pck).unwrap();
			}
			// Now reset the overlap container again
			pg_info.last_overlap_pck = Vec::new();
			cont.write_all(&pg_info.page_body[offs as usize .. (offs + len) as usize]).unwrap();
			cont
		} else {
			let mut cont :Vec<u8> = Vec::with_capacity(len as usize);
			// TODO The copy below is totally unnecessary. It is only needed so that we don't have to carry around the old Vec's.
			// TODO get something like the shared_slice crate for RefCells, so that we can also have mutable data, shared through
			// slices.
			let cont_slice :&[u8] = &pg_info.page_body[offs as usize .. (offs + len) as usize];
			cont.write_all(cont_slice).unwrap();
			cont
		};

		let first_pck_in_pg = pg_info.is_first_pck_in_pg();
		let first_pck_overall = pg_info.bi.first_page && first_pck_in_pg;

		let last_pck_in_pg = pg_info.is_last_pck_in_pg();
		let last_pck_overall = pg_info.bi.last_page && last_pck_in_pg;

		// Update the last read index.
		pg_info.packet_idx += 1;
		// Set stream_with_stuff to None so that future packet reads
		// yield a page read first
		if last_pck_in_pg {
			self.stream_with_stuff = None;
		}

		return Some(Packet {
			data: packet_content,
			first_packet_pg: first_pck_in_pg,
			first_packet_stream: first_pck_overall,
			last_packet_pg: last_pck_in_pg,
			last_packet_stream: last_pck_overall,
			absgp_page: pg_info.bi.absgp,
			stream_serial: str_serial,
		});
	}

	/// Pushes a given Ogg page, updating the internal structures
	/// with its contents.
	///
	/// If you want the code to function properly, you should first call
	/// `parse_segments`, then `parse_packet_data` on a `PageParser`
	/// before passing the resulting `OggPage` to this function.
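	///
	/// A hedged sketch of the feeding loop (how the `OggPage` values are
	/// obtained is left out here; see the `OggPage` docs):
	///
	/// ```no_run
	/// use ogg::reading::{BasePacketReader, OggPage, OggReadError};
	///
	/// fn drain_pages(pages :Vec<OggPage>) -> Result<(), OggReadError> {
	/// 	let mut rdr = BasePacketReader::new();
	/// 	for page in pages {
	/// 		rdr.push_page(page)?;
	/// 		// Drain every packet that this page completed.
	/// 		while let Some(pck) = rdr.read_packet() {
	/// 			println!("packet with {} bytes", pck.data.len());
	/// 		}
	/// 	}
	/// 	Ok(())
	/// }
	/// ```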
	pub fn push_page(&mut self, page :OggPage) -> Result<(), OggReadError> {
		let mut pg_prs = page.0;
		match self.page_infos.entry(pg_prs.stream_serial) {
			Entry::Occupied(mut o) => {
				let inf = o.get_mut();
				if pg_prs.bi.first_page {
					tri!(Err(OggReadError::InvalidData));
				}
				if pg_prs.bi.starts_with_continued != inf.bi.ends_with_continued {
					if !self.has_seeked {
						tri!(Err(OggReadError::InvalidData));
					} else {
						// If we have seeked, we are more tolerant here,
						// and just drop the continued packet's content.
						inf.last_overlap_pck.clear();
						if pg_prs.bi.starts_with_continued {
							pg_prs.bi.packet_positions.remove(0);
							if pg_prs.packet_count != 0 {
								// Decrease packet count by one. Normal case.
								pg_prs.packet_count -= 1;
							} else {
								// If the packet count is 0, this means
								// that we start and end with the same continued packet.
								// So now as we ignore that packet, we must clear the
								// ends_with_continued state as well.
								pg_prs.bi.ends_with_continued = false;
							}
						}
					}
				} else if pg_prs.bi.starts_with_continued {
					// Remember the packet at the end so that it can be glued together once
					// we encounter the next segment with length < 255 (doesn't have to be in this page)
					let (offs, len) = inf.bi.packet_positions[inf.packet_idx as usize];
					if len as usize != inf.page_body.len() {
						let mut tmp = Vec::with_capacity(len as usize);
						tmp.write_all(&inf.page_body[offs as usize .. (offs + len) as usize]).unwrap();
						inf.last_overlap_pck.push(tmp);
					} else {
						// Little optimisation: don't copy if not necessary
						inf.last_overlap_pck.push(replace(&mut inf.page_body, vec![0;0]));
					}
				}
				inf.bi = pg_prs.bi;
				inf.packet_idx = 0;
				inf.page_body = pg_prs.segments_or_packets_buf;
			},
			Entry::Vacant(v) => {
				if !self.has_seeked {
					if !pg_prs.bi.first_page || pg_prs.bi.starts_with_continued {
						// If we haven't seeked, this is an error.
						tri!(Err(OggReadError::InvalidData));
					}
				} else {
					if !pg_prs.bi.first_page {
						// we can just ignore this.
					}
					if pg_prs.bi.starts_with_continued {
						// Ignore the continued packet's content.
						// This is a normal occurrence if we have just seeked.
						pg_prs.bi.packet_positions.remove(0);
						if pg_prs.packet_count != 0 {
							// Decrease packet count by one. Normal case.
							pg_prs.packet_count -= 1;
						} else {
							// If the packet count is 0, this means
							// that we start and end with the same continued packet.
							// So now as we ignore that packet, we must clear the
							// ends_with_continued state as well.
							pg_prs.bi.ends_with_continued = false;
						}
						// Not actually needed, but good for consistency
						pg_prs.bi.starts_with_continued = false;
					}
				}
				v.insert(PageInfo {
					bi : pg_prs.bi,
					packet_idx: 0,
					page_body: pg_prs.segments_or_packets_buf,
					last_overlap_pck: Vec::new(),
				});
			},
		}
		let pg_has_stuff :bool = pg_prs.packet_count > 0;

		if pg_has_stuff {
			self.stream_with_stuff = Some(pg_prs.stream_serial);
		} else {
			self.stream_with_stuff = None;
		}

		return Ok(());
	}

	/// Reset the internal state after a seek
	///
	/// It flushes the cache so that no partial data is left inside.
	/// It also tells the parsing logic to expect little inconsistencies
	/// due to the read position not being at the start.
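	///
	/// A hedged sketch (`base_rdr` and `page` come from surrounding code
	/// that is not shown):
	///
	/// ```ignore
	/// // After repositioning whatever supplies the pages...
	/// base_rdr.update_after_seek();
	/// // ...pushing can resume; a continued-packet fragment at the
	/// // new position is now tolerated and silently dropped.
	/// base_rdr.push_page(page)?;
	/// ```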
	pub fn update_after_seek(&mut self) {
		self.stream_with_stuff = None;
		self.page_infos = HashMap::new();
		self.has_seeked = true;
	}
}

#[derive(Clone, Copy)]
enum UntilPageHeaderReaderMode {
	Searching,
	FoundWithNeeded(u8),
	SeekNeeded(i32),
	Found,
}

enum UntilPageHeaderResult {
	Eof,
	Found,
	ReadNeeded,
	SeekNeeded,
}

struct UntilPageHeaderReader {
	mode :UntilPageHeaderReaderMode,
	/// Capture pattern offset. Needed so that if we only partially
	/// recognized the capture pattern, we later on only check the
	/// remaining part.
	cpt_of :u8,
	/// The return buffer.
	ret_buf :[u8; 27],
	read_amount :usize,
}

impl UntilPageHeaderReader {
	pub fn new() -> Self {
		UntilPageHeaderReader {
			mode : UntilPageHeaderReaderMode::Searching,
			cpt_of : 0,
			ret_buf : [0; 27],
			read_amount : 0,
		}
	}
	/// Returns Some(off), where off is the offset of the last byte
	/// of the capture pattern if it's found, None if the capture pattern
	/// is not inside the passed slice.
	///
	/// Changes the capture pattern offset accordingly
	fn check_arr(&mut self, arr :&[u8]) -> Option<usize> {
		for (i, ch) in arr.iter().enumerate() {
			match *ch {
				b'O' => self.cpt_of = 1,
				b'g' if self.cpt_of == 1 || self.cpt_of == 2 =>
					self.cpt_of += 1,
				b'S' if self.cpt_of == 3 => return Some(i),
				_ => self.cpt_of = 0,
			}
		}
		return None;
	}
	/// Do one read exactly, and return whether the full header is now
	/// available (`Found`), whether more reading (`ReadNeeded`) or a seek
	/// (`SeekNeeded`) is required first, or whether the stream ended (`Eof`).
	/// Once `Found` is returned, the header can be extracted with `into_header`.
	pub fn do_read<R :Read>(&mut self, mut rdr :R)
			-> Result<UntilPageHeaderResult, OggReadError> {
		use self::UntilPageHeaderReaderMode::*;
		use self::UntilPageHeaderResult as Res;
		// The array's size is freely choosable, but must be > 27,
		// and must well fit into an i32 (needs to be stored in SeekNeeded)
		let mut buf :[u8; 1024] = [0; 1024];

		let rd_len = tri!(rdr.read(if self.read_amount < 27 {
			// This is an optimisation for the most likely case:
			// the next page directly follows the current read position.
			// Then it would be a waste to read more than the needed amount.
			&mut buf[0 .. 27 - self.read_amount]
		} else {
			match self.mode {
				Searching => &mut buf,
				FoundWithNeeded(amount) => &mut buf[0 .. amount as usize],
				SeekNeeded(_) => return Ok(Res::SeekNeeded),
				Found => return Ok(Res::Found),
			}
		}));
		if rd_len == 0 {
			// Reached EOF. This means we're in one of these cases:
			// 1. If we have read nothing yet (self.read_amount == 0),
			//    there is nothing but ogg data, meaning the stream
			//    ends legally and without corruption.
			// 2. If we have read something, there is corruption here.
			// The ogg spec doesn't say whether random data past the
			// last ogg page is allowed, and ogginfo complains about
			// that. But files with trailing garbage can be played
			// back just fine by oggdec, VLC and other players, so
			// just ignore that to meet user expectations.
			return Ok(Res::Eof);
		}
		self.read_amount += rd_len;

		// 150 kb gives us a bit of safety: we can survive
		// up to one page with a corrupted capture pattern
		// after having seeked right after a capture pattern
		// of an earlier page.
		let read_amount_max = 150 * 1024;
		if self.read_amount > read_amount_max {
			// Exhaustive searching for the capture pattern
			// has returned no ogg capture pattern.
			tri!(Err(OggReadError::NoCapturePatternFound));
		}

		let rd_buf = &buf[0 .. rd_len];

		use std::cmp::min;
		let (off, needed) = match self.mode {
			Searching => match self.check_arr(rd_buf) {
				// Capture pattern found
				Some(off) => {
					self.ret_buf[0] = b'O';
					self.ret_buf[1] = b'g';
					self.ret_buf[2] = b'g';
					self.ret_buf[3] = b'S'; // (Not actually needed)
					(off, 24)
				},
				// Nothing found
				None => return Ok(Res::ReadNeeded),
			},
			FoundWithNeeded(needed) => {
				(0, needed as usize)
			},
			_ => unimplemented!(),
		};

		let fnd_buf = &rd_buf[off..];
		let copy_amount = min(needed, fnd_buf.len());
		let start_fill = 27 - needed;
		(&mut self.ret_buf[start_fill .. copy_amount + start_fill])
			.copy_from_slice(&fnd_buf[0 .. copy_amount]);
		// Comparison chain operation via cmp can be slower,
		// and also requires an import. It's a questionable idea
		// to suggest it. TODO: once open ranges are stable in
		// match, use them.
		#[allow(clippy::comparison_chain)]
		if fnd_buf.len() == needed {
			// Capture pattern found!
			self.mode = Found;
			return Ok(Res::Found);
		} else if fnd_buf.len() < needed {
			// We still have to read some content.
			let needed_new = needed - copy_amount;
			self.mode = FoundWithNeeded(needed_new as u8);
			return Ok(Res::ReadNeeded);
		} else {
			// We have read too much content (exceeding the header).
			// Seek back so that we are at the position
			// right after the header.
			self.mode = SeekNeeded(needed as i32 - fnd_buf.len() as i32);
			return Ok(Res::SeekNeeded);
		}
	}
	pub fn do_seek<S :Seek>(&mut self, mut skr :S)
			-> Result<UntilPageHeaderResult, OggReadError> {
		use self::UntilPageHeaderReaderMode::*;
		use self::UntilPageHeaderResult as Res;
		match self.mode {
			Searching | FoundWithNeeded(_) => Ok(Res::ReadNeeded),
			SeekNeeded(offs) => {
				tri!(skr.seek(SeekFrom::Current(offs as i64)));
				self.mode = Found;
				Ok(Res::Found)
			},
			Found => Ok(Res::Found),
		}
	}
	pub fn into_header(self) -> [u8; 27] {
		use self::UntilPageHeaderReaderMode::*;
		match self.mode {
			Found => self.ret_buf,
			_ => panic!("wrong mode"),
		}
	}
}

/**
Reader for packets from an Ogg stream.

This reads codec packets belonging to several different logical streams from one physical Ogg container stream.

This reader is not async ready. It does not keep its internal state consistent when it encounters the `WouldBlock` error kind.

If you desire async functionality, consider enabling the `async` feature and look into the async module.
*/
pub struct PacketReader<T :io::Read + io::Seek> {
	rdr :T,

	base_pck_rdr :BasePacketReader,

	read_some_pg :bool,
}

impl<T :io::Read + io::Seek> PacketReader<T> {
	/// Constructs a new `PacketReader` with a given `Read`.
	pub fn new(rdr :T) -> PacketReader<T> {
		PacketReader { rdr, base_pck_rdr : BasePacketReader::new(),
			read_some_pg : false }
	}
	/// Returns the wrapped reader, consuming the `PacketReader`.
	pub fn into_inner(self) -> T {
		self.rdr
	}
	/// Reads a packet, and returns it on success.
	///
	/// Ok(None) is returned if the physical stream has ended.
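	///
	/// A minimal end-to-end sketch (the file name is illustrative only):
	///
	/// ```no_run
	/// use ogg::PacketReader;
	///
	/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
	/// let f = std::fs::File::open("example.ogg")?;
	/// let mut rdr = PacketReader::new(f);
	/// while let Some(pck) = rdr.read_packet()? {
	/// 	println!("serial 0x{:08x}, {} bytes", pck.stream_serial(), pck.data.len());
	/// }
	/// # Ok(()) }
	/// ```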
	pub fn read_packet(&mut self) -> Result<Option<Packet>, OggReadError> {
		// Read pages until we got a valid entire packet
		// (packets may span multiple pages, so reading one page
		// doesn't always suffice to give us a valid packet)
		loop {
			if let Some(pck) = self.base_pck_rdr.read_packet() {
				return Ok(Some(pck));
			}
			let page = tri!(self.read_ogg_page());
			match page {
				Some(page) => tri!(self.base_pck_rdr.push_page(page)),
				None => return Ok(None),
			}
		}
	}
	/// Reads a packet, and returns it on success.
	///
	/// The difference to the `read_packet` function is that this function
	/// returns an Err(_) if the physical stream has ended.
	/// This function is useful if you expect a new packet to come.
	pub fn read_packet_expected(&mut self) -> Result<Packet, OggReadError> {
		match tri!(self.read_packet()) {
			Some(p) => Ok(p),
			None => tri!(Err(Error::new(ErrorKind::UnexpectedEof,
				"Expected ogg packet but found end of physical stream"))),
		}
	}
	/// Reads until the new page header, and then returns the page header array.
	///
	/// If no new page header is immediately found, it performs a "recapture",
	/// meaning it searches for the capture pattern, and if it finds it, it
	/// reads the complete first 27 bytes of the header, and returns them.
	///
	/// Ok(None) is returned if the stream has ended, with neither an
	/// uncompleted page nor non-page data present after the last page (if any).
	fn read_until_pg_header(&mut self) -> Result<Option<[u8; 27]>, OggReadError> {
		let mut r = UntilPageHeaderReader::new();
		use self::UntilPageHeaderResult::*;
		let mut res = tri!(r.do_read(&mut self.rdr));
		loop {
			res = match res {
				Eof => return Ok(None),
				Found => {
					// Keep track whether a page was read to distinguish non-Ogg
					// files from Ogg files with trailing junk at read_ogg_page.
					self.read_some_pg = true;
					break
				},
				ReadNeeded => tri!(r.do_read(&mut self.rdr)),
				SeekNeeded => tri!(r.do_seek(&mut self.rdr)),
			}
		}
		Ok(Some(r.into_header()))
	}
	/// Parses and reads a new OGG page
	///
	/// To support seeking this does not assume that the capture pattern
	/// is at the current reader position.
	/// Instead it searches until it finds the capture pattern.
	fn read_ogg_page(&mut self) -> Result<Option<OggPage>, OggReadError> {
		let header_buf :[u8; 27] = match tri!(self.read_until_pg_header()) {
			Some(s) => s,
			None if self.read_some_pg => return Ok(None),
			None => return Err(OggReadError::NoCapturePatternFound),
		};
		let (mut pg_prs, page_segments) = tri!(PageParser::new(header_buf));

		// TODO fix this, we initialize memory for NOTHING!!!
		// For some reason, this is seen as "unsafe" by rustc.
		let mut segments_buf = vec![0; page_segments];
		tri!(self.rdr.read_exact(&mut segments_buf));

		let page_siz = pg_prs.parse_segments(segments_buf);

		let mut packet_data = vec![0; page_siz as usize];
		tri!(self.rdr.read_exact(&mut packet_data));

		Ok(Some(tri!(pg_prs.parse_packet_data(packet_data))))
	}
	/// Seeks the underlying reader
	///
	/// Seeks the reader that this PacketReader bases on by the specified
	/// number of bytes. All new pages will be read from the new position.
	///
	/// This also flushes all the unread packets in the queue.
	pub fn seek_bytes(&mut self, pos :SeekFrom) -> Result<u64, OggReadError> {
		let r = tri!(self.rdr.seek(pos));
		// Reset the internal state
		self.base_pck_rdr.update_after_seek();
		return Ok(r);
	}
	/// Seeks to absolute granule pos
	///
	/// More specifically, it seeks to the first Ogg page
	/// that has an `absgp` greater or equal to the specified one.
	/// In the case of continued packets, the seek operation may also end up
	/// at the last page that comes before such a page and has a packet start.
	///
	/// The passed `stream_serial` parameter controls the stream
	/// serial number to filter our search for. If it's `None`, no
	/// filtering is applied, but if it is `Some(n)`, we filter for
	/// streams with the serial number `n`.
	/// Note that the `None` case is only intended for streams
	/// where only one logical stream exists, the seek may misbehave
	/// if `None` gets passed when multiple streams exist.
	///
	/// The returned bool indicates whether the seek was successful.
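	///
	/// A hedged sketch (the position value is an arbitrary example;
	/// its meaning depends on the codec):
	///
	/// ```no_run
	/// use ogg::PacketReader;
	///
	/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
	/// let f = std::fs::File::open("example.ogg")?;
	/// let mut rdr = PacketReader::new(f);
	/// // For a 44.1 kHz Vorbis stream, this is one second in.
	/// let found = rdr.seek_absgp(None, 44100)?;
	/// println!("seek successful: {}", found);
	/// # Ok(()) }
	/// ```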
					// Of course, an exact match only happens in the fewest
					// of cases.
					if pg.0.bi.absgp == $goal {
						found!(pos);
					}
					// If we found a page past our goal, we already
					// found a position that can serve as end post of the search.
					if pg.0.bi.absgp > $goal {
						break;
					}
					// Stop the search if the stream has ended.
					if pg.0.bi.last_page {
						return Ok(false)
					}
					// If the page is not interesting, seek over it.
				}
				(pos, pg)
			}};
		}
		macro_rules! pg_read_match_serial {
			{} => {{
				let mut pos;
				let mut pg;
				let mut continued_pck_start = None;
				loop {
					pos = tri!(self.rdr.seek(SeekFrom::Current(0)));
					pg = bt!(self.read_ogg_page());
					/*println!("absgp {} serial {} wh {} pe {} @ {}",
						pg.0.bi.absgp, pg.0.bi.sequence_num,
						pg.has_whole_packet(), pg.has_packet_end(), pos);// */
					match stream_serial {
						// Continue the search if we encounter a
						// page with a different stream serial
						Some(s) if pg.0.stream_serial != s => (),
						_ => match continued_pck_start {
							None if pg.has_whole_packet() => break,
							None if pg.has_packet_start() => {
								continued_pck_start = Some(pos);
							},
							Some(s) if pg.has_packet_end() => {
								// We have remembered a packet start,
								// and have just encountered a packet end.
								// Return the position of the start with the
								// info from the end (for the absgp).
								pos = s;
								break;
							},
							_ => (),
						},
					}
				}
				(pos, pg)
			}};
		}
		// Bisect seeking algo.
		// Start by finding boundaries, e.g. at the start and
		// end of the file, then bisect those boundaries successively
		// until a page is found.
		//println!("seek start. goal = {}", pos_goal);
		let ab_of = |pg :&OggPage| { pg.0.bi.absgp };
		let seq_of = |pg :&OggPage| { pg.0.bi.sequence_num };

		// First, find initial "boundaries".
		// Seek to the start of the file to get the starting boundary.
		tri!(self.rdr.seek(SeekFrom::Start(0)));
		let (mut begin_pos, mut begin_pg) = pg_read_match_serial!();
		// If the goal is the beginning, we are done.
		if pos_goal == 0 {
			//println!("Seeking to the beginning of the stream - skipping bisect.");
			found!(begin_pos);
		}
		// Seek to the end of the file to get the ending boundary.
		// TODO the 200 KB is just a guessed number, any ideas
		// to improve it?
		tri!(seek_before_end(&mut self.rdr, 200 * 1024));
		let (mut end_pos, mut end_pg) = pg_read_until_end_or_goal!(pos_goal);
		// Then perform the bisection.
		loop {
			// The search is done if the two limits are the same page,
			// or consecutive pages.
			if seq_of(&end_pg) - seq_of(&begin_pg) <= 1 {
				found!(end_pos);
			}
			// Perform the bisection step.
			let pos_to_seek = begin_pos + (end_pos - begin_pos) / 2;
			tri!(self.rdr.seek(SeekFrom::Start(pos_to_seek)));
			let (pos, pg) = pg_read_match_serial!();
			/*println!("seek {} {} . {} @ {} {} . {}",
				ab_of(&begin_pg), ab_of(&end_pg), ab_of(&pg),
				begin_pos, end_pos, pos);// */
			if seq_of(&end_pg) == seq_of(&pg) ||
					seq_of(&begin_pg) == seq_of(&pg) {
				//println!("switching to linear.");
				// The bisection seek doesn't bring us any further.
				// Switch to a linear seek to get the last details.
				let mut pos;
				let mut pg;
				let mut last_packet_end_pos = begin_pos;
				tri!(self.rdr.seek(SeekFrom::Start(begin_pos)));
				loop {
					pos = tri!(self.rdr.seek(SeekFrom::Current(0)));
					pg = bt!(self.read_ogg_page());
					/*println!("absgp {} pck_start {} whole_pck {} pck_end {} @ {} {}",
						ab_of(&pg), pg.has_packet_start(), pg.has_whole_packet(),
						pg.has_packet_end(), pos, last_packet_end_pos);// */
					match stream_serial {
						// Continue the search if we encounter a
						// page with a different stream serial,
						// or one with an absgp of -1.
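						// (An absgp of -1, i.e. all bits set, marks a page
						// on which no packet ends, so it carries no usable
						// position information for the seek.)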
						Some(s) if pg.0.stream_serial != s => (),
						_ if ab_of(&pg) == -1i64 as u64 => (),
						// The page is found if the absgp is >= our goal.
						_ if ab_of(&pg) >= pos_goal => found!(last_packet_end_pos),
						// If we encounter a page with a packet end,
						// update the remembered position accordingly.
						_ => if pg.has_packet_end() {
							last_packet_end_pos = pos;
						},
					}
				}
			}
			if ab_of(&pg) >= pos_goal {
				end_pos = pos;
				end_pg = pg;
			} else {
				begin_pos = pos;
				begin_pg = pg;
			}
		}
	}
	/// Resets the internal state by deleting all
	/// unread packets.
	pub fn delete_unread_packets(&mut self) {
		self.base_pck_rdr.update_after_seek();
	}
}

// util function
fn seek_before_end<T :io::Seek>(mut rdr :T,
		offs :u64) -> Result<u64, OggReadError> {
	let end_pos = tri!(rdr.seek(SeekFrom::End(0)));
	let end_pos_to_seek = ::std::cmp::min(end_pos, offs);
	return Ok(tri!(rdr.seek(SeekFrom::End(-(end_pos_to_seek as i64)))));
}

#[cfg(feature = "async")]
/**
Asynchronous Ogg decoding
*/
pub mod async_api {
	use std::pin::Pin;
	use std::task::{Context, Poll};
	use super::*;
	use futures_core::{ready, Stream};
	use futures_io::AsyncRead as FuturesAsyncRead;
	use tokio::io::AsyncRead as TokioAsyncRead;
	use bytes::BytesMut;
	use pin_project::pin_project;
	use tokio_util::codec::{Decoder, FramedRead};
	use tokio_util::compat::{Compat, FuturesAsyncReadCompatExt};

	enum PageDecodeState {
		Head,
		Segments(PageParser, usize),
		PacketData(PageParser, usize),
		InUpdate,
	}

	impl PageDecodeState {
		fn needed_size(&self) -> usize {
			match self {
				PageDecodeState::Head => 27,
				PageDecodeState::Segments(_, s) => *s,
				PageDecodeState::PacketData(_, s) => *s,
				PageDecodeState::InUpdate => panic!("invalid state"),
			}
		}
	}

	/**
	Async page reading functionality.
	*/
	struct PageDecoder {
		state : PageDecodeState,
	}

	impl PageDecoder {
		fn new() -> Self {
			PageDecoder {
				state : PageDecodeState::Head,
			}
		}
	}

	impl Decoder for PageDecoder {
		type Item = OggPage;
		type Error = OggReadError;

		fn decode(&mut self, buf :&mut BytesMut) -> Result<Option<OggPage>, OggReadError> {
			use self::PageDecodeState::*;
			loop {
				let needed_size = self.state.needed_size();
				if buf.len() < needed_size {
					return Ok(None);
				}
				let mut ret = None;
				let consumed_buf = buf.split_to(needed_size).to_vec();
				self.state = match ::std::mem::replace(&mut self.state, InUpdate) {
					Head => {
						let mut hdr_buf = [0; 27];
						// TODO once we have const generics, the copy below can be done
						// much nicer, maybe with a new into_array fn on Vec's
						hdr_buf.copy_from_slice(&consumed_buf);
						let tup = tri!(PageParser::new(hdr_buf));
						Segments(tup.0, tup.1)
					},
					Segments(mut pg_prs, _) => {
						let new_needed_len = pg_prs.parse_segments(consumed_buf);
						PacketData(pg_prs, new_needed_len)
					},
					PacketData(pg_prs, _) => {
						ret = Some(tri!(pg_prs.parse_packet_data(consumed_buf)));
						Head
					},
					InUpdate => panic!("invalid state"),
				};
				if ret.is_some() {
					return Ok(ret);
				}
			}
		}

		fn decode_eof(&mut self, buf :&mut BytesMut) -> Result<Option<OggPage>, OggReadError> {
			// Ugly hack for the "bytes remaining on stream" error
			return self.decode(buf);
		}
	}

	/**
	Async packet reading functionality.
	*/
	#[pin_project]
	pub struct PacketReader<T> where T :TokioAsyncRead {
		base_pck_rdr :BasePacketReader,
		#[pin]
		pg_rd :FramedRead<T, PageDecoder>,
	}

	impl<T :TokioAsyncRead> PacketReader<T> {
		/// Wraps the specified Tokio runtime `AsyncRead` into an Ogg packet
		/// reader.
		///
		/// This is the recommended constructor when using the Tokio runtime
		/// types.
		pub fn new(inner :T) -> Self {
			PacketReader {
				base_pck_rdr : BasePacketReader::new(),
				pg_rd : FramedRead::new(inner, PageDecoder::new()),
			}
		}
	}

	impl<T :FuturesAsyncRead> PacketReader<Compat<T>> {
		/// Wraps the specified futures_io `AsyncRead` into an Ogg packet
		/// reader.
		///
		/// This crate uses Tokio internally, so a wrapper that may have
		/// some performance cost will be used. Therefore, this constructor
		/// should only be used when dealing with `AsyncRead` implementations
		/// from other runtimes, and when implementing a Tokio `AsyncRead`
		/// compatibility layer oneself is not desired.
		pub fn new_compat(inner :T) -> Self {
			Self::new(inner.compat())
		}
	}

	impl<T :TokioAsyncRead> Stream for PacketReader<T> {
		type Item = Result<Packet, OggReadError>;

		fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
			let mut this = self.project();
			// Read pages until we get a valid entire packet
			// (packets may span multiple pages, so reading one page
			// doesn't always suffice to give us a valid packet).
			loop {
				if let Some(pck) = this.base_pck_rdr.read_packet() {
					return Poll::Ready(Some(Ok(pck)));
				}
				let page = match ready!(this.pg_rd.as_mut().poll_next(cx)) {
					Some(Ok(page)) => page,
					Some(Err(err)) => return Poll::Ready(Some(Err(err))),
					None => return Poll::Ready(None),
				};
				match this.base_pck_rdr.push_page(page) {
					Ok(_) => {},
					Err(err) => return Poll::Ready(Some(Err(err))),
				};
			}
		}
	}
}
ogg-0.9.0/src/test.rs000064400000000000000000000427441046102023000125310ustar 00000000000000// Ogg decoder and encoder written in Rust
//
// Copyright (c) 2016-2017 est31 <MTest31@outlook.com>
// and contributors. All rights reserved.
// Redistribution or use only under the terms
// specified in the LICENSE file attached to this
// source distribution.

use super::*;
use std::io::{Cursor, Seek, SeekFrom, Write};

macro_rules! test_arr_eq {
	($a_arr:expr, $b_arr:expr) => {
		let a_arr = &$a_arr;
		let b_arr = &$b_arr;
		for i in 0 .. b_arr.len() {
			if a_arr[i] != b_arr[i] {
				panic!("Mismatch of values at index {}: {} {}",
					i, a_arr[i], b_arr[i]);
			}
		}
	}
}

#[test]
fn test_packet_rw() {
	let mut c = Cursor::new(Vec::new());
	let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
	let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124];
	let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
	{
		let mut w = PacketWriter::new(&mut c);
		let np = PacketWriteEndInfo::NormalPacket;
		w.write_packet(&test_arr[..], 0xdeadb33f, np, 0).unwrap();
		w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap();
		w.write_packet(&test_arr_3[..], 0xdeadb33f,
			PacketWriteEndInfo::EndPage, 2).unwrap();
	}
	//print_u8_slice(c.get_ref());
	assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
	{
		let mut r = PacketReader::new(c);
		let p1 = r.read_packet().unwrap().unwrap();
		assert_eq!(test_arr, *p1.data);
		let p2 = r.read_packet().unwrap().unwrap();
		assert_eq!(test_arr_2, *p2.data);
		let p3 = r.read_packet().unwrap().unwrap();
		assert_eq!(test_arr_3, *p3.data);
	}

	// Now test packets spanning multiple segments
	let mut c = Cursor::new(Vec::new());
	let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
	let mut test_arr_2 = [0; 700];
	let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
	for (idx, a) in test_arr_2.iter_mut().enumerate() {
		*a = (idx as u8) / 4;
	}
	{
		let mut w = PacketWriter::new(&mut c);
		let np = PacketWriteEndInfo::NormalPacket;
		w.write_packet(&test_arr[..], 0xdeadb33f, np, 0).unwrap();
		w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap();
		w.write_packet(&test_arr_3[..], 0xdeadb33f,
			PacketWriteEndInfo::EndPage, 2).unwrap();
	}
	//print_u8_slice(c.get_ref());
	assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
	{
		let mut r = PacketReader::new(&mut c);
		let p1 = r.read_packet().unwrap().unwrap();
		assert_eq!(test_arr, *p1.data);
		let p2 = r.read_packet().unwrap().unwrap();
		test_arr_eq!(test_arr_2, *p2.data);
		let p3 = r.read_packet().unwrap().unwrap();
		assert_eq!(test_arr_3,
			*p3.data);
	}

	// Now test packets spanning multiple pages
	let mut c = Cursor::new(Vec::new());
	let mut test_arr_2 = [0; 14_000];
	let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
	for (idx, a) in test_arr_2.iter_mut().enumerate() {
		*a = (idx as u8) / 4;
	}
	{
		let mut w = PacketWriter::new(&mut c);
		let np = PacketWriteEndInfo::NormalPacket;
		w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap();
		w.write_packet(&test_arr_3[..], 0xdeadb33f,
			PacketWriteEndInfo::EndPage, 2).unwrap();
	}
	//print_u8_slice(c.get_ref());
	assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
	{
		let mut r = PacketReader::new(c);
		let p2 = r.read_packet().unwrap().unwrap();
		test_arr_eq!(test_arr_2, *p2.data);
		let p3 = r.read_packet().unwrap().unwrap();
		assert_eq!(test_arr_3, *p3.data);
	}
}

#[test]
fn test_page_end_after_first_packet() {
	// Test that everything works well if we force a page end
	// after the first packet
	let mut c = Cursor::new(Vec::new());
	let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
	let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124];
	let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
	{
		let mut w = PacketWriter::new(&mut c);
		let np = PacketWriteEndInfo::NormalPacket;
		w.write_packet(&test_arr[..], 0xdeadb33f,
			PacketWriteEndInfo::EndPage, 0).unwrap();
		w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap();
		w.write_packet(&test_arr_3[..], 0xdeadb33f,
			PacketWriteEndInfo::EndPage, 2).unwrap();
	}
	//print_u8_slice(c.get_ref());
	assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
	{
		let mut r = PacketReader::new(c);
		let p1 = r.read_packet().unwrap().unwrap();
		assert_eq!(test_arr, *p1.data);
		let p2 = r.read_packet().unwrap().unwrap();
		assert_eq!(test_arr_2, *p2.data);
		let p3 = r.read_packet().unwrap().unwrap();
		assert_eq!(test_arr_3, *p3.data);
	}
}

#[test]
fn test_packet_write() {
	let mut c = Cursor::new(Vec::new());

	// Test page taken from a real Ogg file
	let test_arr_out = [
		0x4f, 0x67, 0x67, 0x53, 0x00, 0x02, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0xa3,
		0x90, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x94,
		0x4e, 0x3d, 0x01, 0x1e, 0x01, 0x76, 0x6f, 0x72,
		0x62, 0x69, 0x73, 0x00, 0x00, 0x00, 0x00, 0x02,
		0x44, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x80, 0xb5, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
		0xb8, 0x01u8];
	let test_arr_in = [0x01, 0x76, 0x6f, 0x72,
		0x62, 0x69, 0x73, 0x00, 0x00, 0x00, 0x00, 0x02,
		0x44, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x80, 0xb5, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
		0xb8, 0x01u8];
	{
		let mut w = PacketWriter::new(&mut c);
		w.write_packet(&test_arr_in[..], 0x5b90a374,
			PacketWriteEndInfo::EndPage, 0).unwrap();
	}
	//print_u8_slice(c.get_ref());
	assert_eq!(c.get_ref().len(), test_arr_out.len());
	let cr = c.get_ref();
	test_arr_eq!(cr, test_arr_out);
}

#[test]
fn test_write_large() {
	// Test that writing an overlarge packet works,
	// i.e. where a new page is forced by the
	// first packet in the page.
	let mut c = Cursor::new(Vec::new());

	// A page can contain at most 255 * 255 = 65025
	// bytes of payload packet data.
	// A length of 70_000 is guaranteed to create a page break.
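	// (gen_pck takes its length in units of four bytes, hence the
	// division by four below; the call thus generates 70_000 bytes.)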
let test_arr = gen_pck(1234, 70_000 / 4); let mut w = PacketWriter::new(&mut c); w.write_packet(&test_arr, 0x5b90a374, PacketWriteEndInfo::EndPage, 0).unwrap(); //print_u8_slice(c.get_ref()); assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); { let mut r = PacketReader::new(c); let p = r.read_packet().unwrap().unwrap(); test_arr_eq!(test_arr, *p.data); } } struct XorShift { state :(u32, u32, u32, u32), } impl XorShift { fn from_two(seed :(u32, u32)) -> Self { let mut xs = XorShift { state : (seed.0 ^ 0x2a24a930, seed.1 ^ 0xa9f60227, !seed.0 ^ 0x68c44d2d, !seed.1 ^ 0xa1f9794a) }; xs.next(); xs.next(); xs.next(); xs } fn next(&mut self) -> u32 { let mut r = self.state.3; r ^= r << 11; r ^= r >> 8; self.state.3 = self.state.2; self.state.2 = self.state.1; self.state.1 = self.state.0; r ^= self.state.0; r ^= self.state.0 >> 19; self.state.0 = r; r } } fn gen_pck(seed :u32, len_d_four :usize) -> Vec { let mut ret = Vec::with_capacity(len_d_four * 4); let mut xs = XorShift::from_two((seed, len_d_four as u32)); if len_d_four > 0 { ret.push(seed as u8); ret.push((seed >> 8) as u8); ret.push((seed >> 16) as u8); ret.push((seed >> 24) as u8); } for _ in 1..len_d_four { let v = xs.next(); ret.push(v as u8); ret.push((v >> 8) as u8); ret.push((v >> 16) as u8); ret.push((v >> 24) as u8); } ret } macro_rules! test_seek_r { ($r:expr, $absgp:expr) => { test_seek_r!($r, $absgp, +, 0); }; ($r:expr, $absgp:expr, $o:tt, $m:expr) => { // First, perform the seek $r.seek_absgp(None, $absgp).unwrap(); // Then go to the searched packet inside the page // We know that all groups of three packets form one. for _ in 0 .. ($absgp % 3) $o $m { $r.read_packet().unwrap().unwrap(); } // Now read the actual packet we are interested in and let pck = $r.read_packet().unwrap().unwrap(); // a) ensure we have a correct absolute granule pos // for the page and assert!(($absgp - pck.absgp_page as i64).abs() <= 3); // b) ensure the packet's content matches with the one we // have put in. This is another insurance. test_arr_eq!(pck.data, gen_pck($absgp, &pck.data.len() / 4)); }; } macro_rules! 
ensure_continues_r { ($r:expr, $absgp:expr) => { // Ensure the stream continues normally let pck = $r.read_packet().unwrap().unwrap(); test_arr_eq!(pck.data, gen_pck($absgp, &pck.data.len() / 4)); let pck = $r.read_packet().unwrap().unwrap(); test_arr_eq!(pck.data, gen_pck($absgp + 1, &pck.data.len() / 4)); let pck = $r.read_packet().unwrap().unwrap(); test_arr_eq!(pck.data, gen_pck($absgp + 2, &pck.data.len() / 4)); let pck = $r.read_packet().unwrap().unwrap(); test_arr_eq!(pck.data, gen_pck($absgp + 3, &pck.data.len() / 4)); }; } #[test] fn test_byte_seeking_continued() { let mut c = Cursor::new(Vec::new()); let off; { let mut w = PacketWriter::new(&mut c); let np = PacketWriteEndInfo::NormalPacket; let ep = PacketWriteEndInfo::EndPage; let es = PacketWriteEndInfo::EndStream; w.write_packet(gen_pck(1, 300), 0xdeadb33f, ep, 1).unwrap(); w.write_packet(gen_pck(2, 270_000), 0xdeadb33f, np, 2).unwrap(); off = w.get_current_offs().unwrap(); w.write_packet(gen_pck(3, 270_000), 0xdeadb33f, np, 3).unwrap(); w.write_packet(gen_pck(4, 270_000), 0xdeadb33f, es, 4).unwrap(); } assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); let mut r = PacketReader::new(c); let pck = r.read_packet().unwrap().unwrap(); assert_eq!(1, pck.absgp_page); test_arr_eq!(pck.data, gen_pck(1, &pck.data.len() / 4)); // Jump over the second packet assert_eq!(r.seek_bytes(SeekFrom::Start(off)).unwrap(), off); let pck = r.read_packet().unwrap().unwrap(); assert_eq!(3, pck.absgp_page); test_arr_eq!(pck.data, gen_pck(3, &pck.data.len() / 4)); let pck = r.read_packet().unwrap().unwrap(); assert_eq!(4, pck.absgp_page); test_arr_eq!(pck.data, gen_pck(4, &pck.data.len() / 4)); } #[test] fn test_seeking() { let pck_count = 402; let mut rng = XorShift::from_two((0x9899eb03, 0x54138143)); let mut c = Cursor::new(Vec::new()); { let mut w = PacketWriter::new(&mut c); let np = PacketWriteEndInfo::NormalPacket; let ep = PacketWriteEndInfo::EndPage; for ctr in 0..pck_count { w.write_packet(gen_pck(ctr, rng.next() as usize & 127), 0xdeadb33f, if (ctr + 1) % 3 == 0 { ep } else { np }, ctr as u64).unwrap(); } } assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); let mut r = PacketReader::new(c); macro_rules! test_seek { ($absgp:expr) => { test_seek_r!(r, $absgp) }; } macro_rules! ensure_continues { ($absgp:expr) => { ensure_continues_r!(r, $absgp) }; } test_seek!(32); test_seek!(300); test_seek!(314); test_seek!(100); ensure_continues!(101); test_seek!(10); ensure_continues!(11); // Ensure that if we seek to the same place multiple times, it doesn't // fill data needlessly. r.seek_absgp(None, 377).unwrap(); r.seek_absgp(None, 377).unwrap(); test_seek!(377); ensure_continues!(378); // Ensure that if we seek to the same place multiple times, it doesn't // fill data needlessly. r.seek_absgp(None, 200).unwrap(); r.seek_absgp(None, 200).unwrap(); test_seek!(200); ensure_continues!(201); // Ensure the final page can be sought to test_seek!(401); // After we sought to the final page, we should be able to seek // before it again. test_seek!(250); } // TODO add seeking tests for more cases: // * multiple logical streams // * seeking to unavailable positions #[test] /// Test for pages with -1 absgp (no packet ending there), /// and generally for continued packets. fn test_seeking_continued() { let pck_count = 402; // Array of length to add to the randomized packet size // From this array, we take a random index to determine // the value for the current packet. 
	let mut pck_len_add = [0; 8];
	// One page can contain at most 255 * 255 = 65025
	// bytes of payload packet data.
	// Therefore, to force a page that contains no
	// page ending, we need more than double that number.
	// 65025 * 2 = 130_050.
	// 1/4 for large packets that are guaranteed to produce at
	// least one -1 absgp page each.
	pck_len_add[0] = 133_000;
	pck_len_add[1] = 133_000;
	// 1/8 for really large packets that produce >= 3
	// -1 absgp pages each.
	pck_len_add[2] = 270_000;
	// 1/4 for big fill packets;
	// one page is full after a few of them.
	pck_len_add[3] = 30_000;
	pck_len_add[4] = 13_000;
	// 3/8 for small fill packets (0-127 bytes)
	let mut rng = XorShift::from_two((0x9899eb03, 0x54138143));
	let mut c = Cursor::new(Vec::new());
	{
		let mut w = PacketWriter::new(&mut c);
		let np = PacketWriteEndInfo::NormalPacket;
		let ep = PacketWriteEndInfo::EndPage;
		for ctr in 0..pck_count {
			let r = rng.next() as usize;
			let size = ((r & 127) + pck_len_add[(r >> 8) & 7]) >> 2;
			w.write_packet(gen_pck(ctr, size), 0xdeadb33f,
				if (ctr + 1) % 3 == 0 { ep } else { np },
				ctr as u64).unwrap();
		}
	}
	assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
	let mut r = PacketReader::new(c);
	macro_rules! test_seek {
		($absgp:expr) => {
			test_seek_r!(r, $absgp)
		};
		($absgp:expr, $o:tt, $m:expr) => {
			test_seek_r!(r, $absgp, $o, $m)
		};
	}
	macro_rules! ensure_continues {
		($absgp:expr) => {
			ensure_continues_r!(r, $absgp)
		};
	}
	test_seek!(32);
	test_seek!(300,+,2);
	test_seek!(314,+,2);
	test_seek!(100,-,1);
	ensure_continues!(101);
	test_seek!(10);
	ensure_continues!(11);
	// Ensure that if we seek to the same place multiple times, it doesn't
	// fill data needlessly.
	r.seek_absgp(None, 377).unwrap();
	r.seek_absgp(None, 377).unwrap();
	test_seek!(377);
	ensure_continues!(378);
	// Ensure that if we seek to the same place multiple times, it doesn't
	// fill data needlessly.
	r.seek_absgp(None, 200).unwrap();
	r.seek_absgp(None, 200).unwrap();
	test_seek!(200);
	ensure_continues!(201);
	// Ensure the final page can be sought to
	test_seek!(401,-,2);
	// After we sought to the final page, we should be able to seek
	// before it again.
	test_seek!(250,-,1);
}

// Regression test for issue 14:
// Have "O" right before the OggS magic.
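// In other words, the reader must not get confused by a partial match
// of the capture pattern sitting right before a real one.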
#[test] fn test_issue_14() { let mut c = Cursor::new(Vec::new()); let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124]; let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125]; { c.write_all(&[b'O']).unwrap(); let mut w = PacketWriter::new(&mut c); let np = PacketWriteEndInfo::NormalPacket; w.write_packet(&test_arr[..], 0xdeadb33f, np, 0).unwrap(); w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap(); w.write_packet(&test_arr_3[..], 0xdeadb33f, PacketWriteEndInfo::EndPage, 2).unwrap(); } //print_u8_slice(c.get_ref()); assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); { let mut r = PacketReader::new(c); let p1 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr, *p1.data); let p2 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr_2, *p2.data); let p3 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr_3, *p3.data); } // Now test packets spanning multiple segments let mut c = Cursor::new(Vec::new()); let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let mut test_arr_2 = [0; 700]; let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125]; for (idx, a) in test_arr_2.iter_mut().enumerate() { *a = (idx as u8) / 4; } { let mut w = PacketWriter::new(&mut c); let np = PacketWriteEndInfo::NormalPacket; w.write_packet(&test_arr[..], 0xdeadb33f, np, 0).unwrap(); w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap(); w.write_packet(&test_arr_3[..], 0xdeadb33f, PacketWriteEndInfo::EndPage, 2).unwrap(); } //print_u8_slice(c.get_ref()); assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); { let mut r = PacketReader::new(&mut c); let p1 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr, *p1.data); let p2 = r.read_packet().unwrap().unwrap(); test_arr_eq!(test_arr_2, *p2.data); let p3 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr_3, *p3.data); } // Now test packets spanning multiple pages let mut c = Cursor::new(Vec::new()); let mut test_arr_2 = [0; 14_000]; let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125]; for (idx, a) in test_arr_2.iter_mut().enumerate() { *a = (idx as u8) / 4; } { let mut w = PacketWriter::new(&mut c); let np = PacketWriteEndInfo::NormalPacket; w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap(); w.write_packet(&test_arr_3[..], 0xdeadb33f, PacketWriteEndInfo::EndPage, 2).unwrap(); } //print_u8_slice(c.get_ref()); assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); { let mut r = PacketReader::new(c); let p2 = r.read_packet().unwrap().unwrap(); test_arr_eq!(test_arr_2, *p2.data); let p3 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr_3, *p3.data); } } // Regression test for issue 7: // Ignore junk after the last Ogg page, while ensuring that non-Ogg // data is treated as invalid. #[test] fn test_issue_7() { let mut c = Cursor::new(Vec::new()); let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124]; { let mut w = PacketWriter::new(&mut c); w.write_packet(&test_arr[..], 0xdeadb33f, PacketWriteEndInfo::EndStream, 0).unwrap(); } // Write trailing garbage. c.write_all(&test_arr_2[..]).unwrap(); //print_u8_slice(c.get_ref()); // Trailing garbage should be ignored. assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); { let mut r = PacketReader::new(c); let p1 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr, *p1.data); // Make sure we don't yield the trailing garbage. assert!(r.read_packet().unwrap().is_none()); } // Non-Ogg data should return an error. 
let c = Cursor::new(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); { let mut r = PacketReader::new(c); assert!(matches!(r.read_packet(), Err(OggReadError::NoCapturePatternFound))); } // Empty data is considered non-Ogg data. let c = Cursor::new(&[]); { let mut r = PacketReader::new(c); assert!(matches!(r.read_packet(), Err(OggReadError::NoCapturePatternFound))); } } ogg-0.9.0/src/writing.rs000064400000000000000000000245241046102023000132350ustar 00000000000000// Ogg decoder and encoder written in Rust // // Copyright (c) 2016-2017 est31 // and contributors. All rights reserved. // Redistribution or use only under the terms // specified in the LICENSE file attached to this // source distribution. /*! Writing logic */ use std::borrow::Cow; use std::result; use std::io::{self, Cursor, Write, Seek, SeekFrom}; use byteorder::{WriteBytesExt, LittleEndian}; use std::collections::HashMap; use crate::crc::vorbis_crc32_update; /// Ogg version of the `std::io::Result` type. /// /// We need `std::result::Result` at other points /// too, so we can't use `Result` as the name. type IoResult = result::Result; /** Writer for packets into an Ogg stream. Note that the functionality of this struct isn't as well tested as for the `PacketReader` struct. */ pub struct PacketWriter<'writer, T :io::Write> { wtr :T, page_vals :HashMap>, } struct CurrentPageValues<'writer> { /// `true` if this page is the first one in the logical bitstream first_page :bool, /// Page counter of the current page /// Increased for every page sequence_num :u32, /// Points to the first unwritten position in cur_pg_lacing. segment_cnt :u8, cur_pg_lacing :[u8; 255], /// The data and the absgp's of the packets cur_pg_data :Vec<(Cow<'writer, [u8]>, u64)>, /// Some(offs), if the last packet /// couldn't make it fully into this page, and /// has to be continued in the next page. /// /// `offs` should point to the first idx in /// cur_pg_data[last] that should NOT be written /// in this page anymore. /// /// None if all packets can be written nicely. pck_this_overflow_idx :Option, /// Some(offs), if the first packet /// couldn't make it fully into the last page, and /// has to be continued in this page. /// /// `offs` should point to the first idx in cur_pg_data[0] /// that hasn't been written. /// /// None if all packets can be written nicely. pck_last_overflow_idx :Option, } /// Specifies whether to end something with the write of the packet. /// /// If you want to end a stream you need to inform the Ogg `PacketWriter` /// about this. This is the enum to do so. /// /// Also, Codecs sometimes have special requirements to put /// the first packet of the whole stream into its own page. /// The `EndPage` variant can be used for this. #[derive(PartialEq)] #[derive(Clone, Copy)] pub enum PacketWriteEndInfo { /// No ends here, just a normal packet NormalPacket, /// Force-end the current page EndPage, /// End the whole logical stream. EndStream, } impl <'writer, T :io::Write> PacketWriter<'writer, T> { pub fn new(wtr :T) -> Self { return PacketWriter { wtr, page_vals : HashMap::new(), }; } pub fn into_inner(self) -> T { self.wtr } /// Access the interior writer /// /// This allows access of the writer contained inside. /// No guarantees are given onto the pattern of the writes. /// They may change in the future. pub fn inner(&self) -> &T { &self.wtr } /// Access the interior writer mutably /// /// This allows access of the writer contained inside. /// No guarantees are given onto the pattern of the writes. /// They may change in the future. 
pub fn inner_mut(&mut self) -> &mut T { &mut self.wtr } /// Write a packet /// /// pub fn write_packet>>(&mut self, pck_cont :P, serial :u32, inf :PacketWriteEndInfo, /* TODO find a better way to design the API around passing the absgp to the underlying implementation. e.g. the caller passes a closure on init which gets called when we encounter a new page... with the param the index inside the current page, or something. */ absgp :u64) -> IoResult<()> { let is_end_stream :bool = inf == PacketWriteEndInfo::EndStream; let pg = self.page_vals.entry(serial).or_insert( CurrentPageValues { first_page : true, sequence_num : 0, segment_cnt : 0, cur_pg_lacing :[0; 255], cur_pg_data :Vec::with_capacity(255), pck_this_overflow_idx : None, pck_last_overflow_idx : None, } ); let pck_cont = pck_cont.into(); let cont_len = pck_cont.len(); pg.cur_pg_data.push((pck_cont, absgp)); let last_data_segment_size = (cont_len % 255) as u8; let needed_segments :usize = (cont_len / 255) + 1; let mut segment_in_page_i :u8 = pg.segment_cnt; let mut at_page_end :bool = false; for segment_i in 0 .. needed_segments { at_page_end = false; if segment_i + 1 < needed_segments { // For all segments containing 255 pieces of data pg.cur_pg_lacing[segment_in_page_i as usize] = 255; } else { // For the last segment, must contain < 255 pieces of data // (including 0) pg.cur_pg_lacing[segment_in_page_i as usize] = last_data_segment_size; } pg.segment_cnt = segment_in_page_i + 1; segment_in_page_i = (segment_in_page_i + 1) % 255; if segment_in_page_i == 0 { if segment_i + 1 < needed_segments { // We have to flush a page, but we know there are more to come... pg.pck_this_overflow_idx = Some((segment_i + 1) * 255); tri!(PacketWriter::write_page(&mut self.wtr, serial, pg, false)); } else { // We have to write a page end, and it's the very last // we need to write tri!(PacketWriter::write_page(&mut self.wtr, serial, pg, is_end_stream)); // Not actually required // (it is always None except if we set it to Some directly // before we call write_page) pg.pck_this_overflow_idx = None; // Required (it could have been Some(offs) before) pg.pck_last_overflow_idx = None; } at_page_end = true; } } if (inf != PacketWriteEndInfo::NormalPacket) && !at_page_end { // Write a page end tri!(PacketWriter::write_page(&mut self.wtr, serial, pg, is_end_stream)); pg.pck_last_overflow_idx = None; // TODO if inf was PacketWriteEndInfo::EndStream, we have to // somehow erase pg from the hashmap... // any ideas? perhaps needs external scope... } // All went fine. 
Ok(()) } fn write_page(wtr :&mut T, serial :u32, pg :&mut CurrentPageValues, last_page :bool) -> IoResult<()> { { // The page header with everything but the lacing values: let mut hdr_cur = Cursor::new(Vec::with_capacity(27)); tri!(hdr_cur.write_all(&[0x4f, 0x67, 0x67, 0x53, 0x00])); let mut flags :u8 = 0; if pg.pck_last_overflow_idx.is_some() { flags |= 0x01; } if pg.first_page { flags |= 0x02; } if last_page { flags |= 0x04; } tri!(hdr_cur.write_u8(flags)); let pck_data = &pg.cur_pg_data; let mut last_finishing_pck_absgp = (-1i64) as u64; for (idx, &(_, absgp)) in pck_data.iter().enumerate() { if !(idx + 1 == pck_data.len() && pg.pck_this_overflow_idx.is_some()) { last_finishing_pck_absgp = absgp; } } tri!(hdr_cur.write_u64::(last_finishing_pck_absgp)); tri!(hdr_cur.write_u32::(serial)); tri!(hdr_cur.write_u32::(pg.sequence_num)); // checksum, calculated later on :) tri!(hdr_cur.write_u32::(0)); tri!(hdr_cur.write_u8(pg.segment_cnt)); let mut hash_calculated :u32; let pg_lacing = &pg.cur_pg_lacing[0 .. pg.segment_cnt as usize]; hash_calculated = vorbis_crc32_update(0, hdr_cur.get_ref()); hash_calculated = vorbis_crc32_update(hash_calculated, pg_lacing); for (idx, &(ref pck, _)) in pck_data.iter().enumerate() { let mut start :usize = 0; if idx == 0 { if let Some(idx) = pg.pck_last_overflow_idx { start = idx; }} let mut end :usize = pck.len(); if idx + 1 == pck_data.len() { if let Some(idx) = pg.pck_this_overflow_idx { end = idx; } } hash_calculated = vorbis_crc32_update(hash_calculated, &pck[start .. end]); } // Go back to enter the checksum // Don't do excessive checking here (that the seek // succeeded & we are at the right pos now). // It's hopefully not required. tri!(hdr_cur.seek(SeekFrom::Start(22))); tri!(hdr_cur.write_u32::(hash_calculated)); // Now all is done, write the stuff! tri!(wtr.write_all(hdr_cur.get_ref())); tri!(wtr.write_all(pg_lacing)); for (idx, &(ref pck, _)) in pck_data.iter().enumerate() { let mut start :usize = 0; if idx == 0 { if let Some(idx) = pg.pck_last_overflow_idx { start = idx; }} let mut end :usize = pck.len(); if idx + 1 == pck_data.len() { if let Some(idx) = pg.pck_this_overflow_idx { end = idx; } } tri!(wtr.write_all(&pck[start .. end])); } } // Reset the page. pg.first_page = false; pg.sequence_num += 1; pg.segment_cnt = 0; // If we couldn't fully write the last // packet, we need to keep it for the next page, // otherwise just clear everything. if pg.pck_this_overflow_idx.is_some() { let d = pg.cur_pg_data.pop().unwrap(); pg.cur_pg_data.clear(); pg.cur_pg_data.push(d); } else { pg.cur_pg_data.clear(); } pg.pck_last_overflow_idx = pg.pck_this_overflow_idx; pg.pck_this_overflow_idx = None; return Ok(()); } } impl PacketWriter<'_, T> { pub fn get_current_offs(&mut self) -> Result { self.wtr.seek(SeekFrom::Current(0)) } } // TODO once 1.18 gets released, move this // to the test module and make wtr pub(crate). #[test] fn test_recapture() { // Test that we can deal with recapture // at varying distances. // This is a regression test use std::io::Write; use super::PacketReader; let mut c = Cursor::new(Vec::new()); let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124]; let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125]; { let np = PacketWriteEndInfo::NormalPacket; let ep = PacketWriteEndInfo::EndPage; { let mut w = PacketWriter::new(&mut c); w.write_packet(&test_arr[..], 0xdeadb33f, ep, 0).unwrap(); // Now, after the end of the page, put in some noise. 
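			// (38 zero bytes of junk force the reader to scan forward
			// before it can recapture at the next capture pattern.)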
w.wtr.write_all(&[0; 38]).unwrap(); w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap(); w.write_packet(&test_arr_3[..], 0xdeadb33f, ep, 2).unwrap(); } } //print_u8_slice(c.get_ref()); assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); { let mut r = PacketReader::new(c); let p1 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr, *p1.data); let p2 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr_2, *p2.data); let p3 = r.read_packet().unwrap().unwrap(); assert_eq!(test_arr_3, *p3.data); } } ogg-0.9.0/tests/async_read.rs000064400000000000000000000105251046102023000142310ustar 00000000000000// Ogg decoder and encoder written in Rust // // Copyright (c) 2016 est31 // and contributors. All rights reserved. // Redistribution or use only under the terms // specified in the LICENSE file attached to this // source distribution. #![cfg(feature = "async")] use std::io; use ogg::{PacketWriter, PacketWriteEndInfo}; use ogg::reading::async_api::PacketReader; use std::io::{Cursor, Seek, SeekFrom}; use std::pin::Pin; use std::task::{Poll, Context}; use pin_project::pin_project; use tokio::io::{AsyncRead, AsyncSeek, ReadBuf}; use futures_util::TryStreamExt; #[pin_project] struct RandomWouldBlock(#[pin] T); impl AsyncRead for RandomWouldBlock { fn poll_read(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { if rand::random() { cx.waker().wake_by_ref(); return Poll::Pending; } self.project().0.poll_read(cx, buf) } } impl AsyncSeek for RandomWouldBlock { fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> io::Result<()> { self.project().0.start_seek(position) } fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if rand::random() { cx.waker().wake_by_ref(); return Poll::Pending; } self.project().0.poll_complete(cx) } } async fn test_ogg_random_would_block_run() { let mut c = Cursor::new(Vec::new()); let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124]; let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125]; { let mut w = PacketWriter::new(&mut c); let np = PacketWriteEndInfo::NormalPacket; w.write_packet(&test_arr[..], 0xdeadb33f, np, 0).unwrap(); w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap(); w.write_packet(&test_arr_3[..], 0xdeadb33f, PacketWriteEndInfo::EndPage, 2).unwrap(); } //print_u8_slice(c.get_ref()); assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); { let mut rwd = RandomWouldBlock(&mut c); let mut r = PacketReader::new(&mut rwd); let p1 = r.try_next().await.unwrap().unwrap(); assert_eq!(test_arr, *p1.data); let p2 = r.try_next().await.unwrap().unwrap(); assert_eq!(test_arr_2, *p2.data); let p3 = r.try_next().await.unwrap().unwrap(); assert_eq!(test_arr_3, *p3.data); } // Now test packets spanning multiple segments let mut c = Cursor::new(Vec::new()); let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let mut test_arr_2 = [0; 700]; let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125]; for (idx, a) in test_arr_2.iter_mut().enumerate() { *a = (idx as u8) / 4; } { let mut w = PacketWriter::new(&mut c); let np = PacketWriteEndInfo::NormalPacket; w.write_packet(&test_arr[..], 0xdeadb33f, np, 0).unwrap(); w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap(); w.write_packet(&test_arr_3[..], 0xdeadb33f, PacketWriteEndInfo::EndPage, 2).unwrap(); } //print_u8_slice(c.get_ref()); assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); { let mut rwd = RandomWouldBlock(&mut c); let mut r = PacketReader::new(&mut rwd); let p1 = 
r.try_next().await.unwrap().unwrap(); assert_eq!(test_arr, p1.data.as_slice()); let p2 = r.try_next().await.unwrap().unwrap(); assert_eq!(test_arr_2, p2.data.as_slice()); let p3 = r.try_next().await.unwrap().unwrap(); assert_eq!(test_arr_3, p3.data.as_slice()); } // Now test packets spanning multiple pages let mut c = Cursor::new(Vec::new()); let mut test_arr_2 = [0; 14_000]; let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125]; for (idx, a) in test_arr_2.iter_mut().enumerate() { *a = (idx as u8) / 4; } { let mut w = PacketWriter::new(&mut c); let np = PacketWriteEndInfo::NormalPacket; w.write_packet(&test_arr_2[..], 0xdeadb33f, np, 1).unwrap(); w.write_packet(&test_arr_3[..], 0xdeadb33f, PacketWriteEndInfo::EndPage, 2).unwrap(); } //print_u8_slice(c.get_ref()); assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0); { let mut rwd = RandomWouldBlock(&mut c); let mut r = PacketReader::new(&mut rwd); let p2 = r.try_next().await.unwrap().unwrap(); assert_eq!(test_arr_2, p2.data.as_slice()); let p3 = r.try_next().await.unwrap().unwrap(); assert_eq!(test_arr_3, p3.data.as_slice()); } } #[tokio::test] async fn test_ogg_random_would_block() { for i in 0 .. 100 { println!("Run {}", i); test_ogg_random_would_block_run().await; } }
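
// A minimal extra check (not part of the original test suite): without
// the RandomWouldBlock wrapper, the async reader should yield the single
// written packet and then end the stream with `None`. This relies only
// on Tokio's `AsyncRead` impl for mutable references to `std::io::Cursor`,
// which the tests above already depend on.
#[tokio::test]
async fn test_ogg_async_eof() {
	let mut c = Cursor::new(Vec::new());
	let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
	{
		let mut w = PacketWriter::new(&mut c);
		w.write_packet(&test_arr[..], 0xdeadb33f,
			PacketWriteEndInfo::EndStream, 0).unwrap();
	}
	assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
	let mut r = PacketReader::new(&mut c);
	let p1 = r.try_next().await.unwrap().unwrap();
	assert_eq!(test_arr, *p1.data);
	// After the end of the physical stream, the Stream impl must
	// signal termination instead of yielding another packet.
	assert!(r.try_next().await.unwrap().is_none());
}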