pest_generator-2.7.4/Cargo.toml0000644000000023700000000000100120670ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.61" name = "pest_generator" version = "2.7.4" authors = ["Dragoș Tiselice "] description = "pest code generator" homepage = "https://pest.rs/" documentation = "https://docs.rs/pest" readme = "_README.md" keywords = [ "pest", "generator", ] categories = ["parsing"] license = "MIT OR Apache-2.0" repository = "https://github.com/pest-parser/pest" [dependencies.pest] version = "2.7.4" default-features = false [dependencies.pest_meta] version = "2.7.4" [dependencies.proc-macro2] version = "1.0" [dependencies.quote] version = "1.0" [dependencies.syn] version = "2.0" [features] default = ["std"] grammar-extras = ["pest_meta/grammar-extras"] not-bootstrap-in-src = ["pest_meta/not-bootstrap-in-src"] std = ["pest/std"] pest_generator-2.7.4/Cargo.toml.orig000064400000000000000000000013631046102023000155510ustar 00000000000000[package] name = "pest_generator" description = "pest code generator" version = "2.7.4" edition = "2021" authors = ["Dragoș Tiselice "] homepage = "https://pest.rs/" repository = "https://github.com/pest-parser/pest" documentation = "https://docs.rs/pest" keywords = ["pest", "generator"] categories = ["parsing"] license = "MIT OR Apache-2.0" readme = "_README.md" rust-version = "1.61" [features] default = ["std"] std = ["pest/std"] not-bootstrap-in-src = ["pest_meta/not-bootstrap-in-src"] grammar-extras = ["pest_meta/grammar-extras"] [dependencies] pest = { path = "../pest", version = "2.7.4", default-features = false } pest_meta = { path = "../meta", version = "2.7.4" } proc-macro2 = "1.0" quote = "1.0" syn = "2.0" pest_generator-2.7.4/LICENSE-APACHE000064400000000000000000000251371046102023000146130ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pest_generator-2.7.4/LICENSE-MIT000064400000000000000000000017771046102023000143270ustar 00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. pest_generator-2.7.4/_README.md000064400000000000000000000175721046102023000143110ustar 00000000000000

# pest. The Elegant Parser [![Join the chat at https://gitter.im/pest-parser/pest](https://badges.gitter.im/dragostis/pest.svg)](https://gitter.im/pest-parser/pest?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Book](https://img.shields.io/badge/book-WIP-4d76ae.svg)](https://pest.rs/book) [![Docs](https://docs.rs/pest/badge.svg)](https://docs.rs/pest) [![pest Continuous Integration](https://github.com/pest-parser/pest/actions/workflows/ci.yml/badge.svg)](https://github.com/pest-parser/pest/actions/workflows/ci.yml) [![codecov](https://codecov.io/gh/pest-parser/pest/branch/master/graph/badge.svg)](https://codecov.io/gh/pest-parser/pest) Rustc Version 1.61.0+ [![Crates.io](https://img.shields.io/crates/d/pest.svg)](https://crates.io/crates/pest) [![Crates.io](https://img.shields.io/crates/v/pest.svg)](https://crates.io/crates/pest) pest is a general purpose parser written in Rust with a focus on accessibility, correctness, and performance. It uses parsing expression grammars (or [PEG]) as input, which are similar in spirit to regular expressions, but which offer the enhanced expressivity needed to parse complex languages. [PEG]: https://en.wikipedia.org/wiki/Parsing_expression_grammar ## Getting started The recommended way to start parsing with pest is to read the official [book]. Other helpful resources: * API reference on [docs.rs] * play with grammars and share them on our [fiddle] * find previous common questions answered or ask questions on [GitHub Discussions] * leave feedback, ask questions, or greet us on [Gitter] or [Discord] [book]: https://pest.rs/book [docs.rs]: https://docs.rs/pest [fiddle]: https://pest.rs/#editor [Gitter]: https://gitter.im/pest-parser/pest [Discord]: https://discord.gg/XEGACtWpT2 [GitHub Discussions]: https://github.com/pest-parser/pest/discussions ## Example The following is an example of a grammar for a list of alphanumeric identifiers where all identifiers don't start with a digit: ```rust alpha = { 'a'..'z' | 'A'..'Z' } digit = { '0'..'9' } ident = { !digit ~ (alpha | digit)+ } ident_list = _{ ident ~ (" " ~ ident)* } // ^ // ident_list rule is silent which means it produces no tokens ``` Grammars are saved in separate .pest files which are never mixed with procedural code. This results in an always up-to-date formalization of a language that is easy to read and maintain. ## Meaningful error reporting Based on the grammar definition, the parser also includes automatic error reporting. For the example above, the input `"123"` will result in: ``` thread 'main' panicked at ' --> 1:1 | 1 | 123 | ^--- | = unexpected digit', src/main.rs:12 ``` while `"ab *"` will result in: ``` thread 'main' panicked at ' --> 1:1 | 1 | ab * | ^--- | = expected ident', src/main.rs:12 ``` These error messages can be obtained from their default `Display` implementation, e.g. `panic!("{}", parser_result.unwrap_err())` or `println!("{}", e)`. ## Pairs API The grammar can be used to derive a `Parser` implementation automatically. 
Parsing returns an iterator of nested token pairs: ```rust extern crate pest; #[macro_use] extern crate pest_derive; use pest::Parser; #[derive(Parser)] #[grammar = "ident.pest"] struct IdentParser; fn main() {    let pairs = IdentParser::parse(Rule::ident_list, "a1 b2").unwrap_or_else(|e| panic!("{}", e)); // Because ident_list is silent, the iterator will contain idents for pair in pairs { // A pair is a combination of the rule which matched and a span of input println!("Rule: {:?}", pair.as_rule()); println!("Span: {:?}", pair.as_span()); println!("Text: {}", pair.as_str()); // A pair can be converted to an iterator of the tokens which make it up: for inner_pair in pair.into_inner() { match inner_pair.as_rule() { Rule::alpha => println!("Letter: {}", inner_pair.as_str()), Rule::digit => println!("Digit: {}", inner_pair.as_str()), _ => unreachable!() }; } } } ``` This produces the following output: ``` Rule: ident Span: Span { start: 0, end: 2 } Text: a1 Letter: a Digit: 1 Rule: ident Span: Span { start: 3, end: 5 } Text: b2 Letter: b Digit: 2 ``` ### Defining multiple parsers in a single file The current automatic `Parser` derivation will produce the `Rule` enum which would have name conflicts if one tried to define multiple such structs that automatically derive `Parser`. One possible way around it is to put each parser struct in a separate namespace: ```rust mod a { #[derive(Parser)] #[grammar = "a.pest"] pub struct ParserA; } mod b { #[derive(Parser)] #[grammar = "b.pest"] pub struct ParserB; } ``` ## Other features * Precedence climbing * Input handling * Custom errors * Runs on stable Rust ## Projects using pest You can find more projects and ecosystem tools in the [awesome-pest](https://github.com/pest-parser/awesome-pest) repo. * [pest_meta](https://github.com/pest-parser/pest/blob/master/meta/src/grammar.pest) (bootstrapped) * [AshPaper](https://github.com/shnewto/ashpaper) * [brain](https://github.com/brain-lang/brain) * [cicada](https://github.com/mitnk/cicada) * [comrak](https://github.com/kivikakk/comrak) * [elastic-rs](https://github.com/cch123/elastic-rs) * [graphql-parser](https://github.com/Keats/graphql-parser) * [handlebars-rust](https://github.com/sunng87/handlebars-rust) * [hexdino](https://github.com/Luz/hexdino) * [Huia](https://gitlab.com/jimsy/huia/) * [insta](https://github.com/mitsuhiko/insta) * [jql](https://github.com/yamafaktory/jql) * [json5-rs](https://github.com/callum-oakley/json5-rs) * [mt940](https://github.com/svenstaro/mt940-rs) * [Myoxine](https://github.com/d3bate/myoxine) * [py_literal](https://github.com/jturner314/py_literal) * [rouler](https://github.com/jarcane/rouler) * [RuSh](https://github.com/lwandrebeck/RuSh) * [rs_pbrt](https://github.com/wahn/rs_pbrt) * [stache](https://github.com/dgraham/stache) * [tera](https://github.com/Keats/tera) * [ui_gen](https://github.com/emoon/ui_gen) * [ukhasnet-parser](https://github.com/adamgreig/ukhasnet-parser) * [ZoKrates](https://github.com/ZoKrates/ZoKrates) * [Vector](https://github.com/timberio/vector) * [AutoCorrect](https://github.com/huacnlee/autocorrect) * [yaml-peg](https://github.com/aofdev/yaml-peg) * [qubit](https://github.com/abhimanyu003/qubit) * [caith](https://github.com/Geobert/caith) (a dice roller crate) * [Melody](https://github.com/yoav-lavi/melody) * [json5-nodes](https://github.com/jlyonsmith/json5-nodes) * [prisma](https://github.com/prisma/prisma) ## Minimum Supported Rust Version (MSRV) This library should always compile with default features on **Rust 1.61.0**. 
## no_std support The `pest` and `pest_derive` crates can be built without the Rust standard library and target embedded environments. To do so, you need to disable their default features. In your `Cargo.toml`, you can specify it as follows: ```toml [dependencies] # ... pest = { version = "2", default-features = false } pest_derive = { version = "2", default-features = false } ``` If you want to build these crates in the pest repository's workspace, you can pass the `--no-default-features` flag to `cargo` and specify these crates using the `--package` (`-p`) flag. For example: ```bash $ cargo build --target thumbv7em-none-eabihf --no-default-features -p pest $ cargo bootstrap $ cargo build --target thumbv7em-none-eabihf --no-default-features -p pest_derive ``` ## Special thanks A special round of applause goes to prof. Marius Minea for his guidance and all pest contributors, some of which being none other than my friends. pest_generator-2.7.4/src/docs.rs000064400000000000000000000077321046102023000147550ustar 00000000000000use pest::iterators::Pairs; use pest_meta::parser::Rule; use std::collections::HashMap; #[derive(Debug)] pub(crate) struct DocComment { pub grammar_doc: String, /// HashMap for store all doc_comments for rules. /// key is rule name, value is doc_comment. pub line_docs: HashMap, } /// Consume pairs to matches `Rule::grammar_doc`, `Rule::line_doc` into `DocComment` /// /// e.g. /// /// a pest file: /// /// ```ignore /// //! This is a grammar doc /// /// line doc 1 /// /// line doc 2 /// foo = {} /// /// /// line doc 3 /// bar = {} /// ``` /// /// Then will get: /// /// ```ignore /// grammar_doc = "This is a grammar doc" /// line_docs = { "foo": "line doc 1\nline doc 2", "bar": "line doc 3" } /// ``` pub(crate) fn consume(pairs: Pairs<'_, Rule>) -> DocComment { let mut grammar_doc = String::new(); let mut line_docs: HashMap = HashMap::new(); let mut line_doc = String::new(); for pair in pairs { match pair.as_rule() { Rule::grammar_doc => { // grammar_doc > inner_doc let inner_doc = pair.into_inner().next().unwrap(); grammar_doc.push_str(inner_doc.as_str()); grammar_doc.push('\n'); } Rule::grammar_rule => { if let Some(inner) = pair.into_inner().next() { // grammar_rule > line_doc | identifier match inner.as_rule() { Rule::line_doc => { if let Some(inner_doc) = inner.into_inner().next() { line_doc.push_str(inner_doc.as_str()); line_doc.push('\n'); } } Rule::identifier => { if !line_doc.is_empty() { let rule_name = inner.as_str().to_owned(); // Remove last \n line_doc.pop(); line_docs.insert(rule_name, line_doc.clone()); line_doc.clear(); } } _ => (), } } } _ => (), } } if !grammar_doc.is_empty() { // Remove last \n grammar_doc.pop(); } DocComment { grammar_doc, line_docs, } } #[cfg(test)] mod tests { use std::collections::HashMap; use pest_meta::parser; use pest_meta::parser::Rule; #[test] fn test_doc_comment() { let pairs = match parser::parse(Rule::grammar_rules, include_str!("../tests/test.pest")) { Ok(pairs) => pairs, Err(_) => panic!("error parsing tests/test.pest"), }; let doc_comment = super::consume(pairs); let mut expected = HashMap::new(); expected.insert("foo".to_owned(), "Matches foo str, e.g.: `foo`".to_owned()); expected.insert( "bar".to_owned(), "Matches bar str\n\n Indent 2, e.g: `bar` or `foobar`".to_owned(), ); expected.insert( "dar".to_owned(), "Matches dar\n\nMatch dar description\n".to_owned(), ); assert_eq!(expected, doc_comment.line_docs); assert_eq!( "A parser for JSON file.\nAnd this is a example for JSON parser.\n\n indent-4-space\n", 
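// The expected value: grammar_doc is assembled from the `//!` lines at the top of
// tests/test.pest, one line of doc text per `//!` line, joined with newlines.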
doc_comment.grammar_doc ); } #[test] fn test_empty_grammar_doc() { assert!(parser::parse(Rule::grammar_rules, "//!").is_ok()); assert!(parser::parse(Rule::grammar_rules, "///").is_ok()); assert!(parser::parse(Rule::grammar_rules, "//").is_ok()); assert!(parser::parse(Rule::grammar_rules, "/// Line Doc").is_ok()); assert!(parser::parse(Rule::grammar_rules, "//! Grammar Doc").is_ok()); assert!(parser::parse(Rule::grammar_rules, "// Comment").is_ok()); } } pest_generator-2.7.4/src/generator.rs000064400000000000000000001166061046102023000160140ustar 00000000000000// pest. The Elegant Parser // Copyright (c) 2018 Dragoș Tiselice // // Licensed under the Apache License, Version 2.0 // or the MIT // license , at your // option. All files in the project carrying such notice may not be copied, // modified, or distributed except according to those terms. use std::path::PathBuf; use proc_macro2::TokenStream; use quote::{ToTokens, TokenStreamExt}; use syn::{self, Ident}; use pest::unicode::unicode_property_names; use pest_meta::ast::*; use pest_meta::optimizer::*; use crate::docs::DocComment; use crate::ParsedDerive; pub(crate) fn generate( parsed_derive: ParsedDerive, paths: Vec, rules: Vec, defaults: Vec<&str>, doc_comment: &DocComment, include_grammar: bool, ) -> TokenStream { let uses_eoi = defaults.iter().any(|name| *name == "EOI"); let name = parsed_derive.name; let builtins = generate_builtin_rules(); let include_fix = if include_grammar { generate_include(&name, paths) } else { quote!() }; let rule_enum = generate_enum(&rules, doc_comment, uses_eoi, parsed_derive.non_exhaustive); let patterns = generate_patterns(&rules, uses_eoi); let skip = generate_skip(&rules); let mut rules: Vec<_> = rules.into_iter().map(generate_rule).collect(); rules.extend(builtins.into_iter().filter_map(|(builtin, tokens)| { if defaults.contains(&builtin) { Some(tokens) } else { None } })); let (impl_generics, ty_generics, where_clause) = parsed_derive.generics.split_for_impl(); let result = result_type(); let parser_impl = quote! { #[allow(clippy::all)] impl #impl_generics ::pest::Parser for #name #ty_generics #where_clause { fn parse<'i>( rule: Rule, input: &'i str ) -> #result< ::pest::iterators::Pairs<'i, Rule>, ::pest::error::Error > { mod rules { #![allow(clippy::upper_case_acronyms)] pub mod hidden { use super::super::Rule; #skip } pub mod visible { use super::super::Rule; #( #rules )* } pub use self::visible::*; } ::pest::state(input, |state| { match rule { #patterns } }) } } }; quote! { #include_fix #rule_enum #parser_impl } } // Note: All builtin rules should be validated as pest builtins in meta/src/validator.rs. // Some should also be keywords. 
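// Each entry pairs a builtin rule's name with the token stream of its generated parsing
// function; `generate` above only emits the entries whose names appear in `defaults`,
// i.e. the builtins the grammar actually references.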
fn generate_builtin_rules() -> Vec<(&'static str, TokenStream)> { let mut builtins = Vec::new(); insert_builtin!(builtins, ANY, state.skip(1)); insert_builtin!( builtins, EOI, state.rule(Rule::EOI, |state| state.end_of_input()) ); insert_builtin!(builtins, SOI, state.start_of_input()); insert_builtin!(builtins, PEEK, state.stack_peek()); insert_builtin!(builtins, PEEK_ALL, state.stack_match_peek()); insert_builtin!(builtins, POP, state.stack_pop()); insert_builtin!(builtins, POP_ALL, state.stack_match_pop()); insert_builtin!(builtins, DROP, state.stack_drop()); insert_builtin!(builtins, ASCII_DIGIT, state.match_range('0'..'9')); insert_builtin!(builtins, ASCII_NONZERO_DIGIT, state.match_range('1'..'9')); insert_builtin!(builtins, ASCII_BIN_DIGIT, state.match_range('0'..'1')); insert_builtin!(builtins, ASCII_OCT_DIGIT, state.match_range('0'..'7')); insert_builtin!( builtins, ASCII_HEX_DIGIT, state .match_range('0'..'9') .or_else(|state| state.match_range('a'..'f')) .or_else(|state| state.match_range('A'..'F')) ); insert_builtin!(builtins, ASCII_ALPHA_LOWER, state.match_range('a'..'z')); insert_builtin!(builtins, ASCII_ALPHA_UPPER, state.match_range('A'..'Z')); insert_builtin!( builtins, ASCII_ALPHA, state .match_range('a'..'z') .or_else(|state| state.match_range('A'..'Z')) ); insert_builtin!( builtins, ASCII_ALPHANUMERIC, state .match_range('a'..'z') .or_else(|state| state.match_range('A'..'Z')) .or_else(|state| state.match_range('0'..'9')) ); insert_builtin!(builtins, ASCII, state.match_range('\x00'..'\x7f')); insert_builtin!( builtins, NEWLINE, state .match_string("\n") .or_else(|state| state.match_string("\r\n")) .or_else(|state| state.match_string("\r")) ); let box_ty = box_type(); for property in unicode_property_names() { let property_ident: Ident = syn::parse_str(property).unwrap(); // insert manually for #property substitution builtins.push((property, quote! { #[inline] #[allow(dead_code, non_snake_case, unused_variables)] fn #property_ident(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { state.match_char_by(::pest::unicode::#property_ident) } })); } builtins } /// Generate Rust `include_str!` for grammar files, then Cargo will watch changes in grammars. fn generate_include(name: &Ident, paths: Vec) -> TokenStream { let const_name = format_ident!("_PEST_GRAMMAR_{}", name); // Need to make this relative to the current directory since the path to the file // is derived from the CARGO_MANIFEST_DIR environment variable let current_dir = std::env::current_dir().expect("Unable to get current directory"); let include_tokens = paths.iter().map(|path| { let path = path.to_str().expect("non-Unicode path"); let relative_path = current_dir .join(path) .to_str() .expect("path contains invalid unicode") .to_string(); quote! { include_str!(#relative_path) } }); let len = include_tokens.len(); quote! { #[allow(non_upper_case_globals)] const #const_name: [&'static str; #len] = [ #(#include_tokens),* ]; } } fn generate_enum( rules: &[OptimizedRule], doc_comment: &DocComment, uses_eoi: bool, non_exhaustive: bool, ) -> TokenStream { let rule_variants = rules.iter().map(|rule| { let rule_name = format_ident!("r#{}", rule.name); match doc_comment.line_docs.get(&rule.name) { Some(doc) => quote! { #[doc = #doc] #rule_name }, None => quote! { #rule_name }, } }); let grammar_doc = &doc_comment.grammar_doc; let mut result = quote! 
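// Start with the enum header: the grammar-level doc comment plus the attributes shared by
// every generated `Rule` enum; the variants and the `all_rules()` impl are appended below.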
{ #[doc = #grammar_doc] #[allow(dead_code, non_camel_case_types, clippy::upper_case_acronyms)] #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] }; if non_exhaustive { result.append_all(quote! { #[non_exhaustive] }); } result.append_all(quote! { pub enum Rule }); if uses_eoi { result.append_all(quote! { { #[doc = "End-of-input"] EOI, #( #rule_variants ),* } }); } else { result.append_all(quote! { { #( #rule_variants ),* } }) }; let rules = rules.iter().map(|rule| { let rule_name = format_ident!("r#{}", rule.name); quote! { #rule_name } }); result.append_all(quote! { impl Rule { pub fn all_rules() -> &'static[Rule] { &[ #(Rule::#rules), * ] } } }); result } fn generate_patterns(rules: &[OptimizedRule], uses_eoi: bool) -> TokenStream { let mut rules: Vec = rules .iter() .map(|rule| { let rule = format_ident!("r#{}", rule.name); quote! { Rule::#rule => rules::#rule(state) } }) .collect(); if uses_eoi { rules.push(quote! { Rule::EOI => rules::EOI(state) }); } quote! { #( #rules ),* } } fn generate_rule(rule: OptimizedRule) -> TokenStream { let name = format_ident!("r#{}", rule.name); let expr = if rule.ty == RuleType::Atomic || rule.ty == RuleType::CompoundAtomic { generate_expr_atomic(rule.expr) } else if rule.name == "WHITESPACE" || rule.name == "COMMENT" { let atomic = generate_expr_atomic(rule.expr); quote! { state.atomic(::pest::Atomicity::Atomic, |state| { #atomic }) } } else { generate_expr(rule.expr) }; let box_ty = box_type(); match rule.ty { RuleType::Normal => quote! { #[inline] #[allow(non_snake_case, unused_variables)] pub fn #name(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { state.rule(Rule::#name, |state| { #expr }) } }, RuleType::Silent => quote! { #[inline] #[allow(non_snake_case, unused_variables)] pub fn #name(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { #expr } }, RuleType::Atomic => quote! { #[inline] #[allow(non_snake_case, unused_variables)] pub fn #name(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { state.rule(Rule::#name, |state| { state.atomic(::pest::Atomicity::Atomic, |state| { #expr }) }) } }, RuleType::CompoundAtomic => quote! { #[inline] #[allow(non_snake_case, unused_variables)] pub fn #name(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { state.atomic(::pest::Atomicity::CompoundAtomic, |state| { state.rule(Rule::#name, |state| { #expr }) }) } }, RuleType::NonAtomic => quote! 
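// Non-atomic (`!{ ... }`) rules force atomicity back to NonAtomic, so implicit
// WHITESPACE/COMMENT skipping applies inside them even when reached from an atomic rule.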
{ #[inline] #[allow(non_snake_case, unused_variables)] pub fn #name(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { state.atomic(::pest::Atomicity::NonAtomic, |state| { state.rule(Rule::#name, |state| { #expr }) }) } }, } } fn generate_skip(rules: &[OptimizedRule]) -> TokenStream { let whitespace = rules.iter().any(|rule| rule.name == "WHITESPACE"); let comment = rules.iter().any(|rule| rule.name == "COMMENT"); match (whitespace, comment) { (false, false) => generate_rule!(skip, Ok(state)), (true, false) => generate_rule!( skip, if state.atomicity() == ::pest::Atomicity::NonAtomic { state.repeat(|state| super::visible::WHITESPACE(state)) } else { Ok(state) } ), (false, true) => generate_rule!( skip, if state.atomicity() == ::pest::Atomicity::NonAtomic { state.repeat(|state| super::visible::COMMENT(state)) } else { Ok(state) } ), (true, true) => generate_rule!( skip, if state.atomicity() == ::pest::Atomicity::NonAtomic { state.sequence(|state| { state .repeat(|state| super::visible::WHITESPACE(state)) .and_then(|state| { state.repeat(|state| { state.sequence(|state| { super::visible::COMMENT(state).and_then(|state| { state.repeat(|state| super::visible::WHITESPACE(state)) }) }) }) }) }) } else { Ok(state) } ), } } fn generate_expr(expr: OptimizedExpr) -> TokenStream { match expr { OptimizedExpr::Str(string) => { quote! { state.match_string(#string) } } OptimizedExpr::Insens(string) => { quote! { state.match_insensitive(#string) } } OptimizedExpr::Range(start, end) => { let start = start.chars().next().unwrap(); let end = end.chars().next().unwrap(); quote! { state.match_range(#start..#end) } } OptimizedExpr::Ident(ident) => { let ident = format_ident!("r#{}", ident); quote! { self::#ident(state) } } OptimizedExpr::PeekSlice(start, end_) => { let end = QuoteOption(end_); quote! { state.stack_match_peek_slice(#start, #end, ::pest::MatchDir::BottomToTop) } } OptimizedExpr::PosPred(expr) => { let expr = generate_expr(*expr); quote! { state.lookahead(true, |state| { #expr }) } } OptimizedExpr::NegPred(expr) => { let expr = generate_expr(*expr); quote! { state.lookahead(false, |state| { #expr }) } } OptimizedExpr::Seq(lhs, rhs) => { let head = generate_expr(*lhs); let mut tail = vec![]; let mut current = *rhs; while let OptimizedExpr::Seq(lhs, rhs) = current { tail.push(generate_expr(*lhs)); current = *rhs; } tail.push(generate_expr(current)); quote! { state.sequence(|state| { #head #( .and_then(|state| { super::hidden::skip(state) }).and_then(|state| { #tail }) )* }) } } OptimizedExpr::Choice(lhs, rhs) => { let head = generate_expr(*lhs); let mut tail = vec![]; let mut current = *rhs; while let OptimizedExpr::Choice(lhs, rhs) = current { tail.push(generate_expr(*lhs)); current = *rhs; } tail.push(generate_expr(current)); quote! { #head #( .or_else(|state| { #tail }) )* } } OptimizedExpr::Opt(expr) => { let expr = generate_expr(*expr); quote! { state.optional(|state| { #expr }) } } OptimizedExpr::Rep(expr) => { let expr = generate_expr(*expr); quote! { state.sequence(|state| { state.optional(|state| { #expr.and_then(|state| { state.repeat(|state| { state.sequence(|state| { super::hidden::skip( state ).and_then(|state| { #expr }) }) }) }) }) }) } } #[cfg(feature = "grammar-extras")] OptimizedExpr::RepOnce(expr) => { let expr = generate_expr(*expr); quote! 
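// `e+` (grammar-extras): one required match, then zero or more further matches with
// implicit skipping between occurrences.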
{ state.sequence(|state| { #expr.and_then(|state| { state.repeat(|state| { state.sequence(|state| { super::hidden::skip( state ).and_then(|state| { #expr }) }) }) }) }) } } OptimizedExpr::Skip(strings) => { quote! { let strings = [#(#strings),*]; state.skip_until(&strings) } } OptimizedExpr::Push(expr) => { let expr = generate_expr(*expr); quote! { state.stack_push(|state| #expr) } } OptimizedExpr::RestoreOnErr(expr) => { let expr = generate_expr(*expr); quote! { state.restore_on_err(|state| #expr) } } #[cfg(feature = "grammar-extras")] OptimizedExpr::NodeTag(expr, tag) => { let expr = generate_expr(*expr); let tag_cow = { #[cfg(feature = "std")] quote! { ::std::borrow::Cow::Borrowed(#tag) } #[cfg(not(feature = "std"))] quote! { ::alloc::borrow::Cow::Borrowed(#tag) } }; quote! { #expr.and_then(|state| state.tag_node(#tag_cow)) } } } } fn generate_expr_atomic(expr: OptimizedExpr) -> TokenStream { match expr { OptimizedExpr::Str(string) => { quote! { state.match_string(#string) } } OptimizedExpr::Insens(string) => { quote! { state.match_insensitive(#string) } } OptimizedExpr::Range(start, end) => { let start = start.chars().next().unwrap(); let end = end.chars().next().unwrap(); quote! { state.match_range(#start..#end) } } OptimizedExpr::Ident(ident) => { let ident = format_ident!("r#{}", ident); quote! { self::#ident(state) } } OptimizedExpr::PeekSlice(start, end_) => { let end = QuoteOption(end_); quote! { state.stack_match_peek_slice(#start, #end, ::pest::MatchDir::BottomToTop) } } OptimizedExpr::PosPred(expr) => { let expr = generate_expr_atomic(*expr); quote! { state.lookahead(true, |state| { #expr }) } } OptimizedExpr::NegPred(expr) => { let expr = generate_expr_atomic(*expr); quote! { state.lookahead(false, |state| { #expr }) } } OptimizedExpr::Seq(lhs, rhs) => { let head = generate_expr_atomic(*lhs); let mut tail = vec![]; let mut current = *rhs; while let OptimizedExpr::Seq(lhs, rhs) = current { tail.push(generate_expr_atomic(*lhs)); current = *rhs; } tail.push(generate_expr_atomic(current)); quote! { state.sequence(|state| { #head #( .and_then(|state| { #tail }) )* }) } } OptimizedExpr::Choice(lhs, rhs) => { let head = generate_expr_atomic(*lhs); let mut tail = vec![]; let mut current = *rhs; while let OptimizedExpr::Choice(lhs, rhs) = current { tail.push(generate_expr_atomic(*lhs)); current = *rhs; } tail.push(generate_expr_atomic(current)); quote! { #head #( .or_else(|state| { #tail }) )* } } OptimizedExpr::Opt(expr) => { let expr = generate_expr_atomic(*expr); quote! { state.optional(|state| { #expr }) } } OptimizedExpr::Rep(expr) => { let expr = generate_expr_atomic(*expr); quote! { state.repeat(|state| { #expr }) } } #[cfg(feature = "grammar-extras")] OptimizedExpr::RepOnce(expr) => { let expr = generate_expr_atomic(*expr); quote! { state.sequence(|state| { #expr.and_then(|state| { state.repeat(|state| { state.sequence(|state| { #expr }) }) }) }) } } OptimizedExpr::Skip(strings) => { quote! { let strings = [#(#strings),*]; state.skip_until(&strings) } } OptimizedExpr::Push(expr) => { let expr = generate_expr_atomic(*expr); quote! { state.stack_push(|state| #expr) } } OptimizedExpr::RestoreOnErr(expr) => { let expr = generate_expr_atomic(*expr); quote! { state.restore_on_err(|state| #expr) } } #[cfg(feature = "grammar-extras")] OptimizedExpr::NodeTag(expr, tag) => { let expr = generate_expr_atomic(*expr); let tag_cow = { #[cfg(feature = "std")] quote! { ::std::borrow::Cow::Borrowed(#tag) } #[cfg(not(feature = "std"))] quote! { ::alloc::borrow::Cow::Borrowed(#tag) } }; quote! 
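// Tagged expression (`#tag = e`, grammar-extras) in an atomic context: run the expression
// and, on success, attach the tag to the matched node.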
{ #expr.and_then(|state| state.tag_node(#tag_cow)) } } } } struct QuoteOption(Option); impl ToTokens for QuoteOption { fn to_tokens(&self, tokens: &mut TokenStream) { let option = option_type(); tokens.append_all(match self.0 { Some(ref t) => quote! { #option::Some(#t) }, None => quote! { #option::None }, }); } } fn box_type() -> TokenStream { #[cfg(feature = "std")] quote! { ::std::boxed::Box } #[cfg(not(feature = "std"))] quote! { ::alloc::boxed::Box } } fn result_type() -> TokenStream { #[cfg(feature = "std")] quote! { ::std::result::Result } #[cfg(not(feature = "std"))] quote! { ::core::result::Result } } fn option_type() -> TokenStream { #[cfg(feature = "std")] quote! { ::std::option::Option } #[cfg(not(feature = "std"))] quote! { ::core::option::Option } } #[cfg(test)] mod tests { use super::*; use proc_macro2::Span; use std::collections::HashMap; use syn::Generics; #[test] fn rule_enum_simple() { let rules = vec![OptimizedRule { name: "f".to_owned(), ty: RuleType::Normal, expr: OptimizedExpr::Ident("g".to_owned()), }]; let mut line_docs = HashMap::new(); line_docs.insert("f".to_owned(), "This is rule comment".to_owned()); let doc_comment = &DocComment { grammar_doc: "Rule doc\nhello".to_owned(), line_docs, }; assert_eq!( generate_enum(&rules, doc_comment, false, false).to_string(), quote! { #[doc = "Rule doc\nhello"] #[allow(dead_code, non_camel_case_types, clippy::upper_case_acronyms)] #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub enum Rule { #[doc = "This is rule comment"] r#f } impl Rule { pub fn all_rules() -> &'static [Rule] { &[Rule::r#f] } } } .to_string() ); } #[test] fn sequence() { let expr = OptimizedExpr::Seq( Box::new(OptimizedExpr::Str("a".to_owned())), Box::new(OptimizedExpr::Seq( Box::new(OptimizedExpr::Str("b".to_owned())), Box::new(OptimizedExpr::Seq( Box::new(OptimizedExpr::Str("c".to_owned())), Box::new(OptimizedExpr::Str("d".to_owned())), )), )), ); assert_eq!( generate_expr(expr).to_string(), quote! { state.sequence(|state| { state.match_string("a").and_then(|state| { super::hidden::skip(state) }).and_then(|state| { state.match_string("b") }).and_then(|state| { super::hidden::skip(state) }).and_then(|state| { state.match_string("c") }).and_then(|state| { super::hidden::skip(state) }).and_then(|state| { state.match_string("d") }) }) } .to_string() ); } #[test] fn sequence_atomic() { let expr = OptimizedExpr::Seq( Box::new(OptimizedExpr::Str("a".to_owned())), Box::new(OptimizedExpr::Seq( Box::new(OptimizedExpr::Str("b".to_owned())), Box::new(OptimizedExpr::Seq( Box::new(OptimizedExpr::Str("c".to_owned())), Box::new(OptimizedExpr::Str("d".to_owned())), )), )), ); assert_eq!( generate_expr_atomic(expr).to_string(), quote! { state.sequence(|state| { state.match_string("a").and_then(|state| { state.match_string("b") }).and_then(|state| { state.match_string("c") }).and_then(|state| { state.match_string("d") }) }) } .to_string() ); } #[test] fn choice() { let expr = OptimizedExpr::Choice( Box::new(OptimizedExpr::Str("a".to_owned())), Box::new(OptimizedExpr::Choice( Box::new(OptimizedExpr::Str("b".to_owned())), Box::new(OptimizedExpr::Choice( Box::new(OptimizedExpr::Str("c".to_owned())), Box::new(OptimizedExpr::Str("d".to_owned())), )), )), ); assert_eq!( generate_expr(expr).to_string(), quote! 
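// Expected tokens for the choice `"a" | "b" | "c" | "d"` generated for a non-atomic rule.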
{ state.match_string("a").or_else(|state| { state.match_string("b") }).or_else(|state| { state.match_string("c") }).or_else(|state| { state.match_string("d") }) } .to_string() ); } #[test] fn choice_atomic() { let expr = OptimizedExpr::Choice( Box::new(OptimizedExpr::Str("a".to_owned())), Box::new(OptimizedExpr::Choice( Box::new(OptimizedExpr::Str("b".to_owned())), Box::new(OptimizedExpr::Choice( Box::new(OptimizedExpr::Str("c".to_owned())), Box::new(OptimizedExpr::Str("d".to_owned())), )), )), ); assert_eq!( generate_expr_atomic(expr).to_string(), quote! { state.match_string("a").or_else(|state| { state.match_string("b") }).or_else(|state| { state.match_string("c") }).or_else(|state| { state.match_string("d") }) } .to_string() ); } #[test] fn skip() { let expr = OptimizedExpr::Skip(vec!["a".to_owned(), "b".to_owned()]); assert_eq!( generate_expr_atomic(expr).to_string(), quote! { let strings = ["a", "b"]; state.skip_until(&strings) } .to_string() ); } #[test] fn expr_complex() { let expr = OptimizedExpr::Choice( Box::new(OptimizedExpr::Ident("a".to_owned())), Box::new(OptimizedExpr::Seq( Box::new(OptimizedExpr::Range("a".to_owned(), "b".to_owned())), Box::new(OptimizedExpr::Seq( Box::new(OptimizedExpr::NegPred(Box::new(OptimizedExpr::Rep( Box::new(OptimizedExpr::Insens("b".to_owned())), )))), Box::new(OptimizedExpr::PosPred(Box::new(OptimizedExpr::Opt( Box::new(OptimizedExpr::Rep(Box::new(OptimizedExpr::Choice( Box::new(OptimizedExpr::Str("c".to_owned())), Box::new(OptimizedExpr::Str("d".to_owned())), )))), )))), )), )), ); let sequence = quote! { state.sequence(|state| { super::hidden::skip(state).and_then( |state| { state.match_insensitive("b") } ) }) }; let repeat = quote! { state.repeat(|state| { state.sequence(|state| { super::hidden::skip(state).and_then(|state| { state.match_string("c") .or_else(|state| { state.match_string("d") }) }) }) }) }; assert_eq!( generate_expr(expr).to_string(), quote! { self::r#a(state).or_else(|state| { state.sequence(|state| { state.match_range('a'..'b').and_then(|state| { super::hidden::skip(state) }).and_then(|state| { state.lookahead(false, |state| { state.sequence(|state| { state.optional(|state| { state.match_insensitive( "b" ).and_then(|state| { state.repeat(|state| { #sequence }) }) }) }) }) }).and_then(|state| { super::hidden::skip(state) }).and_then(|state| { state.lookahead(true, |state| { state.optional(|state| { state.sequence(|state| { state.optional(|state| { state.match_string("c") .or_else(|state| { state.match_string("d") }).and_then(|state| { #repeat }) }) }) }) }) }) }) }) } .to_string() ); } #[test] fn expr_complex_atomic() { let expr = OptimizedExpr::Choice( Box::new(OptimizedExpr::Ident("a".to_owned())), Box::new(OptimizedExpr::Seq( Box::new(OptimizedExpr::Range("a".to_owned(), "b".to_owned())), Box::new(OptimizedExpr::Seq( Box::new(OptimizedExpr::NegPred(Box::new(OptimizedExpr::Rep( Box::new(OptimizedExpr::Insens("b".to_owned())), )))), Box::new(OptimizedExpr::PosPred(Box::new(OptimizedExpr::Opt( Box::new(OptimizedExpr::Rep(Box::new(OptimizedExpr::Choice( Box::new(OptimizedExpr::Str("c".to_owned())), Box::new(OptimizedExpr::Str("d".to_owned())), )))), )))), )), )), ); assert_eq!( generate_expr_atomic(expr).to_string(), quote! 
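// Same expression as in `expr_complex`, but generated atomically: no `super::hidden::skip`
// calls are emitted.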
{ self::r#a(state).or_else(|state| { state.sequence(|state| { state.match_range('a'..'b').and_then(|state| { state.lookahead(false, |state| { state.repeat(|state| { state.match_insensitive("b") }) }) }).and_then(|state| { state.lookahead(true, |state| { state.optional(|state| { state.repeat(|state| { state.match_string("c") .or_else(|state| { state.match_string("d") }) }) }) }) }) }) }) } .to_string() ); } #[test] fn test_generate_complete() { let name = Ident::new("MyParser", Span::call_site()); let generics = Generics::default(); let rules = vec![ OptimizedRule { name: "a".to_owned(), ty: RuleType::Silent, expr: OptimizedExpr::Str("b".to_owned()), }, OptimizedRule { name: "if".to_owned(), ty: RuleType::Silent, expr: OptimizedExpr::Ident("a".to_owned()), }, ]; let mut line_docs = HashMap::new(); line_docs.insert("if".to_owned(), "If statement".to_owned()); let doc_comment = &DocComment { line_docs, grammar_doc: "This is Rule doc\nThis is second line".to_owned(), }; let defaults = vec!["ANY"]; let result = result_type(); let box_ty = box_type(); let current_dir = std::env::current_dir().expect("Unable to get current directory"); let base_path = current_dir.join("base.pest").to_str().unwrap().to_string(); let test_path = current_dir.join("test.pest").to_str().unwrap().to_string(); let parsed_derive = ParsedDerive { name, generics, non_exhaustive: false, }; assert_eq!( generate(parsed_derive, vec![PathBuf::from("base.pest"), PathBuf::from("test.pest")], rules, defaults, doc_comment, true).to_string(), quote! { #[allow(non_upper_case_globals)] const _PEST_GRAMMAR_MyParser: [&'static str; 2usize] = [include_str!(#base_path), include_str!(#test_path)]; #[doc = "This is Rule doc\nThis is second line"] #[allow(dead_code, non_camel_case_types, clippy::upper_case_acronyms)] #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub enum Rule { r#a, #[doc = "If statement"] r#if } impl Rule { pub fn all_rules() -> &'static [Rule] { &[Rule::r#a, Rule::r#if] } } #[allow(clippy::all)] impl ::pest::Parser for MyParser { fn parse<'i>( rule: Rule, input: &'i str ) -> #result< ::pest::iterators::Pairs<'i, Rule>, ::pest::error::Error > { mod rules { #![allow(clippy::upper_case_acronyms)] pub mod hidden { use super::super::Rule; #[inline] #[allow(dead_code, non_snake_case, unused_variables)] pub fn skip(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { Ok(state) } } pub mod visible { use super::super::Rule; #[inline] #[allow(non_snake_case, unused_variables)] pub fn r#a(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { state.match_string("b") } #[inline] #[allow(non_snake_case, unused_variables)] pub fn r#if(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { self::r#a(state) } #[inline] #[allow(dead_code, non_snake_case, unused_variables)] pub fn ANY(state: #box_ty<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<#box_ty<::pest::ParserState<'_, Rule>>> { state.skip(1) } } pub use self::visible::*; } ::pest::state(input, |state| { match rule { Rule::r#a => rules::r#a(state), Rule::r#if => rules::r#if(state) } }) } } }.to_string() ); } } pest_generator-2.7.4/src/lib.rs000064400000000000000000000231501046102023000145630ustar 00000000000000// pest. The Elegant Parser // Copyright (c) 2018 Dragoș Tiselice // // Licensed under the Apache License, Version 2.0 // or the MIT // license , at your // option. 
All files in the project carrying such notice may not be copied, // modified, or distributed except according to those terms. #![doc( html_root_url = "https://docs.rs/pest_derive", html_logo_url = "https://raw.githubusercontent.com/pest-parser/pest/master/pest-logo.svg", html_favicon_url = "https://raw.githubusercontent.com/pest-parser/pest/master/pest-logo.svg" )] #![warn(missing_docs, rust_2018_idioms, unused_qualifications)] #![recursion_limit = "256"] //! # pest generator //! //! This crate generates code from ASTs (which is used in the `pest_derive` crate). #[macro_use] extern crate quote; use std::env; use std::fs::File; use std::io::{self, Read}; use std::path::Path; use proc_macro2::TokenStream; use syn::{Attribute, DeriveInput, Expr, ExprLit, Generics, Ident, Lit, Meta}; #[macro_use] mod macros; mod docs; mod generator; use pest_meta::parser::{self, rename_meta_rule, Rule}; use pest_meta::{optimizer, unwrap_or_report, validator}; /// Processes the derive/proc macro input and generates the corresponding parser based /// on the parsed grammar. If `include_grammar` is set to true, it'll generate an explicit /// "include_str" statement (done in pest_derive, but turned off in the local bootstrap). pub fn derive_parser(input: TokenStream, include_grammar: bool) -> TokenStream { let ast: DeriveInput = syn::parse2(input).unwrap(); let (parsed_derive, contents) = parse_derive(ast); let mut data = String::new(); let mut paths = vec![]; for content in contents { let (_data, _path) = match content { GrammarSource::File(ref path) => { let root = env::var("CARGO_MANIFEST_DIR").unwrap_or_else(|_| ".".into()); // Check whether we can find a file at the path relative to the CARGO_MANIFEST_DIR // first. // // If we cannot find the expected file over there, fallback to the // `CARGO_MANIFEST_DIR/src`, which is the old default and kept for convenience // reasons. // TODO: This could be refactored once `std::path::absolute()` get's stabilized. 
// https://doc.rust-lang.org/std/path/fn.absolute.html let path = if Path::new(&root).join(path).exists() { Path::new(&root).join(path) } else { Path::new(&root).join("src/").join(path) }; let file_name = match path.file_name() { Some(file_name) => file_name, None => panic!("grammar attribute should point to a file"), }; let data = match read_file(&path) { Ok(data) => data, Err(error) => panic!("error opening {:?}: {}", file_name, error), }; (data, Some(path.clone())) } GrammarSource::Inline(content) => (content, None), }; data.push_str(&_data); if let Some(path) = _path { paths.push(path); } } let pairs = match parser::parse(Rule::grammar_rules, &data) { Ok(pairs) => pairs, Err(error) => panic!("error parsing \n{}", error.renamed_rules(rename_meta_rule)), }; let defaults = unwrap_or_report(validator::validate_pairs(pairs.clone())); let doc_comment = docs::consume(pairs.clone()); let ast = unwrap_or_report(parser::consume_rules(pairs)); let optimized = optimizer::optimize(ast); generator::generate( parsed_derive, paths, optimized, defaults, &doc_comment, include_grammar, ) } fn read_file>(path: P) -> io::Result { let mut file = File::open(path.as_ref())?; let mut string = String::new(); file.read_to_string(&mut string)?; Ok(string) } #[derive(Debug, PartialEq)] enum GrammarSource { File(String), Inline(String), } struct ParsedDerive { pub(crate) name: Ident, pub(crate) generics: Generics, pub(crate) non_exhaustive: bool, } fn parse_derive(ast: DeriveInput) -> (ParsedDerive, Vec) { let name = ast.ident; let generics = ast.generics; let grammar: Vec<&Attribute> = ast .attrs .iter() .filter(|attr| { let path = attr.meta.path(); path.is_ident("grammar") || path.is_ident("grammar_inline") }) .collect(); if grammar.is_empty() { panic!("a grammar file needs to be provided with the #[grammar = \"PATH\"] or #[grammar_inline = \"GRAMMAR CONTENTS\"] attribute"); } let mut grammar_sources = Vec::with_capacity(grammar.len()); for attr in grammar { grammar_sources.push(get_attribute(attr)) } let non_exhaustive = ast .attrs .iter() .any(|attr| attr.meta.path().is_ident("non_exhaustive")); ( ParsedDerive { name, generics, non_exhaustive, }, grammar_sources, ) } fn get_attribute(attr: &Attribute) -> GrammarSource { match &attr.meta { Meta::NameValue(name_value) => match &name_value.value { Expr::Lit(ExprLit { lit: Lit::Str(string), .. 
}) => { if name_value.path.is_ident("grammar") { GrammarSource::File(string.value()) } else { GrammarSource::Inline(string.value()) } } _ => panic!("grammar attribute must be a string"), }, _ => panic!("grammar attribute must be of the form `grammar = \"...\"`"), } } #[cfg(test)] mod tests { use super::parse_derive; use super::GrammarSource; #[test] fn derive_inline_file() { let definition = " #[other_attr] #[grammar_inline = \"GRAMMAR\"] pub struct MyParser<'a, T>; "; let ast = syn::parse_str(definition).unwrap(); let (_, filenames) = parse_derive(ast); assert_eq!(filenames, [GrammarSource::Inline("GRAMMAR".to_string())]); } #[test] fn derive_ok() { let definition = " #[other_attr] #[grammar = \"myfile.pest\"] pub struct MyParser<'a, T>; "; let ast = syn::parse_str(definition).unwrap(); let (parsed_derive, filenames) = parse_derive(ast); assert_eq!(filenames, [GrammarSource::File("myfile.pest".to_string())]); assert!(!parsed_derive.non_exhaustive); } #[test] fn derive_multiple_grammars() { let definition = " #[other_attr] #[grammar = \"myfile1.pest\"] #[grammar = \"myfile2.pest\"] pub struct MyParser<'a, T>; "; let ast = syn::parse_str(definition).unwrap(); let (_, filenames) = parse_derive(ast); assert_eq!( filenames, [ GrammarSource::File("myfile1.pest".to_string()), GrammarSource::File("myfile2.pest".to_string()) ] ); } #[test] fn derive_nonexhaustive() { let definition = " #[non_exhaustive] #[grammar = \"myfile.pest\"] pub struct MyParser<'a, T>; "; let ast = syn::parse_str(definition).unwrap(); let (parsed_derive, filenames) = parse_derive(ast); assert_eq!(filenames, [GrammarSource::File("myfile.pest".to_string())]); assert!(parsed_derive.non_exhaustive); } #[test] #[should_panic(expected = "grammar attribute must be a string")] fn derive_wrong_arg() { let definition = " #[other_attr] #[grammar = 1] pub struct MyParser<'a, T>; "; let ast = syn::parse_str(definition).unwrap(); parse_derive(ast); } #[test] #[should_panic( expected = "a grammar file needs to be provided with the #[grammar = \"PATH\"] or #[grammar_inline = \"GRAMMAR CONTENTS\"] attribute" )] fn derive_no_grammar() { let definition = " #[other_attr] pub struct MyParser<'a, T>; "; let ast = syn::parse_str(definition).unwrap(); parse_derive(ast); } #[doc = "Matches dar\n\nMatch dar description\n"] #[test] fn test_generate_doc() { let input = quote! { #[derive(Parser)] #[non_exhaustive] #[grammar = "../tests/test.pest"] pub struct TestParser; }; let token = super::derive_parser(input, true); let expected = quote! { #[doc = "A parser for JSON file.\nAnd this is a example for JSON parser.\n\n indent-4-space\n"] #[allow(dead_code, non_camel_case_types, clippy::upper_case_acronyms)] #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[non_exhaustive] pub enum Rule { #[doc = "Matches foo str, e.g.: `foo`"] r#foo, #[doc = "Matches bar str\n\n Indent 2, e.g: `bar` or `foobar`"] r#bar, r#bar1, #[doc = "Matches dar\n\nMatch dar description\n"] r#dar } }; assert!( token.to_string().contains(expected.to_string().as_str()), "{}\n\nExpected to contains:\n{}", token, expected ); } } pest_generator-2.7.4/src/macros.rs000064400000000000000000000026061046102023000153040ustar 00000000000000// pest. The Elegant Parser // Copyright (c) 2018 Dragoș Tiselice // // Licensed under the Apache License, Version 2.0 // or the MIT // license , at your // option. All files in the project carrying such notice may not be copied, // modified, or distributed except according to those terms. macro_rules! 
insert_builtin { ($builtin: expr, $name: ident, $pattern: expr) => { $builtin.push((stringify!($name), generate_rule!($name, $pattern))); }; } #[cfg(feature = "std")] macro_rules! generate_rule { ($name: ident, $pattern: expr) => { quote! { #[inline] #[allow(dead_code, non_snake_case, unused_variables)] pub fn $name(state: ::std::boxed::Box<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<::std::boxed::Box<::pest::ParserState<'_, Rule>>> { $pattern } } } } #[cfg(not(feature = "std"))] macro_rules! generate_rule { ($name: ident, $pattern: expr) => { quote! { #[inline] #[allow(dead_code, non_snake_case, unused_variables)] pub fn $name(state: ::alloc::boxed::Box<::pest::ParserState<'_, Rule>>) -> ::pest::ParseResult<::alloc::boxed::Box<::pest::ParserState<'_, Rule>>> { $pattern } } } } pest_generator-2.7.4/tests/base.pest000064400000000000000000000000211046102023000156210ustar 00000000000000base = { "base" }pest_generator-2.7.4/tests/test.pest000064400000000000000000000005131046102023000156740ustar 00000000000000//! A parser for JSON file. //! And this is a example for JSON parser. //! //! indent-4-space //! /// Matches foo str, e.g.: `foo` foo = { "foo" } /// Matches bar str /// /// Indent 2, e.g: `bar` or `foobar` bar = { "bar" | "foobar" } bar1 = { "bar1" } /// Matches dar /// /// Match dar description /// dar = { "da" }