distributional/0000755000175000017500000000000014165427252013450 5ustar nileshnileshdistributional/MD50000644000175000017500000002477514165427252013777 0ustar nileshnilesh81d2387f43f429e2a51cacfaaa972a42 *DESCRIPTION 528fa7977d317f60b800201d88f41512 *NAMESPACE 71b4d039dd8b514b74c2aca40adde07b *NEWS.md d2eef53219f7071a39945bdfda097b71 *R/default.R 22c7999050a1792182e52c9bd05bd706 *R/dist_bernoulli.R 7b99937c8a40752f3fbfa704d6f5c9b6 *R/dist_beta.R 1853ef098538090d1752859f2e888072 *R/dist_binomial.R d7730cc6990d18cd0d73f2735c92eaab *R/dist_burr.R 09b8e47d26a2092b50034404d2663a2e *R/dist_categorical.R 80a134c8b2d0074058b9f58d586ce9d9 *R/dist_cauchy.R fa46babb8c03bbe5b99408a6b9b9e8f2 *R/dist_chisq.R c69b1cf9fd6958c0b0314f57408eda43 *R/dist_degenerate.R 1544dc8b7c2564b5eb3047ef4904ea88 *R/dist_exponential.R e8f6cac423a838232e894a51b92d056c *R/dist_f.R 60385fed534b7d5f5101bc8b8d77351e *R/dist_gamma.R 8b69e66332c9ddb334735cb76c2c6c1c *R/dist_geometric.R 3b03bbdcbf0de7c7473a3f53a814e311 *R/dist_gumbel.R 7b171f2f8bc20f9b61b59886a175b80f *R/dist_hypergeometric.R a7b7ba10ea4142bce992997553e3dfb7 *R/dist_inverse_exponential.R 0f9324d57bc93274754508e85b4db703 *R/dist_inverse_gamma.R f21416140a4cd473f428cacabda9a8cd *R/dist_inverse_gaussian.R 25ad39846b5ef49ed6c5f0320a95d95d *R/dist_logarithmic.R e48514f3db29a84af045e11263fa6740 *R/dist_logistic.R bc5d659fc7195407c3a073e943756999 *R/dist_lognormal.R 6c205339fb619cff69445f1118d0ce63 *R/dist_missing.R 09a16970a9d76c6a0aa34f0a6123942f *R/dist_multinomial.R 6cb0b91ad006ddb69aa6854c562f923c *R/dist_multivariate_normal.R 0a06041de8197c57a36c92bfa68358ef *R/dist_negative_binomial.R bf27b8cbbb51ccb141a686a4ff8c3160 *R/dist_normal.R 59d1d719a44163669b55e30c7f3d53c3 *R/dist_pareto.R 92c2a75778f0cb7db87129d15131ce0a *R/dist_percentile.R f59e21356f40f43a7ea03191357abb68 *R/dist_poisson.R a2d3473399c6abe7d32276e7f4026ff5 *R/dist_poisson_inverse_gaussian.R 25e09cd53f5ed5f44cfd53e4cfe7fab4 *R/dist_sample.R c5278e31d1596ddc1530031b4e712d04 
*R/dist_student_t.R 8a0a891c4e0581a93f16b9d70e11d3c0 *R/dist_studentized_range.R 01ffbc3713f65487518478185137a5c0 *R/dist_uniform.R 8e44904b2bf815e5e019efaab325653e *R/dist_weibull.R faece3d1605b385dce10eb38cfc1d72c *R/dist_wrap.R 7100cac5d513de9bcbc4375bb00492a7 *R/distribution.R 62dc0d881f387a03e057609237dee3c9 *R/distributional-package.R eb9cdedfa8fc4ad192ce86bcc1a88cbf *R/geom_hilo.R 56a00c61086997879f1ced5ff84371ae *R/hdr.R 9701a5c23471803c89710ab09c92e699 *R/hilo.R f2c9c2e786c1f0225163b239d6bfed72 *R/inflated.R 4495366bcb4f9b041e01f3ef49dfedc4 *R/mixture.R 54c8424d85bdcaed230540752212be0e *R/plot.R c0ea658b5cab1732627259bc8885988a *R/reexports.R b6e8ac75ff3d04a985354e7d09deaa2c *R/scale-level.R 79a8df1027a0605ca4a31e16ec5ba924 *R/support.R 62bb78a631763c1cb35361bf7bfb4ab5 *R/transformed.R 5ebc8b4bdb83e64059def10fcb699722 *R/truncated.R 22b44377f816b0244228cd8455f384d7 *R/utils.R d55a7f8964e25799161e3e60efb0b1d6 *R/zzz.R 5f16073e39cd5d95ea85a8b0ebf3a731 *README.md ad9d29f9d83c01e80387c0889abe5072 *build/distributional.pdf eda3a422eef3020a9d51107684527d4d *man/autoplot.distribution.Rd 1e71e7d866f8b864dbb69d2b74854e12 *man/cdf.Rd 3fe5b2e1207e7529aa08123a6c7db9b1 *man/covariance.Rd ed1ae82a84b099371aea9546e203e3f5 *man/covariance.distribution.Rd b25816e7c5940bab45c6747a1a0b6ae9 *man/density.distribution.Rd 367bcf98334416b98c583558c5d56c29 *man/dist_bernoulli.Rd dac80d62f5e032e9534d8721ca03389e *man/dist_beta.Rd cfe109f5e3cd219d16e19cfc24e15976 *man/dist_binomial.Rd 84a2cf414d0293f316f9b0a54e055be8 *man/dist_burr.Rd e2fd0b755094e49d094de9e743bf21c5 *man/dist_categorical.Rd 2fc8bcde8fc90fd2fef6bc7cd55d00ad *man/dist_cauchy.Rd 9419b702c854a5c4196700b0de5833f5 *man/dist_chisq.Rd 4b605d01a47030aac2436768f5619ae3 *man/dist_degenerate.Rd 8d797142fc7782bc2ac386ccc8751040 *man/dist_exponential.Rd 6b009e0b1447c8fb0d4cb70f804b46f1 *man/dist_f.Rd 671080de3b798b62b12782ea78de9fa8 *man/dist_gamma.Rd 8f38be882f7040c439552428fd6eae27 *man/dist_geometric.Rd 
ee7b02dffcbe8769810eda9a44116e9d *man/dist_gumbel.Rd 6ffc911003f9b062de246d463cc8e9b3 *man/dist_hypergeometric.Rd 528479104726a77fc0c1d251783a4103 *man/dist_inflated.Rd c8a90ea45ed2d984bece7b9d9f9345ff *man/dist_inverse_exponential.Rd d701f3716c084f396e2703f1cc023364 *man/dist_inverse_gamma.Rd 0157eedac18ce07ebe380646caafd6d5 *man/dist_inverse_gaussian.Rd 775a052be286830c327b1830613710b1 *man/dist_logarithmic.Rd 28b51675a186aa70134b50fb1d825c5b *man/dist_logistic.Rd bb19a2b5733eb4ce61c97806290f8e24 *man/dist_lognormal.Rd ef3576d8680a6f58102b290e660bddda *man/dist_missing.Rd 32a96a260a4960742c4fe33eb8733c5d *man/dist_mixture.Rd ebe78ff74a5401b7e673c9fd6c879f8d *man/dist_multinomial.Rd 869e89b5e65cdfd209742473798e8cd2 *man/dist_multivariate_normal.Rd 1da41ef41e9dfb1c5adc8abb66cccffb *man/dist_negative_binomial.Rd b550d286bb66c15783ad57856eee4141 *man/dist_normal.Rd 94a8b2bdfd5ebc331bacf7a8dd696c02 *man/dist_pareto.Rd 6b39584e14f2104c2dc0c345bf83a74d *man/dist_percentile.Rd a68ad717688de2051d8e9169b553ed14 *man/dist_poisson.Rd 2f056daa7bfff40a0a3c9dedc78a26d8 *man/dist_poisson_inverse_gaussian.Rd 500483ef111861b9fb86abcfa55d7099 *man/dist_sample.Rd bce70c88e5f7454951bdd42703bb6644 *man/dist_student_t.Rd d26eb0635194384271dc5ae9188f0587 *man/dist_studentized_range.Rd 3592669df16418c0f89febe43d0e6334 *man/dist_transformed.Rd 5bbf9a18904573a7d7e3e2d5bdecc13f *man/dist_truncated.Rd 151fcc32865b9eb14df2299966274272 *man/dist_uniform.Rd 3b75cf68c9b7be55312cd72318030ddd *man/dist_weibull.Rd a37de96053d58d525550b0d9efaf4528 *man/dist_wrap.Rd dbd09bbed8d18a088bf27f3b7d04e510 *man/distributional-package.Rd fc5b4feb9708facf289853bb7a0e7e89 *man/family.distribution.Rd e00b3f82baae79c54be0ca78e5305823 *man/figures/README-plot-1.png cb1e46f469cfbbbde29c8b5113e1d789 *man/figures/lifecycle-archived.svg c0d2e5a54f1fa4ff02bf9533079dd1f7 *man/figures/lifecycle-defunct.svg a1b8c987c676c16af790f563f96cbb1f *man/figures/lifecycle-deprecated.svg c3978703d8f40f2679795335715e98f4 
*man/figures/lifecycle-experimental.svg 952b59dc07b171b97d5d982924244f61 *man/figures/lifecycle-maturing.svg 27b879bf3677ea76e3991d56ab324081 *man/figures/lifecycle-questioning.svg 46de21252239c5a23d400eae83ec6b2d *man/figures/lifecycle-retired.svg 6902bbfaf963fbc4ed98b86bda80caa2 *man/figures/lifecycle-soft-deprecated.svg 53b3f893324260b737b3c46ed2a0e643 *man/figures/lifecycle-stable.svg 512f979e0ff280bf7de33a1260d76172 *man/generate.distribution.Rd aca79f33f6c6120173768108f1f7ac77 *man/geom_hilo_linerange.Rd ce83b60315fe573e2aa9e340a5001ae4 *man/geom_hilo_ribbon.Rd 55fb83ec3afb992be79a00f71bb263bd *man/guide-helpers.Rd c487c0f510da5a5d0c198664fe311def *man/guide_level.Rd 1b79fdc35b5a03963705a461f71621a6 *man/hdr.Rd b84f033fc33b4248afa5021a91378d50 *man/hdr.distribution.Rd 805c75f6b0576e4296b2baf6a91be300 *man/hilo.Rd 56a85cb20aabfe2a15268b800620749b *man/hilo.distribution.Rd c0e8d42a8ef78867d5ff08abcffa0f9f *man/is-distribution.Rd 202225fd45b8291ef9a0e6e296955913 *man/is_hdr.Rd e7315a386366939fde82a760fce20ddc *man/is_hilo.Rd 209066551f67a71857703626a1c54ce5 *man/kurtosis.Rd 0991ad96561a6de0de2e1f808f8a5482 *man/likelihood.Rd efb17e3387b71c5e2a93ea9d900678e1 *man/mean.distribution.Rd b96a39429c28723d43d78ef45e7db5a3 *man/median.distribution.Rd 0ffa1c9a06f176b203cc83ee0e6ba5de *man/new_dist.Rd a7d0b9e27c3fcde27d461e01f6b479a9 *man/new_hdr.Rd 4fcbcc6835fc742713c9c565ab7f7eab *man/new_hilo.Rd 28fb594248cc80824ae745d4a185ec3f *man/new_support_region.Rd 8af413ad4b20e66ac568dbf769ec27d0 *man/parameters.Rd 6e536ab5abb8cd4cabd04e008bb384a4 *man/quantile.distribution.Rd be73e31803dd103145a71b28ad94d6a3 *man/reexports.Rd b4b76a7d70bc9aa45d661e581b502b9b *man/scale_hilo_continuous.Rd c7a97bb47fd50b5dacd9fc959f80b46b *man/scale_level.Rd 5b5115a6f3a5e7f285318b48ab42b055 *man/skewness.Rd 3e564a2e2cbc4bb7a15058295dcd8126 *man/support.Rd 2b9b3c0b575d3232b5786de5b85f1c08 *man/variance.Rd e5046ab33cbe2d4ecf945c7bc8aa52d2 *man/variance.distribution.Rd 
0eed614527f4bcdb7b5d80cef3a90163 *tests/testthat.R 734f0773aeba9582e8091e75ca21df4f *tests/testthat/Rplots.pdf c9d07a0c2baafa1d864c61cae8939b68 *tests/testthat/setup-tests.R 600f6b0befaff2eb766057ba4492ad7e *tests/testthat/test-apply.R 34d59205418fbb439986220b21e6a2e1 *tests/testthat/test-dist-bernoulli.R 98c6f80f397d5be23582ad8374af5837 *tests/testthat/test-dist-beta.R f9e4e4131783f7237d4f97dfe72fbe3a *tests/testthat/test-dist-burr.R c0267778b901e4a3691a6be6e1461687 *tests/testthat/test-dist-cauchy.R 46065b755f643e6d67e14b3dbbab2f43 *tests/testthat/test-dist-chisq.R c2bc05c26f197ef3e2d532f76e6dcd16 *tests/testthat/test-dist-degenerate.R 1a43b2c7b3a8d43de284e9b6ffc01fd8 *tests/testthat/test-dist-exponential.R 028b728b08ac8f88871d0b8b5f4881d2 *tests/testthat/test-dist-f.R d682147ddb5e7d006cd3b6f33a1bdb55 *tests/testthat/test-dist-gamma.R ffdfbf31992e228107c0d64534549edd *tests/testthat/test-dist-geometric.R c857ddebb4f3ef4436cb234c87b2abc0 *tests/testthat/test-dist-gumbel.R 3c203e6e7e503d1d611abe46e84064a1 *tests/testthat/test-dist-hypergeometric.R 50b33bef9f581dddf82060fc0e9a84f9 *tests/testthat/test-dist-inverse-exponential.R 661e60cc3d1cce040072848183fa3a94 *tests/testthat/test-dist-inverse-gamma.R fb99988132630fcc5055dd320050d692 *tests/testthat/test-dist-inverse-gaussian.R 3788908d3a0ebdd2437f987b2f80fd75 *tests/testthat/test-dist-logarithmic.R 3b8646feb5b8006e2ad931d47a3e2f86 *tests/testthat/test-dist-logistic.R 1bbd6c40e7c6bb55660d1ebb06c56dac *tests/testthat/test-dist-multinomial.R bb0ca99390c7b2c4d991f2e7ca5fa1f8 *tests/testthat/test-dist-multivariate-normal.R 8e6d6d1d5e21ce78f6efdef37b2bf132 *tests/testthat/test-dist-negative-binomial.R a5c19b6c3d33b56a0f9ee06f13ac331b *tests/testthat/test-dist-normal.R dbbd61cb750f4ee5c164b377c2a5f3e0 *tests/testthat/test-dist-pareto.R b1d1c4055d8c137124c31b00aeb013f7 *tests/testthat/test-dist-percentile.R 8dbd79f036ef97752d1c226d4d5aee40 *tests/testthat/test-dist-poisson-inverse-gaussian.R 
293e915312b88390102f5dd9bc9cfe7e *tests/testthat/test-dist-sample.R d6eb44eeb209211d414093e9965b899b *tests/testthat/test-dist-student-t.R a76bf887960e6fe5d8e74b7eb7841b85 *tests/testthat/test-dist-studentised-range.R 54e2974b72000400caf3aafcf7aee453 *tests/testthat/test-dist-uniform.R c534bea9f61ff207a0250a4f9c291e62 *tests/testthat/test-dist-weibull.R 81a9e2d2da843f71fe2ae121b970171d *tests/testthat/test-dist_categorical.R 121c000ccb65141a721f4157398a9def *tests/testthat/test-dist_lognormal.R a6cba40c30d644db4a3d7757be58ce25 *tests/testthat/test-distribution.R 6fb9b439523bb10ed98c954d747090a4 *tests/testthat/test-graphics.R a275086e34a17feb80675c2036a3a6e7 *tests/testthat/test-hilo.R 3dfad230c0483029636f11da9836bf2f *tests/testthat/test-inflated.R 8dd171e53c24bf91d7910cfb4a27bd47 *tests/testthat/test-issues.R 758cc8b6952f1af02e8c72ba9c68b797 *tests/testthat/test-mixture.R 4047d1c2b891518482f7cd59e5add943 *tests/testthat/test-transformations.R 538487050b2556bbc9691abefdfab6f8 *tests/testthat/test-truncated.R distributional/NEWS.md0000644000175000017500000002146614164777307014567 0ustar nileshnilesh# distributional 0.3.0 ## New features ### Probability distributions * Added `dist_categorical()` for the Categorical distribution. * Added `dist_lognormal()` for the log-normal distribution. Mathematical conversion shortcuts have also been added, so `exp(dist_normal())` produces `dist_lognormal()`. ### Generics * Added `parameters()` generic for obtaining the distribution's parameters. * Added `family()` for getting the distribution's family name. * Added `covariance()` to return the covariance of a distribution. * Added `support()` to identify the distribution's region of support (#8). * Added `log_likelihood()` for computing the log-likelihood of observing a sample from a distribution. ## Improvements * `variance()` now always returns a variance. It will not default to providing a covariance matrix for matrices. 
This also applies to multivariate distributions such as `dist_multivariate_normal()`. The covariance can now be obtained using the `covariance()` function. * `dist_wrap()` can now search for distribution functions in any environment, not just packages. If the `package` argument is `NULL`, it will search the calling environment for the functions. You can also provide a package name as before, and additionally an arbitrary environment to this argument. * `median()` methods will now ignore the `na.rm` option when it does not apply to that distribution type (#72). * `dist_sample()` now allows for missing values to be stored. Note that `density()`, `quantile()` and `cdf()` will remove these missing values by default. This behaviour can be changed with the `na.rm` argument. * `<hilo>` objects now support non-numeric and multivariate distributions. `<hilo>` vectors that have different bound types cannot be mixed (#74). * Improved performance of default methods of `mean()` and `variance()`, which no longer use sampling based means and variances for univariate continuous distributions (#71, @mjskay) * `dist_binomial()` distributions now return integers for `quantile()` and `generate()` methods. * Added conditional examples for distributions using functions from supported packages. ## Bug fixes * Fixed fallback `format()` function for distribution classes that have not defined this method (#67). ## Breaking changes * `variance()` on a `dist_multivariate_normal()` will now return the diagonal instead of the complete variance-covariance matrix. * `dist_bernoulli()` will now return logical values for `quantile()` and `generate()`. # distributional 0.2.2 ## New features * Added `is_distribution()` to identify if an object is a distribution. ## Improvements * Improved NA structure of distributions, allowing it to work with `is.na()` and `vctrs` vector resizing / filling functionality.
* Added `as.character()` method, allowing datasets containing `hilo()` objects to be saved as a text file (#57). ## Bug fixes * Fixed issue with `hdr()` range `size` incorrectly being treated as `100-size`, giving 5% ranges for 95% sizes and vice-versa (#61). # distributional 0.2.1 A small performance and methods release. Some issues with truncated distributions have been fixed, and some more distribution methods have been added which improve performance of common tasks. ## New features ### Probability distributions * Added `dist_missing()` for representing unknown or missing (NA) distributions. ## Improvements * Documentation improvements. * Added `cdf()` method for `dist_sample()` which uses the empirical cdf. * `dist_mixture()` now preserves `dimnames()` if all distributions have the same `dimnames()`. * Added `density()` and `generate()` methods for sample distributions. * Added `skewness()` method for `dist_sample()`. * Improved performance for truncated Normal and sample distributions (#49). * Improved vectorisation of distribution methods. ## Bug fixes * Fixed issue with computing the median of `dist_truncated()` distributions. * Fixed format method for `dist_truncated()` distributions with no upper or lower limit. * Fixed issue with naming objects giving an invalid structure. It now gives an informative error (#23). * Fixed documentation for Negative Binomial distribution (#46). # distributional 0.2.0 ## New features ### Probability distributions * Added `dist_wrap()` for wrapping distributions not yet added in the package. ### Methods * Added `likelihood()` for computing the likelihood of observing a sample from a distribution. * Added `skewness()` for computing the skewness of a distribution. * Added `kurtosis()` for computing the kurtosis of a distribution. * The `density()`, `cdf()` and `quantile()` methods now accept a `log` argument which will use/return probabilities as log probabilities.
## Improvements * Improved documentation for most distributions to include equations for the region of support, summary statistics, density functions and moments. This is the work of @alexpghayes in the `distributions3` package. * Documentation improvements * Added support for displaying distributions with `View()`. * `hilo()` intervals can no longer be added to other intervals, as this is a common mistake when aggregating forecasts. * Incremented `d` for `numDeriv::hessian()` when computing mean and variance of transformed distributions. ## Deprecated features * Graphics functionality provided by `autoplot.distribution()` is now deprecated in favour of using the `ggdist` package. The `ggdist` package allows distributions produced by distributional to be used directly with ggplot2 as aesthetics. # distributional 0.1.0 First release. ## New features ### Object classes * `distribution`: Distributions are represented in a vectorised format using the [vctrs](https://cran.r-project.org/package=vctrs) package. This makes distributions suitable for inclusion in model prediction output. A `distribution` is a container for distribution-specific S3 classes. * `hilo`: Intervals are also stored in a vector. A `hilo` consists of a `lower` bound, `upper` bound, and confidence `level`. Each numerical element can be extracted using `$`, for example my_hilo$lower to obtain the lower bounds. * `hdr`: Highest density regions are currently stored as lists of `hilo` values. This is an experimental feature, and is likely to be expanded upon in an upcoming release. ### Generic functions Values of interest can be computed from the distribution using generic functions. The first release provides 9 functions for interacting with distributions: * `density()`: The probability density/mass function (equivalent to `d...()`). * `cdf()`: The cumulative distribution function (equivalent to `p...()`). * `generate()`: Random generation from the distribution (equivalent to `r...()`). 
* `quantile()`: Compute quantiles of the distribution (equivalent to `q...()`). * `hilo()`: Compute probability intervals of probability distribution(s). * `hdr()`: Compute highest density regions of probability distribution(s). * `mean()`: Obtain the mean(s) of probability distribution(s). * `median()`: Obtain the median(s) of probability distribution(s). * `variance()`: Obtain the variance(s) of probability distribution(s). ### Graphics * Added an `autoplot()` method for visualising the probability density function ([`density()`]) or cumulative distribution function ([`cdf()`]) of one or more distribution. * Added `geom_hilo_ribbon()` and `geom_hilo_linerange()` geometries for ggplot2. These geoms allow uncertainty to be shown graphically with `hilo()` intervals. ### Probability distributions * Added 20 continuous probability distributions: `dist_beta()`, `dist_burr()`, `dist_cauchy()`, `dist_chisq()`, `dist_exponential()`, `dist_f()`, `dist_gamma()`, `dist_gumbel()`, `dist_hypergeometric()`, `dist_inverse_exponential()`, `dist_inverse_gamma()`, `dist_inverse_gaussian()`, `dist_logistic()`, `dist_multivariate_normal()`, `dist_normal()`, `dist_pareto()`, `dist_student_t()`, `dist_studentized_range()`, `dist_uniform()`, `dist_weibull()` * Added 8 discrete probability distributions: `dist_bernoulli()`, `dist_binomial()`, `dist_geometric()`, `dist_logarithmic()`, `dist_multinomial()`, `dist_negative_binomial()`, `dist_poisson()`, `dist_poisson_inverse_gaussian()` * Added 3 miscellaneous probability distributions: `dist_degenerate()`, `dist_percentile()`, `dist_sample()` ### Distribution modifiers * Added `dist_inflated()` which inflates a specific value of a distribution by a given probability. This can be used to produce zero-inflated distributions. * Added `dist_transformed()` for transforming distributions. 
This can be used to produce log distributions such as logNormal: `dist_transformed(dist_normal(), transform = exp, inverse = log)` * Added `dist_mixture()` for producing weighted mixtures of distributions. * Added `dist_truncated()` to impose boundaries on a distribution's domain via truncation. distributional/DESCRIPTION0000644000175000017500000000425114165427252015160 0ustar nileshnileshPackage: distributional Title: Vectorised Probability Distributions Version: 0.3.0 Authors@R: c(person(given = "Mitchell", family = "O'Hara-Wild", role = c("aut", "cre"), email = "mail@mitchelloharawild.com", comment = c(ORCID = "0000-0001-6729-7695")), person(given = "Matthew", family = "Kay", role = c("aut"), comment = c(ORCID = "0000-0001-9446-0419")), person(given = "Alex", family = "Hayes", role = c("aut"), comment = c(ORCID = "0000-0002-4985-5160")), person(given = "Earo", family = "Wang", role = c("ctb"), comment = c(ORCID = "0000-0001-6448-5260"))) Description: Vectorised distribution objects with tools for manipulating, visualising, and using probability distributions. Designed to allow model prediction outputs to return distributions rather than their parameters, allowing users to directly interact with predictive distributions in a data-oriented workflow. In addition to providing generic replacements for p/d/q/r functions, other useful statistics can be computed including means, variances, intervals, and highest density regions. 
License: GPL-3 Imports: vctrs (>= 0.3.0), rlang (>= 0.4.5), generics, ellipsis, stats, numDeriv, ggplot2, scales, farver, digest, utils, lifecycle Suggests: testthat (>= 2.1.0), covr, mvtnorm, actuar (>= 2.0.0), ggdist RdMacros: lifecycle URL: https://pkg.mitchelloharawild.com/distributional/, https://github.com/mitchelloharawild/distributional BugReports: https://github.com/mitchelloharawild/distributional/issues Encoding: UTF-8 Language: en-GB RoxygenNote: 7.1.2 NeedsCompilation: no Packaged: 2022-01-05 22:44:02 UTC; mitchell Author: Mitchell O'Hara-Wild [aut, cre] (), Matthew Kay [aut] (), Alex Hayes [aut] (), Earo Wang [ctb] () Maintainer: Mitchell O'Hara-Wild Repository: CRAN Date/Publication: 2022-01-05 23:50:02 UTC distributional/README.md0000644000175000017500000001276114151532232014724 0ustar nileshnilesh # distributional [![Lifecycle: maturing](https://img.shields.io/badge/lifecycle-maturing-blue.svg)](https://lifecycle.r-lib.org/articles/stages.html) [![R build status](https://github.com/mitchelloharawild/distributional/workflows/R-CMD-check/badge.svg)](https://github.com/mitchelloharawild/distributional) [![Coverage Status](https://codecov.io/gh/mitchelloharawild/distributional/branch/master/graph/badge.svg)](https://codecov.io/github/mitchelloharawild/distributional?branch=master) [![CRAN status](https://www.r-pkg.org/badges/version/distributional)](https://CRAN.R-project.org/package=distributional) The distributional package allows distributions to be used in a vectorised context. It provides methods which are minimal wrappers to the standard d, p, q, and r distribution functions which are applied to each distribution in the vector. Additional distributional statistics can be computed, including the `mean()`, `median()`, `variance()`, and intervals with `hilo()`. The distributional nature of a model’s predictions is often understated, with default output of prediction methods usually only producing point predictions. 
Some R packages (such as [forecast](https://CRAN.R-project.org/package=forecast)) further emphasise uncertainty by producing point forecasts and intervals by default, however the user’s ability to interact with them is limited. This package vectorises distributions and provides methods for working with them, making distributions compatible with prediction outputs of modelling functions. These vectorised distributions can be illustrated with [ggplot2](https://CRAN.R-project.org/package=ggplot2) using the [ggdist](https://CRAN.R-project.org/package=ggdist) package, providing further opportunity to visualise the uncertainty of predictions and teach distributional theory. ## Installation You can install the released version of distributional from [CRAN](https://CRAN.R-project.org/package=distributional) with: ``` r install.packages("distributional") ``` The development version can be installed from [GitHub](https://github.com/mitchelloharawild/distributional) with: ``` r # install.packages("remotes") remotes::install_github("mitchelloharawild/distributional") ``` ## Examples Distributions are created using `dist_*()` functions. Currently only the normal distribution is supported for testing purposes. 
``` r library(distributional) my_dist <- c(dist_normal(mu = 0, sigma = 1), dist_student_t(df = 10)) my_dist #> #> [1] N(0, 1) t(10, 0, 1) ``` The standard four distribution functions in R are usable via these generics: ``` r density(my_dist, 0) # c(dnorm(0, mean = 0, sd = 1), dt(0, df = 10)) #> [1] 0.3989423 0.3891084 cdf(my_dist, 5) # c(pnorm(5, mean = 0, sd = 1), pt(5, df = 10)) #> [1] 0.9999997 0.9997313 quantile(my_dist, 0.1) # c(qnorm(0.1, mean = 0, sd = 1), qt(0.1, df = 10)) #> [1] -1.281552 -1.372184 generate(my_dist, 10) # list(rnorm(10, mean = 0, sd = 1), rt(10, df = 10)) #> [[1]] #> [1] 1.262954285 -0.326233361 1.329799263 1.272429321 0.414641434 #> [6] -1.539950042 -0.928567035 -0.294720447 -0.005767173 2.404653389 #> #> [[2]] #> [1] 0.99165484 -1.36999677 -0.40943004 -0.85261144 -1.37728388 0.81020460 #> [7] -1.82965813 -0.06142032 -1.33933588 -0.28491414 ``` You can also compute intervals using `hilo()` ``` r hilo(my_dist, 0.95) #> #> [1] [-0.01190677, 0.01190677]0.95 [-0.01220773, 0.01220773]0.95 ``` Additionally, some distributions may support other methods such as mathematical operations and summary measures. If the methods aren’t supported, a transformed distribution will be created. ``` r my_dist #> #> [1] N(0, 1) t(10, 0, 1) my_dist*3 + 2 #> #> [1] N(2, 9) t(t(10, 0, 1)) mean(my_dist) #> [1] 0 0 variance(my_dist) #> [1] 1.00 1.25 ``` You can also visualise the distribution(s) using the [ggdist](https://mjskay.github.io/ggdist/) package. 
``` r library(ggdist) library(ggplot2) df <- data.frame( name = c("Gamma(2,1)", "Normal(5,1)", "Mixture"), dist = c(dist_gamma(2,1), dist_normal(5,1), dist_mixture(dist_gamma(2,1), dist_normal(5, 1), weights = c(0.4, 0.6))) ) ggplot(df, aes(y = factor(name, levels = rev(name)))) + stat_dist_halfeye(aes(dist = dist)) + labs(title = "Density function for a mixture of distributions", y = NULL, x = NULL) #> Warning: Computation failed in `stat_dist_slabinterval()`: #> invalid 'xmin' value ``` ## Related work There are several packages which unify interfaces for distributions in R: - stats provides functions to work with possibly multiple distributions (comparisons made below). - [distributions3](https://cran.r-project.org/package=distributions3) represents singular distributions using S3, with particularly nice documentation. This package makes use of some code and documentation from this package. - [distr](https://cran.r-project.org/package=distr) represents singular distributions using S4. - [distr6](https://cran.r-project.org/package=distr6) represents singular distributions using R6. - Many more in the [CRAN task view](https://cran.r-project.org/view=Distributions) This package differs from the above libraries by storing the distributions in a vectorised format. It does this using [vctrs](https://vctrs.r-lib.org/), so it should play nicely with the tidyverse (try putting distributions into a tibble\!). distributional/man/0000755000175000017500000000000014164770357014231 5ustar nileshnileshdistributional/man/dist_binomial.Rd0000644000175000017500000000537213711717775017346 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_binomial.R \name{dist_binomial} \alias{dist_binomial} \title{The Binomial distribution} \usage{ dist_binomial(size, prob) } \arguments{ \item{size}{The number of trials. Must be an integer greater than or equal to one. 
When \code{size = 1L}, the Binomial distribution reduces to the Bernoulli distribution. Often called \code{n} in textbooks.} \item{prob}{The probability of success on each trial, \code{prob} can be any value in \verb{[0, 1]}.} } \description{ \lifecycle{stable} } \details{ Binomial distributions are used to represent situations that can be thought of as the result of \eqn{n} Bernoulli experiments (here the \eqn{n} is defined as the \code{size} of the experiment). The classical example is \eqn{n} independent coin flips, where each coin flip has probability \code{p} of success. In this case, the individual probability of flipping heads or tails is given by the Bernoulli(p) distribution, and the probability of having \eqn{x} equal results (\eqn{x} heads, for example), in \eqn{n} trials is given by the Binomial(n, p) distribution. The equation of the Binomial distribution is directly derived from the equation of the Bernoulli distribution. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. The Binomial distribution comes up when you are interested in the proportion of people who do a thing. The Binomial distribution also comes up in the sign test, sometimes called the Binomial test (see \code{\link[stats:binom.test]{stats::binom.test()}}), where you may need the Binomial C.D.F. to compute p-values. In the following, let \eqn{X} be a Binomial random variable with parameter \code{size} = \eqn{n} and \code{prob} = \eqn{p}. Some textbooks define \eqn{q = 1 - p}, or use \eqn{\pi} instead of \eqn{p}.
\strong{Support}: \eqn{\{0, 1, 2, ..., n\}}{{0, 1, 2, ..., n}} \strong{Mean}: \eqn{np} \strong{Variance}: \eqn{np \cdot (1 - p) = np \cdot q}{np (1 - p)} \strong{Probability mass function (p.m.f)}: \deqn{ P(X = k) = {n \choose k} p^k (1 - p)^{n-k} }{ P(X = k) = choose(n, k) p^k (1 - p)^(n - k) } \strong{Cumulative distribution function (c.d.f)}: \deqn{ P(X \le k) = \sum_{i=0}^{\lfloor k \rfloor} {n \choose i} p^i (1 - p)^{n-i} }{ P(X \le k) = \sum_{i=0}^k choose(n, i) p^i (1 - p)^(n-i) } \strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = (1 - p + p e^t)^n }{ E(e^(tX)) = (1 - p + p e^t)^n } } \examples{ dist <- dist_binomial(size = 1:5, prob = c(0.05, 0.5, 0.3, 0.9, 0.1)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } distributional/man/dist_multivariate_normal.Rd0000644000175000017500000000174414164726554021627 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_multivariate_normal.R \name{dist_multivariate_normal} \alias{dist_multivariate_normal} \title{The multivariate normal distribution} \usage{ dist_multivariate_normal(mu = 0, sigma = diag(1)) } \arguments{ \item{mu}{A list of numeric vectors for the distribution's mean.} \item{sigma}{A list of matrices for the distribution's variance-covariance matrix.} } \description{ \lifecycle{maturing} } \examples{ dist <- dist_multivariate_normal(mu = list(c(1,2)), sigma = list(matrix(c(4,2,2,3), ncol=2))) dist \dontshow{if (requireNamespace("mvtnorm", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} mean(dist) variance(dist) support(dist) generate(dist, 10) density(dist, c(2, 1)) density(dist, c(2, 1), log = TRUE) cdf(dist, 4) quantile(dist, 0.7) \dontshow{\}) # examplesIf} } \seealso{ \link[mvtnorm:Mvnorm]{mvtnorm::dmvnorm}, \link[mvtnorm:qmvnorm]{mvtnorm::qmvnorm} } 
distributional/man/dist_negative_binomial.Rd0000644000175000017500000000363214164725546021223 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_negative_binomial.R \name{dist_negative_binomial} \alias{dist_negative_binomial} \title{The Negative Binomial distribution} \usage{ dist_negative_binomial(size, prob) } \arguments{ \item{size}{target for number of successful trials, or dispersion parameter (the shape parameter of the gamma mixing distribution). Must be strictly positive, need not be integer.} \item{prob}{probability of success in each trial. \code{0 < prob <= 1}.} } \description{ \lifecycle{stable} } \details{ A generalization of the geometric distribution. It is the number of failures in a sequence of i.i.d. Bernoulli trials before a specified number of successes (\code{size}) occur. The probability of success in each trial is given by \code{prob}. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a Negative Binomial random variable with success probability \code{prob} = \eqn{p} and the number of successes \code{size} = \eqn{r}. \strong{Support}: \eqn{\{0, 1, 2, 3, ...\}} \strong{Mean}: \eqn{\frac{p r}{1-p}} \strong{Variance}: \eqn{\frac{pr}{(1-p)^2}} \strong{Probability mass function (p.m.f)}: \deqn{ f(k) = {k + r - 1 \choose k} \cdot (1-p)^r p^k }{ f(k) = (k+r-1)!/(k!(r-1)!) (1-p)^r p^k } \strong{Cumulative distribution function (c.d.f)}: Too nasty, omitted. 
\strong{Moment generating function (m.g.f)}: \deqn{ \left(\frac{1-p}{1-pe^t}\right)^r, t < -\log p }{ \frac{(1-p)^r}{(1-pe^t)^r}, t < -\log p } } \examples{ dist <- dist_negative_binomial(size = 10, prob = 0.5) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) support(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:NegBinomial]{stats::NegBinomial} } distributional/man/autoplot.distribution.Rd0000644000175000017500000000116714164770357021112 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.R \name{autoplot.distribution} \alias{autoplot.distribution} \title{Plot a distribution} \usage{ autoplot.distribution(x, ...) } \arguments{ \item{x}{The distribution(s) to plot.} \item{...}{Unused.} } \description{ \lifecycle{deprecated} } \details{ This function is now defunct and can no longer be used. Instead consider using the {ggdist} package to produce your own distribution plots. You can learn more about how this plot can be produced using {ggdist} here: https://mjskay.github.io/ggdist/articles/slabinterval.html } \keyword{internal} distributional/man/mean.distribution.Rd0000644000175000017500000000100013703764147020143 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{mean.distribution} \alias{mean.distribution} \title{Mean of a probability distribution} \usage{ \method{mean}{distribution}(x, ...) } \arguments{ \item{x}{The distribution(s).} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{stable} } \details{ Returns the empirical mean of the probability distribution. If the method does not exist, the mean of a random sample will be returned. 
} distributional/man/dist_gamma.Rd0000644000175000017500000000462513711717775016632 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_gamma.R \name{dist_gamma} \alias{dist_gamma} \title{The Gamma distribution} \usage{ dist_gamma(shape, rate) } \arguments{ \item{shape}{shape and scale parameters. Must be positive, \code{scale} strictly.} \item{rate}{an alternative way to specify the scale.} } \description{ \lifecycle{stable} } \details{ Several important distributions are special cases of the Gamma distribution. When the shape parameter is \code{1}, the Gamma is an exponential distribution with parameter \eqn{1/\beta}. When \eqn{shape = n/2} and \eqn{rate = 1/2}, the Gamma is equivalent to a chi squared distribution with n degrees of freedom. Moreover, if \eqn{X_1} is \eqn{Gamma(\alpha_1, \beta)} and \eqn{X_2} is \eqn{Gamma(\alpha_2, \beta)}, then the ratio \eqn{\frac{X_1}{X_1 + X_2}} follows a \eqn{Beta(\alpha_1, \alpha_2)} distribution. This last property frequently appears in other distributions, and it has extensively been used in multivariate methods. More about the Gamma distribution will be added soon. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a Gamma random variable with parameters \code{shape} = \eqn{\alpha} and \code{rate} = \eqn{\beta}.
\strong{Support}: \eqn{x \in (0, \infty)} \strong{Mean}: \eqn{\frac{\alpha}{\beta}} \strong{Variance}: \eqn{\frac{\alpha}{\beta^2}} \strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{\alpha - 1} e^{-\beta x} }{ f(x) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{\alpha - 1} e^{-\beta x} } \strong{Cumulative distribution function (c.d.f)}: \deqn{ F(x) = \frac{\gamma(\alpha, \beta x)}{\Gamma(\alpha)} }{ F(x) = \frac{\gamma(\alpha, \beta x)}{\Gamma(\alpha)} } where \eqn{\gamma} is the lower incomplete gamma function. \strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = \Big(\frac{\beta}{ \beta - t}\Big)^{\alpha}, \thinspace t < \beta }{ E(e^(tX)) = \Big(\frac{\beta}{ \beta - t}\Big)^{\alpha}, \thinspace t < \beta } } \examples{ dist <- dist_gamma(shape = c(1,2,3,5,9,7.5,0.5), rate = c(0.5,0.5,0.5,1,2,1,1)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:GammaDist]{stats::GammaDist} } distributional/man/dist_geometric.Rd0000644000175000017500000000334013711717775017523 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_geometric.R \name{dist_geometric} \alias{dist_geometric} \title{The Geometric Distribution} \usage{ dist_geometric(prob) } \arguments{ \item{prob}{probability of success in each trial. \code{0 < prob <= 1}.} } \description{ The Geometric distribution can be thought of as a generalization of the \code{\link[=dist_bernoulli]{dist_bernoulli()}} distribution where we ask: "if I keep flipping a coin with probability \code{p} of heads, what is the probability I need \eqn{k} flips before I get my first heads?" The Geometric distribution is a special case of Negative Binomial distribution. \lifecycle{stable} } \details{ We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely.
In the following, let \eqn{X} be a Geometric random variable with success probability \code{p} = \eqn{p}. Note that there are multiple parameterizations of the Geometric distribution. \strong{Support}: 0 < p < 1, \eqn{x = 0, 1, \dots} \strong{Mean}: \eqn{\frac{1-p}{p}} \strong{Variance}: \eqn{\frac{1-p}{p^2}} \strong{Probability mass function (p.m.f)}: \deqn{ P(X = x) = p(1-p)^x, } \strong{Cumulative distribution function (c.d.f)}: \deqn{ P(X \le x) = 1 - (1-p)^{x+1} } \strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = \frac{pe^t}{1 - (1-p)e^t} }{ E(e^{tX}) = \frac{pe^t}{1 - (1-p)e^t} } } \examples{ dist <- dist_geometric(prob = c(0.2, 0.5, 0.8)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Geometric]{stats::Geometric} } distributional/man/geom_hilo_linerange.Rd0000644000175000017500000000615113703764147020507 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geom_hilo.R \name{geom_hilo_linerange} \alias{geom_hilo_linerange} \title{Line ranges for hilo intervals} \usage{ geom_hilo_linerange( mapping = NULL, data = NULL, stat = "identity", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ... ) } \arguments{ \item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}} or \code{\link[ggplot2:aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the default), it is combined with the default mapping at the top level of the plot. You must supply \code{mapping} if there is no plot mapping.} \item{data}{The data to be displayed in this layer. There are three options: If \code{NULL}, the default, the data is inherited from the plot data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}. A \code{data.frame}, or other object, will override the plot data. All objects will be fortified to produce a data frame. 
See \code{\link[ggplot2:fortify]{fortify()}} for which variables will be created. A \code{function} will be called with a single argument, the plot data. The return value must be a \code{data.frame}, and will be used as the layer data. A \code{function} can be created from a \code{formula} (e.g. \code{~ head(.x, 10)}).} \item{stat}{The statistical transformation to use on the data for this layer, as a string.} \item{position}{Position adjustment, either as a string, or the result of a call to a position adjustment function.} \item{na.rm}{If \code{FALSE}, the default, missing values are removed with a warning. If \code{TRUE}, missing values are silently removed.} \item{show.legend}{logical. Should this layer be included in the legends? \code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE} never includes, and \code{TRUE} always includes. It can also be a named logical vector to finely select the aesthetics to display.} \item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics, rather than combining with them. This is most useful for helper functions that define both data and aesthetics and shouldn't inherit behaviour from the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.} \item{...}{Other arguments passed on to \code{\link[ggplot2:layer]{layer()}}. These are often aesthetics, used to set an aesthetic to a fixed value, like \code{colour = "red"} or \code{size = 3}. They may also be parameters to the paired geom/stat.} } \description{ \lifecycle{experimental} } \details{ \code{geom_hilo_linerange()} displays the interval defined by a hilo object. The luminance of the shaded area indicates its confidence level. The shade colour can be controlled by the \code{fill} aesthetic, however the luminance will be overwritten to represent the confidence level. 
} \examples{ dist <- dist_normal(1:3, 1:3) library(ggplot2) ggplot( data.frame(x = rep(1:3, 2), interval = c(hilo(dist, 80), hilo(dist, 95))) ) + geom_hilo_linerange(aes(x = x, hilo = interval)) } \seealso{ \code{\link[=geom_hilo_ribbon]{geom_hilo_ribbon()}} for continuous hilo intervals (ribbons) } distributional/man/dist_logarithmic.Rd0000644000175000017500000000134514164726426020046 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_logarithmic.R \name{dist_logarithmic} \alias{dist_logarithmic} \title{The Logarithmic distribution} \usage{ dist_logarithmic(prob) } \arguments{ \item{prob}{parameter. \code{0 <= prob < 1}.} } \description{ \lifecycle{stable} } \examples{ dist <- dist_logarithmic(prob = c(0.33, 0.66, 0.99)) dist \dontshow{if (requireNamespace("actuar", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} mean(dist) variance(dist) support(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) \dontshow{\}) # examplesIf} } \seealso{ \link[actuar:Logarithmic]{actuar::Logarithmic} } distributional/man/dist_hypergeometric.Rd0000644000175000017500000000355313711717775020601 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_hypergeometric.R \name{dist_hypergeometric} \alias{dist_hypergeometric} \title{The Hypergeometric distribution} \usage{ dist_hypergeometric(m, n, k) } \arguments{ \item{m}{The number of type I elements available.} \item{n}{The number of type II elements available.} \item{k}{The size of the sample taken.} } \description{ \lifecycle{stable} } \details{ To understand the HyperGeometric distribution, consider a set of \eqn{r} objects, of which \eqn{m} are of the type I and \eqn{n} are of the type II. 
A sample with size \eqn{k} (\eqn{k \le m + n}) is then taken without replacement. \dontshow{if (requireNamespace("actuar", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} mean(dist) variance(dist) support(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) \dontshow{\}) # examplesIf} } \seealso{ \link[actuar:Burr]{actuar::Burr} } distributional/man/dist_lognormal.Rd0000644000175000017500000000423114151532232017525 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_lognormal.R \name{dist_lognormal} \alias{dist_lognormal} \title{The log-normal distribution} \usage{ dist_lognormal(mu = 0, sigma = 1) } \arguments{ \item{mu}{The mean (location parameter) of the distribution, which is the mean of the associated Normal distribution. Can be any real number.} \item{sigma}{The standard deviation (scale parameter) of the distribution. Can be any positive number.} } \description{ \lifecycle{stable} } \details{ The log-normal distribution is a commonly used transformation of the Normal distribution. If \eqn{X} follows a log-normal distribution, then \eqn{\ln{X}} would be characterised by a Normal distribution. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{Y} be a Normal random variable with mean \code{mu} = \eqn{\mu} and standard deviation \code{sigma} = \eqn{\sigma}. The log-normal distribution \eqn{X = exp(Y)} is characterised by: \strong{Support}: \eqn{R+}, the set of all real numbers greater than or equal to 0.
\strong{Mean}: \eqn{e^{\mu + \sigma^2/2}} \strong{Variance}: \eqn{(e^{\sigma^2}-1) e^{2\mu + \sigma^2}} \strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{1}{x\sqrt{2 \pi \sigma^2}} e^{-(\ln{x} - \mu)^2 / 2 \sigma^2} }{ f(x) = 1 / (x * sqrt(2 \pi \sigma^2)) exp(-(log(x) - \mu)^2 / (2 \sigma^2)) } \strong{Cumulative distribution function (c.d.f)}: The cumulative distribution function has the form \deqn{ F(x) = \Phi((\ln{x} - \mu)/\sigma) }{ F(x) = Phi((log(x) - \mu)/\sigma) } Where \eqn{\Phi}{Phi} is the CDF of a standard Normal distribution, N(0,1). } \examples{ dist <- dist_lognormal(mu = 1:5, sigma = 0.1) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) # A log-normal distribution X is exp(Y), where Y is a Normal distribution of # the same parameters. So log(X) will produce the Normal distribution Y. log(dist) } \seealso{ \link[stats:Lognormal]{stats::Lognormal} } distributional/man/new_support_region.Rd0000644000175000017500000000065314164717433020450 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/support.R \name{new_support_region} \alias{new_support_region} \title{Create a new support region vector} \usage{ new_support_region(x, limits = NULL) } \arguments{ \item{x}{A list of prototype vectors defining the distribution type.} \item{limits}{A list of value limits for the distribution.} } \description{ Create a new support region vector } distributional/man/hdr.Rd0000644000175000017500000000060613703764147015275 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hdr.R \name{hdr} \alias{hdr} \title{Compute highest density regions} \usage{ hdr(x, ...)
} \arguments{ \item{x}{Object to create hilo from.} \item{...}{Additional arguments used by methods.} } \description{ Used to extract a specified prediction interval at a particular confidence level from a distribution. } distributional/man/dist_pareto.Rd0000644000175000017500000000142614164726426017036 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_pareto.R \name{dist_pareto} \alias{dist_pareto} \title{The Pareto distribution} \usage{ dist_pareto(shape, scale) } \arguments{ \item{shape}{parameters. Must be strictly positive.} \item{scale}{parameters. Must be strictly positive.} } \description{ \lifecycle{questioning} } \examples{ dist <- dist_pareto(shape = c(10, 3, 2, 1), scale = rep(1, 4)) dist \dontshow{if (requireNamespace("actuar", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} mean(dist) variance(dist) support(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) \dontshow{\}) # examplesIf} } \seealso{ \link[actuar:Pareto]{actuar::Pareto} } distributional/man/dist_percentile.Rd0000644000175000017500000000100713703764147017671 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_percentile.R \name{dist_percentile} \alias{dist_percentile} \title{Percentile distribution} \usage{ dist_percentile(x, percentile) } \arguments{ \item{x}{A list of values} \item{percentile}{A list of percentiles} } \description{ \lifecycle{maturing} } \examples{ dist <- dist_normal() percentiles <- seq(0.01, 0.99, by = 0.01) x <- vapply(percentiles, quantile, double(1L), x = dist) dist_percentile(list(x), list(percentiles*100)) } distributional/man/covariance.Rd0000644000175000017500000000071114151532232016611 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{covariance} \alias{covariance} \title{Covariance} \usage{ 
covariance(x, ...) } \arguments{ \item{x}{An object.} \item{...}{Additional arguments used by methods.} } \description{ A generic function for computing the covariance of an object. } \seealso{ \code{\link[=covariance.distribution]{covariance.distribution()}}, \code{\link[=variance]{variance()}} } distributional/man/likelihood.Rd0000644000175000017500000000117614151532232016630 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{likelihood} \alias{likelihood} \alias{likelihood.distribution} \alias{log_likelihood} \title{The (log) likelihood of a sample matching a distribution} \usage{ likelihood(x, ...) \method{likelihood}{distribution}(x, sample, ..., log = FALSE) log_likelihood(x, ...) } \arguments{ \item{x}{The distribution(s).} \item{...}{Additional arguments used by methods.} \item{sample}{A list of sampled values to compare to distribution(s).} \item{log}{If \code{TRUE}, the log-likelihood will be computed.} } \description{ \lifecycle{maturing} } distributional/man/reexports.Rd0000644000175000017500000000062413703764147016553 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reexports.R \docType{import} \name{reexports} \alias{reexports} \alias{generate} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. 
\describe{ \item{generics}{\code{\link[generics]{generate}}} }} distributional/man/new_hilo.Rd0000644000175000017500000000105113703764147016317 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hilo.R \name{new_hilo} \alias{new_hilo} \title{Construct hilo intervals} \usage{ new_hilo(lower = double(), upper = double(), size = double()) } \arguments{ \item{lower, upper}{A numeric vector of values for lower and upper limits.} \item{size}{Size of the interval between [0, 100].} } \value{ A "hilo" vector } \description{ Construct hilo intervals } \examples{ new_hilo(lower = rnorm(10), upper = rnorm(10) + 5, size = 95) } \author{ Earo Wang & Mitchell O'Hara-Wild } distributional/man/quantile.distribution.Rd0000644000175000017500000000105713711706357021057 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{quantile.distribution} \alias{quantile.distribution} \title{Distribution Quantiles} \usage{ \method{quantile}{distribution}(x, p, ..., log = FALSE) } \arguments{ \item{x}{The distribution(s).} \item{p}{The probability of the quantile.} \item{...}{Additional arguments passed to methods.} \item{log}{If \code{TRUE}, probabilities will be given as log probabilities.} } \description{ \lifecycle{stable} } \details{ Computes the quantiles of a distribution. } distributional/man/is-distribution.Rd0000644000175000017500000000102614006120043017617 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{is_distribution} \alias{is_distribution} \title{Test if the object is a distribution} \usage{ is_distribution(x) } \arguments{ \item{x}{An object.} } \value{ TRUE if the object inherits from the distribution class. } \description{ This function returns \code{TRUE} for distributions and \code{FALSE} for all other objects. 
\lifecycle{stable} } \examples{ dist <- dist_normal() is_distribution(dist) is_distribution("distributional") } distributional/man/dist_multinomial.Rd0000644000175000017500000000451714151532232020064 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_multinomial.R \name{dist_multinomial} \alias{dist_multinomial} \title{The Multinomial distribution} \usage{ dist_multinomial(size, prob) } \arguments{ \item{size}{The number of draws from the Categorical distribution.} \item{prob}{The probability of an event occurring from each draw.} } \description{ \lifecycle{maturing} } \details{ The multinomial distribution is a generalization of the binomial distribution to multiple categories. It is perhaps easiest to think that we first extend a \code{\link[=dist_bernoulli]{dist_bernoulli()}} distribution to include more than two categories, resulting in a \code{\link[=dist_categorical]{dist_categorical()}} distribution. We then extend repeat the Categorical experiment several (\eqn{n}) times. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X = (X_1, ..., X_k)} be a Multinomial random variable with success probability \code{p} = \eqn{p}. Note that \eqn{p} is vector with \eqn{k} elements that sum to one. Assume that we repeat the Categorical experiment \code{size} = \eqn{n} times. \strong{Support}: Each \eqn{X_i} is in \eqn{{0, 1, 2, ..., n}}. \strong{Mean}: The mean of \eqn{X_i} is \eqn{n p_i}. \strong{Variance}: The variance of \eqn{X_i} is \eqn{n p_i (1 - p_i)}. For \eqn{i \neq j}, the covariance of \eqn{X_i} and \eqn{X_j} is \eqn{-n p_i p_j}. \strong{Probability mass function (p.m.f)}: \deqn{ P(X_1 = x_1, ..., X_k = x_k) = \frac{n!}{x_1! x_2! ... x_k!} p_1^{x_1} \cdot p_2^{x_2} \cdot ... \cdot p_k^{x_k} }{ P(X_1 = x_1, ..., X_k = x_k) = n! / (x_1! x_2! ... x_k!) p_1^x_1 p_2^x_2 ... 
p_k^x_k } \strong{Cumulative distribution function (c.d.f)}: Omitted for multivariate random variables for the time being. \strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = \left(\sum_{i=1}^k p_i e^{t_i}\right)^n }{ E(e^(tX)) = (p_1 e^t_1 + p_2 e^t_2 + ... + p_k e^t_k)^n } } \examples{ dist <- dist_multinomial(size = c(4, 3), prob = list(c(0.3, 0.5, 0.2), c(0.1, 0.5, 0.4))) dist mean(dist) variance(dist) generate(dist, 10) # TODO: Needs fixing to support multiple inputs # density(dist, 2) # density(dist, 2, log = TRUE) } \seealso{ \link[stats:Multinom]{stats::Multinomial} } distributional/man/dist_categorical.Rd0000644000175000017500000000427314151532232020006 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_categorical.R \name{dist_categorical} \alias{dist_categorical} \title{The Categorical distribution} \usage{ dist_categorical(prob, outcomes = NULL) } \arguments{ \item{prob}{A list of probabilities of observing each outcome category.} \item{outcomes}{The values used to represent each outcome.} } \description{ \lifecycle{stable} } \details{ Categorical distributions are used to represent events with multiple outcomes, such as what number appears on the roll of a dice. This is also referred to as the 'generalised Bernoulli' or 'multinoulli' distribution. The Cateogorical distribution is a special case of the \code{\link[=Multinomial]{Multinomial()}} distribution with \code{n = 1}. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a Categorical random variable with probability parameters \code{p} = \eqn{\{p_1, p_2, \ldots, p_k\}}. The Categorical probability distribution is widely used to model the occurance of multiple events. A simple example is the roll of a dice, where \eqn{p = \{1/6, 1/6, 1/6, 1/6, 1/6, 1/6\}} giving equal chance of observing each number on a 6 sided dice. 
\strong{Support}: \eqn{\{1, \ldots, k\}}{{1, ..., k}} \strong{Mean}: \eqn{p} \strong{Variance}: \eqn{p \cdot (1 - p) = p \cdot q}{p (1 - p)} \strong{Probability mass function (p.m.f)}: \deqn{ P(X = i) = p_i }{ P(X = i) = p_i } \strong{Cumulative distribution function (c.d.f)}: The cdf() of a categorical distribution is undefined as the outcome categories aren't ordered. } \examples{ dist <- dist_categorical(prob = list(c(0.05, 0.5, 0.15, 0.2, 0.1), c(0.3, 0.1, 0.6))) dist generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) # The outcomes aren't ordered, so many statistics are not applicable. cdf(dist, 4) quantile(dist, 0.7) mean(dist) variance(dist) skewness(dist) kurtosis(dist) dist <- dist_categorical( prob = list(c(0.05, 0.5, 0.15, 0.2, 0.1), c(0.3, 0.1, 0.6)), outcomes = list(letters[1:5], letters[24:26]) ) generate(dist, 10) density(dist, "a") density(dist, "z", log = TRUE) } distributional/man/dist_f.Rd0000644000175000017500000000332513711717775015775 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_f.R \name{dist_f} \alias{dist_f} \title{The F Distribution} \usage{ dist_f(df1, df2, ncp = NULL) } \arguments{ \item{df1}{degrees of freedom. \code{Inf} is allowed.} \item{df2}{degrees of freedom. \code{Inf} is allowed.} \item{ncp}{non-centrality parameter. If omitted the central F is assumed.} } \description{ \lifecycle{stable} } \details{ We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be an F random variable with \code{df1} = \eqn{\nu_1} and \code{df2} = \eqn{\nu_2} degrees of freedom.
\strong{Support}: \eqn{x \in (0, \infty)} \strong{Mean}: \eqn{\frac{\nu_2}{\nu_2 - 2}}, provided \eqn{\nu_2 > 2}. \strong{Variance}: \eqn{\frac{2 \nu_2^2 (\nu_1 + \nu_2 - 2)}{\nu_1 (\nu_2 - 2)^2 (\nu_2 - 4)}}, provided \eqn{\nu_2 > 4}. \strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{\sqrt{\frac{(\nu_1 x)^{\nu_1} \nu_2^{\nu_2}}{(\nu_1 x + \nu_2)^{\nu_1 + \nu_2}}}}{x B(\nu_1/2, \nu_2/2)} }{ f(x) = sqrt(((\nu_1 x)^\nu_1 \nu_2^\nu_2) / ((\nu_1 x + \nu_2)^(\nu_1 + \nu_2))) / (x B(\nu_1/2, \nu_2/2)) } \strong{Cumulative distribution function (c.d.f)}: \deqn{ F(x) = I_{\nu_1 x / (\nu_1 x + \nu_2)}(\nu_1/2, \nu_2/2) }{ F(x) = I(\nu_1 x / (\nu_1 x + \nu_2); \nu_1/2, \nu_2/2) } where \eqn{I} is the regularised incomplete beta function. \strong{Moment generating function (m.g.f)}: Does not exist. } \examples{ dist <- dist_f(df1 = c(1,2,5,10,100), df2 = c(1,1,2,1,100)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Fdist]{stats::FDist} } distributional/man/dist_truncated.Rd0000644000175000017500000000166713712645605017535 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/truncated.R \name{dist_truncated} \alias{dist_truncated} \title{Truncate a distribution} \usage{ dist_truncated(dist, lower = -Inf, upper = Inf) } \arguments{ \item{dist}{The distribution(s) to truncate.} \item{lower, upper}{The range of values to keep from a distribution.} } \description{ \lifecycle{experimental} } \details{ Note that the samples are generated using inverse transform sampling, and the means and variances are estimated from samples.
} \examples{ dist <- dist_truncated(dist_normal(2,1), lower = 0) dist mean(dist) variance(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) if(requireNamespace("ggdist")) { library(ggplot2) ggplot() + ggdist::stat_dist_halfeye( aes(y = c("Normal", "Truncated"), dist = c(dist_normal(2,1), dist_truncated(dist_normal(2,1), lower = 0))) ) } } distributional/man/dist_mixture.Rd0000644000175000017500000000074713703764147017246 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mixture.R \name{dist_mixture} \alias{dist_mixture} \title{Create a mixture of distributions} \usage{ dist_mixture(..., weights = numeric()) } \arguments{ \item{...}{Distributions to be used in the mixture.} \item{weights}{The weight of each distribution passed to \code{...}.} } \description{ \lifecycle{experimental} } \examples{ dist_mixture(dist_normal(0, 1), dist_normal(5, 2), weights = c(0.3, 0.7)) } distributional/man/median.distribution.Rd0000644000175000017500000000111713712173123020455 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{median.distribution} \alias{median.distribution} \title{Median of a probability distribution} \usage{ \method{median}{distribution}(x, na.rm = FALSE, ...) } \arguments{ \item{x}{The distribution(s).} \item{na.rm}{Unused, included for consistency with the generic function.} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{stable} } \details{ Returns the median (50th percentile) of a probability distribution. This is equivalent to \code{quantile(x, p=0.5)}. 
} distributional/man/covariance.distribution.Rd0000644000175000017500000000104414151532232021327 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{covariance.distribution} \alias{covariance.distribution} \title{Covariance of a probability distribution} \usage{ \method{covariance}{distribution}(x, ...) } \arguments{ \item{x}{The distribution(s).} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{stable} } \details{ Returns the empirical covariance of the probability distribution. If the method does not exist, the covariance of a random sample will be returned. } distributional/man/guide_level.Rd0000644000175000017500000000172713703764147017011 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scale-level.R \name{guide_level} \alias{guide_level} \title{Level shade bar guide} \usage{ guide_level(title = waiver(), max_discrete = 5, ...) } \arguments{ \item{title}{A character string or expression indicating a title of guide. If \code{NULL}, the title is not shown. By default (\code{\link[ggplot2:waiver]{waiver()}}), the name of the scale object or the name specified in \code{\link[ggplot2:labs]{labs()}} is used for the title.} \item{max_discrete}{The maximum number of levels to be shown using \code{\link[ggplot2]{guide_legend}}. If the number of levels exceeds this value, level shades are shown with \code{\link[ggplot2]{guide_colourbar}}.} \item{...}{Further arguments passed onto either \code{\link[ggplot2]{guide_colourbar}} or \code{\link[ggplot2]{guide_legend}}} } \description{ The level guide shows the colour from the forecast intervals which is blended with the series colour. 
} distributional/man/parameters.Rd0000644000175000017500000000111614151532232016642 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{parameters} \alias{parameters} \alias{parameters.distribution} \title{Extract the parameters of a distribution} \usage{ parameters(x, ...) \method{parameters}{distribution}(x, ...) } \arguments{ \item{x}{The distribution(s).} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{experimental} } \examples{ dist <- c( dist_normal(1:2), dist_poisson(3), dist_multinomial(size = c(4, 3), prob = list(c(0.3, 0.5, 0.2), c(0.1, 0.5, 0.4))) ) parameters(dist) } distributional/man/variance.Rd0000644000175000017500000000204014165036336016275 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{variance} \alias{variance} \alias{variance.numeric} \alias{variance.matrix} \alias{covariance.numeric} \title{Variance} \usage{ variance(x, ...) \method{variance}{numeric}(x, ...) \method{variance}{matrix}(x, ...) \method{covariance}{numeric}(x, ...) } \arguments{ \item{x}{An object.} \item{...}{Additional arguments used by methods.} } \description{ A generic function for computing the variance of an object. } \details{ The implementation of \code{variance()} for numeric variables coerces the input to a vector then uses \code{\link[stats:cor]{stats::var()}} to compute the variance. This means that, unlike \code{\link[stats:cor]{stats::var()}}, if \code{variance()} is passed a matrix or a 2-dimensional array, it will still return the variance (\code{\link[stats:cor]{stats::var()}} returns the covariance matrix in that case). 
} \seealso{ \code{\link[=variance.distribution]{variance.distribution()}}, \code{\link[=covariance]{covariance()}} } distributional/man/dist_cauchy.Rd0000644000175000017500000000333613711717775017022 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_cauchy.R \name{dist_cauchy} \alias{dist_cauchy} \title{The Cauchy distribution} \usage{ dist_cauchy(location, scale) } \arguments{ \item{location}{location and scale parameters.} \item{scale}{location and scale parameters.} } \description{ \lifecycle{maturing} } \details{ The Cauchy distribution is the student's t distribution with one degree of freedom. The Cauchy distribution does not have a well defined mean or variance. Cauchy distributions often appear as priors in Bayesian contexts due to their heavy tails. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a Cauchy variable with location \verb{location =} \eqn{x_0} and \code{scale} = \eqn{\gamma}. \strong{Support}: \eqn{R}, the set of all real numbers \strong{Mean}: Undefined. \strong{Variance}: Undefined. \strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{1}{\pi \gamma \left[1 + \left(\frac{x - x_0}{\gamma} \right)^2 \right]} }{ f(x) = 1 / (\pi \gamma (1 + ((x - x_0) / \gamma)^2)) } \strong{Cumulative distribution function (c.d.f)}: \deqn{ F(t) = \frac{1}{\pi} \arctan \left( \frac{t - x_0}{\gamma} \right) + \frac{1}{2} }{ F(t) = arctan((t - x_0) / \gamma) / \pi + 1/2 } \strong{Moment generating function (m.g.f)}: Does not exist. 
} \examples{ dist <- dist_cauchy(location = c(0, 0, 0, -2), scale = c(0.5, 1, 2, 1)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Cauchy]{stats::Cauchy} } distributional/man/dist_studentized_range.Rd0000644000175000017500000000216713711717775021271 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_studentized_range.R \name{dist_studentized_range} \alias{dist_studentized_range} \title{The Studentized Range distribution} \usage{ dist_studentized_range(nmeans, df, nranges) } \arguments{ \item{nmeans}{sample size for range (same for each group).} \item{df}{degrees of freedom for \eqn{s} (see below).} \item{nranges}{number of \emph{groups} whose \bold{maximum} range is considered.} } \description{ \lifecycle{stable} } \details{ Tukey's studentized range distribution, used for Tukey's honestly significant differences test in ANOVA. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. \strong{Support}: \eqn{R^+}, the set of positive real numbers. Other properties of Tukey's Studentized Range Distribution are omitted, largely because the distribution is not fun to work with. } \examples{ dist <- dist_studentized_range(nmeans = c(6, 2), df = c(5, 4), nranges = c(1, 1)) dist cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Tukey]{stats::Tukey} } distributional/man/skewness.Rd0000644000175000017500000000062213705573747016366 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{skewness} \alias{skewness} \alias{skewness.distribution} \title{Skewness of a probability distribution} \usage{ skewness(x, ...) \method{skewness}{distribution}(x, ...) 
} \arguments{ \item{x}{The distribution(s).} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{stable} } distributional/man/dist_poisson.Rd0000644000175000017500000000275713711717775017252 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_poisson.R \name{dist_poisson} \alias{dist_poisson} \title{The Poisson Distribution} \usage{ dist_poisson(lambda) } \arguments{ \item{lambda}{vector of (non-negative) means.} } \description{ \lifecycle{stable} } \details{ Poisson distributions are frequently used to model counts. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a Poisson random variable with parameter \code{lambda} = \eqn{\lambda}. \strong{Support}: \eqn{\{0, 1, 2, 3, ...\}}{{0, 1, 2, 3, ...}} \strong{Mean}: \eqn{\lambda} \strong{Variance}: \eqn{\lambda} \strong{Probability mass function (p.m.f)}: \deqn{ P(X = k) = \frac{\lambda^k e^{-\lambda}}{k!} }{ P(X = k) = \lambda^k e^(-\lambda) / k! } \strong{Cumulative distribution function (c.d.f)}: \deqn{ P(X \le k) = e^{-\lambda} \sum_{i = 0}^{\lfloor k \rfloor} \frac{\lambda^i}{i!} }{ P(X \le k) = e^(-\lambda) \sum_{i = 0}^k \lambda^i / i! 
} \strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = e^{\lambda (e^t - 1)} }{ E(e^(tX)) = e^(\lambda (e^t - 1)) } } \examples{ dist <- dist_poisson(lambda = c(1, 4, 10)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Poisson]{stats::Poisson} } distributional/man/is_hdr.Rd0000644000175000017500000000035013703764147015764 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hdr.R \name{is_hdr} \alias{is_hdr} \title{Is the object a hdr} \usage{ is_hdr(x) } \arguments{ \item{x}{An object.} } \description{ Is the object a hdr } distributional/man/distributional-package.Rd0000644000175000017500000000267514151532232021137 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distributional-package.R \docType{package} \name{distributional-package} \alias{distributional} \alias{distributional-package} \title{distributional: Vectorised Probability Distributions} \description{ Vectorised distribution objects with tools for manipulating, visualising, and using probability distributions. Designed to allow model prediction outputs to return distributions rather than their parameters, allowing users to directly interact with predictive distributions in a data-oriented workflow. In addition to providing generic replacements for p/d/q/r functions, other useful statistics can be computed including means, variances, intervals, and highest density regions. 
} \seealso{ Useful links: \itemize{ \item \url{https://pkg.mitchelloharawild.com/distributional/} \item \url{https://github.com/mitchelloharawild/distributional} \item Report bugs at \url{https://github.com/mitchelloharawild/distributional/issues} } } \author{ \strong{Maintainer}: Mitchell O'Hara-Wild \email{mail@mitchelloharawild.com} (\href{https://orcid.org/0000-0001-6729-7695}{ORCID}) Authors: \itemize{ \item Matthew Kay (\href{https://orcid.org/0000-0001-9446-0419}{ORCID}) \item Alex Hayes (\href{https://orcid.org/0000-0002-4985-5160}{ORCID}) } Other contributors: \itemize{ \item Earo Wang (\href{https://orcid.org/0000-0001-6448-5260}{ORCID}) [contributor] } } \keyword{internal} distributional/man/kurtosis.Rd0000644000175000017500000000062213705573747016407 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{kurtosis} \alias{kurtosis} \alias{kurtosis.distribution} \title{Kurtosis of a probability distribution} \usage{ kurtosis(x, ...) \method{kurtosis}{distribution}(x, ...) } \arguments{ \item{x}{The distribution(s).} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{stable} } distributional/man/dist_uniform.Rd0000644000175000017500000000346713711717775017226 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_uniform.R \name{dist_uniform} \alias{dist_uniform} \title{The Uniform distribution} \usage{ dist_uniform(min, max) } \arguments{ \item{min}{lower and upper limits of the distribution. Must be finite.} \item{max}{lower and upper limits of the distribution. Must be finite.} } \description{ \lifecycle{stable} } \details{ A distribution with constant density on an interval. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a Uniform random variable on the interval \eqn{[a, b]} with \code{min} = \eqn{a} and \code{max} = \eqn{b}. 
\strong{Support}: \eqn{[a,b]}{[a,b]} \strong{Mean}: \eqn{\frac{1}{2}(a+b)} \strong{Variance}: \eqn{\frac{1}{12}(b-a)^2} \strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{1}{b-a} for x \in [a,b] }{ f(x) = \frac{1}{b-a} for x in [a,b] } \deqn{ f(x) = 0 otherwise }{ f(x) = 0 otherwise } \strong{Cumulative distribution function (c.d.f)}: \deqn{ F(x) = 0 for x < a }{ F(x) = 0 for x < a } \deqn{ F(x) = \frac{x - a}{b-a} for x \in [a,b] }{ F(x) = \frac{x - a}{b-a} for x in [a,b] } \deqn{ F(x) = 1 for x > b }{ F(x) = 1 for x > b } \strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = \frac{e^{tb} - e^{ta}}{t(b-a)} for t \neq 0 }{ E(e^(tX)) = \frac{e^{tb} - e^{ta}}{t(b-a)} for t \neq 0 } \deqn{ E(e^{tX}) = 1 for t = 0 }{ E(e^(tX)) = 1 for t = 0 } } \examples{ dist <- dist_uniform(min = c(3, -2), max = c(5, 4)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Uniform]{stats::Uniform} } distributional/man/dist_inflated.Rd0000644000175000017500000000072413703764147017332 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/inflated.R \name{dist_inflated} \alias{dist_inflated} \title{Inflate a value of a probability distribution} \usage{ dist_inflated(dist, prob, x = 0) } \arguments{ \item{dist}{The distribution(s) to inflate.} \item{prob}{The added probability of observing \code{x}.} \item{x}{The value to inflate. 
The default of \code{x = 0} is for zero-inflation.} } \description{ \lifecycle{maturing} } distributional/man/guide-helpers.Rd0000644000175000017500000000064013703764147017253 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scale-level.R \name{guide_train.level_guide} \alias{guide_train.level_guide} \alias{guide_geom.guide_level} \title{Helper methods for guides} \usage{ \method{guide_train}{level_guide}(guide, scale, aesthetic) \method{guide_geom}{guide_level}(guide, layers, default_mapping) } \description{ Helper methods for guides } \keyword{internal} distributional/man/hdr.distribution.Rd0000644000175000017500000000144513703764147020015 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{hdr.distribution} \alias{hdr.distribution} \title{Highest density regions of probability distributions} \usage{ \method{hdr}{distribution}(x, size = 95, n = 512, ...) } \arguments{ \item{x}{The distribution(s).} \item{size}{The size of the interval (between 0 and 100).} \item{n}{The resolution used to estimate the distribution's density.} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{experimental} } \details{ This function is highly experimental and will change in the future. In particular, improved functionality for object classes and visualisation tools will be added in a future release. Computes minimally sized probability intervals highest density regions. 
} distributional/man/dist_wrap.Rd0000644000175000017500000000314014151532232016472 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_wrap.R \name{dist_wrap} \alias{dist_wrap} \title{Create a distribution from p/d/q/r style functions} \usage{ dist_wrap(dist, ..., package = NULL) } \arguments{ \item{dist}{The name of the distribution used in the functions (name that is prefixed by p/d/q/r)} \item{...}{Named arguments used to parameterise the distribution.} \item{package}{The package from which the distribution is provided. If NULL, the calling environment's search path is used to find the distribution functions. Alternatively, an arbitrary environment can also be provided here.} } \description{ \lifecycle{experimental} } \details{ If a distribution is not yet supported, you can vectorise p/d/q/r functions using this function. \code{dist_wrap()} stores the distributions parameters, and provides wrappers which call the appropriate p/d/q/r functions. Using this function to wrap a distribution should only be done if the distribution is not yet available in this package. If you need a distribution which isn't in the package yet, consider making a request at https://github.com/mitchelloharawild/distributional/issues. 
} \examples{ dist <- dist_wrap("norm", mean = 1:3, sd = c(3, 9, 2)) density(dist, 1) # dnorm() cdf(dist, 4) # pnorm() quantile(dist, 0.975) # qnorm() generate(dist, 10) # rnorm() library(actuar) dist <- dist_wrap("invparalogis", package = "actuar", shape = 2, rate = 2) density(dist, 1) # actuar::dinvparalogis() cdf(dist, 4) # actuar::pinvparalogis() quantile(dist, 0.975) # actuar::qinvparalogis() generate(dist, 10) # actuar::rinvparalogis() } distributional/man/family.distribution.Rd0000644000175000017500000000106214151532232020476 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{family.distribution} \alias{family.distribution} \title{Extract the name of the distribution family} \usage{ \method{family}{distribution}(object, ...) } \arguments{ \item{object}{The distribution(s).} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{experimental} } \examples{ dist <- c( dist_normal(1:2), dist_poisson(3), dist_multinomial(size = c(4, 3), prob = list(c(0.3, 0.5, 0.2), c(0.1, 0.5, 0.4))) ) family(dist) } distributional/man/generate.distribution.Rd0000644000175000017500000000076113703764147021032 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{generate.distribution} \alias{generate.distribution} \title{Randomly sample values from a distribution} \usage{ \method{generate}{distribution}(x, times, ...) } \arguments{ \item{x}{The distribution(s).} \item{times}{The number of samples.} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{stable} } \details{ Generate random samples from probability distributions. 
} distributional/man/dist_sample.Rd0000644000175000017500000000117514164705031017013 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_sample.R \name{dist_sample} \alias{dist_sample} \title{Sampling distribution} \usage{ dist_sample(x) } \arguments{ \item{x}{A list of sampled values.} } \description{ \lifecycle{stable} } \examples{ # Univariate numeric samples dist <- dist_sample(x = list(rnorm(100), rnorm(100, 10))) dist mean(dist) variance(dist) skewness(dist) generate(dist, 10) density(dist, 1) # Multivariate numeric samples dist <- dist_sample(x = list(cbind(rnorm(100), rnorm(100, 10)))) dist mean(dist) variance(dist) skewness(dist) generate(dist, 10) density(dist, 1) } distributional/man/geom_hilo_ribbon.Rd0000644000175000017500000000614213703764147020016 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geom_hilo.R \name{geom_hilo_ribbon} \alias{geom_hilo_ribbon} \title{Ribbon plots for hilo intervals} \usage{ geom_hilo_ribbon( mapping = NULL, data = NULL, stat = "identity", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ... ) } \arguments{ \item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}} or \code{\link[ggplot2:aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the default), it is combined with the default mapping at the top level of the plot. You must supply \code{mapping} if there is no plot mapping.} \item{data}{The data to be displayed in this layer. There are three options: If \code{NULL}, the default, the data is inherited from the plot data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}. A \code{data.frame}, or other object, will override the plot data. All objects will be fortified to produce a data frame. See \code{\link[ggplot2:fortify]{fortify()}} for which variables will be created. A \code{function} will be called with a single argument, the plot data. 
The return value must be a \code{data.frame}, and will be used as the layer data. A \code{function} can be created from a \code{formula} (e.g. \code{~ head(.x, 10)}).} \item{stat}{The statistical transformation to use on the data for this layer, as a string.} \item{position}{Position adjustment, either as a string, or the result of a call to a position adjustment function.} \item{na.rm}{If \code{FALSE}, the default, missing values are removed with a warning. If \code{TRUE}, missing values are silently removed.} \item{show.legend}{logical. Should this layer be included in the legends? \code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE} never includes, and \code{TRUE} always includes. It can also be a named logical vector to finely select the aesthetics to display.} \item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics, rather than combining with them. This is most useful for helper functions that define both data and aesthetics and shouldn't inherit behaviour from the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.} \item{...}{Other arguments passed on to \code{\link[ggplot2:layer]{layer()}}. These are often aesthetics, used to set an aesthetic to a fixed value, like \code{colour = "red"} or \code{size = 3}. They may also be parameters to the paired geom/stat.} } \description{ \lifecycle{maturing} } \details{ \code{geom_hilo_ribbon()} displays the interval defined by a hilo object. The luminance of the shaded area indicates its confidence level. The shade colour can be controlled by the \code{fill} aesthetic, however the luminance will be overwritten to represent the confidence level. 
} \examples{ dist <- dist_normal(1:3, 1:3) library(ggplot2) ggplot( data.frame(x = rep(1:3, 2), interval = c(hilo(dist, 80), hilo(dist, 95))) ) + geom_hilo_ribbon(aes(x = x, hilo = interval)) } \seealso{ \code{\link[=geom_hilo_linerange]{geom_hilo_linerange()}} for discrete hilo intervals (vertical lines) } distributional/man/dist_transformed.Rd0000644000175000017500000000173313703764147020071 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/transformed.R \name{dist_transformed} \alias{dist_transformed} \title{Modify a distribution with a transformation} \usage{ dist_transformed(dist, transform, inverse) } \arguments{ \item{dist}{A univariate distribution vector.} \item{transform}{A function used to transform the distribution. This transformation should be monotonic over appropriate domain.} \item{inverse}{The inverse of the \code{transform} function.} } \description{ \lifecycle{experimental} } \details{ The \code{\link[=density]{density()}}, \code{\link[=mean]{mean()}}, and \code{\link[=variance]{variance()}} methods are approximate as they are based on numerical derivatives. 
} \examples{ # Create a log normal distribution dist <- dist_transformed(dist_normal(0, 0.5), exp, log) density(dist, 1) # dlnorm(1, 0, 0.5) cdf(dist, 4) # plnorm(4, 0, 0.5) quantile(dist, 0.1) # qlnorm(0.1, 0, 0.5) generate(dist, 10) # rlnorm(10, 0, 0.5) } distributional/man/new_hdr.Rd0000644000175000017500000000117414151532232016131 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hdr.R \name{new_hdr} \alias{new_hdr} \title{Construct hdr intervals} \usage{ new_hdr( lower = list_of(.ptype = double()), upper = list_of(.ptype = double()), size = double() ) } \arguments{ \item{lower, upper}{A list of numeric vectors specifying the region's lower and upper bounds.} \item{size}{A numeric vector specifying the coverage size of the region.} } \value{ A "hdr" vector } \description{ Construct hdr intervals } \examples{ new_hdr(lower = list(1, c(3,6)), upper = list(10, c(5, 8)), size = c(80, 95)) } \author{ Mitchell O'Hara-Wild } distributional/man/dist_gumbel.Rd0000644000175000017500000000414714164726426017022 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_gumbel.R \name{dist_gumbel} \alias{dist_gumbel} \title{The Gumbel distribution} \usage{ dist_gumbel(alpha, scale) } \arguments{ \item{alpha}{location parameter.} \item{scale}{parameter. Must be strictly positive.} } \description{ \lifecycle{stable} } \details{ The Gumbel distribution is a special case of the Generalized Extreme Value distribution, obtained when the GEV shape parameter \eqn{\xi} is equal to 0. It may be referred to as a type I extreme value distribution. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a Gumbel random variable with location parameter \code{mu} = \eqn{\mu}, scale parameter \code{sigma} = \eqn{\sigma}. \strong{Support}: \eqn{R}, the set of all real numbers. 
\strong{Mean}: \eqn{\mu + \sigma\gamma}, where \eqn{\gamma} is Euler's constant, approximately equal to 0.57722. \strong{Median}: \eqn{\mu - \sigma\ln(\ln 2)}{\mu - \sigma ln(ln 2)}. \strong{Variance}: \eqn{\sigma^2 \pi^2 / 6}. \strong{Probability density function (p.d.f)}: \deqn{f(x) = \sigma ^ {-1} \exp[-(x - \mu) / \sigma]% \exp\{-\exp[-(x - \mu) / \sigma] \}}{% f(x) = (1 / \sigma) exp[-(x - \mu) / \sigma]% exp{-exp[-(x - \mu) / \sigma]}} for \eqn{x} in \eqn{R}, the set of all real numbers. \strong{Cumulative distribution function (c.d.f)}: In the \eqn{\xi = 0} (Gumbel) special case \deqn{F(x) = \exp\{-\exp[-(x - \mu) / \sigma] \}}{% F(x) = exp{ - exp[-(x - \mu) / \sigma]} } for \eqn{x} in \eqn{R}, the set of all real numbers. } \examples{ dist <- dist_gumbel(alpha = c(0.5, 1, 1.5, 3), scale = c(2, 2, 3, 4)) dist \dontshow{if (requireNamespace("actuar", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} mean(dist) variance(dist) skewness(dist) kurtosis(dist) support(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) \dontshow{\}) # examplesIf} } \seealso{ \link[actuar:Gumbel]{actuar::Gumbel} } distributional/man/hilo.Rd0000644000175000017500000000141214164770613015444 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hilo.R \name{hilo} \alias{hilo} \title{Compute intervals} \usage{ hilo(x, ...) } \arguments{ \item{x}{Object to create hilo from.} \item{...}{Additional arguments used by methods.} } \description{ Used to extract a specified prediction interval at a particular confidence level from a distribution. } \details{ The numeric lower and upper bounds can be extracted from the interval using \verb{$lower} and \verb{$upper} as shown in the examples below. 
} \examples{ # 95\% interval from a standard normal distribution interval <- hilo(dist_normal(0, 1), 95) interval # Extract the individual quantities with `$lower`, `$upper`, and `$level` interval$lower interval$upper interval$level } distributional/man/dist_poisson_inverse_gaussian.Rd0000644000175000017500000000174214164725546022666 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_poisson_inverse_gaussian.R \name{dist_poisson_inverse_gaussian} \alias{dist_poisson_inverse_gaussian} \title{The Poisson-Inverse Gaussian distribution} \usage{ dist_poisson_inverse_gaussian(mean, shape) } \arguments{ \item{mean}{parameters. Must be strictly positive. Infinite values are supported.} \item{shape}{parameters. Must be strictly positive. Infinite values are supported.} } \description{ \lifecycle{stable} } \examples{ dist <- dist_poisson_inverse_gaussian(mean = rep(0.1, 3), shape = c(0.4, 0.8, 1)) dist \dontshow{if (requireNamespace("actuar", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} mean(dist) variance(dist) support(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) \dontshow{\}) # examplesIf} } \seealso{ \link[actuar:PoissonInverseGaussian]{actuar::PoissonInverseGaussian} } distributional/man/dist_student_t.Rd0000644000175000017500000000417713711717775017567 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_student_t.R \name{dist_student_t} \alias{dist_student_t} \title{The (non-central) location-scale Student t Distribution} \usage{ dist_student_t(df, mu = 0, sigma = 1, ncp = NULL) } \arguments{ \item{df}{degrees of freedom (\eqn{> 0}, maybe non-integer). \code{df = Inf} is allowed.} \item{mu}{The location parameter of the distribution. 
If \code{ncp == 0} (or \code{NULL}), this is the median.} \item{sigma}{The scale parameter of the distribution.} \item{ncp}{non-centrality parameter \eqn{\delta}{delta}; currently except for \code{rt()}, only for \code{abs(ncp) <= 37.62}. If omitted, use the central t distribution.} } \description{ \lifecycle{stable} } \details{ The Student's T distribution is closely related to the \code{\link[=Normal]{Normal()}} distribution, but has heavier tails. As \eqn{\nu} increases to \eqn{\infty}, the Student's T converges to a Normal. The T distribution appears repeatedly throughout classic frequentist hypothesis testing when comparing group means. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a \strong{central} Students T random variable with \code{df} = \eqn{\nu}. \strong{Support}: \eqn{R}, the set of all real numbers \strong{Mean}: Undefined unless \eqn{\nu \ge 2}, in which case the mean is zero. \strong{Variance}: \deqn{ \frac{\nu}{\nu - 2} }{ \nu / (\nu - 2) } Undefined if \eqn{\nu < 1}, infinite when \eqn{1 < \nu \le 2}. 
\strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{\Gamma(\frac{\nu + 1}{2})}{\sqrt{\nu \pi} \Gamma(\frac{\nu}{2})} (1 + \frac{x^2}{\nu} )^{- \frac{\nu + 1}{2}} }{ f(x) = \Gamma((\nu + 1) / 2) / (\sqrt(\nu \pi) \Gamma(\nu / 2)) (1 + x^2 / \nu)^(- (\nu + 1) / 2) } } \examples{ dist <- dist_student_t(df = c(1,2,5), mu = c(0,1,2), sigma = c(1,2,3)) dist mean(dist) variance(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:TDist]{stats::TDist} } distributional/man/dist_weibull.Rd0000644000175000017500000000311413711717775017207 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_weibull.R \name{dist_weibull} \alias{dist_weibull} \title{The Weibull distribution} \usage{ dist_weibull(shape, scale) } \arguments{ \item{shape}{shape and scale parameters, the latter defaulting to 1.} \item{scale}{shape and scale parameters, the latter defaulting to 1.} } \description{ \lifecycle{stable} } \details{ Generalization of the gamma distribution. Often used in survival and time-to-event analyses. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a Weibull random variable with shape parameter \code{shape} = \eqn{k} and scale parameter \code{scale} = \eqn{\lambda}. \strong{Support}: \eqn{R^+} and zero. \strong{Mean}: \eqn{\lambda \Gamma(1+1/k)}, where \eqn{\Gamma} is the gamma function. 
\strong{Variance}: \eqn{\lambda [ \Gamma (1 + \frac{2}{k} ) - (\Gamma(1+ \frac{1}{k}))^2 ]} \strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{k}{\lambda}(\frac{x}{\lambda})^{k-1}e^{-(x/\lambda)^k}, x \ge 0 } \strong{Cumulative distribution function (c.d.f)}: \deqn{F(x) = 1 - e^{-(x/\lambda)^k}, x \ge 0} \strong{Moment generating function (m.g.f)}: \deqn{\sum_{n=0}^\infty \frac{t^n\lambda^n}{n!} \Gamma(1+n/k), k \ge 1} } \examples{ dist <- dist_weibull(shape = c(0.5, 1, 1.5, 5), scale = rep(1, 4)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Weibull]{stats::Weibull} } distributional/man/dist_logistic.Rd0000644000175000017500000000345013711717775017364 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_logistic.R \name{dist_logistic} \alias{dist_logistic} \title{The Logistic distribution} \usage{ dist_logistic(location, scale) } \arguments{ \item{location}{location and scale parameters.} \item{scale}{location and scale parameters.} } \description{ \lifecycle{stable} } \details{ A continuous distribution on the real line. For binary outcomes the model given by \eqn{P(Y = 1 | X) = F(X \beta)} where \eqn{F} is the Logistic \code{\link[=cdf]{cdf()}} is called \emph{logistic regression}. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a Logistic random variable with \code{location} = \eqn{\mu} and \code{scale} = \eqn{s}. 
\strong{Support}: \eqn{R}, the set of all real numbers \strong{Mean}: \eqn{\mu} \strong{Variance}: \eqn{s^2 \pi^2 / 3} \strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{e^{-(\frac{x - \mu}{s})}}{s [1 + \exp(-(\frac{x - \mu}{s})) ]^2} }{ f(x) = e^(-(x - \mu) / s) / (s (1 + e^(-(x - \mu) / s))^2) } \strong{Cumulative distribution function (c.d.f)}: \deqn{ F(t) = \frac{1}{1 + e^{-(\frac{t - \mu}{s})}} }{ F(t) = 1 / (1 + e^(-(t - \mu) / s)) } \strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = e^{\mu t} \beta(1 - st, 1 + st) }{ E(e^(tX)) = e^(\mu t) \beta(1 - st, 1 + st) } where \eqn{\beta(x, y)} is the Beta function. } \examples{ dist <- dist_logistic(location = c(5,9,9,6,2), scale = c(2,3,4,2,1)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Logistic]{stats::Logistic} } distributional/man/support.Rd0000644000175000017500000000062014151532232016212 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{support} \alias{support} \alias{support.distribution} \title{Region of support of a distribution} \usage{ support(x, ...) \method{support}{distribution}(x, ...) 
} \arguments{ \item{x}{The distribution(s).} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{experimental} } distributional/man/is_hilo.Rd0000644000175000017500000000035613703764147016150 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hilo.R \name{is_hilo} \alias{is_hilo} \title{Is the object a hilo} \usage{ is_hilo(x) } \arguments{ \item{x}{An object.} } \description{ Is the object a hilo } distributional/man/dist_beta.Rd0000644000175000017500000000115613707412651016451 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_beta.R \name{dist_beta} \alias{dist_beta} \title{The Beta distribution} \usage{ dist_beta(shape1, shape2) } \arguments{ \item{shape1, shape2}{The non-negative shape parameters of the Beta distribution.} } \description{ \lifecycle{maturing} } \examples{ dist <- dist_beta(shape1 = c(0.5, 5, 1, 2, 2), shape2 = c(0.5, 1, 3, 2, 5)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Beta]{stats::Beta} } distributional/man/variance.distribution.Rd0000644000175000017500000000103014131011013020763 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{variance.distribution} \alias{variance.distribution} \title{Variance of a probability distribution} \usage{ \method{variance}{distribution}(x, ...) } \arguments{ \item{x}{The distribution(s).} \item{...}{Additional arguments used by methods.} } \description{ \lifecycle{stable} } \details{ Returns the empirical variance of the probability distribution. If the method does not exist, the variance of a random sample will be returned. 
} distributional/man/dist_missing.Rd0000644000175000017500000000107613712674266017220 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_missing.R \name{dist_missing} \alias{dist_missing} \title{Missing distribution} \usage{ dist_missing(length = 1) } \arguments{ \item{length}{The number of missing distributions} } \description{ \lifecycle{experimental} } \details{ A placeholder distribution for handling missing values in a vector of distributions. } \examples{ dist <- dist_missing(3L) dist mean(dist) variance(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } distributional/man/dist_chisq.Rd0000644000175000017500000000416313711717775016660 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_chisq.R \name{dist_chisq} \alias{dist_chisq} \title{The (non-central) Chi-Squared Distribution} \usage{ dist_chisq(df, ncp = 0) } \arguments{ \item{df}{degrees of freedom (non-negative, but can be non-integer).} \item{ncp}{non-centrality parameter (non-negative).} } \description{ \lifecycle{stable} } \details{ Chi-square distributions show up often in frequentist settings as the sampling distribution of test statistics, especially in maximum likelihood estimation settings. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a \eqn{\chi^2} random variable with \code{df} = \eqn{k}. 
\strong{Support}: \eqn{R^+}, the set of positive real numbers \strong{Mean}: \eqn{k} \strong{Variance}: \eqn{2k} \strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{1}{2^{k/2} \Gamma(k/2)} x^{k/2 - 1} e^{-x/2} }{ f(x) = 1 / (2^(k/2) \Gamma(k/2)) x^(k/2 - 1) e^(-x/2) } \strong{Cumulative distribution function (c.d.f)}: The cumulative distribution function has the form \deqn{ F(t) = \int_0^t \frac{1}{2^{k/2} \Gamma(k/2)} x^{k/2 - 1} e^{-x/2} dx }{ F(t) = integral_0^t 1 / (2^(k/2) \Gamma(k/2)) x^(k/2 - 1) e^(-x/2) dx } but this integral does not have a closed form solution and must be approximated numerically. \strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = (1 - 2t)^{-k/2}, \quad t < 1/2 }{ E(e^(tX)) = (1 - 2t)^(-k/2) for t < 1/2 } } \examples{ dist <- dist_chisq(df = c(1,2,3,4,6,9)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Chisquare]{stats::Chisquare} } distributional/man/scale_level.Rd0000644000175000017500000001337414151532232016766 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scale-level.R \name{scale_level} \alias{scale_level} \alias{scale_level_continuous} \title{level luminance scales} \usage{ scale_level_continuous(..., guide = "level") } \arguments{ \item{...}{ Arguments passed on to \code{\link[ggplot2:continuous_scale]{continuous_scale}} \describe{ \item{\code{scale_name}}{The name of the scale that should be used for error messages associated with this scale.} \item{\code{palette}}{A palette function that when called with a numeric vector with values 
between 0 and 1 returns the corresponding output values (e.g., \code{\link[scales:area_pal]{scales::area_pal()}}).} \item{\code{name}}{The name of the scale. Used as the axis or legend title. If \code{waiver()}, the default, the name of the scale is taken from the first mapping used for that aesthetic. If \code{NULL}, the legend title will be omitted.} \item{\code{breaks}}{One of: \itemize{ \item \code{NULL} for no breaks \item \code{waiver()} for the default breaks computed by the \link[scales:trans_new]{transformation object} \item A numeric vector of positions \item A function that takes the limits as input and returns breaks as output (e.g., a function returned by \code{\link[scales:breaks_extended]{scales::extended_breaks()}}). Also accepts rlang \link[rlang:as_function]{lambda} function notation. }} \item{\code{minor_breaks}}{One of: \itemize{ \item \code{NULL} for no minor breaks \item \code{waiver()} for the default breaks (one minor break between each major break) \item A numeric vector of positions \item A function that given the limits returns a vector of minor breaks. Also accepts rlang \link[rlang:as_function]{lambda} function notation. }} \item{\code{n.breaks}}{An integer guiding the number of major breaks. The algorithm may choose a slightly different number to ensure nice break labels. Will only have an effect if \code{breaks = waiver()}. Use \code{NULL} to use the default number of breaks given by the transformation.} \item{\code{labels}}{One of: \itemize{ \item \code{NULL} for no labels \item \code{waiver()} for the default labels computed by the transformation object \item A character vector giving labels (must be same length as \code{breaks}) \item A function that takes the breaks as input and returns labels as output. Also accepts rlang \link[rlang:as_function]{lambda} function notation. }} \item{\code{limits}}{One of: \itemize{ \item \code{NULL} to use the default scale range \item A numeric vector of length two providing limits of the scale. 
Use \code{NA} to refer to the existing minimum or maximum \item A function that accepts the existing (automatic) limits and returns new limits. Also accepts rlang \link[rlang:as_function]{lambda} function notation. Note that setting limits on positional scales will \strong{remove} data outside of the limits. If the purpose is to zoom, use the limit argument in the coordinate system (see \code{\link[ggplot2:coord_cartesian]{coord_cartesian()}}). }} \item{\code{rescaler}}{A function used to scale the input values to the range [0, 1]. This is always \code{\link[scales:rescale]{scales::rescale()}}, except for diverging and n colour gradients (i.e., \code{\link[ggplot2:scale_gradient]{scale_colour_gradient2()}}, \code{\link[ggplot2:scale_gradient]{scale_colour_gradientn()}}). The \code{rescaler} is ignored by position scales, which always use \code{\link[scales:rescale]{scales::rescale()}}. Also accepts rlang \link[rlang:as_function]{lambda} function notation.} \item{\code{oob}}{One of: \itemize{ \item Function that handles limits outside of the scale limits (out of bounds). Also accepts rlang \link[rlang:as_function]{lambda} function notation. \item The default (\code{\link[scales:oob]{scales::censor()}}) replaces out of bounds values with \code{NA}. \item \code{\link[scales:oob]{scales::squish()}} for squishing out of bounds values into range. \item \code{\link[scales:oob]{scales::squish_infinite()}} for squishing infinite values into range. }} \item{\code{trans}}{For continuous scales, the name of a transformation object or the object itself. Built-in transformations include "asn", "atanh", "boxcox", "date", "exp", "hms", "identity", "log", "log10", "log1p", "log2", "logit", "modulus", "probability", "probit", "pseudo_log", "reciprocal", "reverse", "sqrt" and "time". A transformation object bundles together a transform, its inverse, and methods for generating breaks and labels. 
Transformation objects are defined in the scales package, and are called \verb{_trans} (e.g., \code{\link[scales:boxcox_trans]{scales::boxcox_trans()}}). You can create your own transformation with \code{\link[scales:trans_new]{scales::trans_new()}}.} \item{\code{expand}}{For position scales, a vector of range expansion constants used to add some padding around the data to ensure that they are placed some distance away from the axes. Use the convenience function \code{\link[ggplot2:expansion]{expansion()}} to generate the values for the \code{expand} argument. The defaults are to expand the scale by 5\% on each side for continuous variables, and by 0.6 units on each side for discrete variables.} \item{\code{position}}{For position scales, The position of the axis. \code{left} or \code{right} for y axes, \code{top} or \code{bottom} for x axes.} \item{\code{super}}{The super class to use for the constructed scale} }} \item{guide}{Type of legend. Use \code{"colourbar"} for continuous colour bar, or \code{"legend"} for discrete colour legend.} } \value{ A ggproto object inheriting from \code{Scale} } \description{ This set of scales defines new scales for prob geoms equivalent to the ones already defined by ggplot2. This allows the shade of confidence intervals to work with the legend output. } \concept{scale_level_*} distributional/man/dist_inverse_gamma.Rd0000644000175000017500000000161214164726426020356 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_inverse_gamma.R \name{dist_inverse_gamma} \alias{dist_inverse_gamma} \title{The Inverse Gamma distribution} \usage{ dist_inverse_gamma(shape, rate = 1/scale, scale) } \arguments{ \item{shape}{parameters. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{scale}{parameters. 
Must be strictly positive.} } \description{ \lifecycle{stable} } \examples{ dist <- dist_inverse_gamma(shape = c(1,2,3,3), rate = c(1,1,1,2)) dist \dontshow{if (requireNamespace("actuar", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} mean(dist) variance(dist) support(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) \dontshow{\}) # examplesIf} } \seealso{ \link[actuar:InverseGamma]{actuar::InverseGamma} } distributional/man/dist_normal.Rd0000644000175000017500000000546114151532232017021 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_normal.R \name{dist_normal} \alias{dist_normal} \title{The Normal distribution} \usage{ dist_normal(mu = 0, sigma = 1) } \arguments{ \item{mu}{The mean (location parameter) of the distribution, which is also the mean of the distribution. Can be any real number.} \item{sigma}{The standard deviation (scale parameter) of the distribution. Can be any positive number. If you would like a Normal distribution with \strong{variance} \eqn{\sigma^2}, be sure to take the square root, as this is a common source of errors.} } \description{ \lifecycle{stable} } \details{ The Normal distribution is ubiquitous in statistics, partially because of the central limit theorem, which states that sums of i.i.d. random variables eventually become Normal. Linear transformations of Normal random variables result in new random variables that are also Normal. If you are taking an intro stats course, you'll likely use the Normal distribution for Z-tests and in simple linear regression. Under regularity conditions, maximum likelihood estimators are asymptotically Normal. The Normal distribution is also called the gaussian distribution. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. 
In the following, let \eqn{X} be a Normal random variable with mean \code{mu} = \eqn{\mu} and standard deviation \code{sigma} = \eqn{\sigma}. \strong{Support}: \eqn{R}, the set of all real numbers \strong{Mean}: \eqn{\mu} \strong{Variance}: \eqn{\sigma^2} \strong{Probability density function (p.d.f)}: \deqn{ f(x) = \frac{1}{\sqrt{2 \pi \sigma^2}} e^{-(x - \mu)^2 / 2 \sigma^2} }{ f(x) = 1 / sqrt(2 \pi \sigma^2) exp(-(x - \mu)^2 / (2 \sigma^2)) } \strong{Cumulative distribution function (c.d.f)}: The cumulative distribution function has the form \deqn{ F(t) = \int_{-\infty}^t \frac{1}{\sqrt{2 \pi \sigma^2}} e^{-(x - \mu)^2 / 2 \sigma^2} dx }{ F(t) = integral_{-\infty}^t 1 / sqrt(2 \pi \sigma^2) exp(-(x - \mu)^2 / (2 \sigma^2)) dx } but this integral does not have a closed form solution and must be approximated numerically. The c.d.f. of a standard Normal is sometimes called the "error function". The notation \eqn{\Phi(t)} also stands for the c.d.f. of a standard Normal evaluated at \eqn{t}. Z-tables list the value of \eqn{\Phi(t)} for various \eqn{t}. 
\strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = e^{\mu t + \sigma^2 t^2 / 2} }{ E(e^(tX)) = e^(\mu t + \sigma^2 t^2 / 2) } } \examples{ dist <- dist_normal(mu = 1:5, sigma = 3) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Normal]{stats::Normal} } distributional/man/figures/0000755000175000017500000000000014126532471015664 5ustar nileshnileshdistributional/man/figures/lifecycle-defunct.svg0000644000175000017500000000170413703764147022003 0ustar nileshnileshlifecyclelifecycledefunctdefunct distributional/man/figures/lifecycle-archived.svg0000644000175000017500000000170713703764147022143 0ustar nileshnilesh lifecyclelifecyclearchivedarchived distributional/man/figures/lifecycle-stable.svg0000644000175000017500000000167413703764147021633 0ustar nileshnileshlifecyclelifecyclestablestable distributional/man/figures/lifecycle-questioning.svg0000644000175000017500000000171413703764147022721 0ustar nileshnileshlifecyclelifecyclequestioningquestioning distributional/man/figures/lifecycle-experimental.svg0000644000175000017500000000171613703764147023053 0ustar nileshnileshlifecyclelifecycleexperimentalexperimental distributional/man/figures/lifecycle-maturing.svg0000644000175000017500000000170613703764147022203 0ustar nileshnileshlifecyclelifecyclematuringmaturing distributional/man/figures/lifecycle-soft-deprecated.svg0000644000175000017500000000172613703764147023430 0ustar nileshnileshlifecyclelifecyclesoft-deprecatedsoft-deprecated distributional/man/figures/README-plot-1.png0000644000175000017500000003467214126532471020455 0ustar nileshnileshPNG  IHDRHc pHYsod IDATxw|Ss3tP:=dLdā"FWEPQ@EDD" U?E%[V[3Ai4mܛq{y{J, -: GA<DA<DA:18o^_W[".[!g{eʃ/ʶyw6Oטܣ|-W`z\qrfes9r‡}z}p\UW"`^!jq}w}Ѵs3LW2GlPJB>>e—_~sw=NjsmyTAٖ sw˽O= 멍XMWàtEv> 76M #gveYf {/7ݾe89%džv7}e*U^˻-B6=%qEq.xrsrXކnxG {7 -sMkg|Glg7}KӪ!Ձ+82ƠO|́ 
ݠCsLt̯>Խe(98ag`}bM#fOf1ó>f3vCֵu~>8(&/nRSS~UBL:hsTّ ZdžYk]?:c/.$L5=0{J\'bi-Ɏkw,l3SM3u 񸾕M/X57^DIma4ԙ\/ZW:)h'X줇ۜBq_lu_LuLH%n{x*0 ͮXq 9g;v{[sdY=lj7YN}f}tsd9w0֪aԛ~lue9gvV}DΉ|A˅ά?UPirSC˒QPoE]_^oΛS2i{ǖ GwytOp9JozɶC;бva/Xbqׯ*]w˳>\γZu.:lضyd ozE-}{xP!iƃ'[24'7ߑD}c律eqwU5">Qlkl^WlJn [ ϵHZ{mW\]Y$K{H)bCj3^5(kj3<1[m`4u?ءQ˱?dEF9_in|Q-Zˇ?G|eYngSН_VzX*OMr3Mle4uy^•_鱚S;H0-٥^F|iN$˲Ȍ&M~W5rV<2F.۸zz)96F7b͗eG7#jݽLwjkZ{!˵Vz_9{]۪HY7o_r" H9EUg.CmNрRحs죋Uuu^ZCy+4MvNI=--]Cwm/uwrmؙ֨F$&U _51G!n~;Tl0/6׵҄rJ])?pOS3.,sG.X{Wms᧳^O>ҒȰ.O=QOG*idCP VczySuz#lnx ~6֩_Kolwູa۰^lo"9}XAO}jg֛"{q V%V ʻ rݣc9نvfW{G1nD;\uvXm.}Eb9Si.8* 'N3UEw u'/&]t\S3j+JR(Uge}?c*;Ҝ'0S}^_GW_7x e߯D%H8r첮uMi7 c\uS'ΊZ-KּySjI;K֎-pIo:iw`Mbj 䴣 !d28I5Z%-Uix޾=nwz]wڪO -ڶQ o}C'7ޖ3;[جU3̿\!*{~/~r4}FBAcƭڲ9);'?>qCӷ D%欔Wŧ2{?\RCN]vt:pƩX7@gWn۪[ӫ7jҒo&ErNːˇv(›F!DmUIWfVb!5tBK~9 uEYjݛ0Ԯ_[vYWWJ9|(1̵'ؿn//=孛^|#=Ϭ?)mʐC¹wN{kZGzq)cmr6SZĩ/_]ܾ}IRZ5*M .~iˎ7~k=:oV{{CmG9cgt+n2 >NMms2}-gv}> o7שWPnpE:G7 P09uی'o]c]-B];~[8o˕W؎.yr9[Wiֿ]r*̌NBn#Ī~h=ґgɒ?]uڥ!~1oeYҜ15nYѱ(kl֪?7(Ls?zjTAϿy_o3&{9{ݹV]S^W"`<̕'Vwrܝ%tǹ,a;|ǭ79TPe=N??t㸸sgB8Rժ?NWg[Ε͟|Oݷᇆhl z[/w#{<=7%׸vebK6\׭K+_ƕ{m\o fqe/jaQEO1϶2[ܷןY:QtEiwMz]JWן\gƃ7]cFb>=UIbtՙllRNoH{Klu]ikTJ%r?mJB#$,c[~df"_0*8(jw@jޗGfޘ T-cqg˲,;db5~&:5kmsFfcح#!夿~9oE帰qțD$!΢-,&Wr츸uјͫXB4aeHھ/ n[RQӽ_".r[\-vhzcSKlG͸s*! 
tŽ 6G YP[E봟w|ےo9,a60!qG<.kӟ 6T{YGFskoNɽΉdYmy"LU6?>VnJE{H)59K>4,Qn~xƤBTrCy+4I;7soؙ ۶z]OwY/C]& ^hl;ҜP} ^l_`]!E>Üu~H]('NA  "  "  "Р/$222===##՚mieT 0,R Jad2+]Y,z 0IBCC/_t!L3@x4@x4@x4@x4@x4@x4@x4@x4@x4@ Jϋ/{JJ!ൣ=hʝ뮗F_#g_y5݋gx 3xH殔< f~F4yJ•~Jҽ0&ZPs6XP2!Ώ U5 ˯ce*tV?"HKTAy1^u44P^]4L7x(o#U$2/ж|WL;%F M ,#&bFA?d aj*K!X8c @>B 1@x.b@1@x@bU< ཎ;2@N0xD<E{<on)={*[ @{S4=ǩ{ P &<0a8@zʍIxD{3;[:[vm0P𴞰hWG{ NLD(6@ L=gpZB@(y#@ِ_ THSD>p(ԀuD>{<tދ<"?)<?Np\J#`̝/!Tr)\> ѤˠZ".<8Ɠ]lxj9iu:f!y񒔁czVe(z'%%EFF6jhիWW.9G>}WjSFVISfHĚ$Q~OxVdrڱ㶘VI!C+3O߲KuxztYԊ:y{s}#ѣG&Mp Iܙ8p`…_ciii_&ƍS 8ofPСC^_R%o)N^ |,"%o>rBd[B6/[#QWF5a+h4 !$Il u@9-JPp-}rʞs殽n֜hd9ర0!DHHHIQV,;!C~٦M1>}tP!tG$YAYVNl6/Wz`0dee)]H):8ҢM~?ɲ9|aeλY>)-̳y'2|6yvOϺUe˅fK4?T&)+++##W6kx͛ի+wBCCz}rrWbSSS" dGsŢ}ܘ> IRhhʷd2MvRs B$w ̈́"+#eodz+XJ!k鷟kG 1U3w9}L*%'o?{:)^M7NI&yZCV5;;|3 q-۷Sk?.<<9( j6|ȿ-|a=>ی^/MqS}nw|a'0.t~pE2^r@}ݶ.;%̑yp\Z&mgHIMi;IEGGy<<<|ѢEB>K\f?bʷuӾcٛu]w5rvqi_Dwc {}dddzz;KEf𹒒Ν8q"66CO=TLL,9E_4|j^pAB<,˗.ɔ{YZ}CB~9}ÀN38lOwS[wx5 9uzהz^H֖i(".m%--imw\Qq !$ O_r  "  RMv?w"x橿Ibp#^[%¸/x)݋k|dF$>p>C 稚k~~Q$)RǗhmhy&2]4;Ԁ4Q60c@Yƕ "oa{=yP8fP$,FR=3qFOg TB('t?[醱JK'60tB!˲$Kwݗ?[S/[Y-!lu% tݱYB w7rֲ~,wϟ9sfVVVsrr⢢7oTa(ǣ6"ߏqR-,^D K/TZU*-">?m}PwLcc)&^зANżԺyEcہ6Ft~=XrDQ0I9?|.޲c.-~]H^cHqXpaUcǎ9Js6m%jԨQ!!!JW\d7s I/~ ]^[طڥ MyBq[ۗftSۉa]Z?qŋg;3E{f/Ol΢ӷΟ|m^]4 !-?E٬iUPjKUz<7GY#/Tݢ/w 6\pAaZtR6+h4 C}oٺgʕJWᮍ7(]E\|yӦMJWDXXXPPUoX,JWD>0{!INS.wq)wݦn{qf#z4KM帽y}!t֫n^؅v*b iX$կs0CgCuٗm"3=pNiǎb:`1WmڱϨWJ7yf\绅61Ԋ:y{yҥKw%VZ>}RL&T7jذa\<)))JW_SJ .Q 4%2,M>jЯ?p#rjʕ܏ i.BaB2B!Id2$!d9ޖyiܪ5 \iW-B Gcmm:E)VKVjC7o^n+tܩ|EDDdddpTn/|gYn{i.de8ʕ+ީ(|QQQ^L䂧F,bx7 IBBBRSS.ZqOksԏC!fM5tB8.]H ; u%v:fܚ^;BB3fB9:t:`pg LSHpV[ mRf!DY/ϟ?_֭[W_9}K.=qGΝt`0Lt䚗2n(pXϼCRp!mӾ_R6~!Su9[N>,I8Rq<5S)v-3Q%`^˔QBRv|kR腐IԭR}bt~atS'z{yjժϯ^"U~L:~T7:၈ o<6Wp\t K=Gt enfB <4Y.DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA ƞ={z@xPDwpERNE<̫^JIz cj x\/.*bP~ˆy,@y? #xe_^1Bpz<?U tI(0f\v 21\qd3h(*<pUF{aqØPN&f!@ Pz #=t{!\u̙ˁbLZ{什Ϯ~ብ)ޚ?>'>wo~pѓ_N{oT9V?rݘ >+lC233? 6GZh1jԨU*[ԀGM):`Fz!/m|\o6 ! 
Ma]aph?!\f_n[*={&Ote;S}峠{歮3}RpI迺}L{ݡW^u移5nK~9|QE~$+{x!jMKKdgg0"7n\mMj V:qJ*44tѾY$I:{|pppG1LFQ{W$IlJN3Ͽ&{{xnMMRSEn\yʟ[hB!_IM?Rkt!7#- BgDB׭[OJJ|w /PbwmM&,999Jaz^QسgѣG>URǎFI}ȗ_~+B ɔU(F$Fa9vL&{mŌ4 tV,=y촮&MYggo$d!,3\Vdrv;67M^guxsfʽsRy3(]BT 0,ʪBN:uM79~͙3_~Yxd߯VV>Ӡ}Oj֦TO||W04}zycF=‹ծ9ّ?9bNwN5k.[,""F7P$Two 9'3K йLOOȨTŝS|={dee5jhuԩbG3xWbZ9i$I/?۬"&),,~S19#FP /A<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<DA<$ɲt ꮻ{ t!ps=Wfͱc*]2{SNJZj+VP\eP?vTBBBtRBBU]gϞU \Sh3kܸqddU]uԉQ +&&p(]ٸqc5q " Cr0\Ӱ'Gw1,|//x2ޠ T.} !E6eѣ*yJ)b81K 0\hEIyS W6}<ﯪOzqS,oZ*{߿v.ZϱZpr߂G%[B$!$IgQ˝!2o9! !|eǢn!S Nї.g_;تR_6nO}yhߩ=6š{s !gOUFh0I )bj#h4c|c-Qz0/|9]LO؈3ޝgOͻ!*r#5H~O @ sS8TKN {5E1`_*9++l jʒ| ̡B|uݵ Eh  "  "  "  "РMVIENDB`distributional/man/figures/lifecycle-deprecated.svg0000644000175000017500000000171213703764147022452 0ustar nileshnileshlifecyclelifecycledeprecateddeprecated distributional/man/figures/lifecycle-retired.svg0000644000175000017500000000170513703764147022012 0ustar nileshnilesh lifecyclelifecycleretiredretired distributional/man/dist_degenerate.Rd0000644000175000017500000000257113711717775017655 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_degenerate.R \name{dist_degenerate} \alias{dist_degenerate} \title{The degenerate distribution} \usage{ dist_degenerate(x) } \arguments{ \item{x}{The value of the distribution.} } \description{ \lifecycle{stable} } \details{ The degenerate distribution takes a single value which is certain to be observed. It takes a single parameter, which is the value that is observed by the distribution. We recommend reading this documentation on \url{https://pkg.mitchelloharawild.com/distributional/}, where the math will render nicely. In the following, let \eqn{X} be a degenerate random variable with value \code{x} = \eqn{k_0}. 
\strong{Support}: \eqn{R}, the set of all real numbers \strong{Mean}: \eqn{k_0} \strong{Variance}: \eqn{0} \strong{Probability density function (p.d.f)}: \deqn{ f(x) = 1 for x = k_0 }{ f(x) = 1 for x = k_0 } \deqn{ f(x) = 0 for x \neq k_0 }{ f(x) = 0 for x \neq k_0 } \strong{Cumulative distribution function (c.d.f)}: The cumulative distribution function has the form \deqn{ F(x) = 0 for x < k_0 }{ F(x) = 0 for x < k_0 } \deqn{ F(x) = 1 for x \ge k_0 }{ F(x) = 1 for x \ge k_0 } \strong{Moment generating function (m.g.f)}: \deqn{ E(e^{tX}) = e^{k_0 t} }{ E(e^(tX)) = e^(k_0 t) } } \examples{ dist_degenerate(x = 1:5) } distributional/man/scale_hilo_continuous.Rd0000644000175000017500000001143314151532232021072 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hilo.R \name{scale_hilo_continuous} \alias{scale_hilo_continuous} \title{Hilo interval scales} \usage{ scale_hilo_continuous( name = waiver(), breaks = waiver(), minor_breaks = waiver(), n.breaks = NULL, labels = waiver(), limits = NULL, expand = waiver(), oob = identity, na.value = NA, trans = "identity", guide = waiver(), position = "left", sec.axis = waiver() ) } \arguments{ \item{name}{The name of the scale. Used as the axis or legend title. If \code{waiver()}, the default, the name of the scale is taken from the first mapping used for that aesthetic. If \code{NULL}, the legend title will be omitted.} \item{breaks}{One of: \itemize{ \item \code{NULL} for no breaks \item \code{waiver()} for the default breaks computed by the \link[scales:trans_new]{transformation object} \item A numeric vector of positions \item A function that takes the limits as input and returns breaks as output (e.g., a function returned by \code{\link[scales:breaks_extended]{scales::extended_breaks()}}). Also accepts rlang \link[rlang:as_function]{lambda} function notation. 
}} \item{minor_breaks}{One of: \itemize{ \item \code{NULL} for no minor breaks \item \code{waiver()} for the default breaks (one minor break between each major break) \item A numeric vector of positions \item A function that given the limits returns a vector of minor breaks. Also accepts rlang \link[rlang:as_function]{lambda} function notation. }} \item{n.breaks}{An integer guiding the number of major breaks. The algorithm may choose a slightly different number to ensure nice break labels. Will only have an effect if \code{breaks = waiver()}. Use \code{NULL} to use the default number of breaks given by the transformation.} \item{labels}{One of: \itemize{ \item \code{NULL} for no labels \item \code{waiver()} for the default labels computed by the transformation object \item A character vector giving labels (must be same length as \code{breaks}) \item A function that takes the breaks as input and returns labels as output. Also accepts rlang \link[rlang:as_function]{lambda} function notation. }} \item{limits}{One of: \itemize{ \item \code{NULL} to use the default scale range \item A numeric vector of length two providing limits of the scale. Use \code{NA} to refer to the existing minimum or maximum \item A function that accepts the existing (automatic) limits and returns new limits. Also accepts rlang \link[rlang:as_function]{lambda} function notation. Note that setting limits on positional scales will \strong{remove} data outside of the limits. If the purpose is to zoom, use the limit argument in the coordinate system (see \code{\link[ggplot2:coord_cartesian]{coord_cartesian()}}). }} \item{expand}{For position scales, a vector of range expansion constants used to add some padding around the data to ensure that they are placed some distance away from the axes. Use the convenience function \code{\link[ggplot2:expansion]{expansion()}} to generate the values for the \code{expand} argument. 
The defaults are to expand the scale by 5\% on each side for continuous variables, and by 0.6 units on each side for discrete variables.} \item{oob}{One of: \itemize{ \item Function that handles limits outside of the scale limits (out of bounds). Also accepts rlang \link[rlang:as_function]{lambda} function notation. \item The default (\code{\link[scales:oob]{scales::censor()}}) replaces out of bounds values with \code{NA}. \item \code{\link[scales:oob]{scales::squish()}} for squishing out of bounds values into range. \item \code{\link[scales:oob]{scales::squish_infinite()}} for squishing infinite values into range. }} \item{na.value}{Missing values will be replaced with this value.} \item{trans}{For continuous scales, the name of a transformation object or the object itself. Built-in transformations include "asn", "atanh", "boxcox", "date", "exp", "hms", "identity", "log", "log10", "log1p", "log2", "logit", "modulus", "probability", "probit", "pseudo_log", "reciprocal", "reverse", "sqrt" and "time". A transformation object bundles together a transform, its inverse, and methods for generating breaks and labels. Transformation objects are defined in the scales package, and are called \verb{_trans} (e.g., \code{\link[scales:boxcox_trans]{scales::boxcox_trans()}}). You can create your own transformation with \code{\link[scales:trans_new]{scales::trans_new()}}.} \item{guide}{A function used to create a guide or its name. See \code{\link[ggplot2:guides]{guides()}} for more information.} \item{position}{For position scales, The position of the axis. 
\code{left} or \code{right} for y axes, \code{top} or \code{bottom} for x axes.} \item{sec.axis}{\code{\link[ggplot2:sec_axis]{sec_axis()}} is used to specify a secondary axis.} } \description{ Hilo interval scales } distributional/man/dist_inverse_gaussian.Rd0000644000175000017500000000165314164726426021113 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_inverse_gaussian.R \name{dist_inverse_gaussian} \alias{dist_inverse_gaussian} \title{The Inverse Gaussian distribution} \usage{ dist_inverse_gaussian(mean, shape) } \arguments{ \item{mean}{parameters. Must be strictly positive. Infinite values are supported.} \item{shape}{parameters. Must be strictly positive. Infinite values are supported.} } \description{ \lifecycle{stable} } \examples{ dist <- dist_inverse_gaussian(mean = c(1,1,1,3,3), shape = c(0.2, 1, 3, 0.2, 1)) dist \dontshow{if (requireNamespace("actuar", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} mean(dist) variance(dist) support(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) \dontshow{\}) # examplesIf} } \seealso{ \link[actuar:InverseGaussian]{actuar::InverseGaussian} } distributional/man/dist_exponential.Rd0000644000175000017500000000110013707412651020051 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_exponential.R \name{dist_exponential} \alias{dist_exponential} \title{The Exponential Distribution} \usage{ dist_exponential(rate) } \arguments{ \item{rate}{vector of rates.} } \description{ \lifecycle{stable} } \examples{ dist <- dist_exponential(rate = c(2, 1, 2/3)) dist mean(dist) variance(dist) skewness(dist) kurtosis(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) } \seealso{ \link[stats:Exponential]{stats::Exponential} } 
distributional/man/dist_inverse_exponential.Rd0000644000175000017500000000143314164726426021623 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dist_inverse_exponential.R \name{dist_inverse_exponential} \alias{dist_inverse_exponential} \title{The Inverse Exponential distribution} \usage{ dist_inverse_exponential(rate) } \arguments{ \item{rate}{an alternative way to specify the scale.} } \description{ \lifecycle{stable} } \examples{ dist <- dist_inverse_exponential(rate = 1:5) dist \dontshow{if (requireNamespace("actuar", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} mean(dist) variance(dist) support(dist) generate(dist, 10) density(dist, 2) density(dist, 2, log = TRUE) cdf(dist, 4) quantile(dist, 0.7) \dontshow{\}) # examplesIf} } \seealso{ \link[actuar:InverseExponential]{actuar::InverseExponential} } distributional/build/0000755000175000017500000000000014165417462014551 5ustar nileshnileshdistributional/build/distributional.pdf0000644000175000017500000115712414165417462020313 0ustar nileshnilesh%PDF-1.5 % 2 0 obj << /Type /ObjStm /N 100 /First 808 /Length 1045 /Filter /FlateDecode >> stream xڝVMs6 Wู$79ŦÕt_qS^z{=x$%A%R%YR9E$oIGhVp$@“T \!aWxT̨H<4iW 'd($<ȲӒ@V1 d4#/Ak-ȃ5zb Z0,*DssO4UHHp8  )|Ѱ4 68>FXO$a9F 4AXcY@p! xǡ맦/=t d *ŎUVw}[WM=si(|ئM"%6/o)9K};A3} Eq؍+C_2$k.WmٙƾśLsLZ=y3ЗCn9]m$,5ǘkZ.WeZޣgxĕSSJǗ6bʓ\eyxm:K Ch_,8 1xjW"tOOcw ӯ{/|!KC5ı9h6|DHǮ蓓sv <uιw|lw,ͽeInv_ykٝ$'΁ [nIWB5-=}?5S92[=^FT,3)zk׏=u vəF endstream endobj 298 0 obj << /Length 1147 /Filter /FlateDecode >> stream xڕWMo6Э6ed]E:4JKFUJߡH)Ĵęyp8^aK{8&#E^Lޟ4{[H\iwK梢O˿[>JoaHbYghPj`BZ+EiLbh D!,6tKdcĔ(! 
iMX`+wR莗\;XW,60pC8/T`=.H:gc6o$PQH1>`_^>ؕtBőVnJyU\0N,lgZrj i 4]cYrr1ŋKB ,ŒDMl!!Zg'6nt;gFV߁P}`}Ub=G(.8b(;đs Z^74]Jҏl9wX=9tE c!1qpb|9#<<,X$:fNjkJuw7U+r$ޔMiJCa*3:D9~2q4y#H TrZeJc_j\^rR}Kf)lqi:)5[/vMonnW~cc-VaRDҠ 鮎qlm~M{Zocfp?D}Vn.c%{=sLN|.rw/ݽ+<>gmI&JZi㚪a1_L31Y]z =d şlaG_D $x9¹~z endstream endobj 359 0 obj << /Length 1067 /Filter /FlateDecode >> stream xn8~ %ͱg`(m⑨^釔Ig!cf.|gYB.BїgGeV "6e((D&>sr6QD,Cг!e& z;@UOIJ8;ka?BL~AJ)ʂG))2B̏<@&{9L$?]7$eYrzaey<ȼF/[)E1}4x5/)&.Na{Bx\ki&D%r s5Q$ųVCίwhЍ'%O vv͝2ȱUI*?yGv]%f?oC:UF%"_l} ztE]iiӛ0z(oJw۩'VCk$|`~ݬiя=$X-j}c9_Vdbȇ#ic endstream endobj 203 0 obj << /Type /ObjStm /N 100 /First 861 /Length 2016 /Filter /FlateDecode >> stream xY]o}ׯc%g_Q m-P8)6coc䫏&3+JҶHF !9sfV&GrѰǐ gCޙLNG1^q0>s _ 6!Nؐ'94;6ދ;FHl$b$CB&0M 'фy$Ϣy&̃I٤vŤH&3M(c4ŃOL`.JK^ aa VaZJY_\\`'̌ MXGNƓ.8\+8x*R0*¤sEr+%E~BEW ^9l&"&V8Q'ΰIj=c}*.L'ˁxG3cYiహ0u''.W4A}2ݡ$N,`LGH4aFrB vÕ@ 8_l&#PBЙq6!%5.0FD'~ˆu uq\DTL ^̺ صtԅ&B(a&=J@0K ał8XnF0PtH, [mΜt}ggZt4bzZrҜfyծ=Q'A(R6 @I[ok=Q ~;^L>cMovM.4oqb}歧` lQ9yjll^oޭ06/:ŕb=YV_5{aig tv0ny_ƗXj$_Ft}y1|/RHi%: 4e 6퇢h|գnզ]p?L_~͔bQIofShyB`N%ܨ,ɁhCF{2MQx5?-W[l'D⻗y~Z{{GXwuu:rPNJ+xQţG*UoQB'ps@㒹O -!$9y ({<>H>):~ S@Kzx )|gdOm)+S_ dz(+0wlBt$d iF _ )= i9DR@W( BR,_D>{)PjyZ-b #>hb2)=D8f9ʋu>MCy1Nr6t4Ph|Tl跁sTǧoꁒK5SO dyH dA[~[@m=W(&鳂)AW(WN8o>PC"!ò XNi9$R9eBM/Xy`*IiAAKWIb4 9n|$8>-_$u>?QP"-;9\P,_}(u <~e'Rg%9~2 endstream endobj 428 0 obj << /Length 1045 /Filter /FlateDecode >> stream xZKs6Wp&J3!7C$Kcx< IP͇ z12 . Z0O!%K`^ 1zĻrv7|q̓=Q% $E3"l5{>!R{> #:͊*>E3_`7>~1 \v/2<H҂=P򁒴t1xi`: ?Ŝs0/ӁЯrߟ{chߛ.!!K,]E2pUq`FGyOA$u!>I1s[xSbLu(8-BpDsK=#bj*[ۦ6%T8`?6Q^)zX(cb]QzMertp8(tgm2i[gr:1! 
W6ڕ4x|RFٷ%=>6LDe屆1B)=Rn~mˋk.>,@=2$'>;u>swq{eF5`0=֮rvEn*.Ӈ'D &ۛ:ZdHC ,_h{z!G߰/;}-Bbu~?ovy4.C'tú;ӆ/YJ{Dr$t"[-GBA 'B"-DV{4٪輮~H?cq@$=R#ePXm "/߾vWQݵ4^wzm'(M䌩ڕ>$I 4Qfg |;[:3.s|M ܧe*DaU,]֎XD4{59ͬ *}}2m(BN o~V endstream endobj 453 0 obj << /Length 776 /Filter /FlateDecode >> stream xVKs0Wh&LPqt4)vp' 0NtZv~OA}_ήE5`.\K2F7WגrF'%iDM`3iUQnte3b횴,T1;Z~(MQn Oۥ9bOp3}mf%yD`\|r%q27˳ф2BǤt{PgCR}]q>&IJ#QO62{F6I7?[ϗk=&PD 4HQ8:$7dKOIA$yI1Wz yhiUV<uM5?,ر]Pl靱Y03lA>vjW'V'}ד\BU\$'Y]"> stream xWKo8WaeIE-CM-z$. ZmJt-rdXoxuf/EiLcfQ^cԻɽ[?e#Տy(jNP*w7a%1 H_a+fmF9J! -nﰗGRݷWw^HJ»$`3`KpNϢgcnat&l|A#yՊd=nZo>18Pxy8^hZO V0oAJXb^+*ĝO Bs1hЉzz5_DׁYwBm6DVh%p(J\8~2W~aR$c$IQ@ Q4Ȣ/8\Y!dY.~7ĖN<3zN9DG~6TlIvk]hшWۂGBz}%KeHn̶TYj?(D9{E uDs +ČK@ 6<ߗ:܇D:ȹ +uX'5^uFiu #4Y p P\Ӆ084E&0]k?p/KfɪRP,qy; s`Q|E4֛JH nPA%ATmiyEK^MJq6*o~^._7h'UEQmya X>g„ӚR(x{Ճsv< at)3Q.{C8XW`{F$ L("ExBC#ΰoY2vf٠FW}&XK0yb*%(vQ|3e ?-)񯫝S5 Ƙŋ2 0/fZiv~f.UQBfi sPqS 4<t3!z_Hrg(lT뽧> stream xڽXo6_XԖdK (ЭP+ŖXR=,ɔӬ^L:(l:^^r(Tu@0F@Ib⷗HI lUVVߥUbHg1P͡*i.yĘ C^mf]rmjN/vψ9dwsúʊf]?etP.ӕݭ xk<989arȒmZ=ò^۷c}&ٶ>F/"BX+Q‹2- 48DrWyae-ND~E U{/YڮYYzp6VNC6V{K2BZJW 7Nt*qlp~W΀}o.izvi)Z,h.k_+C~fCIk 4.K' "@!#I+%šYqo8TZUGiсu}Eb9Cy'N;>$l|FC!8v6sK*MrC:1LcQǴ)2`Bsܠc,D`o:d F聶% ++GMgу70aX^D#lo]K ,ޣZ7p1}Q(ASQk_Q(+׻S`X8}1gwэtW1=)*K,%x4/a)=&ݺ-6p,x{3%xZRf:ɫl;6 #՞,f38 '&b0.wpt!J[\mcA{5 *7G*1ih6+a}|# FKZ.پ?ū몋WIU˼PUߖ&NؤCySӥ _pWs:y endstream endobj 499 0 obj << /Length 1420 /Filter /FlateDecode >> stream xڥWK6W(kFEEYrDiw%YmZpߌh%я;F99!IR#Rw]+t禨c!0ф# ,*0uIj}xZ ͢P&rEX6dPsQ1Vuxdl%a٫dy(Ӊf?c+س"`Ywܝxs?<ʦV=G?9]ֺ:UFJ_zrM+~-( ^{x{HHtjrjTDX]/, ܃ q.qqwD;(ͳ%= F]F]Mscdp#82aو ` xr4{,JSoMq꟟:f :w[)ս|r=\#O[$)V>/NM Fadc %x6ۃWd͌c&(h+ ѣd఻ďp~vH"FI( {κҋX6hwrBQFiuᄪkGGԕuJUx@w Z`<,*xӧ2shF>,f['σjlURL "?@8>xl<2?QB@ԙQgaڷ6l0pԺغ6K=ybƲ# ="{)9℞bN yME IHsu0$l I }l2 f˜BD_ڳ =k{(r&⟼WT@4_s?kDt=`੨N{ƭ ,uq;xLҟ` `l<> stream xڽX[o6~ϯ "/a mvam"K$7~EJdI @:<xW/n^s9PpvL FgwqkzL(biH6IUJĩ!NH1l\Pid`7d588I(FQ?cn]g'U:}tt̩>͉ƒn`?ד ` ae4ύk7K"_ D8AgoIS!fG ^f & H{<+!nB Hɰ微jysS!{>.$ ƙј7M9GXucSHp資.:zog 
;K;ϒ"zI~V04`؜Az̏v).]ITj,C(Vl-͢΋qxvqۅ؟MsF{&D,*8c̘8X%j0-:64mjPٍlJWZdRMZ/-H9dw\6 ï qrѲ0.N&ۣ*mM%bDBߣצJ#^.R,>8ӣlv;"2&J:YJl}}# LH]$] Oi.qNMӨ( Ch#&Ȩi/UW)L{4Gh)!h* :CTy#=e/;Ay}!9WN=ьx)li 05v%h'A/%2ca@Ð%cq{+Sio:1~' sЧ,H(r¶FS@4Qѱ)T ?s2rrV1(N#Z!RX {%F)+JgTӵ́W6}!-pDrNTU&cۋ+cnO4YhvqErAL F,ATPyX1>jj}1b@A:I2q%xap*1 XZ,è  >R8) aB괙gpw]rGI@baW.sME_YwO>Q:O hk(W1ޜ`sam T f!.38@ %$gԐ8"l_YGU:x 12|MPQ鄲>E(] xˁ4SyNGJ(ty{Q3 M 'y6$b8%CpHk$ +j| C̃ [t 8aތHp@;s#FA!. ]ThR*:4AD/гw 6Ja*K&od34~}幙oWl"ncMCj256WprPL^ qŽw<4^؁,ŵe|Z nX71i=dU#"{VKN{ci>Stս~tU=Ew,JtΟ;R6ltV{DO wc{wכ 1$쵯oS endstream endobj 401 0 obj << /Type /ObjStm /N 100 /First 880 /Length 1938 /Filter /FlateDecode >> stream xڽZo7~_rd`HWpwiGm:R`+w߬VʖM\jvq~(䂋Ž"cbşV581r.qv%E T)`X ۤ8R0L:oSpTbB$bT:H$SrR$_>i^,o>n8exjA|,Lȁz5o\5/ݓ/f>SC*w$2>(7RE:>ļk1HغP /ݻ kZ}Lj^ez}-&/U ".@#w`,W7˫735^^쏕{/W8Ϥ;[ vjvvT GAV7`y[ln Yz[܍zЍԍ܍S7vG?QǏ;~wk~G5ɯڸrƙNnG=/ ^`[|:GG,3CkAR`8>Uyx4C1tf UQ[ ș10 8QHCn1Ka@B|  n({ )WW2 d(dC@)A "qaH 0~uSa@ef82Fѧ0Py0~SaMt^ =SJt I3gU ]h4ikhGiPc.Щ $k MșY|DGsmak.:2MX\aA={=b{ѮL˜=;vRA}_-;OW3q~/'.L7f/pmPu*Nէ>0{gXQ@X?:Xr@68G#>81 4> ,fX6(T6aWMhs<셞{FvEcQPΨ{7mcG|<^(vmwQإ-VXxбNj!;K{EA"Yɧo|uiv}4o~s~7{p|:&֬(VG1$0WF (n8ňĔP $f(@bm-Ɇ7}K[H{(V1o>@-(/^*=1'}O5kջ9ʏ@Lz>d=Qכߜ(qޥ3KJoD/{l2r7%s{Ml?L@(51j}l5[oPOpwXQIKYEVFHP#GEi d>ցX=wNw CЩsx50pIWe y8Mѫ֯f_Nޟn>}`= ^eӖ'sdd endstream endobj 520 0 obj << /Length 746 /Filter /FlateDecode >> stream xUKO0q#m$vP JDQ͚%R6 y;T<|3C er<pG[D ϑC%.p6eN}1P ǂ30يGZdB~BaK}2|L AKxXSt9m t% " `}p.r\kwL\Iv#i$b nL8#ȥ!=e-9zUɕih> stream x]o6=b1CR"%ChX,~w$%26)x;M䇳W7godLr9YM$$OnSFgo~|#d$eƀ,sZ=cbzWBx茺98=w<5e=ctzm 6:YJmv|D,/gs.ZѨۼ\7Vu!/Vصj@ gRLɈ(j$LXܬ3.ucn@G VR%InU^U66NUnd3sIcMu~;@uW%r%SUkXa!+kLɀ<6ͷC։?i3R뺃kY挑L8=VlZvl*`RWr&Sk? 
y$[15;W ψ֭ft.u-h:}E~(A5Gy$ g\0j:'Fef=w4 ȉ!#Fw!Dq[=42!4Kxc1|G0h̷Һεsa 3wq.@a=}7`}TKu5&e0QSYq F>I Qn.?c&=i6W;ZEVz̋%z.Tqy vS%ֵ]m6u1 .U.um%8PƌQΌ,;I9q+  ExʧnFՌ0[!}RP]oBefes\ P8'nѭ=ttlJnkl5R Ϋyaf|xaWl48T:KTl19)k"{zL;\$S^)HNQSEr|΄,pڄ".#N\ym\= @s[:>P_Kn a* AR:d=ɒDQ꧗Zy 1"J bmqS8bDy}섭#?#TfD4@ ΢'R ;&6}?%} ic<|+1xtJ SS7%ڕ WLJlK6dսnLS"WT4he$Ɇ ;ŨN}=4y6J/C#Zv&x|^DKAw*i endstream endobj 546 0 obj << /Length 1670 /Filter /FlateDecode >> stream xڵX[o6~2ICo6ۀْ'M_ËdIVaS|G8D8qfqq%XTREcĸH0ݬO:?Wq]a+z8&Hip & ?l% 'J) +ΛX`؋e"0eQ* < ]8"]RaVŧ/8Z×*b> ƻ.++$++.9`1.è`BRjVfk 8~3D0=[wU~8>6yY̤m%SRu)sHmߚzUnqe"'I}|q]NT"}t5W<,jm1+W 2^e;c)$]ǛM=|Hm $f~g\~xȪloS蜃߄Uw3k} v5M嶺:fTMG ƕ)xpFymYusIi~X U3>o~T&Meο7rt*V' EIKSQQ舎jW6ܛ.dcZܛ,++뒊8XRaQ8X5&Ua\Ot=2ك,<ʢq51@)nO= (/Rc[2r]?4[ ;Z+o)RN +d^OdKmˋ_ϛ:Snn P^Wlw1A9š_xzoMe>#X{GTÄRē+!{ ngvBzqk)?&nvVN{]}{jѴx*ˉ5m-T:IO.m{6V P:7X>YM]̟1^RSIeL‰$ p<ʪp,U/'‚DtG  ֣'Tjk Y-l&SpJh}o0Q`ǖ^& Y8Xt*9Y0ieJ[ٜB'& 7@ko|Gޠhx,V']@3 19D90'^;7ӃöP G PG: y_A4\,Ώ4 ihd::|IT (&{L(Ck)¼ATTX m'*%(+׶'P`J >2a=+ )Oѓ'2g4pHʞ@vruQPB*pݨ X&t "SrM2p;'8!NĴ&K ]^#]W*$f5G ÄAW`TH)m}0E?F]8kSŠGZaE"QW$[cYHZ5F1T@03NnȂYdXEp{$d}GvĞ~Q΂|OU)Ё2Q6.$umo˟"srJawO& I @ޗzY11=[aOl{ogCeXߘ@^xAzG~?' ]U\iUZ/g endstream endobj 558 0 obj << /Length 1474 /Filter /FlateDecode >> stream xڕWێ6}WF\wC$E hN[ LʒWw(RWӛ!̙GG?/~,n1%(DD]c(qJ6w$2۬nMYhe[bthݝZi7ٷ_zr`*Z a~ktsz8/Sde>v~7n1 |Nung 5Q^'nhtacϺtrF` Sp΂rc0nz]jL\㊸~ L N&Aa$"8:#e?h A7ZcIF=\%v[Q1,S | %*ss6,֩)jۏ? Q\|~EU,8?X$ 1(Eizs\H>}Om+ Cf̐7;Ϛ"=M8WƼy}?@Jr>Bvrl7&yJ;{>+T4s;Xԇf}ql=u-yqҕ>T/ wr9zc/Ylն+JEKqn>ַ'햻5OH"oC+[Ɋף9x{{غSa2[ (iO&t{3OϞYy`YoP`5mם7;b_+'V'^V&-P>nmPv@m5-ح ٶ _!%EGCӜW=:fMz0y^eOYEϭR0b|a5]{ l4:Yj /HRJ%蓚CV0 n+fq҃put22ږTYhEb6t76,;B禃 kL$bb薻Ph1N;!xPN!# PUӌݝOj hoU' ^%c]c 73ANLT{㽩.L~Y9a0G<9nbUB~.jfV]Jnt)$̭atP^gUGi_":NhvdBi&Axun8݅MPQ`j:TtqU*WgXp $׉J@@ύb>UĜSىp*Ʊjfk! 
5 /Wa%"rmSb~\HC0g(]o`&2@<)fPV!".}ȦTjE::Br_m{1<%%㋎^z yt T endstream endobj 568 0 obj << /Length 1420 /Filter /FlateDecode >> stream xڵWYoF~ׯ $z~HS@<4V"Q%HGl( ?|;; v]pD(Tm@0F,qF&oҪg,<):Yܯ>\ބ4Iv7i ;Ov%}oNϿdexi۷&_mᆌ#B"jDt/(Dk輮[6k65lԭmQ--B|Z  ̅pcI$RGL1db KU Ĥ_9S>)W//" |e(0 QH%0"@9ǘ R4 )"]^;PG 1z?!܅50н6tDvuvqOC 3Dh`GFv]v5%Psa#Hf1,& `酘P (쿚ȑ~ۄddB$Xx;5tlf8a'^:1#=GT ǰ"lى Cnx|sm[ml 9ƦL` UÈ- H/!I']V8o6ERY G7J6Hr=7ѹʙ|h ==!Cp'eB9_-d p*"jrqO9+dsv@N{.- ^Iʲ(}+oCj1^FJȖO4 Wpꈉ"Q8?a7[B. q.?qָ cU@VDwVYeYYht#pD`4!^' {<˴h|^ R#nR>w1s=[:=3?=qVB (Ν]1U/hdqLFTvP6WMH$2!mp~kX&zaUn&BaYm;RDpRA Of^$ # ޾}[Y9)ׇxeNzySxжDΎb1W3,51ߛ}{e/k\ v^Hd{tɃ2"3ݖB "BOܘ8qNU_yRU6e]T(pvҎ-."J=<1w׬uzzY ayfEa$/C;CSM.3CG81Le$ է0ÆTuuHwo80P2 VtYЖGs{%v!NZ}?R5Ľ{Zw˅`Zz׹rYAzWp]ϫ3$}5f$Fh7,uocS endstream endobj 581 0 obj << /Length 1385 /Filter /FlateDecode >> stream xXK6Wh1oR6"Y ,sm!Jn;)[&Sp gM_f?-gW/H*]D0FHIrXzR%P!̀MGΛo1JSyZC3E98T=iӼhFGzhA4ڝ\nM`םS:m=V竘ͫQ^=MyTyuT?4VWVF't N6e`ۧu3cW.:^n?mǔCk記@ ~+`F &v;St vk%պ@Ԧ.v #qTC;pw=mo7hEo=>Wތ΄br)9šu/8@1f]p­j[yfϱ'cyaZ o8[T+/!LxZLu ؛zZ't*%нO?,C{"Ŵ}- ܈xH$!O4G4aHQ=a6Y&n'6~c|Vw>X'L~Uv+S7!kF ڤe@ ʡ?ݒ3a. 
bq%P g UbL i'sr[ 50]E~Ӡlw2s>ҋ+ĘCn;E}BAԉ F,1$NO?}a#_;,H~Q|0"Mhp@: m.?r-zDnfq(gPdw59*=tCˋvY]ܮC%' ܐ #AP>NJ~38mD"%2AJC_^&Q; n[;|X?tc1ζH!!Nx^{Ӎח{Yn 0'+>[wIgmU˥ Z ;%uk> stream xW[6~ϯc2C-C;-3tx% G嵼N>ʑ;ptp|v,HK*>"#eK6.Ӭiݯox$1AUVd{ +G^xMc>y9)Ni/pHǒw.#xڭ*MfI \'qoegw#+0&Ϻה &M#-/LRZu۱̧Βrg&|0wir/u[5٬) Md헉<~ͫC7.ޥ~I9Ӡ0gqDx Pq+o(1N8ryzETv=ErqMQPgiE|7mS*-OAAݮ(^۬*/AR~Wp]AES4to_mnw[ۗT"AbM̷wȷЊKmotWN~ǔT^u{21 O}S"by bWj=lU=Ec^r*9^&u'y^XޙM\ J@L iJ|\7UVzϙ?%uR+!hf}UIݏwy!h!hwԴIv^GDemvUO$ʃo)v]{$)t[PܱM W3‘mO M:!wY"g:(07D9G^#BcDhj3)HGԐ i\f;YI[_Dݾ$>r{i]I_G'{ tmo!q(:)'kێ>,fk5df|K= &lF TKsH@`f\%ݫm<CıT>fq>Dx)fϽrp:>,8fCWb+A #-A Vs0f< 똩m+ v!x&@*Xu͕G ҕb4* (RRG( Q =tOsHA1؈Ո%C4m1Dk{UJ=@;2@H\\1ӔZVmvR׆$Ν# h?BY@#嵞Z6+{71 (ƙ l>P> stream xڕVn8}W"Ûȴ}I}cˮP[v-iC%YNZĈnsadE3\ȄXj5d$RD3'f/o v*"+ʯHMf<|‚κąJRetZX)V#z5l0Lh+;6 yb^FWucho$5H iIKtޕY|^fۼ~UjJMC)N3M$W#B`^;ۿL aa<I),N=!$B]|BA%2e<eLoY@D~b1d-8Ep@Ӧ_8a(SC _cqٌJbNVCb'R†fBLzK֗cJc 痢ޯ_5߯mOώWW|{wP> stream xYo~_Gw-Z`v7pw([%$o3"%lp@>$p8f.t懻۟XXb5׋D .~MEqpȖ'&IHj@c'R~ #ٵ^)?mV`hx-S!ڸ%W%SI^gZpcUYٺ[4.0G=2/X$ܟhwn1l1#S`3xe2%psk]D":&9-4Z5QL۹@z -!)ȸN2+RTkC($kcs"(?$.fg gRB(jz t«Ϳ /]Œ 6GߗD={`v}ܯ݅*\D6muIPyJ|XJڅIS qr1G3p~q"B p%"),]LfPޟd"foTшAL2LD@$E̞vlw5irI(cG<='|?}7>G~X8_" B&hri"@< iɠBXRsA]d.bQpuU}{#0(v)L8)rMGXyww]0ȝ ʨYhmb AI&šS'yXpXOstǒΚ8B֬X-2AvGߴR 80=h M}E* deH?F":҆Xk:`_x4ir5>kZc]k gcEvq¬IISvyo-J]wYygY!urwNMkO};*m]mZFdr ϗ8φl/NZ@уo`G\ !D@ 5\KD+gHsLoLĚz+,)P D,D" zr٧ա{xx(Sh9Gll$\|jBIfwsoP^a˅W12yz_s1;lcQA5 fD>Mmd(vu-ECa:e=TGW[vJCbz`m r|SbAHwKK٠#^O7U:~\ ͛7]K#۫_غDp_=g#x[3;St#;wF4~ voeيJ ar]4A\ي>`dȮp>;:}B endstream endobj 530 0 obj << /Type /ObjStm /N 100 /First 874 /Length 1772 /Filter /FlateDecode >> stream xY]O[9}ϯcxP%Zn%jH!hg. $!)$z|=ϙ;B8# yo8#d%xG]%&mk;I#>X11jw6$hS6١J)96Dz0/Pp# /%+R1tD+䌧H$`R#3H=gD*0!AUaV'"30͠6c(F] LE%E%0(UkKV_eU¹X]N^26!T-`C@@zI- j}CHJm&)c;_eL#$^EtA`ӉUnJ % wx&|QZ#bKV-A\TSqE{Y?E&N$D]'&29/!<;:GlBttTi>N)LFaC2JHJNqP39%r$e-+ &̓rAU#yېচ+-B$cv=~;Csp>鏆I;{q9|k/?٫7.=]5ɸF?q:cb̦L8fk|zxII 9=Y2}i6 Pl~#LǏzDi`\28E?%h kCۂ#8דzoMr O HXa#i`5RV̌˕#kbZK 'X i:.ޭցQ&lvSi]>]B ? 
6 q 4T|W_Oy죴[DT;!bX ku`_6̺e}xJ ϤUth?lD+dejQCH@õcYSf{wJ3FvwzT Ll*u&?q[[.֯ |:ϝ-W/7j?4n 9*UAe,$vhMJ0'Zԁ}6) b]>c? endstream endobj 626 0 obj << /Length 1747 /Filter /FlateDecode >> stream xX[o6~ϯ$c5ûtЮm+&m1(2c -Ou~ɒ8mb|ppt~O:"#eH"z5{`I&(0NfQ4K]u[?~'7I&|}=~JUtkE'%.N~MG:`$9P&Zt]lN(-UtV~.52)bJ:۞&m46hN1 ]K5X.٧yf'4oj\nQ]1k膥:+"'J}?zu7ӤRpWT,aL:r^Qh$HA琄T'1vk7oulD?(?fFpɿØmYBW+,ڕ{(+q Di:'Nde,:.'+-f9Hܺ͋˦I5Tx™AQ'&`B.BSy+j]$uG7 nC/W1.W4<-{J,WzFD|[̨?(TTk|Xeb>o/+$RD~m!YrBc4eU96_z6G͹ 2NT_CTnu򕛁Y%F8OnHN^C(s 'B"ȼ͊"};(ǵΫ5a-ehWfr+OQwҰac?}G*Un6_鲬VY=Yyz,Ôst?t?)MBSz>ǰ/k7.?Τp]HњJʽ>\W["p(".9O1 al}Ю|:,0gYc|{='fO$%C~"c{ h6ƯVwZϘ ѵFKsa ecfCKi9Ӂq$YjmCt3_b˘D E mM `$+Z{x1 RP謎~/:r7tWG(|YRdҔkI%/'eS9!=nsvv <0 endstream endobj 638 0 obj << /Length 1614 /Filter /FlateDecode >> stream xXI6WV9%͡E&A h'-0LBdˑL}ɒLO&9`&o+hƈq#hXE7U^7l.f߯^wcYZ VhxNc\߫ݡ+*Ò[}LI\`ߢ?뻽t^Ǫ)JAL)qH{6s"O=6g`ۣ)FBwYS(ƭ>a9AƠaj_'wM4}bUZWLyo?LLؙ&R,tp1CP@!s4 $ŠZV KWZ{ihZMe\f-[(uw(e i MEQn]wyBϕZDzrpҹÆS3MӰiN5[پ;TD]LCJ_?F@Ft]%Kw eO "0!ٺw @uw!{Vj*wnB#hfG&B1(bQ(Kwǐv ܡ"d!"20f'Y]ݵKyaHفK/c-N &W$Xљ,"zK8CY5vdJ'y@)]@|}Guʯ&:h OƼ=/閜"$E?xEs&aAD8b9ƕsΌA)mBW?1odn`Q9XOMSt(9a("7@O `ْ B3 -E0DƧ[X} Jaf`qҩ$X;i-! .#$pr="`E|/8PQ̘#@?`O`"S˼B˪{YiZ_p)Px|s)M+je t #5PN6ub!SA8 v*$p=Gq ]NO'9J"b@^Ni'(x ^ Tr̺Pu50 AB? 
Z1"Ь:ǰNu!yu\a m MVz^2' endstream endobj 649 0 obj << /Length 1001 /Filter /FlateDecode >> stream xVMs6WḦ́0>ԇV[99LA2G$q @ ʹg X}X>,>,~^-.nB%(h60Dg[{Fat&n|ZrAUK[/TSe>[ z@mqGȘX݋ňļ7JF4$ɭC(e@9'e&jd1s3-UU# 1"{G/mɖuYeKYݴezs58b!@<郹,v`>@OF9 Ϙb'iq؛\}G,?|]&f}2SY ,!̄q r@1pVTc $a Q,f8Ƥ!H3;{<]ؙz&??nv)a&rs ʉ9Cb<8S$Qi@Yh8H MPWS}^'*|^F"*2mZY]^/wV,ϐ7hン 8O'h I+3zw'a隵::=Nv?2;h_?HzuPXX/,5?wYV,S]S_KUoڪٛvu{8-*U%^S<>@ JL.TYgͷ==:vrEO'm %uMܝi?#GI#arCWUUvϳuQw|Ճyf2[5!ӯKRq7j^?H*E1 Tty^PG3\rUܗYf~'$x{>w5#:luo]+s HJ endstream endobj 661 0 obj << /Length 2083 /Filter /FlateDecode >> stream xڽY[o~Уݡy'dv@ti A!kdj]&#z>$jd>";DhHO4Jп6 1L\kx "d,I֍92*;P."TPDLB!M.u|gi\{ Ț͏7Dέ[7Vv탢v+j88gߒ5n>59݂Z|P Qe( 3 9CAאBzwk+GL5]wR7Ԯ!'Au+GZ\_QUt>/f!纯8ڤJNRs7z@tK"^JMB.׮.rPޟx[ C8>l1rbN-%ɥp&Jy6K}jI2{8nOY>(a9*b1K!- qӂ(.}e6 ZKN!EKVMbݶ'C ^rm-f.6mwI}8ͱ 3- (-Vz>#c)C=PJ?A9:;m ??CNn r3#=^WKtd+% L7y{&#Pi$t e IMyjD(D/rG% LQk ʝW~"H)#,ʪ~jBْVT[Q\=:RJ[yts$ȑN`d`gbę!{ ~JB#VS,V!}B!H{8nx>g%sd  U؀fg e(Z<15[8I y+)=0}j{C5ѽ<"6g`GL4hjj # r6zy(-zN6A\hL^\97P I]_zuQ N>6@4 W8ի3={.',ɞA!^T6ADF`myR1=̛̝IJI8^K>@Po[?,MR޽ՅZN43iL/Qj֥N+6W0}kP1̈́\DGeHB7t*(i)F 'sPc;6U*$ җH=Q| < K@H]0 ~^006-#_ q3}q B@I,l$lOT]!N-u ڭYK8<ۧ_d>C|S@+_:Hocde ^U!dT9d9[Rb[t +#"̄y`637%f"AN@B{gc5LΥ/K,[S@zE^)FȡlA_Gvi|paɻOf]Qkl.KM8)߭ X㯷wGL$+=L`&f_@addMB0{c2L2Bڰ4>]?y۾Dt:vM[|:?(C򺅦qFOX!e8߭if\/rn`<*دd endstream endobj 670 0 obj << /Length 830 /Filter /FlateDecode >> stream xڽVKo0 Whzth֦(D 8v&;i_?ڲSqV@AH~G-AF_&3G>d(! 
>341nrqt&yKSC&N3>N]AKMuz\gNX%}'_# K[&0gV;px( CBR]~e Q}AQ@y؉{B;ްs( \I(+*;٢s6N6U8vtnˈ),&Ə$c%Ha|~ GC:c7U1q+31Q{~4՗uJsK$)7>$e:i| ҵ}/SR,7+flGNK>5õ/UqY7<=ډG%"j]9zG`j>NI#lClkLw#8&rg.F۳hwPY#k|#ˤԝ8<Da;*dS~~`^)%ܾjż,qJ IQ*44#jRNiX:}}{9IM /?؃ w6_Yh623) ;ۀt}4ɳ!2K,/YQ'4>-ϗeVD篧J| -wz#?{VDJeC&d}}A`WZͻ}VX3^Y3Sԩy]};usALe'gqci\0.@Ϭ endstream endobj 685 0 obj << /Length 712 /Filter /FlateDecode >> stream xڽUN0+LǏ!TŁ.86}؊ 9'88حYj qDٶUD+; 6H(eEb8^DҮxͻan> stream xVKo0 D!U(Q{hBNiyY m3aY6Tq0fňŗpqxx(gm# +tc0Ǽ /O]{$ RɬKt Q%CDJiApJ}l-A+8@pW'!R96jccovQ(`P$_˪wh:P߲p!Asq1IQͳ`|Udhv?p|p3컼GUQӤ!#^psՈe*o4)1a׵嬡:-l~dR(%ў{[ ;̛Uo S&tRT" Mۺ鋿ԔP*:iڤ!9OX4Zi$Dʺ`-ˢj O@neivoBh\+UӺStx ;qO) ё.e:/O~Lew6|}i5>Yj"%R@=je3-0z(3CBܷ}n$ra> stream xڵWmo6_P|:6t@X4dŕd;,t@u;>w{;{?-~\/Vׂy1%##ƥJz۬i̫<4_h줩" lm.zyScʶ?W]PD"$cij3>4(h!BD=( <;[f<Q^1IHFV)MT?y~5˛IWW] }a"z1\wIC^$Z(|p'wS^Kyֻ lp__G i^wq _Kbl3]Zy2U嬍x8Tu5*U:{(A1ly&O/^-nw ۇ~I1),_ ?x^|Y.i@7({m#F,N8 ֹwbu6% Er ]QBǧ)p@뽭UY|U&?YUp]xJ̿ݵ&+c!DUMZg%sM6yVd#pȫ4^ iҤdșWY1"[ܷmo* zD@Zϙ9y:UmnX/7`~Zwy^yJ 6jf+J%ԿHl2LulӪPbH*7{, y6(2~0"c GӍ]%\PP(9@Oa `/) ^ӹa&GcCՒ1b0.mbaxD &` 6?cXD⩇烓DL2O{U+E4jwz"1eўH|#դn3|mCsP(08{eqzr OU>o4uB_vͲi,")d3)0~" *a * uۿ&۬ܙv׫mvlHoD@]8fpm߶j:CE֦{)˷YY$W/ s;\|eͩDnkE lq@Cf]fʟi2tsɟ˙ P|er:9"E|Vt&H!36B2Kf20Gm^1<|C8ԋ7ნqDp|r*Z" º9\ݘg ;FDn \TN}k}>|xpIH X߸VzJi^5CݢzipĮc t~oIfM endstream endobj 633 0 obj << /Type /ObjStm /N 100 /First 876 /Length 1765 /Filter /FlateDecode >> stream xYn7}W1yr8!ri-8)"P[ $Mg֗ZTӑ˃p8g՘\p\Eqe'3Dg)9e˂n.W9iLGWW#R{>9%C1$Q!$0h0lRсJDeD :TIZ.P\ L .r@Of5 241dcHjB )%Rdl¤mV:*Qs$.jC(!OA@Qhõ{u a3k#b}) e0 )ʥ£u)j).`*$XDbC1%Y\!̎pXhCRtP̭Z/Yp}R&.6 ԔBĥ!+\mffB/1LfX ˡںaYMQaSLY 2%{$jFbbhjRHb!k( ?>]vq<^̦ti bv>=uz;  p<!_P tG=!)}qzK?p޹u/ݣ_dXSa$5_ϫV{6ZNf]=\~^<Ͽ~gti8>9=Ygv'ix#Q*-P >Սpxxv6]N|鎴ꇮ]-v3{MmX H{R1{t&I^ڼzYB6T{ѱꎥpفVDeI ]pb/A@dI\v0tJ,{ٽFƈ3䜗\~ey;NF\֒}Z27ɧ1zE![t yu{Mbj><]q3&. 
~k8pci>867XDz%ihm|r+ѯ"uy!yvWEg4TGE6'~}zn[5m˨7wM`l53*f 9V~+9VvY[b+/;*DeGiC'r{ Ŗfxl {'õRX7G,e`EF(54ZLFHyXҦ l,j4#V;5bS!`.i…weNzX sR`*HEp_W¯gg-NޛkHEu`460GO%%Wans ~ӋXV_sk|׼5ߕRP!cgd8UC&`E .D`IRmTp͍f$Ro8ID Jb3H[e'SMKpZHk0־\\>^D숽tM) ip>Y~:[=fS.C? }lKBFN~y/dl|}6%] NP֚Vw7#T r#Xb5=`/[kXgF0|uO` endstream endobj 726 0 obj << /Length 1361 /Filter /FlateDecode >> stream xڭXnF}W죄J_iA Rph.HwhW\ Z6%pfvfYB.F[? JB!ݎ]\K!XP0 ,/YV~KNqgLU5ldH+_AT\cCMBP!$6@#榼/҈cEe }"vM)L:Ö kX4J G3 N!~Bxwg>1ItEէODGFKV4e29Sm*CwA[)FLqlh01 Ehd/̱0tu( ϭjl+*cu5'=ec+ؑ[UE|uT҉ 8UB!X@%|֠' ʱԥ5Y1ƹVj fVb%)bX꥾32>1iT5Kn$Qmnn`lSuI.~YL7oL[;at]vN+ZsàTK,in}w\XlqӛZJвc˿nVBݵQ:BKIi@HB   ď!~JETD()ecv[)%S}&UmlW}Մq?7ark3\Ÿar Z;*I7`C@oRDL"k*1*ggFv Xݻvh-^E`jGG cScʮs>40ƛuJՋ z= /Ȥ.,zyI&V %4y6~Y|s _퇷qӦ")lbDMsy\yO4Ns Rᶼ6IB?Ϫkssa[I?ݑS /5eeMՍ|.MTLٔ6 &m`Aܤɶ4R1CKKM%ܗþY\uN%<닣^_NA>^EX>X}Яd[dE㧃Ɓr"@8[mй %(B-F TMIFֻr2]j רYqۋB 4_i~:r{S$M]'|׭0n9-H!V$R/CQ2 endstream endobj 740 0 obj << /Length 1933 /Filter /FlateDecode >> stream xڵXmo6_23|EvsE!J,,7CR/ig\apFO~}"d1b!U*JM[)y.?^U2j`iy$W Z5]:%O]ahtuz=h51MRYF\TTL^χmXǻ"MhլlC+%j>r4>R4~ޖ뭥([o rA%c$Kd޶ͺ̻bX?Z\.N`7 ;3K/PnUI</ՖLJiԜzp3,7ł' : K13?RS f+k47mٕF< ~f93iQK/Yh‘"mB<-X<٢`Pcnu5u"d}wK  MhhdBKҴwax}lA?v>k,=iӢצL\L`(135jz \0A>6W_\Uے)Q,~]h \ Zԛ1ߔ}mI- F $QIt:xufn-S$n..%Z%@By[vȮVo *m +^5<C9NFЁI(I3W!PB`v\sMO(;?r KC`΁J̲7W]B*E~9Tkاf0>dJd!5a0S7 d ''J⒵ LY|އ2Yɸ+e2à02de{u*rLks/ $C : Iɡo& "%c>F l&K\< qx]Nm:5M_5vf O}3kYJ K'3,@M4(^G؇tf5séN& 'x$Re>w`W5<:儉Y@^!\D3=>g (On3¤E4%u6S^f֓ ?!H"2x!CW5Н%BhYƞ=J׳}9HVεncvxBcɆv  'hd$%tQr~k3 }kDjas30}e<.3Y}PIDKy([/0SQp ]H撥4L hqCWqwc'+δgR:Q)INyUϡa^ڌ#_6og^:c oNwi^S_2={Q |aOCPBu !N!~w#g* g9 <@{jbyFbDo2"68~֜}> stream xڕU]o0}ϯD*m=tj:i&mR\p% llhpH䂹{|@/ίD0x0B((K@% W+t).ue'mm+Ly/q~%ӄ2z*q=s\yH~r|UUS\܍xu*ɭ6Ֆ?>G/[px7uf|;Y(hۚ-+S(h A&rSntp+i`wpBjx<Ε(I@ e2F!x] a O 6׋ mۋGvO<08! 
* ?xWlÊnq~J~ӗ\%oFcy!kQcC?5GIzӲϦXpeYՒ a=uѬ0Zf;= (b:ɺ 9f+]Ggy{[D;+nc"geBͅ.],*OO7ee',W_|[.L,>cɓ:q@Tׯsޖv }{mO2Mm*u0 J{>ϬL~ag0:n/[1Gƍ?w1Ix\vDnB07=E!^ endstream endobj 761 0 obj << /Length 1571 /Filter /FlateDecode >> stream xڵX]o6}ϯУ IQb]-[3l@[ BIrKdmA@"/88כ˗q$7E, 8:x}o^_d`  T)mq}s.:e'I?` /_J`#`E/w @0J R"G/h-EfZ,iCikȼm6MZPZl 4~LL$IPJR fMۏ"m@x$%}SM^GD4Ag+7jشB1jcГ28<|Zm} ~ez"7Wb7y,7{P{΂sH&@xÇdߪ啛UCN#n%`O7z㷴+ V2sَdNԂ'0$Mɲ}2q . $9lCP̋y B:Ѕ#r|Ŭ0o[iT#SpU{ܺmG5逘pEW:Id*ܙimaa6|WeW|6~S>K`C> stream xڭXmo6_`f5wQ[nm:hBOeye;,JcEw{y8X8x9fA"Ieps2%Ff|2gMΨ돸H*͋,hl͂M4Ӫ4ԿI5V(b6gOoHfs*4ӓQ~g&ҙ,Q;DQȔP  0R VAWXa1y>j!F®9d  A4kg]&>!RDE͢<#IĉQǣj<99 &VvAXEc*9%&_ ۀ#ӼF EԳDhZͻKr m>8ˑ <1uT5%̵ۧUߨT7v[0 TZTYYAV *bZ̈mI-eiFwJs;w(Cw#-A8"(.z.9xh=İ!S!UFx0eN﯇)kQ+N 0TbQ@W!]'Wnxlqa:@H ka`$ۡJ65Myq3{Bj#@s&m.]fpJ0Z4 x a N~3u%:윺N\2rgl饚xx?X#)y @a7;9b<w=5he_b^Rʟ/|to<1/vE~Hn@]kG3ߊ+ƵX_)k?">* D}oծ+'so> xN Lܼ~za$ɪ4ûFV=r,]^TKȵnwom stlg&^ڕyQvD@i 6lzzk %3p$1w?y^R8weOFcAC͝90׶smK9 V\5]t>\վrdH* ޕ:9RFm,5W;m:74^Wv~ í+/{p6ި=3amr7t5`HɲʋQ/gz|r0Nu /$eEwcMBֻ endstream endobj 788 0 obj << /Length 1494 /Filter /FlateDecode >> stream xڥWn8}WQbEEm\iQ2mKwɖ,p89sΡ7}9 IB:(%:QHIqgq܍W4jZeD6cޏ|HIZPdF dVx#3[n/x@Y]v?Gb¡~ֺv0Pb1ߨMfXS;mP~B|!{C BKwb3_xZVG+oF=JF~GE4o̍i2+ ߕY.rTGտ i -%I 'qIQލ9nr\Kru{8UsVq' Y:9G@pg$/Z5?;xٻpgtnL9HNEkSu;fV9z~#D^u6ΣAB3v,5  ~ }AL(F$y U C5$1C0fQ}ufMSJ眺m_ tE/G$+aQuZCNL~ӡ>/ɗybVС*.1j m.yZ`R ޕF1>P(类-+$nkYrkںMSY632pYq9ȪƀMQ\6.tQ^@l$h^;kqFc@ *sluUi4C 4rcihCcƂ$BaX Ԥ6Zf JMOheO+hčiij'jLz d <>W( Fl^4!$"_BSӰF H9Oyq=CcU}K-f/VҊhfXVBoSD Ȃ2^#j6aq R.U_)Q(d!k3q"ċ.x&9sV /VTax>I|DcZ`[k9q8 l:ЋPe_Rj` u+9f#4{4{2oS]e#.ضdm4z<#jҽ̲rL {"[>G+s+so{ǽ싡ml*! J`0fabX*G"޲1blQNXtf=PG}-Qq D ZbSfE `m!'-t?HI/r3H>p  /aQ{8-#[ !}vTS$سAy眍Auk>|&zF=fIpxd>yz'XRխ3 K`O%c7Y_~a` endstream endobj 802 0 obj << /Length 1240 /Filter /FlateDecode >> stream xڥWnF}W#Z_i(V BRNگ,)%( x\ΙFOF[?!JB! =DM$$ zUZ՟o 7fLI&z0M+A RjnZw1 ,hecv>2p X 4(&wµ}m$3ĜHn41)&wv{l" 808KP sjYi/ĸƆI/!8fHS6 Kj` &d ƌjAJ:kZs96[[16X+B唚XċtLgL(=ey ?sM =-:AΌ}<& f2c>=Ԁ&z^kGB /؀E9Pd ѣf5tOZ܎w\IctA5@尊cX{. 
xenSo.-Yxwt@(;QC7L(;eF].K\=u!N{. xRUU=) _wn锒KZBS)#@S-jt nSś5E5+`U\di]'+<j|,:ɓcgx=2^2`I՘H,TesL0(Wt 'MXRX&* ~Kb zRxSEs`BII C.ɥB~ \iQE{ȦXvgGW eda{xwXTQd4mv8Vu\W77lj4]/ fb޳oq$ՈS&^ʔػc}q3IHj֮Л/wJMZBUٖŢ ]񀫴{u-Ԩ}~φ2/~J$W='_.=ʺҋrn-zH, ½{WL':H1"W?~tz:"DN7AGcb0 {0BDž/c9TLHpv1S@fKk"yP{hO ]?~ T |R_~ߒjY۽cHN}/6ɣoG%S+@>}rT:b,P endstream endobj 813 0 obj << /Length 2406 /Filter /FlateDecode >> stream xڽYm ȗzгV/$i-RA{mK31br{KJhvkZ)ȇ tۻD)I%C(%"$p_ݿuSe?j$( HrC@+g#9fhWk;i&,2bSaUz﹜JTACxL*ώ8b4e˺>oɪT`a=)NWi>R]mmg-_ 1#H滐M$hߚ };KOWۼ$IHV&Z4r6&In*{a.;foi7< ?u2kc1 |>3 }xMfS]a٥}+@QYV᧺'tҺ/,~ʼnܾe;Q7,Ik?<<48x2;3~@YSd]nQē6nTFIh"ӑk4#CFm߸QWA䢜zi.f >kܸsv+hxfv}s;Y.ϜR 꾵ԅbעcYY~߷..BEq66eQHP7y.o;dVnςd8l |R~Aoܖ| c;=Lvlf/1'0ok(e%SPrLKSMegh}r;*8e{T) ,Hx԰ 84Pshmx:oos#aX0M]  V¯覀?KO`wK.; ko?NN7$ Txc{_;#7%œQTUjeq]bUA;ae9@ɧV=0FC% HYQ +{܀S ,k Z4]淯8 XѾޙO4= @47|)b՘]]wj_ޞ~}$%C^SQ s0*dP-E1@FY8փP8ҥءKWTvH T1VDIX'$S?z#D1n}Q` *!1JtN/Ng1P_7G 3 )m'RPJDj)>7߼aYu <4W_ho& ep *7,;ȉXOvB-nHNT_B^0@pe2GLmKXDc˜Ӝ+0L+aء>&V 6:^z!Ƅ }jMmFcsI;4 Tif5IBv|WPdBeއY3aw `\\X_i[gKsc ;%2lU6bw瀢J_I9 0t/`Z2c"]48YG ". 
I NqJJp` .#c sI*~4!.A#48D@t)xY)5C${ DCs2Bg d%R~ &g!?!v^8Z%$l,;lN:u}#?dx;cW֭iaľںC|H}t4U:ہ`t޸tHA`-Aw$%3ܒW01 بDuwEoc 278@+@V< x QV~b50c6lj7]^h\6ky\+4uq+;U\ԙP~d]Թk=\m|~^>3$jR~#џ=!a4pwʞ`h~d}PˈDz/5W~Zs魟*qJ8j\Wͤ\?{,Uu?Ξ$Z`2hSd_LKy:;{scLWK2ຶ$lm(Gu7mܺ~ş֮j endstream endobj 822 0 obj << /Length 710 /Filter /FlateDecode >> stream xڽUN0+LW BJxEɘdBPv$ԇf}}dY2b$ABCJ@.eEmuPd ~֢Fcd3Qo|=%ʺ$pi\*V 7>Gr?fU7ӟ̀ͳRL֨cMJ% Jc&/t>06N4R{7m w7ݦ_č9gScxHYy0Ó% p;lrRt}5ۅ=rEPms8VGu( Sk4-rFwNE=%$9*!u-!À2FvՇ88۪JG^\xzaNoAnj0=5 0!Ei׏c3R^W𤾆F*|r\ {*`clSo<ܬ/eսnRKi`ncz}}}E]M =* {mn/.Үx0je]pJ85!d>Pf8b78v,4~> stream xYo7 ~_E'J(vEP$-ȃx19Lj$NH~$&`|Jx.&3dB)1Fj/>uQ]L ypDiҙ#"A#Zփz!O494͏4w,y7޵n6ya6GG26Ĥ^i# Ϗ7aɹ WxKևde`*_Vo6ك4s$ YQ<;fw }lg`PAr=z@7rGkp) n>,^4ߺyѭQ"-)zi35ҏ.@{ M*le$ЫgWry9f9l~9x}/{MS{6&yy=I{֜6٧ }qO>Ty.|C쵿*LR !Iׁ&ڭ2mڭ0bC ʮ!!W~;r5rEy C9XMf6P &J82"pEaX R0OuDkne˘W17?gJNO`4N8X{`@q\ranMEnRar^ć4wv9'&1zVNb#orqcKpJ|%8J0C':0|5Pp.` +&ek-J-\km2~j$Y[FR;818W,(,\4WtHrs^ONOIkԔކw'!gRÂĪ.m&NZ{{'g_eYQf[8կV$}UH!RfZG.PT~ >@wQ{%ƶamO BraP)Hd(ɒ~lG?OO_ݟ1+,K\r:vEvWM\Rwc:L+4x̜)h;R]~y{[6|ÁgےV嬇`5ajU|V?USy}( NuUX(H\-/!면oKmKkJzIw1^*GOM|,!J0 b8~=Uu<ȓR endstream endobj 835 0 obj << /Length 681 /Filter /FlateDecode >> stream xڽTn@}W죍v] "!@ OBNKSҿgoNl7ZDyZ{=̜A`|gWUB0!HYD ̖&\ms5"ngήPHi)7!Yή4Ա 31D,w?G&(Lcã9b҅oR[$A#Yp` D$%Dp([TIۆa-Q\N}Ӣ}& r( OH0k"*,c#M BKZUtrjEwA`v&VvN'ަ 6Otf?~9F/P(dMQڔ՞tMGHI?s7BpO;"ôdjX@<|SǞ=A\h/i.$Vl?^7w;) {hwTDxg"J;_>`;}H02{޼ݦφ_oGc|v@ znq4)P"I$׊PQ_N;엷Hkn/.D0 7ILz.Ջ11L~Ze'-?K|wz0EW7cs2"]n*Xfiƒ<2I endstream endobj 848 0 obj << /Length 1373 /Filter /FlateDecode >> stream xڥWI6W79IMCIdIPm őL(Rf2I->ѯnWbK*]D0FHIm_2x`J& )HҬi=VYT%^`/˿6ftop7^ )QgSUe㶓ڸŮ6Nl/Ԙԭ}*5[nS6*B('KIbͶ* SofqڞMZnRG5oi%{g\_]yuH>S\yOt^HiD#yEgS `6DKHwt0'ۃ_I{p+=W7Le5n)AOVN*0eE+/^nZ?BzkE+ z8uRU֟ΒMnzz´`\'m# ~<)6iЊQĔ^,C?`*L yF ֎x6XsZHqV{XU,^|b  2`O3X!o)I$e 8CM~2fƐb5NV1^nM Y2gH*9M"zYmMg헾4hNK |DRc$/ )Jwk&@{<h80G UĤqј>+9W,%8A0'e`؎}$b$t oLC* 8,1^oviA>G"$PEL Tq! 
bwEDwj[&qTX Hcg|B+YBpX4Trh+b[hC G{ \ƠԷw5Ǖˢ ,0&_ a8eq"{>&4XsI&-ĺFXT64Wc_9K925gt&abc:R \kЏ?Ǯc&P1qCoYG>Zkee'ja{SN+~E` As~)-;W&1}V1(=|j3Asœ%"5"STle2s(U aӣ dX_. > stream xU]o0}WlNkV5SWEnR$C$$s@ 8wO( L(`0B0)`( `"ͬ*3)b^ܕ#WRR/ry+e nzyt^0d L8Ȇ?:OVMdB0_:w,%P[I tKb=ov򔈁< 1`A2ր*YI|!ʋkP 4Q{!/͢=ܶJxoh|$A]"ni:r^g:8Jk j4>wZ0HH@u\QW Eh)x#+aیC; OzR| 5QlQH?R4J@1t &1_U)&Hdκ](YcVڂ筐=Z[Ue݈[> .FڸIh >$偢ƠNAz{|޴>>2}z}L?N^ogr!?CS ]SeL Aޖ@ѾaϨ"S9Q? 0fl3bP]ZqlӼeBԼYЁ``q! /#{rP~seN #Sˋ&Ǣd[ 2{Ad7﹅ 4Y~V%(!ZŌƟZ5G/d endstream endobj 872 0 obj << /Length 1048 /Filter /FlateDecode >> stream xV[o6~J@E˰=dh3`:q4(dr"EԻ`Rwwj}Mc'EiDfQH#'0bal n#ɐ(S2gٝ ^w^|nzչ@U4{?բj=99>`I(eSzwMOT,j[7m&P0f{lބ$#LxV+${V\Y9%R~MnkB{Zq.ϧOoE]]nVb`ɫv \s֒C A1 a^:7_ٚe\D8a"3E q@:##fsn{SjVlN4aX-t0Lc7\8硛MmK~1~,`w,gBO]*vvR}Ӽ5Z1>gö6vMk&T s 3۷E3{}' R_-.G(@μa'dW+DLt#v[lT?EǐG{mA\(-okk|ib֣̅, C#BU7eT=T-Q"6jDؔ k[^,ww(k F9~74c SrxjsK)̥ M+UP]h}->aYzvrvhyTǥѠA1f/GB^AA%h&)T7Gfj?A%F͊&ivK=ˢXv  dCu>ܡxG/搵Y{6:Y~J~m;4:KC/P-r+ zܟ+/L endstream endobj 885 0 obj << /Length 1868 /Filter /FlateDecode >> stream xڭXm6o5볍sM*JR]ݦD(,l;~{Hj؞yUz)Jc{G0F!$( ʽw~^Ƕ;>voX2za*x*l<]٧" `K8ȀEZ[?/z~{z4c+m'by_n_ݻ^/^AqPa?VΎQ,#KsǡrF=pj3sx_Ev*ݡ JX{4P@;Ƴ|tQYʙ`/IЍ”{wZt1nB/şg] 4B (X(J@N)J9jճvb]-3(܉RROu&T\-L6$ϚqHD"∘maEa`XrOo[`:Si@-v ±nNLf7o_y,G>ol;$qb,4hhjXrq4X1r.4RAmչBJ8c!vy!b; sL:88$=[(ʱ0o4@ a1য়%Z.;4 ,TEt~3k',Fs΃ J3YmuflY#XLq!S^(B*th \D 8EfKTC(ɿBB/Iٯ +uk|5RtsP3$W6JV_QG.[@;8'=+ a'5Jƿ?Ε1mM&I_uE To)= Mˁ[*ޮ59Thes(n YpU5238agV}gAM ^ƪk$<*{cQQ©AmS6`C!+EPS|>} lɶ+mҶY&6MgAC9jb%IU Gke#uי.lFVC} vy{{(I]o//.6X$J(/@wE#υ56(/ G*b\t Sњ9KD;FTyت"SGԒ9uWJ`Eˍ\5"AqRAxyU nC6t,V/ .k?7P̏Q+c@_\1ve$G!cF?MK ? 8lu ۛwlw$:[(ǘVkpXUEI(sհd3"z۫zb+28)M}M"hITʤOpRl\-t@ endstream endobj 901 0 obj << /Length 1363 /Filter /FlateDecode >> stream xڕW]oJ}V~jRҕmݧ&"6lp' (R0Ξ9sf_gf\! %j2P#h/V߮,R~*MRy,? u>]Fa,XrB&GLUj{FCa$" l}z^~ P1)k7YWLiwY垭BJ1vhܱڄ0hI9"X˟&4Tyu bmq2hY+PʭVA$elҷR(ump ô(BlFTpH1e-Dϳ99$vӛ8-s.,'A[”pk"AZ7DC(-(x*^ۼ !|2@B Q᳷$ նH+%eQG\4D. 
)$Tݺ2eKBRuѣ3[ICiSEq&&B)Sэ+i!00XVvLN$@tBZ_!Bradr6~5,krq() SƲK p+X &\xA%jTO}RXSP~TDx0i ;x$r5q 8 Œ ufYO<\+ɀCx!0+:!@3od3`0(1 EYKA{t$ Ф :BOERHrǝhw"!]P~z1 iՒfp>!sO {~K3ɛ 4KviUghd0~og{YWpEɕ}x<_+ldw]1c֔C(1*>xۼDyxȮZC[]yT6v.d>p}gO1wC~lfǀ}297hգ: !  bRCeFcY_t;ϿMuVf0' vzM}ǬMݚQe8 F#أ8 Q&I_b'f>3:/,0-9-4P}(N&w`υ9e殍ʠyȌsb'Eèu_-7R"\ҎYY-.ł:$d.Ml)3l[WؒF4Vh=A/ endstream endobj 911 0 obj << /Length 1002 /Filter /FlateDecode >> stream xڭV[6~_f}HM>DJ:yڮ*gTnk (>`ɬZfmssa's~ %gQ2'bwsyɾ:Nϗohb 5yH[!@&;K#ufX~ϋv7"Jbа? (v20Gţڼ7~gZ, Q Hq{E!J[ Ojn3Wb}DeΫE6ISJr)Ԇid5<=v?FSԕ9R hQJPB<2|~Vxg/ZdpX*2|hB /B\(1"`(Rh՞3jY9z2F.{02P;q@=HS3>֐BfG_َ {& 2M־Pz p3?B,1:Wqć%d^t|&@<7w$c1 JCN w )K'j=h` CBz= sϸ}dGk Mrx qǘ^5c9jf<`#ڃ̛Q7gRŃ3C5C3q{XoCs/⩨ieGu},.nոW#zeB?T+m!b(cTMb`([<꺫j"f*pfҮ+ U5hVS^sEU1_GYWbJBc4À0讼v-N~бo*3ۼBllܡL0Cy=Ha<r$3HOͤюV5tF?ph;=~5tS%. +?ܭ ^Ɓh > stream xXnF}W~kHhi#$J&J*EC$%R+nyggr̬ppɷ'/ 2r2R#ht9F/:!؞$ *q2tS(WVg5InMn߄*iw:+* KVILƣ'NО֬MW7qH$dc_yO2_ 泸J֖3Wu5ړ9{r~gkqeW<+ns O,P1]vo%˓?OGŇdMGsx B։"NRs>Du DbaM2PGoUV1ţmX!McE%E%4@_+od3+u{|?,ѐ Wx%>[%HMLzk?SlaJbc{<+UWA{p3c*F l|u]R0= V @@ /^nmZi-cUin/&qUu=ASkwc?ϓ*NpbgG}{V7iVym<7iuW7kWIF*P/"ğX8J ɬX&xKI7i^pcoP\ɗjki%,Tzsq~c Y$ˊol:.qv>9š7}Ùpj&¿IʤUS hД$K1 ǼnQd5D!g5 ʃvoku`Bm`~yUM꠨YܧL--!?" 
altb1|-4 n:)%L%щkBFYxFᅗEkUhnlK5*" V:dP+ 5ǽL<`6fO1a/HZ7|h@ C=ٚNy"@%q&q B '-C=6%OkZ K ~*$%!,lC@R ܗ-'%9J &uVH -ȴa;v6Ve&}K":HO?Jvh- )=*47u_`?mh{,]&㋀@LfO`ubVȷ2C*ᝨuP{`Ʉ2J U9ˈ.۩$ċ~`X0v`n_7A{$v^ħs%&-5\4Q41 gq4-!Hq(`:1գ[t+ &L!mW >{u`%CaxI Uw3 P'Q{t۴v}#Fs}9#}nӈA`UxG]CapLb3Af*<"BJ'd.QBIK{4[3XԈKL/0E)L^b)gI te/aBZe() `=J %*EPi1# 0`@ .;}3L?ێhuPDa2=dxy"a#7z;Q18Y3鮐lja*)i du^`ӏIgT!aǧ*D^0vZmh7hM 5RTW@ߣ54 endstream endobj 831 0 obj << /Type /ObjStm /N 100 /First 880 /Length 1739 /Filter /FlateDecode >> stream xY]oS9}ϯ , +.ChQ,=siҤMI>TW$IbG9U%r%dcu\1rLarT+1]`cv1ƳUm Y(@ $ 29bl$PX0ŴSB:\ ' A  $)LjMKPm&fFD6J 0#90SN+cZ͎XP`ѨZ Kf -@q 0fBu)QW밁3pRU"7pZxB S`%2ց2*0xw lIl&:f3ŌhθF“,Pqͬ ābU#i@T f*#qJp)6߁r6P`* _+TJⴅm`%#Pl\A6#L C&6TiŌ }F^hӷut:>~qol \L^ gXL/=b;}ӫ5yn*vi7*MjsԺ2>è|&3|Ópϣ|lx>ל1vg\Ǯ1v\]j&p}u[ s uzm]V`[|I?Kk5^g ;ҳ^E9_3Jѣ1nR;g+"l 4~ Ӱn`'nT7"Xvg#_ޕ9r#s.T-Y|^qņֺV*z}au=LK."We}ڍ 6}ݨ-[1"se[MFQ¦ lخf`e[EZ#Sl0I8fv%oD{h9lUCTw A!X%F25a,#SJ!m[i]1!ur;ؔ%8JVQx>+`l[ɖs-}eť5kw~eLJP\E G ܵ/Tnh2/-q+E"ZfWV&Z)f+y8Cs~2UQB{\ll\8BS Yl[ %AȱLbL"L>zh{-YU.UXWY%ql*fF7&kZN endstream endobj 936 0 obj << /Length 1334 /Filter /FlateDecode >> stream xWYoF~ׯ#F=yCh4XS+0$)Ca{pvofz{3e5[̋Q[m<1b<#Z{:*}SlmsFL̰d /h; *"?(*[Z#ںM}sH7e_EE,>|| Ί@Y;N/ ;,X<-vH&{f +t.)GF =n`JZ?ۗ(T\=I99r[UJ6ݽN/XN#yz㺸r;K'H $ &Fa'fV3bN"B/g[GЍXyG#{(`yw.֤>p:sh8N}=ƒ)#eyL^0Zi,`${ 6 2Ua-—Nt]?Y%kcL<ϊZnշWEQ^9~ԉ4!_-_/9^.!jKF0zچފ8bm'e%sը~f7*l`1Dm4zyȚ:Ҏ / \7EޏK4W7rfiHn..7#۹eD4)89$u;Ω??2Ishʅ"DVFfkU.Bna@Jxnrm\.l]&\6 MНNО( Zf_,-&٩,+wcY:gGly^s2!̆AFS(n 2H}ŸHߦSt8EGL5pGĨq۬iW@2sKh,:!Ip)W1ॶ jq(8C.O sU@V`2ӁdCj&2ͷʛ LEo@A Yd 8 2?M\+p'.7Dwwh'So[Sae!';L$`ȄfF\/t4T{/ ^jZ4<LZ_-CB;S=eSƄ0v3[8)DK/(s=Z:16tTQ;4ecřǟ\9:i4ȡ8`=o EO&z*Œt'' K~qT8(-<  y'g2 sîh61 1 endstream endobj 949 0 obj << /Length 1169 /Filter /FlateDecode >> stream xW[D~ϯ's,ʦR"fvL(sƷ;[ Ҏc>s.Θ "h:YlShF̅BZ,9C 1yX%\3,$;fߞ#Rm0S0&1R`T꺴n L5X֢ nǑ YH 1HA+xAάAOt;*FF?9nmWR֞s0yƄhppq5SE*S3Z@HA{ JJLh0cОL eQݼҴ#`&2bkq>X78$TSH |(ANt G{MGA1U|~OKM9?aP2E~2n02>CR]KF)eMOf0/\"S}ߒXt@?L`.%[W5:C`P3TNI]uϻ>Jq(R`ko&T9))t`M[|o9 DS$bb+ceՠ6)rDZ 
UIԹd-kZ+-ap$ X]W`&Kh$5";΂{5 0B#\36'L?da9.B{md&?/&M@Wp*Q١u; J7|\i>Tʐa210yǝc=gym9(pNpN1`Lj1|zwAYp rQhV.KwMksGV2ۯѱQ\\vmܫC!a3Oa e.EOIeʗ)ٶ\ijo2ddsφWk|شxw;rXCS۸~-b甛@z Jbrn@f -JbL+q{LDi^l-9>_j8MY9ړ4hD_Mݪz['ln}"qJ(ݺA?,D_9TC&quu ;O޾m' endstream endobj 958 0 obj << /Length 1412 /Filter /FlateDecode >> stream xڵWoF%$|>S"Uqڃ=C^8wfg[QvˬY~^m߅:iErs 9Y׹ޯ]fCYQh;yܭ=f_ھ ⑚N u>QdŴUF-bёm}nEn07%E8r3^bfvYJ؉e=;? v$̨G)sJ1a톶ѹ;a u4 voo')7nxiDgy~Z>B@a0Dohme'4Лy{y߈K$|^6"eV7EF6H) VEC=|Iie*zT˻R65&: myv'TE/L!EV:'.YCД U\{=%oymaW}z1NN78D) HuDȴ2<:bb|5{%.tIefismM+KkC<әpFNC:5$>'¼+cDSW'کFnԂ(ޘ5u 0PLR8JBo]%Tj!8`N3 k& HKnC|D B}O Q{?3( 4$ aז(|*1o!ɚPY!) *ն:0#n2~h+cwJ@Y 8FWMȘˆTyxuA@ݯfuYKማ14zFm'<#$w/T=Cߝ|?S)i뾬LHᙶiؙ*j Ex|ZwUsSvt*Tpp8s텥&˗ן-Q>WZh}緆Q-S^ݮ\,wxkheʁ N8~XJ`gqþ>~IknPxLe\ckHc\\s'z%$;wy~NTrxI0$5*pâLYf=}zkGO^}Ur⥯Kmh4~]G?pr'u'rYz`.wq=mƩ{EovU <G'?X$^[Z'u߇> stream xڵV[O0~ﯰaD8i{`"MhҠ<1\RKN%(;4 ih m4"6[| +<4N62єˢL=B_f}E<]WV h OdNDZu}89gK1Jt[m hT:;RH\f-sV= (S8c͕[hD4z.Dlvs+&~g#f2jOͩ(T}z3qJ:婵~?|.,k :A.Oa+̮b0KPo zdQڿɴ9vGADߩgȥf"W>4$mZ <᫕jo"?n./w8%(yAN\>įWXN9Γp> +:wp2Ub؂Ϯn[xY o$׆ endstream endobj 987 0 obj << /Length 2458 /Filter /FlateDecode >> stream xڥێ}KetKR.A`,";y(`!۴T QsxHIh2YR|uX|w V2]Ws'*K9S\V=}9VuݕA݇$\M`Y;IGp Φ7pvk夛ixH,Sy>Wa,eE.=uxi,鏺4t}lUhSgZQ {zP906툔Âd3E и`r*E 3ʾ\t q2W_'.r7nd"r+Sđ\MFDhڣ N I 勉 xn㼀&Z8::xlJ֝gbnUCxpR.V *\v0J8:U'gm8i}w|2s u)L@76S#C>Sr@ M OS FH׌{V냦:-cFFWZa7~Y clNK@d̷K1Yq6 >&8Ҡ 9|Ε8!߳I573ēQ0zPB>c)N5b#e[PK)%KjF֐? 
JcO" ;iB .\Du Uw1cmtk83&W]NUzu?Ge9l Tв< +@ZY/@ٕJgԶn?O`!/g};2T`RӜ6lu}*.ܸwsIteRE<f6q@nc!댚?a\KPtnCMV[ٸ\rWVN٠;] -&>DmVYx_H{ #Ux 8bsoB oMڸ16GOԋ!!N]K/98e%̶ u̯:j)͈$X r5trT)SGOJf- %=zfYu>D/ﰬp4l mY0WݗU/9bLCtufxIN⽝:rIux0|z8^_NUS6[9Yvcc O=oh q)Lnl< G$~pE- k7??G3~L0A(Mz"wo endstream endobj 1008 0 obj << /Length 2242 /Filter /FlateDecode >> stream xڥYݏ۸߿BKe ːEI=4)r]y(rA ۴,Cp*ڮ _.z˴zYthH])TWoh3R&"n9Xl pG7<#Y=|̃,Rv)PBD!2p׊Ni%'L$?|=ͻ˯vy|QL -"9hT9kNށ7w>^X߳L[^TGMm_|?clbP|j4}wJp9}!OtS=}AhbK?݅-6&̎~lci~[>D{@=Wc 8r@̀oZڳp.{sp'kYNtԗ$c<|G|eU.D1:p@'kK١h/a6ajЩp[6E\}iIʀ #h@4(JוCS&!+&E9w?W>B KѨÂvJKX4ZaO cpyGt'9ƭ5JjWݺ1W\t6bbs= 7mjͮ#3J7W Oe9jP,ddc,WQ wnzS2@&smsiO:fx[]t6yOTA%jʬbdjǗ]3`Kk 8A{nhnk4~PY NY ,EC CK^, tI>,]?ƻ9ZW@T:>%A?8)5@Yj9ǖWz|̋OWVw endstream endobj 1021 0 obj << /Length 1417 /Filter /FlateDecode >> stream xڝWY6~У}ؠ٢Ţ@) D,d=}خp\.Ż΂ e16 #Ac E9ܝT!J<|^ru(F$' 6Ѯ<&fϱ"[J=A">M/W4eafBCUy6 ƴ[n(mԛeޢ%k'*Q~Yog(@4h+BPƹSS4'YrJqn?7FcZHw M b1 uY/ETns#{mSn87[l'FiփU9?w AX;p__V :Ecdћc$L@h`gS6ҨQFjY33]S'7^b)QzRg?`4fd#8I'gYXB"od5 qLheX Bo .^XJ}3 qԃAhx:qʍf穪(Gv`֩ WϜRmѼX-D =5%SBmq mS=~78rCo^Ō4O댲2=W/>3u3lzςX'}"bWp/X/D >.7M#D4Mոہc^!=JW.#|\YW,34KEꄱ8E4N  u|vIGA4߹-gj[W_^`mҎ̅ Y )|?7 [`Lzr=M;y{/2qѪPϲx}{:Yt(euKWHC .^4"oM}fnUQlļbg3‘z,֍`f4m{ ǧ3)ѝmiv.e2!TICR,b}TO/ּ8]gCεaJy1߾Y*JN# lĆ# O]Sa!p_ħ̕il6v};D^; N;̉&+1-11MMxld2btv׎}s'mf)(}tik A·r"c*5'PBk2(|g v'{85A1n|/e 11?h${(IM׿MS7[ endstream endobj 1036 0 obj << /Length 955 /Filter /FlateDecode >> stream xVM&ϯKU !'JyrZA#1;4DȯO nvJN &'1v(!81JbEж@W O" =CфBP4j2FUwrE{?W)0!5Al~FnН88Kepr=S&Nae ->Z#5$8}<&@7`\7N^{] qEyR:}s Z3)}|Ff#zt3^\B.zT9X&9+Iysޫ'٨.YyݝK/|_7n־ͤr,!to/4KJ,) R1bhtݐ S͖eF%/֨A^sg\^vx:/n4wRGU^ _^zFlbܞumqP~v T =6,rhdԯi0"Ƃ}['b4W|ps@"TCVUUƥܷm$w' (Ju<5u뛊+z")C2h)?$J -H3Sץe㩕E2Uz8g\RfZC /U .ˇ1[VtQe_$Կ_C endstream endobj 932 0 obj << /Type /ObjStm /N 100 /First 915 /Length 1969 /Filter /FlateDecode >> stream xZMo7W\r1Q i@ N~>hYn7HT)9;K%ߒ3o\g㌄h( Na-$I5e\qc6;S8k>hlym(HHe b$:CH0aHPFK `Š`б:䓑`Id'm0h|p`D0 x! 
FtfLSAS2:̢\)NYF%kK,{m;!#*FL- -o)`BI0"VD$Nq(^Ӌgo+RkQIb"CBxvsspzr&:HnLS&\qČXau!XDPTMșjD0'5DS$՛T`RLiMʮM$}[X?'{ 9( +ǪXͽ7'oeuX7-N "!33f8uT)B NN͋n23''y"1zLPp↫-n0?\ۺ9p8|ݠu]lg4>aW퇙9'e? A=X =洽nMK{5>>3%%Zx(L9FNz:tjRRmg *nмGͳnzNHrKeCjtPNV>ٓuxiWiG'f6Ee]?08t Lɡg:hcLWL6<./$=n{mcKbFE/=>ZLQ?0b˶f8j fW Ã=?sO)Jw{+s7/=ҼDW\$<$Y ajCb+蚖C;]ʰߏwnO'ßvQ'}۱=,ML` "E\$s}n<1VP4k&N.=F? 'lf^슍LjG`;\Ԟ2(%\eCId25b>  #RHc Y=DVI jU;_\$2fƮ*hEFau1Pqׯ,JNȺH߆NJ7R܃U)n| ގNs^FhQ 苆˄3Qx'h% S:SrvtJwug endstream endobj 1050 0 obj << /Length 1043 /Filter /FlateDecode >> stream xVm6) ҭc6XJ+ͥRt9,n]sw鯯,Q>3؃{{?/~\-^g!VwE1BoUx7FupzmƘ$@!J̑׫S=j#^]byO֋#=/SG F,&^DB>>iӉQxJ?]D )eDmp!cģKykCcmȝu5wc zK4NVaj\FCq v(dxBR?+a5J]5ʭ0wMU\!;sdnҸ&ȹ݋5͕^&M>`%G%9& ؿﶢRg`Si'Li8oD2Ag4XZ%ay]DlH-H:ouC:?jS(2**eZXm*QJSjd -GјegUn{_]U0ϳn 0Փf/㷲W&L?|3a{&KR_2zzy϶RX *kgvC<R[kV e0J+5ٺ%Kd0R7Yb[.[#w.[Ac*DŽƬ@@X40B)D%:LB N\g3}.gIq/Fg_7zj?]_3K/V՝5/{1.gklh {zCo-ԣ},+۸颸W%Bm endstream endobj 1065 0 obj << /Length 915 /Filter /FlateDecode >> stream xڵVn8+n*˧$I1I1lI@X,z4i~.EQluixνރ+{ɈF|G Fʻ _ 6dIC&7JpBG#z4ևOդY^OG&%^H$el\UVEc?5%B!7Y^IPʐ-h47!wUHY5?IgE5y̚ 2 *}PƬ\;ꤌ#[DRJ la ?4qWj@͛wpbtAvJ;}M-m oY?x&l5G,`ަE+>d rcZ(Z]SuYnwm?6& jw;}օ,[{.46ӄXs Ĭ\νQP ؿٿ'!0EgcƉ|}FdوRDV$ecg8@sfQHe"N "~\"S$byn ڬzS.}X55~vIx )I_!"PfE^V' |cneVvX 6r28&x`N G"]]}w(gJ,w՘9 rgAiEhaD%NܗZ{ pϘo=;dZE݂G͠?S8ib PaR?zҼUI}h&tUnf;]il/.WE?W/\UeMBO0+ߌ NbD1>Tۈ+^zxruUo)X4bҩ:u _= endstream endobj 1081 0 obj << /Length 696 /Filter /FlateDecode >> stream xUQo0~WXÂ4ܳ bU~R.#\R$. 
^w .CPWUF rlEE~p b#C;Y@QbGkebi<I0Z3a>r<^4N PgcVS] ?쐻/r`ZBaݏ=Utr)oDq}RH _WI/MhtgۅAs!]dI&œٌ]xU$NdAE8Iy-$vZ 7Dv$R.hPzvf<¬g4t4sB1.J5kՃ<a^@/ 3HyG뫸P`MC0IGGL(J´4́Z82֤n9 g@P^EHy54df8mt SRKNk"pL[uP\aCz'zPXkXrdқQ~he{iLGsj7iuj+fxg/g8޾ѭ  endstream endobj 1098 0 obj << /Length 927 /Filter /FlateDecode >> stream xWr(WPEZU!©SYn<^Fs-irV񦹍r8s!X}ڬN%8@[D ,(sM'X37qIXğU}H/[Սl] L\Fԓ.czn2 Tݛvq=U-菱]'X G k@"i`Oܑ endstream endobj 1112 0 obj << /Length 837 /Filter /FlateDecode >> stream xڽVMs0Whr ECiI&N{H3 pw%m0qM'Xv4E}ϸD sctmd´p'eeތϏϘŞU02dD-Nǣ_# uu @|t}CP kV=9bj2St9*׽*s%eyc 5U.P[6|xWgesb KɝicY%y2vSʇPV u{8c۩eT$uE}dQ~òa%K.pqtͫ2^,ڣkckp1>2-NaY7^y:Ԡ;R?)LJr.a7vZk R} ^ԍ'ZPHƱ$ݲЪ(κ{7`! : }sa͆^ y8Z" &&BN`GLle]Y\ q<˨dr }Oc)szbs;)kZmB*GVSDPZi |pQ=.Dk5Η\(5(G4i]./QLS)&Zm̪Gz{ȉΒj[H$jl\DU8yQ2yLiL )1Euxׯf&Xu{/njFC>Aܢ\FBDf>9L> stream xڵV]o0}ﯰxFnÀ 4 Z4eFJ㯤Mn({)SQ,F00z! '`47}ʼ7"}AG*ɸGhuW,\;=L$=x:au^F`<"0Q΁z`e" _]J\ !{F ú%i^C腵B\g˱y\%1N H1އG^E*9 G)-#3ЙHILWE<!)!5(Jހ"gIO~~T %Hg aԟ.<ų_`jz[ n)7dIٌ'Ln̅}vP(r&YC6j-˔3K#&f9}hs<꧆&ϠTSFfX++ݣ*ߥ<]c9˅l}qjv._9ۛϱ^JN<}狔g:TXPa ! r1׏3݈7څVm=}C<>akܲLHxc#v瀱ڕݸvcK]VQ!c0nEX V9|c!3KnO_ZدW/Z=9O{[O~f/D.s{v3QNHUuOT*{D^: l*4†d[PMË endstream endobj 1046 0 obj << /Type /ObjStm /N 100 /First 958 /Length 1359 /Filter /FlateDecode >> stream xڵKo7<.g`n @h ߾%+-{OR?΋RRS~)t{ im :A'A=YJ.P b8P\r.E%T94K:9B/ m6fNbBQ(e7폂Ga/+X,xOL̊& fB .`y&5d *ad"SenbHmE8쎭Q7 xmwn5sTff* J3I)"d &h0TcV$aV5G&N!['ab*"#Ȃ=(d$$Tg x|)(m&:K*/!')F 6]5)77*\?}*h6]jQZR ẔӨ\4Z#kg˨[Œ MUcmKUsPn`?XwkWxZCafbzyf?^\LO֛ƕ3-ތ*Ŋ}کBw5N=2L߭_"͋V[sXb;qd)-± }ю?.6j|s\*޳&9Geq$4Jl֚c4*Hsl+v"ΊtofUө{„Ε? 
j^_q_ݻ]{o܇ovngKÎ^h =dUu:vo[`9HgDJtF.LVRfsbF_³r\ހ{}{CG}CtAyh;rWa ,vH{C4㶤4wƉvI%ڧy60"Sa1 #ΟW}[~Ԇi-V{eYs4'y'[kAvFi"TJ.*wE刢ڇoR.tߒ endstream endobj 1142 0 obj << /Length 855 /Filter /FlateDecode >> stream xW[o0~ϯԇ%Rq}[i^O]5ख P k_?iդ;qX> >Lgt0B00 0ȋ;.\%ʸ `<< " q0OwD h\Tc0\7k s'bvhU~Ku*M0r0e:L0{0khHe.g#Rw/"^C}"UJut'Qnp=ߎ7e"Vei} T>)¥]iVbhs:rBC+H90U<91jV{FfHsaaix.d)# ݭ-^S9@νCHNÀ#sup˵PqtnBNsOAA5ﻄ)<0ka9ضG/`P}Z"2ٓY ; j*`I}$Eq<}eslݒTci8GV*͓0b-ѠW`O4KeQadr&X.OaykY,Og-Xתj;ւu sX"B"~R߅^wPC-᪔݆rjq U ȟ)נxLpW&tɲ;팰8].7.d,˧{y5ϳLhe?^&}^U~@ތ{jKҟv1¦(c¾#†?ZZ{z:&Sڦ(C$[u{9{~ ? endstream endobj 1160 0 obj << /Length 1432 /Filter /FlateDecode >> stream xWK6W9@̐Gh6RuCLJp%y7rHծi{( ÙἾ?~X͞q4QR(#Jdȃ&x?W2p(L@i3UO?euձ>FlFa/Wf 4`BY9{|V7 c$!oӯuL8( b$ &#- %tWnU^T4iJ1bzD$,vQYF~3Nq,Y?i\:ubh1 #gĻVw'Ĝ|V.FRZR/ ܨZ7FS+GL2F/R2VdB7o& ţ(2.Q_aŖòts,wJ}@gcq0c#1j!u1zkl- EX4| nw{6 KLw8ּŨ|=nO| SȞ9E[Su,:LG1ԜOA"1ݲ!=S_v]mS }7mRyÝ#FшXY)!ITtyHTD|,Gb҃˛p(5NoIiZP[4"6؋+ʟ}>[;ˇLJ(#X0#p~ (a$l}%g+[p j{d]guy8vڥuV?.d0W*l6:&{R$4}04+VhqMkSg_ߡON94Mϟdu2Eێ@D=ihMB>vN4I5@X BGJfP ޟ2K)@0k07 Cɰhk4Le>ؐ  [r&t)uFSÐ8v&t I"h4u㲖t?BYSqS}{eȃb#"ᄄR53쌰LT?4N;bGΕv_[dpz(qIJz; un ${Ӓ_Tg_2qdAiE fnLpr"zX6 ާkPHl[YEZ[#KW-j?rx^7#})Xz%Rl30'|(C‡Щ"֘^ם,Wj\wơg l(R'{ ?Om$?8VCs.X&΀1s+2HN3m†3BwQuQ q48 cƞfl c( #erD؜q5E8x7bXog@6|$tmfc+XZIu_)Ct(KsgfCQƉd ?^nxꩪA'x]p dIpc/{i+9h{=A|KnC2<;DziȠw'Ӡ$)iZ]ٛ~mtN} yr+d=&&_4d3.џ>Y0hx@x ~`⥑s$ƍ!<& +:xA̸eM~ҹu&ȽJ-fh b8ftm hAdTu7N#pd2Υٓ̕־@8r4;ڦZ -Xl!ϝw:2PcNȣEi`WcI0M˙wTj)B&2#r ħuApadCl"~ vC'8ѢiݒPI2& (Vb^:0soq #zCt8Iܑa¢DLuŨ 4rxw=+*|BT\ZVh$@*DK~m_t9>q¤l;)a 7طP;(Mvy:=[݁N {;ēN#.0p~>CI5pA炏sզN%>Ԅbz[1a@G1a 2e/H̸/fjs ^-˯ڤF~tgBoVԖSeCr{5+\$k6( 7($hSd8CuNK">19ъYJ[hL !\8Z=S6,5aziZĨ"έӃEeWkַvXt/ֻ1o]rC\Tu^g  ? 
G^QSjs=1[V5:6;+J%)p]f}԰o(4ԪYp!6$vʍwzL}Q@2~y cP3&[ݓ] + /?ak-`f-ЃCc):oo MaC< ŝ^v)Ka%8RfL&dž)+ɂ& 'BDD4il(2{ϊX"(gKu藯STLxZ(a?# ݓ^Q_?Xkҕ4(% R5}oO.k .8LF1@ F`r'rr*HDKpu5c"uai5|w|04c x^v:ů} endstream endobj 1204 0 obj << /Length 1963 /Filter /FlateDecode >> stream xYK6r("a$ A˴,9zL},R4j'΢Y|ջ>4:D4w_|f"J0R"%+۷SB tu ?%ܾn* ,^nSKp& .YK{u >5A`-C?~Jm=8%.mp&D+}2ׄ)Tw>ҕ[}(]B.Xwԝ'a{G{TP\Xچil&is8ۍ M" OӐj]Cp9F*(!v>âDSaξr\+ZGhjcv=0Ei;Xin3cǏ cS=mMh챱+m6b4뛪ŎmuӘ\W: Gg[C/i)6W(9[HE\ξn3¥V<(*e!2 x$Atfng'9waFWtFc›0I Y`QM35$$1hRȃvfoSK:h@d(0@hNF;ٍ\ }SƋ!ʚ>72* jj†]rT.|nAW14DYJ3UHGdPOu]]{5D pëɪh,e_cITZbh8PdACH2d~2BY2*zy5<'se-z]pI zg4T2 lnkڡ:p:#x²0S=}IIG,IR_,=r ur56]cCűԠ"_ilx'!~5+\.gCA=.YYAAd }Iu\Viyuȧ=a Hg0'7 RC66-v6#c윋ު'g.up;1 <{AbJ2"381fI }kTY'LvN9BF ⧖@n1@nh˻0hm 1£UPRoMiY圭~Yk-ƳCuU.~_y;K>c ]"߽8y+ddtxOT,]#82O35S98+Ɗ4 `,> stream xk!Pp`kIK)~ݴP.Qw;Ǟs;@ H[|yG% KB.6}&p>SR,6ţ|]q+R7?} ,aS cQ"B,A/+ ;}ŕiӿ=LV4q+\=槼m֭oMWM.0a|aLi+A% `%JdQ> K޴;2?u'Tv)}1#c1% s:}Wfm^ki-YφcC.BF>O:mApoܪ`ۮ.%ICYڴ\J hM?@ :uD|,^H"5f(E"=mw"C{_Vӄ i|b;\597f`p$E1TĨ^ӧ ;ɀJ%M-];m;{Dz\zړǤ;w5d;yC#wUޡBqs0Js5SC4#y9ΪM[o^V渎h}C&cpU朧,[iʄ1:7@h6%cAAb_*jk6;7w#f[;d cyyZ* NѲl e?x4 \= _odԘb$ )[N}3=ޡ'`̀=OlrHc$c_VUgtZpp%9 iS܁p0<ar~X=1qίzH1CjA/%($3?U+ gG7ŢDdI'ƒeAuT?jibʱYMဦc68EG @9? #"/L.tnp3^bյE۪+wMF1u)ܘ@ oFKnH_Bu .G0OGփЪH39Iа iOWACs93KZ7ܳq=H;L-XW|&kuysˈ/VhڠO%Hۣ߱yLIO|cʟrO{DA<# d}}l[r+1 ! -,lna*=11Ȇ2_V/Kt܉" /v[ѡD %e0dIE/&0vxÄL檩T p}.D_ݣʑllZ;0]1-]}gncs/ פZ/m ̋ *!W섉#+ߠz kwȟ|P6XY[rId<@r{ܚH){&! 
&(ʄ@=,!kyd8fX %+NBMLB.s鹐~c%um94ٻ(*2ӭ dd]D >FW0>mF"٘d,3ߜ!6_obd &F|`bBe ƤH0ں%'o ޷3"^CUFrK:?g7!lP{u&I" kͽhy}&TMg]nNs1\Z/6Mi~tEqqoFu6iCo6$ExbK)!5:T[}ȒKk!8Y/!ZF=,=1o}[@<\( s I'Q-%LK {'9ZgJoa!x6ԧ a%'1?~j}:/9 endstream endobj 1237 0 obj << /Length 678 /Filter /FlateDecode >> stream xVMs0+4CLQ8Ӥ3q{q} Z\B1N39IjCAw3Z(b ͖PWK,FbbZHkDy{IXa%&!:Z<曗v2\u=5^^éΫG5V)$YR߈$' `ᑚc rIpPڕIlghQmfOT z5Dƙ/#(@:tTkt|z1tSlWۼzȥc"m$Xʗvl3u^eaDLԟ`H 8d֪>)"vWjM4\/y9yN Žp8W?|u)0aL LA'yGԕcd'}X:Xhݶf-*[ǪU]j5օo'QG״B@ʠ- Տ3qQF K ,^N?nBp@"zW(l?QW<` endstream endobj 1139 0 obj << /Type /ObjStm /N 100 /First 964 /Length 2202 /Filter /FlateDecode >> stream xZKoW{ c}IX>$}Imggf(>_qkiGI>H]]M9s )9H+F~ٜM@`&@yky-HIhAK "J~CP,,ˢ'-LRC]ZW0ftT<ds_"f"` ~r]0Na:U6L5H1O2S.>EɁ( _"cPP>u "*]ZI h)P`V,UʵCqc *,A\[6.K٫뱁2MS&jP s`Z[ Zaj_x;VJRPJN^%|%*nTuA WM) PO)s(c4TN>/ܭr Uݽ)[`o-Ts=X"rK&]~Z',XͬۖfD<>- >N1P'wji]50!SMR:x`s8>L '#E$O/NO|Z5'rk-%ES%#q"7sˣpZȍ9OFvr܆/%TˈK&"޳ƛ^0~eη'L;,O٫a:[]6,?áoh曯1g.@+Z΃.3ڪT\.!f'rdžD2[DNƬbJy"@*M"=7\`"7U2K^qrSP]+6)EaV$f?,+ z3.V3Dډ?=>;],=Z >}z7/sc *j޹aʎ_ٟVoVeܮvZMVGƀߺ&"OP#pTF6awݰ<N⫳xn~BQ&ƍ;:QRcuXrs@L!~SJ6E>3Mx3V&rK15 ]S-=8H&]5D  1)1)1):ȵ3Mn^Pݼ F>Y13T+]X Y R11f%QN+S[ Cx<_oboZD1 v  QQ7K_tiUeX%z"rxXnV)`Jw<;el#ɤhT&{|X.Þmote~d@간hu~:v3GTaJ`Nwԅ[ݳh&gf-WQgaZpt@[2 } 2ݜ6q|DWĶ-% IP#666666_px1+ ހ+ mv<[|GL}4o! ܭ DϻTu$(D}_4!j{ΖOyDERr \zEC7W.)*3'@[|RЄTW=n(2,ڼXnA{  y~'-O{4)O$<s1e27Sy4n$Ri!~bbmAGE\Iю4<2:e$HH\t~@tA#82>Ks~\ZPt P3+'ekIȓn)G/r~~:<Atw[:g~r u<UB;UN,g4]2 *dA|{l!`-(y@(3!gZIi_>mhߨ$Ft#ɔi A,P!'y:&Ͻ-o[bЯ!CXܖ Нz߮ЍH2sRxDl$H܊^C!7[ve pF ^εܥpTȬn媏í9R]XЃ endstream endobj 1258 0 obj << /Length 1007 /Filter /FlateDecode >> stream xVKs6ϯjE؛S'8>0 { ۢƮqrHROԻJr/% Ŋ#o_x7CI?kMVv9OivJijCe$NZqeG;a a;\ug y\<2V$M:#I;2\\!$hNy]EatYwfn2۫NV(ZKSX20Pt*wGeFo  TR;>Ѡ~s,I%{Cdȩi hM7M;m?MnS7Te<tI_#:dP ',sȉYmG{?ե {7XNB))#7[`Ph`Ty!ObBUŷ"K[ *#.OQX`_Ҷٷ@>tPՔMbS[a/ԸB$J?? 
'}Ś7Ќl蘆RY8Y\!נk=yY:~KaՊ6tT x(cس3d7[ .i{O`D&˫- X8җ[^ߴ0J`jQ+ hclI (خp9~p4Хa">Ƴ$#mޚxuMε[|*k f{r(]JJk+s焼bKB6J삡뜯[BGR;{tY ߮?Qf.;ձ&=>+1 Tuu409~"W fjkaQܭSM?ځn:Tiע,`O:]Y[ժ6BfnEyA TdMdA endstream endobj 1359 0 obj << /Length 1552 /Filter /FlateDecode >> stream x͚[o8+xlW[ci5}68).=҉cl ҨRU|>s;t}><|xrw<>%y":>oħE.:Ts!k).Z=St8D B]УZA>EMUQ". bHD+RGeZX%D8eK3-_)%ޗuq5/!t|)Ҫ*rSR4Ы)L1~ b9 Y6tK5, ĺ#dqճ ʡ^±{w<#.AKVi3f*H4S-C%WiY {Цټd_q 6[QEe颊ۈNà:I W+KBEv:x":KTtQ!0ck yZ J!۟EBWڦڍ.GMcl,u?kTK[K-@""%(Ke+$ZY,,v;^nٴ/ߏ -(.4y t?6YwH9$!Ta@xN4clb=|&cyͦĚ]]'F; mWQN%11O/>3OSZbɴ68.(&,0Ey5煵C FM#}d!^7X#HRhz8Jnnm_ͨm1YSbе.ȃա^|FEb WݜPsS@ҏ-[*&̸*]ӭAG0nЎRz^R0UtxU*> stream xZ]o\}ׯcp00n @V Hrswg^m <<$盔jJj?KI;nISHu.CgڒI=FjQMRjic1-Q&?Ej [lK 4M}߆.;+il a1sw [XI)EeOLL'Q=YmD}גpFO:HV'!SlƝb]{m^+*[)5eDO4^[YFxjMgj}/7%N䩩cزU)h\FG]Fxr98-% E UPbdeit Z-XiLYƎR'9$7\H #ubsHKS@&e"Š5Z0y b1XUR HY-BӊI w MF+FU~>,LRS”s)zepץR3LQ(Ԕ镫A!VW^ r\!g/^Mol}/媲Rg+OoϾxb_]ZnZq|OJQWwŋ{ H}Y)m mО D}ws]zv}2^_r_[Hn]۟._}KzI`3do1 @ dkٻZ\ѰhlÏw/q£~7od wX44%Caܩ-c`!iղwׯvon7?|s7a3,̴1#:Vܝr_gZnNpX 4&@Ufj4woۦem7:k*F^FW?؊hcW,yh0#c>˜@<y 3g @<y EEGcDã5k @\r 5k k k k k k k k k k k [ [ [ [ [ [ [ [ [ [ @D#8Nlsf"@1d/DWHbӣ }Ͻlke5H8C[&170,0q*fP˖]3ugQe.7PYWH I: 3 >5cVH$abRfUKvq \!aJoE r%%):VHŒl<IJ2[ p#A! ڑt7\PnCE Kl0O*!!@,% D,>INAx ߐ LB%5N uzO˒>Nb˝h8WHX=>#+ўa#=؇r9agpC 8-BB wf>LD^?ɢ'2sRȃf:I '!̴zˆ"qUrY;6#$aģ$lo!t! T5S1rࠞ}%O)jE`[6~}1RG֎RtAUDt_|LpY9P*eR*,f7Ix&YT޳ BX^K/d|#<`;@"DYQ Ws2 Gi/AIiI9XI Zm2CKe43>9 Ēu}TU|Nq ҬVOB3 TSfգ<62Y!5.[ʵSN:/HiY aWp::jD$xU:3hF0o)ׁ,i!OS0f["Ga(2 4jBJyB٘e 9MBPڳ"[Ub64).ސwq|? a9I 駉bOb)flF I47lG@*Q$vk|$^lJps{+= )kmE0P~nu3R}0?hH2*Ƨ+aH xM(L_=.9z"YR|B¶;{PX[.``>Fv)F endstream endobj 1440 0 obj << /Length 1426 /Filter /FlateDecode >> stream xZn6}W1jE$EcIڢ - f68_ꖊ MQ P, y}hXu{v5? 
5,sl5_[_twqˮlI[.^9sj7;_A:<3G ~`lt|_lb%3 b;Z3_ 㫃(< a#10VQ< hUdz>3pfT.ŵAC%NB`ޢP 2Z;j7@M!ʑVy%$U2$St '>T+aAB]d0OBi >CDCo2wb(.1r0!תz%<'0, $Z|hC5%I4xsjS-sHl2M5~rcԕ4$R'Q#53[NCa6Yn" 6ksꁨx,n /BE*z#Xtp +n^,XfR)l8*d:zbX5.|$|yBQ5 )o+<>L 1h0I|p"ߙRZ=O 8%b(;bM3 b=I3EF=%e3f{.m*?|:n ޭ5H y $"E.\i8Yk ̗Kn>7n瞊KHn,oO'cce _Mcyuǒ\gYSIJx#f[ 8,*7u1YAv'$jrt$wwȨ1X瘧rWZT<ӟt'- slYu2\=\G^}6!]dlH90n3m:?_a8m`z^ y3(z`*‘hy2Îs/flJ޿BonT4ZQfGh<;6~Ш|vuK,`px6F'WH=Z4[jO2mZk$}Y[Q.;?vI( endstream endobj 1360 0 obj << /Type /ObjStm /N 100 /First 1016 /Length 2630 /Filter /FlateDecode >> stream xڽ[]}_яK߮a-=8LH28>zoݕ+iLSU{K+P—.l^HQ1b׉/}u,DkeM#ܥc9%CHcmQqYƲ7J' 9S@%n8d}iw6/17_Z\\{^dX3`"ŃL}Ƽ>j_fQ9F+3 *:9 .֖G/FC=zt`)D)\b:ȋ9 ]bs0昭8Q`TF=ĨW S}tzӺhX0<IkeHeiTפq^c$eLUmZ& T b-ʌh)s]7˴VN~—ւB@((8@AL 2 K`ڠu>1F1L q,AG W$e,B 437Z0,c0`.f븊׳Hw%qmD,.Je~?b|Qx =;۷N_|W~:rC{s/_pw5P$e]*^>'<{VNߖӟ^=򇟿}c; U@$6LyaH`cax ]0YW10%AʠVh!2"‡>~y%d˳ثl@;Bh^{O*""߽xkc?><r99]vs0r0o9H˞=-{ZiӲeO#- (AK=:I-ӫ(-+iO=uu2lMN&nW?g; Ĺ|/R>łT؂tNآAK?C:zmA#[i|aՕSe {t9)+:Y;kp w l 5?F]3ᄠbIzLB?0#7^:kuQ:oS] PJOsH̃@b40v ddh[9@`X9C<-p1 2S"#UL GƆ$1 Q|@j8Vk9 )lR^o:|Y |#?fAl C]]cSfsOR`VA\hy -x)']xbC])q^7W;AWvPʢqq.WAꉳN|=4ZmAxFGխAڣoCP)c9P:DY/D:66:vxWw Qraš;}[{Kt3hAY Lmfr9e*[ n@֣6/Kaպ!NCClm[){5|}B,Ζ: %4Cmo9j1k]/ :|[ERQDTi wvjEJ: j4C85e9euQ~OX< n@LU49͞cY)NzfǤ@hxn6eXAꌽ3+#d6ϱ}z^(N+_}K#ɟV endstream endobj 1443 0 obj << /Length 113 /Filter /FlateDecode >> stream x332V0PP06S02U01SH1*24 (Bes< ͸=\ %E\N \. ц \.  33qzrrJi` endstream endobj 1467 0 obj << /Length1 2876 /Length2 20502 /Length3 0 /Length 21943 /Filter /FlateDecode >> stream xڜwT][%$]8w  %.%}U^RpK}Td*L@)+3+?@drq3q5dRZٙ8gl\HTT֮vN J b[_h H4܀E/'/Eg~ neDwprr5r3j&V5o. { ?D,M7lltQl`˔_?SڻAŜhFoj f@ 2:e)+ 3S/_M@k_G. 
+ Ē sq{{ `nm 0ZZXr0Z+:[{YYY\ sou%pq, Zz ,& a0X\wbb_Xʂ,"o9x ])9k5V.V3/f?M_5F==ѿF/L qosTiog=o?_s!ebomـMSSO#?UJdiEhbjf0sSW2v _%x%XLtqd$ͭAv.n+xٹ>lk'O 68+qsXDqX~#o `,#Vo`,2E7S#p<Oo #F'E7giFx:8߈i7bc6q1/+)ح`v&f@;bsM6fo|Lom`)r%#Vp-kY:/xdU]}ʽnOޑӤcQBet5g존!VTgD \!W";r"AHG}B%fd|,̴}|- K`ؖ*@6^H_J7Wmi 2&o U&{)Mͨ;Sɋ;=h^b#372~1!&JN:2 M '^6]/[UPV휿SUL >WR -5cG)[ORp;N7ƪ e "泣߳A1IgM\cY B땉w\qg5T0dMר^b3U] . 'Go $cF,$ZY/dc >{WКi0Ts'4.ZT"=8&Nv,_]?Xݚ ,[D1جbЁkv'h CO>`FEF+̹?ͺr)3CX~xHxI]:"C詘kW'03bEzQʘkN}&V's녗m %4oG̪j?|ŗt}Z3,D~ $|_WұeMbyO!{v hO[H&}nUJj?UV@HGe,41p(]vvsSfK1p)RrD1 ;Z 6Eύ#Mb}o0Z CO }LN'™eYƨ|a{$ƺQhQY˰n4E>JS=J)fCnMu8~7lF@6 '+gXB /WTm ]i ٷa;bk낄)_n2x<@A}7UCnA QWJdfT;a^[]Uv(Z&<9(x HC[\4b  ef:\) 8sPz9~CmqxķOJe>//{L?+!==! fa6o5 ;Geg13!jd46?墌6 ZR/|%b:=L^H蠷[ܳ*THO` ]7YUp-&hn ̯y4:+)v.U?rDr^ x IaU oQTZޞj< TA' vg"^>'X-(C$OB ):Z@OP`z.ҋ[ RWT ᦻH:빭W2ڒaQ-C]'iyZyS &])[3̒>H ?&,( k5z"T=;-ؒ CaVH0l}1&G,&8T+awNYUB;ï!G&>Qi9pSCѱXv2C Jʁ#Pa]^X '}Ey5 {@9fԬĿg8]#pF9eU9<*fP! )y )e"dI x\\"6Y>2%+-X܌Rà!ۯ!o%V)wu2^d)" 򪉽E{TI[5eoB%Zy>?V: JP+reH[eX[7축)DYsv2+œ UM,FpX5]~[=!Ҫ)Po֏ IP~M ai3<[OTd'4Og +=5 =Au'V`f+/H*侱Ah<U"Nf Ը exҠ lz+MW CH#%wA[+*01$xGKoJv3Sf=T1Z݂ uW\#9Ojo#W?6^٣1"jS I{MZeک]S)AV[Mq=f=E^ȹƥn3}QBjĽv#iϒU5 Q:'s1WWk7\>XKy3ʽXgDwڜiu S%ԸI"lמu)6\4 >~{ӏ؁LTP}G-!Ӓ]E)ߦ*(:]. C tB^nMQ 2QVKfK,?b4x &*^: ,(0%fXѠG~8c7mQY1T[xvG.(m)]HN!kdWm medUKٲc,(DO]|ff'X S,;u#1)uM,)ܗ=/2ֿiD_-3hr.Cez隇N 3%e+ӅLKq*)nEv!E$("Pa72|3ߑ4pf#_-uѴHq`(PV |Qz g:W Eem{fKA`JY$οHnKqtM.E~|)-F-?7vVv1&e|.QMX} ػ& >w钢f'MsVn/(?-q;ݒKv 1ѩHQfTAs hUY ,yMQ?nM3k3y!. U]A=M[ [؍J6H0>4[2x7$'H:L j b-dήG=!|(?V>aۻH=:. 
tHwuabtATZ38-i@ ^x*#E##2݃0eْX~A -%k{̛-& GZ]p` s [& 6r]TYwB3/3uR0NʌD6_uP٪},F/:"{S,D+ oXHh{l$$d( 5q@vS'&؛^ȝՑ9AP& =%ߠcYüq^@YnYC8(TtY#TZOZ<\bG׸.gBljW:'QCh>]Gխ񈲕y\z)v#;!˄$fE>(/^2/ @:0mN[r/q" '[5VۤSTCfp.祭i_- Fs%L /# exn.>NJr'X G}unྒWTR$>yyCv:٥8lׅ}sZt[3*La0nkk^Ms{j}#o)*({jsKwkXYr<94(zhbXCƑd T 2HFZcKt16ˤ*v=˞950If6|dH=5a1QPR VI왕>o.@{Vv,Ûq4Ǒ)8}!Z.]zRsJj;#g6iE?3{h5i9W6,"f+ 1*ūW#<6 ) aZl='>od yO&h,ɝxQg&+`fzZC1sZ3GnuM3n.Q4݌,AbEyR94;!dT@o%闌Yg2M[C$g7Qo8Mv!\PXu O"bxi\dwa.ɿnWDgIH$(Em)`H)uGO~9v G)/K8BF6ҵ 8eS{ g{}W"&暀hԠwZ0WGh+rH8i)s*Y_6 8Y>hl/TEiB_!jބm zJ\pLV(# Wj:ywyᢵx*j 1ɵI9JЧPBGoUJ ղT=SJp!">YX+暼2=W4ݴU>GaD=(0OZ0:hu5Nl2%% 5~]LGcl_[w >[1eCoq _F6ú@X e1j;D_jƋɁϗvf{ݧ|嶖Xx\% 7|Fnvr};i hq}iBx7i([xPw_94㪩LWKP.9ZU6R'+h$udEkgqۜUqn^;\XU:m7"sxDe0]J#ӎD!Swa!̋͛Hwq7*Q>ӫ{wo(cEMh#"𢕷D<s XLnhF{f^U~πNAoT{[]y%D#G *f4WkP9ي͕ M;,w'ݥ6VkPmvO8FGZtN6-ۯ~,v!VPjy̸5.F+F{Ȱ; MsEnvBx8&'8m/s(_? eVȧe5庨C!'-+(TOxS2"]RڵVj@]"D|G/eCd sֿ<~UxKRt.%}o[afvGA]GPDHBo,h aCT!팉ELVx qae@W>[3ĝϰD?DΜm +b Mv:?BX>l"6X]wZ ^Qz$J>},>an&{"-gYI_W1=.WO< H&tz͖:bxqг+$Ep1ЉחTJK?JKY5UDYw7 fu:ֻ1; {d\0p<ɖ:Dɘ$k/Er ?E:=~UahG<C$Z6SE?^]}վGwzڨD(>D Q t_~qq:}kv- ^TT^jI>TqAXMʺ9ٛ/ri T G Um={Ȓć\R3t1 qb 3VE h$.VrM\vy=e#Oi WOyua hL2G@@|!3HV;fBj;GEGFYfUG}tuxNhA􁽍"ӭTrƋ O_8ѲO%BUAtO!ie%Z"܊o>'aypvgJdA3mğl[$:۟3}7?!fS9BМgv?HsQpbFf,^Ot{BÀQ+Do/ߊ&/Iy>R{΋#TAg.FKkE@y?/`dDTJcP k*oV mi™Q w&#Si+/e:E`AZ%o8A?MXdǴz?-cm''TGo8(WoFU%W82+@kU'H{v9:5<~NA^۱diѐ\R&÷j,&)oo{ :ylvwGm ڔ<;4~t,@DR YUg"՗1_PļTͫ4W _=%LC8Y^ {<3$ N{,;roFg>Y5<7p~oѪ=5%;>D˟]EiC&a ##buty)rܺR)VD/x&Z}gd}cJYO';E9bkU[,Er~du|emy"w'̴Q1qtc_aS})J)f{-""ۧ/%c"ʈY&˧{f@}$3ЧI%2 /e_\1;F.KlC۹/j0:$:.h[JKHƃ3uahۘ{uxsaN3F̤,L4;hIVjqd47cTXA*&eܿ&mZ ® ;\r \!T[b .a$>_ IU -߅M\ޥ4$xz͓+0c$Q2'yb'h3j5Ax!S耵M2_}coҳDgy3HSZ+%)G398N +kIy}~3'mw5SzGlKQPCc9tR;A/D_X0Lj/<\ObMV-)Vc17t=m -M!kQE]@Pt7ghgn *$Bet\Ϋ}Ǹ;k~yf/o'JWbmoOA(c~){k2!sg`1I"APį._,V_S+b!"aCح1˗_?+9C,EG#Uz[:;[Ǔ,tHa&{6HL\Br=L~>_4F(j88ws9 UflaHRLa.޷(>wԊ\A\;` ܥOЍFqfM١e E[q7F ZEϼw3frF4!d|[A[AY2rF6xSo@vaBʇ9p+ToKs1U̦z;qpTaVLk;\( j1hyj2z-Qv0cxjwCWU1L v_H (ʾyBv̥Z;HswxPr3K@FqhT8pZ 
?(Y6@簶#7E{MMu:YCqKը$MBݚx+O]Ig[<GIҖ(JOzRu0|gF]Ew9k5qUb%4kJ>GUհa"c.dRMZkhc=E)`6JtF'KdVR],W6pV9]w^)'RlL.e4rǒ .nu.\E(&M|åt#鬐 \[ QSKo~3=A}I6E hu3?/ CO޾떃D$  _ &zW|>Ҋ艼f!@=o伮P-bEKX=#Ů; 9PW%"m;x>-{%o-7Aem)9{\TvƗ(:㮝q:N!Q7|dL)ľsQ 3 zEiSFǘ'4D5o}vق=֍oEX(kɌEZ@>Eʒe|WJ}mCYݚb6 ^ 4Z1l~=5 %p: _\ٽ3&<\E\%] X:qw4k5=Ր F=,Oo!Iާsdɋ ? ו Š#0Ư Zi]>& ŊԒSrǬzr1c'&!i⨣D ?umY {`vWzW4Wl&$>W]鞥~R ع ^ 0#nqtN1`S|N"Mq=ᓨ1|Y@@m \K&7k<$J)svSC@EE0zq4ЉX(ז#ЊLYk]s'v'ĝ{]OJދPuH}CH!;~۵_%~-Tɸquٿ}<{X.eQ`?a:N16uv :yyël{2 ' c8Z mdž=oh668%g³O?7x KW] V9?Oi33x nllLM`c-o2AK^cvs LA!_$v6u H\YO#+Jʥ_D^~Xh|fxz@SHWg3MNJÎMY\.\)זދL)ՊBak,s7݇ф҅{/zmGID3)FQa^|Z"W::J Rps헚+7~KvOau$ 雭J<'CzlEL8pē ѿ3n\u-;g%'ZYvSIf[w||$C߬slՃ} Ջ oʐC.F_Vo͢^kMZj}#$Hwo"OM:C_2:6.qI}gv߫7hg ?(.q#& 5rn6KTIŠ0*SFaWKz{)if\n)2àᐛhof Uh6nd(跦e:?5=zPC+lOZyYA4AulFH)!`#^36=jTNf$^R1|p a4 1MWD,|Wd{TL6ٌd'g/!NA,E)wLwW*34tl7+@fwm.0jRh悫-H| B,rCjPqC*yH)a xݱ/X2p\V[yBg"o:Hݲ eeag;i$6&N}Y > hP,DB &f%k(FhE(O2WP21w#h㻆Y4&Vjسr[lou=$v|:xYD3#fk3 4Hٶd`GH8%y+x˜r|dk~8'؄/^An/?QYg͠DWǕCbQ!2C씓pfn q `UjƹAoV#_T ?԰2aNX;S?s{t\1Rg6M?؝12'PߴԤ`c!Sw5֐9Tj7RfO:G{ N]eNjU߻TzFju)!`2M{tPg)2w_]*@9˯ͺdk-k>VbP|d4躅6pN%v}A@ ̤kGݴN? ֝|v4~ԕt^*іl1!co'8~Uxie\d ΀HSMD g5,'V!sUf$&2.W^*sYcW҈{ y_Z 1j7既~X,t}4M=G w>* 0uc6P/.( jm6wOA&"E_hjDQGWz߾@FkUHv `<7" Wȹ[6j0#j$*_B^|ܫDPݲg|ׯ0jy{_[R VKi l8܏:f_J$np/*7q!m9_x /&4kpf\4ةKUdC R0$pE⊶qHSؾ"0|B@8@ZO Y<==;b 6  XΩM&[T;鯁Eƅr,l&6Ng|%to_fk] {;-nndO#(') OzV3+e5 )|G9K~UFvýkR.ؼ}w^~+%&3 9UQ\Iaah|N`}k[T{W:r1_GO)I4)p87䜞L#UЏX#Þ{akI҅azWmh[wT y.1^Z 7W_aAHT膐_dUl'֖˥0dF&?}WB>2!k$ٷݱ5j)D$x5>~3Q [J=kLm(Z/I3&H=h,x'xȭ`8 bnz|+ߥ=t#夰diVk";jVIb FȄ0Mwdz<ѳ jJQJ -bc9B$R 3-|FܸiڒdPCqOW'8E-3ҝX(`SFM~n/h_<݇|ƍ)%6zn T⿉uJ9oIu횽q`+̜G-,~28&3:?.5՛B*O t׫2 ;}p.Ÿ~q mHd?zNIC㤾~⓳T-\p5}FQ|V} /7ۤ'R^G> ꨚ"ϥhhЎ/-oEE+\Ϸ0?;m7Y<r;__蔛dJ7d?k=W]oT~_aeӼC6Tv$b0)؇WVZHyxy{:OڹfԼ`b#-"rȂQbo j[6eC s} \;<V?]xq%`LsNA!6^?@\JÛWnuG):8bD.Evd0Pec+Zk t<}\!tVutWs3u/ei8LE֞S53'ȸ<ԋ'c v2CߍΉB>v<&k8 U8~_V 3֡Jcj_h4tokȪ|עm bfvMOU4=K Ap~j-G&ήj{ xkPN2Ś?Oc?/8=}ndlsNd;MɀX]?7*`qH1$*"2^6'*>JHkiNgcۓnM 4л-E^htrr\2Q'~@#SI_"rQ1fQ; #6IJF0&Ąm[V ya(Gsbݡ23% . 
)1.,zp`En~\G3RZs3''=9%MXM@ZZ-#[ :[2`dwŻsSe`]ޡ^mYS݊ "FyLa絔Ζz\r0)5 ء?ǐd)UDmpdzaQ]5C2"Y#k)!ϐfF89|[C&z+'J 猱*E1-LNsiW+yA<}K,?vJկlH9YM7(+֢ F$)nTKRJGoUq)Twz7s<|ԅ _~Q26zMk$'jl↶5`H\fb,BJ_ɍb0kC2S`"b@˥08*RF)͔ltUQ>I5HM`B6I}xw=5m>!U317N0 ҽsƲܯRBҵٓa>&lR5reH-b]HklG%5>nibHJs 2k+FZtTK-a,,gGG,Ǻ8l<[t'~ø.~P"A=Ui["1= 4' F/zwnh2K8{z@vb#b=k>ÒBOI>v_p( >( C;ѦDM<-7݃xL& P)a )*N&&ao?rC{WM>b;/2F[?9@PLJdd $z0 TUI)BNI*vP]N<3kfk"!̄y-~sYw@T1NTe%HDLGu$u㚧":Wعc55sIIaHWt=a6Og%}wKb؄evn{$pj;tjUin .51EH%'(83t#y$B;tbEy7[s'7}cɳK@H? T WR/v\Z`YMH:VK;VTpǘYMLs>~9f֮[uݱ`nj+Hy7>SKT5Cʥrz7%c }```+s7߂ϰ6sxm/=N#?)KPbEt&Arb㬌4>FR'* "8׻f2Dr Tk|N:H RtǕ6f ܑ>L"9^N}5jXUԿ74j[ϧB1yQ2Kѷ(:PJpK8|?Q):>[{D>ܒ]H=0nL*/u4N7bԊ?Ů&Cl U Hk $.?@Ƚ}6ı k1WLmeQg6/Urm  7j b:94qg~;38^]8w- JE?PG7ӶɃ={{xѝO%ސ6zwz_l1Bu==t0bB6W֊鉬n]vy HcցCc%s֛h I. JqS"^ ܎'z2ۀo,t)C2~'ɂMv~2DVͫ5D͠L}{’f^JJ=/na4 P }WAd㥃I -_,穘t-ʅʮPEfC%jȑkv^V_wC G| [kz])W5'@eKE>~ؖ_7BV*g* #qHÔ%jM)B=U')C;̄~**<%H$c)Fh.`1. d?V蹯pDR[cNa}_J,#l{8''"ORQS);r^ٔPw&rc\TtP^\W!>&Rf!$Rf:t %>I¶n^"7^&Erﴢ~׮>`+h)_CSB=A7|pV`4x_rmv5Lh$\GJtܬM5=0 rs$l/SQ5?IS;&#=؏0MDL^L_y7cenW|fjOc9Cc6Wf8Qu.CBHIhz0+;h%L+Z(CG1] 3x =K`I)L4_&.|GFv01L3BL87sa3}&@I~mNdI+d.{'^o˓ 9V ,U91P ܡΰLȅwjLz69S>,.7٦deBCA=d}ؠq\S'lzS&m\?PƧr_>bfn4YXRXMz d%~ͽXPC%1'U1iehP\@.YlBe}gzVB Ky3h`6eݧؖ}h8ϧo1pN&+xrp|SXܗວ]<3?l[Yp6 mr䳤hs;q$U_gɺ(+=SWzT,$~!E}is8,[KCfۢ}Fc endstream endobj 1469 0 obj << /Length1 1775 /Length2 8127 /Length3 0 /Length 9223 /Filter /FlateDecode >> stream xڍTT6LwI#5twwKw+ 0 1twH-݈4HJI#J߷{< =ع9Dj\..^.. 
zz]0^ BD!mr@أxp p pqxr`+9rŠ:ymlay0Y2~Ҏ %PlA-%O&1[IÃu`fxam+d2@5 z-@j %HqX\:ʪ 'g?npC~PG' X@ U' Xr:B@w ht @AZ |\-]N0WWï9yfy,bO|w/?Caĩ;y4ac\<3ii+7؃ `l z|aAW+% `C0hY~<'Q~__f Bq}Ĝ*J7(#y||AAi/2 Ǎf?E0K(]rsY>p?7O濢_)98ƙp@Gןu=q jcv@V`7Eaq8`W'J C1~͚҄.vn.q}o8?M)Z4~x̏+~DZ< )P_g/pZ@0 /Q+/,0<Aߡ66`cP(> ~LxKA~C//_ nBD~,o?yX Mz,̿?'m{e y,1@-ECB~H{o4xե5:3xR:eoq]Bjg5kۗ'6[1Ɖ ()u|}[;: hYW9|fSkZ@l=N/4d>"kNƂ;uq?g+V 8"Ui ].gdƤ'|dvR-L[;=(J\254ܧ#&U22)2PPcEѠS} Y5+T pS#]: lA@<E<,`^ X޾,b7<3 '7=L")(hp"3".Og,2ts[5 LL!\tQT=!fhKUxԇ'y)UB|ܓLֿb9i#ýT=w}e88,O w"IM N/Mzi.⡗;eBQ=fפ^#ZcT&8Ljc^6wS.x"2j=Gn&ߒ2jVWG(yy@jgA@ng2ET7Udg!T400`^+do j4DW,d){ܪDw5^ c5}zp&-tWD;{*n e_N3Eͷ Yn8~U "fTP憈B1Ccwؑ&i#NwDqhիKҌH2wLc)[ CQ/uKlg1PGƽ%e g axWkEhyBc[״ӻp RaԩGJ SdIiUuӳo!yc?5N|To*1xYpv|\yĴ3Lm]Z+6DUЃ_ZA>vsryeIj&+֥&K>.x3yV3(1Q<*<~{ɬ?O-7Mܴ,!HT\2Z×,bjMIPnu0P+}KL̘7aD*ɷ=ZwK"$UŃ·S^9g/5 :w t0Z s*y,K4|QB)"V”_I `cO` j>.=D%4 ʩJ]$]b 6P:} p)'+(l߫ zJ>)gk -F},\lȌk({͌]n#M%el߳,e37e F_)F|z6O^6ډ* Vnf:p:*tV&h0jl|r2#[ĚZN^0~? 3ιe)k wBLqD-xDJH1#3ݒPo*ߴi̓ZؐD;@ysR)gM9ݢW˥ަ}7T3 )"5 486nu(6v r QHR\ҌBNӫ]d")FwN| tM6>6Җ> ՝"j|_KjQ=vWE,&O4}H x<56GK^M>,~S)=q}}m87%"|D>a*R8qVk= ߏ36`q 0ZL(kHL?VHWTTejZ@k eGIBߟmv6XA9`V&/U#[c98,c\Ǵ[XnYVl\5Km"5T/R07Zu_'_L&QkO['4zкKnueSxΪ#ʣ+elA0CM[ꍺ|pd`pAo^`6i7O>}rf|^_UMNCԜsALy?XMḦ́;{[*=,^?=z?@G~5; 2CH%3~%D@c%ԥ~JM )Kf"!s5b7|W'.R[Vg1Zؕ7#2^8opx/_f%vSR v?SlqT (Zm%Xpeoډ CDUoK&_Jdϣ'+/zEJ+ٷaf1UH twM&sE0pV:Sdq~ᱬ٪F^xIq_ Pl}ގ86PFOQ* > Lrhp`R0Q|rR{Wч A3+K"Wh'KϿq잜!#S+BM.k_q(g鴱&ٍ5ZF6vKn.凞WkMx6j1μmijQʋ*J}(Yn RpN(\EEqc"!+L6D}f|s`-#o18{/ F7Z;]W@O͍P%mV#t%aw〰37 "Ђ/N3u[~~c"}kR"0.ظԋO. (IUp!%r~˾:.bUznFsm''>Z$URĐ&{4vTNLdXdMբ! 
r~Ӭ|6J#"1I&鍶Wxt\ëAWYo<aN[b#>>[$5T5#x-[I3X| 7QͮR.#$Ɓ KZSC<:ntKPRѶpmL䞥TPjK B#0XkBE?g~䜴TӦT^4 w/}GdR-ij}n=I1+ۜ\F.8C`|!ktWY{L9N0L˼bP=<_܎FykTq mm'"tFDrQVl&(UU {A6Æ㵝"dP၈S6[zw F1 aQCV_ CȵƒSTA~ZB˘BNЛ]Zmrut(rx\K"7ϖXC\Xv+ ^".WYtfɗJC 5  qQў)#LBQkWDV!BYO-i3kU^ze}Q>@ sEM~)"9(fc)EjN&'- )}EHFnq*_uRaUOfaT}?rHP')Akgc$]i{JhPut&)ԹC\k rqvAH++Oҋa՞%Ox~(1ƚ#pDyR 43`9 ښYup08R^-QTy QXIod0_yfkGZ-#c%kuo8 5vp̺}J1jR"_F9&ok`Zvg?#s*BBLwTA&Oq9Q^NB" ;7G'RLN2^cJxՒY BhUG>egtjMbȢˠVyz ˜ $tw3@6DOބ 4l}:#0~ʸ+gc!@Fb c[4ALƸ?7#+ r~73ܺ>fL1vix.]ee/飒ʱE+Ď1{P_s˞v*Ǹ6^5 ]47K/E/O]ϏF EgEz%"L@=hN}QgA4Ԍ:$GvїQq[9n}{IS~֯p$v>e^tRK9O7* ir3Җ􈆘H0&d%=>Κ@z[9H > ց>}I&ҭ 1*I]~&LL_[j ,&ocjXkSWJ"$g8'iyI={Lqt2_;P }^^R Keۓȅ+k|a&/s K[d"{34K88 -UjFCW]ꖒd͂*uOK O4g) P݋J -w Yy~zxS<׼`jOiE)Wuxw'F C HS4wj*. '/c[zNI5!rE <J*?lZ_O] !ՐX2\Q:y㧏TA xD1Oηj9Txg~Yb< [3c=͑peۑ ˿x}G{ rhH/J\{,A>ZMsgiRЂ-C H3>-Ghx"AX'IH?byjTQ}}f؍hsf'п54\<]=X#2>ݫ AƐɋZ >Yayma]s͍ZB^.t)ݦ ?3fdL` 2( / ;dm/U龳uZGّ*#3\}O͆'WB7Nx^id:P hwѳ2TBT*bKL~LZ:ZmN&-DU=3yϜR 7$xUCp@#?4]Z"cIDNMdL@4'5Bp^-.WGR!o1[VC/Q$jrxkPC_.҈fO8d㤤j۝7#UD_n+9|ރ8SN4]iX=oW20г#>惁.4fը^1xp3Уׁ7V3GUcn E!_]@ F>EgYvgP|R҇e-A&s伆<@7=Aܼ:.Vhh!h #Cbr| `wˏi]Mkuu+3PbКJ<ekL nOo3;}cֶ; l{pteIm:hZL yC|hs"LoC(tZy9$mp*L$D!̪ NhٖΉEOE{_d}2.);٦5o +A"(k(\Ap(5ʗ<We-(4`uxZVabJ> ˾ϋ}ϫχWwaȃ sBV^ŕRP5H뻸Zyo;X ?m0%xZn"goGH{P^ցz9~fpvQ1)jMT^D?igAuk3B!f͒J:0aZ!F|;C}b~ _J0 =#a)ɫ_*4Ǟk~\I-B*;ɏbz,#ds*<1Y% 4Qޛڧ}jS ?dȨ)GHѻ妫{sbLpa'}uDT 斤Ғy/^:> =[9Pr 3>v-1a{eq%—ޚ854!FAw -2__KݣޫT^w&PF?`~xDss(>bf# AM]t/C74l@Ngc; !ħ{'%ߦoZ9mLҚL!2χtPe:lYP$m9K?a -բ`f`2$8 Evҧ?[Kx*^]e!;'fZr M+qz){wfaH1৵$?g_n^!=1ξz:2c!/STrzDKY؆YdŴt|394!@{訝ZzlBj!eQQ 5vv$&ye-E7HD[ֶt1 uVnY>J4* um>CC}{c&Bg2Nh7XBW)*M}0,V痜^CFcv5b}9qOo緣je;v 3\2בYN!Hspy',YhW}ʩ!XFp wZ uso|Dk\$E[ZE.5DYN dn4n Uֻƒ>[[+(0Bt0mRRJ43k-.{u. 
hJR)Zrr&%8(_!|C5C:(`hnj`q͵`2SY}?t_ǃYK|Jye#ɇf^ȩv v I endstream endobj 1471 0 obj << /Length1 2065 /Length2 14941 /Length3 0 /Length 16216 /Filter /FlateDecode >> stream xڍP%и;$844.=Ipgu{UVWucʘMA bjo 102dXl̬ )4NΖvs>dFC{; `efC{'^)@ kotwp4}G _[@dhbdP7<5hdhd.HCpYT@'W)௒FOPtB f|l,Mv..v@'Gt<@hoc #ݿLLm<,f6@<#D03_$ETF>g'K3_52Ef ;S1{[[-&}`Zٻy235 S& ;KGl>D́ 377X0@/%_|fe},̀?^F@OF,,SKhni7hoqN`' 3_G$."Bwx1X9,ͣld<+cgf|??9g!4Mh1@ߓlyobͿ6Zxct]@k` vT UZ_ cD?FrKgIKw%co_ gciTwbf?-3F?f*#珕 yHؙ؛}#''#@/55kLvG>3{'$߈$_ `?t#V߈7/0}_@j bMe2 fC6? 9?? V5[~156Gm:]>#[toGv~pvauF[?re(lc|Q? ?#/c/u_}@w %{`இ:7i~˴mV"}XаļNZv֪\!I}^EQLJkF&;b#sb㸥;D{B%6|Q"Ra0PL͇(PW(1$dC{ʲ y4vUAc7TA{Qr= nۣchCȺ&Ij 䖺f{KaWnѤ<4(^Y_P'qY&Ey>~FsXMs+<4] #lLwLجl-"籺88"*uf$^!Aj̧;,.m_;,`&{:dQ+wE`:Op6eZ2SQb;/OQUovQۍEO,LߛL^sтa;RemE'_UZi1Ǽz ;yvB,]fQ,|0֨c1UƂbDos,,#v 2:a3'@&c@nno[Phi$pJb%;s`R-J"q/~bJ%fܺnƖ\5ga((H~1M}Fk6./"G䪗zd٧jS>ql-A&1 U^0L`%<\Dg~\l6d0vAP)#(xɧp..JMuA;?˹*YIk'1V##dKp1)\~y'a 6`$z$xoaʍ)䉑Μpo%aCpg3-"i<u9"ZBOE?0>kL7gs~an%בbyx7є6pR }mT(Xܱ_ \~rJtXЧ헖Y{ьpZFO<XQSSKiՄ 9,g b0ЬÞYd*`MytK>ߕ։rwEߗd $4$nWQ55"[ e[#+ݱW/|bLj\44So.7PA,`|4%0(>ښ)|cz-y/g9_']&%HzZG5R2 a6O҄cvoC@H6io=b@^!g{b e2~ PF&9I$&9He3Nwp n>LOk։/:-V5 ji4v8B9G-xDBb{O.n|5P@f[S^XQӢD ݫ$ (Tv^I. 
]:45[6K8o2}P|8X͡Hh#3hȴO_-x_吴JFnOF'ʐt Qb۪:a~M5[BOl1`y*\u tC "\/,oyPɃπ:!/5/Ύ*mi1y$!VSPꤏq?z2/KXiB{eD\L'kʿsxhy{_^mYh xd[S_:5)HQAS"(=3ǟb0x~5NKІaqa$Xn)g/׋@T d5R ٔ؎@aQwy^aŒϳoYf#)vy KYk3sQH?v6a %3@V"xv:@lϓV C0Emž,m.SKOp29'${gt$RcnRmxe@lI.޹y#!iP$Buc\g(:/wwO"ܶ"?oeA(~K(qKj*AOTͭ\pl/X2!@}i(}$]n=I\?9P"w9vYmZV,派,5'~;td[N`.ҙwp^=t*ZWFT0pRU?14)6Ÿ_PJQ%*$mhp6ؠ@[6$b<ǿ :}}W MH WZXf[q/?;)%aHy$(خ!0LFf+,h B8C쭥§ e?IT7دm}fMd!C*J; W #գ]C5z2IxAT@ohwbf0܉kw̠"6wO *xz$~zq!33Eii de:y>8R.uMwz gi5iRVJ4HKߺ6kTXSq~F֧2Θ ZECF#MFX_:C7~苉|;p3L̲qfMWd%17~>*w`IvfN, .$x(GIHwQK Gu9}ɁtWG#10 P~/P=2:lGH$Q|gc]j,lBw=ɧe1ZŇHxENS='*'Am<;g[!1wjP@8Tv$mǗ8!w89U22qiܟc&;زň{mżP#crMo|:ﹳ!/W[ph7LN%ĵ"C']M6U67Fοŕt[6$'Xm$=y"мHB|OwH+|-HPy,5;֟ ֚N~$p ,Iw]k/Y!'4t< czȲcrVn;8^.n8R&Ԛ'7*ehylsI7#Lsڍq.Q:"H>{9u<{fGfqU2 Q(P1 mipX=.v^ Tg7_aC^ R3o+S 6Jtu[(D(ׁ8tg4X<ט񤟽0Z%d ߌShd Q-FeHD)=ϱw>} Kg##$_Mbr\J<ಸ:+7YH5v~J2r:pP%qGar zi4I_acE]%eiZERGBO[V!BGtXQ^[I4\!}7:> PsS'q|=t_4B*V!9E;I~i u&P7MPhchhYO\Rt_&rE"{O`i)'Թ*}GzmZ,d[K ߍhAQ&6uMSn`U,IĴıxAW ekh~Ac땍U~㔃+FJ6~쭪!r93K@ u u-%PJ WK'r+=g^Q,o*!$CBM;d VChfQ՘by>?H0iohԗ& N)|Tۍctb:G΄)[#жH4z۟CȬpkOf0~L1tuΠG-XL;7_v6nut-o\xHc3,ДBۏI>-;7ӪsSmpM}-Tx$ZqOX-#2헜2ao?.S,5M,rpm(׶%7xP"aIyP@`e*'ȧ> >-1_qSu; (sw2Br|q0濍iԓW e}h;O^k22W,@ی^aڧny<ay&aǶe AUL&k(eI.?3hLwM!l(8Vpw\:?+.0gۜu؈v6d켎V&u οPvz-LdκF&G>r$!,"#7ɥ>I7S͍+Up6_wFAm˼n1D|v4veZ,Aڴu__tn ӦgbuF~b( XrS ޚ:~C؟W!=}6& 8k:#9{oUf5Z/2py%kqid g5q~_.Wp wȤ|5}Sq;{0o9VVS~Ei\D(~e|rl| VHny by:YsqNjZX4 :/"t#݉ du-v}/R3 2OmB5RY JO3;:2xZAܜC'qþn1! 4' Nr$A3Q`P{Y;m+M^`+qH6.)[ǭ2WuXj&ajR*Q'{pCAWYN0bF.ކ . 
0|l1O*IWSe?ِ2/H=8%_ ӗ$M*?]*KؘJӪE&{0=*m|+7N~M\?7^Wv¾}}hig*%q6Y>0t**vaxl^zB0wjAl[0H^/Hu>K |3"4U_ ,u"o7R rX<p88!'Ty'(q1@t..9^|_UԬ^%J 2H{5y4KBq)3+7stZۉ"|JY%N9Kί>hŁ_M boFOymIPm Ȇ`-[eAyyr`UuB~q̀TPszBC1.ڿB"f9dANKO0,"5A泛ό_㰠Y)u]'0ݍd:c8Ǔ3^q4&"د粭8'Il-@:GHq:=J E8W25{?4"th}gN,CgGY T8m;Yb*ȎߊS]D۪&{Rԙj;* g;#ì~@eSRF$xc z9c\wMjuq1C4y7_j|FPf5.0ae~um*~ГOn SJзkF<՘h+h.<V@TGyLSB-բAnŴ)yzyQs3QE*my)$hzѹRKJ) 2Ԧf9J9DЅ'*^; ؎cejl 7pyn/7+2ΐ$.L/, hcL񿜢 JH!^AkdF"IL*qY[{j~N>-}<#cY=o|T7ˤ-;kpM4B1,(˼$y !$i*F ٶ6ԴP.x{P=ߠ]Q~+u7CeXNly^*뢔Knahu|)VhE[_[8w/YYx >>j% S6lIe;O TBx>o)>v4֤vmA1 %ϜWdZ(IR?ɜ}*#S w%.m'̚W@ zBtul;l%1~p~?ss]O@vi: 셿UPeFc+nLne=B8bXxĥ^8T~38r s\Z]0qz^{J8RsjG{=˜z6 )NLu<֚*_m<-rXDx~ˀ5iDvm-ÐK[+aq@ u+T_X2%4w*wψ.So%ܶS}m9_RKIKU-{ጸ.-y;2lj:ʹ`Yiy".ctV%:>,d Wl׾g+͗+_d*:]kl4x[3cⰋ{bI42ш;q#y|{ kG@⮗T'檆|ctFfXwA[IzزR@E6"y;:M]G^ؿ)l mȨ/h'_K_SmhS0na +Z% Ewe0儧0G]NWZmAn"IHAca"dDMRx;$\54LӓBg H]IeYM7%5|գ(Fxc(!4Qp Cnz34g_ tzמ.P]ݲ-Nc'oIZnMyOS= jt <$3ܥ$%vu .`RNӊNh uPVbMܴ aG5nҵȊt'S ɂX(*UGFy@.T'#m<.jLȒ3zi$YI~"A;`XZw0״Yj:0 Qk~mnt ?Vda&5!at|͡C꒶է6:&Ru+SՑE0U6i4;#/&O߃;cwc*UC/ȎJMʋɺyϼV@]PJF*+E<kz$IctΛ9m_-TYƋ?gIDǖX IUܮc3$s kCGɽB8GLkGgiMiΒR۸(M򶵯%0&( }nKPi^]M\S.Q^M p+1W!?&ZB: +ͰV "j Hd/ZȟRe mL+[8=YH-sr+xnajY 2׾&a2)H\Xkcw!>v-B!dͲ#$G6Qy^ zOdv榸N"^HH:ad#5 TjտM,^F:T"m~ k e 9y֊BjZKk;}0ǥ<4Tvڳr x~jΐ.j~;^sƅrLz$TĨ/֜%?([%y81`"^/7eqv[`'wOtԝ~`?!w(xlC0U j^nK@sm CbO&>h͊MhC̕Bfloƥ0_>zF]*7c~JYju--AظK>&8 E&4"B1=EB$DCkѱ R8Fő`H|BΣ״ۈ͎Q俁@;-$MC"1Vi4M8cXKk˅P(&Y#X:3OΩх$6 !Qӿڤ0!J&x IܹPS"K0 )2&$_R]clEy7i53snt:ozڂl=N~︆i,S*P$-!t#Tx?؟&#)_</`S5RQ 2 AGD.ë΅_p᭶^6(.mi8o9KO viFjI"ACٽHf#zo4C8Bg+.y~862p^ KE؞zE:PUqx}Sltrjgd^zs9:GH3_kSYO O2bL[Ydϗ$NZf̔!!J_f1ٛB(k3[|7f߬0 tC柕u)-X&ŞӂmlMeH+`/,)7p}*r-iOϫapIugW`ǹ ?%l1fDB*a]2h* 0/)sJ=>+;>aß=ʼn{{4QKd<{ А+pp$|! )\S|MG *$)e}TnqeyE>v0#j0_)O3Z~ "Ü=U97!RY=?{IfoA?wdS~0yi"gNTЖ1*@P#3`. 
{bzI#qs.=ú\q񕃶^l ҚV \N6)DiL't~125ZN :@GͼՆ b /RttP`ٝXݐ0Lò H:L$9z[~sUV˜ %n j]I%?¿DHm`p8d/W)7Jz1_ẙ{a> $OVK u1N^[1*41 ~_/nf>w"S|pr{~:j%t!TnwU Аd#B?-7z&Lw5,]dV졇xسY|K_ J@LR,ee}//[GΔwYEFXzsμ1qܮՅf)5=(=uHncR(Ym!Q:Д0q_y>5$ɚ,{j]{^s uފU}#(*nzA++4jZ^f!&>qP+,hrZ!*(e.[)pѬ4sz3qimxݸM) 8e(;9%.o`w- a4,,;]tO7TZiH^Pp0c̣ -+$ y ޖ8f|# endstream endobj 1473 0 obj << /Length1 1475 /Length2 7439 /Length3 0 /Length 8432 /Filter /FlateDecode >> stream xڍtT6)]5ttw 0 5tJw7"JK4H* %{{k}ߚ]MK*aaoX9؀< DՂliuN{t`6)1N Pppp8x9@'(@{'AbPf(C/< Vְm038X~$Ns Yw44!`?J0[`nnnl ;g6{'+QFf ;\_T@v?!07hΏ.P qs@ '88]_ ss{;XBlU%6;Z :?\A[cA uqs6w8ٜ!FdU񔥡/P3 N`c`s6P{7_56+ф #nnq/{`% \>^'BX@a3wG3~|';=+GzYCm=}*zz&OR`>ҕ[g5.C-?>ӿ:v b*&h?SwῪo$d\lm~7bW#i]`PCuD Wz̬l@?v l[/B`5{gȯ1 /ߣmG^q}0QNCjnoKw<Q`Aa)Ǚ}N迮`6v;!7xu ` 9v89=h ~6`w9̤P몠 27aaôK=N|TXͬ%23]2&H:^[VMY}J)eEi7p?uz.G7EBȩN< Mª[Ǜs:э BpJk[ŇJDtV4` f}Z<$+vQF*Ԋ­W nK}sx]o\֏^Kqbff<2ߵ.]dLR9^;5brzWG҅W$ӟ >_3hc(tct ކ2FѨuËqҩ'AA1u vx(|"RC#B{ۛPfKWmtEVu_w$qΟDq%\~(,Du{HW襷'O[:JFCQ\jdXL7?bkC?K_D@/%.2"p ZI4Figx$JikHN3Zau藵ԢCm9j$ǖIGUKQ`@Aa۫CZd͒1Q38DߤmIGcǕэm΅Qp<-W/=H>cs2do&PE=\$ѾVf"r$-b J$|״E@6ey"0oᡗ)l!NzdeMlL:ru}D'Du(u^ =7nS0.-G4>('LPwSegt>E1%it_!@X_IDIB|U3}*ucMnGXC|&rhKht.7]o'?;u6O=_7yVxuf3F8؄9Jh]+>pi"KɗNwPDo4 Y%CeՆ Tڇ3եQ~ 7 I@Շ;[JSӮ+Ձ9/%Z.?u9P#kϽ׽PAb$e9KO 3Vh o7,nPTaÌVep\&ˑhPT 7E[3=bC%\^j5r"?2ًIw,Ӱ!5(x/p42P%wzk/--`55dq3z |T=6ڛ"AWSijh]1[$;$07n{jbƋ`: +"vYk'HrQyO^;>\qG3V)$:Ԍlf-6MM |\T+*k@ޞ hPJa}%y?ُu4-eY?iVؐ #XV9H)ؓ_1ryՔrAo PkG%l:%QVX-y\Q I6A-[RsjSXi HKukѡ8?Vk]nxxL!~4uPg4MaiȦ%gX|CQ&!C9/[9V6@F$XU)=d~Ʊ{Z/˭={̌Wn%H<( /*ͮ@{3@%ENj͂ Fus;-b} M2ɷ~gOn}6;*p75_Qcic/TOhbog bwF~1ϥw;T u1r|PF*^댍igAf ~\>jB-뚆1G<-C cQ{S}:J\1x29Ĉpo[j7"jwL2#-W Dq/Єp5'$ .Ic'Fq"(H Rz|Oy=GrI&5NYCM^h$8e.1b߇{vdG/҄m/ _JU*6>bjn+Y_'g)@ثa)`.~Ŏߛ{NF)r^dz8 C[oy+..&VJ"^Z3 d'웪P4;.~K#uW2A4I:Zȇ#w ^ Xyxոךj tQw7n}ҩ :]-B%U 5Rf#E},2?cQxR>4#}UgUzLiO5EY߱w} "XfƱnavg!W}!rB<&ZZ3iOtZ|6!Ą@wo 8HF5ʛIG9i&%vrA >qn`ytNƓ6gl|"=VA1jyCC: ]?0(m}rSZA\y ^i %7qgK4t '..5XQpd&#9g7SSSL8@ѬbOz2{/ƞ ROM+5:T+L(# gGDАL9;tv'n@5׺j- xK*oȪ +t#޺H2#;$K6zv8@[TB`'T*Yr8ߤiWTw8jd 
:-=<`ڍQPV9'J^7Dig+Uipv'R%u*t'=SCl]>׌з=Bp.ࣝbvq7OJa#w%x \΋6=fZאHP e{ -ɱtOhRQgwTrH ¬-f hPߕ=[MwJP\ ./ R 8.m4%a!KkCr処U3ěqs瘀SR364ڿ~MV =na[Ac4<KD. }1%j {@NoRI NB@De6GBB4VVEEtTsglLF''6 *DwaG'6\:q4=4 L 8SӌX H$6'Aa 1ꟵeIwN*K ۯWb3_ē~X#v"Ձ(+_wK:>#|GkAƾ+oeeo>btbwrop6"a}O$AbP+ib A%kw˴͇]fFs%v/%{&[{Bm V @m_(A>8a@;`kwvF;n~ fNJ% 45nƩk,@VA> QMQWT,z*L$SEwXx"(+xD]ZN*v05JW[-ceR*6ߌ ܜ fo/ɑzvp "xn2?J0 n@۶9/SzNpF6i\cQ˷Mow|zb]:;-%wD_0uc cq["^lԗv"99{Kȃ^%cn!`kasTmLiU멺(M.p䶝Eqv1KԮ(>(U"^%Px 7773#]͛wlj8DKuʬ*B+9?X _XvM,:Xr:h|LLbaOUBʣ8??,4cXCGA{i+ 2L&33ሠzOqC!Ri EЙ&ZE8U*`To!‘ƃ&Ri_ETйfMd Fc^ ^<.r9z1O-p3vLl+ԏðĤCr=11@Z^T)FnmhP;B9) ʄx!07`/hߔi~}-\$XEEhܛň9ө҇u h7\9ziLI`⹞qJS6N2H+wFPoaqܵѵk8 ޏ3AP0ŧ8/H%&*;)xK-ŶjiwoH/VP3|lÿZτdplԟ|u巕yCei#_0FRڔM%8h 8VV{m F1n]( `#"RLdHĻs8tdr/P9hȴG7h߽o_:¾ס'o?V@qϾTA13Êc/yPҶ}(8tVlGm9{LgGA(З71R$^_/@J)# x qU_ڣ{Gh H /p~dQ˶#j" '|̚O6;HT  Ukp^7q{{`ӫ)yCGM.N&95bg?r\b[2~M~}%>-oȖ*ړ޶cܹܻ#0hI-svg+ƌ7r>}GbN+Q:϶HNW<^+)vJfy%ި.g0bXjm_c\|SM}){_Q\i;W J $hiMdx3O-0IFko 3%%}˥s[dX7T"ܵb%!f[wdkӕg߸γ}r2|oQ:gL&DPf5DVIjI^ klR1ΨyUraf}Ns,ϑWIu]0ll-0VB]^EŸJ8Jk~*ћyE`؇GrQZPiY/)7^3Œ:݅vtHr=r<<0DiY|L^~p)<<m] VцSVj?Ӛ/Tڶj6b3&>ںCgdAv-=؃TbתYgSY6*3d=16-Y殞V ஫tQjj2\X~?^yռO<-8_D8uL&>԰ƍ?A;'Z`;'ra7k&,*uh.m+xʮ/].:Z;R=zaXxeS7$ =~~l;xm-bIR_;L$/Mlܒyu'CDoGHIf}߻cˑ&o ~LVP"`_S#׉W3uQ@tP5orx5f$6F6V_ۼS 6\/*vGH`P652%3Z}z6c`$8w ǢD^̭w*0k/*EA\9mhP'6F7X|@Eu<"FuDK%;B^AV \ ||B,>! 
b~rDӯ2VʸE͝\EtWa k:{9]RM~s <i endstream endobj 1475 0 obj << /Length1 1715 /Length2 10422 /Length3 0 /Length 11534 /Filter /FlateDecode >> stream xڍP\k- A` 0 !8B`Ak~_uoM՞WME$ffo01$x̬(TT` o3 6lo/yI!<%{;  `ge?@ 6(1@(TN` K1 509Mv% dz)ao A<+% ufwc!u3da2Wg(TMK_v {s x5؀MAvί.vf ' 9E/_Fw`cfOH3hjjoY6 "3ڙA8ۿ]`+ʁi15s6u;@6G[3AQO2}v&kmgf70ۙф$'7Մpp@%5=@:0v`0m6x9]A ߎF(ll3)`ۡ 2  =6eVVPa^Ll&v.V翳WO=j_*v[/࿓)ۿG\g?D vMڂm<&Jk`:V dv^9u,^, v!I/f;F/~Z~?_u l?!egjoޱsqNN@ѿ".낚T6x`nǘyy,D|<ЫxY6X^&_Wٲ \ 7_x@d6@[Xla`zY,vx_/ `q|J*gB^6^OwׄL]^GbId`o*lu[/Fƴ;-~4]l3J^Q^b6Uw]_i"ma"iAjr=| @'U=0*IVŰ7MR5n4|HrO_}y">1:dTs뚀&o6.G~Dr&`J[ITk9e([J`XN{1(󍗉 ,9ݬa#8q۽="l$ܘjwOI_ `j9Es_v Ua43I=cɪPv'IFKΨOt̒?VF Z$}+L8iVه Bk(Ąi$SXRG1/Q n; *|z+)ͅyQӵA rF0Ӑ[_9GVqk#|;hE"e~YuP!pp~]RUb~C8xB&[՜yHY#^rPhok֢p7lQjm~fՕA-^mΞBuBd_ʅ]ŁE|H{t&:ΏFy!~#yޛq%}d1Jbz X8L3hh6tO'x&һRs[~Ѝr1G5ٴu ',JʂXԓ;WA{9ėw?Z^8d3W7ľ4Rxݸ,'7*.;z-|, 8aI '~t;1~DY!k$e;د##vʂ0 2%_uyl*`N bޭ% %Є$-ۨc+AOyGchvyI[dc i.ԃ0FS{ͧSIezSc}d=R[ BQ'+9 .PJ0 ABmzlCwOJf3nA;-3>|Nڢ/z vhl9aVP׋Gz>#4N}Br"Qg,RnvԵ6[vᤃF_yyZr. (xx_k71m9އ|97fU!Fgkm.Vw*[T|z+?W,9[,A=YEp@^6j*w,@y٭,MLnGdBQGt DXGіUPڥ)Dʓps|>1q|ap㡅,, c-BL o[AaY}N?K$zD ӭ=0\ʪNpvBȘ(@|.*4^*39)ƺ!!C ۭȱ̚[W>by0fH6P1]8f\͛C*[=ʽ`L-#!퉑ޫbv!\vM C (p3 s! 
Pv8FO+;]oIZNlI5jt#1x#48!Q_7[E 5L` K~bt>[3!:2>f$ N9nO7 E#~y}vU{7^U$ *XbgLjd=.!n͔\{ L~ hiy38n1} 3%2{Փ67y/iK5zޢ:9IgOPktΒ,Y֤*'[ӠROj?.Е`K!',Hvꆠ&/TjtݍptRM)q%N{wV jl%2BɢCzVPD' ?~a`RE%}up'3춹=.9~tM_.-& &#l|h0zf ƄQJK%P4EVL RځjpyQKtw縊f/wآ nB&^_"CLzU0%;,˜1Pp82q {ϗ)rvojaws^J֥KQw,8( ,Ă^O MPWx(PIMv"=\5:Di(#|Edsc'$%;R9/\AջȻ:bPjN-%kGq_R ڧdVUM}.χX<Ϝ8mw>xbg|dK"iR--X5{so䅩7#/ 4L3#߽ صJ?PcS|DzDb 9oK6~x뗴fdٷa}c}۵@)lLp Q[Ӱvŝջ[Q/q4r/:=NpKI- ӵFKX1C0frќ`|?E`⤛l[lyxۂrZ-TS]Z&ɬ(#Cj+ؗ!M`U~Z8xYzy ʼQ^ clC8KiTQv[}a}hPsXmzwa^N 1?ިoS09*|㮜"-j Sk\gN#WH-«be*l#EJӶ7.UtAHҠ'mo<`Bu=* Vz,x3LWcǑ$ʘ`lGHw/ /jZh;,h2Jh!o䠝c4J"#+6}"ɊQPW\7~_YF [Bc?R$ .ȷ{f6TJG3^[g7:vF[{@kMu0 XCe.FbebP)F{߿wq+ }taboCDfO|GV]]rZ$;~_4KuUrm{[Az)0X7zUsǏ%&}0Mip$L4%rtg=ƛ-jyb^JD䶅+.{`.zcN)nZ![ eEʽzsULkIW5k)Kb K~|q6ǏOz.푊k*eb ~)1qη'* :MUo#yʨVEKPli?rr:9~x2˪jĒ\v$B~X@4P7mi//25܄V9L|CEHEOU[ȼU>ho~b,P-xU@5gSLBN4iyǝθXJ=^&le~]PTqdlg t**'v3@w M^ʁpt7@.8~%l7! L #/3`2q "y{:RZBM nYԌvi$U㏃)/~;Ԙ0&E_N4<3Rd_N.q Q0aұ/ ۱I4Z$ 7muގ K|#ϙ-m۽IxT fou( u N4)b:4jE=9(Tؑ+s2jGo<-4o)o?֤Md;(Ժ>& ;ֳ38$`,+U o4 ȍaeӗ.f‡SX3XlP8q]DiEa~7Ub85E%]whi(lOoxɭ3,$P.=$ǽ>,DOW ;䘫Y9zgy\JbF庍oi mfɟo\QUԂL5(ʛ?$ &џx հ[n{ų&~&6S&P2>le>Vs .q]Ctj|o~vcOQD\h:dO7ѹJcS?1QqR T=zsSP_g1K>h 3BYQR~@Rg àDÅ))?DyӷiN<kl;jfHh|BJ8)7}Qyذn޿eEd7(+[4E?wTXzh?rxЇ,>(d+At>K1E2@NkUk_؄"y@/K]$ vXMGT3i{']r\mfzX9zpҳ0"LZW9//b¹ 8>V [M}6帥6e\=tb_= ~OV52N1xh!'}e%6:Ҟ!rL\Ŷܡ5pB;,cwYlau3K׬dۮH5.(+|o#NLH3Mo#ٙ WX.§lo+H",>y92* рz̵1EpΚYhFMs~|J8-@dڜ_;>urBI0-C+DHvo{&&쬦h-=Q Z5)m|q.Qo1aT-_XcuFLB scs56js"g\W||Qjp-D8iF8Й׼qF&lX_GtS*I)y'|ryll߃;fNzEf)}0Ŀ&c(A߬+{Y@G[.xe^hq K;(ٛ}rq.*A-ߴ,t]s%qWJC%*o:~~ 5dz[G4{7j"tUHՔ!,rs|"$xfvdЌ1H.T,8z)˾ s6=,*E)0/x.w⿋X Oўѳ7(oC 1\ vqJ_=\g2h?eo<6$HUcmpYgǭ[D<g}Վp1]c7va].Js2, ;I"XLOl-B!{ic`ɨ 9FyC µ0!nەa`fy*ȹ-%3{I}h bǩҲVڃf fٚWn,j9 x(&RSk!ڨ#.qI;u!K6.JovNk۰HƠ|N8y3)kRGtƶ;hMY6ftϏ {ufSkki`aX20J-1U+-HUQUWcj?שVC&AXX7a4{|iǝ5~}湤R냗0e"+m #IOQĞyaC E`LƒRKݫ`gT #dYB|.8K MbvsujgI(NHxW"Ao<ݓݛI&rkn2u˞r%- z\3HM3zTH]?%hvJE-gԭ(1嘄-ާ@˓vBaZ[s$4gkTSVϡjC`rI#:AT>!CKCܽ 0J) fn|S(ĥz~oih`-o|T{"ɠwX6W=^Je%HN\Q1\gLe dI+vk uy$*#rUdT_NPօ9Rk~C&w_R \|S%K^W$ #Ζ}D \wR0 ~iU"8X" 
ç&-N0wpDVe(rO`uyj2gw(! !ᖊc .J/3;D,ŕ6v<G}FJYθɚ;ax :Yn TMTFpIh;[dxAmBXDE<7r4Fsq"l|:wL$V3قTߖ3. ]|?F "nsb$ !%c-~#E.S4UӸ;@\?},prh3Ǝ0V kΩUG?P8wP198T6b7+9:z(D{3G(=[f7-8_eO(?Uay^ak1TE @pv`Emhz_۷Y%e#J*n#B;|nWIc9a=8I$g=uK)y^/2 !-&S;LE¡YOoEU`i)_9b ցD:s6UݍDSOGMNch1R%+^)IĹ&$ɜUBMrgIGO CL5R~sV|CG-//5{%`NJJaH{D Ѹ|߷)8G-k=[檈 ,7ܟQ#X#Pϥ0\0uPTȏ$åaY\~e(Zpnxq-1}Z+]3PE,9a%%Dj/#41,;5sz}.<+6TGK.[)fT s-+ٷpqͤЉ>]Q>쫭U;rY"ͩ~&]=U7  "Ƃ:S3;[Y7$Se0[ЄoZ`8 Ë-)pI&_dYabOBl4`'gh:U,H+f;Y1 }Se^&o}qh~֌,5(n!;souWeF;N,wlۊ+>RL5~'*LE[u(U s#ⲣ.g_kDjda{f[藥CҁZB>R+N%_OMIŕQ*,7_^>٬g/b0^.h8 g6mͬ'4(lKY?]RѬ?U|G G>3#7MDZFKXo$r*}F;>$b'YW(Ų~ ̖>7LfE/ײ/ :OL˫;9 T@.w4Om!k! At&L1X*Mpxk$oC~1{/;Sb(+qtvק{Ԛ شX$fЭGFkȍpJF2Sn7]|<0u. U3cV(CpfT]>~_NX(K]Sȝ-/̮.ڍ]> stream xڍP\[- -\4i tC\%x )h힬{07hՑU rrKB]lllaiGz-OGF?|!& kuz*;; dڀfÇ<l/cE^V/4_cxl?DiA`kj_*v[t=s@^T +r#6n6˗/g?D qLc6w{"ZWPZe[]_Alc6e@@+5_jcA`ǵ`~9e{-/W$4_&翗[B1n9jr/2V@?E `eC\^B/!P?N *k`'`UyPs_`(90/y^7w? 7^/G,ra:k~u|)R$B/e5X  V[(b^Λb9^^B ,]/չ9/j z-Qf!v-7IܙGhu3{/@޽FJL \^'tb-oI]-?z4 5ū7l\@Q-.˱Nvg\u#{A!WV{W&x :1Z[53۝USd#_\otXPiIQ8i/xpg*_PQ%`gmzݑjp6^Ž )6jׇ^ GyI̜V| MȠ@U!2b+ߏ":D/rM$2̖ -n8jUoh߬?VdAX讖TQp)y͋ *@gk`fVn$ud4 uBdl>)BͿNjoVlOW#~*:@d&zΛtf:`z&>Cz.2v#!ȓ UteyU[FΥzbʓl%њӉQS*a|Sηx;lkg ʙleßܼyfnN_)9ҵ&Y ;d% $d3~z.ѽP PeIWuWOnNx0 #F&R{Ac`Qv0L \M6𵙡t♿mhM;W1ߒphz&ӥhVX=QGN udCjX.d Ov֨~ a# l-u?Mwy1k Pa(*o;麗-p,H Py> ߓ:}q@*QLJա ٿ"b> X-n\ (vлEWV ha"SK\.&Փ0ˣqsŅ]ܵ$Q|Ő+Ws=Rw$&}+5\CBAg #Q, t^<L$_td8f|,箌[ w"L~^54ʵ,~/΁ofue<Ǝ6Oѵ:o&)>&:u`Ʀ\@%?R%%]JĨ`߫MG*~͚mߟCDR9(\ȷz*?+zXch-N )sn/!)m+v@7 zL>DGě ??('89[ `iͶ0* S} bAi&+!8ܱv9tOpWBzU-s)U 2qGtdWX 'vk}ȲWҧ^2M^KaѐPwƿתz+Ԯ-OX?2S5F^2Άm~vvN=?14 _:_BRq-ļc=gѐ)$%# ,Yt Ovh!6j_'*GS) 9c7`4~HۻcNc,r "0 unAF|r9֥kKG4瀚Fh@|û-/iwH櫔[{쯲xt7 ۽@b5s@5.,<}$݇%YY\,}JN 󇝛 9#ʩRCє#KnG^fZgV:bůeAoy0^)4 ЉѪ"NLE0]71 YI0ڌQD)c2U$aϾM164o=.65ZIݤR B>G i:6A&%uW1} hO33>;+IuGQb纝&DF{Kj2Uc@x1Y8hO{ 8­ Z_5K*_7hrm(BLP %'RL#OoMe)YKW6ݧ K, k"_Χؾ6TU-g }4(ό G5/ Ї [Y#FIAK  rƃz,'t,VVfOX|TI%3OTzќ-yK1MwIB-q@70 N@L[-Ξsrq Oϖ-SIcO7X}9גR (eޔZ+;O Mj?>\7wFO؍QȵD=#ĐE Un*4e0^B^@*\ N 
$|$m>[,0yY?3neD/{.l2'D(L}!Ւ8XnɌiY}֣zvCZwBtQG=ypfǩ[7w|гDWTk_K.F+3w$7퇯a[]w@rgY!Kt@Հ_2:ON"e{̲2BR h>iͷÐa{mID{hm6q V8*3Oq_Ė?X=o`9H|ts`j^| 'A׿^֤R֕T6vӅ5ji=zxuc{98^JqǺĆߖXDldQVJ1ϽdMuy4chyos%aHHNWq'% :q^C޼9WME⼔B5X9\{.n ]YMm>=3,I&aoPwQ2(,in_'D~pbAnrA|Ot'{[2I%{ǭZXKG% g]T1ګ[?+YY&]|g2٧uֆb.+X̲ۨ[2`b1tk =y1;z!][-"s96G:R 3>MMzЈ~^~=4kW_r|"(ZFc_\?^LÑá k1\y{ǿЦs[zP5Đsk Y9ZipQEbV}>N#tykDl<5Oㆴbfb1+GUwȟM\j ˜hݹWD 9M [Q7V."9姵8C0Pdy?mQ)y9v+hD [8vs|=D&5nU?f"-KDo.)}hXI&d,Qg4/5˫BzoD-h֫PT%4Co:FdPvv}ղĢXqXvGbB-}GM$1c Eex ٟa?F)r3(2ͭknETuw/|>S $;;\-3-;g_f&56o!Ɲ;p&d[Q-y[8O€@d~h7kȗG\Xt?cE]Q=_Eð0ZSbջQN,F;'nB\AV,Y/H<<̊ؠkaH]}'*%V~kLyTJ-MJY|?fS/iͽσC`׿=}SRh6|uB'T|˩`v/(Rb B;'kxԄ<_K߶7_&0bZDm!TOo@Hij ;(Kt--N GrR&iɍgFlw2\ZeR C_x8uX"t35qI >^#XW2i7:!Wش20!>Q7 zKy3Wl͹W[PP\j!m m!bTw5RR4\(I=vu$61hKJ9 U]YnǙPNe,ץ<ۃDOjKr%܎H2uX3erjX\Stu NrM(ks;)/ q )ލ%70i]#C[eAJ+ ys[K'QTް)eYGhn6!])AZV;5RモxǩyRQ's[X;$ձ'-O}#~;y80[MbSܓ`!+JdXt 5r3lt+Oާm/B &djwi%q5N]UsҪ*V x I9_}wn/0+kOVlBNl7ŹёPaՃ#]CBeUoP,Џ$`8~)jݧ%f:!+EOY "EvCq6OhBZGT<}ڴk]^Slj~MnUgdN&^Z,ӫ!rI fF0{d>6֓v➯Y- 6=.*? MQI0edfT5Rˊ& 6]gsɪ dS }zQYhQu'B^m"QbiJJQ\Al1V]-j$fUohprY1{45<-{u3g A!z3=ʹh.0Jt?+C!4̱{lj;5p=Cx cfvmBFΒT T0.H°i|MuFO G@b;>߰KgVQZCG>NxW^K6`|ʝ: >Cmj &w1,Fša-%j?\(Z.)P8{ wt> dLėu*˄ߎt%jnЍp}U2+b6!8*U([`3lP )}p17A2ĢIUeȵ/UU;$`T2@§<2n#%Na*HUZ/ee yZ`WM2ߖ>|hK TTS\3]/uȤ {ssAO\ J,6PDhG-s; yrSYdW*oφ|Q%6݄ ȇpvbbp0Q+s3{>.㶂 L(5@Wp:{) %h2qRq0f+#\81we`f!V0(I7rEr҆E'21ZFbx,H)]IRr9aMR$EL<ߙR௦ʂ(?5?PhmL7de `m a3O9a@I$^䕶9)I~Ւy0a ENTe'Fӱ0nO h{óXoA̹~-,9~sWbۚj&V{gtunҥG`~6/Vp9޸o"+ܝƜٚ"dzN>Uyt[ʟ-|ak48i> ۺ15!vd?N{yDH^TW^ y̻j'];B !Px{m2i4k!WbN wU}`%^ݧZ\6(~N\f>bf& Ȇz~SCYŝ rbv"av( $L|;Ô6La:h8`uu" :T +͉^[آ+,B|EErfzOCCW(;Fߦc/\w<\<50֔+'8 =}2a; Wϯ_ۘwy ߭+A Ǿz585d* σz:*Q_q=:)_Qo:XpnvZt7@/YBo資@M h6j4\xBN#L؜{vskm LߛoY&dY*_C@cwD>78rzu$t';5FjҲgAT@:UQ%U 5: iPS]Ng\kU [Ka>4Lfn)3aUR򨶕ρ"dAWÉ<9+;g4oEѺݖ֘-ʡ>P-\z:.]M< `+f<[TwZVE^"ܱ,S@S$2ۚF{y?3HKՏ+aJcu`1$">8\Q-GFxXp޿P=RHQ\ͯK? 
wXV<;`o ZT( ]a`MJ5;!u3S <\Rc`ҩ֚9=J _ʛ'tQuklxE,n~wE/~[>I}_hd5vYr'͉M`s*I]&/{B^f^+bDV 4beCR )\D q!ki%vz DV$Bl+܆] N:]4 _mLPQ~[h3J&Cĉp͞=m>ELXn_DSVY#aՒ(@֞wJw!1mmɾ?s݃"Iv gs!y:aoұIUӬ)Ga=iMmE.ꈸoeX86 Lio?  Ħ[>c<#S3DT]Ha}/rs e9c^Ö&3ŕ%ݯPuJKƔ%G bGBtN-qmvecMa2#{Y/1O=7Tp~E j\MJp!3kٓs?Iӹ$-E_J[ b0R;+D`{'z)CѨ!z_iݒ,e*y懳u3 yj "NjgC LG'i; G zϟw7:b%_IɢjdTfʳ(X UnAfK`9ie{ȟ9 CTE/`Pk*1Q>-4~$$h چJ5: N{^vD_ ƏT4X*,`OjSW~0OQ@0Ww|:[Vf=VO!h Dbs;n%5[];]ڰ[ 0Ye胾5{=jndTJkwMۀ\*Un{cK[oovM:H)؀nOOV5ԢUƋmN( 9/@@0Z4eΫe,^YC*8)++ZcB6O¸-s,~^ |.e_6h:i-JVґ'>CVc%1nlVwUrj NmR?0"^Ox} g yWprQ)c4USU,x9y\buOYP̊9IqFc(|bIyd"ϥ/m>˵#<|x2#?[XiQ6{P]oRτTQb|v.UkxGT­?7 y M!nх ;U$xFOWiuUĀ'O dhMx579pn?BWRcq@5SoBtb]qȎefqJ=~́(YNH0M@ F3S4@3WNP4 !kl\e!HOCTU4!WWּ#WC{h;_P~;MuًkCHk 墾YA? 03CWHZ!rƻ=RǥPPPɖ@߉IeM2e7~7A:"|ܳW9HS?7^<ā@Jog~M,I[5qT>yn|K^^.'=W`R^(68m*J_aN.'@_'חSdw1+ †+xcL>`y Jq#ؙ,AB}qߪ>LBP Կޭ,+.16U': uݗAp- endstream endobj 1479 0 obj << /Length1 1407 /Length2 6345 /Length3 0 /Length 7311 /Filter /FlateDecode >> stream xڍwTm.!04H ҍHCC4 t4HHJ 0wZYk澯g=ʨ#g )A!0~^ె0pXY0gB$; `7 ( HJ P?Pw  PB@8`{,6~qqQ9; а9\'X96`_)8`0W >>ooo^+^#Nn7yܽ@_4\@a;=zP;;6 "b r 詨\A?] 
tEJZA|{RRV_VPDp+ _yظ]a`_JdEc Udu_cu@!v`ݯl=] `7O_@Q11 q\ F u!Z@+/ ߆p`dwL>ܲB}q=\>M-MM mya @!(b,V࿪9'߹4Ƃ( A|wݿ?(y:;r2V+_v_=ak@ Sjl.mUY!4 wJ`6f*p_sC@P 1 qB<4<|m!D#!6P_X[ F 2f0!BvPw_!K?{A`>71Rl<BMDV=YH;ևw\zlKf~NY~!6Kq.p3-E&Ev;'}~:<_n CP[TY<I}˗F#fuֈ,73 )f}k3CńzHÇp3ɛ{$.x"u3~+,Tt$l;骔sE>RE o3yxw^׀uc!FK~I+k&pg"i% :v/R2&W 'EeIO{=х/Me^@uKO_A;$b^߆)y˫6U+m͸ 8N&HA=D<캔Ʈf^PU%Ri?,S$Zk%iNDDU_tpA 3nj׳1{G]i?FVoE\Z8шi #$'cƇЅsz<]RyUȊT;s~{c@9Y.tΡ'A@ٓumtwjyy4f+rg]]s&eJړ.<KpGO|n]V˫9h "H5H"6OV|l@ <9ͲyQUNY∂QIxV1GBڄ,/@ӴI2)`B'O[l2p~Iw#ܕv9?TfY[FN wHf#3h#|3KJgNg~X{V.5>f56J_"c wH%z;M|Ukp7dM^u$UѰis=yǘgX.\50i!yZzl~8m_Nƾ5s"1mfCd 9:<;H2P1k⻄^x<Nsk$*jNv֌zrYĿ96y}-:xH:>2 ײIǒvIePZjƴsSojAh%?ccDT(CŠLLz/_fq1!}F5\[ii;iu\H 5p@31)Od?d!~w6;˥S_bx`pL\!Z}fķ=*Le_~\ȶ&ܩ3VW^MgGےKڗ3c&*}zS/bb3SS?#uJDn<-.Q`m8t(HVL':af't;DqWz[#t\ wE=O^a$wb#{=$~[I!3e=bV1ew} 5 7"qL&2q]^TCr‹-`|tty11dfzmYi*5\ m#ŰîJX:M"+#rn_Śζnx%meߵ9j"(s~u (֧,RAʭc!Ix*_צ^N)k Z`H >xlʮRkE v-ȸII.T)eݡ g~DTt^߇ji)x Q'S_oҿokhӈ oxqN)RvV.w*ERC"%{2Y˜ar¬ɱfKLe_2IπYM.v3wusb(AՏ%O[JLKa.x Qcqq9LzdUd<\}vt0I1LA(i>AߦzG}1^db=*,ɯĚj[QgEbxSRI\o)Sn9=up_[ 3]&s6BHt_+CO~I +->zj#s29CA`mlZrlq p&%ƂXaHwL︡-H&@opp{`̴`omPh ]`e;o8~6gj7]yaE` Eʾ]Υ,x`|ɎL Y-YScʑ+ٖfy 5Z?>OCITqgWKZSD ̠< {@8( rz1|$"SetH~KL?>*Gٙ>2 ˇT{񍉸/i:nC5sIʼnZrtEe~Ѷm9S'tN?Ұq\`x%n:̟%nά__i#z:{{)E}hU%?r=H93ȍ:|vvs9m K,+˝q#'3*p@TWnqWy.K_0>G_Om7.MP+x㟧LǼ@f6t f>1 (381knt.$KvU6E3ӭyn3T6Ṡ7Ԟ5S+r3rtK< (U _<"XRV"7X3L%}ݎo̵oƧxl֢j`,\:k o4/BaA'P))u| xJО+1=/N?s8i'lJ'KdvT&n胔Д01==gQ t"1R'5F"#Λu9݌aZ_[h-Q!=ӺJKs3 {WOH~ty+\O)0zX@8ugPo7Boj>-َɨb^ :*,vH|'] 3oXQ@3h}NPбQduB?ucӷhr|8iPkG3L&_:dfɪF05=fSƔ ~6-KχfV(s QAܦs)iO(f ,e a{ s=o3~y; $3A#x7G~/mnѶm1#W5sM11* 8bp|_ffSWtH\<8/XA d:Y/޾um[dm60wd/A\R;b .\vBz'A"(oqA>.ս71-[:3uqx^byj>[TS{х*U|QbJ.)?Ɂؽз?!4K[.s&7 pL%"82uS-%Q0ZK5ƍdې>͚X5둈px@ֲЫ(\gY!fκ G,{s0Ub) \J8>bRI6v*׺hљiD Wg:p ҲHtD/c̺VO%{1 :i!,I=M(S"YݡUjwbn3(ܮeCd͐6U: lvƯ"IENX!FzE G9z n}01cR>6>QrDFl@}J1Mt>}5]睻?K: ]Gd2 JO'Ney\pF/w*?jR,Aԟ]΁9P(ªN=MΏ56~U\A]/)ބ1jZ/akF$xVn|w+Z,Woxd+~0#}1[?r4r{&FlcdBM6|{ӆi)m9]/X_Ў <(8mŨսR*%O^lZW- {zygڽvPHڵdmD Ɣ֕3$\]Q< m跽&s ;JJgU0ӈwMù='$K0!'Q ebyCMB) 
=rn]'n`pѲE¾/+\Ԋ!Y[GkPAXgY/WK["剁&Lvvu~f]<}S6<8t`Ft8g,Lm" %e`+~pI/2 ^B%+˖PwR; P8G[G@ ,I$?J B[uףzÖ ,5{Ůs 2e7{7yлk$tb4վ`&vh{i]>qqAwOfwWuOfa;WvnrWȹd2.0۾ 7iꏈk[eN $+8Kdɠb4di0͕:^TWDPLT.¥P}8O@hNY<ydQ~wUENӞ,fjFi-˛aZW-q~n?c'6^ "zg2W2e{***}qJhmPyzddPFt$=Gix.'FR^vJЌse7q A F6SiIz uYEq)5T"AxFKWUeN K̼OBHi y/gr;G;0"6ʝ({9 e/y~u"vYVyu>; Xw*>#LV}KAC}s?:3 \!Kt)hGNTe=Xߧ䩸SoeaȷlE9]L4g!{M+^K<7pq7XES2bJpUhgrB.Gp\F{gejpRERJbo: Ex9?zǘ(;8}y 4+$ѣ '\N;hnU#X8<QGJP!7A6A0`Q4Ѫr=Rl ̾ŇaíQy#f:ힺJKk#/^('5ǟRJL}]4T,()ME]܏ߧ-eLF|"Ɠ%'sU5Ey-CP1'x"7܍]s$fұ4z?$ Ac'&V֘̉;/R1+b&3 DF)] E?`vd5]h2vُx]9/Gvԥ#&ZIx :TCiAɾ8|MCSFGSSx0Ƣc.BR]V[>E?Aڦl;xLJb#\HJ,EMm3.:8귏P_.p^qS;Q=v|uTбC'P>xLbAҞh5!{LMEHmfXW)gFWhūa9'}蒰 1C'`i3H''Ua 5VUZB!e4o<9 XݛSˣ%$? ƽůwN;[ԭٿB| endstream endobj 1481 0 obj << /Length1 1500 /Length2 6990 /Length3 0 /Length 7997 /Filter /FlateDecode >> stream xڍvTj.%1t( #%=3twJI 0C#))-HJ+HKHIHIJ() zsݻֽk֚yw uC%'LĈ@D@=ci ,. xyMk򆣐Kbp*3C!>@8"% b` Q^@U/ ' F!a^: #QTBP$Pq!p7:B=&(G8 ]1,' Ex\~p+ 9 ԇ"` M]`3# sA:&Z@4 X0!+B8  uE1a C}pwPh7W_apMVC: * zXݑ(?$trUt ie`.0 P Kˀ0O +i[ ƢQh3X0`0 ߊKw`.p$8GM  }q pA&**B [bEĤ"b` "& + ?ZHgPO.'a߿bKc, `G]oAG㷖Bp8`pC6ߦ?s [v@ wp!*; /nq7U0J5#vIJ^^n8I @(qQ^_A0O\qo /2@GiѸsBBoX{x aPyU.7qMIa){n/:UbY]3xRd<ˋX:R覙_U?Tn5$iI3j= :K5[kL^{=xKMTq=3,ܝC&a1_]_ܻ5gR:UE$XO9~'icz{ %yBF=qcɧ5>T{n$ۚ]0Z#j齁 zߦG )v*MtMn{;sԪHwԛ);l40M&n){LrGՉ!zZ<Ɉ(K9Gb3¿{eyqLς4"e iqfy^P5ӜKʐS;-78A|xxЩ4`՞Zc`R6 :2/k[3lbhjgy}_'^ s 3:6$10B\.;}l:zCKϧɄl?7M*սjsVmX4j+ |3Y餼JwUǖQd~E㶟(N{K} IDo-!Uǭng)$y<2i%V։Vy ^-dGXNBa}#7 z6bTȈBVTN|֭f@pJ١N_xOorg,&bHC*ryznti|QZT7m3[B@5JoRk}mmWukxcmbmKrL&gqP-}VD &av8j0m!; y9­9XlQ蔱?B_}qϏ>CwBSEQ0l\$xHs|$cYDP˟?vYM:wUsE3w1cZLjmq6}f_j&Ӫܤ͠vЁo\"XrVӫ# 6_hw5"P UjM,qǥ{g9.sOr[A'-TSZ 7(x+"DEAYUhr󚏶#GVNCD_A$Ά qfcG-{>!kwe94TdPҧnXe#0&#AG#9!*^zMbB%3B4"Z~IzC;f{egoۃ&-C K^29ַN^}yyd2!d8:.)5Z{9q"[ՊESpu~a栵ŝ1[(O?>u0ph xvgUk'5߸מu `t%}5ޙ3iv̷vF?H#{(G(yrAHB1f=}n7ۼ~/ͰhvU$ zt[YSO:sko~],'(cd4\{}xhX}j5+I9Cܓ,<&tJub đ \ c_ c} %9+)J;bO$4v*kOfoUlPA@ж;c&~`7.}@f?u>Vt*dʧ5^Df%>w͛^}*kvQ՟VS݋;|Ipb2mV.Zl;|ZNNa( +D)xAJ|wvFՓ6 Yͨ*9-nD(=&O4Ti_R@V2> XꐰX1DV{3ߏX?4`T5zO 
.!fvR.Hq1a ?}k]NӬXih$`m8Xҵ ļ8OgC#;}3Ղi;|#)$>4q)S0kj_~FN]4TVPsJwxncpLW= ᙁ#A`C29}芟gT*&h7oF'}LbS>m=> j"w-}8-h,Cq#@aKLuD?3m}Y1 @| q%=[:s?,5hS4xgz'Ҳ u)ل羉_~0`nu"Ǒ͈l8,e1$1fQnN|-Q7.e_U8]ݾf3 zuh3TQ `ƜxyЙaэ8R+5շOn[2&?{3*\>XT{mDiٍǾ@r^L č)+L8${_5~{,q#-z_`n9ڎ}(ċU%՜^; h5sq)#neLHhGۃN) ] 3YwwgK⓾ "۬L!\9[3 [CL._@˪(,]fk{<>+Wp[$u /48At0 |e߅*Gf4˺vMC4u-5h,#Q# t+ ~9xop!J{63$N\Sa *Kz g?p^`E\cTcKdZ]xyCtPNj4l\HwP^5&$8ՏOo#P}njWS,; HϮY\vE=2e~ސEeZmPo31+Bt/pq.GḾ%R6w嚥W"WIw<Hd7"V$=|l(Z `!`w#0pNx0#ԙv (ӤEP#Pm"zNuT7nN9:NHg-PIUJA=P }6*kuOĊI]Z8>! ΟRZα_Uzӑ*kNfPojC <"MHTZ]e֖t]gi<;el\0e]\ Rn%|M Sۖ8|tKQZ,0xaڋ̸.YPlܖY Q\h7F8Zɡ<dH,!ɪC(o]˹ʍ }U,S6S)+ޯgd4}@Y@t:TƘnbIВgTL)TYul nf4F0kx )$WWJ |_5T*RZR|:{ɟNSSH7* QTOP\=j<.@ݳUiWwg(2R5 f/El~A"F9KFNv `qݮ,]1`u-XMT/hbdۢlhw7BӜHyhJ[}a2 H-72_D+pޖXu B?OŶ:/T Ut/\r*ew꫏cxEF;"!ɜ`"sa!ҥ;unŵ0^; wJa:=csea!ǫ+ؤ!q)z HDIc!ߚDMr Ra:O; CO+Ѣw?yM%\9H;2uxdm_Ш{HX¯Xv;"{{z?08ɠ̮"0|ؙ*MA^=4-V(:]SB##s+gcl/L?=F9["~X15IqwRy:P͢' xֺ,b"sݟ)| M*i nA\Qtмƿ9FX|G,$X~NQZ֘1R`ug-<tTIr J> h{]u@cc˝=[wSlEzoKsbNr)4U>Q;mW.pv9$0y gf#)/?(dUmq.,[9%2ח9“xqb殘4.j.?;c'Y':\*q|ޔm#,|Йiu<2@#FIP裷Oiz\/ /X/ڿ-1fGsӔNXVO@ UYFWFF;z{=|_ u/Gf\swS(c֨QyKk$~ꐒ}WRc3){tAL>aMu^>8X7A501߶HV7rHTW'G oV/*rGy25)SKi(裙ɀ/qY{?䋒\p+?b[̛&e[i* V=uթ Qޝ٤o<<& -|3TqJR[AqSիBIFQm\jȈ.cv)`_dQۣNl2"v1m jp"oRt-|t햛_tU W.Ck2^ã[_n[z 7!âߙˆ)B-ܥ;D{DX-)[ r}gP筕סTpG0sJ a{o-=WUb&Im@n%]<3l.+bJ(q3:eKΚB4uWeX<@סI#(f)-K`!!*eÕV(/ )}E⳵qݐ {,Dgׅ41bwGhj$s:߈5ynq2me S5[+Hg l4GD|]>K0wt\0fXFSFMYAG$b!jjhR endstream endobj 1483 0 obj << /Length1 1652 /Length2 7815 /Length3 0 /Length 8911 /Filter /FlateDecode >> stream xڍ4?U[Q;Ԧ{i$ =ko֦WQԮAڣ(NI}}> E0 Hu-|Bll&P NfBBp1 @SD]  ŤťB^R h!$2 B#PtxAA@8@rwac A' !tG!xP x@_%?K#`@N(_`PDx/zw@@Pog ppgP !h : S @tևyA=PH>$F_aǬ +#!pW~*P/}6 rN{{¡M?9h H  ȅ&0@ ]$A">oW08Bpakt~k^?٢Fa_JRϒ6*)!$$bqqQ@/_M Gs+e?5pKV.mD@7gvS(WoFj0o;@+]zK510( zhE?"ՠ~rC5ি C H% ?6з-&z*6!Q1 O5z% DO%[~>8vk 8!~5VHBA ( qA1#_ 0_: Gf1!A$D$P8zhYFGT4{#H80 _%=Y!ז,ZrA^^h3_ߗ,#@ґ/#;/|l㬬vǥXƊ>VTr6X>[(Da+?(v a#h'ųɎR H9篣^|~~ӍܲJIP{,YWQ6I^9&7IJ 4׭rG?~|?4-|R=P+VMAs7@`,m25_$z\BS; }x#uXػYO97 yقzOݻtH|*(d) 
LI1dZ&@AFa=vAhS:luh1r&,ŢޫMdfܪbzD=WrAlQaAGK0I قFTb,)1aËwboʆfJ <|?u|Kp?~av)^BxvڅV.ezZ(h^Lرr{6Nߓn5Z(6Z2zfB퟽aJh:>pUf,U5'+3tyqj>F(h}!-aݤwSI@N±%)^.MkBU8.4pqj&؀k*޳f}Opq|>@_k8tU&@'~ }Ç{[t*m8"+eѭdŖso- TunԴU 42 fhrtzu .ϯf%b}gXρ1 !4r|Vf2'F|i (-yωI4qw|q/Dz؅Ut } -ԮIbּr{_~أfͥs{|V_~SF1{i~`ׯ@MrVfv5Tby͚t;Dw# } 23e>FUu۰T}k|L=mtpH9s;PŅxV`j,ˍ* yi`Yِ9 ?oWd#Q6(HuWiF(NOvcT\>l)e_- 8O_̾P1Ji=9"h>Ҏqӗ>Q0o H+hP}L m.78';fYDGk {-,3yǟfdU(spia*˘H:y;8@Q$WLI}}:^Mnrd'KE-?,Gķn*@ OsDzG.lo;UTf6&#zm϶x 1NQERԟ>e8F;o$SvX̆})tm^b}% VY=C>ndITMiҴ*# tW)cHy,lA%Ybc%F۬[ڤ~vJR]{LZn%)8f.a5XT5bb޳Di$fu|Fvg\9_q= xރ,3;J nPIqxЀ+@nsCq?Y?u^2.yNXoAMaMϼɲ@mQf[)!kǭ绹I;E6RL} X{b~f;eMdw>LG<S2Pm%};_s@Zºր<^2Pn;bQ &,':-,m,RNM ڪh3^^ v3Uh ]%Rd,BPso >&CCR΀UdP[-!$VT/%`/y0#C+,ᰋfpYI̔e  %qBxXlp̗爤@[J9XjS]>v0^2^8H5t-hGA7dP,חDwa "[|,)/ [!I4LQ>d,\_>` ݪĻ~ *ƣ PW`XekIVDzٶh񜶸_/A"T OU*Zs? <'r஥۷ڽ%7X{|Y sR&t'L҃پ̰ R<ƣ9rĘ㽮Sa 9%PY˨[]p"@OjGKN9% G#n—K8~dߵK硄]t){ুi%|ÖhVsX)V\hQnnn"{ ֢Zel,Oγ=`O ?a;S{j:}Jo٭6-mcN,GagvR {\ONɺ5T$U^/T7&'(~< !wIڃյKl+H˘; ؑ+~mgN[JUֹƋ4iwnD{CBuQraX툺Pt^E0[6g#:a{E( 湷zȻDFo.ƙY>]c_6hEt𧩱 m`|6a Y>Chٮ.T2 7"Jv\l6VSMVR]{ ` bbksU:Ia赺)+R71 AuM=x3?Wn,JdlpR"WD_&_l'l 1ߘߋ_Lu'KxTsodM=ГqG֫NE^`&(ݐ'yNEHryT:Uvs v- | mH;P;So8SЛW|7e[>J yXvE _SmKJcl?"vRv.T/.f~VR\х9fQ򐡔*_ƴ{ =rX4F0>헬iE&RGUXo/ 8h{?҂$0#KhˬL\\WO&_H԰9+?I\28L:V3L֯HítG!ށڲ9D^-SkCxIJCW% žݙ[.X=7;UB2ӗYC<ؤ(:X tS5×wPM>8s6m'fj?'x47%圲+ZSτQ1U?ۋ\+KX?XSĻb&YS6p-"D(W~g H:C"J4 dԴ7boiλqYT&QfǥqtVu*5 y mюDQاxM?gY!AզI՘ǻnYOͱbC߻͉f8Sdon]n+|fz/ aP`ֱx/BEۣL"]6㘟YrB.8G( hL|f7"\:+!RF{xLxgJSk77y, <M9Յ}cq^n- 8 ާ™98Wl a\F×K, rT!iDrPZ5y o7EJs~_ jviq6!qDskI(k1u36 %ߟT'U -f{2cg׏ oTz.`ru(TQr~1x"d+6eÇ֔pCJcS*}RoSAB9-+ƳI↾7Ɲ-6I$^)ȓUI&EH&0ؽPJ׳[$oMwOÈxqk*5G?Ԑ;p:DpKXan C̤$л C.w(>>8q[QR(k\ERT8NրTۉOKx̘Ux %; +3fdKmi#yľasq7~RL"8ݓ)rSlJg9z䬞Sئn1 ڟr%!Z+C)*ܻ<6߉)V1 yOd'7mXLUC޳g0$Q耉Or*Adw.*䷜u*QЇ$jqX^XBR6w[\ Nl@>D)9D'NϘ] [| ^{i*Z a[gfw.6ƋjWKN:bVV\b.Ƽ$ #6}Ngn3 %˄q3yhƑbvw2?\JQOd/zwK}|9,W˨}hHioq} ܴPI% >w~}~'OS$P qabZRc'qkgXOi{-0رwOI{0hoQf]Ήċ,6q$7n[5n C ]CHBR.>͑.-|~-m$Iڱc4uZG[ lD¨n!ocΦ5~%wDkun.c H'";h.?j.e1;ReWg]KuIWп} _9Sr8܆f'-ዮ~xq00iNbQ푤`>[ejeagX,G^»Fbww6 
;mqsqAQȷ]=aÖCT'2ÛUA\ Ұޔpr6e%i[RK}‡;:3iZ|T `Cn`U+%EXikrЬ^Saz~Wu֯, V+y[BLR 6-k|}%9+ QEcm뺟\9[rXu3ɚ5~7e\q5\2Jzt^BML,yƽ vg?T endstream endobj 1485 0 obj << /Length1 1370 /Length2 6028 /Length3 0 /Length 6971 /Filter /FlateDecode >> stream xڍvTݶ-HoҤɇޥ*E BIo"MwW)"E*JGs㽑1Z{V]^y(!L@ %E-S 88 hg_r#AL @D%$`@  p(#0w"E nl-7 !!ȻPp[Ђ`.m!΀C*qqCq/8ЇP0(k`@3`w ^ p[၀Pfs@Mq!5@߳]oBpd-#;3 QC{AE8#1OC9P nοFUs"@O b݇:!^ ;8jk+ЀXBTTX0o[_ }\a`~HW3,n|C_Z*fwLx%aaq@LBg]o!쐀ğf1=?_opĈpG`-M[SoUnHwwC\> z1Bb\o1g`PG- pw7 G:Q/90];ףW c/['#!=Wa(A >D`ED?0BH4&!QD PT@0-̶Zv3 %FJ;ֆV3zn __ZiN2As9h?UwSV|ش?#ϗoŴ9h,5}oϾunA bk_mqO_Fu mUne& ixk+AS ( C?NZ|2zӅ4zD+QSf{OOSAX$56}#Eŋ~!z{mbW x7K?ǖD" C<䝜a\bքߓܔ(8EsQ  Q0*reRӔ q|ey7$5=9si0pTCuҁ^qJi+pATy:I7nϤӧ֛-f':"S2 j}LC$fz5lڥC`Ǩ˩O CcZ2ȣuY qxk~6>` lWjo\w$N ߤ?諑lJZD4՚깯-6ua0cVWkc@fl\7Wᜤ!F=y2;fyS_99DLifLPb$ک@!rא!Tr.A%f%g#W"POU/u1V@aǬ@ZwwC7{< o_HG|ZD,OuH钃dfcMQ/ex=wnEmh{r϶i}v}LfJ__8?V]l4#/D;ٴL'kyӵT&N#7-0836 Ookǒ`T3yr3vcѺϯڙĵli+ll r@ev!",ҪښɅ QֽRl%,C&註!`ulUӔϓkNa`vۡ8*&ڦ>)TUVPs*kiYU4(_f, ~kY"D[NDn$ryDo6/l`g2n#7Q#+e>;u+~-[ wXy(6Fv>mdq5۩^BCddоfޠ}v_5QaOJ6[zc/Ԛ4ԦQH!(-YlW]hA\t3ۯ2{zdG"f?H>g 0V<~rVc,T}^Y?Q/"~-lǻSCh[`Z~Lܮ1_,8*կJ"k&@4‘2ycW۹cM]3WB@&*nرΨ-)(8z|}~(%k)F/1z9C-VޱTnVڏY/]7G,qr6$*7ʛ~B~_itmPsrNcʲ _@b^%csAќ)4h8rD\Y.V{{E?DM|diWn9t$kZ\#0(Ц"JbIdqs.S积J9C;"պ:g&>h0 6%>Q$≔hMJlɄ$Ј ku7WOu k kt;C!uXwǠvTyJ<]z>^c?i(X}ؔ4vW~ rRᐕjZ1 i:֓%yNrr2As=MM0>ݧ/\m.aOmYmhWD粴6`{ Cy~:k޺2,~Cs1p #Ne=b/ΥNdA`=1 ?x( b[5N3FO U{T.2"*D-7<- .zŸ*bڀIrn~!fv5֬fcm ('=nr^3}A CBd%素Y6J8 ;~rSJ`U$uOxǯBo=2QD;ٻ g~{F>pZb 1R6j e j fQEp{ O !;3oTLͺܝ,вS $U Q'(w1x"A#ɧcrgqtYL9Mֳ@O;tXqHe{sEqm)S}M޹<M9,t9 YXo#B9?kP^{f>O4ȭܧϗ# b&br%>B$ 5]P4g샔vf;H:ZwML{3#龪I}lFC8W7xP0F `=}峒ELXG!LNS䢭)# /UwEggHzƆHfHm=11̄17V^2lo@c36 k)E([ Ĵj'o|!1V,+EgBG$Zx8j;%dOHnHM'yxlޥbó{@<,[+whsKO@ܥ2ḵŔSV*O!6:Qۡw%8-7e(Kmز8=VYOvoʥ֭4w3v8h"lI깱'u ֽG 5B.)Biۭ)B1Eog±*^=Hn9NKJ46efN M48–M~#(Gp2i!!Qj_cOqCk|1&aS]VihYp Qd#^3ޑ]Q*կ뗚V(cH ]C+Dt]-;z'[/n_h_Elb,x$J4hp RkC{飽-?ȿ83lX<\ǢSbKIxTqA% bFO戍BU`߸N%Am0ivٔ𽃽MLT3"Ltn 6XrO7:d7O0hFkCL7yߋF.t{q& Y(C ;"~e7k8-p2gEj]せ\/C9ts33>sVm;E}+%Nb\ ۑ Pg'אֹ5 
vG.*w]uJ/7&UW&SZ3jwݷ ͏| O+LŽנYGW*ERdw Zg^OyA椻\*GoBAyѵ\q6ucS9rgab!nۀ˷c<>.40wUU3ԯg–Hf? 0?3|;E XTϑ-@ n u\ e~)"F.7ߴg5 K$goꈍթzh%P:xn<֭K9Y%F񡽄MP`V*k(L|5m3gE+'QG+9g |fUa8-/|.ì @ad;i,bU* 3,=.ɢ?9&kn-h1;,!uvbY;O5'z1ߺI׫rL!5]*/UcX$嵖Oc;,EޟE ﹷ|dG(!E5 ZX<|Tgsߋ̅`}E3 ;L ] Ѿ~|mjI-=CtG)8+MO^"=o>{x(31XvBhv)Jzusހf+{c27|Z]?T13^{# 7*P ^_kK S,%c뮃LPC8Ku")CXeQc>OeÐJ$7ADfM}WB tQPZr؜4Qۏβi̙ )]C?JبD+XJXjWZVeKkoB. M"6+)vvl^k_1fēa,^P40o('T 8i;`N|E2 /3T2v([]i'|M\x]+= ]'@$FvwR\6O4bzPN7_0WKun4IN۝/~3* euǔqޏ2PBjYrvbE2T[,won+Rظ%Yi[{:?MFusxK{0FHgu|XAsIt|Na6!Eg߲ O ':t.o(|ݨTU r"g7z 0czꝅCP+kl~62-9p칱=zr+Ԋ;]>.otd8Yyf*Pݤ ~+9S8IUCRy!3Pg3P~[3?ݩJxS4Uサ -,[/Sz{l$uYux,WAۇ`kr'kWܧn16sůͦ5{ ,*cJqkp+' $ko9NZ}b^Us &@jpJfń=M]9_j#-(5 SaiCwѷuky>)p?anoj'*pu}VY;IlbiVJug[ g?zd%unklILMn<޸/1K}fEHY~o5Ŵq{QUdRxy%h<dIt}j9%wۚgsqUY?^ʨX0ejԼERd S㠸 7E^V Bq)_{By&;,{@ -.nG^e h:JK M>t<`U, 춵(lo"B3"Xini-SQ.^rB߽_uFeuܧTCgCs endstream endobj 1487 0 obj << /Length1 1437 /Length2 6258 /Length3 0 /Length 7242 /Filter /FlateDecode >> stream xڍtTTm.(0 Hw#-0  1ݍ H4")-)!Hw 9k~;~zYu xl6Pe (h a(g_0C% F1E0 = AHD$*(=a->:u'bW@ a(68!\9(Z`# 0@@`PϿRpJ9P@ŝzPw(j 0@;7@ءH( 8 P;:n EЛ 4:Pg? O%!+`P&r;#`O0lv]9,=wrs9j+ (N>E Ad/_ ;WGpTM/4DfEEDP7;sEM@`vP m ( '';Yy={- xt<  KTL4`_eCT>+ \4kHn/A@Tc,/wAο͜sMZZZ j #Z--忭j(0Zrp{4yAB|Bp2j CAPԜap.nAG /pG ϿUCt& ,#`"~4~ m޿ (tc$5Vq1@:CPLP?#B08ZP0@7 H$Z).  
Bfpr ^xm1I=(~Y*nֶw~~96}}}NBj> CgUPf[賘Ud=8S~Fv nVn]o\%XP rymXql˨S D yڣ+8X|K<݅Ϫyb|No3]U"tTy)}t9^%ݥ+vXv12%vJ`ј)H-cCR?}}KjA週 NȶO@rFqJtH.ؔ-7֠~'BIۑ-mإIҰ,zŠcfs$z"O_7,PPQ|l1r2% ϸk[uCi|YRU$zK%rf5o̦A#y{P.%Zsxj%/ז/_?(3п D#% dE6mj_+aPuՋQ ޞ0DIkOd }n.umu[c~@V0:[HS|MiC[ m*!T\8l>T ~*jRB u@6׽\/h [jyAD%U\꾟2d;;% YJbվ`$l鋔GG=Ēʫh@>4jBqg@ɏVۮ_8u_+`&GIxѳWJARP?w h%%XgLoeūHŒ:UaY-J@_3W&8)sy>(SG3o;ܤOܷD 4ݯ$p9y"%0Eݙ,yW8u3e=܁\|z1^$DMƉ//E5WlƟ[dƶٟV&־/?R,aI\ Fq1Ǎ;^lw(|vI%teؕjՊĺ.v-H^6'(qk.7jWH ^ȡYv̇osWsC{S&"Jv:]8Ȇ hXG˗1^ʫ|fԴ ,QYWyV= :InN:2Ĝaۦ:4!yp_M ,&‰-+_]}۹26LD*XɓUO 9!<ARkIt 䢉qU|frkA<91{Ќ#`y |qcn0l-:qiUz^5ArJoA[<-KGfkݑu؆%)za2BF%/*>;`{:(\u>SsRg&L-E\~l,%g8ߢmahSl w<-oɶprb..G8$Zvpj^\[ ~}2KS;54"2v5VƛJsps+Zb׬bpi.{gwX?J2n圸 GBxLNm_/c-́5 A$'[NZ aK~ a10 ~jg`j9Ϟ[+Җ<:zEz f8q(1q uvJ7jXi촤fzR}ޑ QVN]24 >Poq>ldh9jy M-|su*"=Rg1<:N; á"[ZMmޜ5پS'A]M?g{x%/3j}FADy10_rj+"$I#*nT#5;]dk-Zdҹ)5j)28rzQ1.O硚wԔxNE6DHu!MWw9ØL!#[f!9WnFD쪕3(j鴼,xa.}4/Y`n(MI*#Nf9! 53 +Mhw~ *;Ώ2eu?}R4a;P77ޮzgi`ZeǛאYT' u)Rdǭ/nvuW ycy@AzٟcˊQ ]eǍ#Ժ -jaňV7i)~6:+gU8n.4YY%yF|*4mỔ$:7'!3@@Mt'$Hh?9sgpȲzNLÆ:V}q6. ;Q2|Lʍ-I5L;vQnYOK/o\A# $C4 .Y>(җ2L2SNO&@C(7 0;.Ħ3X&D\ezXvQ1H]*E驋RRNW=#d/,~JJuZR e6wN4!9ɬ@kkl{T<;ӻxӒAi}^#C}n {M)Z& 0gTiab牱< 梅F#JL_d{qh"U#7Zʼ#s! WXW" y0l;'׾*{co*5s]ϘDSܯ6l_YxVVx_!X,؀`swQQK;n uP76erGl '%*Nlz4j ?6CmĜlSSks`eZ3||]Bë-\~iTҶۚ־w]10ʢiztɹȤꏼ~bl8GTy,EI%"d%0ӣ?NW]b6,`hW~'|k!f^UsPWৰ&)-73O;'G0%Z|xO _ 5Sb v3qKĚSEkAըCf=~l[7𚑁$YިO}$>WH7E%vkLg~zy\YMh9>εN* n0FԱvyUJǺ Y# yWBf'_w}G3˅6lJ)ʋZr+*g+*:X".\ŏ :aU#/ cL}g6sAJOEΊNz"cr{5k2EdIr},c DT3*% dϧTƎ/,~JD9;ǗyȒt+ KJ'{^>h/Beu_28-1-ػi&:AVcApƦ+gMcbF]muS7I$juE{juLD_tl40`V'+wON}ˉ%hWkNMOCo(d.D7I{599~I-a5/m糴P3򹗾.fZQ=>4VZ=3q{}<;ikF6^̽FNnڝlb}tӰZ]sz\(̻ۍ]n6鬕)r*|:Ed*)~~0iFZi{³@ X !EK|Zt$ٲוm<. 
oc*{Onכp?ca:zgxEx0`I}Or)ZoPf[69'V.ʲr .j)e'^$;c#Y0J7R0єp1TԵOHmoxO oUMPÔ#PŅ G*jmפ>~g=ܰu:uCz!۷YxE;[6,w*."(cRwf E8zLRj]v%Oͪr%[g%~G[/4lʬo~(T,;*SAnDØKqMr5\r%Ìw2Nm0Xor}l}% PZ߱(Gnt-o۩iºV]heϯrcX/(˹Oۗ7W endstream endobj 1489 0 obj << /Length1 1144 /Length2 1528 /Length3 0 /Length 2250 /Filter /FlateDecode >> stream xuSyQa"AXHx\dDg"B+1+|&WY#]AĆ#t rt&TA>Z4s:¢gBvP#X4L,SB ]3i̜!>@͝[q?,fδ6Ptw'alPXp+c62@gH4Lx`Ѹp;џb B;E`B !@5|SGa5 V ku^(o>H0fn_T06x)"o1WB;Blľ  îWALd3Ep?5wO-47˝dq\xӽsiiWsYw! 10uL 2)5,fμ87 `px.1"`P @7C0sN0aB0 Q̯4xf.=eςAp+P/AIg'ϐc0nYXm,Zn+t^fD6r)m`9o9L{c" j湥i0=gCT~Ф5EkcϝWFWO;T&#񺓛Qz|%1͏(u#%[҅S.x^Ѡ[ꨂJvU}E*&6޼d(۴dzt̬]ӣ뫻5S^ّX}Dkm60dx0t~zli^Kɚv󶞆{k'֩#%ILf=?x$6wjVurhu(237k<]iu4Mтָ'" ^&?S^PZo#fn=q-ޞ'IS 6Ɖg'v5+:+E-%F#/7삯O$1w_H\W8PAݓҨ@BT9>2hZJ?U7[qf*L&\꺪#oXl-Aih\Fѹw)}ʭDءx5{b 2+: M%w:~uxe[ؤ=j*/ާ z:V]q[e"Y)sa@&YDtd[~Lwp[:eMY1uX|ƹڪ~9qluL,a$+o[{$mr>[4|x~p7>Qi\XZT< 0\8e@<2}llDUޭ\Q=D-)p#1ve9k|U\3)J)}AؾގWuЉ<گ4kli3[}!FW7=81&A[%E R9etI犓%?Hd)g֍{}:drވ>~s@ҞhReQ? {#nq69WxKKԇn7r겜p=*VmI.xu$ #c|?M>ՙe:Y`{Yt2C eͺiۍ{6i8U捞5 K֭^]%+ ڍ#VE\~E"Pk~%lLs+ęyoj UVHF`iͶ8QO 6kKZ$M sSC] ąhv~B1Ja:`:>LcKRa-4&w([nR(UK}5*a㧬'R4>o R:`4V̷(2語rnxjo \s͓T҅ اPPhy`#qRãvEjA fR[SiNuC%eNy՝թsG9޷h{cdE>!Gm,)hi|-M7Q21dՈDZêhEm 쩒\h endstream endobj 1491 0 obj << /Length1 1626 /Length2 12815 /Length3 0 /Length 13654 /Filter /FlateDecode >> stream xڭveTܒ-${%wo=ww ڸ{x|ߝw}̛V]j9$*BF6@zf&-,9ON.`44Ɔ3777<9@ RS֠z>2Mm_lmDcc`bne QP&%/WH;[ 6[?C[#Zsdr ?Ҍ] r?6m6VNFM#ht4t0>*'LWmG7#鯖}|x6+Z#sG;+}`vpr41:_WҽٶG9ʄ壦!𣶩 <_"ecb `f?}ꯝ odkc026g~PTfAW5/>;|<26w qRͭ_Ia¶VFDC&]́f}ymW12v21虙ͧjfnhip;&Ϩ$%N<*~,P"gk_0¶zf.=+ ߔ_g9}+@o&濻ϿN:#fchkڨm>6ퟆ܆N}?;oljlhkh5. 
+Ȯ^ Ϸʶ'5tkuC$ϟV۞4pewy.)uoE;'?n1bډFżθn+ d;/s/ٽaJ] fJZM>"r`d`t>mv 9>w$3ڈCQc[IOc?m&Bu\N}Sl=T!X}l:I)ZlCT#*d~Z*̴=Dƕ-vĝ=i]U }?¸G8 [0jfqXCOHj6OUwaTȟh0q#ѕ<Å__7sx諦V-'W175oM+7ߗpƞ gdiψ#1"*ٍp`tea{7w)RR3bJ_ђ~9gt *g7` 3O,:x[<9|o*|;`3T4g&Űhg=%Yut7bSgt$ݚjxc-ōDxե~|WKy Xcr|X9zٺ}[e);](Z0.*Œ&HI0]JŨuzmR9 h a'\ zZn}ӘmqA?JX]-8< t 2;ft܉pރnåld, f}~\Lx{݇ӭU`GfOG= = (=YoP*i+.`@qUb5RԴ׬5 pҷ'`wf8`;sj(\-~ǗWöRaQvX"b U,1]~m746L % sW= ך(~g62P*EA^P>zE;х?$Hos9; agf0@VAѧvmvRr O(k"Z$ ')8T77r*Yeu_+1@%7[r>ܘk"⠢ULy!`bCx\XT`y卽?:1n$'ځGXP0PhI 50XEw=d^G5VڌOVlx+mLZѨ'q~`C"PEo8~,^LN_Nq\0\'6mni`*8(QuLzEL9 UzbR mn4oD`.3 sXfDNHm99'>b 7hpFtM!RpKhK@t(n8x K/T4cHž7iLހYƅ~J(+Js.}0z\<^z%{/3Zv2#G*uX?[Gpd/g_L!X$ohqG~ڈ6F"m545,UŽG҂-0enB&4~?NP[WrhLkpIŎؘLRY8rd#XS16Np,᛽_.GGx|G\#0S/tuňbUaqRWŜfc {)֯`P >U?)8EDG !r@s-꼿#('kjm%O9 acL.J<ؐo=C^]v[X2!ѝA oll%ňZ-J>RM~gOMᣓwoCt$227ؑ&cN]4wiprIc&јyz,*3oDA5q^p ZKϥ.Wg+w3{<\5MNl)m3|KU2N5ִCDFQX{_*7 ab* P<`q$TZ:M<`iٯn8CֺY38BpڭU AkEkT>vHN|3.=+L﬈E0=U<\pI,Hr8=TT3(eDh*nhؿN4$D5+Q ief>M&wVH^6rc1~KR{yPhƑ-`k6k:TZ2|{4:yC2K|XZB6΋|= =Xq_kЖ+/$zepT4-8zW{SuFG""Y8'Tsǀ`!#n?fZؙ}|ȐI9x>*ra3.|M3tuŻjs4]8 >qظ:mIYe޳r:y8RCA"6fyW7\~ڟ__1Ďkp [h$Cb-R,ֿ9uRBK$tN1,Qr#@Cؘ3[ax I~Mb\H~"Mag#T_&oz>*+1u^'^ضrͰ.jVHhD67*Kq-yd FBcth-kOF.v_#hrտkEwe'Ơ:;{lNKx"2ng1lU߱v{`2q,yj}o&M!7֤TiW1ew q͓NѕѩEE-LY {ZB6 P'vlˢLmgMSe&;d΀}-?RuT",9EHm9IoBa ࢞/HD$~V( *UX-f>0XIE=Jy㽌{ ?0'(C>>buEmE.9ZL|0e@B*ዮ`Zrqw{z^$G/Nəz蒜x_]eoF|Go;!)r~Y$rV)% _ݽ9v"#%Q;3? &_`w8-+ T"$av>@ajnÊ,-iSDZz EhUe\̖{T.M9`8m k;F~6Zи~W'{ӍCg''蠡 Dk_8D[&ۺo T`_Rr~X끹>Ȭ:@"֨T鰞 "u!Ĥ. 
l'-"&!F g [be1b[z6O;z$)0tI02*I^LPQ` Ȗ=y[1k)|{FBc=_h9H')zN-?쏭*jfZ4robJ(OM():[cA=_RbOaHW%O}pÛ`ɿ5-nMj @@+n!wo0odv"|EԈX-?F "D]PeN-o(Fx};D.-]y`$ψ]7IL_@EB#[+u_ "u?;R<&A1Y5,-,gNaCz Cbvc j!nFxg".<}P 'b(=[ց s*^Zӯ=,x }xEZy7hFC.y*κcȥNIMEzekbEuZsTAl< f+Y2q-)G:uҔ9{󄴺Q]=iluWw.U eY9!=n+ A֌"b+Pxڻ:1Ntۂb LAϲ~~Z >l N13Dif91@C%ddmȚ~VhWSSHꨅBwmg`@Xw$eƒd5G+yѡ2vv'BPN))ՀZ1Ro5\CgL>T'eKҳ()zgȤ:iS&Nb  טbX-q W[9 -Z,đߓ$(~:>^YiF-e{/T))uî?go}!CkMNU9Aɺh'm.ڳW>/f˃{EO/YWG⪄Z-(^*.mlB#~=+nyӂ*Bn;*D.f/6U,aYSJ]  pji@\d^˩IP%-/#^{gTj`+D"Vel)s:X$L}"0j߷_8\uz#AxʎʢLYw?}y- f&C2e5ǿ,*,CF3:k ``6S "]w8z#/"rWDuCfq7T\HF|N#,wS(uYuX+V$Wy>^yUj#ErhԂ=Druޟ'wh&PSK\R&#nb` 3mk~?f'4Yty(ad=eW>)K#Ż/3Oģ+ ڎ%B$$!![ZFz'03 {t,YqCX(L~NrFfQ|3i>VN#pF^yyl e"[KC SE"(Sb)c7񂷗E;(*`HZV?qZ!ûPRZ}Av ͓{oF{Si$:"r{gs$:x&*9`YZ>$A":B4A'[.cl/M߅b>*yz A:!V-Ǣ'xR/(K;G+p{yP`vq(Ld*fΎ '(#¹R$>icLK l*QJ2X1#̆o2?S- >?b/6jyEؔ;p 1fjԇtϸ`5g#I"p0ȕ7Jb?|bMgl,EMA{4؆7ڹ8?򩨖1R+~Dp8!x<*c[@J@tSQR '.6,Ͼ!m1}|N wZV1CvVK1I B>4ԸČc2Zjj©aú>r=DH-S5kP"^al2zki rӻQrkrǐaJWێZu9n]s Mw9PR}ܯhsFl+v;t lF϶[JמVg{"€x:g M+"RnL˦i輸I쨍J]gʢ`n~eppa`Ҵ,!=,\N 8`/e,+mt$'BƘ2:WyL[ dWBcH\l 6M%")K.T.U^^>-̒':CM1Zi@\,pRkGzE۲9W1L+حUPF\aiWvbbf\:0bQWx_?s.ݿy5科1jsh b!Q4%'!( |19 A츄݃tF9v+ >VAMM8d"%i>XDmd[9mgurrr4wGdI]k;;zCT@EPע&<)pV$R`K`rwԊ9 acEĚnlM3+ؼVKCq$\u%WG=a9vPGFZ<4'YC޹xFѻ-bξ5lrE` imL0|v)~;'ѐԴ|2*}ّ \lWV+AM$A^Qo\8BMN)]zmV̳,1WZ@Hd@Ջ[XЭ2 wk[˝%G@,Sh U<]k -wpJFoS%0O rLuG+R*=>oxNT+:|pE7 x8-S"В{&L,aX G.A$kn#/<eXz ̀fJ\84 e&o#ڏ DmbRoe_q2upw\I>a‚MJ%X\Ot+ b-ijZ/Wzt^BڑIlƜW/_'ft@$X&z{ H87$  A\hϸ.Io J6^mn x1 R{44zlz Ǹ\+vC&HUrr mK 4r=Dd<hF FxmOU:`6rÜZt/ IF6oq&cts [osF"9şJt4T3捾J4Xe)}MIIš;5=fR5'bśk m@zC7kg)Q~#dZUOfj)E{񉇓Z[|#0n C1DIju|XvG)[*6l! 
A9UvuV^} $_\ A  1h|\xw2%ޠD&-U@3C1@#ᑾp|pU/]i]R@j7"74s <測 #Uq;t`z1Hb$zޯ"e>6/ow[.݇TQpN{gnѨI o 10Y!2 h[cC渦 עEqN\sp܈=uSR$yq$`Ҏq2.Y}UK>/_y aEDrAjI1/a$+м̠Kw]7{ tHX|*jk3e225ۀs| xzpZկه$[S 5J{Ͽj+2;ixC Oi{Xfⷊ ]ZOuqdE)Xn}\ :-Qۙgg0ʙT[FQBFehŒLZQN A׃P{ƹ+ D-A++-29%FOU[AG:ׇyqj[z]߾K~Sh*%PSs/IXyǎ*r'sdٮS~Mi(ֻBXjz 챇QLMWղ ]%;#'vLSP1xW8(|k]=|KR #Jmd=#o]~+06h D3I-uRQS@g6jVMn KSe#;}#J_1fVfNg$Z˦RpzvFZn/=ƇY8j=D:=GQ(b _}160iv~N2:["Wm"ΧsxF<={neHpۜDҘI. 1abע 59k*oS>+>hĈ^L cquL!)iģs]0~Va|ʗQDЅ!;t3Ң)Y:4y*Yf?OW!t0a\@zT~Chnܻȍc͙M".ǻ)$4d2HM!|ɰJqqyUOV8RR;*<"~ןzHMZ__^8Ou \-S; С4pa ~Vo}CoF8E%؃ɢ{X_. A*LUFVU6um:]v{^pa2KO,l=)G/LuӞ J!=+!f12I~ :9)EnĶ2{:[ aU 2J-"YwřJ>i!˴V.]}756 =ͼ#1:-0a4M#QU Bk/u ub_<&Ȍ\X KܑJLL}*2 "AH,NgpcgYtzqby[6;5Jh:#$>dv-ˢ n^]F3T<)[>+46x ѺGOhEԤ[OΈ.:UBWWeHvt#,R[ԙ8fNMj_U gH հ'W/7pi_7K+,7m!/hdO:1h/0 QT1.B%/|Fy~dr=abE uD\Ɣ,D"ƴCR!F3_c6|gVvc# c<^op]2Tbl`(D jp"7ޯJz,MTUVXᨌrWwl-pM}3PyIP.RӮϯf4p#H()Άe#pXj5pZ}hw`(7NSJɿ2M_Sサ+1ƯlLZ}Xt5.~1щYXl1Je҄>ҷ` _XkQ6ʹ;\=PL-wahgU#蜭&䓓3ijؓ%} .TcD[pTUSbP!$B74bo!k 0=WB> stream xڬct]&b֎m۶UNc۬8ضm;bv_=ۧOu>?15qM\sͱȉTM@v L< #';;n9z ௜\t:x `'{8Z;U4iiS ?4=,l\Av6 [׎ 0D$T I-h Pr10Yl@S;Gc;[Jsb%A@ Tt{hv [ck+7WBv-l)99;;Z;FUw@b;YULZSҿtajNg?@ '{k-f@pMANNabӝTݿgN kSxf16ggVmMLQ33Z{L@ vCXf#[#r\_%\6;wm @Ϣ:|6'j w0ig߶ۚB ' w9hgۚ-lAW[LLEfnale VlMkW 2R/Ca7Q<#"bgгp1fa߄Ύݿu31Iۙ3:@[?]8kA w1ʢ1Ԍ4Z 1^f`|j԰m ÷`)6S}^,kʞde)u_>E'A ~ Rڙfռ΄~ T㗫'jR| G{d_NFg OznicyؾIĉ /N -h n.~n3]} 1 -Ⱥ] d)6&:&_׹(}̱z^/j1rQ2}MJHjp>C'$DJ 5MY[#O\\9Rsgi10òS,`3)eN6$%p37م@]ZX|B\H~<qS8&`-|^Wrp)S,u( -xHҝ: kA= oBz C= H#6 i80i7 i;8Z?>j5':iT62EZ,2?ql#?\RM$nB!Yvgp5=sK( "1o< ؂1pP@@tѼ|^zs|{=r[Pj=uu^ibRG4D:=ۃߋim͉8#5|q_PK *_nhP\1j BXt^h]Q;=j՝8]&:6+3?&`^e; ڣNY:_u'0CXK~X6A'@;hL̕J2v!Zѩm\s)cO+4:X O G^ Js0OU{EbתϴgQf9YT'9-=A[ZRh٫}ܤE [?miMڸXlfnٓM~VsP1&@'e2"-^`qpoxXt~+H1/:H l-Z{"+pk Xp$νAud}KcGH0]:ׁ!CBc[٬k>I:,G@ BRӏXuޕgS )dze]sM,S\6&,eo"m0"wlPj8z6p1`L! 
ѓ>&'1 5!YQI;E$t+NȊ zw 0jP n.nUD2{N'=Y$A1QuIG8{X(5IԩtBn>0nPAT.^$DB+侪cز]ΊvA5%# .2|w0wxziI-F=y`Q쬀lj~Y^|􋝵9CLPq\T~7k6BJ&5)PG зL0HI#feK<(ovXeY_}xv.=d}10(* ;ub } ~+:E4 pk.3T!EؑEY[Lk>dOh崇34ş'L:$Ut QƤF GӢf̓t<9^amf=՟ӜQ# L(AĉrE0 ub1IcЎy_v{M)\']=M mpb~k޹-$ߡ\LcA/(&"7(ؘ]E6nQjpQM4i؀O3 54&:vLS+Iv;>Ǐbgthܘ L@=h,>|\T&fcV:݋\Vdfmnz*Jxm/,2iϖt%0Ď0mcF#ݪ5d$$58@HgqI 06FL7*6t%>Q^4'{\م^tŒv7X[ϛJ!cblMŞe@K[֜ˤZQl8]/*\6{ڽ9 jiC!?o\F뇣Lz#w9?M촊K_Qn5Y1r6e-cJpN dȵThZENWRZ@"ː~>W pi&%w\3֥鮀E% c/4ASOw1PݼBlN3l6Ŏ2_4a沞gAqI '`D8OѨ=OC u[w tMfU8:Rq/x|1+#Ҕܮާj"ZRN L=rfrgzkELHI4 By9#Է@s#gA2am~K^|ղ!db sW1OIdxwO?z~`$-Wz`hJuu?(̙bkhYV"D:U][ȨB )Ƶ kHj\u ~VVDS+ײ02!%#OX#F.~ظ&)j,w a06)[)F!3Ûa0~L%XۊSj4g.*>Ò^Rv&fF#nY -[w n [n.D(+C_6VMm&JIdom %=^t*Gz&@Ճ+oŒj?y*VWSmw]Y+0  ?P(lF6ld/2P!-zK/g޲q^o9&nY턑 {{Ќ}*ҙ9mK)J0F99eT#3՗gsDdjzUed{E =rHDY/j7 ~v(4|$C)'"L_E*KчI?j7OAJxŮKŝXB8M>'Qk1~d H4hKqOArX yps]H|ք(v.#%n@R i=S-=$Z4Җ6z@1{(:"a x?8ɶC_#P&1v?hĴEG9FO'xOL45Z$0tAGbQu5Q..SAaSa:V7˥*vZ?Wk* A254=6ӕH[QDݖ [֗SC|}{D}0hTƦ/bk+uؐ~KX*Jr;<3:${g ׋I#LK)J`̶oQþx(3[0 LMMGeR_4Ai[ T}riqQX7{bC~v<֗/kZچ"-[﫩ʟ珻SwA6ZCX6nˈhknT&eQ }}@ ʆ--n?5`^>ֺZSVeQ\u$Ak)0 ,Pꚰyd)ƵDFavf!& BOd'KrB6s^CVr#@ n/묊[N+ PT{ 8B-%򇼮J|+'/(,r:hQ yjG*:Zeo"~98̓=N`lfT0۽"=kC~g"F3 Lf)9FL-U/HH\t60-Epم}ۀ9t 㲜fGoR^}:܁qU^1%v$E4>9B Qx>`HW)ə#/ixJM=he11؁l,n#Q] 6vF~w$7FxXZ"VbMͦA?A uF1,YA@w=|2mY?-[q%{bB16/ oFeIjL MqV܃ 'l_|+Hڗrᅮ߼*)FpK0 φ= ˛  b@;0x tӡoakќʄn~[=.8tpoiMK'.:{#k^R⩏㓯+yg;gkT }g4~G <#_+t}w}Kp.! "^轟!S1O+Ѕ3""+)_DcK; ![^/@SeJs!BM=FKhrH'⸟u dTBJ!G.w 6NЀH Yl40nDoM|p&4К拑6~ϊVVN#yMTͬ4 B@~ʖLTkk!G|4GE)Jh%ͷ 1g9RW.W{f;=a7p1"^`m|01([6p1`i(nh?hS!Ւڳ~5A}evIx ?$212K6l뒒$p~C2>J/fyl_/YzGYMӷN3srq#:k0<\`_Č99s}He}QޱI@3_{:psTfZN{[XB6!re [͐ZR*+B*Cs}b aN7ُmgi\Bo n)l|ZM &6{poRzqP|`3T=P}o#-}(a6`/H/)B(%x\GC;>rW.텤 (΍mWӵY@XQ?1m{DF(SlnA*x_{NJƈ h |zsQ.g_x {*WzĴS۫ :yۓVe#:v}{ yJji|Ov2O? 0>};T.!Qdϙ?:iW4"%G2u\X鶆:%X?5EAS MHhL'#*X9kv P(Џv[ߌCF?y4f{ղ? 
i0Kb"PlV ?-暶 o%zF㸕Q~ZIOJ l;wL']9u@C{630/")Ōhɓ=i$sCxaͽ*w*!QʊιvOQxت1>LѨJ B:4a,ܢ[2^ XKCuc)5گeW sOD/+{\Bh*:(rڰߜ]m[]ҬFMyu,VPMQjCWէ(;Aߧ貤_xw32hCn9C2K"ȗ&i4;]js.bTIŮҧUdKԲ`6^YSk@iZV'j׋F ΂g|yDG+o}$t;;(+ *M(D._:J'oq:B>} *Ԃ=T%C7nA3˻~0?D]h脼@Bkw rݪf^ M]^shOfJ8YPd^.*pVz4vsV?׸!ܮdAC%]+`b\!f* ;c%1qZWKR Vr B9ՉGs>F;?PnC~boj(Uѫi tUʦ& F?W?Ksr`0Fw;^:J*'~%lh-1u8LZN^{ws;TlbPWS(->UǍ^Ձ!&wW >Cc=ҿɾ Bd$>iNT"Vs:De;Y\.|m}C5:~ϡN@i'Yr[ gƯY{x2 lN_d=Bd8+>$MW]>0INZ\=f٥WndnZ$͔ŋ(f"+\orEbƂ."v\4hi=PVRWfV|˛1K眶jo'0 FD085\KszԄIAG=roG!2H X.ԁ0Yo tQMZ.mrxIZS SG,䆛0 C S`co.a  ;^Z"Y}<3,͒r_Tw}WuZfеGw%j]m*=7P:k\SGe ZSXr,wcC{S`CFM:˱iՌVU>&+o]:Eg_)S=FC⤑,Łr]>Jw+ b$b>$ʒA`BnR1p?[)Q\'}fڤRwad_Ŕ [jcYhPj%-2{F}Ń[j" 'ńL/DD4rh 1 zdl HNmp/ZʈP'=U ?Z(9^|mGҮX):0AŲ0$U#h-ԀcnQCh6bF?w,Fncl?NZSK0|wĬ뺣QMcdި`UK3xYb4(уHYՅ "$&?N^j%R5j}.],iv<@ YG}g!o $wG)FIY-nmT:la"%(}Ց>}3='֩wpអsn`#A΋h* "Ylvg(B寨xƖY/g~n*C\M#ɱ-ϰYr$?:qL<φ<&򎧪UnQmƎ Wpy7vUQ)[\*X+oa@dSzbˮ!HD?ɉn{.ke@?9ώ Q㖗Q@89-"/*' ((vg^0P7jXN(%pF2uVscZ]!3&V, c;C~C8R;T qsvNEb#W>r+pkMQD*SϞĆ\ȉ}̻5ʀYdlYQǼ7ehb''oTp4VXt,TWoa%~>&*Cm֏z~Le=OyqQc6/#_U25#rh}[ m'BP鴼g5yEM D%ipLƤۅJSg||茪+"Z8uTR9 O(=I(B`iT:S:3i--*T4SVT WcmYSPֶ]0)C}. 
o3SJ%l%"l >v.w{8fvl6ٰ0G $pI FDm#ƶ'/~9QܒP̉#HenzlXG5{!Oo<+_{>W++NuivBIAisƙ '9 2_ݾU)ѴqyQPBmV>$ӔOo4EBxf P27a'djBfw H`xR{h-溺0@< bvoS~yHR>wo8,|`eqTT ]% P-$kK L#Y(@朱~EEU;d7L}GӪUr?={g'#9IJ(P|OvJ QL=V`mtX E8 뜽#֞!gi* } QYu=ow& d92_TG]pK =߁jn g\W#P~ -v8R^?ܘ8tP4)THQeʤzg(}߫^=y^o6`|یᗳO޿0F,Xٕg'.ݦ[Pd!`eé-ctÉ؈V0cU$?kjEO$':<rSU ˨RP!=^F^TS<.BĉC.Xǣc*o7gj(wtHNҟ)nS~~)q1#L1cr J'.s1x,C8ArQ@@A!9{RW39nJʰv!F ӏ!wFGz$I;8YR$%Pn0NW\~QH|mMBci('uKF(C;CQrm')gt˾V?A C@x!PEP>~!q%O~OC0s.MAQB=Ne"0P@z ̚R]>UQZCnǫK]wl54 QûHCym!oϊ< ^``Fb>oܰ_zL) e Cd[xؐGǀA7lW 4|ᵲ=K1& ѷ8\r]CҔ#J55 zwޘHHL[풄0Πo-9mb\c_k7-v7ho!V0|bW6~00F6OcT:1 vVqUɣ?wHC( MBF8vH?*ѵ$z*蕂 ShR3w+'򽳚ʊ2DM61/0\kXW 6"a,;}S^Iᯟ]9[Q~o7,wlm\֓ag%G V5ຟH&uxc]T^R‚)(l}QSOmU"1OWȂ(ǬMӘgI7&`JEEBp0]u?]Aۄ13#{Jp fhuRJ_$r"Tx筜=.{e*nѥ AusR)a]ڭY:۪""XVQo .l~?_#s%u=[.N쯸.S{\ ugj6Ȯo|3`QsfWL-sk P;iMAY9bURa d]JætPֽJ+?_Ѩg+X[VpSrPu`zdzB qk#rlCbxZVrJFڠuNMiȷV䦗FӲ!evD)2: Rᬡܻu%!@OAʤd>GKh;]}zrj8#/Oqd6J&Qӛ9z - hq@C\KW$znO@3|%2W~:!2_P M,O$gPBIq6XȂeLXoG5K1[޲w\yd74ݗ(sT6znśb*G[c!' z*oXnj.d=_'/QћqT-פ0j6#"B1CReQ,T[9߫ 10SRxxfO^f7Ck|~VǘZM #22'Xn9 5Gnw߻H"8+4yQkI Ϥ׌mŖPuf;5MSpzOئ@e@_;=8Iۡ@my< vJvG,a-ha(Qtw;6tF%?Ge%բ*R^΍ 1%U4eȾO}olv)W*4ˉ³%LojsucHpΩZuRf Yij$e_!N#YMxk]ZL08[)1"z0sCȑ<pA+Phƃ-O&B^aUئ0Psv:uQ9Lni5O<0.eS`vFԦݚLj-I!B:JpOمB= 9pSmdi,b&x5תjRb9xFf7v9p(xdXU NxЊ1{^}$ے G÷L)mE;`XaGFyi-$C< 4 nLi=Y3|vwmCo7Dt"Ik#B,mpG9w$}!o.s#f>WXK%H[:"MZZhcmXh{x"=m8wQ2k ڈAVW69&Vf(.ټۖ!SQ;hygwHM}% 'mY9UF;Eԧ(m sg.ؚ1Cݲ3H\AXzYT8Ov}!hl*7B3UV$hp)[04;HC"ba;&ED,; 2 RL$*2ߨ9)GPX`qKO!Ύ OzÙ"29*T;*dmL;UkZǏdoH`mDF) 3/  `da8G z蓳 }(+ ]SŨZ/vi!!v(Pq0t'e&Y Uw`)u <]Xrb\[V5T,M?<_.Q+5*_)M/fY_5\zYρd|Y0/Eov 8GApZތpU"(X:Dک~LS77N|>!1źYD`玣pPr}#UI:'wձ .:v] >)ӢfOBRăHx͑C8i NW_9aaJ0C)CAy=cz=EO-`9&kO NCLAɼ;3a1UDSlށˇߞc}]D{u保(-඗aH HJkpomu"dxPobZFqj-e`38,2t0XA__2`bĈoڍv9ɧE(&] h\@H 2±I:}?f񗎞qؽ*Z o= s8!D(Of˼3>d7RU&7,186r)=Տq=Q 3!\0'烢Q31`|jnoWM[R*oX'U`y]hA$:b3s0)}"M_4=zuЙʬ63M23 t\$d2QJ/C.gw9-ͭeW;~%"쇻{Ż/m2J:7^9a)o]^JwVO &G # &1.-=!pfes\Ђ;],t؆z^ `@`+?8F38GXQ6p2+`i󌧪Ԅ`e5WVj33`֣3QVL?:.ڥJsBDD,{/5D>Mi(=ab j^n00lSI"ytI?6J&~'ktXœ|KPZW %-^pٳ1)Tִe;o>}*/~zOi XS Ѽ۱s573?=V 
=ɫi!&$h5~Rv} Ճ?1kz-:Jׄ'@A\rzfE~#bQP(l{)sLM}ABˆMWa\t1Ej&ɝ>v}Q?c9ZFS;+!Pq_*np6 ^!ԝ3>JjDQl ԫ;d&nWN0*ԆW˧#ނ 4E.e|wCX*5_:s9dNa._iQvjec@[`1OR/M1-TёS$Yfr/%VUP3x6L}:Th̜kPu.)S }܂3H'u ӕ?/~yL:s49pkew.^`u$p^)GTB+bK\&>%tnZ5שw> 0D}pn͎pW?29?كր3${j SW.lԃz!X6&yXenqpt*߮P;ẂTfO._9!iu >mVX噃Q~,ܯH1,9A >HQNrb@6nvxTVڸc!)%2*̢.⩜NA-\^K-w],R$rt\\l endstream endobj 1495 0 obj << /Length1 1644 /Length2 10617 /Length3 0 /Length 11467 /Filter /FlateDecode >> stream xڭteX]-{]iqܝqn!%wg"9`\1/tR/"ZAkM*Y{ҦKfPӎ#A H+dG$Z3[]α*ˀ ,'=Kwbbza1׈ܱ[`RW$*lE!X_<Oi ñVch.|/R :K|D0ۨX0''T"Hr?wٗFfÍJ6Ԭ(ޭd!,7VAZg}&0 ݡB.r=@]? J&la3@qlQF;&c%1q=k.>?jl)ĭ7Ulؐ2᎐o]d2!IT({{j֮վ;htb}e-*uꋕry1LB=\1=q8>rC.u10.VSbGq@[n!X+/õ|y’_}'s r1{qoƈ{-[2!blHEቡrB /[vϷg[$g2Ӎ Y*X߄-8Wt֐g{Wj~5th[b AQK3ljKiZrϪ[%YX*^AQqUYH=Pe^3VTͩ+Jۓ}ܽqt ۅ cyȯ^)f8Җyh!k.)є0]g1PTȋzI{*0q#'V5BzL{A.tʽ{"0уe 甓I\gyo%莂YhC*[Y~/KWdʀDioW G9ZeǖFLxz\' I-#LYrSB5e8rҧqBfVEsy[T`SgB` s]ȣjV2*;AUUvNBl\M0~}=bޜz7akPe!bOD! Ks.3zَ ;?C\0R_s{,nDQ$ jYD ~꣇1Nޑ+pMuLhipGD%⇢DE +7-9@+:dN|xJJ1l˜2WO%Wfܪ1'{{i&%aQEf9{iNHGҌaUy0룞*dJ#|-~])ތ 9fBă G[}Uȴ+},4UZ;Fx9/@On{+OhYCWߏy#pJrn+`}GI&9[ABz&X!&CWbF&kv]P }Wh-Yp&_ -lpd餭BDa#XoH3Gr۶V'AKH;zcY%[h3cj5`Wjlb7/rRh:X;n⮰RW>:qwR :n×rx3zV jGhfLLˏ0XLfd+KO`.Ăess¤>R)Oq"sx6*>^|HWDMSYX Yk/*`!!+@qk&l"nO&ƿ 0*ooRE1qõ,Ojg5!rOͲЇIrBJmܓ/BA|/좈2:AfFFPiHs!}FJn8Dwf!S 3vE촄(e5x/s D74D> x~{feU&H+~~5"T̽ڸԍ~mtLgym +:()Q>~Cw?I|DOe!!-Le';Ox (5~B ▞Fvǹ(5*`23J̬RSM~3m5ͪƴU-N׋l`軶LSc"/?w3$>2q%rL(L,Rfڊ f t(Q SR2hMo3\Exꇉ~WaX'4,t4)3ж\7D

0xNVLwWw=$}35H^MC./\.ԋ#k1}4N]B:kYrVeol` 5b\$6KC{ꈈ<|cN6AxX0Y:ʁٹq+/!1<1~a4Mδ$09qIf1xٻ o b:\=D> Mps7o Xe:&})ۍV=FZ8|aO.B4E]D*<~*\D:kfꯟ4ҩTV`G"L8lF/(>Jxej#HBG_O*~Tpl+h1%Xd|\*Ĺ't6 !GhvkN")m} rs{c3` E`|<'ئ>%3]'~xyME7jb;!s%Y.Xgmp2sEߕϐj>(d^R{B.cHƶ1q(=%9y>6 p1J}ȏ.L}{W[ Ǫ2M-wQ]=J[R"%cw[R2|RvTMa X 9*SA Z։\&a rJ/$r |yXOn|iP]%?$6MgZܱk)qoKR}QyL 8RXn4-[Czq1-RW [;ig}`/=]W8K[zOkϭhY(~rY5l}x>A緛5kɊ &,%+94mfU-lH1QjTS!&8nBQ%F}Ż0Y!jŠA`-oCbbiR7W P S]C=\"tS' -h1!רI L4dw13NQB1!mQW̬w`)o-)4UWEpDc'-J@.^UiМsB7Ukv8kSh/G5:A=vP.{u&KgH@7mxa|#!:EԇCtnQY=n/3?kn,dl- +by_?*œⵄp"D>[Lh[i@\|*[Q}u{w hx6} x+礼ݞfP`,'HZU !!6%`ANLV8"65Qvp< :Ru%r P+q]1E:هBZ]LL8uNX6[H NЂŲG!ImSv!Y`i|m2b|@I\Ƿ0/4yƆkO&p5[Kwjt;#De>R#h߹&ʮǠ!/TЂu(h%"R*<]+$v]Ufdɘy:@m`}h _GM/(pKAѝӧ;?Z3 RdۥtWSOn$`;:e1Ghyvӽ(7&H#`)l_$|.1^PqPCI 4]gәWzA`t% g,d4*L@$kjtW_T71\)of ]C6,j^z('TYz]iGTP= 8$m~I}Tm)_] kGL`D!Cb:cǔi*<:;Hz_輦. {:ᘍ%E.;uV;@X]B!dJ3-BɝR?1,; š}\B)?a-\ W<?Z~ JiќO%/4HSZ$@OrHwbx%G#wuj~kiZlg,-aw gg8ސ3rUs| ']]+gMic j+_G% ~R mSw)BODpU4d΅ #1&_ʪOC޹u"(uŕ?5 c;GK>D[(RvSҜe&(q{Ͼ5,DNw$/dj"~N37b).4;!r~a]pRg̱nh DmzvAk!ŀ*pd$Uy6r=e;Utġb3t6 D7SS R-&{0DR!o.QbE GR+;A&kb&}o|JNa13Os=s4gJђdXMMZ/j>,"ܳe&Vݓb=RE}*zŽ -wA%2@+ncxMMNě6x7-\$@቞6!\P?K:i7PlD2ێgqc2oOjVd :hnCjRq U hS*Mm`p-qEly}O۳+m+[+c"SvY[xOGF{.2\J@Cyu@8h ب|EbCzek }#FU݃gE)n!T tNGA%xm3]{%s%k%%ۚnrȁ qi\_ 1=N߾u\ơEs:&P7xG.R\V1+3&E)7hΪHOֳPJ}FR`,?V{ȣ:&&)]?|4(ibyNPn s2Zy+].j@ !KE(2\9UˏYg[ѻ Kn^q#o}d|&TaX5$Rߋ`M{̐~=qRV褟1- ZlstBg -AMvw{@Z%V (j-\g脅QdB_$lwy"fuXl1c1cx4Ѷ)Vש KϿr6j9 ȕی]CiوOKyZQ3ne{v–UT?cl:\@\:gC`\hL\MTGڿ9{+oS :Ѓx&'6reʹu8;`OB13]z.K:lillCǡ72WEqP[;|+i?it'N7R*~|W<-<cn؂ / XBPc=0mu:L҇/lZLklXQ mZL!C)n\zf=+ >̞F-Cl1*$mOn̹յ4zN[4F93 ?KA/*85H*MfS37FpNjC]o+h fƖ1WqJ}$>(R4l|ڵÂin\.Db)|HEK#r \ >%r;W .^E2Ҩ[=+e}*^bS4<h pe*v­ +۾CxӼ$G SiLS7sV_]߼kq?;1&D|bu&R~_ޭ*.yfk|ӓMu7t2Hk]RցP媂 ,NGH"㪳G|Q'vquOs:}91-< ^w-ffĬ'&4ӢVP0=we f sq/_G^ɴ8t}cnI;5S]nΕ2M+T5A1M8nFJ": C$"@:5{ȱZ [I5}4+7#čpO%mN(; Ap9Tebga ص2=Y>KdV0vJۂd9t& ?P언osWSQE2 'iPRùNToAJDGFMuOM/ٜ BvUx>= ;,!G,.-Ԑ`JgF@E1:xrfpuOsg{$`).eJf;'E:Hi=؇̃wd ļޔ2  g޷^> ܋Evr9 0?C? 
T?@ kv]8G7Il)-<]9="xOiTm{yzWtkB)0dnݐ}(}9P0t@b8 +BMu=1jLrˉa A,g'.P7J`*]v2сm#=e.)zߛ6NO{8zhd8! -|䫷׏_T>9lI*9ͪ⎎5fE x[Kߥި<6[Kt3K 0V mix*(No d|λ5~& ܭ4)goXTrfD l9Sr}LeHYѿߛKC, "^[LLRL#W8f(0j"j¹]%G²3\9GcZRA` 0i7=Q=k|Еha0IƬD X: VAeoSAn q09qw Y湏V~)܇.c}TOϠ쇡4d {Sdb) PvQ*okçќP&84W6䰸-+r%֘'Zn(H"WP`%+Jg,9nR^;qvyD*i/ [(] gI_9,FNdr+ !ן\hbx)`8iit I/t1Wq*T_X#?Sn1={_"8au`xP 2_TPMXc^BspP  }A|S`!T$jϣЖB,r4d,zHݎΜQ?^t endstream endobj 1497 0 obj << /Length1 1647 /Length2 15135 /Length3 0 /Length 15992 /Filter /FlateDecode >> stream xڭcx_&vvl6wl۶ձtlwm3{9s:|9zVݫZwڟ6* Pޕ`eg`#Ǡpv58((DƮVbƮ@^& 4Xxxx(^VjuM::`_O+ {w/*p̭lQE%miI:@ht[)@h;8l:؛YS _.a1hj i t8\\~\`eojfOJ_2%WSg+GWߨJb.Vaߝfn//_ t' `fhk7_2Ggbeof@49Uoh/o_9XmXX4u~7w0n;_DOM `4cRpp@2 "UJ-fk`l=g=6΀_wkC[Uf+ +O%ˮnot_׉`jV6oho_kۿ*`RPT֐?LmVj^@)`P8x|X8y \/_D,7vu2323ϕ7u0T]29;U_o%=pk|֙߳\GtYG˛J2#wy>gxtx-;~cR hP(莂 .4c}nv t85U >g؜an^h܋1ȟM30QA/(SN_GG~A%Rcz9?6|sr@At# 't=HU2'Ҵf}3h)*@  h 7 E9{Bf&HۦQq |jZMG`f˾7.4blXV ,z\:5R8G\(;Tj/Nt U4q*U+?6 ÆjJ杔vF|,Rj$xcz-ђKF}`Xw饍HDt2os.:%{f|m],@/GaQ*K\]Ф iv%d^-FV>72Jh^ctd #4K }Ng5aYSIZ"_Sw c/Q>:":cIwA*;?A0H䙎<1R˩DhZy̌ݨ@]?ߣ Ai)""D,ZꇶVL2h1wfB~E;Cƿ*ȮAϓOL>V 5 de5? odERMr/AA}<}Pn=w* ,6( tIU*%0y9䕊I,cR~nuJ: )4mh#JCQvqel bt͛: qK1ۈX+%?臷 *BN:֮UC4tgc8لZ}o$L=y>Sɘ$G_3 ?z5/v9렄"M;6B;ЩAg]m$^@Y;9 I)ukk߬DdkD2>'kAloh2xULe? }b85{gN1t=8jCluɄi:[[g cߢUvhz>0_£˵nPS&*i1/R*7 (.МOozeXtR3T N<EoKA(gx7cZ?$:U"g ޶nQW~;~(~[,2AWt#&Fl/Z`Fd+t$)tEP͵]b3W^qCs:#l <@mȻd՗c` t } h/{%SUȨt r/AK=p=yHБx3KYDzնǃqո0eⳅV%rCcw9:E);~jKi k#5ixQ,rR >Уnac^YKm1(/fǻe!LEyZz8,LMݟj~mx- *îaAy" HNvk[J֓pu,cUcywp.DtW9܎\tY II&L Af^;rnƔF gG*c_ѿkoü5AGOs*FUdyy(v>ҖdHڱȰ Y/A[`'B/BOymuvTep,2Rx.Wxcb ɩ)vu_2{f<'䞞mcx TH#g$ yOD?j-e/F2$4Z(e2|z;b ԉs-7N3J w~H폭y,V ~xZ|~u*t֢-QJ@ň憾L\3[1}S[F8jKKO|U{lAWHZ//J~쳚R}X2< %PooUjʵ)?f{5%J6,FRx4uO\C|bm 9_wv^Q7 xǶ ̗~Z/11s׋DPH? 
`*> CPsHɮ8ZzR΀C2YvyHp+Hf}E9uRUflXJ.p45&y\hfh^ #Ruݡ$۲z1WDqGvoFhPabazͩO9/pI|_ŀ$j?f&oL1µ;FsNsٷΟ.Qdw%Z#) >75d\71w Ҍo_t8X J/O]NWU[聵s1h79<%`B%;.U0.bqS+H 4wQ&κC)TWG(9ٌg/WsZf"+(Ր:i._/`Vɝ-Rur/sg7+8F޸v#&ۨ v܃ @fԉ3q!qZ"#o&-5wa5HJmBTno#tVdP x(ERs-+"^ /` ӧLdjfk #@gj@9c:nTAW],~8PMhiB+(~3ʤVЉ5M{L/Y3 S")ŠxzFM 6FYKV ;6&f02`NH'Zq.qqSLpT`11b&Zky VrӒDkWBb'%N?6?a9 9 VqdX;n\+pG_ <roQ1 @e!苻_CT\~ϟzcޥƃ3>%#Ť ±7wnX+nFy#sI].]PRtf7X/n=`eJ](z)%/Y%3 YPf9%6[ɢYK{G#$snxjw4#Si#Y8H*͘Q(;܂9#d \DJ%OiJ`' 9QRQg9tn :Lrezs52$R6)uP4/}4xr킩~0 >IP;]A3H#K}pڻ[^Ǒف^{q ZNpz-'6Y.TƤ*5B˖Ovϫl&5oz:3NxKcly.ۢ"pl\'4rp!N"'Ki[B}pjRݐ\Qȓc2+I֪N.%?|m;g9\];c Ǝ|E/]:ǡWWNXR8n,ȸjЍH*D&0iN֚a0T40WG d9w *{ܒ(x]gE!]F8o{}>wǓô޽׎w m"<цb<$lYC+L[ H6aܗQ/uCwzXkZ4/U0_O:.âopb0D/35PBK9`V8CT%6/M3׵-jr <S Yw?-דu+}aWS.y0IULS 9#QGA+W-_ mL3b68(>>Px9ذhֲ9H]fW>8T4 JɥYWiժ{Damukmjwr@vȱ\ђT%@z2!"GQ@908ZC ,mZip/r0 .a<̝o$ ӒeZ`/0,AȆ T8t3{ﻀ^d56=k2sI3: J K*FcG@ Qf_Eo,5Tfŝ/gšʋ5QQ(n`XY'h=Ŏ6s9psCHUfƹ+@[xPEQ.H`={,Ei·[;Uʧw~7<=AT1 4ug0$)/²W= 3hUQc',b}VArUkV蔧m.VC}(r!ɭqUHғn7p芅 x9 Ƚz%g3cA#'C1ս g~RyzM9pPݐ?𛏟oTt?ZlB#) RF*roHKߔb!JKi/Y㚚M#HXBl%݆g>$(=:L'ޒ;<*lDKW=@ e.V7r}cZ`y?ow$gғUV+gՈoIxQT"|s)8Ow0ϕ< |T~Vi(In'FjI1Fw# IZc}(f(!r/L>>( O(eu0sbfoOYvnoMO݋G)^P"@r})I|jJb[U-5)@F4XNoeW..ۨ"KJ]h+,eivKƳ53{R5Ó;Ivs`+SJi'>1:%jl&gʴVmYׅ1^r5+1R6vs|$]nɩhUCDmJ5zEYC~2\ڨD W}G-yЙ?TB6#!02!GA|[ۢ.bWR|{xɥv]+A܌+,1|EUoVJ#IU旿bҟba"-́+α.ׂd1;)ӂXە;s Q^H$e^5lJ"L;}d,,GK9AmN@9F3( 'C6i)u2L;̘Tèg4 YżUx$k#ҟ0hϵ򒮍z!{)ŢBzub-a Td']a+|,=eP t9|mN )CIX@7?Qq-'GN58@2FMVCQc:լd@1DbZμ4Ĉ Ԅܠd)b(1}:MՒnvaU4$Zm4sKj}K؉n-ӋvB7N&ZK_H%B|Pw3iUKA-#w=) HBqi:OInځ-UotKo9G!J͇ps 8"41a\"b&HLedՍlZBbW< ";pUOW?pvxV_F!u'OOMG~^qW?'?Gwo8@nzF^Heiy[Cgk-( .LFC(by}n1z?vJ"S{xy}-K[ͥz% wi@aM֓T__vx4 S\Ƈ셂a }x,OO ѶcCtӍA {Ƀ["u_l1hbS@qI>u>_³^ĤzhdyfSCWy3Y-RNtJؖkK*F{?}ۜ%@_΢xk5frH>n{*o)ZW/;"]J D {()qU L691lG<M#~ؠXEgGkgX ;?1nRg_GoP1 mJGc=4o~mA@xx3 ΓTٸqd@@dL#.IHܖq7H$箧==\MYhg,-!p\Nɖ*5ڈ߹K~Y#uK#T)Wqj|٠q$ItSlb*r4x.LgPHL Φ1`Cf  iOaAv?bX$<$?-O1ͼ}d$B_/oPV]Ic}9sSCz6 c P#yES  MY4b`/n?HLyQ/W((رbB;&͉z1o(U󤀪ok9w`[Nno1d7̆3qݶCTxCE' zJ$PC KCzuS;eN&J2ONYE wS؁,bON}IdPQ?KQ V=D4B2֎ U+Oz(֛f 8 
=dHe,wq!  4/T5)Og,4M@@+Ø%qiKeQǛ}'k,XGҎ]" xG;]= 6ql-T Dlm/Jh,+@)o=I}Oߚq56(O'Tgi(̿ y=k2rʕ|bh}SHQ TP"wxNT>@(A da*9 =K.o>/}-T~hIShq ]ڱ:BV4!E/<ŝe ^)wJêVMcٝSH/6rޘ{ROb3bP6[`>J.%˷(ZQ){l^-ߓk6ng‡0Θ@MMNLܓ+Vk.?!'̎nӶ%ρRQe0)Ġ7w]v;xiL0}C9tBi4-F9g&2~=CYWd8g^`gM"5\~ajk Tty[{ǒT ǶJ'xBx1*FdV֛],nL/ټ1($طh}ixťA`5X 2 Pve7 >c[b66Ilrzv>mlR4& (u]62[`,Yq75(}r<H:5FcԍEa=2LPׯr.G!AH5 @7g>޴Fl>7@jk:LAFvF MO՛ pqzd,쉨6"݆0۾um|9&Ƥr=^[ZgS洼$X PM5%F7K-AS[#Iւ.$մl0"9l\WbfYY"9|3=?V8$[mi`7ML8pM7m}L=?+:`a1(?06mg{֬/!)-.;ԑ~~1 *LEFwkj<."=mYqٚv # 9"EoEXTs:(|i{WăҜA==G:ӫ;b89 ɧKn6/7D$Qs"W֫! &[L= O83ڠz3;Ee$@8ēͧ_Ev?hL%,;$IB6鏌")*T;OĝԠQZS܂TVw /-j+ {X\[a[Ccxo7y5$@Շ:c~_vbp, ѻ=Z_W0g$l$PX,\ f0 7HWёμ׾#R@6-/XZau*ǜ58.ۖĵHs4OlS] '^՘΍4X ko č< Ŧ bwJw"I5f$>!W'wCt(VWԤC Mj7wSW^Hǐz=bPr Cf'W9'9oJc>( c\5CUv7:/`e>Wkmr9 pcĭš:2i;3'h ꨡK~DҖ <Α))v-4n{?NqM<5 eY*:mHX̗׬KF\מ|R)\jtRK%t4ȷ/_~,Ń!ݴ"r%~1eep'SJJ^ M JC\Ub} %){i(gf^qۋP:&Ps3)>* i4cCLԦMzP;g=ͣu〉*Y"Tɱa!9\oZ.峺 &Je$/Xsj'yl,REbhd\d`LD~pzN濺ޒ,_IeR̠zo6L1 #|ީG`r^5slېH}Җvs%Cݛ 0m1 eGEl#L`#&aUMLb2!@ jvY~ËYC./7P.rI)^/E?e{\>/.nu\N(7*R/d≆||x/ SU0׶,2X~*7*!y(w"g0x yA0MYOP>@b[MSZO56#6QcdVj+`RJK7ε(zPpVd+*~AdW瑴:S.oUM=[?ؔw (-ʼnq|\ endstream endobj 1442 0 obj << /Type /ObjStm /N 100 /First 899 /Length 4070 /Filter /FlateDecode >> stream x[kOIί菻WIB@0E 4;fl3KyNU&0ɮQ꺞:K7ҦO]i`,e0ui,bdXLv:@񌯤Þ*xFgLRJaƹJ4SP)O35A )]4Э WMC }Ʈc[鈱L aCF:cרdeHk0u tYB3PY2X@w[1H's'|! G{<.o\?m?T?_{.HrO9KOϑ<1p%zWZC8*~?+F:V3AW򮭯 i )=T@%TYтr=ْ$XIB$G2{YΤC<5n. 
qIU켒t#A3yYsħȐIRAE@|FX#4ztyMKQBk ÄFP-M20hU u&eU޺:ċv '89zK樋 i{[Zju @ovLhOEa 9EWA3LOY㏕x;6s%>{QݫES}Ng)-7,ڳ,vbK<NE<̀?KsWJW%-=?oF wNM=` T^Rsn):EqKځBKnRԆIͮ @EVj'7=(zpW%Dk^Xx:SB"eA74-k$HAˀvzKpRR`6=@>b03y]V6zZ(eޥ9O 4(s$P Ĥ)xeER]Ԇs .)E$ZM ,ht^qӬ @hU4I2 "<>aNKGJa 6Zajy ,En Jb0^#*?]4R0xǾ-*mt/`-q$c1nq/KS`[{v]O[)u5(DgI =KF>.GT ;`_*x.S %̧."upCl("JhNjY*C䔣g 2B%ƶVjK: nR@'#spy 5 m8Zf70Hpa9%9 HrЇ)jdmX.s$ I("qvVgC1l !0 yTd!Ŵ,Rw;N!g8׆B%9ڥVzi92{flwbؼڭ /Ay<&gܦL*箹-LYc̡/QFĔepPn2zh.r n:q D *J|) ,Yf8Iv̢Ԫ^ulxrJSҕ̬ȕ)r;"(ӿtN(tMq"-,Vu,K,z宒P/^+[ئEZ柙=`]J Yrd &8~JS jFN BW{ XF/dzK2U ,iLfeYTruNFf4Dq<;]zmt7=>Hn|> HbN61bybȎW,|EZHb*ܘF]<[s&9 CLO*D<Y,ɞ dWs^WRor)R dA `ju*Hw>Һ8 93l<9&q=sF.GKž,N lˑȺ$,3r[ [+ĵY3;4}L-||d<៭>h.nFCl /f)XնDfTSթ-dvڌU%_5%p_ߝGŨp\[8o7֧/7D숧x.^{o[q {OA_P gm;oGg uʹ&NtxڌyMq wk4©8&cjLMFDhc'o#q.?q> q).?__6cъHL\FoN鬽ĵN1ͽtu3l'gzt3o&d:FS1vފh8s16{"n=Lşf:{ዖd AKJ~x*Ŷ2ն1jfflSA9/CR9zўl1_>i/.x\Lvv=~^HgXvypQH?֍I2Y{:aevCZ7o'ˊc$'$׻$I*}g|!(M))t!+RVҬ5eid#f]_a|(N/ۅ5#! C\L!4Zٰ3 uo=-H{A;ܶb^>|k٪}6q5;"v.nW(ӏɋ^2e~L$}ʂH4Yt`!5_} ?=yN8d@ *Qōxɴ%+S"fʡ "kT} *ձP ,°P P(w)(`,Ro~3eI#4nzã'ǿD16gRp0]z:M,? M\!q%m氒11m\pPNN%E LϺGW>HFxn#;z7߽?|G,d%/շЏq>LK*K{rq{G?a:{\3m #9QVrJySntI Urov}p:\O@ ն6)~EK?긁0dN zx׃G\ .zgi\E\ k5d\ZJBGWJռ3>~ѫܣi @)Ò@\4M3lT?ӏRE|t>܌F#*\:I.{ǛbJav5zϯڄ7㣢o|xrlI˚:Ɉ9?Y15az֭pQkͻ5\aENjr_ ~aXD3lꅂaf^z=;2>#y}Pti9%n endstream endobj 1499 0 obj << /Type /ObjStm /N 100 /First 887 /Length 2465 /Filter /FlateDecode >> stream xŘ[O9W~$F┄ !F.7M7C&ɯu`DګrUrq=a+#RmS(ω(L(b%kLsp8a>DkAp'L#ř칅=FSHÉ%D(cr"[Xg'4 j"5( [Dg,z79y8G1( 0Ȇ 705\D!e0!HbC]+6$ J>FL#YT]K1d ʇ` <(X?m,#B<e0 ˨\pe" %ĀxQ/<m1 XhX΂dQ!P Aq~ʆa`Y,&S+,èUĢREb,m=:T=.dCO;*p$sXCB9t͇\H鹺15 Zeag7RC9%BmHyS=CH3!$C14I2B1% ~}@OU.z@'WWբ]VKoA&jUĔioNex>Z\VbͤfEF9Qsq (ގ׻jLMc7[tu*z5_/.'_gl|˷f4KZ}ZzQUd>C I6ݡ/+7-ݣtH!}G1=S^,FU5NP[QjPg>)NG:tF~ + J=XOՊV1?~ Gַfr=G }QB'!@Gx{:xh΀xktxusr2YM_Ž^g=^_phY;=;;y3jyܭ 4 fFpcXnF Y{v5i@Oƫefolmn޿+`/0Sgȸ։ra#_Mؽ7mԶN'ıƙk8ў&Ƅ7lc[ĂM{|"ĉoȷM2M? 
gfZ5x4oSm\uWt듻`GDc\֕hǻ%Jd+vwpǯzq~76DW( i_z5.ٮ٤Սd(ƨ`,q3l/,?<}egL|7f}ј\d,ec21sT4Ƌƌ6g,ܔK}dƘLI209:e:`rtT@@P*P "Y *@F @&TdH29"e" rDx@/` #V<E>f`n7ƻ^k,{1c9c2cEcϙXgjSe$1c*1&rt4&8l511f#[`Eb,F $lLI209&e:`rtt@P*P "U*@E @&TdL29"e" sD D@/ #^<E %x"g;'E \DyN7ߜF-+o"9ݍ[VDts%6mYmimڲ&B(eMD6Qb lN_5לFi-+k"9]ZVDTs%4iYM1iiҲ&B(eMD4QB hyX' endstream endobj 1504 0 obj << /Type /ObjStm /N 100 /First 1030 /Length 4801 /Filter /FlateDecode >> stream xڝ\n$9r}X,f~CIV.tYŬ`RZ! OɈdMTg\mgT]tΝ6ɦK u+߹=b6)QLO? d2izdf= kיXwV[κOC)!v{;8U2Y滠=,rf 5 zä.$'M0PYEyauF5]L\08=.% Lm6u98",;&y`G?S dN-ؤw )pp<>pIjisSa3oպ֗R߷_nK\0ސ*\m6+Rɦr:(~=fx Si9"*pmX)[^^-v߾˹s~i*~$Ǐ:V2Ikq88Y7FpޭÎ؞va??7?P fPW}]mȀx 7l*u^Τ=m=bxoMQBzU|E]*Spﷻz=HSM:+җe#U: # YPjuWTv6k iBjT܀vm{:O;"W*WuN\9ך? ߩoPKl#um&~ Aj0}r'D(JwM&JGd6 OPj6`ѤR. S96 8;лMotś i4 @jgG ܤmYDV ,W5/R鍺lY\[fr{K82ft鑡P4=ZR`lr>!Tr3֪W_Y2fc3O{b0U$<",ebxEeLg q'zhObmMs˪&g{WKdbػ},x`mq'[[~W V]`c5F&\y ˻`o}< Vg&6&|le.㉊HFRzMiGXW'Ug;0/\#>ҊBgV2zJØdZ ڲ?m 1.7s>f^-YisM=W s~,q2@S;D(x 9G" &TG%8|vyuSlhS}s5Wt1rیލ~rod- ~b6م޳ը6-zz^ NC>ֻ~|T֨ۿ\Tev̕W1e˧ѳ8\66^n T %U_A/O/nݕW(& a|wk$U،n#FǪg\E'F?O)KәdMܭ|?Á);s%>U[E^_"W)z\!L[b~=|x+2V}TYhzZ^gPugXMg#;HnH(HM8ԌTE_MU)(|9qeYK,o0߯ŌoxU֥q\nZW3@BaUF׭66v4Nϔ:'It̲XYde>E>R#72pm !ED<~ZK%FR" Kq"$c|*W~ r#'UӗƔoni]@3*| o`Ç}:qX_hշLarj"K]*Fr\QϓPQ4M*O4f\$&uu4~SZZ.W/5m-]r[Jmܢgѯ:QՑ7ll\y}ii; 2Β|jğU4PPQ"wmycqKckv{?]_صuzd11I?OoiJ ~zXhQ1}@齣q~=}?w=N_ӻ= Zז.?k|Y]ESf笄?wPB<. 
뺬NwU+OiahFZ˔-K1=w)đyEi4Uf%TLՇzǪ_W×K)FŇOgʊ tW^\ob)Xݷ'etl0KSJ$xH\) R튔fuZ`03$%~ZGJIm|(1qJwW:.zlPГJ IAAO*)4pw\ ·=I'ibi E(;%e) bqL1[d|#F14Jebb ow7 H囻B;@GSB >0`E iF2h6RYݨh+FZts щi$/f H<mB)H>|K`-@tc A+1 ~ \L"F`Znk;$Ԉ`IҞ(z>QnH471K1 @7#5YE!IJrOG4YnHcӁ*@"E U;=Rdqy )2d$P0m$$0m `ZL[i )DhL[16r(B"i'@`Zwi<"GC^eAYy )j((iY$0 L#iy`$P[|f @+n(B:hy2$-OTy d?$pf,+ 幐<2x%@cqy )Dx-%[}N@[=ح .6v097bN3s.ONXfK93.CkqZˌ0]X~I`ׄyMh-S.Z˔&/*,FXFfhB,ծC/N oo `":QzvNhw>z endstream endobj 1634 0 obj << /Producer (pdfTeX-1.40.20) /Author()/Title()/Subject()/Creator(LaTeX with hyperref)/Keywords() /CreationDate (D:20220106094401+11'00') /ModDate (D:20220106094401+11'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.14159265-2.6-1.40.20 (TeX Live 2019/Debian) kpathsea version 6.3.1) >> endobj 1599 0 obj << /Type /ObjStm /N 35 /First 342 /Length 1255 /Filter /FlateDecode >> stream xڍXK6WW!d([HScGrd}ADJ޽߃䐲)XiGXÒ4 ӀMࠁ3\hZؗ ?|\(6?>kƓFd>bCb!:(CAC1h(Y6nHs[QW&' a/ʅ|c={ cGK#g1Ά>Q_d\P#QW7ޞN~k:ͦ2s7eV4kl,٨~mw،?,^!PRfYulʬlГf]U Wjm(Mjm(ֆZԥȳĕͨzx|iʪQʬZCUkcE]Jn,WhΦ:*.l*7dgAMy~9*46X͖iX%퐍ՎUdf,oe3B\t 6FV,;$%.=Z'zwTk?QaTk]{ZZW.lFWUWjmՕZ[u֖sZ_.]4WNjmZ[ߤJZ[5ͤVM3Kape3B\ eGIjmq9 Sw+v{A@!Mfp8owxZ=oVχxb,BpUSaܮOݮ7%;$g`Itt>zmg5'%%%L") 8lMiNPPPʘAvS^y.˗5 ^^^Ɣ0fnwSc^|^]6/zǁ\a@}\?+f :( 2Zt[mF AރLkѕز>2-K+)y,]D`j0ft{!"DD'<db"TDPaj84HFEib"T$HPj+mJbD$H1\!17s$ 0 @E$;xzajWp N\^*q X%>K!y*8O< ;SNdM{̅vxo@/q}Zos?ꅫ|tK// G~ߵ7[= o% endstream endobj 1635 0 obj << /Type /XRef /Index [0 1636] /Size 1636 /W [1 3 1] /Root 1633 0 R /Info 1634 0 R /ID [ ] /Length 3875 /Filter /FlateDecode >> stream x%Y'Y:}zާ^mZHPob! 
8E1.8R&rP (37߷Vz~S9UEE1QXon(fb &Ҝ)Ŧ4N)s`y y0_}~,{ Vh.hׯ4Rbj.:aN+aZ皫 wŞi5bĞjubŞh X=[FxSl$Hsl Ps l/@slv͈o]=c}-̓شMCR0%vC(;,q8!vN}͓pJ뚧sk30N,gXy 9y.=,WftpUX\?~]= qAbq4きd;W,n7bjFbb;h@=c q=,vJ3LX< Nh!ͩIH3uc/z]B8&ƍ݄r18(vDٻbhcn>w4/vH:f{[ٻb&41{TMfFbcnXs&wb xcnl*ĿGٻ5bcnJ4 Y;fc&wLdcnKލHJ4b[5w0\*]Mǰb Ebf.nc%;V똸c;L3u:Q8c΄1IgX3v љ;6Lw;/wJkl5BGR1y"X/p`] ̬#28%z/z齭Mpbb l?3 k`Sȧ#sb{^x.x.ŰB|aiXojXka bzO<7=~3lbx5&ޅ^p) ]X (HŚoŏ88 GtR9'2KϢSpf]8 \K(bxp%[ߘA!kp>TA >1@.@M5 ! 孖z߹A} 蠦j:&_Kr}75zDS?b%Z먛Еj?oTb{tA?yS$8hzf^E@dO 2IE/TdSDofɢ^퉡7tA7>SDO= $Г@O Yߍ{;v1SX3A+XAGWlKh"3zIJ3]U_c% S[ tAO}ʣ'> [qI'6zEj:"(4?N&ѓEOɑ)*cf,T|)Iff `>,EF _? X 1nsę߿'6› 6VU4w0.[> ĢP*gUtp 8 X7VMU,n߇Xnހkp6Eֽ%-g4A\q,Kgnos>Xs96R{ȢU䷲~kI@k/|Zi- Zhcg؏ S:?+:hc! S{ge f)]m#NBl̂0]SV*%[ v@+aQ,cbo ?eKѕ%zpr[|:;{:j ˌ)=}p+p NI8gū, p.{p53܉^nmF^4V4VXM\VrY=8yJ) RєJ+[O%U$P ET.VI6/A, VbFoB M\V,`UYIc䲒J.+\V;RGkvN;'}؋lb%gkH"8AETtPo%WGRƯxEYT!CV dQEEYTU,[@mT$P]J+~J@%Ul%ڟlK5@<}w~5.9bE4q;HeHz>*})5ߌf hf hEd(gre`h~-8b^ze^ze^ze0 D3ց? ?`jdz1H ݃t,S@* ?H ?H ?H &A f,3[*; Iaᕽ_@JR)o$Z(y䊒?JN)y䞒J*m3)edzlHOSz"-@-RMSZvkKVi4QvKC-#Wi罴]>S&-]ҤPQ¿,O )/πү_\HJ[%gT/m~ʼ:rVROiW,}(=5^*KU_c!Tz>k/fuG?||>8=lTGcq41UGcq4Gq4:yQcDưjn{;ib8&qt/M|qt?M8z&>8ɹG䶿iWIb5O׿Gy/ы44ƏqWi{baIb7_oNS?\"sE+2WdvwYV>&fNɜ"sE+2Wd\"sEvC +2Wd\"sE+r|+2Wd+2Wd\"sE+2WN+25ejԔ15ejԔ)SSLM25LC2 e䓹"sE+r|r+2Wd\"sElo$1{HS](L̂0< `!,Ű2rX+aVZXal7aWO endstream endobj startxref 314924 %%EOF distributional/tests/0000755000175000017500000000000013703764147014616 5ustar nileshnileshdistributional/tests/testthat/0000755000175000017500000000000014165427252016452 5ustar nileshnileshdistributional/tests/testthat/test-dist-pareto.R0000644000175000017500000000147314151532232022000 0ustar nileshnileshtest_that("Pareto distribution", { dist <- dist_pareto(10, 1) expect_equal(format(dist), "Pareto(10, 1)") # Require package installed skip_if_not_installed("actuar", "2.0.0") # quantiles expect_equal(quantile(dist, 0.1), actuar::qpareto(0.1, 10, 1)) expect_equal(quantile(dist, 0.5), actuar::qpareto(0.5, 10, 1)) # pdf 
expect_equal(density(dist, 0), actuar::dpareto(0, 10, 1)) expect_equal(density(dist, 3), actuar::dpareto(3, 10, 1)) # cdf expect_equal(cdf(dist, 0), actuar::ppareto(0, 10, 1)) expect_equal(cdf(dist, 3), actuar::ppareto(3, 10, 1)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4)), 0.4, tolerance = 1e-3) # stats expect_equal(mean(dist), actuar::mpareto(1, 10, 1)) expect_equal(variance(dist), actuar::mpareto(2, 10, 1) - actuar::mpareto(1, 10, 1)^2) }) distributional/tests/testthat/test-dist-sample.R0000644000175000017500000000103414151532232021760 0ustar nileshnileshtest_that("Negative Binomial distribution", { x <- generate(dist_normal(0, 1), 100) dist <- dist_sample(x) expect_equal(format(dist), "sample[100]") # quantiles expect_equal(quantile(dist, 0.6), unname(quantile(x[[1]], 0.6))) expect_equal(quantile(dist, 0.24), unname(quantile(x[[1]], 0.24))) # pdf # cdf # F(Finv(a)) ~= a # stats expect_equal(mean(dist), mean(x[[1]])) expect_equal(median(dist), median(x[[1]])) expect_equal(median(dist[[1]]), median(x[[1]])) expect_equal(variance(dist), var(x[[1]])) }) distributional/tests/testthat/test-dist-uniform.R0000644000175000017500000000125013703764147022174 0ustar nileshnileshtest_that("Uniform distribution", { dist <- dist_uniform(-2, 4) expect_equal(format(dist), "U(-2, 4)") # quantiles expect_equal(quantile(dist, 0.1), stats::qunif(0.1, -2, 4)) expect_equal(quantile(dist, 0.5), stats::qunif(0.5, -2, 4)) # pdf expect_equal(density(dist, 0), stats::dunif(0, -2, 4)) expect_equal(density(dist, 3), stats::dunif(3, -2, 4)) # cdf expect_equal(cdf(dist, 0), stats::punif(0, -2, 4)) expect_equal(cdf(dist, 3), stats::punif(3, -2, 4)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), 0.5*(-2 + 4)) expect_equal(variance(dist), (4+2)^2/12) }) distributional/tests/testthat/test-dist-gumbel.R0000644000175000017500000000146014151532232021755 0ustar nileshnileshtest_that("Gumbel distribution", { 
dist <- dist_gumbel(1, 2) expect_equal(format(dist), "Gumbel(1, 2)") # Require package installed skip_if_not_installed("actuar", "2.0.0") # quantiles expect_equal(quantile(dist, 0.1), actuar::qgumbel(0.1, 1, 2)) expect_equal(quantile(dist, 0.5), actuar::qgumbel(0.5, 1, 2)) # pdf expect_equal(density(dist, 0), actuar::dgumbel(0, 1, 2)) expect_equal(density(dist, 3), actuar::dgumbel(3, 1, 2)) # cdf expect_equal(cdf(dist, 0), actuar::pgumbel(0, 1, 2)) expect_equal(cdf(dist, 3), actuar::pgumbel(3, 1, 2)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4)), 0.4, tolerance = 1e-3) # stats expect_equal(mean(dist), actuar::mgumbel(1, 1, 2)) expect_equal(variance(dist), actuar::mgumbel(2, 1, 2) - actuar::mgumbel(1, 1, 2)^2) }) distributional/tests/testthat/test-hilo.R0000644000175000017500000000112213703764147020505 0ustar nileshnileshtest_that("hilo", { # defaults hl <- new_hilo() expect_length(hl, 0) # display expect_output(print(hl), "") expect_output(print(new_hilo(0,1,95)), "\\[0, 1\\]95") dist <- dist_normal() # hilo.distribution hl <- hilo(dist, 95) expect_length(hl, 1) expect_equal(hl, new_hilo(qnorm(0.025), qnorm(0.975), 95)) # vec_math.hilo expect_equal(is.na(hl), FALSE) expect_equal(is.nan(hl), FALSE) # vec_arith.hilo expect_equal( hl*3+1, new_hilo(qnorm(0.025)*3+1, qnorm(0.975)*3+1, 95) ) expect_equal(-hl, hl) expect_equal(-3*hl, hl/(1/3)) }) distributional/tests/testthat/test-dist-multivariate-normal.R0000644000175000017500000000242214151532232024475 0ustar nileshnileshtest_that("Multivariate normal distribution", { mu <- c(1,2) sigma <- matrix(c(4,2,2,3), ncol=2) dist <- dist_multivariate_normal(mu = list(mu), sigma = list(sigma)) dimnames(dist) <- c("a", "b") expect_equal(format(dist), "MVN[2]") # stats expect_equal( mean(dist), matrix(c(1,2), nrow = 1, dimnames = list(NULL, c("a", "b"))) ) expect_equal(covariance(dist), list(sigma)) # quantiles expect_equal( quantile(dist, 0.1), matrix(c(qnorm(0.1, mu[1], sqrt(sigma[1,1])), qnorm(0.1, mu[2], 
sqrt(sigma[2,2]))), nrow = 1, dimnames = list(NULL, c("a", "b"))) ) skip_if_not_installed("mvtnorm") expect_equivalent(quantile(dist, 0.1, type = "equicoordinate"), mvtnorm::qmvnorm(0.1, mean = mu, sigma = sigma)$quantile) # pdf expect_equal(density(dist, cbind(1, 2)), mvtnorm::dmvnorm(c(1, 2), mean = mu, sigma = sigma)) expect_equal(density(dist, cbind(-3, 4)), mvtnorm::dmvnorm(c(-3, 4), mean = mu, sigma = sigma)) # cdf expect_equivalent(cdf(dist, cbind(1, 2)), mvtnorm::pmvnorm(upper = c(1,2), mean = mu, sigma = sigma)) expect_equivalent(cdf(dist, cbind(-3, 4)), mvtnorm::pmvnorm(c(-3, 4), mean = mu, sigma = sigma)) # F(Finv(a)) ~= a # expect_equal(cdf(dist, list(as.numeric(quantile(dist, 0.53)))), 0.53, tolerance = 1e-3) }) distributional/tests/testthat/test-dist-bernoulli.R0000644000175000017500000000120414151532232022471 0ustar nileshnileshtest_that("Bernoulli distribution", { dist <- dist_bernoulli(0.4) expect_equal(format(dist), "Bernoulli(0.4)") # quantiles expect_equal(quantile(dist, 0.6), FALSE) expect_equal(quantile(dist, 0.61), TRUE) # pdf expect_equal(density(dist, 0), stats::dbinom(0, 1, 0.4)) expect_equal(density(dist, 1), stats::dbinom(1, 1, 0.4)) # cdf expect_equal(cdf(dist, 0), stats::pbinom(0, 1, 0.4)) expect_equal(cdf(dist, 1), stats::pbinom(1, 1, 0.4)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.6)), 0.6, tolerance = 1e-3) # stats expect_equal(mean(dist), 0.4) expect_equal(variance(dist), 0.4*(1-0.4)) }) distributional/tests/testthat/test-distribution.R0000644000175000017500000000116714165413625022275 0ustar nileshnileshtest_that("is_distribution", { expect_false(is_distribution(iris)) expect_true(is_distribution(dist_normal())) expect_false(is_distribution(NULL)) expect_false(is_distribution(0)) df <- data.frame(a = 1:10, b = dist_poisson(1:10), c = dist_normal(1:10)) expect_true(all(sapply(df, is_distribution) == c(FALSE, TRUE, TRUE))) }) test_that("variance() works correctly on vectors/matrices of different dimension", { x = 1:8 
expect_equal(variance(x), 6) expect_equal(variance(matrix(x, nrow = 2)), rep(0.5, 4)) }) test_that("variance() throws an error on non-numeric objects", { expect_error(variance("foo")) }) distributional/tests/testthat/test-dist-percentile.R0000644000175000017500000000123313703764147022650 0ustar nileshnileshtest_that("Negative Binomial distribution", { dist <- dist_normal(0, 1) percentiles <- seq(0.01, 0.99, by = 0.01) x <- vapply(percentiles, quantile, double(1L), x = dist) dist <- dist_percentile(list(x), list(percentiles*100)) expect_equal(format(dist), "percentile[99]") # quantiles expect_equal(quantile(dist, 0.6), stats::qnorm(0.6, 0, 1)) expect_equal(quantile(dist, 0.61), stats::qnorm(0.61, 0, 1)) # pdf # cdf expect_equal(cdf(dist, 0), stats::pnorm(0, 0, 1)) expect_equal(cdf(dist, 1), stats::pnorm(1, 0, 1), tolerance = 1e-3) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.6)), 0.6, tolerance = 1e-3) # stats }) distributional/tests/testthat/setup-tests.R0000644000175000017500000000007713703764147021105 0ustar nileshnileshcontext("Configure test options") options(cli.unicode = FALSE) distributional/tests/testthat/Rplots.pdf0000644000175000017500000001324614164704777020447 0ustar nileshnilesh%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20220104104254) /ModDate (D:20220104104254) /Title (R Graphics Output) /Producer (R 4.0.2) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 823 /Filter /FlateDecode >> stream xVKo0 WIۀ5EO}`([m?$KtNۃ|K2Lu~ __( Z?owsxӉz W| Riu=uF`rÁ'|!De F9`r!@0NaDTR >Wo{zh4&|?W4wkF *#x *fak}ۊܘ Lk x *)f/8A-)c|,ZU牁 e#s;G=q.09NSrpv1!m% ڶ3333`QzU ꦌL^32g,#X$#He8Fb-ϟe*"j %H|?Dr83 Hxuؘy΂嵊̨s6/&ZǤGhq9qV^#\f{V4嫛ڜ(/wnTR:j\:;q*gu(>pbw/Z<8})T)K q-+d@^^OػVr0 x:VoΗ?vf*Hendstream endobj 9 0 obj << /Type /Page /Parent 3 0 R /Contents 10 0 R /Resources 4 0 R >> endobj 10 0 obj << /Length 836 /Filter /FlateDecode >> stream xVo9 _c{$kTiW8 
Nl9T}+?;IG[;5XO?`ИU[x -/ys ha[~uͫk/˻`5?w\ }#1X,xrh#124X@KY?*`psR5qa~4Mt4 > <% ~4] rRZ}Ǧ16{7UP6|7h2p)bݠ:[dUG=6$Ap@ "MW%)V]A-F ٪׊ E4y@2GHa@2ʒLhASʠtGRH["t H׷=CPÖEJl5uB,Z\1_踂[,*0Y^eFd㰬?dsuм0 fG(j@<`p8 w #3Cv?A\y%r<3W23ep()=Z&#ש@Cڙ`"މ`ⅱy`i`bYJA"{G1Ͷ<"m}3}{M;fu48-a rP3yI1$;s4!K:fMDmFu#>>2mMN[;\ub$)1(mE/I 0-^~{=>9̈瑊T| p}ZZ:ھ#;s&:.Gt}1FͼT29E,aMB<rrc8- žB> endobj 4 0 obj << /ProcSet [/PDF /Text] /Font <> /ExtGState << >> /ColorSpace << /sRGB 5 0 R >> >> endobj 5 0 obj [/ICCBased 6 0 R] endobj 6 0 obj << /Alternate /DeviceRGB /N 3 /Length 2596 /Filter /FlateDecode >> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~endstream endobj 11 0 obj << /Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences [ 45/minus 96/quoteleft 144/dotlessi /grave /acute /circumflex /tilde /macron /breve /dotaccent /dieresis /.notdef /ring /cedilla /.notdef /hungarumlaut /ogonek /caron /space] >> endobj 12 0 obj << /Type /Font /Subtype /Type1 /Name /F2 /BaseFont /Helvetica /Encoding 11 0 R >> endobj xref 0 13 0000000000 65535 f 0000000021 00000 n 0000000163 00000 n 0000002175 00000 n 0000002264 00000 n 0000002376 00000 n 0000002409 00000 n 0000000212 00000 n 0000000292 00000 n 0000001186 00000 n 0000001267 00000 n 0000005104 00000 n 0000005362 00000 n trailer << /Size 13 /Info 1 0 R /Root 2 0 R >> startxref 5460 %%EOF distributional/tests/testthat/test-dist-cauchy.R0000644000175000017500000000126113703764147021773 0ustar nileshnileshtest_that("Cauchy distribution", { dist <- dist_cauchy(-2, 1) expect_equal(format(dist), 
"Cauchy(-2, 1)") # quantiles expect_equal(quantile(dist, 0.1), stats::qcauchy(0.1, -2, 1)) expect_equal(quantile(dist, 0.5), stats::qcauchy(0.5, -2, 1)) # pdf expect_equal(density(dist, 0), stats::dcauchy(0, -2, 1)) expect_equal(density(dist, 3), stats::dcauchy(3, -2, 1)) # cdf expect_equal(cdf(dist, 0), stats::pcauchy(0, -2, 1)) expect_equal(cdf(dist, 3), stats::pcauchy(3, -2, 1)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), NA_real_) expect_equal(variance(dist), NA_real_) }) distributional/tests/testthat/test-dist_lognormal.R0000644000175000017500000000165514151532232022564 0ustar nileshnileshtest_that("Log-normal distribution", { # defaults dist <- dist_lognormal() expect_equal(mean(dist), exp(1/2)) expect_equal(variance(dist), (exp(1)-1)*exp(1)) # display expect_s3_class(dist, "distribution") expect_output(print(dist), "lN\\(0, 1\\)") expect_output(print(dist_lognormal(numeric())), "") # error checking expect_error( dist_lognormal(0, -1), "non-negative" ) expect_silent( dist_lognormal(mu = 0, sigma = 1) ) # density expect_equal( density(dist, 0), dlnorm(0, mean = 0, sd = 1) ) # cdf expect_equal( cdf(dist, 5), plnorm(5, mean = 0, sd = 1) ) # quantile expect_equal( quantile(dist, 0.1), qlnorm(0.1, mean = 0, sd = 1) ) # generate expect_equal( { set.seed(0) generate(dist, 10) }, { set.seed(0) mapply(function(m, s) rlnorm(10, m, s), m = 0, s = 1, SIMPLIFY = FALSE) } ) }) distributional/tests/testthat/test-graphics.R0000644000175000017500000000160414151532232021341 0ustar nileshnileshdist <- c(dist_normal(0, 1), dist_beta(5, 1)) test_that("geom_hilo_ribbon()", { dist <- dist_normal(1:3, 1:3) p <- ggplot2::ggplot( data.frame(x = rep(1:3, 2), interval = c(hilo(dist, 80), hilo(dist, 95))) ) + geom_hilo_ribbon(ggplot2::aes(x = x, hilo = interval)) expect_silent(print(p)) expect_length( ggplot2::layer_data(p)$hilo, 3*2 ) expect_equal( ggplot2::layer_data(p)$hilo, c(hilo(dist, 80), hilo(dist, 
95))[c(1,4,2,5,3,6)] ) }) test_that("geom_hilo_linerange()", { dist <- dist_normal(1:3, 1:3) p <- ggplot2::ggplot( data.frame(x = rep(1:3, 2), interval = c(hilo(dist, 80), hilo(dist, 95))) ) + geom_hilo_linerange(ggplot2::aes(x = x, hilo = interval)) expect_silent(print(p)) expect_length( ggplot2::layer_data(p)$hilo, 3*2 ) expect_equal( ggplot2::layer_data(p)$hilo, c(hilo(dist, 80), hilo(dist, 95)) ) }) distributional/tests/testthat/test-issues.R0000644000175000017500000000034614151532232021056 0ustar nileshnileshtest_that("is.na() on [[1]] (#29)", { x <- c(dist_normal(0,1), NA) expect_equal( is.na(x), c(FALSE, TRUE) ) expect_equal( is.na(x[[1]]), FALSE ) expect_equal( is.na(x[[2]]), TRUE ) }) distributional/tests/testthat/test-truncated.R0000644000175000017500000000410213703764147021544 0ustar nileshnileshtest_that("Truncated Normal distributions", { dist <- dist_truncated(dist_normal(0, 1), -5, 5) # format expect_equal(format(dist), sprintf("%s[-5,5]", format(dist_normal(0,1)))) # quantiles expect_equal( quantile(dist, 0.1), -1.28155025885944 #dput(extraDistr::qtnorm(0.1, 0, 1, -5, 5)) ) expect_equal( quantile(dist, 0.5), -1.39145821233588e-16 #dput(extraDistr::qtnorm(0.5, 0, 1, -5, 5)) ) # pdf expect_equal( density(dist, 0), 0.398942509116427, #dput(extraDistr::dtnorm(0, 0, 1, -5, 5)) ) expect_equal( density(dist, 3), 0.00443185095273209, #dput(extraDistr::dtnorm(3, 0, 1, -5, 5)) ) # cdf expect_equal(cdf(dist, 0), 0.5) expect_equal( cdf(dist, 3), 0.998650387846205, #dput(extraDistr::ptnorm(3, 0, 1, -5, 5)) ) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.3)), 0.3, tolerance = 1e-6) # stats # expect_equal(mean(dist), ???) # expect_equal(variance(dist), ???) 
}) test_that("Truncated Binomial distributions", { dist <- dist_truncated(dist_binomial(100, 0.83), 76, 86) # format expect_equal(format(dist), sprintf("%s[76,86]", format(dist_binomial(100,0.83)))) # quantiles expect_equal( quantile(dist, 0.1), 79 #dput(extraDistr::qtbinom(0.1, 100, 0.83, 76, 86)) ) expect_equal( quantile(dist, 0.5), 82 #dput(extraDistr::qtbinom(0.5, 100, 0.83, 76, 86)) ) # pdf expect_equal(density(dist, 75), 0) expect_equal(density(dist, 87), 0) expect_equal( density(dist, 80), 0.094154977726162, #dput(extraDistr::dtbinom(80, 100, 0.83, 76, 86)) ) expect_equal( density(dist, 85), 0.123463609708811, #dput(extraDistr::dtbinom(85, 100, 0.83, 76, 86)) ) # cdf expect_equal(cdf(dist, 0), 0) expect_equal(cdf(dist, 76), 0) expect_equal(cdf(dist, 86), 1) expect_equal( cdf(dist, 80), 0.259185477677455, #dput(extraDistr::ptbinom(80, 100, 0.83, 76, 86)) ) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.372)), 0.372, tolerance = 1e-3) # stats # expect_equal(mean(dist), ???) # expect_equal(variance(dist), ???) 
}) distributional/tests/testthat/test-dist-degenerate.R0000644000175000017500000000136413703764147022626 0ustar nileshnileshtest_that("Degenerate distribution", { dist <- dist_degenerate(1) expect_equal( dist, vec_cast(1, new_dist()) ) expect_equal(format(dist), "1") # quantiles expect_equal(quantile(dist, 0), 1) expect_equal(quantile(dist, 0.5), 1) expect_equal(quantile(dist, 1), 1) # pdf expect_equal(density(dist, 1), 1) expect_equal(density(dist, 0.5), 0) expect_equal(density(dist, 0.99999), 0) # cdf expect_equal(cdf(dist, 0), 0) expect_equal(cdf(dist, 1), 1) expect_equal(cdf(dist, 0.9999), 0) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 1)), 1, tolerance = 1e-3) expect_equal(cdf(dist, quantile(dist, 0)), 1, tolerance = 1e-3) # stats expect_equal(mean(dist), 1) expect_equal(variance(dist), 0) }) distributional/tests/testthat/test-dist-burr.R0000644000175000017500000000143614151532232021457 0ustar nileshnileshtest_that("Burr distribution", { dist <- dist_burr(2, 3) expect_equal(format(dist), "Burr12(2, 3, 1)") # Require package installed skip_if_not_installed("actuar", "2.0.0") # quantiles expect_equal(quantile(dist, 0.1), actuar::qburr(0.1, 2, 3)) expect_equal(quantile(dist, 0.5), actuar::qburr(0.5, 2, 3)) # pdf expect_equal(density(dist, 0), actuar::dburr(0, 2, 3)) expect_equal(density(dist, 3), actuar::dburr(3, 2, 3)) # cdf expect_equal(cdf(dist, 0), actuar::pburr(0, 2, 3)) expect_equal(cdf(dist, 3), actuar::pburr(3, 2, 3)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4)), 0.4, tolerance = 1e-3) # stats expect_equal(mean(dist), actuar::mburr(1, 2, 3)) expect_equal(variance(dist), actuar::mburr(2, 2, 3) - actuar::mburr(1, 2, 3)^2) }) distributional/tests/testthat/test-dist-inverse-gaussian.R0000644000175000017500000000153614151532232023771 0ustar nileshnileshtest_that("Inverse Gaussian distribution", { dist <- dist_inverse_gaussian(3, .2) expect_equal(format(dist), "IG(3, 0.2)") # Require package installed skip_if_not_installed("actuar", 
"2.0.0") # quantiles expect_equal(quantile(dist, 0.1), actuar::qinvgauss(0.1, 3, .2)) expect_equal(quantile(dist, 0.5), actuar::qinvgauss(0.5, 3, .2)) # pdf expect_equal(density(dist, 0), actuar::dinvgauss(0, 3, .2)) expect_equal(density(dist, 3), actuar::dinvgauss(3, 3, .2)) # cdf expect_equal(cdf(dist, 0), actuar::pinvgauss(0, 3, .2)) expect_equal(cdf(dist, 3), actuar::pinvgauss(3, 3, .2)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4)), 0.4, tolerance = 1e-3) # stats expect_equal(mean(dist), actuar::minvgauss(1, 3, .2)) expect_equal(variance(dist), actuar::minvgauss(2, 3, .2) - actuar::minvgauss(1, 3, .2)^2) }) distributional/tests/testthat/test-dist-inverse-gamma.R0000644000175000017500000000164214151532232023237 0ustar nileshnileshtest_that("Inverse Gamma distribution", { dist <- dist_inverse_gamma(3, 2) expect_equal(format(dist), "InvGamma(3, 0.5)") # Require package installed skip_if_not_installed("actuar", "2.0.0") # quantiles expect_equal(quantile(dist, 0.1), actuar::qinvgamma(0.1, 3, 2)) expect_equal(quantile(dist, 0.5), actuar::qinvgamma(0.5, 3, 2)) # pdf expect_equal(density(dist, 0), actuar::dinvgamma(0, 3, 2)) expect_equal(density(dist, 3), actuar::dinvgamma(3, 3, 2)) # cdf expect_equal(cdf(dist, 0), actuar::pinvgamma(0, 3, 2)) expect_equal(cdf(dist, 3), actuar::pinvgamma(3, 3, 2)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4)), 0.4, tolerance = 1e-3) # stats expect_equal(mean(dist), (1/2) / (3 - 1)) expect_equal(median(dist), actuar::qinvgamma(0.5, 3, 2)) expect_equal(median(dist[[1]]), actuar::qinvgamma(0.5, 3, 2)) expect_equal(variance(dist), (1/2)^2/((3-1)^2*(3-2))) }) distributional/tests/testthat/test-dist-logarithmic.R0000644000175000017500000000156514151532232023012 0ustar nileshnileshtest_that("Logarithmic distribution", { dist <- dist_logarithmic(0.66) expect_equal(format(dist), "Logarithmic(0.66)") # Require package installed skip_if_not_installed("actuar", "2.0.0") # quantiles expect_equal(quantile(dist, 0.5), 
actuar::qlogarithmic(0.5, 0.66)) expect_equal(quantile(dist, 0.99), actuar::qlogarithmic(0.99, 0.66)) # pdf expect_equal(density(dist, 1), actuar::dlogarithmic(1, 0.66)) expect_equal(density(dist, 9), actuar::dlogarithmic(9, 0.66)) # cdf expect_equal(cdf(dist, 3), actuar::plogarithmic(3, 0.66)) expect_equal(cdf(dist, 12), actuar::plogarithmic(12, 0.66)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.9963064)), 0.9963064, tolerance = 1e-3) # stats expect_equal(mean(dist), -1/log(1-0.66)*(0.66/(1-0.66))) expect_equal(variance(dist), -(0.66^2 + 0.66*log(1-0.66))/((1-0.66)^2*log(1-0.66)^2)) }) distributional/tests/testthat/test-dist-beta.R0000644000175000017500000000116313703764147021433 0ustar nileshnileshtest_that("Beta distribution", { dist <- dist_beta(3, 4) expect_equal(format(dist), "Beta(3, 4)") # quantiles expect_equal(quantile(dist, 0.1), qbeta(0.1, 3, 4)) expect_equal(quantile(dist, 0.5), qbeta(0.5, 3, 4)) # pdf expect_equal(density(dist, 0), dbeta(0, 3, 4)) expect_equal(density(dist, 3), dbeta(3, 3, 4)) # cdf expect_equal(cdf(dist, 0), pbeta(0, 3, 4)) expect_equal(cdf(dist, 3), pbeta(3, 3, 4)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4)), 0.4, tolerance = 1e-3) # stats expect_equal(mean(dist), 3/(3+4)) expect_equal(variance(dist), 3*4/((3+4)^2*(3+4+1))) }) distributional/tests/testthat/test-dist-poisson-inverse-gaussian.R0000644000175000017500000000145614151532232025462 0ustar nileshnileshtest_that("Poisson Inverse Gaussian distribution", { dist <- dist_poisson_inverse_gaussian(0.1, 0.8) expect_equal(format(dist), "PIG(0.1, 0.8)") # Require package installed skip_if_not_installed("actuar", "2.0.0") # quantiles expect_equal(quantile(dist, 0.1), actuar::qpig(0.1, 0.1, 0.8)) expect_equal(quantile(dist, 0.5), actuar::qpig(0.5, 0.1, 0.8)) # pdf expect_equal(density(dist, 0), actuar::dpig(0, 0.1, 0.8)) expect_equal(density(dist, 3), actuar::dpig(3, 0.1, 0.8)) # cdf expect_equal(cdf(dist, 0), actuar::ppig(0, 0.1, 0.8)) 
expect_equal(cdf(dist, 3), actuar::ppig(3, 0.1, 0.8)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.994)), 0.994, tolerance = 1e-3) # stats expect_equal(mean(dist), 0.1) expect_equal(variance(dist), 0.1/0.8*(0.1^2 + 0.8)) }) distributional/tests/testthat/test-inflated.R0000644000175000017500000000255613703764147021354 0ustar nileshnileshtest_that("Check zero inflation", { dist <- dist_inflated(dist_poisson(6), 0.33) expect_equal(format(dist), "0+Pois(6)") # quantiles expect_equal(quantile(dist, 0.1), 0) expect_equal(quantile(dist, 0.5), 4) # pdf expect_equal(density(dist, 0), 0.33 + 0.67*dpois(0, 6)) expect_equal(density(dist, 3), 0.67*dpois(3, 6)) # cdf expect_equal(cdf(dist, 0), 0.33 + 0.67*ppois(0, 6)) expect_equal(cdf(dist, 3), 0.33 + 0.67*ppois(3, 6)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.52)), 0.52, tolerance = 1e-3) # stats expect_equal(mean(dist), 0.67*6) expect_equal(variance(dist), 0.67*6 + (0.33/0.67)*(0.67*6)^2) }) test_that("Check non-zero inflation", { dist <- dist_inflated(dist_poisson(6), 0.33, 2) expect_equal(format(dist), "2+Pois(6)") # quantiles expect_equal(quantile(dist, 0), 0) expect_equal(quantile(dist, 0.1), 2) expect_equal(quantile(dist, 0.33), 2) expect_equal(quantile(dist, 0.5), 4) # pdf expect_equal(density(dist, 0), 0.67*dpois(0, 6)) expect_equal(density(dist, 2), 0.33 + 0.67*dpois(2, 6)) expect_equal(density(dist, 3), 0.67*dpois(3, 6)) # cdf expect_equal(cdf(dist, 0), 0.67*ppois(0, 6)) expect_equal(cdf(dist, 2), 0.33 + 0.67*ppois(2, 6)) expect_equal(cdf(dist, 3), 0.33 + 0.67*ppois(3, 6)) # stats expect_equal(mean(dist), 0.33*2 + 0.67*6) # expect_equal(variance(d), ???) 
}) distributional/tests/testthat/test-dist-multinomial.R0000644000175000017500000000111114151532232023025 0ustar nileshnileshtest_that("Multinomial distribution", { p <- c(0.3, 0.5, 0.2) dist <- dist_multinomial(size = 4, prob = list(p)) dimnames(dist) <- c("a", "b", "c") expect_equal(format(dist), "Multinomial(4)[3]") # quantiles # pdf expect_equal(density(dist, cbind(1, 2, 1)), dmultinom(c(1, 2, 1), 4, p)) # cdf # F(Finv(a)) ~= a # stats expect_equal( mean(dist), matrix(c(1.2, 2, 0.8), nrow = 1, dimnames = list(NULL, c("a", "b", "c"))) ) expect_equal( covariance(dist), list(matrix(c(0.84, -0.6, -0.24, -0.6, 1, -0.4, -0.24, -0.4, 0.64), nrow = 3))) }) distributional/tests/testthat/test-dist-weibull.R0000644000175000017500000000136313703764147022165 0ustar nileshnileshtest_that("Weibull distribution", { dist <- dist_weibull(1.5, 1) expect_equal(format(dist), "Weibull(1.5, 1)") # quantiles expect_equal(quantile(dist, 0.1), stats::qweibull(0.1, 1.5, 1)) expect_equal(quantile(dist, 0.5), stats::qweibull(0.5, 1.5, 1)) # pdf expect_equal(density(dist, 0), stats::dweibull(0, 1.5, 1)) expect_equal(density(dist, 3), stats::dweibull(3, 1.5, 1)) # cdf expect_equal(cdf(dist, 0), stats::pweibull(0, 1.5, 1)) expect_equal(cdf(dist, 3), stats::pweibull(3, 1.5, 1)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), 1 * gamma(1 + 1/1.5)) expect_equal(variance(dist), 1^2 * (gamma(1 + 2/1.5) - gamma(1 + 1/1.5)^2)) }) distributional/tests/testthat/test-dist-gamma.R0000644000175000017500000000125413703764147021603 0ustar nileshnileshtest_that("Gamma distribution", { dist <- dist_gamma(7.5, 2) expect_equal(format(dist), "Gamma(7.5, 2)") # quantiles expect_equal(quantile(dist, 0.1), stats::qgamma(0.1, 7.5, 2)) expect_equal(quantile(dist, 0.5), stats::qgamma(0.5, 7.5, 2)) # pdf expect_equal(density(dist, 0), stats::dgamma(0, 7.5, 2)) expect_equal(density(dist, 3), stats::dgamma(3, 7.5, 2)) # cdf 
expect_equal(cdf(dist, 0), stats::pgamma(0, 7.5, 2)) expect_equal(cdf(dist, 3), stats::pgamma(3, 7.5, 2)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), 7.5/2) expect_equal(variance(dist), 7.5/2^2) }) distributional/tests/testthat/test-apply.R0000644000175000017500000000705714151532232020676 0ustar nileshnileshtest_that("Recycling rules and output for applying multiple inputs over multiple univariate distributions", { # is a distribution vector of length 10 dist <- dist_normal(1:10, 1:10) # p = 0.5: apply p across all elements of (recycling p onto ) # Returns a vector of length 10 expect_equal( quantile(dist, 0.5), qnorm(0.5, 1:10, 1:10) ) # p = c(0.5, 0.9): Cannot recycle p (length 2) onto (length 10) # Returns a list containing values for each p. expect_equal( quantile(dist, c(0.5, 0.9)), mapply({function(mean, sd) qnorm(c(0.5, 0.9), mean, sd)}, mean = 1:10, sd = 1:10, SIMPLIFY = FALSE) ) # p = ppoints(10): apply each p to each element of (no recycling) # Returns a list for each distribution with the 10 quantiles. expect_equal( quantile(dist, ppoints(10)), mapply({function(mean, sd) qnorm(ppoints(10), mean, sd)}, mean = 1:10, sd = 1:10, SIMPLIFY = FALSE) ) # p = list(0.5): equivalent to p = 0.5, but returns a list output # Returns a tibble with 1 vector column of length 10 expect_equal( quantile(dist, list(a = 0.5)), new_data_frame(list(a = quantile(dist, 0.5))) ) # p = list(c(0.5, 0.9)): # Cannot recycle p[[1]] (length 2) onto (length 10) # Returns an error. expect_error( quantile(dist, list(a = c(0.5, 0.9))), "Cannot recycle input" ) # p = list(p1, 0.5): equivalent to df(quantile(, p1), quantile(, 0.5)). # Returns a tibble with 2 vector columns of length 10 # Names of p are used in output. 
expect_equal( quantile(dist, list(a=ppoints(10), b=0.5)), new_data_frame(list(a = qnorm(ppoints(10), 1:10, 1:10), b = quantile(dist, 0.5))) ) }) test_that("Recycling rules and output for applying multiple inputs over multiple multivariate distributions", { # is a bivariate distribution vector of length 2 dist <- dist_multivariate_normal(mu = list(c(1,2), c(3,5)), sigma = list(matrix(c(4,2,2,3), ncol=2), matrix(c(5,1,1,4), ncol=2))) dimnames(dist) <- c("a", "b") expect_equal( quantile(dist, 0.5), matrix(c(1,3,2,5), nrow = 2, dimnames = list(NULL, c("a", "b"))) ) expect_equal( quantile(dist, c(0.5, 0.9)), list(matrix(c(1, 3.5631031310892, 2, 4.21971242404268), ncol = 2, dimnames = list(NULL, c("a", "b"))), matrix(c(3, 5.86563641722901, 5, 7.5631031310892), ncol = 2, dimnames = list(NULL, c("a", "b")))) ) expect_equal( quantile(dist, c(0.5, 0.9, 0.95)), list( matrix(c(1, 3.5631031310892, 4.28970725390294, 2, 4.21971242404268, 4.84897005289389), ncol = 2, dimnames = list(NULL, c("a","b"))), matrix(c(3, 5.86563641722901, 6.67800452290057, 5, 7.5631031310892, 8.28970725390294), ncol = 2, dimnames = list(NULL, c("a", "b"))) ) ) expect_equal( quantile(dist, list(single = 0.5, varied = c(0.8, 0.3))), new_data_frame( list(single = quantile(dist, 0.5), varied = rbind(quantile(dist[1], 0.8), quantile(dist[2], 0.3))) ) ) expect_equal( density(dist, cbind(2, 3)), c(0.046649277604197, 0.0215708514518913) ) expect_equal( mean(dist), matrix(c(1,3,2,5), nrow = 2, dimnames = list(NULL, c("a", "b"))) ) expect_equal( covariance(dist), list( matrix(c(4,2,2,3), nrow = 2), matrix(c(5,1,1,4), nrow = 2) ) ) }) distributional/tests/testthat/test-mixture.R0000644000175000017500000000274613703764147021264 0ustar nileshnileshtest_that("Mixture of Normals", { dist <- dist_mixture(dist_normal(0, 1), dist_normal(10, 4), weights = c(0.5, 0.5)) # format expect_equal(format(dist), "mixture(n=2)") # quantiles expect_equal(quantile(dist, 0.5), 2, tolerance = 1e-5) expect_equal(quantile(dist, 0.1), 
-0.854, tolerance = 1e-3) # pdf expect_equal(density(dist, 0), 0.5*dnorm(0) + 0.5*dnorm(0, 10, 4)) expect_equal(density(dist, 3), 0.5*dnorm(3) + 0.5*dnorm(3, 10, 4)) # cdf expect_equal(cdf(dist, 0), 0.5*pnorm(0) + 0.5*pnorm(0, 10, 4)) expect_equal(cdf(dist, 3), 0.5*pnorm(3) + 0.5*pnorm(3, 10, 4)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.5)), 0.5, tolerance = 1e-6) expect_equal(mean(dist), 5) expect_equal(variance(dist), 33.5) }) test_that("Mixture of different distributions", { dist <- dist_mixture(dist_normal(0, 1), dist_student_t(10), weights = c(0.3, 0.7)) # format expect_equal(format(dist), "mixture(n=2)") # quantiles expect_equal(quantile(dist, 0.5), 0, tolerance = 1e-5) expect_equal(quantile(dist, 0.1), -1.343, tolerance = 1e-3) # pdf expect_equal(density(dist, 0), 0.3*dnorm(0) + 0.7*dt(0, 10)) expect_equal(density(dist, 3), 0.3*dnorm(3) + 0.7*dt(3, 10)) # cdf expect_equal(cdf(dist, 0), 0.3*pnorm(0) + 0.7*pt(0, 10)) expect_equal(cdf(dist, 3), 0.3*pnorm(3) + 0.7*pt(3, 10)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.5)), 0.5, tolerance = 1e-6) expect_equal(mean(dist), 0) expect_equal(variance(dist), 1.175) }) distributional/tests/testthat/test-dist-geometric.R0000644000175000017500000000124413703764147022476 0ustar nileshnileshtest_that("Geometric distribution", { dist <- dist_geometric(0.4) expect_equal(format(dist), "Geometric(0.4)") # quantiles expect_equal(quantile(dist, 0.6), stats::qgeom(0.6, 0.4)) expect_equal(quantile(dist, 0.9), stats::qgeom(0.9, 0.4)) # pdf expect_equal(density(dist, 0), stats::dgeom(0, 0.4)) expect_equal(density(dist, 5), stats::dgeom(5, 0.4)) # cdf expect_equal(cdf(dist, 0), stats::pgeom(0, 0.4)) expect_equal(cdf(dist, 10), stats::pgeom(10, 0.4)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.64)), 0.64, tolerance = 1e-3) # stats expect_equal(mean(dist), 1/0.4 - 1) expect_equal(variance(dist), 1/0.4^2 - 1/0.4) }) 
distributional/tests/testthat/test-dist_categorical.R0000644000175000017500000000074114151532232023042 0ustar nileshnileshtest_that("Categorical distribution", { dist <- dist_categorical(list(c(0.4, 0.2, 0.3, 0.1))) expect_equal(format(dist), "Categorical[4]") # quantiles expect_true(all(is.na(quantile(dist, 0.5)))) expect_true(all(is.na(quantile(dist, 0.2)))) # pdf expect_equal(density(dist, 1), 0.4) expect_equal(density(dist, 2), 0.2) # cdf expect_true(all(is.na(cdf(dist, 1)))) # stats expect_true(all(is.na(mean(dist)))) expect_true(all(is.na(variance(dist)))) }) distributional/tests/testthat/test-transformations.R0000644000175000017500000000736314164770357023022 0ustar nileshnileshtest_that("hilo of transformed distributions", { expect_identical( hilo(exp(dist_poisson(3))), exp(hilo((dist_poisson(3)))) ) }) test_that("chains of transformations", { expect_identical( hilo(dist_student_t(5)), hilo(log(exp(dist_student_t(5)))) ) expect_output( print(exp(dist_student_t(5))-1), "t\\(t\\(5, 0, 1\\)\\)" ) }) test_that("handling of transformation arguments", { expect_identical( hilo(logb(dist_normal(5, 1), base = 10)), logb(hilo(dist_normal(5, 1)), base = 10) ) expect_identical( hilo(10^logb(dist_normal(5, 1), base = 10)), 10^logb(hilo(dist_normal(5, 1)), base = 10) ) }) test_that("LogNormal distributions", { dist <- dist_transformed(dist_normal(0, 0.5), exp, log) ln_dist <- dist_lognormal(0, 0.5) # Test exp() shortcut expect_identical( exp(dist_normal(0, 0.5)), ln_dist ) expect_identical( log(ln_dist), dist_normal(0, 0.5) ) # Test log() shortcut with different bases expect_equal(log(dist_lognormal(0, log(3)), base = 3), dist_normal(0, 1)) expect_equal(log2(dist_lognormal(0, log(2))), dist_normal(0, 1)) expect_equal(log10(dist_lognormal(0, log(10))), dist_normal(0, 1)) # format expect_equal(format(dist), sprintf("t(%s)", format(dist_normal(0, 0.5)))) # quantiles expect_equal( quantile(dist, c(0.1, 0.5)), quantile(ln_dist, c(0.1, 0.5)) ) # pdf expect_equal( density(dist, 
c(1, 20)), density(ln_dist, c(1, 20)) ) # cdf expect_equal( cdf(dist, c(4, 90)), cdf(ln_dist, c(4, 90)) ) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.372)), 0.372, tolerance = 1e-3) # stats (approximate due to bias adjustment method) expect_equal(mean(dist), exp(0.25/2), tolerance = 0.01) expect_equal(variance(dist), (exp(0.25) - 1)*exp(0.25), tolerance = 0.1) }) test_that("inverses are applied automatically", { dist <- dist_gamma(1,1) log2dist <- log(dist, base = 2) log2dist_t <- dist_transformed(dist, log2, function(x) 2 ^ x) expect_equal(density(log2dist, 0.5), density(log2dist_t, 0.5)) expect_equal(cdf(log2dist, 0.5), cdf(log2dist_t, 0.5)) expect_equal(quantile(log2dist, 0.5), quantile(log2dist_t, 0.5)) # test multiple transformations that get stacked together by dist_transformed explogdist <- exp(log(dist)) expect_equal(density(dist, 0.5), density(explogdist, 0.5)) expect_equal(cdf(dist, 0.5), cdf(explogdist, 0.5)) expect_equal(quantile(dist, 0.5), quantile(explogdist, 0.5)) # test multiple transformations created by operators (via Ops) explog2dist <- 2 ^ log2dist expect_equal(density(dist, 0.5), density(explog2dist, 0.5)) expect_equal(cdf(dist, 0.5), cdf(explog2dist, 0.5)) expect_equal(quantile(dist, 0.5), quantile(explog2dist, 0.5)) # basic set of inverses expect_equal(density(sqrt(dist^2), 0.5), density(dist, 0.5)) expect_equal(density(exp(log(dist)), 0.5), density(dist, 0.5)) expect_equal(density(10^(log10(dist)), 0.5), density(dist, 0.5)) expect_equal(density(expm1(log1p(dist)), 0.5), density(dist, 0.5)) expect_equal(density(cos(acos(dist)), 0.5), density(dist, 0.5)) expect_equal(density(sin(asin(dist)), 0.5), density(dist, 0.5)) expect_equal(density(tan(atan(dist)), 0.5), density(dist, 0.5)) expect_equal(density(cosh(acosh(dist + 1)) - 1, 0.5), density(dist, 0.5)) expect_equal(density(sinh(asinh(dist)), 0.5), density(dist, 0.5)) expect_equal(density(tanh(atanh(dist)), 0.5), density(dist, 0.5)) expect_equal(density(dist + 1 - 1, 0.5), 
density(dist, 0.5)) expect_equal(density(dist * 2 / 2, 0.5), density(dist, 0.5)) # inverting a gamma distribution expect_equal(density(1/dist_gamma(4, 3), 0.5), density(dist_inverse_gamma(4, 1/3), 0.5)) expect_equal(density(1/(1/dist_gamma(4, 3)), 0.5), density(dist_gamma(4, 3), 0.5)) }) distributional/tests/testthat/test-dist-normal.R0000644000175000017500000000256314151532232021777 0ustar nileshnileshtest_that("Normal distribution", { # defaults dist <- dist_normal() expect_equal(mean(dist), 0) expect_equal(variance(dist), 1) # display expect_s3_class(dist, "distribution") expect_output(print(dist), "N\\(0, 1\\)") expect_output(print(dist_normal(numeric())), "") # error checking expect_error( dist_normal(0, -1), "non-negative" ) expect_silent( dist_normal(mu = 0L, sigma = 1L) ) mu <- rnorm(10) sigma <- abs(rnorm(10)) dist <- dist_normal(mu, sigma) # summary statistics expect_equal(mean(dist), mu) expect_equal(median(dist), mu) expect_equal(median(dist[[1]]), mu[[1]]) expect_equal(variance(dist), sigma^2) # math expect_equal(mean(dist*3+1), mu*3+1) expect_equal(variance(dist*3+1), (sigma*3)^2) expect_equal(mean(dist + dist), mean(dist) + mean(dist)) expect_equal(variance(dist + dist), variance(dist) + variance(dist)) # density expect_equal( density(dist, 0), dnorm(0, mean = mu, sd = sigma) ) # cdf expect_equal( cdf(dist, 5), pnorm(5, mean = mu, sd = sigma) ) # quantile expect_equal( quantile(dist, 0.1), qnorm(0.1, mean = mu, sd = sigma) ) # generate expect_equal( { set.seed(0) generate(dist, 10) }, { set.seed(0) mapply(function(m, s) rnorm(10, m, s), m = mu, s = sigma, SIMPLIFY = FALSE) } ) }) distributional/tests/testthat/test-dist-chisq.R0000644000175000017500000000117113703764147021626 0ustar nileshnileshtest_that("Chisq distribution", { dist <- dist_chisq(9) expect_equal(format(dist), "x2(9)") # quantiles expect_equal(quantile(dist, 0.1), stats::qchisq(0.1, 9)) expect_equal(quantile(dist, 0.5), stats::qchisq(0.5, 9)) # pdf expect_equal(density(dist, 0), 
stats::dchisq(0, 9)) expect_equal(density(dist, 3), stats::dchisq(3, 9)) # cdf expect_equal(cdf(dist, 0), stats::pchisq(0, 9)) expect_equal(cdf(dist, 3), stats::pchisq(3, 9)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), 9) expect_equal(variance(dist), 2*9) }) distributional/tests/testthat/test-dist-negative-binomial.R0000644000175000017500000000134313703764147024112 0ustar nileshnileshtest_that("Negative Binomial distribution", { dist <- dist_negative_binomial(10, 0.4) expect_equal(format(dist), "NB(10, 0.4)") # quantiles expect_equal(quantile(dist, 0.6), stats::qnbinom(0.6, 10, 0.4)) expect_equal(quantile(dist, 0.61), stats::qnbinom(0.61, 10, 0.4)) # pdf expect_equal(density(dist, 0), stats::dnbinom(0, 10, 0.4)) expect_equal(density(dist, 1), stats::dnbinom(1, 10, 0.4)) # cdf expect_equal(cdf(dist, 0), stats::pnbinom(0, 10, 0.4)) expect_equal(cdf(dist, 1), stats::pnbinom(1, 10, 0.4)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.6358)), 0.6358, tolerance = 1e-3) # stats expect_equal(mean(dist), 0.6*10/(1-0.6)) expect_equal(variance(dist), 0.6*10/(1-0.6)^2) }) distributional/tests/testthat/test-dist-f.R0000644000175000017500000000136713703764147020753 0ustar nileshnileshtest_that("F distribution", { dist <- dist_f(5, 2) expect_equal(format(dist), "F(5, 2)") # quantiles expect_equal(quantile(dist, 0.1), stats::qf(0.1, 5, 2)) expect_equal(quantile(dist, 0.5), stats::qf(0.5, 5, 2)) # pdf expect_equal(density(dist, 0), stats::df(0, 5, 2)) expect_equal(density(dist, 3), stats::df(3, 5, 2)) # cdf expect_equal(cdf(dist, 0), stats::pf(0, 5, 2)) expect_equal(cdf(dist, 3), stats::pf(3, 5, 2)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), NA_real_) expect_equal(variance(dist), NA_real_) dist <- dist_f(5, 5) expect_equal(mean(dist), 5/(5-2)) expect_equal(variance(dist), 2*5^2*(5+5-2)/(5*(5-2)^2*(5-4))) }) 
distributional/tests/testthat/test-dist-logistic.R0000644000175000017500000000124413703764147022335 0ustar nileshnileshtest_that("Logistic distribution", { dist <- dist_logistic(5, 2) expect_equal(format(dist), "Logistic(5, 2)") # quantiles expect_equal(quantile(dist, 0.1), stats::qlogis(0.1, 5, 2)) expect_equal(quantile(dist, 0.5), stats::qlogis(0.5, 5, 2)) # pdf expect_equal(density(dist, 0), stats::dlogis(0, 5, 2)) expect_equal(density(dist, 3), stats::dlogis(3, 5, 2)) # cdf expect_equal(cdf(dist, 0), stats::plogis(0, 5, 2)) expect_equal(cdf(dist, 3), stats::plogis(3, 5, 2)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), 5) expect_equal(variance(dist), (2*pi)^2/3) }) distributional/tests/testthat/test-dist-exponential.R0000644000175000017500000000117613703764147023052 0ustar nileshnileshtest_that("Exponential distribution", { dist <- dist_exponential(2) expect_equal(format(dist), "Exp(2)") # quantiles expect_equal(quantile(dist, 0.1), stats::qexp(0.1, 2)) expect_equal(quantile(dist, 0.5), stats::qexp(0.5, 2)) # pdf expect_equal(density(dist, 0), stats::dexp(0, 2)) expect_equal(density(dist, 3), stats::dexp(3, 2)) # cdf expect_equal(cdf(dist, 0), stats::pexp(0, 2)) expect_equal(cdf(dist, 3), stats::pexp(3, 2)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), 1/2) expect_equal(variance(dist), 1/2^2) }) distributional/tests/testthat/test-dist-inverse-exponential.R0000644000175000017500000000151314151532232024500 0ustar nileshnileshtest_that("Inverse Exponential distribution", { dist <- dist_inverse_exponential(5) expect_equal(format(dist), "InvExp(5)") # Require package installed skip_if_not_installed("actuar", "2.0.0") # quantiles expect_equal(quantile(dist, 0.1), actuar::qinvexp(0.1, 5)) expect_equal(quantile(dist, 0.5), actuar::qinvexp(0.5, 5)) # pdf expect_equal(density(dist, 0), actuar::dinvexp(0, 5)) 
expect_equal(density(dist, 3), actuar::dinvexp(3, 5)) # cdf expect_equal(cdf(dist, 0), actuar::pinvexp(0, 5)) expect_equal(cdf(dist, 3), actuar::pinvexp(3, 5)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4)), 0.4, tolerance = 1e-3) # stats expect_equal(mean(dist), NA_real_) # dput(actuar::minvexp(1, 5)) expect_equal(variance(dist), NA_real_) # dput(actuar::minvexp(2, 5) - actuar::minvexp(1, 5)^2) }) distributional/tests/testthat/test-dist-hypergeometric.R0000644000175000017500000000144713703764147023553 0ustar nileshnileshtest_that("Hypergeometric distribution", { dist <- dist_hypergeometric(500, 50, 100) expect_equal(format(dist), "Hypergeometric(500, 50, 100)") # quantiles expect_equal(quantile(dist, 0.1), stats::qhyper(0.1, 500, 50, 100)) expect_equal(quantile(dist, 0.5), stats::qhyper(0.5, 500, 50, 100)) # pdf expect_equal(density(dist, 0), stats::dhyper(0, 500, 50, 100)) expect_equal(density(dist, 3), stats::dhyper(3, 500, 50, 100)) # cdf expect_equal(cdf(dist, 0), stats::phyper(0, 500, 50, 100)) expect_equal(cdf(dist, 3), stats::phyper(3, 500, 50, 100)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats p <- 500/(500+50) expect_equal(mean(dist), 100*p) expect_equal(variance(dist), 100*p*(1-p)*(500+50-100)/(500+50-1)) }) distributional/tests/testthat/test-dist-student-t.R0000644000175000017500000000550313703764147022451 0ustar nileshnileshtest_that("Student T distribution", { dist <- dist_student_t(5) expect_equal(format(dist), "t(5, 0, 1)") # quantiles expect_equal(quantile(dist, 0.1), stats::qt(0.1, 5)) expect_equal(quantile(dist, 0.5), stats::qt(0.5, 5)) # pdf expect_equal(density(dist, 0), stats::dt(0, 5)) expect_equal(density(dist, 3), stats::dt(3, 5)) # cdf expect_equal(cdf(dist, 0), stats::pt(0, 5)) expect_equal(cdf(dist, 3), stats::pt(3, 5)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), 0) 
expect_equal(variance(dist), 5 / (5-2)) }) test_that("Noncentral Student t distribution", { dist <- dist_student_t(8, ncp = 6) expect_equal(format(dist), "t(8, 0, 1, 6)") # quantiles expect_equal(quantile(dist, 0.1), stats::qt(0.1, 8, ncp = 6)) expect_equal(quantile(dist, 0.5), stats::qt(0.5, 8, ncp = 6)) # pdf expect_equal(density(dist, 0), stats::dt(0, 8, ncp = 6)) expect_equal(density(dist, 3), stats::dt(3, 8, ncp = 6)) # cdf expect_equal(cdf(dist, 0), stats::pt(0, 8, ncp = 6)) expect_equal(cdf(dist, 3), stats::pt(3, 8, ncp = 6)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), 2 * gamma(7/2)) expect_equal(variance(dist), 148/3 - 4 * gamma(7/2)^2) }) test_that("Location-scale Student t distribution", { dist <- dist_student_t(5, 2, 3) expect_equal(format(dist), "t(5, 2, 3)") # quantiles expect_equal(quantile(dist, 0.1), stats::qt(0.1, 5)*3 + 2) expect_equal(quantile(dist, 0.5), stats::qt(0.5, 5)*3 + 2) # pdf expect_equal(density(dist, 0), stats::dt(-2/3, 5)/3) expect_equal(density(dist, 3), stats::dt((3-2)/3, 5)/3) # cdf expect_equal(cdf(dist, 0), stats::pt(-2/3, 5)) expect_equal(cdf(dist, 3), stats::pt((3-2)/3, 5)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # stats expect_equal(mean(dist), 2) expect_equal(variance(dist), 5 / (5-2) * 3^2) }) test_that("Noncentral location-scale Student t distribution", { dist <- dist_student_t(5, 2, 3, ncp = 6) expect_equal(format(dist), "t(5, 2, 3, 6)") # quantiles expect_equal(quantile(dist, 0.1), stats::qt(0.1, 5, 6)*3 + 2) expect_equal(quantile(dist, 0.5), stats::qt(0.5, 5, 6)*3 + 2) # pdf expect_equal(density(dist, 0), stats::dt(-2/3, 5, 6)/3) expect_equal(density(dist, 3), stats::dt((3-2)/3, 5, 6)/3) # cdf expect_equal(cdf(dist, 0), stats::pt(-2/3, 5, 6)) expect_equal(cdf(dist, 3), stats::pt((3-2)/3, 5, 6)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) # 
stats expect_equal(mean(dist), 2 + 6 * sqrt(5/2) * gamma(2)/gamma(5/2) * 3) expect_equal(variance(dist), ((5*(1+6^2))/(5-2) - (6 * sqrt(5/2) * (gamma((5-1)/2)/gamma(5/2)))^2)*3^2) }) distributional/tests/testthat/test-dist-studentised-range.R0000644000175000017500000000101213703764147024136 0ustar nileshnileshtest_that("Studentized Range distribution", { dist <- dist_studentized_range(6, 5, 1) expect_equal(format(dist), "StudentizedRange(6, 5, 1)") # quantiles expect_equal(quantile(dist, 0.1), stats::qtukey(0.1, 6, 5, 1)) expect_equal(quantile(dist, 0.5), stats::qtukey(0.5, 6, 5, 1)) # pdf # cdf expect_equal(cdf(dist, 0), stats::ptukey(0, 6, 5, 1)) expect_equal(cdf(dist, 3), stats::ptukey(3, 6, 5, 1)) # F(Finv(a)) ~= a expect_equal(cdf(dist, quantile(dist, 0.4246)), 0.4246, tolerance = 1e-3) }) distributional/tests/testthat.R0000644000175000017500000000011013703764147016571 0ustar nileshnileshlibrary(testthat) library(distributional) test_check("distributional") distributional/R/0000755000175000017500000000000014164770357013657 5ustar nileshnileshdistributional/R/reexports.R0000644000175000017500000000007713711726207016031 0ustar nileshnilesh#' @importFrom generics generate #' @export generics::generate distributional/R/dist_multivariate_normal.R0000644000175000017500000000505014164726550021077 0ustar nileshnilesh#' The multivariate normal distribution #' #' \lifecycle{maturing} #' #' @param mu A list of numeric vectors for the distribution's mean. #' @param sigma A list of matrices for the distribution's variance-covariance matrix. 
#' #' @seealso [mvtnorm::dmvnorm], [mvtnorm::qmvnorm] #' #' @examples #' dist <- dist_multivariate_normal(mu = list(c(1,2)), sigma = list(matrix(c(4,2,2,3), ncol=2))) #' dist #' #' @examplesIf requireNamespace("mvtnorm", quietly = TRUE) #' mean(dist) #' variance(dist) #' support(dist) #' generate(dist, 10) #' #' density(dist, c(2, 1)) #' density(dist, c(2, 1), log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @export dist_multivariate_normal <- function(mu = 0, sigma = diag(1)){ new_dist(mu = mu, sigma = sigma, dimnames = colnames(sigma[[1]]), class = "dist_mvnorm") } #' @export format.dist_mvnorm <- function(x, digits = 2, ...){ sprintf( "MVN[%i]", length(x[["mu"]]) ) } #' @export density.dist_mvnorm <- function(x, at, ..., na.rm = FALSE){ require_package("mvtnorm") if(is.list(at)) return(vapply(at, density, numeric(1L), x = x, ...)) mvtnorm::dmvnorm(at, x[["mu"]], x[["sigma"]]) } #' @export log_density.dist_mvnorm <- function(x, at, ..., na.rm = FALSE){ require_package("mvtnorm") if(is.list(at)) return(vapply(at, log_density, numeric(1L), x = x, ...)) mvtnorm::dmvnorm(at, x[["mu"]], x[["sigma"]], log = TRUE) } #' @export quantile.dist_mvnorm <- function(x, p, type = c("univariate", "equicoordinate"), ..., na.rm = FALSE){ type <- match.arg(type) if (type == "univariate") { matrix( stats::qnorm(p, mean = rep(x[["mu"]], each = length(p)), sd = rep(diag(sqrt(x[["sigma"]])), each = length(p)), ...), nrow = length(p) ) } else { require_package("mvtnorm") mvtnorm::qmvnorm(p, mean = x[["mu"]], sigma = x[["sigma"]], ...)$quantile } } #' @export cdf.dist_mvnorm <- function(x, q, ..., na.rm = FALSE){ if(is.list(q)) return(vapply(q, cdf, numeric(1L), x = x, ...)) require_package("mvtnorm") mvtnorm::pmvnorm(as.numeric(q), mean = x[["mu"]], sigma = x[["sigma"]], ...)[1] } #' @export generate.dist_mvnorm <- function(x, times, ..., na.rm = FALSE){ require_package("mvtnorm") mvtnorm::rmvnorm(times, x[["mu"]], x[["sigma"]], ...) 
} #' @export mean.dist_mvnorm <- function(x, ...){ matrix(x[["mu"]], nrow = 1) } #' @export covariance.dist_mvnorm <- function(x, ...){ # Wrap in list to preserve matrix structure list(x[["sigma"]]) } #' @export dim.dist_mvnorm <- function(x){ length(x[["mu"]]) } distributional/R/default.R0000755000175000017500000001755014164770577015445 0ustar nileshnilesh#' @export density.dist_default <- function(x, ...){ abort( sprintf("The distribution class `%s` does not support `density()`", class(x)[1]) ) } #' @export log_density.dist_default <- function(x, ...){ log(density(x, ...)) } #' @export quantile.dist_default <- function(x, p, ...){ # abort( # sprintf("The distribution class `%s` does not support `quantile()`", # class(x)[1]) # ) stats::optim(0, function(pos){ (p - cdf(x, pos, ...))^2 })$par } #' @export log_quantile.dist_default <- function(x, p, ...){ quantile(x, exp(p), ...) } #' @export cdf.dist_default <- function(x, ...){ abort( sprintf("The distribution class `%s` does not support `cdf()`", class(x)[1]) ) } #' @export log_cdf.dist_default <- function(x, q, ...){ log(cdf(x, q, ...)) } #' @export generate.dist_default <- function(x, times, ...){ vapply(stats::runif(times,0,1), quantile, numeric(1L), x = x, ...) } #' @export likelihood.dist_default <- function(x, sample, ...){ prod(vapply(sample, density, numeric(1L), x = x)) } #' @export log_likelihood.dist_default <- function(x, sample, ...){ sum(vapply(sample, log_density, numeric(1L), x = x)) } #' @export parameters.dist_default <- function(x, ...) { # Reduce parameter structures to length 1 list if needed. lapply(unclass(x), function(z) { if(inherits(z, "dist_default")) wrap_dist(list(z)) else if (tryCatch(vec_size(z), error = function(e) Inf) > 1) list(z) else z }) } #' @export family.dist_default <- function(object, ...) { substring(class(object)[1], first = 6) } #' @export support.dist_default <- function(x, ...) 
{ new_support_region( list(vctrs::vec_init(generate(x, 1), n = 0L)), list(quantile(x, c(0, 1))) ) } #' @export mean.dist_default <- function(x, ...){ x_sup <- support(x) dist_type <- field(x_sup, "x")[[1]] if (!is.numeric(dist_type)) return(NA_real_) if (is.double(dist_type)) { limits <- field(x_sup, "lim")[[1]] tryCatch( stats::integrate(function(at) density(x, at) * at, limits[1], limits[2])$value, error = function(e) NA_real_ ) } else { mean(quantile(x, stats::ppoints(1000)), na.rm = TRUE) } } #' @export variance.dist_default <- function(x, ...){ x <- covariance(x, ...) if(is.matrix(x[[1]]) && ncol(x[[1]]) > 1){ matrix(diag(x[[1]]), nrow = 1) } else x } #' @export covariance.dist_default <- function(x, ...){ x_sup <- support(x) dist_type <- field(x_sup, "x")[[1]] if (!is.numeric(dist_type)) return(NA_real_) else if (is.matrix(dist_type)) stats::cov(generate(x, times = 1000)) else if (is.double(dist_type)) { limits <- field(x_sup, "lim")[[1]] tryCatch( stats::integrate(function(at) density(x, at) * at^2, limits[1], limits[2])$value, error = function(e) NA_real_ ) - mean(x)^2 } else { stats::var(quantile(x, stats::ppoints(1000)), na.rm = TRUE) } } #' @export median.dist_default <- function(x, na.rm = FALSE, ...){ quantile(x, p = 0.5, ...) } #' @export hilo.dist_default <- function(x, size = 95, ...){ lower <- quantile(x, 0.5-size/200, ...) upper <- quantile(x, 0.5+size/200, ...) 
if(is.matrix(lower) && is.matrix(upper)) { return( vctrs::new_data_frame(split( new_hilo(drop(lower), drop(upper), size = rep_len(size, length(lower))), seq_along(lower))) ) } new_hilo(lower, upper, size) } #' @export hdr.dist_default <- function(x, size = 95, n = 512, ...){ dist_x <- quantile(x, seq(0.5/n, 1 - 0.5/n, length.out = n)) # Remove duplicate values of dist_x from less continuous distributions dist_x <- unique(dist_x) dist_y <- density(x, dist_x) alpha <- quantile(dist_y, probs = 1-size/100) crossing_alpha <- function(alpha, x, y){ it <- seq_len(length(y) - 1) dd <- y - alpha dd <- dd[it + 1] * dd[it] index <- it[dd <= 0] # unique() removes possible duplicates if sequential dd has same value. # More robust approach is required. out <- unique( vapply( index, function(.x) stats::approx(y[.x + c(0,1)], x[.x + c(0,1)], xout = alpha)$y, numeric(1L) ) ) # Add boundary values which may exceed the crossing point. c(x[1][y[1]>alpha], out, x[length(x)][y[length(y)]>alpha]) } # purrr::map(alpha, crossing_alpha, dist_x, dist_y) hdr <- crossing_alpha(alpha, dist_x, dist_y) lower_hdr <- seq_along(hdr)%%2==1 new_hdr(lower = list(hdr[lower_hdr]), upper = list(hdr[!lower_hdr]), size = size) } #' @export format.dist_default <- function(x, ...){ "?" } #' @export print.dist_default <- function(x, ...){ cat(format(x, ...)) } #' @export dim.dist_default <- function(x){ 1 } invert_fail <- function(...) stop("Inverting transformations for distributions is not yet supported.") #' Attempt to get the inverse of f(x) by name. Returns invert_fail #' (a function that raises an error if called) if there is no known inverse. #' @param f string. Name of a function. 
#' @noRd get_unary_inverse <- function(f) { switch(f, sqrt = function(x) x^2, exp = log, log = function(x, base = exp(1)) base ^ x, log2 = function(x) 2^x, log10 = function(x) 10^x, expm1 = log1p, log1p = expm1, cos = acos, sin = asin, tan = atan, acos = cos, asin = sin, atan = tan, cosh = acosh, sinh = asinh, tanh = atanh, acosh = cosh, asinh = sinh, atanh = tanh, invert_fail ) } #' Attempt to get the inverse of f(x, constant) by name. Returns invert_fail #' (a function that raises an error if called) if there is no known inverse. #' @param f string. Name of a function. #' @param constant a constant value #' @noRd get_binary_inverse_1 <- function(f, constant) { force(constant) switch(f, `+` = function(x) x - constant, `-` = function(x) x + constant, `*` = function(x) x / constant, `/` = function(x) x * constant, `^` = function(x) x ^ (1/constant), invert_fail ) } #' Attempt to get the inverse of f(constant, x) by name. Returns invert_fail #' (a function that raises an error if called) if there is no known inverse. #' @param f string. Name of a function. #' @param constant a constant value #' @noRd get_binary_inverse_2 <- function(f, constant) { force(constant) switch(f, `+` = function(x) x - constant, `-` = function(x) constant - x, `*` = function(x) x / constant, `/` = function(x) constant / x, `^` = function(x) log(x, base = constant), invert_fail ) } #' @method Math dist_default #' @export Math.dist_default <- function(x, ...) 
{ if(dim(x) > 1) stop("Transformations of multivariate distributions are not yet supported.") trans <- new_function(exprs(x = ), body = expr((!!sym(.Generic))(x, !!!dots_list(...)))) inverse_fun <- get_unary_inverse(.Generic) inverse <- new_function(exprs(x = ), body = expr((!!inverse_fun)(x, !!!dots_list(...)))) vec_data(dist_transformed(wrap_dist(list(x)), trans, inverse))[[1]] } #' @method Ops dist_default #' @export Ops.dist_default <- function(e1, e2) { is_dist <- c(inherits(e1, "dist_default"), inherits(e2, "dist_default")) if(any(vapply(list(e1, e2)[is_dist], dim, numeric(1L)) > 1)){ stop("Transformations of multivariate distributions are not yet supported.") } trans <- if(all(is_dist)) { if(identical(e1$dist, e2$dist)){ new_function(exprs(x = ), expr((!!sym(.Generic))((!!e1$transform)(x), (!!e2$transform)(x)))) } else { stop(sprintf("The %s operation is not supported for <%s> and <%s>", .Generic, class(e1)[1], class(e2)[1])) } } else if(is_dist[1]){ new_function(exprs(x = ), body = expr((!!sym(.Generic))(x, !!e2))) } else { new_function(exprs(x = ), body = expr((!!sym(.Generic))(!!e1, x))) } inverse <- if(all(is_dist)) { invert_fail } else if(is_dist[1]){ get_binary_inverse_1(.Generic, e2) } else { get_binary_inverse_2(.Generic, e1) } vec_data(dist_transformed(wrap_dist(list(e1,e2)[which(is_dist)]), trans, inverse))[[1]] } distributional/R/dist_uniform.R0000644000175000017500000000566414151532232016477 0ustar nileshnilesh#' The Uniform distribution #' #' \lifecycle{stable} #' #' A distribution with constant density on an interval. #' #' @inheritParams stats::dunif #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a Poisson random variable with parameter #' `lambda` = \eqn{\lambda}. 
#' #' **Support**: \eqn{[a,b]}{[a,b]} #' #' **Mean**: \eqn{\frac{1}{2}(a+b)} #' #' **Variance**: \eqn{\frac{1}{12}(b-a)^2} #' #' **Probability mass function (p.m.f)**: #' #' \deqn{ #' f(x) = \frac{1}{b-a} for x \in [a,b] #' }{ #' f(x) = \frac{1}{b-a} for x in [a,b] #' } #' \deqn{ #' f(x) = 0 otherwise #' }{ #' f(x) = 0 otherwise #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' \deqn{ #' F(x) = 0 for x < a #' }{ #' F(x) = 0 for x < a #' } #' \deqn{ #' F(x) = \frac{x - a}{b-a} for x \in [a,b] #' }{ #' F(x) = \frac{x - a}{b-a} for x in [a,b] #' } #' \deqn{ #' F(x) = 1 for x > b #' }{ #' F(x) = 1 for x > b #' } #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = \frac{e^{tb} - e^{ta}}{t(b-a)} for t \neq 0 #' }{ #' E(e^(tX)) = \frac{e^{tb} - e^{ta}}{t(b-a)} for t \neq 0 #' } #' \deqn{ #' E(e^{tX}) = 1 for t = 0 #' }{ #' E(e^(tX)) = 1 for t = 0 #' } #' #' @seealso [stats::Uniform] #' #' @examples #' dist <- dist_uniform(min = c(3, -2), max = c(5, 4)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_uniform #' @export dist_uniform <- function(min, max){ min <- vec_cast(min, double()) max <- vec_cast(max, double()) if(any(min > max)){ abort("The min of a Uniform distribution must be less than max.") } new_dist(l = min, u = max, class = "dist_uniform") } #' @export format.dist_uniform <- function(x, digits = 2, ...){ sprintf( "U(%s, %s)", format(x[["l"]], digits = digits, ...), format(x[["u"]], digits = digits, ...) 
) } #' @export density.dist_uniform <- function(x, at, ...){ stats::dunif(at, x[["l"]], x[["u"]]) } #' @export log_density.dist_uniform <- function(x, at, ...){ stats::dunif(at, x[["l"]], x[["u"]], log = TRUE) } #' @export quantile.dist_uniform <- function(x, p, ...){ stats::qunif(p, x[["l"]], x[["u"]]) } #' @export cdf.dist_uniform <- function(x, q, ...){ stats::punif(q, x[["l"]], x[["u"]]) } #' @export generate.dist_uniform <- function(x, times, ...){ stats::runif(times, x[["l"]], x[["u"]]) } #' @export mean.dist_uniform <- function(x, ...){ (x[["u"]]+x[["l"]])/2 } #' @export covariance.dist_uniform <- function(x, ...){ (x[["u"]]-x[["l"]])^2/12 } #' @export skewness.dist_uniform <- function(x, ...) 0 #' @export kurtosis.dist_uniform <- function(x, ...) -6/5 distributional/R/dist_poisson.R0000644000175000017500000000505014164725104016505 0ustar nileshnilesh#' The Poisson Distribution #' #' \lifecycle{stable} #' #' Poisson distributions are frequently used to model counts. #' #' @inheritParams stats::dpois #' #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a Poisson random variable with parameter #' `lambda` = \eqn{\lambda}. #' #' **Support**: \eqn{\{0, 1, 2, 3, ...\}}{{0, 1, 2, 3, ...}} #' #' **Mean**: \eqn{\lambda} #' #' **Variance**: \eqn{\lambda} #' #' **Probability mass function (p.m.f)**: #' #' \deqn{ #' P(X = k) = \frac{\lambda^k e^{-\lambda}}{k!} #' }{ #' P(X = k) = \lambda^k e^(-\lambda) / k! #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' \deqn{ #' P(X \le k) = e^{-\lambda} #' \sum_{i = 0}^{\lfloor k \rfloor} \frac{\lambda^i}{i!} #' }{ #' P(X \le k) = e^(-\lambda) #' \sum_{i = 0}^k \lambda^i / i! 
#' } #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = e^{\lambda (e^t - 1)} #' }{ #' E(e^(tX)) = e^(\lambda (e^t - 1)) #' } #' @seealso [stats::Poisson] #' #' @examples #' dist <- dist_poisson(lambda = c(1, 4, 10)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_poisson #' @export dist_poisson <- function(lambda){ lambda <- vec_cast(lambda, double()) if(any(lambda < 0)){ abort("The lambda parameter of an Poisson distribution must be non-negative.") } new_dist(l = lambda, class = "dist_poisson") } #' @export format.dist_poisson <- function(x, digits = 2, ...){ sprintf( "Pois(%s)", format(x[["l"]], digits = digits, ...) ) } #' @export density.dist_poisson <- function(x, at, ...){ stats::dpois(at, x[["l"]]) } #' @export log_density.dist_poisson <- function(x, at, ...){ stats::dpois(at, x[["l"]], log = TRUE) } #' @export quantile.dist_poisson <- function(x, p, ...){ stats::qpois(p, x[["l"]]) } #' @export cdf.dist_poisson <- function(x, q, ...){ stats::ppois(q, x[["l"]]) } #' @export generate.dist_poisson <- function(x, times, ...){ as.integer(stats::rpois(times, x[["l"]])) } #' @export mean.dist_poisson <- function(x, ...){ x[["l"]] } #' @export covariance.dist_poisson <- function(x, ...){ x[["l"]] } #' @export skewness.dist_poisson <- function(x, ...) 1 / sqrt(x[["l"]]) #' @export kurtosis.dist_poisson <- function(x, ...) 1 / x[["l"]] distributional/R/dist_lognormal.R0000644000175000017500000001047014164770357017021 0ustar nileshnilesh#' The log-normal distribution #' #' \lifecycle{stable} #' #' The log-normal distribution is a commonly used transformation of the Normal #' distribution. If \eqn{X} follows a log-normal distribution, then \eqn{\ln{X}} #' would be characteristed by a Normal distribution. 
#' #' @param mu The mean (location parameter) of the distribution, which is the #' mean of the associated Normal distribution. Can be any real number. #' @param sigma The standard deviation (scale parameter) of the distribution. #' Can be any positive number. #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{Y} be a Normal random variable with mean #' `mu` = \eqn{\mu} and standard deviation `sigma` = \eqn{\sigma}. The #' log-normal distribution \eqn{X = exp(Y)} is characterised by: #' #' **Support**: \eqn{R+}, the set of all real numbers greater than or equal to 0. #' #' **Mean**: \eqn{e^(\mu + \sigma^2/2} #' #' **Variance**: \eqn{(e^(\sigma^2)-1) e^(2\mu + \sigma^2} #' #' **Probability density function (p.d.f)**: #' #' \deqn{ #' f(x) = \frac{1}{x\sqrt{2 \pi \sigma^2}} e^{-(\ln{x} - \mu)^2 / 2 \sigma^2} #' }{ #' f(x) = 1 / (x * sqrt(2 \pi \sigma^2)) exp(-(log(x) - \mu)^2 / (2 \sigma^2)) #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' The cumulative distribution function has the form #' #' \deqn{ #' F(x) = \Phi((\ln{x} - \mu)/\sigma) #' }{ #' F(x) = Phi((log(x) - \mu)/\sigma) #' } #' #' Where \eqn{Phi}{Phi} is the CDF of a standard Normal distribution, N(0,1). #' #' @seealso [stats::Lognormal] #' #' @examples #' dist <- dist_lognormal(mu = 1:5, sigma = 0.1) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' # A log-normal distribution X is exp(Y), where Y is a Normal distribution of #' # the same parameters. So log(X) will produce the Normal distribution Y. 
#' log(dist) #' @export dist_lognormal <- function(mu = 0, sigma = 1){ mu <- vec_cast(mu, double()) sigma <- vec_cast(sigma, double()) if(any(sigma[!is.na(sigma)] < 0)){ abort("Standard deviation of a log-normal distribution must be non-negative") } new_dist(mu = mu, sigma = sigma, class = "dist_lognormal") } #' @export format.dist_lognormal <- function(x, digits = 2, ...){ sprintf( "lN(%s, %s)", format(x[["mu"]], digits = digits, ...), format(x[["sigma"]]^2, digits = digits, ...) ) } #' @export density.dist_lognormal <- function(x, at, ...){ stats::dlnorm(at, x[["mu"]], x[["sigma"]]) } #' @export log_density.dist_lognormal <- function(x, at, ...){ stats::dlnorm(at, x[["mu"]], x[["sigma"]], log = TRUE) } #' @export quantile.dist_lognormal <- function(x, p, ...){ stats::qlnorm(p, x[["mu"]], x[["sigma"]]) } #' @export log_quantile.dist_lognormal <- function(x, p, ...){ stats::qlnorm(p, x[["mu"]], x[["sigma"]], log.p = TRUE) } #' @export cdf.dist_lognormal <- function(x, q, ...){ stats::plnorm(q, x[["mu"]], x[["sigma"]]) } #' @export log_cdf.dist_lognormal <- function(x, q, ...){ stats::plnorm(q, x[["mu"]], x[["sigma"]], log.p = TRUE) } #' @export generate.dist_lognormal <- function(x, times, ...){ stats::rlnorm(times, x[["mu"]], x[["sigma"]]) } #' @export mean.dist_lognormal <- function(x, ...){ exp(x[["mu"]] + x[["sigma"]]^2/2) } #' @export covariance.dist_lognormal <- function(x, ...){ s2 <- x[["sigma"]]^2 (exp(s2)-1)*exp(2*x[["mu"]] + s2) } #' @export skewness.dist_lognormal <- function(x, ...) { es2 <- exp(x[["sigma"]]^2) (es2+2)*sqrt(es2-1) } #' @export kurtosis.dist_lognormal <- function(x, ...) { s2 <- x[["sigma"]]^2 exp(4*s2) + 2*exp(3*s2) + 3*exp(2*s2) - 6 } # make a normal distribution from a lognormal distribution using the # specified base normal_dist_with_base <- function(x, base = exp(1)) { vec_data(dist_normal(x[["mu"]], x[["sigma"]]) / log(base))[[1]] } #' @method Math dist_lognormal #' @export Math.dist_lognormal <- function(x, ...) 
{ switch(.Generic, # Shortcuts to get Normal distribution from log-normal. log = normal_dist_with_base(x, ...), log2 = normal_dist_with_base(x, 2), log10 = normal_dist_with_base(x, 10), NextMethod() ) } distributional/R/dist_studentized_range.R0000644000175000017500000000277614151532232020537 0ustar nileshnilesh#' The Studentized Range distribution #' #' \lifecycle{stable} #' #' Tukey's studentized range distribution, used for Tukey's #' honestly significant differences test in ANOVA. #' #' @inheritParams stats::qtukey #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' **Support**: \eqn{R^+}, the set of positive real numbers. #' #' Other properties of Tukey's Studentized Range Distribution #' are omitted, largely because the distribution is not fun #' to work with. #' #' @seealso [stats::Tukey] #' #' @examples #' dist <- dist_studentized_range(nmeans = c(6, 2), df = c(5, 4), nranges = c(1, 1)) #' #' dist #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_studentized_range #' @export dist_studentized_range <- function(nmeans, df, nranges){ nmeans <- vec_cast(nmeans, double()) df <- vec_cast(df, double()) new_dist(nm = nmeans, df = df, nr = nranges, class = "dist_studentized_range") } #' @export format.dist_studentized_range <- function(x, digits = 2, ...){ sprintf( "StudentizedRange(%s, %s, %s)", format(x[["nm"]], digits = digits, ...), format(x[["df"]], digits = digits, ...), format(x[["nr"]], digits = digits, ...) ) } #' @export quantile.dist_studentized_range <- function(x, p, ...){ stats::qtukey(p, x[["nm"]], x[["df"]], x[["nr"]]) } #' @export cdf.dist_studentized_range <- function(x, q, ...){ stats::ptukey(q, x[["nm"]], x[["df"]], x[["nr"]]) } distributional/R/dist_negative_binomial.R0000644000175000017500000000566014164725335020504 0ustar nileshnilesh#' The Negative Binomial distribution #' #' \lifecycle{stable} #' #' A generalization of the geometric distribution. 
It is the number #' of failures in a sequence of i.i.d. Bernoulli trials before #' a specified number of successes (`size`) occur. The probability of success in #' each trial is given by `prob`. #' #' @inheritParams stats::NegBinomial #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a Negative Binomial random variable with #' success probability `prob` = \eqn{p} and the number of successes `size` = #' \eqn{r}. #' #' #' **Support**: \eqn{\{0, 1, 2, 3, ...\}} #' #' **Mean**: \eqn{\frac{p r}{1-p}} #' #' **Variance**: \eqn{\frac{pr}{(1-p)^2}} #' #' **Probability mass function (p.m.f)**: #' #' \deqn{ #' f(k) = {k + r - 1 \choose k} \cdot (1-p)^r p^k #' }{ #' f(k) = (k+r-1)!/(k!(r-1)!) (1-p)^r p^k #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' Too nasty, omitted. #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' \left(\frac{1-p}{1-pe^t}\right)^r, t < -\log p #' }{ #' \frac{(1-p)^r}{(1-pe^t)^r}, t < -\log p #' } #' #' @seealso [stats::NegBinomial] #' #' @examples #' dist <- dist_negative_binomial(size = 10, prob = 0.5) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' support(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @export dist_negative_binomial <- function(size, prob){ size <- vec_cast(size, double()) prob <- vec_cast(prob, double()) if(any(prob < 0 | prob > 1)){ abort("Probability of success must be between 0 and 1.") } new_dist(n = size, p = prob, class = "dist_negbin") } #' @export format.dist_negbin <- function(x, digits = 2, ...){ sprintf( "NB(%s, %s)", format(x[["n"]], digits = digits, ...), format(x[["p"]], digits = digits, ...) 
)
}

#' @export
density.dist_negbin <- function(x, at, ...){
  stats::dnbinom(at, x[["n"]], x[["p"]])
}

#' @export
log_density.dist_negbin <- function(x, at, ...){
  stats::dnbinom(at, x[["n"]], x[["p"]], log = TRUE)
}

#' @export
quantile.dist_negbin <- function(x, p, ...){
  stats::qnbinom(p, x[["n"]], x[["p"]])
}

#' @export
cdf.dist_negbin <- function(x, q, ...){
  stats::pnbinom(q, x[["n"]], x[["p"]])
}

#' @export
generate.dist_negbin <- function(x, times, ...){
  stats::rnbinom(times, x[["n"]], x[["p"]])
}

#' @export
mean.dist_negbin <- function(x, ...){
  # E[X] = r(1-p)/p, where X counts failures before the r-th success and
  # p is the success probability (stats::NegBinomial parameterisation).
  x[["n"]] * (1 - x[["p"]]) / x[["p"]]
}

#' @export
covariance.dist_negbin <- function(x, ...){
  # Var[X] = r(1-p)/p^2 (same parameterisation as mean.dist_negbin).
  x[["n"]] * (1 - x[["p"]]) / x[["p"]]^2
}

#' @export
skewness.dist_negbin <- function(x, ...) {
  # Skewness = (2 - p) / sqrt(r(1-p)) with p = success probability, matching
  # the parameterisation used by mean/covariance above.
  # The previous formula, (1 + p)/sqrt(p r), treated p as the failure
  # probability and so contradicted the other moment methods.
  (2 - x[["p"]]) / sqrt(x[["n"]] * (1 - x[["p"]]))
}

#' @export
kurtosis.dist_negbin <- function(x, ...) {
  # Excess kurtosis = 6/r + p^2 / (r(1-p)).
  # The previous code had an operator-precedence bug:
  # `(1 - p)^2 / n * p` evaluates to (1-p)^2 * p / n rather than dividing by
  # n*p, and used the failure-probability form inconsistent with skewness.
  6 / x[["n"]] + x[["p"]]^2 / (x[["n"]] * (1 - x[["p"]]))
}
distributional/R/dist_hypergeometric.R0000644000175000017500000000700714151532232020037 0ustar nileshnilesh#' The Hypergeometric distribution
#'
#' \lifecycle{stable}
#'
#' To understand the HyperGeometric distribution, consider a set of
#' \eqn{r} objects, of which \eqn{m} are of the type I and
#' \eqn{n} are of the type II. A sample with size \eqn{k} (\eqn{k < m+n})
#' is randomly selected without replacement.
#'
#' @param m The number of type I elements available.
#' @param n The number of type II elements available.
#' @param k The size of the sample taken.
#'
#' @details
#'
#' We recommend reading this documentation on
#' <https://pkg.mitchelloharawild.com/distributional/>, where the math
#' will render nicely.
#'
#' In the following, let \eqn{X} be a HyperGeometric random variable with
#' success probability `p` = \eqn{p = m/(m+n)}.
#' #' **Support**: \eqn{x \in { \{\max{(0, k-n)}, \dots, \min{(k,m)}}\}} #' #' **Mean**: \eqn{\frac{km}{n+m} = kp} #' #' **Variance**: \eqn{\frac{km(n)(n+m-k)}{(n+m)^2 (n+m-1)} = #' kp(1-p)(1 - \frac{k-1}{m+n-1})} #' #' **Probability mass function (p.m.f)**: #' #' \deqn{ #' P(X = x) = \frac{{m \choose x}{n \choose k-x}}{{m+n \choose k}} #' }{ #' P(X = x) = \frac{{m \choose x}{n \choose k-x}}{{m+n \choose k}} #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' \deqn{ #' P(X \le k) \approx \Phi\Big(\frac{x - kp}{\sqrt{kp(1-p)}}\Big) #' } #' #' @seealso [stats::Hypergeometric] #' #' @examples #' dist <- dist_hypergeometric(m = rep(500, 3), n = c(50, 60, 70), k = c(100, 200, 300)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_hypergeometric #' @export dist_hypergeometric <- function(m, n, k){ m <- vec_cast(m, integer()) n <- vec_cast(n, integer()) k <- vec_cast(k, integer()) new_dist(m = m, n = n, k = k, class = "dist_hypergeometric") } #' @export format.dist_hypergeometric <- function(x, digits = 2, ...){ sprintf( "Hypergeometric(%s, %s, %s)", format(x[["m"]], digits = digits, ...), format(x[["n"]], digits = digits, ...), format(x[["k"]], digits = digits, ...) 
)
}

#' @export
density.dist_hypergeometric <- function(x, at, ...){
  stats::dhyper(at, x[["m"]], x[["n"]], x[["k"]])
}

#' @export
log_density.dist_hypergeometric <- function(x, at, ...){
  stats::dhyper(at, x[["m"]], x[["n"]], x[["k"]], log = TRUE)
}

#' @export
quantile.dist_hypergeometric <- function(x, p, ...){
  stats::qhyper(p, x[["m"]], x[["n"]], x[["k"]])
}

#' @export
cdf.dist_hypergeometric <- function(x, q, ...){
  stats::phyper(q, x[["m"]], x[["n"]], x[["k"]])
}

#' @export
generate.dist_hypergeometric <- function(x, times, ...){
  stats::rhyper(times, x[["m"]], x[["n"]], x[["k"]])
}

#' @export
mean.dist_hypergeometric <- function(x, ...){
  # E[X] = k * m/(m+n): draws times the proportion of type I elements.
  p <- x[["m"]]/(x[["m"]] + x[["n"]])
  x[["k"]] * p
}

#' @export
covariance.dist_hypergeometric <- function(x, ...){
  # Var[X] = k p (1-p) (N-k)/(N-1), the binomial variance shrunk by the
  # finite-population correction factor.
  m <- x[["m"]]
  n <- x[["n"]]
  k <- x[["k"]]
  p <- m/(m + n)
  k * p * (1 - p) * ((m + n - k) / (m + n - 1))
}

#' @export
skewness.dist_hypergeometric <- function(x, ...) {
  # Standard hypergeometric skewness with N = population, K = successes,
  # n = draws.
  N <- x[["n"]] + x[["m"]]
  K <- x[["m"]]
  n <- x[["k"]]
  a <- (N - 2 * K) * (N - 1)^0.5 * (N - 2 * n)
  b <- (n * K * (N - K) * (N - n))^0.5 * (N - 2)
  a / b
}

#' @export
kurtosis.dist_hypergeometric <- function(x, ...) {
  N <- x[["n"]] + x[["m"]]
  K <- x[["m"]]
  n <- x[["k"]]
  # Excess kurtosis of the hypergeometric distribution.
  # BUG FIX: the previous code returned only
  # 1 / (n K (N-K)(N-n)(N-2)(N-3)) -- the entire numerator of the standard
  # formula was missing, yielding near-zero values instead of the kurtosis.
  num <- (N - 1) * N^2 * (N * (N + 1) - 6 * K * (N - K) - 6 * n * (N - n)) +
    6 * n * K * (N - K) * (N - n) * (5 * N - 6)
  den <- n * K * (N - K) * (N - n) * (N - 2) * (N - 3)
  num / den
}
distributional/R/dist_f.R0000644000175000017500000001043614151532232015236 0ustar nileshnilesh#' The F Distribution
#'
#' \lifecycle{stable}
#'
#' @inheritParams stats::df
#'
#' @details
#'
#' We recommend reading this documentation on
#' <https://pkg.mitchelloharawild.com/distributional/>, where the math
#' will render nicely.
#'
#' In the following, let \eqn{X} be a Gamma random variable
#' with parameters
#' `shape` = \eqn{\alpha} and
#' `rate` = \eqn{\beta}.
#' #' **Support**: \eqn{x \in (0, \infty)} #' #' **Mean**: \eqn{\frac{\alpha}{\beta}} #' #' **Variance**: \eqn{\frac{\alpha}{\beta^2}} #' #' **Probability density function (p.m.f)**: #' #' \deqn{ #' f(x) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{\alpha - 1} e^{-\beta x} #' }{ #' f(x) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{\alpha - 1} e^{-\beta x} #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' \deqn{ #' f(x) = \frac{\Gamma(\alpha, \beta x)}{\Gamma{\alpha}} #' }{ #' f(x) = \frac{\Gamma(\alpha, \beta x)}{\Gamma{\alpha}} #' } #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = \Big(\frac{\beta}{ \beta - t}\Big)^{\alpha}, \thinspace t < \beta #' }{ #' E(e^(tX)) = \Big(\frac{\beta}{ \beta - t}\Big)^{\alpha}, \thinspace t < \beta #' } #' #' @seealso [stats::FDist] #' #' @examples #' dist <- dist_f(df1 = c(1,2,5,10,100), df2 = c(1,1,2,1,100)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_f #' @export dist_f <- function(df1, df2, ncp = NULL){ df1 <- vec_cast(df1, double()) df2 <- vec_cast(df2, double()) ncp <- vec_cast(ncp, double()) if(any((df1 < 0) | (df2 < 0))){ abort("The degrees of freedom parameters of the F distribution must be non-negative.") } if(is.null(ncp)){ new_dist(df1 = df1, df2 = df2, class = "dist_f") } else { new_dist(df1 = df1, df2 = df2, ncp = ncp, class = "dist_f") } } #' @export format.dist_f <- function(x, digits = 2, ...){ sprintf( "F(%s, %s)", format(x[["df1"]], digits = digits, ...), format(x[["df2"]], digits = digits, ...) 
) } #' @export density.dist_f <- function(x, at, ...){ if(is.null(x[["ncp"]])) { stats::df(at, x[["df1"]], x[["df2"]]) } else { stats::df(at, x[["df1"]], x[["df2"]], x[["ncp"]]) } } #' @export log_density.dist_f <- function(x, at, ...){ if(is.null(x[["ncp"]])) { stats::df(at, x[["df1"]], x[["df2"]], log = TRUE) } else { stats::df(at, x[["df1"]], x[["df2"]], x[["ncp"]], log = TRUE) } } #' @export quantile.dist_f <- function(x, p, ...){ if(is.null(x[["ncp"]])) { stats::qf(p, x[["df1"]], x[["df2"]]) } else { stats::qf(p, x[["df1"]], x[["df2"]], x[["ncp"]]) } } #' @export cdf.dist_f <- function(x, q, ...){ if(is.null(x[["ncp"]])) { stats::pf(q, x[["df1"]], x[["df2"]]) } else { stats::pf(q, x[["df1"]], x[["df2"]], x[["ncp"]]) } } #' @export generate.dist_f <- function(x, times, ...){ if(is.null(x[["ncp"]])) { stats::rf(times, x[["df1"]], x[["df2"]]) } else { stats::rf(times, x[["df1"]], x[["df2"]], x[["ncp"]]) } } #' @export mean.dist_f <- function(x, ...){ df1 <- x[["df1"]] df2 <- x[["df2"]] if(df2 > 2) { if(is.null(x[["ncp"]])){ df2 / (df2 - 2) } else { (df2 * (df1 + x[["ncp"]])) / (df1 * (df2 - 2)) } } else { NA_real_ } } #' @export covariance.dist_f <- function(x, ...){ df1 <- x[["df1"]] df2 <- x[["df2"]] if(df2 > 4) { if(is.null(x[["ncp"]])){ (2 * df2^2 * (df1 + df2 - 2))/(df1*(df2-2)^2*(df2-4)) } else { 2*((df1 + x[["ncp"]])^2 + (df1 + 2*x[["ncp"]])*(df2 - 2))/((df2-2)^2*(df2-4)) * (df2^2/df1^2) } } else { NA_real_ } } #' @export skewness.dist_f <- function(x, ...) { df1 <- x[["df1"]] df2 <- x[["df2"]] if(!is.null(x[["ncp"]])) return(NA_real_) if (df2 > 6) { a <- (2 * df1 + df2 - 2) * sqrt(8 * (df2 - 4)) b <- (df2 - 6) * sqrt(df1 * (df1 + df2 - 2)) a / b } else { NA_real_ } } #' @export kurtosis.dist_f <- function(x, ...) 
{ df1 <- x[["df1"]] df2 <- x[["df2"]] if(!is.null(x[["ncp"]])) return(NA_real_) if (df2 > 8) { a <- df1 * (5 * df2 - 22) * (df1 + df2 - 2) + (df2 - 4) * (df2 - 2)^2 b <- df1 * (df2 - 6) * (df2 - 8) * (df1 + df2 - 2) 12 * a / b } else { NA_real_ } } distributional/R/plot.R0000644000175000017500000000134514164770357014763 0ustar nileshnilesh#' Plot a distribution #' #' \lifecycle{deprecated} #' #' This function is now defunct and can no longer be used. Instead consider using #' the {ggdist} package to produce your own distribution plots. You can learn #' more about how this plot can be produced using {ggdist} here: #' https://mjskay.github.io/ggdist/articles/slabinterval.html #' #' @param x The distribution(s) to plot. #' @param ... Unused. #' #' @keywords internal #' #' @export autoplot.distribution <- function(x, ...){ lifecycle::deprecate_stop("0.2.0", "distributional::autoplot.distribution()", details = "The autoplot() method for distributions have been replaced by geoms in the {ggdist} package.\nYou can produce a similar plot using ggdist::stat_dist_slab()") } distributional/R/dist_gamma.R0000644000175000017500000000741114151532232016072 0ustar nileshnilesh#' The Gamma distribution #' #' \lifecycle{stable} #' #' Several important distributions are special cases of the Gamma #' distribution. When the shape parameter is `1`, the Gamma is an #' exponential distribution with parameter \eqn{1/\beta}. When the #' \eqn{shape = n/2} and \eqn{rate = 1/2}, the Gamma is a equivalent to #' a chi squared distribution with n degrees of freedom. Moreover, if #' we have \eqn{X_1} is \eqn{Gamma(\alpha_1, \beta)} and #' \eqn{X_2} is \eqn{Gamma(\alpha_2, \beta)}, a function of these two variables #' of the form \eqn{\frac{X_1}{X_1 + X_2}} \eqn{Beta(\alpha_1, \alpha_2)}. #' This last property frequently appears in another distributions, and it #' has extensively been used in multivariate methods. More about the Gamma #' distribution will be added soon. 
#' #' @inheritParams stats::dgamma #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a Gamma random variable #' with parameters #' `shape` = \eqn{\alpha} and #' `rate` = \eqn{\beta}. #' #' **Support**: \eqn{x \in (0, \infty)} #' #' **Mean**: \eqn{\frac{\alpha}{\beta}} #' #' **Variance**: \eqn{\frac{\alpha}{\beta^2}} #' #' **Probability density function (p.m.f)**: #' #' \deqn{ #' f(x) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{\alpha - 1} e^{-\beta x} #' }{ #' f(x) = \frac{\beta^{\alpha}}{\Gamma(\alpha)} x^{\alpha - 1} e^{-\beta x} #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' \deqn{ #' f(x) = \frac{\Gamma(\alpha, \beta x)}{\Gamma{\alpha}} #' }{ #' f(x) = \frac{\Gamma(\alpha, \beta x)}{\Gamma{\alpha}} #' } #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = \Big(\frac{\beta}{ \beta - t}\Big)^{\alpha}, \thinspace t < \beta #' }{ #' E(e^(tX)) = \Big(\frac{\beta}{ \beta - t}\Big)^{\alpha}, \thinspace t < \beta #' } #' #' @seealso [stats::GammaDist] #' #' @examples #' dist <- dist_gamma(shape = c(1,2,3,5,9,7.5,0.5), rate = c(0.5,0.5,0.5,1,2,1,1)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_gamma #' @export dist_gamma <- function(shape, rate){ shape <- vec_cast(shape, double()) rate <- vec_cast(rate, double()) if(any(shape[!is.na(shape)] < 0)){ abort("The shape parameter of a Gamma distribution must be non-negative.") } if(any(rate[!is.na(rate)] <= 0)){ abort("The rate parameter of a Gamma distribution must be strictly positive.") } new_dist(shape = shape, rate = rate, class = "dist_gamma") } #' @export format.dist_gamma <- function(x, digits = 2, ...){ sprintf( if (is_utf8_output()) "\u0393(%s, %s)" else "Gamma(%s, %s)", format(x[["shape"]], digits = digits, ...), 
format(x[["rate"]], digits = digits, ...) ) } #' @export density.dist_gamma <- function(x, at, ...){ stats::dgamma(at, x[["shape"]], x[["rate"]]) } #' @export log_density.dist_gamma <- function(x, at, ...){ stats::dgamma(at, x[["shape"]], x[["rate"]], log = TRUE) } #' @export quantile.dist_gamma <- function(x, p, ...){ stats::qgamma(p, x[["shape"]], x[["rate"]]) } #' @export cdf.dist_gamma <- function(x, q, ...){ stats::pgamma(q, x[["shape"]], x[["rate"]]) } #' @export generate.dist_gamma <- function(x, times, ...){ stats::rgamma(times, x[["shape"]], x[["rate"]]) } #' @export mean.dist_gamma <- function(x, ...){ x[["shape"]] / x[["rate"]] } #' @export covariance.dist_gamma <- function(x, ...){ x[["shape"]] / x[["rate"]]^2 } #' @export skewness.dist_gamma <- function(x, ...) 2 / sqrt(x[["shape"]]) #' @export kurtosis.dist_gamma <- function(x, ...) 6 / x[["shape"]] distributional/R/dist_degenerate.R0000644000175000017500000000422014151532232017106 0ustar nileshnilesh#' The degenerate distribution #' #' \lifecycle{stable} #' #' The degenerate distribution takes a single value which is certain to be #' observed. It takes a single parameter, which is the value that is observed #' by the distribution. #' #' @param x The value of the distribution. #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a degenerate random variable with value #' `x` = \eqn{k_0}. 
#' #' **Support**: \eqn{R}, the set of all real numbers #' #' **Mean**: \eqn{k_0} #' #' **Variance**: \eqn{0} #' #' **Probability density function (p.d.f)**: #' #' \deqn{ #' f(x) = 1 for x = k_0 #' }{ #' f(x) = 1 for x = k_0 #' } #' \deqn{ #' f(x) = 0 for x \neq k_0 #' }{ #' f(x) = 0 for x \neq k_0 #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' The cumulative distribution function has the form #' #' \deqn{ #' F(x) = 0 for x < k_0 #' }{ #' F(x) = 0 for x < k_0 #' } #' \deqn{ #' F(x) = 1 for x \ge k_0 #' }{ #' F(x) = 1 for x \ge k_0 #' } #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = e^{k_0 t} #' }{ #' E(e^(tX)) = e^(k_0 t) #' } #' #' @examples #' dist_degenerate(x = 1:5) #' #' @export dist_degenerate <- function(x){ vec_is(x, numeric()) new_dist(x = x, class = "dist_degenerate") } #' @export format.dist_degenerate <- function(x, ...){ format(x[["x"]], ...) } #' @export density.dist_degenerate <- function(x, at, ...){ ifelse(at == x[["x"]], 1, 0) } #' @export quantile.dist_degenerate <- function(x, p, ...){ ifelse(p < 0 | p > 1, NaN, x[["x"]]) } #' @export cdf.dist_degenerate <- function(x, q, ...){ ifelse(q >= x[["x"]], 1, 0) } #' @export generate.dist_degenerate <- function(x, times, ...){ rep(x[["x"]], times) } #' @export mean.dist_degenerate <- function(x, ...){ x[["x"]] } #' @export covariance.dist_degenerate <- function(x, ...){ 0 } #' @export skewness.dist_degenerate <- function(x, ...) NA_real_ #' @export kurtosis.dist_degenerate <- function(x, ...) NA_real_ distributional/R/hilo.R0000644000175000017500000001444014164770542014734 0ustar nileshnilesh#' Construct hilo intervals #' #' @param lower,upper A numeric vector of values for lower and upper limits. #' @param size Size of the interval between \[0, 100\]. 
#' #' @return A "hilo" vector #' #' @author Earo Wang & Mitchell O'Hara-Wild #' #' @examples #' new_hilo(lower = rnorm(10), upper = rnorm(10) + 5, size = 95) #' #' @export new_hilo <- function(lower = double(), upper = double(), size = double()) { vec_assert(size, double()) if (any(size < 0 | size > 100, na.rm = TRUE)) abort("'size' must be between [0, 100].") out <- vec_recycle_common(lower = lower, upper = upper) if(vec_is(lower, double()) && vec_is(upper, double())) { if (any(out[["upper"]] < out[["lower"]], na.rm = TRUE)) { abort("`upper` can't be lower than `lower`.") } } len <- vec_size(out[[1]]) out[["level"]] <- vctrs::vec_recycle(size, len) vctrs::new_rcrd(out, class = "hilo") } #' Compute intervals #' #' Used to extract a specified prediction interval at a particular confidence #' level from a distribution. #' #' The numeric lower and upper bounds can be extracted from the interval using #' `$lower` and `$upper` as shown in the examples below. #' #' @param x Object to create hilo from. #' @param ... Additional arguments used by methods. #' #' @examples #' # 95% interval from a standard normal distribution #' interval <- hilo(dist_normal(0, 1), 95) #' interval #' #' # Extract the individual quantities with `$lower`, `$upper`, and `$level` #' interval$lower #' interval$upper #' interval$level #' @export hilo <- function(x, ...){ UseMethod("hilo") } #' @export hilo.default <- function(x, ...){ abort(sprintf( "Objects of type `%s` are not supported by `hilo()`, you can create a custom `hilo` with `new_hilo()`", class(x) )) } #' Is the object a hilo #' #' @param x An object. #' #' @export is_hilo <- function(x) { inherits(x, "hilo") } #' @export format.hilo <- function(x, justify = "right", ...) 
{ lwr <- field(x, "lower") upr <- field(x, "upper") if(is.matrix(lwr)) { lwr <- if(ncol(lwr) > 1) vctrs::vec_ptype_abbr(lwr) else drop(lwr) } if(is.matrix(upr)) { upr <- if(ncol(upr) > 1) vctrs::vec_ptype_abbr(upr) else drop(upr) } limit <- paste( format(lwr, justify = justify, ...), format(upr, justify = justify, ...), sep = ", " ) paste0("[", limit, "]", field(x, "level")) } #' @export is.na.hilo <- function(x) { # both lower and upper are NA's x <- vec_data(x) is.na(x$lower) & is.na(x$upper) } #' @export vec_ptype2.hilo.hilo <- function(x, y, ...){ x } #' @export vec_cast.character.hilo <- function(x, to, ...){ sprintf( "[%s, %s]%s", as.character(x$lower), as.character(x$upper), as.character(x$level) ) } #' @method vec_math hilo #' @export vec_math.hilo <- function(.fn, .x, ...){ out <- vec_data(.x) if(.fn == "mean") abort("Cannot compute the mean of hilo intervals.") out[["lower"]] <- get(.fn)(out[["lower"]], ...) out[["upper"]] <- get(.fn)(out[["upper"]], ...) if(.fn %in% c("is.nan", "is.finite", "is.infinite")) return(out[["lower"]] | out[["upper"]]) vec_restore(out, .x) } #' @method vec_arith hilo #' @export vec_arith.hilo <- function(op, x, y, ...){ out <- dt_x <- vec_data(x) if(is_hilo(y)){ abort("Intervals should not be added to other intervals, the sum of intervals is not the interval from a sum of distributions.") } else if(is_empty(y)){ if(op == "-"){ out[["upper"]] <- get(op)(dt_x[["lower"]]) out[["lower"]] <- get(op)(dt_x[["upper"]]) } } else{ out[["lower"]] <- get(op)(dt_x[["lower"]], y) out[["upper"]] <- get(op)(dt_x[["upper"]], y) } vec_restore(out, x) } #' @method vec_arith.numeric hilo #' @export vec_arith.numeric.hilo <- function(op, x, y, ...){ out <- hl <- vec_data(y) out[["lower"]] <- get(op)(x, hl[["lower"]]) out[["upper"]] <- get(op)(x, hl[["upper"]]) if(x < 0 && op %in% c("*", "/")){ out[c("lower", "upper")] <- out[c("upper", "lower")] } vec_restore(out, y) } #' @importFrom utils .DollarNames #' @export .DollarNames.hilo <- function(x, 
pattern){ utils::.DollarNames(vec_data(x), pattern) } #' @export `$.hilo` <- function(x, name){ field(x, name) } #' @export `names<-.hilo` <- function(x, value) { # abort("A object cannot be named.") x } # Graphics --------------------------------------------------------------------- #' @importFrom ggplot2 scale_type #' @export scale_type.hilo <- function(x){ "continuous" } #' Hilo interval scales #' #' @inheritParams ggplot2::scale_y_continuous #' #' @export scale_hilo_continuous <- function(name = waiver(), breaks = waiver(), minor_breaks = waiver(), n.breaks = NULL, labels = waiver(), limits = NULL, expand = waiver(), oob = identity, na.value = NA, trans = "identity", guide = waiver(), position = "left", sec.axis = waiver()) { sc <- ggplot2::scale_y_continuous( name = name, breaks = breaks, minor_breaks = minor_breaks, n.breaks = n.breaks, labels = labels, limits = limits, expand = expand, oob = oob, na.value = na.value, trans = trans, guide = guide, position = position, sec.axis = sec.axis ) ggplot2::ggproto( NULL, sc, aesthetics = c("hilo"), map = function(self, x, limits = self$get_limits()) { scaled <- self$oob(x, limits) scaled[is.na(scaled)] <- self$na.value scaled }, oob = function(x, range = c(0, 1), only.finite = TRUE){ force(range) finite <- if (only.finite) is.finite(x) else TRUE dt <- vec_data(x) x[finite & dt$lower < range[1]] <- NA x[finite & dt$upper > range[2]] <- NA x }, clone = function(self) { new <- ggplot2::ggproto(NULL, self) new$range <- hilo_range() new }, range = hilo_range() ) } RangeHilo <- ggplot2::ggproto("RangeHilo", NULL, train = function(self, x) { self$range <- scales::train_continuous(c(vec_data(x)$lower, vec_data(x)$upper), self$range) }, reset = function(self) { self$range <- NULL } ) hilo_range <- function() { ggplot2::ggproto(NULL, RangeHilo) } distributional/R/dist_inverse_gamma.R0000644000175000017500000000411114164726113017626 0ustar nileshnilesh#' The Inverse Gamma distribution #' #' \lifecycle{stable} #' #' 
@inheritParams actuar::dinvgamma #' #' @seealso [actuar::InverseGamma] #' #' @examples #' dist <- dist_inverse_gamma(shape = c(1,2,3,3), rate = c(1,1,1,2)) #' dist #' #' @examplesIf requireNamespace("actuar", quietly = TRUE) #' mean(dist) #' variance(dist) #' support(dist) #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_inverse_gamma #' @export dist_inverse_gamma <- function(shape, rate = 1/scale, scale){ shape <- vec_cast(shape, double()) rate <- vec_cast(rate, double()) if(any(shape <= 0)){ abort("The shape parameter of a Inverse Gamma distribution must be strictly positive.") } if(any(rate <= 0)){ abort("The rate/scale parameter of a Inverse Gamma distribution must be strictly positive.") } new_dist(s = shape, r = rate, class = "dist_inverse_gamma") } #' @export format.dist_inverse_gamma <- function(x, digits = 2, ...){ sprintf( "InvGamma(%s, %s)", format(x[["s"]], digits = digits, ...), format(1/x[["r"]], digits = digits, ...) 
) } #' @export density.dist_inverse_gamma <- function(x, at, ...){ require_package("actuar") actuar::dinvgamma(at, x[["s"]], x[["r"]]) } #' @export log_density.dist_inverse_gamma <- function(x, at, ...){ require_package("actuar") actuar::dinvgamma(at, x[["s"]], x[["r"]], log = TRUE) } #' @export quantile.dist_inverse_gamma <- function(x, p, ...){ require_package("actuar") actuar::qinvgamma(p, x[["s"]], x[["r"]]) } #' @export cdf.dist_inverse_gamma <- function(x, q, ...){ require_package("actuar") actuar::pinvgamma(q, x[["s"]], x[["r"]]) } #' @export generate.dist_inverse_gamma <- function(x, times, ...){ require_package("actuar") actuar::rinvgamma(times, x[["s"]], x[["r"]]) } #' @export mean.dist_inverse_gamma <- function(x, ...){ if(x[["s"]] <= 1) return(NA_real_) 1/(x[["r"]]*(x[["s"]]-1)) } #' @export covariance.dist_inverse_gamma <- function(x, ...){ if(x[["s"]] <= 2) return(NA_real_) 1/(x[["r"]]^2*(x[["s"]]-1)^2*(x[["s"]]-2)) } distributional/R/dist_chisq.R0000644000175000017500000000645414151532232016125 0ustar nileshnilesh#' The (non-central) Chi-Squared Distribution #' #' \lifecycle{stable} #' #' Chi-square distributions show up often in frequentist settings #' as the sampling distribution of test statistics, especially #' in maximum likelihood estimation settings. #' #' @inheritParams stats::dchisq #' #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a \eqn{\chi^2} random variable with #' `df` = \eqn{k}. 
#' #' **Support**: \eqn{R^+}, the set of positive real numbers #' #' **Mean**: \eqn{k} #' #' **Variance**: \eqn{2k} #' #' **Probability density function (p.d.f)**: #' #' \deqn{ #' f(x) = \frac{1}{\sqrt{2 \pi \sigma^2}} e^{-(x - \mu)^2 / 2 \sigma^2} #' }{ #' f(x) = 1 / (2 \pi \sigma^2) exp(-(x - \mu)^2 / (2 \sigma^2)) #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' The cumulative distribution function has the form #' #' \deqn{ #' F(t) = \int_{-\infty}^t \frac{1}{\sqrt{2 \pi \sigma^2}} e^{-(x - \mu)^2 / 2 \sigma^2} dx #' }{ #' F(t) = integral_{-\infty}^t 1 / (2 \pi \sigma^2) exp(-(x - \mu)^2 / (2 \sigma^2)) dx #' } #' #' but this integral does not have a closed form solution and must be #' approximated numerically. The c.d.f. of a standard normal is sometimes #' called the "error function". The notation \eqn{\Phi(t)} also stands #' for the c.d.f. of a standard normal evaluated at \eqn{t}. Z-tables #' list the value of \eqn{\Phi(t)} for various \eqn{t}. #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = e^{\mu t + \sigma^2 t^2 / 2} #' }{ #' E(e^(tX)) = e^(\mu t + \sigma^2 t^2 / 2) #' } #' #' #' @seealso [stats::Chisquare] #' #' @examples #' dist <- dist_chisq(df = c(1,2,3,4,6,9)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_chisq #' @export dist_chisq <- function(df, ncp = 0){ df <- vec_cast(df, double()) ncp <- vec_cast(ncp, double()) if(any(df < 0)){ abort("The degrees of freedom parameter of a Chi-Squared distribution must be non-negative.") } new_dist(df = df, ncp = ncp, class = "dist_chisq") } #' @export format.dist_chisq <- function(x, digits = 2, ...){ sprintf( if (is_utf8_output()) "\u1d6a\u00b2(%s)" else "x2(%s)", format(x[["df"]], digits = digits, ...) 
) } #' @export density.dist_chisq <- function(x, at, ...){ stats::dchisq(at, x[["df"]], x[["ncp"]]) } #' @export log_density.dist_chisq <- function(x, at, ...){ stats::dchisq(at, x[["df"]], x[["ncp"]], log = TRUE) } #' @export quantile.dist_chisq <- function(x, p, ...){ stats::qchisq(p, x[["df"]], x[["ncp"]]) } #' @export cdf.dist_chisq <- function(x, q, ...){ stats::pchisq(q, x[["df"]], x[["ncp"]]) } #' @export generate.dist_chisq <- function(x, times, ...){ stats::rchisq(times, x[["df"]], x[["ncp"]]) } #' @export mean.dist_chisq <- function(x, ...){ x[["df"]] + x[["ncp"]] } #' @export covariance.dist_chisq <- function(x, ...){ 2*(x[["df"]] + 2*x[["ncp"]]) } #' @export skewness.dist_chisq <- function(x, ...) sqrt(8 / x[["df"]]) #' @export kurtosis.dist_chisq <- function(x, ...) 12 / x[["df"]] distributional/R/dist_inverse_exponential.R0000644000175000017500000000332414164725747021113 0ustar nileshnilesh#' The Inverse Exponential distribution #' #' \lifecycle{stable} #' #' @inheritParams actuar::dinvexp #' #' @seealso [actuar::InverseExponential] #' #' @examples #' dist <- dist_inverse_exponential(rate = 1:5) #' dist #' #' @examplesIf requireNamespace("actuar", quietly = TRUE) #' mean(dist) #' variance(dist) #' support(dist) #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_inverse_exponential #' @export dist_inverse_exponential <- function(rate){ rate <- vec_cast(rate, double()) if(any(rate <= 0)){ abort("The rate parameter of a Inverse Exponential distribution must be strictly positive.") } new_dist(r = rate, class = "dist_inverse_exponential") } #' @export format.dist_inverse_exponential <- function(x, digits = 2, ...){ sprintf( "InvExp(%s)", format(x[["r"]], digits = digits, ...) 
) } #' @export density.dist_inverse_exponential <- function(x, at, ...){ require_package("actuar") actuar::dinvexp(at, x[["r"]]) } #' @export log_density.dist_inverse_exponential <- function(x, at, ...){ require_package("actuar") actuar::dinvexp(at, x[["r"]], log = TRUE) } #' @export quantile.dist_inverse_exponential <- function(x, p, ...){ require_package("actuar") actuar::qinvexp(p, x[["r"]]) } #' @export cdf.dist_inverse_exponential <- function(x, q, ...){ require_package("actuar") actuar::pinvexp(q, x[["r"]]) } #' @export generate.dist_inverse_exponential <- function(x, times, ...){ require_package("actuar") actuar::rinvexp(times, x[["r"]]) } #' @export mean.dist_inverse_exponential <- function(x, ...){ NA_real_ } #' @export covariance.dist_inverse_exponential <- function(x, ...){ NA_real_ } distributional/R/hdr.R0000644000175000017500000000412114151532232014535 0ustar nileshnilesh#' Construct hdr intervals #' #' @param lower,upper A list of numeric vectors specifying the region's lower #' and upper bounds. #' @param size A numeric vector specifying the coverage size of the region. #' #' @return A "hdr" vector #' #' @author Mitchell O'Hara-Wild #' #' @examples #' #' new_hdr(lower = list(1, c(3,6)), upper = list(10, c(5, 8)), size = c(80, 95)) #' #' @export new_hdr <- function(lower = list_of(.ptype = double()), upper = list_of(.ptype = double()), size = double()) { lower <- as_list_of(lower) upper <- as_list_of(upper) vec_assert(lower, list_of(.ptype = double())) vec_assert(upper, list_of(.ptype = double())) vec_assert(size, double()) if (any(size < 0 | size > 100, na.rm = TRUE)) abort("'size' must be between [0, 100].") out <- vec_recycle_common(lower = lower, upper = upper) mapply( function(l,u) if (any(u, where the math #' will render nicely. #' #' In the following, let \eqn{X} be a Gumbel random variable with location #' parameter `mu` = \eqn{\mu}, scale parameter `sigma` = \eqn{\sigma}. #' #' **Support**: \eqn{R}, the set of all real numbers. 
#'
#' **Mean**: \eqn{\mu + \sigma\gamma}, where \eqn{\gamma} is Euler's
#' constant, approximately equal to 0.57722.
#'
#' **Median**: \eqn{\mu - \sigma\ln(\ln 2)}{\mu - \sigma ln(ln 2)}.
#'
#' **Variance**: \eqn{\sigma^2 \pi^2 / 6}.
#'
#' **Probability density function (p.d.f)**:
#'
#' \deqn{f(x) = \sigma ^ {-1} \exp[-(x - \mu) / \sigma]%
#'       \exp\{-\exp[-(x - \mu) / \sigma] \}}{%
#'      f(x) = (1 / \sigma) exp[-(x - \mu) / \sigma]%
#'      exp{-exp[-(x - \mu) / \sigma]}}
#' for \eqn{x} in \eqn{R}, the set of all real numbers.
#'
#' **Cumulative distribution function (c.d.f)**:
#'
#' In the \eqn{\xi = 0} (Gumbel) special case
#' \deqn{F(x) = \exp\{-\exp[-(x - \mu) / \sigma] \}}{%
#'       F(x) = exp{ - exp[-(x - \mu) / \sigma]} }
#' for \eqn{x} in \eqn{R}, the set of all real numbers.
#'
#' @seealso [actuar::Gumbel]
#'
#' @examples
#' dist <- dist_gumbel(alpha = c(0.5, 1, 1.5, 3), scale = c(2, 2, 3, 4))
#' dist
#'
#' @examplesIf requireNamespace("actuar", quietly = TRUE)
#' mean(dist)
#' variance(dist)
#' skewness(dist)
#' kurtosis(dist)
#' support(dist)
#' generate(dist, 10)
#'
#' density(dist, 2)
#' density(dist, 2, log = TRUE)
#'
#' cdf(dist, 4)
#'
#' quantile(dist, 0.7)
#'
#' @name dist_gumbel
#' @export
dist_gumbel <- function(alpha, scale){
  alpha <- vec_cast(alpha, double())
  scale <- vec_cast(scale, double())
  if(any(scale <= 0)){
    abort("The scale parameter of a Gumbel distribution must be strictly positive.")
  }
  new_dist(a = alpha, s = scale, class = "dist_gumbel")
}

#' @export
format.dist_gumbel <- function(x, digits = 2, ...){
  sprintf(
    "Gumbel(%s, %s)",
    format(x[["a"]], digits = digits, ...),
    format(x[["s"]], digits = digits, ...)
  )
}

#' @export
density.dist_gumbel <- function(x, at, ...){
  require_package("actuar")
  actuar::dgumbel(at, x[["a"]], x[["s"]])
}

#' @export
log_density.dist_gumbel <- function(x, at, ...){
  require_package("actuar")
  actuar::dgumbel(at, x[["a"]], x[["s"]], log = TRUE)
}

#' @export
quantile.dist_gumbel <- function(x, p, ...){
  require_package("actuar")
  actuar::qgumbel(p, x[["a"]], x[["s"]])
}

#' @export
cdf.dist_gumbel <- function(x, q, ...){
  require_package("actuar")
  actuar::pgumbel(q, x[["a"]], x[["s"]])
}

#' @export
generate.dist_gumbel <- function(x, times, ...){
  require_package("actuar")
  actuar::rgumbel(times, x[["a"]], x[["s"]])
}

#' @export
mean.dist_gumbel <- function(x, ...){
  # FIX: this was the only actuar-backed method in this file missing the
  # require_package() guard, so mean() on a Gumbel without {actuar} installed
  # failed with an opaque "could not find function" error instead of the
  # informative install prompt every sibling method produces.
  require_package("actuar")
  actuar::mgumbel(1, x[["a"]], x[["s"]])
}

#' @export
covariance.dist_gumbel <- function(x, ...){
  # Var(X) = (pi * sigma)^2 / 6 for the Gumbel distribution.
  (pi*x[["s"]])^2/6
}

#' @export
skewness.dist_gumbel <- function(x, ...) {
  # Gumbel skewness is a constant (~1.1395), independent of the parameters:
  # 12 * sqrt(6) * zeta(3) / pi^3, with Apery's constant zeta(3) hard-coded.
  zeta3 <- 1.20205690315959401459612
  (12 * sqrt(6) * zeta3) / pi^3
}

#' @export
kurtosis.dist_gumbel <- function(x, ...) 12/5
distributional/R/distributional-package.R0000644000175000017500000000054413711726207020422 0ustar nileshnilesh#' @keywords internal
"_PACKAGE"

# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
#' @importFrom lifecycle deprecate_soft
## usethis namespace: end
#' @import vctrs
#' @import rlang
NULL

# Used for generating transformation function expressions
globalVariables("x")
distributional/R/dist_burr.R0000644000175000017500000000436714164725641016005 0ustar nileshnilesh#' The Burr distribution
#'
#' \lifecycle{stable}
#'
#' @inheritParams actuar::dburr
#'
#' @seealso [actuar::Burr]
#'
#' @examples
#' dist <- dist_burr(shape1 = c(1,1,1,2,3,0.5), shape2 = c(1,2,3,1,1,2))
#' dist
#'
#' @examplesIf requireNamespace("actuar", quietly = TRUE)
#' mean(dist)
#' variance(dist)
#' support(dist)
#' generate(dist, 10)
#'
#' density(dist, 2)
#' density(dist, 2, log = TRUE)
#'
#' cdf(dist, 4)
#'
#' quantile(dist, 0.7)
#'
#' @name dist_burr
#' @export
dist_burr <- function(shape1, shape2, rate = 1){
  shape1 <- vec_cast(shape1, double())
  shape2 <- vec_cast(shape2, double())
  if(any(shape1 <= 0)){
    abort("The shape1 parameter of a Burr distribution must be strictly positive.")
  }
  if(any(shape2 <= 0)){
    abort("The shape2 parameter of a Burr distribution must be strictly positive.")
  }
  if(any(rate <= 0)){
    abort("The rate parameter of a Burr distribution must be strictly positive.")
  }
  new_dist(s1 = shape1, s2 = shape2, r = rate, class = "dist_burr")
}

#' @export
format.dist_burr <- function(x, digits = 2, ...){
  sprintf(
    "Burr12(%s, %s, %s)",
    format(x[["s1"]], digits = digits, ...),
    # BUG FIX: x[["r"]] was previously passed as a stray second positional
    # argument here (format(x[["s2"]], x[["r"]], digits = ...)), where
    # format.default absorbs it as `trim`, corrupting the printed shape2
    # value. shape2 is formatted alone, matching the s1/r lines.
    format(x[["s2"]], digits = digits, ...),
    format(x[["r"]], digits = digits, ...)
  )
}

#' @export
density.dist_burr <- function(x, at, ...){
  require_package("actuar")
  actuar::dburr(at, x[["s1"]], x[["s2"]], x[["r"]])
}

#' @export
log_density.dist_burr <- function(x, at, ...){
  require_package("actuar")
  actuar::dburr(at, x[["s1"]], x[["s2"]], x[["r"]], log = TRUE)
}

#' @export
quantile.dist_burr <- function(x, p, ...){
  require_package("actuar")
  actuar::qburr(p, x[["s1"]], x[["s2"]], x[["r"]])
}

#' @export
cdf.dist_burr <- function(x, q, ...){
  require_package("actuar")
  actuar::pburr(q, x[["s1"]], x[["s2"]], x[["r"]])
}

#' @export
generate.dist_burr <- function(x, times, ...){
  require_package("actuar")
  actuar::rburr(times, x[["s1"]], x[["s2"]], x[["r"]])
}

#' @export
mean.dist_burr <- function(x, ...){
  require_package("actuar")
  actuar::mburr(1, x[["s1"]], x[["s2"]], x[["r"]])
}

#' @export
covariance.dist_burr <- function(x, ...){
  require_package("actuar")
  # Var(X) = E[X^2] - E[X]^2 from the first two raw moments.
  m1 <- actuar::mburr(1, x[["s1"]], x[["s2"]], x[["r"]])
  m2 <- actuar::mburr(2, x[["s1"]], x[["s2"]], x[["r"]])
  -m1^2 + m2
}
distributional/R/dist_logistic.R0000644000175000017500000000550114151532232016623 0ustar nileshnilesh#' The Logistic distribution
#'
#' \lifecycle{stable}
#'
#' A continuous distribution on the real line. For binary outcomes
#' the model given by \eqn{P(Y = 1 | X) = F(X \beta)} where
#' \eqn{F} is the Logistic [cdf()] is called *logistic regression*.
#'
#' @inheritParams stats::dlogis
#'
#'
#' @details
#'
#' We recommend reading this documentation on
#' , where the math
#' will render nicely.
#'
#' In the following, let \eqn{X} be a Logistic random variable with
#' `location` = \eqn{\mu} and `scale` = \eqn{s}.
#'
#' **Support**: \eqn{R}, the set of all real numbers
#'
#' **Mean**: \eqn{\mu}
#'
#' **Variance**: \eqn{s^2 \pi^2 / 3}
#'
#' **Probability density function (p.d.f)**:
#'
#' \deqn{
#'   f(x) = \frac{e^{-(\frac{x - \mu}{s})}}{s [1 + \exp(-(\frac{x - \mu}{s})) ]^2}
#' }{
#'   f(x) = e^(-(t - \mu) / s) / (s (1 + e^(-(t - \mu) / s))^2)
#' }
#'
#' **Cumulative distribution function (c.d.f)**:
#'
#' \deqn{
#'   F(t) = \frac{1}{1 + e^{-(\frac{t - \mu}{s})}}
#' }{
#'   F(t) = 1 / (1 + e^(-(t - \mu) / s))
#' }
#'
#' **Moment generating function (m.g.f)**:
#'
#' \deqn{
#'   E(e^{tX}) = e^{\mu t} \beta(1 - st, 1 + st)
#' }{
#'   E(e^(tX)) = e^(\mu t) \beta(1 - st, 1 + st)
#' }
#'
#' where \eqn{\beta(x, y)} is the Beta function.
#'
#' @seealso [stats::Logistic]
#'
#' @examples
#' dist <- dist_logistic(location = c(5,9,9,6,2), scale = c(2,3,4,2,1))
#'
#' dist
#' mean(dist)
#' variance(dist)
#' skewness(dist)
#' kurtosis(dist)
#'
#' generate(dist, 10)
#'
#' density(dist, 2)
#' density(dist, 2, log = TRUE)
#'
#' cdf(dist, 4)
#'
#' quantile(dist, 0.7)
#'
#' @name dist_logistic
#' @export
dist_logistic <- function(location, scale){
  location <- vec_cast(location, double())
  scale <- vec_cast(scale, double())
  # CONSISTENCY FIX: every sibling constructor (Cauchy, Gumbel, Burr, IG, ...)
  # validates its scale-type parameter; dist_logistic previously accepted
  # scale <= 0 silently, yielding NaN densities downstream. NA parameters are
  # still permitted, matching dist_cauchy's pattern.
  if(any(scale[!is.na(scale)] <= 0)){
    abort("The scale parameter of a Logistic distribution must be strictly positive.")
  }
  new_dist(l = location, s = scale, class = "dist_logistic")
}

#' @export
format.dist_logistic <- function(x, digits = 2, ...){
  sprintf(
    "Logistic(%s, %s)",
    format(x[["l"]], digits = digits, ...),
    format(x[["s"]], digits = digits, ...)
  )
}

#' @export
density.dist_logistic <- function(x, at, ...){
  stats::dlogis(at, x[["l"]], x[["s"]])
}

#' @export
log_density.dist_logistic <- function(x, at, ...){
  stats::dlogis(at, x[["l"]], x[["s"]], log = TRUE)
}

#' @export
quantile.dist_logistic <- function(x, p, ...){
  stats::qlogis(p, x[["l"]], x[["s"]])
}

#' @export
cdf.dist_logistic <- function(x, q, ...){
  stats::plogis(q, x[["l"]], x[["s"]])
}

#' @export
generate.dist_logistic <- function(x, times, ...){
  stats::rlogis(times, x[["l"]], x[["s"]])
}

#' @export
mean.dist_logistic <- function(x, ...){
  x[["l"]]
}

#' @export
covariance.dist_logistic <- function(x, ...){
  # Var(X) = (s * pi)^2 / 3 for the logistic distribution.
  (x[["s"]]*pi)^2/3
}

#' @export
skewness.dist_logistic <- function(x, ...) 0

#' @export
kurtosis.dist_logistic <- function(x, ...) 6 / 5
distributional/R/inflated.R0000644000175000017500000000372414151532232015556 0ustar nileshnilesh#' Inflate a value of a probability distribution
#'
#' \lifecycle{maturing}
#'
#' @param dist The distribution(s) to inflate.
#' @param prob The added probability of observing `x`.
#' @param x The value to inflate. The default of `x = 0` is for zero-inflation.
#'
#' @name dist_inflated
#' @export
dist_inflated <- function(dist, prob, x = 0){
  vec_is(dist, new_dist())
  if(prob < 0 || prob > 1){
    abort("The inflation probability must be between 0 and 1.")
  }
  new_dist(dist = dist, x = x, p = prob,
           dimnames = dimnames(dist), class = "dist_inflated")
}

#' @export
format.dist_inflated <- function(x, ...){
  sprintf(
    "%s+%s",
    format(x[["x"]]),
    format(x[["dist"]])
  )
}

#' @export
density.dist_inflated <- function(x, at, ...){
  x[["p"]]*(at==x[["x"]]) + (1-x[["p"]])*density(x[["dist"]], at, ...)
}

#' @export
quantile.dist_inflated <- function(x, p, ...){
  qt <- quantile(x[["dist"]], pmax(0, (p - x[["p"]]) / (1-x[["p"]])), ...)
  if(qt >= x[["x"]]) return(qt)
  qt <- quantile(x[["dist"]], p, ...)
if(qt < x[["x"]]) qt else x[["x"]] } #' @export cdf.dist_inflated <- function(x, q, ...){ x[["p"]]*(q>=x[["x"]]) + (1-x[["p"]])*cdf(x[["dist"]], q, ...) } #' @export generate.dist_inflated <- function(x, times, ...){ p <- x[["p"]] inf <- stats::runif(times) < p r <- vec_init(x[["x"]], times) r[inf] <- x[["x"]] r[!inf] <- generate(x[["dist"]], sum(!inf)) r } #' @export mean.dist_inflated <- function(x, ...){ # Can't compute if inflation value is not numeric if(!vec_is(x[["x"]], numeric())) return(NA_real_) p <- x[["p"]] p*x[["x"]] + (1-p)*mean(x[["dist"]]) } #' @export covariance.dist_inflated <- function(x, ...){ # Can't compute if inflation value is not numeric if(!vec_is(x[["x"]], numeric())) return(NA_real_) # Can't (easily) compute if inflation value is not zero if(x[["x"]] != 0) return(NA_real_) m1 <- mean(x[["dist"]]) v <- variance(x[["dist"]]) m2 <- v + m1^2 p <- x[["p"]] (1-p)*v + p*(1-p)*m1^2 } distributional/R/dist_poisson_inverse_gaussian.R0000644000175000017500000000427214164725536022150 0ustar nileshnilesh#' The Poisson-Inverse Gaussian distribution #' #' \lifecycle{stable} #' #' @inheritParams actuar::dpoisinvgauss #' #' @seealso [actuar::PoissonInverseGaussian] #' #' @examples #' dist <- dist_poisson_inverse_gaussian(mean = rep(0.1, 3), shape = c(0.4, 0.8, 1)) #' dist #' #' @examplesIf requireNamespace("actuar", quietly = TRUE) #' mean(dist) #' variance(dist) #' support(dist) #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_poisson_inverse_gaussian #' @export dist_poisson_inverse_gaussian <- function(mean, shape){ mean <- vec_cast(mean, double()) shape <- vec_cast(shape, double()) if(any(mean[!is.na(mean)] <= 0)){ abort("The mean parameter of a Poisson-Inverse Gaussian distribution must be strictly positive.") } if(any(shape[!is.na(shape)] <= 0)){ abort("The shape parameter of a Poisson-Inverse Gaussian distribution must be strictly positive.") } new_dist(m = 
mean, s = shape, class = "dist_poisson_inverse_gaussian") } #' @export format.dist_poisson_inverse_gaussian <- function(x, digits = 2, ...){ sprintf( "PIG(%s, %s)", format(x[["m"]], digits = digits, ...), format(x[["s"]], digits = digits, ...) ) } #' @export density.dist_poisson_inverse_gaussian <- function(x, at, ...){ require_package("actuar") actuar::dpoisinvgauss(at, x[["m"]], x[["s"]]) } #' @export log_density.dist_poisson_inverse_gaussian <- function(x, at, ...){ require_package("actuar") actuar::dpoisinvgauss(at, x[["m"]], x[["s"]], log = TRUE) } #' @export quantile.dist_poisson_inverse_gaussian <- function(x, p, ...){ require_package("actuar") actuar::qpoisinvgauss(p, x[["m"]], x[["s"]]) } #' @export cdf.dist_poisson_inverse_gaussian <- function(x, q, ...){ require_package("actuar") actuar::ppoisinvgauss(q, x[["m"]], x[["s"]]) } #' @export generate.dist_poisson_inverse_gaussian <- function(x, times, ...){ require_package("actuar") actuar::rpoisinvgauss(times, x[["m"]], x[["s"]]) } #' @export mean.dist_poisson_inverse_gaussian <- function(x, ...){ x[["m"]] } #' @export covariance.dist_poisson_inverse_gaussian <- function(x, ...){ x[["m"]]/x[["s"]] * (x[["m"]]^2 + x[["s"]]) } distributional/R/dist_cauchy.R0000644000175000017500000000570614151532232016271 0ustar nileshnilesh#' The Cauchy distribution #' #' \lifecycle{maturing} #' #' The Cauchy distribution is the student's t distribution with one degree of #' freedom. The Cauchy distribution does not have a well defined mean or #' variance. Cauchy distributions often appear as priors in Bayesian contexts #' due to their heavy tails. #' #' @inheritParams stats::dcauchy #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a Cauchy variable with mean #' `location =` \eqn{x_0} and `scale` = \eqn{\gamma}. #' #' **Support**: \eqn{R}, the set of all real numbers #' #' **Mean**: Undefined. #' #' **Variance**: Undefined. 
#'
#' **Probability density function (p.d.f)**:
#'
#' \deqn{
#'   f(x) = \frac{1}{\pi \gamma \left[1 + \left(\frac{x - x_0}{\gamma} \right)^2 \right]}
#' }{
#'   f(x) = 1 / (\pi \gamma (1 + ((x - x_0) / \gamma)^2)
#' }
#'
#' **Cumulative distribution function (c.d.f)**:
#'
#' \deqn{
#'   F(t) = \frac{1}{\pi} \arctan \left( \frac{t - x_0}{\gamma} \right) +
#'   \frac{1}{2}
#' }{
#'   F(t) = arctan((t - x_0) / \gamma) / \pi + 1/2
#' }
#'
#' **Moment generating function (m.g.f)**:
#'
#' Does not exist.
#'
#' @seealso [stats::Cauchy]
#'
#' @examples
#' dist <- dist_cauchy(location = c(0, 0, 0, -2), scale = c(0.5, 1, 2, 1))
#'
#' dist
#' mean(dist)
#' variance(dist)
#' skewness(dist)
#' kurtosis(dist)
#'
#' generate(dist, 10)
#'
#' density(dist, 2)
#' density(dist, 2, log = TRUE)
#'
#' cdf(dist, 4)
#'
#' quantile(dist, 0.7)
#'
#' @name dist_cauchy
#' @export
dist_cauchy <- function(location, scale){
  location <- vec_cast(location, double())
  scale <- vec_cast(scale, double())
  if(any(scale[!is.na(scale)] <= 0)){
    # FIX: message previously read "must strictly positive" (missing "be"),
    # inconsistent with every other constructor's error text in this package.
    abort("The scale parameter of a Cauchy distribution must be strictly positive.")
  }
  new_dist(location = location, scale = scale, class = "dist_cauchy")
}

#' @export
format.dist_cauchy <- function(x, digits = 2, ...){
  sprintf(
    "Cauchy(%s, %s)",
    format(x[["location"]], digits = digits, ...),
    format(x[["scale"]], digits = digits, ...)
  )
}

#' @export
density.dist_cauchy <- function(x, at, ...){
  stats::dcauchy(at, x[["location"]], x[["scale"]])
}

#' @export
log_density.dist_cauchy <- function(x, at, ...){
  stats::dcauchy(at, x[["location"]], x[["scale"]], log = TRUE)
}

#' @export
quantile.dist_cauchy <- function(x, p, ...){
  stats::qcauchy(p, x[["location"]], x[["scale"]])
}

#' @export
cdf.dist_cauchy <- function(x, q, ...){
  stats::pcauchy(q, x[["location"]], x[["scale"]])
}

#' @export
generate.dist_cauchy <- function(x, times, ...){
  stats::rcauchy(times, x[["location"]], x[["scale"]])
}

# The Cauchy distribution has no finite moments: mean, variance, skewness
# and kurtosis are all undefined, hence NA_real_ below.
#' @export
mean.dist_cauchy <- function(x, ...){
  NA_real_
}

#' @export
covariance.dist_cauchy <- function(x, ...){
  NA_real_
}

#' @export
skewness.dist_cauchy <- function(x, ...){
  NA_real_
}

#' @export
kurtosis.dist_cauchy <- function(x, ...){
  NA_real_
}
distributional/R/dist_sample.R0000644000175000017500000000624014164705114016275 0ustar nileshnilesh#' Sampling distribution
#'
#' \lifecycle{stable}
#'
#' @param x A list of sampled values.
#'
#' @examples
#' # Univariate numeric samples
#' dist <- dist_sample(x = list(rnorm(100), rnorm(100, 10)))
#'
#' dist
#' mean(dist)
#' variance(dist)
#' skewness(dist)
#' generate(dist, 10)
#'
#' density(dist, 1)
#'
#' # Multivariate numeric samples
#' dist <- dist_sample(x = list(cbind(rnorm(100), rnorm(100, 10))))
#'
#' dist
#' mean(dist)
#' variance(dist)
#' skewness(dist)
#' generate(dist, 10)
#'
#' density(dist, 1)
#'
#' @export
dist_sample <- function(x){
  vec_assert(x, list())
  x <- as_list_of(x, .ptype = vec_ptype(x[[1]]))
  new_dist(x = x, class = "dist_sample")
}

#' @export
format.dist_sample <- function(x, ...){
  sprintf(
    "sample[%s]",
    vapply(x, vec_size, integer(1L))
  )
}

#' @export
density.dist_sample <- function(x, at, ..., na.rm = TRUE){
  # Apply independently over sample variates
  if(is.matrix(x$x)) {
    return(
      apply(x$x, 2, function(x, ...)
density.dist_sample(list(x=x), ...),
        at = at, ..., na.rm = TRUE
      )
    )
  }
  # Shortcut if only one point in density is needed
  if(vec_size(at) == 1){
    return(density(x[["x"]], from = at, to = at, n = 1)$y)
  }
  d <- density(x[["x"]], from = min(at), to = max(at), ..., na.rm=na.rm)
  stats::approx(d$x, d$y, xout = at)$y
}

#' @export
quantile.dist_sample <- function(x, p, ..., na.rm = TRUE){
  # Apply independently over sample variates
  if(is.matrix(x$x)) {
    return(
      apply(x$x, 2, function(x, ...) quantile.dist_sample(list(x=x), ...),
        p = p, ..., na.rm = TRUE
      )
    )
  }
  quantile(x$x, probs = p, ..., na.rm = na.rm, names = FALSE)
}

#' @export
cdf.dist_sample <- function(x, q, ..., na.rm = TRUE){
  # Apply independently over sample variates
  if(is.matrix(x$x)) {
    return(
      apply(x$x, 2, function(x, ...) cdf.dist_sample(list(x=x), ...),
        q = q, ..., na.rm = TRUE
      )
    )
  }
  if(length(q) > 1) return(vapply(q, cdf, numeric(1L), x = x, ...))
  # BUG FIX: the empirical c.d.f. is P(X <= q). The previous `x < q` computed
  # P(X < q), so cdf() evaluated at the sample maximum returned (n-1)/n
  # instead of 1 and disagreed with quantile() round-trips at sample points.
  vapply(x, function(x, q) mean(x <= q, ..., na.rm = na.rm), numeric(1L), q = q)
}

#' @export
generate.dist_sample <- function(x, times, ...){
  # Resample with replacement from the stored draws (bootstrap).
  i <- sample.int(vec_size(x[["x"]]), size = times, replace = TRUE)
  if(is.matrix(x$x)) x$x[i,,drop = FALSE] else x$x[i]
}

#' @export
mean.dist_sample <- function(x, ...){
  if(is.matrix(x$x)) apply(x$x, 2, mean, ...) else mean(x$x, ...)
}

#' @export
median.dist_sample <- function(x, na.rm = FALSE, ...){
  if(is.matrix(x$x)) apply(x$x, 2, median, na.rm = na.rm, ...) else median(x$x, na.rm = na.rm, ...)
}

#' @export
covariance.dist_sample <- function(x, ...){
  if(is.matrix(x$x)) stats::cov(x$x, ...) else stats::var(x$x, ...)
}

#' @export
skewness.dist_sample <- function(x, ..., na.rm = FALSE) {
  # BUG FIX: the guard previously tested is.matrix(x), but x here is the
  # parameter list (never a matrix), so the multivariate abort was dead code
  # and multivariate samples silently produced meaningless results. Test the
  # stored sample x$x, as every other dist_sample method does.
  if(is.matrix(x$x)) {abort("Multivariate sample skewness is not yet implemented.")}
  # Bias-adjusted sample skewness: sqrt(n) * m3 / m2^(3/2), scaled by
  # ((n-1)/n)^(3/2) via the (1 - 1/n) factor below.
  n <- lengths(x, use.names = FALSE)
  x <- lapply(x, function(.) . - mean(., na.rm = na.rm))
  sum_x2 <- vapply(x, function(.) sum(.^2, na.rm = na.rm), numeric(1L), USE.NAMES = FALSE)
  sum_x3 <- vapply(x, function(.)
sum(.^3, na.rm = na.rm), numeric(1L), USE.NAMES = FALSE) y <- sqrt(n) * sum_x3/(sum_x2^(3/2)) y * ((1 - 1/n))^(3/2) } distributional/R/dist_inverse_gaussian.R0000644000175000017500000000402114164725627020367 0ustar nileshnilesh#' The Inverse Gaussian distribution #' #' \lifecycle{stable} #' #' @inheritParams actuar::dinvgauss #' #' @seealso [actuar::InverseGaussian] #' #' @examples #' dist <- dist_inverse_gaussian(mean = c(1,1,1,3,3), shape = c(0.2, 1, 3, 0.2, 1)) #' dist #' #' @examplesIf requireNamespace("actuar", quietly = TRUE) #' mean(dist) #' variance(dist) #' support(dist) #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_inverse_gaussian #' @export dist_inverse_gaussian <- function(mean, shape){ mean <- vec_cast(mean, double()) shape <- vec_cast(shape, double()) if(any(mean[!is.na(mean)] <= 0)){ abort("The mean parameter of a Inverse Gaussian distribution must be strictly positive.") } if(any(shape[!is.na(shape)] <= 0)){ abort("The shape parameter of a Inverse Gaussian distribution must be strictly positive.") } new_dist(m = mean, s = shape, class = "dist_inverse_gaussian") } #' @export format.dist_inverse_gaussian <- function(x, digits = 2, ...){ sprintf( "IG(%s, %s)", format(x[["m"]], digits = digits, ...), format(x[["s"]], digits = digits, ...) 
) } #' @export density.dist_inverse_gaussian <- function(x, at, ...){ require_package("actuar") actuar::dinvgauss(at, x[["m"]], x[["s"]]) } #' @export log_density.dist_inverse_gaussian <- function(x, at, ...){ require_package("actuar") actuar::dinvgauss(at, x[["m"]], x[["s"]], log = TRUE) } #' @export quantile.dist_inverse_gaussian <- function(x, p, ...){ require_package("actuar") actuar::qinvgauss(p, x[["m"]], x[["s"]]) } #' @export cdf.dist_inverse_gaussian <- function(x, q, ...){ require_package("actuar") actuar::pinvgauss(q, x[["m"]], x[["s"]]) } #' @export generate.dist_inverse_gaussian <- function(x, times, ...){ require_package("actuar") actuar::rinvgauss(times, x[["m"]], x[["s"]]) } #' @export mean.dist_inverse_gaussian <- function(x, ...){ x[["m"]] } #' @export covariance.dist_inverse_gaussian <- function(x, ...){ x[["m"]]^3/x[["s"]] } distributional/R/scale-level.R0000644000175000017500000001320013711726207016162 0ustar nileshnilesh#' level luminance scales #' #' This set of scales defines new scales for prob geoms equivalent to the #' ones already defined by ggplot2. This allows the shade of confidence intervals #' to work with the legend output. #' @return A ggproto object inheriting from `Scale` #' @family scale_level_* #' @name scale_level #' @rdname scale_level NULL #' @rdname scale_level #' @inheritParams ggplot2::scale_colour_gradient #' @export scale_level_continuous <- function(..., guide = "level") { level_scale("level", "identity", identity, guide = guide, ...) } ScaleLevel <- ggplot2::ggproto(NULL, ggplot2::ScaleContinuous) #' @importFrom ggplot2 waiver discrete_scale level_scale <- function(...) 
{ ggplot2::ggproto(NULL, ggplot2::continuous_scale(...), range = level_range()) } level_range <- function(){ ggplot2::ggproto(NULL, RangeLevel) } RangeLevel <- ggplot2::ggproto(NULL, NULL, range = NULL, levels = NULL, reset = function(self){ self$range <- NULL self$levels <- NULL }, train = function(self, x){ self$range <- scales::train_continuous(x, self$range) self$levels <- unique(c(x[!is.na(x)],self$range)) } ) #' Level shade bar guide #' #' The level guide shows the colour from the forecast intervals which is blended with the series colour. #' #' @inheritParams ggplot2::guide_colourbar #' @param max_discrete The maximum number of levels to be shown using \code{\link[ggplot2]{guide_legend}}. #' If the number of levels exceeds this value, level shades are shown with \code{\link[ggplot2]{guide_colourbar}}. #' @param ... Further arguments passed onto either \code{\link[ggplot2]{guide_colourbar}} or \code{\link[ggplot2]{guide_legend}} #' #' @export guide_level <- function(title = waiver(), max_discrete = 5, ...) 
{ structure(list(title = title, max_discrete = max_discrete, available_aes = "level", args = list(...)), class=c("guide", "level_guide")) } #' Helper methods for guides #' #' @export #' @rdname guide-helpers #' @importFrom ggplot2 guide_colourbar guide_legend guide_train #' @keywords internal guide_train.level_guide <- function(guide, scale, aesthetic) { args <- append(guide[!(names(guide)%in%c("max_discrete", "args"))], guide$args) levels <- scale$range$levels if (length(levels) == 0 || all(is.na(levels))) return() if(length(levels)<=guide$max_discrete){ guide <- do.call("guide_legend", args) class(guide) <- c("guide", "guide_level") breaks <- levels cols <- darken_fill(rep.int("white", length(breaks)), breaks) key <- as.data.frame( set_names(list(cols), aesthetic %||% scale$aesthetics[1]), stringsAsFactors = FALSE ) key$.label <- scale$get_labels(breaks) if (!scale$is_discrete()) { limits <- scale$get_limits() noob <- !is.na(breaks) & limits[1] <= breaks & breaks <= limits[2] key <- key[noob, , drop = FALSE] } if (guide$reverse) key <- key[nrow(key):1, ] guide$key <- key guide$hash <- with(guide, digest::digest(list(title, key$.label, direction, name))) } else{ guide <- do.call("guide_colourbar", args) breaks <- scale$get_breaks() ticks <- as.data.frame(stats::setNames(list(scale$map(breaks)), aesthetic %||% scale$aesthetics[1])) ticks$.value <- breaks ticks$.label <- scale$get_labels(breaks) guide$key <- ticks .limits <- scale$get_limits() .bar <- seq(.limits[1], .limits[2], length = guide$nbin) if (length(.bar) == 0) { .bar = unique(.limits) } guide$bar <- data.frame(colour = scale$map(.bar), value = .bar, stringsAsFactors = FALSE) if (guide$reverse) { guide$key <- guide$key[nrow(guide$key):1, ] guide$bar <- guide$bar[nrow(guide$bar):1, ] } guide$hash <- with(guide, digest::digest(list(title, key$.label, bar, name))) } if(guide$title == "vctrs::vec_data(hilo)$level") guide$title <- "level" guide } #' @export #' @importFrom ggplot2 guide_geom #' @rdname 
guide-helpers guide_geom.guide_level <- function (guide, layers, default_mapping) { class(guide) <- c("guide", "legend") guide <- guide_geom(guide, layers, default_mapping) guide$geoms <- lapply(guide$geoms, function(x){ x$draw_key <- ggplot2::ggproto(NULL, NULL, draw_key = function(data, params, size){ lwd <- min(data$size, min(size) / 4) fillcol <- data$level #blendHex(data$col, data$level, 0.7) grid::rectGrob( width = grid::unit(1, "npc") - grid::unit(lwd, "mm"), height = grid::unit(1, "npc") - grid::unit(lwd, "mm"), gp = grid::gpar( col = fillcol, fill = scales::alpha(fillcol, data$alpha), lty = data$linetype, lwd = lwd * ggplot2::.pt, linejoin = "mitre" ) ) })$draw_key x }) guide } distributional/R/dist_student_t.R0000644000175000017500000001004614151532232017017 0ustar nileshnilesh#' The (non-central) location-scale Student t Distribution #' #' \lifecycle{stable} #' #' The Student's T distribution is closely related to the [Normal()] #' distribution, but has heavier tails. As \eqn{\nu} increases to \eqn{\infty}, #' the Student's T converges to a Normal. The T distribution appears #' repeatedly throughout classic frequentist hypothesis testing when #' comparing group means. #' #' @inheritParams stats::dt #' @param mu The location parameter of the distribution. #' If `ncp == 0` (or `NULL`), this is the median. #' @param sigma The scale parameter of the distribution. #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a **central** Students T random variable #' with `df` = \eqn{\nu}. #' #' **Support**: \eqn{R}, the set of all real numbers #' #' **Mean**: Undefined unless \eqn{\nu \ge 2}, in which case the mean is #' zero. #' #' **Variance**: #' #' \deqn{ #' \frac{\nu}{\nu - 2} #' }{ #' \nu / (\nu - 2) #' } #' #' Undefined if \eqn{\nu < 1}, infinite when \eqn{1 < \nu \le 2}. 
#' #' **Probability density function (p.d.f)**: #' #' \deqn{ #' f(x) = \frac{\Gamma(\frac{\nu + 1}{2})}{\sqrt{\nu \pi} \Gamma(\frac{\nu}{2})} (1 + \frac{x^2}{\nu} )^{- \frac{\nu + 1}{2}} #' }{ #' f(x) = \Gamma((\nu + 1) / 2) / (\sqrt(\nu \pi) \Gamma(\nu / 2)) (1 + x^2 / \nu)^(- (\nu + 1) / 2) #' } #' #' @seealso [stats::TDist] #' #' @examples #' dist <- dist_student_t(df = c(1,2,5), mu = c(0,1,2), sigma = c(1,2,3)) #' #' dist #' mean(dist) #' variance(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_student_t #' @export dist_student_t <- function(df, mu = 0, sigma = 1, ncp = NULL){ df <- vec_cast(df, numeric()) if(any(df <= 0)){ abort("The degrees of freedom parameter of a Student t distribution must be strictly positive.") } mu <- vec_cast(mu, double()) sigma <- vec_cast(sigma, double()) if(any(sigma[!is.na(sigma)] <= 0)){ abort("The scale (sigma) parameter of a Student t distribution must be strictly positive.") } new_dist(df = df, mu = mu, sigma = sigma, ncp = ncp, class = "dist_student_t") } #' @export format.dist_student_t <- function(x, digits = 2, ...){ out <- sprintf( "t(%s, %s, %s%s)", format(x[["df"]], digits = digits, ...), format(x[["mu"]], digits = digits, ...), format(x[["sigma"]], digits = digits, ...), if(is.null(x[["ncp"]])) "" else paste(",", format(x[["ncp"]], digits = digits, ...)) ) } #' @export density.dist_student_t <- function(x, at, ...){ ncp <- x[["ncp"]] %||% missing_arg() sigma <- x[["sigma"]] stats::dt((at - x[["mu"]])/sigma, x[["df"]], ncp) / sigma } #' @export log_density.dist_student_t <- function(x, at, ...){ ncp <- x[["ncp"]] %||% missing_arg() sigma <- x[["sigma"]] stats::dt((at - x[["mu"]])/sigma, x[["df"]], ncp, log = TRUE) - log(sigma) } #' @export quantile.dist_student_t <- function(x, p, ...){ ncp <- x[["ncp"]] %||% missing_arg() stats::qt(p, x[["df"]], ncp) * x[["sigma"]] + x[["mu"]] } #' @export cdf.dist_student_t <- 
function(x, q, ...){ ncp <- x[["ncp"]] %||% missing_arg() stats::pt((q - x[["mu"]])/x[["sigma"]], x[["df"]], ncp) } #' @export generate.dist_student_t <- function(x, times, ...){ ncp <- x[["ncp"]] %||% missing_arg() stats::rt(times, x[["df"]], ncp) * x[["sigma"]] + x[["mu"]] } #' @export mean.dist_student_t <- function(x, ...){ df <- x[["df"]] if(df <= 1) return(NA_real_) if(is.null(x[["ncp"]])){ x[["mu"]] } else { x[["mu"]] + x[["ncp"]] * sqrt(df/2) * (gamma((df-1)/2)/gamma(df/2)) * x[["sigma"]] } } #' @export covariance.dist_student_t <- function(x, ...){ df <- x[["df"]] ncp <- x[["ncp"]] if(df <= 1) return(NA_real_) if(df <= 2) return(Inf) if(is.null(ncp)){ df / (df - 2) * x[["sigma"]]^2 } else { ((df*(1+ncp^2))/(df-2) - (ncp * sqrt(df/2) * (gamma((df-1)/2)/gamma(df/2)))^2) * x[["sigma"]]^2 } } distributional/R/truncated.R0000644000175000017500000000526614142052233015762 0ustar nileshnilesh#' Truncate a distribution #' #' \lifecycle{experimental} #' #' Note that the samples are generated using inverse transform sampling, and the #' means and variances are estimated from samples. #' #' @param dist The distribution(s) to truncate. #' @param lower,upper The range of values to keep from a distribution. 
#' #' @name dist_truncated #' #' @examples #' dist <- dist_truncated(dist_normal(2,1), lower = 0) #' #' dist #' mean(dist) #' variance(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' if(requireNamespace("ggdist")) { #' library(ggplot2) #' ggplot() + #' ggdist::stat_dist_halfeye( #' aes(y = c("Normal", "Truncated"), #' dist = c(dist_normal(2,1), dist_truncated(dist_normal(2,1), lower = 0))) #' ) #' } #' #' @export dist_truncated <- function(dist, lower = -Inf, upper = Inf){ vec_is(dist, new_dist()) vec_is(lower, numeric()) vec_is(upper, numeric()) if(any(lower >= upper)){ abort("The `lower` truncation bound must be lower than the `upper` bound.") } new_dist(dist = dist, lower = lower, upper = upper, dimnames = dimnames(dist), class = "dist_truncated") } #' @export format.dist_truncated <- function(x, ...){ sprintf( "%s[%s,%s]", format(x[["dist"]]), x[["lower"]], x[["upper"]] ) } #' @export density.dist_truncated <- function(x, at, ...){ in_lim <- at >= x[["lower"]] & at <= x[["upper"]] cdf_upr <- cdf(x[["dist"]], x[["upper"]]) cdf_lwr <- cdf(x[["dist"]], x[["lower"]]) out <- numeric(length(at)) out[in_lim] <- density(x[["dist"]], at = at[in_lim], ...)/(cdf_upr - cdf_lwr) out } #' @export quantile.dist_truncated <- function(x, p, ...){ F_lwr <- cdf(x[["dist"]], x[["lower"]]) F_upr <- cdf(x[["dist"]], x[["upper"]]) qt <- quantile(x[["dist"]], F_lwr + p * (F_upr - F_lwr), ...) pmin(pmax(x[["lower"]], qt), x[["upper"]]) } #' @export cdf.dist_truncated <- function(x, q, ...){ cdf_upr <- cdf(x[["dist"]], x[["upper"]]) cdf_lwr <- cdf(x[["dist"]], x[["lower"]]) out <- numeric(length(q)) q_lwr <- q < x[["lower"]] # out[q_lwr <- q < x[["lower"]]] <- 0 out[q_upr <- q > x[["upper"]]] <- 1 q_mid <- !(q_lwr|q_upr) out[q_mid] <- (cdf(x[["dist"]], q = q[q_mid], ...) - cdf_lwr)/(cdf_upr - cdf_lwr) out } #' @export mean.dist_truncated <- function(x, ...) 
{
  # Body of mean.dist_truncated (its `mean.dist_truncated <- function(x, ...)`
  # header sits on the preceding line of this archive dump).
  if(inherits(x$dist, "dist_sample")) {
    # Sample-based distribution: truncation is exact here, so simply average
    # the stored draws that fall inside [lower, upper].
    y <- x$dist[[1]]
    mean(y[y >= x$lower & y <= x$upper])
  } else if(inherits(x$dist, "dist_normal")) {
    # Truncated normal: closed-form mean
    #   mu + sigma * (phi(a) - phi(b)) / (Phi(b) - Phi(a)),
    # where a, b are the standardised truncation bounds.
    mu <- x$dist$mu
    s <- x$dist$sigma
    a <- (x$lower - mu) / s
    b <- (x$upper - mu) / s
    mu + (stats::dnorm(a) - stats::dnorm(b))/(stats::pnorm(b) - stats::pnorm(a))*s
  } else {
    # Anything else: defer to the default method (the file header above notes
    # means are estimated from generated samples in that case).
    NextMethod()
  }
}
distributional/R/dist_binomial.R0000644000175000017500000001036014164724564016616 0ustar nileshnilesh
#' The Binomial distribution
#'
#' \lifecycle{stable}
#'
#' Binomial distributions are used to represent situations that can
#' be thought of as the result of \eqn{n} Bernoulli experiments (here the
#' \eqn{n} is defined as the `size` of the experiment). The classical
#' example is \eqn{n} independent coin flips, where each coin flip has
#' probability `p` of success. In this case, the individual probability of
#' flipping heads or tails is given by the Bernoulli(p) distribution,
#' and the probability of having \eqn{x} equal results (\eqn{x} heads,
#' for example), in \eqn{n} trials is given by the Binomial(n, p) distribution.
#' The equation of the Binomial distribution is directly derived from
#' the equation of the Bernoulli distribution.
#'
#' @param size The number of trials. Must be an integer greater than or equal
#'   to one. When `size = 1L`, the Binomial distribution reduces to the
#'   Bernoulli distribution. Often called `n` in textbooks.
#' @param prob The probability of success on each trial, `prob` can be any
#'   value in `[0, 1]`.
#'
#' @details
#'
#' We recommend reading this documentation on
#' , where the math
#' will render nicely.
#'
#' The Binomial distribution comes up when you are interested in the portion
#' of people who do a thing. The Binomial distribution
#' also comes up in the sign test, sometimes called the Binomial test
#' (see [stats::binom.test()]), where you may need the Binomial C.D.F. to
#' compute p-values.
#' #' In the following, let \eqn{X} be a Binomial random variable with parameter #' `size` = \eqn{n} and `p` = \eqn{p}. Some textbooks define \eqn{q = 1 - p}, #' or called \eqn{\pi} instead of \eqn{p}. #' #' **Support**: \eqn{\{0, 1, 2, ..., n\}}{{0, 1, 2, ..., n}} #' #' **Mean**: \eqn{np} #' #' **Variance**: \eqn{np \cdot (1 - p) = np \cdot q}{np (1 - p)} #' #' **Probability mass function (p.m.f)**: #' #' \deqn{ #' P(X = k) = {n \choose k} p^k (1 - p)^{n-k} #' }{ #' P(X = k) = choose(n, k) p^k (1 - p)^(n - k) #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' \deqn{ #' P(X \le k) = \sum_{i=0}^{\lfloor k \rfloor} {n \choose i} p^i (1 - p)^{n-i} #' }{ #' P(X \le k) = \sum_{i=0}^k choose(n, i) p^i (1 - p)^(n-i) #' } #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = (1 - p + p e^t)^n #' }{ #' E(e^(tX)) = (1 - p + p e^t)^n #' } #' #' @examples #' dist <- dist_binomial(size = 1:5, prob = c(0.05, 0.5, 0.3, 0.9, 0.1)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_binomial #' @export dist_binomial <- function(size, prob){ size <- vec_cast(size, integer()) prob <- vec_cast(prob, double()) if(any(size < 0)){ abort("The number of observations cannot be negative.") } if(any((prob < 0) | (prob > 1))){ abort("The probability of success must be between 0 and 1.") } new_dist(n = size, p = prob, class = "dist_binomial") } #' @export format.dist_binomial <- function(x, digits = 2, ...){ sprintf( "B(%s, %s)", format(x[["n"]], digits = digits, ...), format(x[["p"]], digits = digits, ...) 
) } #' @export density.dist_binomial <- function(x, at, ...){ stats::dbinom(at, x[["n"]], x[["p"]]) } #' @export log_density.dist_binomial <- function(x, at, ...){ stats::dbinom(at, x[["n"]], x[["p"]], log = TRUE) } #' @export quantile.dist_binomial <- function(x, p, ...){ as.integer(stats::qbinom(p, x[["n"]], x[["p"]])) } #' @export cdf.dist_binomial <- function(x, q, ...){ stats::pbinom(q, x[["n"]], x[["p"]]) } #' @export generate.dist_binomial <- function(x, times, ...){ as.integer(stats::rbinom(times, x[["n"]], x[["p"]])) } #' @export mean.dist_binomial <- function(x, ...){ x[["n"]]*x[["p"]] } #' @export covariance.dist_binomial <- function(x, ...){ x[["n"]]*x[["p"]]*(1-x[["p"]]) } #' @export skewness.dist_binomial <- function(x, ...) { n <- x[["n"]] p <- x[["p"]] q <- 1 - p (1 - (2 * p)) / sqrt(n * p * q) } #' @export kurtosis.dist_binomial <- function(x, ...) { n <- x[["n"]] p <- x[["p"]] q <- 1 - p (1 - (6 * p * q)) / (n * p * q) } distributional/R/dist_exponential.R0000644000175000017500000000305614151532232017337 0ustar nileshnilesh#' The Exponential Distribution #' #' \lifecycle{stable} #' #' @inheritParams stats::dexp #' #' @seealso [stats::Exponential] #' #' @examples #' dist <- dist_exponential(rate = c(2, 1, 2/3)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_exponential #' @export dist_exponential <- function(rate){ rate <- vec_cast(rate, double()) if(any(rate < 0)){ abort("The rate parameter of an Exponential distribution must be non-negative.") } new_dist(rate = rate, class = "dist_exponential") } #' @export format.dist_exponential <- function(x, digits = 2, ...){ sprintf( "Exp(%s)", format(x[["rate"]], digits = digits, ...) 
)
}

#' @export
density.dist_exponential <- function(x, at, ...){
  stats::dexp(at, x[["rate"]])
}

#' @export
log_density.dist_exponential <- function(x, at, ...){
  stats::dexp(at, x[["rate"]], log = TRUE)
}

#' @export
quantile.dist_exponential <- function(x, p, ...){
  stats::qexp(p, x[["rate"]])
}

#' @export
cdf.dist_exponential <- function(x, q, ...){
  stats::pexp(q, x[["rate"]])
}

#' @export
generate.dist_exponential <- function(x, times, ...){
  stats::rexp(times, x[["rate"]])
}

#' @export
mean.dist_exponential <- function(x, ...){
  # E[X] = 1 / rate
  1/x[["rate"]]
}

#' @export
covariance.dist_exponential <- function(x, ...){
  # Var[X] = 1 / rate^2
  1/x[["rate"]]^2
}

# Skewness and (excess) kurtosis of the exponential distribution are constants,
# independent of the rate parameter.
#' @export
skewness.dist_exponential <- function(x, ...) 2

#' @export
kurtosis.dist_exponential <- function(x, ...) 6
distributional/R/dist_missing.R0000644000175000017500000000267514151532232016470 0ustar nileshnilesh
#' Missing distribution
#'
#' \lifecycle{experimental}
#'
#' A placeholder distribution for handling missing values in a vector of
#' distributions.
#'
#' @param length The number of missing distributions
#'
#' @name dist_missing
#'
#' @examples
#' dist <- dist_missing(3L)
#'
#' dist
#' mean(dist)
#' variance(dist)
#'
#' generate(dist, 10)
#'
#' density(dist, 2)
#' density(dist, 2, log = TRUE)
#'
#' cdf(dist, 4)
#'
#' quantile(dist, 0.7)
#'
#' @export
dist_missing <- function(length = 1) {
  vctrs::vec_rep(NA_dist_, length)
}

# Prototype missing distribution: a length-one distribution vector whose single
# element is NULL (the internal representation of a missing distribution).
NA_dist_ <- structure(list(NULL), class = c("distribution", "vctrs_vctr", "list"))

#' @export
format.dist_na <- function(x, ...) {
  "NA"
}

# All summaries of a missing distribution are NA; the log_* variants share the
# same implementation as their plain counterparts.
#' @export
density.dist_na <- function(x, at, ...){
  NA_real_
}

#' @export
log_density.dist_na <- density.dist_na

#' @export
quantile.dist_na <- function(x, p, ...){
  NA_real_
}

#' @export
log_quantile.dist_na <- quantile.dist_na

#' @export
cdf.dist_na <- function(x, q, ...){
  NA_real_
}

#' @export
log_cdf.dist_na <- cdf.dist_na

#' @export
generate.dist_na <- function(x, times, ...){
  # Sampling a missing distribution yields `times` NA values.
  rep(NA_real_, times)
}

#' @export
mean.dist_na <- function(x, ...)
NA_real_

#' @export
covariance.dist_na <- function(x, ...)
  NA_real_

#' @export
skewness.dist_na <- function(x, ...)
  NA_real_

#' @export
kurtosis.dist_na <- function(x, ...)
  NA_real_

#' @export
Math.dist_na <- function(x, ...) {
  # Mathematical transformations of a missing distribution stay missing.
  x
}

#' @export
Ops.dist_na <- function(e1, e2) {
  # Arithmetic involving a missing distribution yields missing distributions,
  # recycled to the length of the longer operand.
  dist_missing(max(length(e1), length(e2)))
}
distributional/R/dist_categorical.R0000644000175000017500000000651414151532232017270 0ustar nileshnilesh
#' The Categorical distribution
#'
#' \lifecycle{stable}
#'
#' Categorical distributions are used to represent events with multiple
#' outcomes, such as what number appears on the roll of a dice. This is also
#' referred to as the 'generalised Bernoulli' or 'multinoulli' distribution.
#' The Categorical distribution is a special case of the [Multinomial()]
#' distribution with `n = 1`.
#'
#' @param prob A list of probabilities of observing each outcome category.
#' @param outcomes The values used to represent each outcome.
#' @details
#'
#' We recommend reading this documentation on
#' , where the math
#' will render nicely.
#'
#' In the following, let \eqn{X} be a Categorical random variable with
#' probability parameters `p` = \eqn{\{p_1, p_2, \ldots, p_k\}}.
#'
#' The Categorical probability distribution is widely used to model the
#' occurrence of multiple events. A simple example is the roll of a dice, where
#' \eqn{p = \{1/6, 1/6, 1/6, 1/6, 1/6, 1/6\}} giving equal chance of observing
#' each number on a 6 sided dice.
#'
#' **Support**: \eqn{\{1, \ldots, k\}}{{1, ..., k}}
#'
#' **Mean**: \eqn{p}
#'
#' **Variance**: \eqn{p \cdot (1 - p) = p \cdot q}{p (1 - p)}
#'
#' **Probability mass function (p.m.f)**:
#'
#' \deqn{
#'   P(X = i) = p_i
#' }{
#'   P(X = i) = p_i
#' }
#'
#' **Cumulative distribution function (c.d.f)**:
#'
#' The cdf() of a categorical distribution is undefined as the outcome categories aren't ordered.
#' #' @examples #' dist <- dist_categorical(prob = list(c(0.05, 0.5, 0.15, 0.2, 0.1), c(0.3, 0.1, 0.6))) #' #' dist #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' # The outcomes aren't ordered, so many statistics are not applicable. #' cdf(dist, 4) #' quantile(dist, 0.7) #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' dist <- dist_categorical( #' prob = list(c(0.05, 0.5, 0.15, 0.2, 0.1), c(0.3, 0.1, 0.6)), #' outcomes = list(letters[1:5], letters[24:26]) #' ) #' #' generate(dist, 10) #' #' density(dist, "a") #' density(dist, "z", log = TRUE) #' #' @export dist_categorical <- function(prob, outcomes = NULL){ prob <- lapply(prob, function(x) x/sum(x)) prob <- as_list_of(prob, .ptype = double()) if(is.null(outcomes)) { new_dist(p = prob, class = "dist_categorical") } else { new_dist(p = prob, x = outcomes, class = "dist_categorical") } } #' @export format.dist_categorical <- function(x, digits = 2, ...){ sprintf( "Categorical[%s]", format(length(x[["p"]]), digits = digits, ...) ) } #' @export density.dist_categorical <- function(x, at, ...){ if(!is.null(x[["x"]])) at <- match(at, x[["x"]]) x[["p"]][at] } #' @export quantile.dist_categorical <- function(x, p, ...){ NA_real_ } #' @export cdf.dist_categorical <- function(x, q, ...){ NA_real_ } #' @export generate.dist_categorical <- function(x, times, ...){ z <- sample( x = seq_along(x[["p"]]), size = times, prob = x[["p"]], replace = TRUE ) if(is.null(x[["x"]])) return(z) x[["x"]][z] } #' @export mean.dist_categorical <- function(x, ...){ NA_real_ } #' @export covariance.dist_categorical <- function(x, ...){ NA_real_ } #' @export skewness.dist_categorical <- function(x, ...) { NA_real_ } #' @export kurtosis.dist_categorical <- function(x, ...) 
{ NA_real_ } distributional/R/dist_percentile.R0000644000175000017500000000217414151532232017143 0ustar nileshnilesh#' Percentile distribution #' #' \lifecycle{maturing} #' #' @param x A list of values #' @param percentile A list of percentiles #' #' @examples #' dist <- dist_normal() #' percentiles <- seq(0.01, 0.99, by = 0.01) #' x <- vapply(percentiles, quantile, double(1L), x = dist) #' dist_percentile(list(x), list(percentiles*100)) #' #' @export dist_percentile <- function(x, percentile){ x <- as_list_of(x, .ptype = double()) percentile <- as_list_of(percentile, .ptype = double()) new_dist(x = x, percentile = percentile, class = "dist_percentile") } #' @export format.dist_percentile <- function(x, ...){ sprintf( "percentile[%s]", length(x[["x"]]) ) } # #' @export # density.dist_percentile <- function(x, at, ...){ # } # #' @export quantile.dist_percentile <- function(x, p, ...){ stats::approx(x = x[["percentile"]]/100, y = x[["x"]], xout = p)$y } #' @export cdf.dist_percentile <- function(x, q, ...){ stats::approx(x = x[["x"]], y = x[["percentile"]]/100, xout = q)$y } #' @export generate.dist_percentile <- function(x, times, ...){ stats::approx(x[["percentile"]]/100, x[["x"]], xout=stats::runif(times,0,1))$y } distributional/R/dist_logarithmic.R0000644000175000017500000000335014164726063017323 0ustar nileshnilesh#' The Logarithmic distribution #' #' \lifecycle{stable} #' #' @inheritParams actuar::dlogarithmic #' #' @seealso [actuar::Logarithmic] #' #' @examples #' dist <- dist_logarithmic(prob = c(0.33, 0.66, 0.99)) #' dist #' #' @examplesIf requireNamespace("actuar", quietly = TRUE) #' mean(dist) #' variance(dist) #' support(dist) #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' @name dist_logarithmic #' @export dist_logarithmic <- function(prob){ prob <- vec_cast(prob, double()) if(any((prob < 0) | (prob > 1))){ abort("The prob parameter of a Logarithmic distribution must be between 
0 and 1.") } new_dist(p = prob, class = "dist_logarithmic") } #' @export format.dist_logarithmic <- function(x, digits = 2, ...){ sprintf( "Logarithmic(%s)", format(x[["p"]], digits = digits, ...) ) } #' @export density.dist_logarithmic <- function(x, at, ...){ require_package("actuar") actuar::dlogarithmic(at, x[["p"]]) } #' @export log_density.dist_logarithmic <- function(x, at, ...){ require_package("actuar") actuar::dlogarithmic(at, x[["p"]], log = TRUE) } #' @export quantile.dist_logarithmic <- function(x, p, ...){ require_package("actuar") actuar::qlogarithmic(p, x[["p"]]) } #' @export cdf.dist_logarithmic <- function(x, q, ...){ require_package("actuar") actuar::plogarithmic(q, x[["p"]]) } #' @export generate.dist_logarithmic <- function(x, times, ...){ require_package("actuar") actuar::rlogarithmic(times, x[["p"]]) } #' @export mean.dist_logarithmic <- function(x, ...){ p <- x[["p"]] (-1/(log(1-p)))*(p/(1-p)) } #' @export covariance.dist_logarithmic <- function(x, ...){ p <- x[["p"]] -(p^2 + p*log(1-p))/((1-p)*log(1-p))^2 } distributional/R/dist_geometric.R0000644000175000017500000000545014151532232016767 0ustar nileshnilesh#' The Geometric Distribution #' #' The Geometric distribution can be thought of as a generalization #' of the [dist_bernoulli()] distribution where we ask: "if I keep flipping a #' coin with probability `p` of heads, what is the probability I need #' \eqn{k} flips before I get my first heads?" The Geometric #' distribution is a special case of Negative Binomial distribution. #' \lifecycle{stable} #' #' @inheritParams stats::dgeom #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a Geometric random variable with #' success probability `p` = \eqn{p}. Note that there are multiple #' parameterizations of the Geometric distribution. 
#' #' **Support**: 0 < p < 1, \eqn{x = 0, 1, \dots} #' #' **Mean**: \eqn{\frac{1-p}{p}} #' #' **Variance**: \eqn{\frac{1-p}{p^2}} #' #' **Probability mass function (p.m.f)**: #' #' \deqn{ #' P(X = x) = p(1-p)^x, #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' \deqn{ #' P(X \le x) = 1 - (1-p)^{x+1} #' } #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = \frac{pe^t}{1 - (1-p)e^t} #' }{ #' E(e^{tX}) = \frac{pe^t}{1 - (1-p)e^t} #' } #' #' @seealso [stats::Geometric] #' #' @examples #' dist <- dist_geometric(prob = c(0.2, 0.5, 0.8)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' @name dist_geometric #' @export dist_geometric <- function(prob){ prob <- vec_cast(prob, double()) if(any((prob < 0) | (prob > 1))){ abort("The prob parameter of an Geometric distribution must be between 0 and 1.") } new_dist(p = prob, class = "dist_geometric") } #' @export format.dist_geometric <- function(x, digits = 2, ...){ sprintf( "Geometric(%s)", format(x[["p"]], digits = digits, ...) ) } #' @export density.dist_geometric <- function(x, at, ...){ stats::dgeom(at, x[["p"]]) } #' @export log_density.dist_geometric <- function(x, at, ...){ stats::dgeom(at, x[["p"]], log = TRUE) } #' @export quantile.dist_geometric <- function(x, p, ...){ stats::qgeom(p, x[["p"]]) } #' @export cdf.dist_geometric <- function(x, q, ...){ stats::pgeom(q, x[["p"]]) } #' @export generate.dist_geometric <- function(x, times, ...){ stats::rgeom(times, x[["p"]]) } #' @export mean.dist_geometric <- function(x, ...){ 1/x[["p"]] - 1 } #' @export covariance.dist_geometric <- function(x, ...){ (1 - x[["p"]])/x[["p"]]^2 } #' @export skewness.dist_geometric <- function(x, ...) (2 - x[["p"]]) / sqrt(1 - x[["p"]]) #' @export kurtosis.dist_geometric <- function(x, ...) 
6 + (x[["p"]]^2 / (1 - x[["p"]])) distributional/R/dist_multinomial.R0000644000175000017500000000703114151532232017340 0ustar nileshnilesh#' The Multinomial distribution #' #' \lifecycle{maturing} #' #' The multinomial distribution is a generalization of the binomial #' distribution to multiple categories. It is perhaps easiest to think #' that we first extend a [dist_bernoulli()] distribution to include more #' than two categories, resulting in a [dist_categorical()] distribution. #' We then extend repeat the Categorical experiment several (\eqn{n}) #' times. #' #' @param size The number of draws from the Categorical distribution. #' @param prob The probability of an event occurring from each draw. #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X = (X_1, ..., X_k)} be a Multinomial #' random variable with success probability `p` = \eqn{p}. Note that #' \eqn{p} is vector with \eqn{k} elements that sum to one. Assume #' that we repeat the Categorical experiment `size` = \eqn{n} times. #' #' **Support**: Each \eqn{X_i} is in \eqn{{0, 1, 2, ..., n}}. #' #' **Mean**: The mean of \eqn{X_i} is \eqn{n p_i}. #' #' **Variance**: The variance of \eqn{X_i} is \eqn{n p_i (1 - p_i)}. #' For \eqn{i \neq j}, the covariance of \eqn{X_i} and \eqn{X_j} #' is \eqn{-n p_i p_j}. #' #' **Probability mass function (p.m.f)**: #' #' \deqn{ #' P(X_1 = x_1, ..., X_k = x_k) = \frac{n!}{x_1! x_2! ... x_k!} p_1^{x_1} \cdot p_2^{x_2} \cdot ... \cdot p_k^{x_k} #' }{ #' P(X_1 = x_1, ..., X_k = x_k) = n! / (x_1! x_2! ... x_k!) p_1^x_1 p_2^x_2 ... p_k^x_k #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' Omitted for multivariate random variables for the time being. #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = \left(\sum_{i=1}^k p_i e^{t_i}\right)^n #' }{ #' E(e^(tX)) = (p_1 e^t_1 + p_2 e^t_2 + ... 
+ p_k e^t_k)^n #' } #' #' @seealso [stats::Multinomial] #' #' @examples #' dist <- dist_multinomial(size = c(4, 3), prob = list(c(0.3, 0.5, 0.2), c(0.1, 0.5, 0.4))) #' #' dist #' mean(dist) #' variance(dist) #' #' generate(dist, 10) #' #' # TODO: Needs fixing to support multiple inputs #' # density(dist, 2) #' # density(dist, 2, log = TRUE) #' #' @name dist_multinomial #' @export dist_multinomial <- function(size, prob){ size <- vec_cast(size, double()) prob <- lapply(prob, function(x) x/sum(x)) prob <- as_list_of(prob, .ptype = double()) new_dist(s = size, p = prob, class = "dist_multinomial") } #' @export format.dist_multinomial <- function(x, digits = 2, ...){ sprintf( "Multinomial(%s)[%s]", format(x[["s"]], digits = digits, ...), format(length(x[["p"]]), digits = digits, ...) ) } #' @export density.dist_multinomial <- function(x, at, ...){ if(is.list(at)) return(vapply(at, density, numeric(1L), x = x, ...)) stats::dmultinom(at, x[["s"]], x[["p"]]) } #' @export log_density.dist_multinomial <- function(x, at, ...){ stats::dmultinom(at, x[["s"]], x[["p"]], log = TRUE) } #' @export generate.dist_multinomial <- function(x, times, ...){ t(stats::rmultinom(times, x[["s"]], x[["p"]])) } #' @export mean.dist_multinomial <- function(x, ...){ matrix(x[["s"]]*x[["p"]], nrow = 1) } #' @export covariance.dist_multinomial <- function(x, ...){ s <- x[["s"]] p <- x[["p"]] v <- numeric(length(p)^2) for(i in seq_along(p)){ for(j in seq_along(p)){ v[(i-1)*length(p) + j] <- if(i == j) s*p[i]*(1-p[j]) else -s*p[i]*p[j] } } list(matrix(v, nrow = length(p))) } #' @export dim.dist_multinomial <- function(x){ length(x[["p"]]) } distributional/R/distribution.R0000644000175000017500000003512214165036304016510 0ustar nileshnilesh#' Create a new distribution #' #' @param ... Parameters of the distribution (named). #' @param class The class of the distribution for S3 dispatch. #' @param dimnames The names of the variables in the distribution (optional). 
#' #' @export new_dist <- function(..., class = NULL, dimnames = NULL){ args <- transpose(vctrs::vec_recycle_common(...)) wrap_dist( lapply(args, structure, class = c(class, "dist_default")), dimnames = dimnames ) } wrap_dist <- function(x, dimnames = NULL){ vctrs::new_vctr(x, vars = dimnames, class = "distribution") } #' @export vec_ptype_abbr.distribution <- function(x, ...){ "dist" } #' @export format.distribution <- function(x, ...){ x <- vec_data(x) out <- vapply(x, format, character(1L), ...) out[vapply(x, is.null, logical(1L))] <- "NA" out } #' @export `dimnames<-.distribution` <- function(x, value){ attr(x, "vars") <- value x } #' @export dimnames.distribution <- function(x){ attr(x, "vars") } #' @export `[[.distribution` <- `[` #' The probability density/mass function #' #' \lifecycle{stable} #' #' Computes the probability density function for a continuous distribution, or #' the probability mass function for a discrete distribution. #' #' @param x The distribution(s). #' @param at The point at which to compute the density/mass. #' @param ... Additional arguments passed to methods. #' @param log If `TRUE`, probabilities will be given as log probabilities. #' #' @importFrom stats density #' @export density.distribution <- function(x, at, ..., log = FALSE){ if(log) return(log_density(x, at, ...)) at <- arg_listable(at, .ptype = NULL) dist_apply(x, density, at = at, ...) } log_density <- function(x, at, ...) { ellipsis::check_dots_used() UseMethod("log_density") } #' @export log_density.distribution <- function(x, at, ...){ at <- arg_listable(at, .ptype = NULL) dist_apply(x, log_density, at = at, ...) } #' Distribution Quantiles #' #' \lifecycle{stable} #' #' Computes the quantiles of a distribution. #' #' @inheritParams density.distribution #' @param p The probability of the quantile. #' @param ... Additional arguments passed to methods. 
#'
#' @importFrom stats quantile
#' @export
quantile.distribution <- function(x, p, ..., log = FALSE){
  # When log-probabilities are supplied, dispatch to the log_quantile generic.
  if(log) return(log_quantile(x, p, ...))
  p <- arg_listable(p, .ptype = double())
  dist_apply(x, quantile, p = p, ...)
}

# Generic for quantiles computed from log-probabilities.
log_quantile <- function(x, q, ...) {
  ellipsis::check_dots_used()
  UseMethod("log_quantile")
}

#' @export
log_quantile.distribution <- function(x, p, ...){
  # BUG FIX: this method previously called `vec_assert(q, double(), 1L)`, but
  # no `q` exists in this scope (the argument is `p`), so every call to
  # quantile(x, p, log = TRUE) failed with "object 'q' not found".
  # `arg_listable()` already validates/shapes `p`, so the stray assertion is
  # simply removed.
  p <- arg_listable(p, .ptype = double())
  dist_apply(x, log_quantile, p = p, ...)
}

#' The cumulative distribution function
#'
#' \lifecycle{stable}
#'
#' @inheritParams density.distribution
#' @param q The quantile at which the cdf is calculated.
#'
#' @name cdf
#' @export
cdf <- function(x, q, ..., log = FALSE){
  # When log-probabilities are requested, dispatch to the log_cdf generic.
  if(log) return(log_cdf(x, q, ...))
  ellipsis::check_dots_used()
  UseMethod("cdf")
}

#' @rdname cdf
#' @export
cdf.distribution <- function(x, q, ...){
  q <- arg_listable(q, .ptype = NULL)
  dist_apply(x, cdf, q = q, ...)
}

# Generic for log-scale cumulative distribution functions.
log_cdf <- function(x, q, ...) {
  ellipsis::check_dots_used()
  UseMethod("log_cdf")
}

#' @export
log_cdf.distribution <- function(x, q, ...){
  q <- arg_listable(q, .ptype = NULL)
  dist_apply(x, log_cdf, q = q, ...)
}

#' Randomly sample values from a distribution
#'
#' \lifecycle{stable}
#'
#' Generate random samples from probability distributions.
#'
#' @param x The distribution(s).
#' @param times The number of samples.
#' @param ... Additional arguments used by methods.
#'
#' @export
generate.distribution <- function(x, times, ...){
  times <- vec_cast(times, integer())
  times <- vec_recycle(times, size = length(x))
  x <- vec_data(x)
  # Substitute dist_na for missing (NULL) distributions so that generate()
  # returns NA samples instead of erroring on them.
  dist_is_na <- vapply(x, is.null, logical(1L))
  x[dist_is_na] <- list(structure(list(), class = c("dist_na", "dist_default")))
  mapply(generate, x, times = times, ..., SIMPLIFY = FALSE)
  # dist_apply(x, generate, times = times, ...) # Needs work to structure MV appropriately.
}

#' The (log) likelihood of a sample matching a distribution
#'
#' \lifecycle{maturing}
#'
#' @param x The distribution(s).
#' @param ...
Additional arguments used by methods. #' #' @name likelihood #' @export likelihood <- function (x, ...){ ellipsis::check_dots_used() UseMethod("likelihood") } #' @rdname likelihood #' @param sample A list of sampled values to compare to distribution(s). #' @param log If `TRUE`, the log-likelihood will be computed. #' @export likelihood.distribution <- function(x, sample, ..., log = FALSE){ if(vec_is(sample, numeric())) { warn("The `sample` argument of `likelihood()` should contain a list of numbers. The same sample will be used for each distribution, i.e. `sample = list(sample)`.") sample <- list(sample) } if(log){ dist_apply(x, log_likelihood, sample = sample, ...) } else { dist_apply(x, likelihood, sample = sample, ...) } } #' @rdname likelihood #' @export log_likelihood <- function(x, ...) { ellipsis::check_dots_used() UseMethod("log_likelihood") } #' @export log_likelihood.distribution <- function(x, sample, ...){ dist_apply(x, log_likelihood, sample = sample, ...) } #' Extract the parameters of a distribution #' #' \lifecycle{experimental} #' #' @param x The distribution(s). #' @param ... Additional arguments used by methods. #' #' @name parameters #' @examples #' dist <- c( #' dist_normal(1:2), #' dist_poisson(3), #' dist_multinomial(size = c(4, 3), #' prob = list(c(0.3, 0.5, 0.2), c(0.1, 0.5, 0.4))) #' ) #' parameters(dist) #' @export parameters <- function(x, ...) { ellipsis::check_dots_used() UseMethod("parameters") } #' @rdname parameters #' @export parameters.distribution <- function(x, ...) { x <- lapply(vec_data(x), parameters) x <- lapply(x, function(z) data_frame(!!!z, .name_repair = "minimal")) vec_rbind(!!!x) } #' Extract the name of the distribution family #' #' \lifecycle{experimental} #' #' @param object The distribution(s). #' @param ... Additional arguments used by methods. 
#' #' @examples #' dist <- c( #' dist_normal(1:2), #' dist_poisson(3), #' dist_multinomial(size = c(4, 3), #' prob = list(c(0.3, 0.5, 0.2), c(0.1, 0.5, 0.4))) #' ) #' family(dist) #' #' @importFrom stats family #' @export family.distribution <- function(object, ...) { vapply(vec_data(object), family, character(1L)) } #' Region of support of a distribution #' #' \lifecycle{experimental} #' #' @param x The distribution(s). #' @param ... Additional arguments used by methods. #' #' @name support #' @export support <- function(x, ...) { ellipsis::check_dots_used() UseMethod("support") } #' @rdname support #' @export support.distribution <- function(x, ...) { dist_apply(x, support, ...) } #' Mean of a probability distribution #' #' \lifecycle{stable} #' #' Returns the empirical mean of the probability distribution. If the method #' does not exist, the mean of a random sample will be returned. #' #' @param x The distribution(s). #' @param ... Additional arguments used by methods. #' #' @export mean.distribution <- function(x, ...){ dist_apply(x, mean, ...) } #' Variance #' #' A generic function for computing the variance of an object. #' #' @param x An object. #' @param ... Additional arguments used by methods. #' #' @details #' #' The implementation of `variance()` for numeric variables coerces the input to #' a vector then uses [`stats::var()`] to compute the variance. This means that, #' unlike [`stats::var()`], if `variance()` is passed a matrix or a 2-dimensional #' array, it will still return the variance ([`stats::var()`] returns the #' covariance matrix in that case). #' #' @seealso [`variance.distribution()`], [`covariance()`] #' #' @export variance <- function(x, ...){ UseMethod("variance") } #' @export variance.default <- function(x, ...){ stop( "The variance() method is not supported for objects of type ", paste(deparse(class(x)), collapse = "") ) } #' @rdname variance #' @export variance.numeric <- function(x, ...){ stats::var(as.vector(x), ...) 
} #' @rdname variance #' @export variance.matrix <- function(x, ...){ diag(stats::cov(x, ...)) } #' Variance of a probability distribution #' #' \lifecycle{stable} #' #' Returns the empirical variance of the probability distribution. If the method #' does not exist, the variance of a random sample will be returned. #' #' @param x The distribution(s). #' @param ... Additional arguments used by methods. #' #' @export variance.distribution <- function(x, ...){ dist_apply(x, variance, ...) } #' Covariance #' #' A generic function for computing the covariance of an object. #' #' @param x An object. #' @param ... Additional arguments used by methods. #' #' @seealso [`covariance.distribution()`], [`variance()`] #' #' @export covariance <- function(x, ...){ UseMethod("covariance") } #' @export covariance.default <- function(x, ...){ stop( "The covariance() method is not supported for objects of type ", paste(deparse(class(x)), collapse = "") ) } #' @rdname variance #' @export covariance.numeric <- function(x, ...){ stats::cov(x, ...) } #' Covariance of a probability distribution #' #' \lifecycle{stable} #' #' Returns the empirical covariance of the probability distribution. If the #' method does not exist, the covariance of a random sample will be returned. #' #' @param x The distribution(s). #' @param ... Additional arguments used by methods. #' #' @export covariance.distribution <- function(x, ...){ dist_apply(x, covariance, ...) } #' Skewness of a probability distribution #' #' \lifecycle{stable} #' #' @param x The distribution(s). #' @param ... Additional arguments used by methods. #' #' @export skewness <- function(x, ...) { ellipsis::check_dots_used() UseMethod("skewness") } #' @rdname skewness #' @export skewness.distribution <- function(x, ...){ dist_apply(x, skewness, ...) } #' Kurtosis of a probability distribution #' #' \lifecycle{stable} #' #' @param x The distribution(s). #' @param ... Additional arguments used by methods. 
#' #' @export kurtosis <- function(x, ...) { ellipsis::check_dots_used() UseMethod("kurtosis") } #' @rdname kurtosis #' @export kurtosis.distribution <- function(x, ...){ dist_apply(x, kurtosis, ...) } #' Median of a probability distribution #' #' \lifecycle{stable} #' #' Returns the median (50th percentile) of a probability distribution. This is #' equivalent to `quantile(x, p=0.5)`. #' #' @param x The distribution(s). #' @param na.rm Unused, included for consistency with the generic function. #' @param ... Additional arguments used by methods. #' #' @importFrom stats median #' @export median.distribution <- function(x, na.rm = FALSE, ...){ dist_apply(x, median, na.rm = na.rm, ...) } #' Probability intervals of a probability distribution #' #' \lifecycle{maturing} #' #' Returns a `hilo` central probability interval with probability coverage of #' `size`. By default, the distribution's [`quantile()`] will be used to compute #' the lower and upper bound for a centered interval #' #' @param x The distribution(s). #' @param size The size of the interval (between 0 and 100). #' @param ... Additional arguments used by methods. #' #' @seealso [`hdr.distribution()`] #' #' @importFrom stats median #' @export hilo.distribution <- function(x, size = 95, ...){ size <- arg_listable(size, .ptype = double()) dist_apply(x, hilo, size = size, ...) } #' Highest density regions of probability distributions #' #' \lifecycle{experimental} #' #' This function is highly experimental and will change in the future. In #' particular, improved functionality for object classes and visualisation tools #' will be added in a future release. #' #' Computes minimally sized probability intervals highest density regions. #' #' @param x The distribution(s). #' @param size The size of the interval (between 0 and 100). #' @param n The resolution used to estimate the distribution's density. #' @param ... Additional arguments used by methods. 
#' #' @export hdr.distribution <- function(x, size = 95, n = 512, ...){ size <- arg_listable(size, .ptype = double()) dist_apply(x, hdr, size = size, n = n, ...) } #' @export sum.distribution <- function(x, ...){ vec_restore(list(Reduce("+", x)), x) } #' @method vec_arith distribution #' @export vec_arith.distribution <- function(op, x, y, ...){ UseMethod("vec_arith.distribution", y) } #' @method vec_arith.distribution default #' @export vec_arith.distribution.default <- function(op, x, y, ...){ dist_is_na <- vapply(x, is.null, logical(1L)) x[dist_is_na] <- list(structure(list(), class = c("dist_na", "dist_default"))) if(is_empty(y)){ out <- lapply(vec_data(x), get(op)) } else { x <- vec_recycle_common(x = x, y = y) y <- x[["y"]] if(is_distribution(y)) y <- vec_data(y) x <- x[["x"]] out <- mapply(get(op), x = vec_data(x), y = y, SIMPLIFY = FALSE) } vec_restore(out, x) } #' @method vec_arith.numeric distribution #' @export vec_arith.numeric.distribution <- function(op, x, y, ...){ x <- vec_recycle_common(x = x, y = y) y <- x[["y"]] x <- x[["x"]] out <- mapply(get(op), x = x, y = vec_data(y), SIMPLIFY = FALSE) vec_restore(out, y) } #' @method vec_math distribution #' @export vec_math.distribution <- function(.fn, .x, ...) { if(.fn %in% c("is.nan", "is.infinite")) return(rep_len(FALSE, length(.x))) if(.fn == "is.finite") return(rep_len(TRUE, length(.x))) out <- lapply(vec_data(.x), get(.fn), ...) vec_restore(out, .x) } #' @export vec_ptype2.distribution.distribution <- function(x, y, ...){ if(!identical(dimnames(x), dimnames(y))){ abort("Distributions must have the same `dimnames` to be combined.") } x } #' @export vec_ptype2.double.distribution <- function(x, y, ...) new_dist() #' @export vec_ptype2.distribution.double <- function(x, y, ...) new_dist() #' @export vec_ptype2.integer.distribution <- function(x, y, ...) new_dist() #' @export vec_ptype2.distribution.integer <- function(x, y, ...) 
new_dist() #' @export vec_cast.distribution.distribution <- function(x, to, ...){ dimnames(x) <- dimnames(to) x } #' @export vec_cast.distribution.double <- function(x, to, ...){ x <- dist_degenerate(x) dimnames(x) <- dimnames(to) x } #' @export vec_cast.distribution.integer <- vec_cast.distribution.double #' @export vec_cast.character.distribution <- function(x, to, ...){ format(x) } #' Test if the object is a distribution #' #' @description #' This function returns `TRUE` for distributions and `FALSE` for all other objects. #' \lifecycle{stable} #' #' @param x An object. #' #' @return TRUE if the object inherits from the distribution class. #' @rdname is-distribution #' @examples #' dist <- dist_normal() #' is_distribution(dist) #' is_distribution("distributional") #' @export is_distribution <- function(x) { inherits(x, "distribution") } distributional/R/dist_wrap.R0000644000175000017500000000703514151532232015763 0ustar nileshnilesh#' Create a distribution from p/d/q/r style functions #' #' \lifecycle{experimental} #' #' If a distribution is not yet supported, you can vectorise p/d/q/r functions #' using this function. `dist_wrap()` stores the distributions parameters, and #' provides wrappers which call the appropriate p/d/q/r functions. #' #' Using this function to wrap a distribution should only be done if the #' distribution is not yet available in this package. If you need a distribution #' which isn't in the package yet, consider making a request at #' https://github.com/mitchelloharawild/distributional/issues. #' #' @param dist The name of the distribution used in the functions (name that is #' prefixed by p/d/q/r) #' @param ... Named arguments used to parameterise the distribution. #' @param package The package from which the distribution is provided. If NULL, #' the calling environment's search path is used to find the distribution #' functions. Alternatively, an arbitrary environment can also be provided here. 
# #' @param p,d,q,r The functions used to compute the p/d/q/r # #' (pdf/cdf/quantile/generate) #' #' @examples #' dist <- dist_wrap("norm", mean = 1:3, sd = c(3, 9, 2)) #' #' density(dist, 1) # dnorm() #' cdf(dist, 4) # pnorm() #' quantile(dist, 0.975) # qnorm() #' generate(dist, 10) # rnorm() #' #' library(actuar) #' dist <- dist_wrap("invparalogis", package = "actuar", shape = 2, rate = 2) #' density(dist, 1) # actuar::dinvparalogis() #' cdf(dist, 4) # actuar::pinvparalogis() #' quantile(dist, 0.975) # actuar::qinvparalogis() #' generate(dist, 10) # actuar::rinvparalogis() #' #' @export dist_wrap <- function(dist, ..., package = NULL){ vec_assert(dist, character(), 1L) if(is.null(package)) { env <- rlang::caller_env() } else if (is.character(package)) { env <- rlang::pkg_env(package) } else { env <- as.environment(package) } par <- vec_recycle_common(dist = dist, env = list(env), ...) new_dist(!!!par, class = "dist_wrap") } #' @export format.dist_wrap <- function(x, ...){ sprintf( "%s(%s)", x[["dist"]], paste0(x[-(1:2)], collapse = ", ") ) } #' @export density.dist_wrap <- function(x, at, ...){ fn <- get(paste0("d", x[["dist"]][[1]]), envir = x$env, mode = "function") # Remove distribution name and environment from parameters par <- x[-(1:2)] do.call(fn, c(list(at), par)) } #' @export log_density.dist_wrap <- function(x, at, ...){ fn <- get(paste0("d", x[["dist"]][[1]]), envir = x$env, mode = "function") # Remove distribution name and environment from parameters par <- x[-(1:2)] # Use density(log = TRUE) if supported if(is.null(formals(fn)$log)){ log(do.call(fn, c(list(at), par))) } else { do.call(fn, c(list(at), par, log = TRUE)) } } #' @export cdf.dist_wrap <- function(x, q, ...){ fn <- get(paste0("p", x[["dist"]][[1]]), envir = x$env, mode = "function") # Remove distribution name and environment from parameters par <- x[-(1:2)] do.call(fn, c(list(q), par)) } #' @export quantile.dist_wrap <- function(x, p, ...){ fn <- get(paste0("q", x[["dist"]][[1]]), envir = 
x$env, mode = "function") # Remove distribution name and environment from parameters par <- x[-(1:2)] do.call(fn, c(list(p), par)) } #' @export generate.dist_wrap <- function(x, times, ...){ fn <- get(paste0("r", x[["dist"]][[1]]), envir = x$env, mode = "function") # Remove distribution name and environment from parameters par <- x[-(1:2)] do.call(fn, c(list(times), par)) } #' @export parameters.dist_wrap <- function(x, ...) { # All parameters except distribution environment x[-2L] } distributional/R/transformed.R0000755000175000017500000000770714164770357016344 0ustar nileshnilesh#' Modify a distribution with a transformation #' #' \lifecycle{experimental} #' #' The [`density()`], [`mean()`], and [`variance()`] methods are approximate as #' they are based on numerical derivatives. #' #' @param dist A univariate distribution vector. #' @param transform A function used to transform the distribution. This #' transformation should be monotonic over appropriate domain. #' @param inverse The inverse of the `transform` function. #' #' @examples #' # Create a log normal distribution #' dist <- dist_transformed(dist_normal(0, 0.5), exp, log) #' density(dist, 1) # dlnorm(1, 0, 0.5) #' cdf(dist, 4) # plnorm(4, 0, 0.5) #' quantile(dist, 0.1) # qlnorm(0.1, 0, 0.5) #' generate(dist, 10) # rlnorm(10, 0, 0.5) #' #' @export dist_transformed <- function(dist, transform, inverse){ vec_is(dist, new_dist()) stopifnot(is.function(transform)) stopifnot(is.function(inverse)) new_dist(dist = vec_data(dist), transform = list(transform), inverse = list(inverse), dimnames = dimnames(dist), class = "dist_transformed") } #' @export format.dist_transformed <- function(x, ...){ sprintf( "t(%s)", format(x[["dist"]]) ) } #' @export density.dist_transformed <- function(x, at, ...){ density(x[["dist"]], x[["inverse"]](at))*abs(vapply(at, numDeriv::jacobian, numeric(1L), func = x[["inverse"]])) } #' @export cdf.dist_transformed <- function(x, q, ...){ cdf(x[["dist"]], x[["inverse"]](q), ...) 
} #' @export quantile.dist_transformed <- function(x, p, ...){ x[["transform"]](quantile(x[["dist"]], p, ...)) } #' @export generate.dist_transformed <- function(x, ...){ x[["transform"]](generate(x[["dist"]], ...)) } #' @export mean.dist_transformed <- function(x, ...){ mu <- mean(x[["dist"]]) sigma2 <- variance(x[["dist"]]) if(is.na(sigma2)){ # warning("Could not compute the transformed distribution's mean as the base distribution's variance is unknown. The transformed distribution's median has been returned instead.") return(x[["transform"]](mu)) } drop( x[["transform"]](mu) + numDeriv::hessian(x[["transform"]], mu, method.args=list(d = 0.01))/2*sigma2 ) } #' @export covariance.dist_transformed <- function(x, ...){ mu <- mean(x[["dist"]]) sigma2 <- variance(x[["dist"]]) if(is.na(sigma2)) return(NA_real_) drop( numDeriv::jacobian(x[["transform"]], mu)^2*sigma2 + (numDeriv::hessian(x[["transform"]], mu, method.args=list(d = 0.01))*sigma2)^2/2 ) } #' @method Math dist_transformed #' @export Math.dist_transformed <- function(x, ...) 
{ trans <- new_function(exprs(x = ), body = expr((!!sym(.Generic))((!!x$transform)(x), !!!dots_list(...)))) inverse_fun <- get_unary_inverse(.Generic) inverse <- new_function(exprs(x = ), body = expr((!!x$inverse)((!!inverse_fun)(x, !!!dots_list(...))))) vec_data(dist_transformed(wrap_dist(list(x[["dist"]])), trans, inverse))[[1]] } #' @method Ops dist_transformed #' @export Ops.dist_transformed <- function(e1, e2) { is_dist <- c(inherits(e1, "dist_default"), inherits(e2, "dist_default")) trans <- if(all(is_dist)) { if(identical(e1$dist, e2$dist)){ new_function(exprs(x = ), expr((!!sym(.Generic))((!!e1$transform)(x), (!!e2$transform)(x)))) } else { stop(sprintf("The %s operation is not supported for <%s> and <%s>", .Generic, class(e1)[1], class(e2)[1])) } } else if(is_dist[1]){ new_function(exprs(x = ), body = expr((!!sym(.Generic))((!!e1$transform)(x), !!e2))) } else { new_function(exprs(x = ), body = expr((!!sym(.Generic))(!!e1, (!!e2$transform)(x)))) } inverse <- if(all(is_dist)) { invert_fail } else if(is_dist[1]){ inverse_fun <- get_binary_inverse_1(.Generic, e2) new_function(exprs(x = ), body = expr((!!e1$inverse)((!!inverse_fun)(x)))) } else { inverse_fun <- get_binary_inverse_2(.Generic, e1) new_function(exprs(x = ), body = expr((!!e2$inverse)((!!inverse_fun)(x)))) } vec_data(dist_transformed(wrap_dist(list(list(e1,e2)[[which(is_dist)[1]]][["dist"]])), trans, inverse))[[1]] } distributional/R/mixture.R0000644000175000017500000000460614151532232015465 0ustar nileshnilesh#' Create a mixture of distributions #' #' \lifecycle{experimental} #' #' @param ... Distributions to be used in the mixture. #' @param weights The weight of each distribution passed to `...`. #' #' @examples #' dist_mixture(dist_normal(0, 1), dist_normal(5, 2), weights = c(0.3, 0.7)) #' #' @export dist_mixture <- function(..., weights = numeric()){ dist <- dots_list(...) 
dn <- unique(lapply(dist, dimnames))
  dn <- if(length(dn) == 1) dn[[1]] else NULL
  vec_is(weights, numeric(), length(dist))
  # NOTE(review): exact float equality here rejects weights that sum to 1 only
  # up to rounding (e.g. rep(1/3, 3)) -- consider all.equal(); left unchanged
  # to preserve current behaviour.
  if(sum(weights) != 1){
    abort("Weights of a mixture model must sum to 1.")
  }
  if(any(weights < 0)){
    # Fixed typo in the user-facing error message ("mixtue" -> "mixture").
    abort("All weights in a mixture model must be non-negative.")
  }
  new_dist(dist = transpose(dist), w = list(weights), class = "dist_mixture", dimnames = dn)
}

#' @export
format.dist_mixture <- function(x, ...){
  sprintf(
    "mixture(n=%i)",
    length(x)
  )
}

#' @export
density.dist_mixture <- function(x, at, ...){
  # Vectorise over `at`; the mixture density is the weighted sum of
  # the component densities.
  if(length(at) > 1) return(vapply(at, density, numeric(1L), x = x, ...))
  sum(x[["w"]]*vapply(x[["dist"]], density, numeric(1L), at = at, ...))
}

#' @export
quantile.dist_mixture <- function(x, p, ...){
  if(length(p) > 1) return(vapply(p, quantile, numeric(1L), x = x, ...))
  # Find bounds for optimisation based on range of each quantile
  dist_q <- vapply(x[["dist"]], quantile, numeric(1L), p, ..., USE.NAMES = FALSE)
  if(vctrs::vec_unique_count(dist_q) == 1) return(dist_q[1])
  # Search the cdf() for appropriate quantile
  stats::optimise(
    function(pos) (p - cdf(x, pos, ...))^2,
    interval = c(min(dist_q), max(dist_q))
  )$minimum
}

#' @export
cdf.dist_mixture <- function(x, q, ...){
  if(length(q) > 1) return(vapply(q, cdf, numeric(1L), x = x, ...))
  sum(x[["w"]]*vapply(x[["dist"]], cdf, numeric(1L), q = q, ...))
}

#' @export
generate.dist_mixture <- function(x, times, ...){
  # Sample a component index for each draw in proportion to the weights,
  # then draw from the selected component.
  dist_idx <- .bincode(stats::runif(times), breaks = c(0, cumsum(x[["w"]])))
  r <- numeric(times)
  for(i in seq_along(x[["dist"]])){
    r_pos <- dist_idx == i
    r[r_pos] <- generate(x[["dist"]][[i]], sum(r_pos), ...)
  }
  r
}

#' @export
mean.dist_mixture <- function(x, ...){
  sum(x[["w"]]*vapply(x[["dist"]], mean, numeric(1L), ...))
}

#' @export
covariance.dist_mixture <- function(x, ...){
  # Law of total variance: E[X^2] - E[X]^2 using component means/variances.
  m <- vapply(x[["dist"]], mean, numeric(1L), ...)
  v <- vapply(x[["dist"]], variance, numeric(1L), ...)
m1 <- sum(x[["w"]]*m) m2 <- sum(x[["w"]]*(m^2 + v)) m2 - m1^2 } distributional/R/dist_beta.R0000644000175000017500000000413414151532232015722 0ustar nileshnilesh#' The Beta distribution #' #' \lifecycle{maturing} #' #' @param shape1,shape2 The non-negative shape parameters of the Beta distribution. #' #' @seealso [stats::Beta] #' #' @examples #' dist <- dist_beta(shape1 = c(0.5, 5, 1, 2, 2), shape2 = c(0.5, 1, 3, 2, 5)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @name dist_beta #' @export dist_beta <- function(shape1, shape2){ shape1 <- vec_cast(shape1, double()) shape2 <- vec_cast(shape2, double()) if(any((shape1 < 0) | shape2 < 0)){ abort("Shape parameters of a Beta distribution must be non-negative.") } new_dist(shape1 = shape1, shape2 = shape2, class = "dist_beta") } #' @export format.dist_beta <- function(x, digits = 2, ...){ sprintf( "Beta(%s, %s)", format(x[["shape1"]], digits = digits, ...), format(x[["shape2"]], digits = digits, ...) ) } #' @export density.dist_beta <- function(x, at, ...){ stats::dbeta(at, x[["shape1"]], x[["shape2"]]) } #' @export log_density.dist_beta <- function(x, at, ...){ stats::dbeta(at, x[["shape1"]], x[["shape2"]], log = TRUE) } #' @export quantile.dist_beta <- function(x, p, ...){ stats::qbeta(p, x[["shape1"]], x[["shape2"]]) } #' @export cdf.dist_beta <- function(x, q, ...){ stats::pbeta(q, x[["shape1"]], x[["shape2"]]) } #' @export generate.dist_beta <- function(x, times, ...){ stats::rbeta(times, x[["shape1"]], x[["shape2"]]) } #' @export mean.dist_beta <- function(x, ...){ x[["shape1"]]/(x[["shape1"]] + x[["shape2"]]) } #' @export covariance.dist_beta <- function(x, ...){ a <- x[["shape1"]] b <- x[["shape2"]] a*b/((a+b)^2*(a+b+1)) } #' @export skewness.dist_beta <- function(x, ...) 
{
  a <- x[["shape1"]]
  b <- x[["shape2"]]
  # Skewness of Beta(a, b) = 2(b - a) sqrt(a + b + 1) / ((a + b + 2) sqrt(ab)).
  # Previously the trailing `* sqrt(a * b)` multiplied instead of divided
  # (left-to-right precedence of `/` and `*`), giving the wrong value.
  2 * (b - a) * sqrt(a + b + 1) / ((a + b + 2) * sqrt(a * b))
}

#' @export
kurtosis.dist_beta <- function(x, ...)
{
  # Excess kurtosis of Beta(a, b):
  # 6[(a - b)^2 (a + b + 1) - ab(a + b + 2)] / [ab(a + b + 2)(a + b + 3)]
  a <- x[["shape1"]]
  b <- x[["shape2"]]
  num <- 6 * ((a - b)^2 * (a + b + 1) - (a * b) * (a + b + 2))
  denom <- a * b * (a + b + 2) * (a + b + 3)
  num / denom
}
distributional/R/dist_weibull.R0000644000175000017500000000633114151532232016453 0ustar nileshnilesh#' The Weibull distribution
#'
#' \lifecycle{stable}
#'
#' Generalization of the gamma distribution. Often used in survival and
#' time-to-event analyses.
#'
#' @inheritParams stats::dweibull
#'
#' @details
#'
#' We recommend reading this documentation on
#' , where the math
#' will render nicely.
#'
#' In the following, let \eqn{X} be a Weibull random variable with
#' shape parameter `shape` = \eqn{k} and scale parameter `scale` = \eqn{\lambda}.
#'
#' **Support**: \eqn{R^+} and zero.
#'
#' **Mean**: \eqn{\lambda \Gamma(1+1/k)}, where \eqn{\Gamma} is
#' the gamma function.
#'
#' **Variance**: \eqn{\lambda^2 [ \Gamma (1 + \frac{2}{k} ) - (\Gamma(1+ \frac{1}{k}))^2 ]}
#'
#' **Probability density function (p.d.f)**:
#'
#' \deqn{
#'   f(x) = \frac{k}{\lambda}(\frac{x}{\lambda})^{k-1}e^{-(x/\lambda)^k}, x \ge 0
#' }
#'
#' **Cumulative distribution function (c.d.f)**:
#'
#' \deqn{F(x) = 1 - e^{-(x/\lambda)^k}, x \ge 0}
#'
#' **Moment generating function (m.g.f)**:
#'
#' \deqn{\sum_{n=0}^\infty \frac{t^n\lambda^n}{n!} \Gamma(1+n/k), k \ge 1}
#'
#' @seealso [stats::Weibull]
#'
#' @examples
#' dist <- dist_weibull(shape = c(0.5, 1, 1.5, 5), scale = rep(1, 4))
#'
#' dist
#' mean(dist)
#' variance(dist)
#' skewness(dist)
#' kurtosis(dist)
#'
#' generate(dist, 10)
#'
#' density(dist, 2)
#' density(dist, 2, log = TRUE)
#'
#' cdf(dist, 4)
#'
#' quantile(dist, 0.7)
#'
#' @name dist_weibull
#' @export
dist_weibull <- function(shape, scale){
  shape <- vec_cast(shape, double())
  scale <- vec_cast(scale, double())
  if(any(shape[!is.na(shape)] < 0)){
    abort("The shape parameter of a Weibull distribution must be non-negative.")
  }
if(any(scale[!is.na(scale)] <= 0)){
    abort("The scale parameter of a Weibull distribution must be strictly positive.")
  }
  new_dist(shape = shape, scale = scale, class = "dist_weibull")
}

#' @export
format.dist_weibull <- function(x, digits = 2, ...){
  sprintf(
    "Weibull(%s, %s)",
    format(x[["shape"]], digits = digits, ...),
    format(x[["scale"]], digits = digits, ...)
  )
}

#' @export
density.dist_weibull <- function(x, at, ...){
  stats::dweibull(at, x[["shape"]], x[["scale"]])
}

#' @export
log_density.dist_weibull <- function(x, at, ...){
  stats::dweibull(at, x[["shape"]], x[["scale"]], log = TRUE)
}

#' @export
quantile.dist_weibull <- function(x, p, ...){
  stats::qweibull(p, x[["shape"]], x[["scale"]])
}

#' @export
cdf.dist_weibull <- function(x, q, ...){
  stats::pweibull(q, x[["shape"]], x[["scale"]])
}

#' @export
generate.dist_weibull <- function(x, times, ...){
  stats::rweibull(times, x[["shape"]], x[["scale"]])
}

#' @export
mean.dist_weibull <- function(x, ...){
  # E[X] = lambda * Gamma(1 + 1/k)
  x[["scale"]] * gamma(1 + 1/x[["shape"]])
}

#' @export
covariance.dist_weibull <- function(x, ...){
  # Var[X] = lambda^2 * (Gamma(1 + 2/k) - Gamma(1 + 1/k)^2)
  x[["scale"]]^2 * (gamma(1 + 2/x[["shape"]]) - gamma(1 + 1/x[["shape"]])^2)
}

#' @export
skewness.dist_weibull <- function(x, ...)
{
  mu <- mean(x)
  sigma <- sqrt(variance(x))
  r <- mu / sigma
  # Skewness = (Gamma(1 + 3/k) lambda^3 - 3 mu sigma^2 - mu^3) / sigma^3
  #          = Gamma(1 + 3/k) (lambda/sigma)^3 - 3r - r^3.
  # The final term was previously written `3^r`, which is incorrect.
  gamma(1 + 3/x[["shape"]]) * (x[["scale"]]/sigma)^3 - 3*r - r^3
}

#' @export
kurtosis.dist_weibull <- function(x, ...)
{ mu <- mean(x) sigma <- sqrt(variance(x)) gamma <- skewness(x) r <- mu / sigma (x[["scale"]]/sigma)^4 * gamma(1 + 4/x[["shape"]]) - 4*gamma*r -6*r^2 - r^4 - 3 } distributional/R/utils.R0000644000175000017500000001035114151532232015122 0ustar nileshnileshtranspose <- function(.l) { if(is_empty(.l)) return(.l) inner_names <- names(.l[[1L]]) result <- lapply(seq_along(.l[[1L]]), function(i) { lapply(.l, .subset2, i) }) set_names(result, inner_names) } transpose_c <- function(.l) { stopifnot(is_list_of(.l)) .ptype <- vec_init(attr(.l, "ptype"), 1L) if(is_empty(.l)) return(.l) inner_names <- names(.l[[1L]]) .l <- vec_recycle_common(!!!.l) result <- lapply(seq_along(.l[[1L]]), function(i) { unname(vec_c(!!!lapply(.l, vec_slice, i), .ptype = .ptype)) }) set_names(result, inner_names) } split_matrix_rows <- function(x) { lapply(seq_len(nrow(x)), function(i) x[i,,drop=FALSE]) } # Declare a function's argument as allowing list inputs for mapping values arg_listable <- function(x, .ptype) { if(is.list(x)) { x <- as_list_of(as.list(x), .ptype) if(is.matrix(attr(x, "ptype"))) { x <- lapply(x, split_matrix_rows) x <- as_list_of(x, .ptype) } if(is.null(names(x))) { names(x) <- vec_as_names(character(vec_size(x)), repair = "unique") } } else if(is.matrix(x)) { x <- split_matrix_rows(x) } else { vec_assert(x, .ptype) } # Declares list arguments to be unpacked for dist_apply() class(x) <- c("arg_listable", class(x)) x } validate_recycling <- function(x, arg) { if(is_list_of(arg)) return(lapply(arg, validate_recycling, x = x)) if(!any(vec_size(arg) == c(1, vec_size(x)))) { abort( sprintf("Cannot recycle input of size %i to match the distributions (size %i).", vec_size(arg), vec_size(x) ) ) } } dist_apply <- function(x, .f, ...){ dn <- dimnames(x) x <- vec_data(x) dist_is_na <- vapply(x, is.null, logical(1L)) x[dist_is_na] <- list(structure(list(), class = c("dist_na", "dist_default"))) args <- dots_list(...) 
is_arg_listable <- vapply(args, inherits, FUN.VALUE = logical(1L), "arg_listable") unpack_listable <- multi_arg <- FALSE if(any(is_arg_listable)) { if(sum(is_arg_listable) > 1) abort("Only distribution argument can be unpacked at a time.\nThis shouldn't happen, please report a bug at https://github.com/mitchelloharawild/distributional/issues/") arg_pos <- which(is_arg_listable) if(unpack_listable <- is_list_of(args[[arg_pos]])) { validate_recycling(x, args[[arg_pos]]) .unpack_names <- names(args[[arg_pos]]) args[[arg_pos]] <- transpose_c(args[[arg_pos]]) } else if (multi_arg <- (length(args[[arg_pos]]) > 1)){ args[[arg_pos]] <- list(unclass(args[[arg_pos]])) } } out <- do.call(mapply, c(.f, list(x), args, SIMPLIFY = FALSE, USE.NAMES = FALSE)) # out <- mapply(.f, x, ..., SIMPLIFY = FALSE, USE.NAMES = FALSE) if(unpack_listable) { # TODO - update and repair multivariate distribution i/o with unpacking out <- as_list_of(out) if (rbind_mat <- is.matrix(attr(out, "ptype"))) { out <- as_list_of(lapply(out, split_matrix_rows)) } out <- transpose_c(out) if(rbind_mat) { out <- lapply(out, function(x) `colnames<-`(do.call(rbind, x), dn)) } names(out) <- .unpack_names out <- new_data_frame(out, n = vec_size(x)) # } else if(length(out[[1]]) > 1) { # out <- suppressMessages(vctrs::vec_rbind(!!!out)) } else if(multi_arg) { out <- lapply(out, `colnames<-`, dn) } else { out <- vctrs::vec_c(!!!out) if((is.matrix(out) || is.data.frame(out)) && !is.null(dn)){ # Set dimension names colnames(out) <- dn } } out } # inlined from https://github.com/r-lib/cli/blob/master/R/utf8.R is_utf8_output <- function() { opt <- getOption("cli.unicode", NULL) if (!is_null(opt)) { isTRUE(opt) } else { l10n_info()$`UTF-8` && !is_latex_output() } } is_latex_output <- function() { if (!("knitr" %in% loadedNamespaces())) { return(FALSE) } get("is_latex_output", asNamespace("knitr"))() } require_package <- function(pkg){ if(!requireNamespace(pkg, quietly = TRUE)){ abort( sprintf('The `%s` package must be 
installed to use this functionality. It can be installed with install.packages("%s")', pkg, pkg) ) } } restore_rng <- function(expr, seed = NULL) { old_seed <- .GlobalEnv$.Random.seed # Set new temporary seed set.seed(seed) # Restore previous seed on.exit(.GlobalEnv$.Random.seed <- old_seed) expr } distributional/R/dist_normal.R0000644000175000017500000001356214164706123016312 0ustar nileshnilesh#' The Normal distribution #' #' \lifecycle{stable} #' #' The Normal distribution is ubiquitous in statistics, partially because #' of the central limit theorem, which states that sums of i.i.d. random #' variables eventually become Normal. Linear transformations of Normal #' random variables result in new random variables that are also Normal. If #' you are taking an intro stats course, you'll likely use the Normal #' distribution for Z-tests and in simple linear regression. Under #' regularity conditions, maximum likelihood estimators are #' asymptotically Normal. The Normal distribution is also called the #' gaussian distribution. #' #' @param mu The mean (location parameter) of the distribution, which is also #' the mean of the distribution. Can be any real number. #' @param sigma The standard deviation (scale parameter) of the distribution. #' Can be any positive number. If you would like a Normal distribution with #' **variance** \eqn{\sigma^2}, be sure to take the square root, as this is a #' common source of errors. #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a Normal random variable with mean #' `mu` = \eqn{\mu} and standard deviation `sigma` = \eqn{\sigma}. 
#' #' **Support**: \eqn{R}, the set of all real numbers #' #' **Mean**: \eqn{\mu} #' #' **Variance**: \eqn{\sigma^2} #' #' **Probability density function (p.d.f)**: #' #' \deqn{ #' f(x) = \frac{1}{\sqrt{2 \pi \sigma^2}} e^{-(x - \mu)^2 / 2 \sigma^2} #' }{ #' f(x) = 1 / sqrt(2 \pi \sigma^2) exp(-(x - \mu)^2 / (2 \sigma^2)) #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' The cumulative distribution function has the form #' #' \deqn{ #' F(t) = \int_{-\infty}^t \frac{1}{\sqrt{2 \pi \sigma^2}} e^{-(x - \mu)^2 / 2 \sigma^2} dx #' }{ #' F(t) = integral_{-\infty}^t 1 / sqrt(2 \pi \sigma^2) exp(-(x - \mu)^2 / (2 \sigma^2)) dx #' } #' #' but this integral does not have a closed form solution and must be #' approximated numerically. The c.d.f. of a standard Normal is sometimes #' called the "error function". The notation \eqn{\Phi(t)} also stands #' for the c.d.f. of a standard Normal evaluated at \eqn{t}. Z-tables #' list the value of \eqn{\Phi(t)} for various \eqn{t}. #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = e^{\mu t + \sigma^2 t^2 / 2} #' }{ #' E(e^(tX)) = e^(\mu t + \sigma^2 t^2 / 2) #' } #' #' @seealso [stats::Normal] #' #' @examples #' dist <- dist_normal(mu = 1:5, sigma = 3) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @export dist_normal <- function(mu = 0, sigma = 1){ mu <- vec_cast(mu, double()) sigma <- vec_cast(sigma, double()) if(any(sigma[!is.na(sigma)] < 0)){ abort("Standard deviation of a normal distribution must be non-negative") } new_dist(mu = mu, sigma = sigma, class = "dist_normal") } #' @export format.dist_normal <- function(x, digits = 2, ...){ sprintf( "N(%s, %s)", format(x[["mu"]], digits = digits, ...), format(x[["sigma"]]^2, digits = digits, ...) 
) } #' @export density.dist_normal <- function(x, at, ...){ stats::dnorm(at, x[["mu"]], x[["sigma"]]) } #' @export log_density.dist_normal <- function(x, at, ...){ stats::dnorm(at, x[["mu"]], x[["sigma"]], log = TRUE) } #' @export quantile.dist_normal <- function(x, p, ...){ stats::qnorm(p, x[["mu"]], x[["sigma"]]) } #' @export log_quantile.dist_normal <- function(x, p, ...){ stats::qnorm(p, x[["mu"]], x[["sigma"]], log.p = TRUE) } #' @export cdf.dist_normal <- function(x, q, ...){ stats::pnorm(q, x[["mu"]], x[["sigma"]]) } #' @export log_cdf.dist_normal <- function(x, q, ...){ stats::pnorm(q, x[["mu"]], x[["sigma"]], log.p = TRUE) } #' @export generate.dist_normal <- function(x, times, ...){ stats::rnorm(times, x[["mu"]], x[["sigma"]]) } #' @export mean.dist_normal <- function(x, ...){ x[["mu"]] } #' @export covariance.dist_normal <- function(x, ...){ x[["sigma"]]^2 } #' @export skewness.dist_normal <- function(x, ...) 0 #' @export kurtosis.dist_normal <- function(x, ...) 0 #' @export Ops.dist_normal <- function(e1, e2){ ok <- switch(.Generic, `+` = , `-` = , `*` = , `/` = TRUE, FALSE) if (!ok) { return(NextMethod()) } if(.Generic == "/" && inherits(e2, "dist_normal")){ abort(sprintf("Cannot divide by a normal distribution")) } if(.Generic %in% c("-", "+") && missing(e2)){ e2 <- e1 e1 <- if(.Generic == "+") 1 else -1 .Generic <- "*" } if(.Generic == "-"){ .Generic <- "+" e2 <- -e2 } else if(.Generic == "/"){ .Generic <- "*" e2 <- 1/e2 } # Ops between two normals if(inherits(e1, "dist_normal") && inherits(e2, "dist_normal")){ if(.Generic == "*"){ abort(sprintf("Multiplying two normal distributions is not supported.")) } e1$mu <- e1$mu + e2$mu e1$sigma <- sqrt(e1$sigma^2 + e2$sigma^2) return(e1) } # Ops between a normal and scalar if(inherits(e1, "dist_normal")){ dist <- e1 scalar <- e2 } else { dist <- e2 scalar <- e1 } if(!is.numeric(scalar)){ abort(sprintf("Cannot %s a `%s` with a normal distribution", switch(.Generic, `+` = "add", `-` = "subtract", `*` = 
"multiply", `/` = "divide"), class(scalar)))
  }
  if(.Generic == "+"){
    dist$mu <- dist$mu + scalar
  } else if(.Generic == "*"){
    # Scaling a Normal scales the mean and the standard deviation
    # (by |scalar|, since sd must stay non-negative).
    dist$mu <- dist$mu * scalar
    dist$sigma <- dist$sigma * abs(scalar)
  }
  dist
}

#' @method Math dist_normal
#' @export
Math.dist_normal <- function(x, ...) {
  # Shortcut to get log-normal distribution from Normal.
  if(.Generic == "exp") return(vec_data(dist_lognormal(x[["mu"]], x[["sigma"]]))[[1]])
  NextMethod()
}
distributional/R/support.R0000644000175000017500000000223214164730171015503 0ustar nileshnilesh#' Create a new support region vector
#'
#' @param x A list of prototype vectors defining the distribution type.
#' @param limits A list of value limits for the distribution.
#'
new_support_region <- function(x, limits = NULL) {
  vctrs::new_rcrd(list(x = x, lim = limits), class = "support_region")
}

#' @export
format.support_region <- function(x, ...) {
  # Abbreviate each support's prototype: Z (integers), R (reals), C (complex),
  # with "^ncol" appended for multivariate (matrix) prototypes.
  type <- vapply(field(x, "x"), function(z) {
    out <- if(is.integer(z)) "Z" else if(is.numeric(z)) "R" else if(is.complex(z)) "C" else vec_ptype_abbr(z)
    if(is.matrix(z)) {
      if(ncol(z) > 1) {
        out <- paste(out, ncol(z), sep = "^")
      }
    }
    out
  }, FUN.VALUE = character(1L))
  mapply(function(type, z) {
    if(any(is.na(z)) || all(is.infinite(z))) type
    else if (type == "Z" && identical(z[2], Inf)) {
      # Within this branch z[2] is Inf, so the natural-number labels must
      # test the lower limit. Previously the "N+" case tested z[2] == 1L,
      # which could never be TRUE here.
      if(z[1] == 0L) "N0" else if (z[1] == 1L) "N+" else paste0("[", z[1], ",", z[1]+1L, ",...,", z[2], "]")
    }
    else if (type == "R") paste0("[", z[1], ",", z[2], "]")
    else if (type == "Z") paste0("[", z[1], ",", z[1]+1L, ",...,", z[2], "]")
    else type
  }, type, field(x, "lim"))
}

#' @export
vec_ptype_abbr.support_region <- function(x, ...){
  "support"
}
distributional/R/dist_pareto.R0000644000175000017500000000376614164726032016310 0ustar nileshnilesh#' The Pareto distribution
#'
#' \lifecycle{questioning}
#'
#' @inheritParams actuar::dpareto
#'
#' @seealso [actuar::Pareto]
#'
#' @examples
#' dist <- dist_pareto(shape = c(10, 3, 2, 1), scale = rep(1, 4))
#' dist
#'
#' @examplesIf requireNamespace("actuar", quietly = TRUE)
#' mean(dist)
#' variance(dist)
#' support(dist)
#' generate(dist, 10)
#'
#' density(dist, 2)
#' density(dist, 2, log = TRUE)
#'
#' cdf(dist, 4)
#'
#' quantile(dist, 0.7)
#'
#' @name dist_pareto
#' @export
dist_pareto <- function(shape, scale){
  shape <- vec_cast(shape, double())
  scale <- vec_cast(scale, double())
  if(any(shape < 0)){
    abort("The shape parameter of a Pareto distribution must be non-negative.")
  }
  if(any(scale <= 0)){
    abort("The scale parameter of a Pareto distribution must be strictly positive.")
  }
  new_dist(shape = shape, scale = scale, class = "dist_pareto")
}

#' @export
format.dist_pareto <- function(x, digits = 2, ...){
  sprintf(
    "Pareto(%s, %s)",
    format(x[["shape"]], digits = digits, ...),
    format(x[["scale"]], digits = digits, ...)
  )
}

#' @export
density.dist_pareto <- function(x, at, ...){
  require_package("actuar")
  actuar::dpareto(at, x[["shape"]], x[["scale"]])
}

#' @export
log_density.dist_pareto <- function(x, at, ...){
  require_package("actuar")
  actuar::dpareto(at, x[["shape"]], x[["scale"]], log = TRUE)
}

#' @export
quantile.dist_pareto <- function(x, p, ...){
  require_package("actuar")
  actuar::qpareto(p, x[["shape"]], x[["scale"]])
}

#' @export
cdf.dist_pareto <- function(x, q, ...){
  require_package("actuar")
  actuar::ppareto(q, x[["shape"]], x[["scale"]])
}

#' @export
generate.dist_pareto <- function(x, times, ...){
  require_package("actuar")
  actuar::rpareto(times, x[["shape"]], x[["scale"]])
}

#' @export
mean.dist_pareto <- function(x, ...){
  # Guard added for consistency with the other dist_pareto methods, which
  # all check for actuar before use.
  require_package("actuar")
  actuar::mpareto(1, x[["shape"]], x[["scale"]])
}

#' @export
covariance.dist_pareto <- function(x, ...){
  # Var[X] = E[X^2] - E[X]^2, via actuar's raw moments.
  require_package("actuar")
  actuar::mpareto(2, x[["shape"]], x[["scale"]]) - actuar::mpareto(1, x[["shape"]], x[["scale"]])^2
}
distributional/R/dist_bernoulli.R0000644000175000017500000000643414151532232017007 0ustar nileshnilesh#' The Bernoulli distribution
#'
#' \lifecycle{stable}
#'
#' Bernoulli distributions are used to represent events like coin flips
#' when there is a single trial that is either successful or unsuccessful.
#' The Bernoulli distribution is a special case of the [Binomial()] #' distribution with `n = 1`. #' #' @inheritParams dist_binomial #' #' @details #' #' We recommend reading this documentation on #' , where the math #' will render nicely. #' #' In the following, let \eqn{X} be a Bernoulli random variable with parameter #' `p` = \eqn{p}. Some textbooks also define \eqn{q = 1 - p}, or use #' \eqn{\pi} instead of \eqn{p}. #' #' The Bernoulli probability distribution is widely used to model #' binary variables, such as 'failure' and 'success'. The most #' typical example is the flip of a coin, when \eqn{p} is thought as the #' probability of flipping a head, and \eqn{q = 1 - p} is the #' probability of flipping a tail. #' #' **Support**: \eqn{\{0, 1\}}{{0, 1}} #' #' **Mean**: \eqn{p} #' #' **Variance**: \eqn{p \cdot (1 - p) = p \cdot q}{p (1 - p)} #' #' **Probability mass function (p.m.f)**: #' #' \deqn{ #' P(X = x) = p^x (1 - p)^{1-x} = p^x q^{1-x} #' }{ #' P(X = x) = p^x (1 - p)^(1-x) #' } #' #' **Cumulative distribution function (c.d.f)**: #' #' \deqn{ #' P(X \le x) = #' \left \{ #' \begin{array}{ll} #' 0 & x < 0 \\ #' 1 - p & 0 \leq x < 1 \\ #' 1 & x \geq 1 #' \end{array} #' \right. 
#' }{ #' P(X \le x) = (1 - p) 1_{[0, 1)}(x) + 1_{1}(x) #' } #' #' **Moment generating function (m.g.f)**: #' #' \deqn{ #' E(e^{tX}) = (1 - p) + p e^t #' }{ #' E(e^(tX)) = (1 - p) + p e^t #' } #' #' @examples #' dist <- dist_bernoulli(prob = c(0.05, 0.5, 0.3, 0.9, 0.1)) #' #' dist #' mean(dist) #' variance(dist) #' skewness(dist) #' kurtosis(dist) #' #' generate(dist, 10) #' #' density(dist, 2) #' density(dist, 2, log = TRUE) #' #' cdf(dist, 4) #' #' quantile(dist, 0.7) #' #' @export dist_bernoulli <- function(prob){ prob <- vec_cast(prob, double()) if(any((prob < 0) | (prob > 1))){ abort("The probability of success must be between 0 and 1.") } new_dist(p = prob, class = "dist_bernoulli") } #' @export format.dist_bernoulli <- function(x, digits = 2, ...){ sprintf( "Bernoulli(%s)", format(x[["p"]], digits = digits, ...) ) } #' @export density.dist_bernoulli <- function(x, at, ...){ stats::dbinom(at, 1, x[["p"]]) } #' @export log_density.dist_bernoulli <- function(x, at, ...){ stats::dbinom(at, 1, x[["p"]], log = TRUE) } #' @export quantile.dist_bernoulli <- function(x, p, ...){ as.logical(stats::qbinom(p, 1, x[["p"]])) } #' @export cdf.dist_bernoulli <- function(x, q, ...){ stats::pbinom(q, 1, x[["p"]]) } #' @export generate.dist_bernoulli <- function(x, times, ...){ as.logical(stats::rbinom(times, 1, x[["p"]])) } #' @export mean.dist_bernoulli <- function(x, ...){ x[["p"]] } #' @export covariance.dist_bernoulli <- function(x, ...){ x[["p"]]*(1-x[["p"]]) } #' @export skewness.dist_bernoulli <- function(x, ...) { p <- x[["p"]] q <- 1 - x[["p"]] (1 - (2 * p)) / sqrt(p * q) } #' @export kurtosis.dist_bernoulli <- function(x, ...) { p <- x[["p"]] q <- 1 - x[["p"]] (1 - (6 * p * q)) / (p * q) } distributional/R/geom_hilo.R0000644000175000017500000001346613711726207015746 0ustar nileshnilesh#' Ribbon plots for hilo intervals #' #' \lifecycle{maturing} #' #' `geom_hilo_ribbon()` displays the interval defined by a hilo object. 
The #' luminance of the shaded area indicates its confidence level. The shade colour #' can be controlled by the `fill` aesthetic, however the luminance will be #' overwritten to represent the confidence level. #' #' @inheritParams ggplot2::geom_ribbon #' #' @seealso #' [`geom_hilo_linerange()`] for discrete hilo intervals (vertical lines) #' #' @examples #' dist <- dist_normal(1:3, 1:3) #' library(ggplot2) #' ggplot( #' data.frame(x = rep(1:3, 2), interval = c(hilo(dist, 80), hilo(dist, 95))) #' ) + #' geom_hilo_ribbon(aes(x = x, hilo = interval)) #' #' @export geom_hilo_ribbon <- function(mapping = NULL, data = NULL, stat = "identity", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...) { if(!is.null(mapping$hilo)){ mapping$ymin <- expr(ggplot2::after_stat(vctrs::vec_data(hilo)$lower)) mapping$ymax <- expr(ggplot2::after_stat(vctrs::vec_data(hilo)$upper)) mapping$level <- expr(ggplot2::after_stat(vctrs::vec_data(hilo)$level)) } ggplot2::layer( geom = GeomHiloRibbon, mapping = mapping, data = data, stat = stat, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list(na.rm = na.rm, ...) 
) } GeomHiloRibbon <- ggplot2::ggproto( "GeomHiloRibbon", ggplot2::GeomRibbon, required_aes = c(ggplot2::GeomRibbon$required_aes), optional_aes = c("hilo", "level"), default_aes = ggplot2::aes( colour = "grey20", fill = "grey60", size = .5, linetype = 1, weight = 1, alpha = 1, level = NA ), draw_key = function(data, params, size) { lwd <- min(data$size, min(size) / 4) # Calculate and set colour fillcol <- darken_fill(data$fill, 80) grid::rectGrob( width = grid::unit(1, "npc") - grid::unit(lwd, "mm"), height = grid::unit(1, "npc") - grid::unit(lwd, "mm"), gp = grid::gpar( col = fillcol, fill = scales::alpha(fillcol, data$alpha), lty = data$linetype, lwd = lwd * ggplot2::.pt, linejoin = "mitre" ) ) }, draw_group = function(data, panel_scales, coord) { # Calculate colour data$fill <- darken_fill(data$fill, data$level) data$colour <- NA # Compute alpha transparency data$alpha <- grDevices::col2rgb(data$fill, alpha = TRUE)[4, ] / 255 * data$alpha # Create grobs grobs <- lapply( split(data, -data$level), ggplot2::GeomRibbon$draw_group, panel_scales, coord ) ggplot2:::ggname("geom_hilo_ribbon", do.call(grid::grobTree, grobs)) } ) #' Line ranges for hilo intervals #' #' \lifecycle{experimental} #' #' `geom_hilo_linerange()` displays the interval defined by a hilo object. The #' luminance of the shaded area indicates its confidence level. The shade colour #' can be controlled by the `fill` aesthetic, however the luminance will be #' overwritten to represent the confidence level. 
#' #' @inheritParams ggplot2::geom_linerange #' #' @seealso #' [`geom_hilo_ribbon()`] for continuous hilo intervals (ribbons) #' #' @examples #' dist <- dist_normal(1:3, 1:3) #' library(ggplot2) #' ggplot( #' data.frame(x = rep(1:3, 2), interval = c(hilo(dist, 80), hilo(dist, 95))) #' ) + #' geom_hilo_linerange(aes(x = x, hilo = interval)) #' #' @export geom_hilo_linerange <- function(mapping = NULL, data = NULL, stat = "identity", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...) { if(!is.null(mapping$hilo)){ mapping$ymin <- expr(ggplot2::after_stat(vctrs::vec_data(hilo)$lower)) mapping$ymax <- expr(ggplot2::after_stat(vctrs::vec_data(hilo)$upper)) mapping$level <- expr(ggplot2::after_stat(vctrs::vec_data(hilo)$level)) } ggplot2::layer( geom = GeomHiloLinerange, mapping = mapping, data = data, stat = stat, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list(na.rm = na.rm, ...) ) } GeomHiloLinerange <- ggplot2::ggproto( "GeomHiloLinerange", ggplot2::GeomLinerange, required_aes = c(ggplot2::GeomLinerange$required_aes), optional_aes = c("hilo", "level"), default_aes = ggplot2::aes( colour = "grey20", fill = "grey60", size = 2, linetype = 1, weight = 1, alpha = 1, level = NA ), draw_key = function(data, params, size) { lwd <- min(data$size, min(size) / 4) # Calculate and set colour fillcol <- darken_fill(data$colour, 80) grid::rectGrob( width = grid::unit(1, "npc") - grid::unit(lwd, "mm"), height = grid::unit(1, "npc") - grid::unit(lwd, "mm"), gp = grid::gpar( col = fillcol, fill = scales::alpha(fillcol, data$alpha), lty = data$linetype, lwd = lwd * ggplot2::.pt, linejoin = "mitre" ) ) }, draw_panel = function(data, panel_scales, coord) { # Calculate colour data$colour <- darken_fill(data$colour, data$level) data$fill <- NA # Compute alpha transparency data$alpha <- grDevices::col2rgb(data$colour, alpha = TRUE)[4, ] / 255 * data$alpha # Create grobs grobs <- lapply( split(data, -data$level), 
ggplot2::GeomLinerange$draw_panel, panel_scales, coord ) ggplot2:::ggname("geom_hilo_linerange", do.call(grid::grobTree, grobs)) } ) #' @importFrom farver convert_colour #' @importFrom grDevices col2rgb darken_fill <- function(col, prob) { col <- farver::convert_colour(t(grDevices::col2rgb(col)), "RGB", "HSL") n_prob <- length(unique(prob)) col[,3] <- seq(90 - pmin((n_prob - 1)*10, 30), 90, length.out = n_prob)[match(prob, sort(unique(prob)))] col <- farver::convert_colour(col, "HSL", "RGB") col2hex(col) } #' @importFrom grDevices rgb col2hex <- function(col){ grDevices::rgb(col, maxColorValue = 255) } distributional/NAMESPACE0000644000175000017500000003722014165413640014667 0ustar nileshnilesh# Generated by roxygen2: do not edit by hand S3method("$",hilo) S3method("[[",distribution) S3method("dimnames<-",distribution) S3method("names<-",hilo) S3method(.DollarNames,hilo) S3method(Math,dist_default) S3method(Math,dist_lognormal) S3method(Math,dist_na) S3method(Math,dist_normal) S3method(Math,dist_transformed) S3method(Ops,dist_default) S3method(Ops,dist_na) S3method(Ops,dist_normal) S3method(Ops,dist_transformed) S3method(cdf,dist_bernoulli) S3method(cdf,dist_beta) S3method(cdf,dist_binomial) S3method(cdf,dist_burr) S3method(cdf,dist_categorical) S3method(cdf,dist_cauchy) S3method(cdf,dist_chisq) S3method(cdf,dist_default) S3method(cdf,dist_degenerate) S3method(cdf,dist_exponential) S3method(cdf,dist_f) S3method(cdf,dist_gamma) S3method(cdf,dist_geometric) S3method(cdf,dist_gumbel) S3method(cdf,dist_hypergeometric) S3method(cdf,dist_inflated) S3method(cdf,dist_inverse_exponential) S3method(cdf,dist_inverse_gamma) S3method(cdf,dist_inverse_gaussian) S3method(cdf,dist_logarithmic) S3method(cdf,dist_logistic) S3method(cdf,dist_lognormal) S3method(cdf,dist_mixture) S3method(cdf,dist_mvnorm) S3method(cdf,dist_na) S3method(cdf,dist_negbin) S3method(cdf,dist_normal) S3method(cdf,dist_pareto) S3method(cdf,dist_percentile) S3method(cdf,dist_poisson) 
S3method(cdf,dist_poisson_inverse_gaussian) S3method(cdf,dist_sample) S3method(cdf,dist_student_t) S3method(cdf,dist_studentized_range) S3method(cdf,dist_transformed) S3method(cdf,dist_truncated) S3method(cdf,dist_uniform) S3method(cdf,dist_weibull) S3method(cdf,dist_wrap) S3method(cdf,distribution) S3method(covariance,default) S3method(covariance,dist_bernoulli) S3method(covariance,dist_beta) S3method(covariance,dist_binomial) S3method(covariance,dist_burr) S3method(covariance,dist_categorical) S3method(covariance,dist_cauchy) S3method(covariance,dist_chisq) S3method(covariance,dist_default) S3method(covariance,dist_degenerate) S3method(covariance,dist_exponential) S3method(covariance,dist_f) S3method(covariance,dist_gamma) S3method(covariance,dist_geometric) S3method(covariance,dist_gumbel) S3method(covariance,dist_hypergeometric) S3method(covariance,dist_inflated) S3method(covariance,dist_inverse_exponential) S3method(covariance,dist_inverse_gamma) S3method(covariance,dist_inverse_gaussian) S3method(covariance,dist_logarithmic) S3method(covariance,dist_logistic) S3method(covariance,dist_lognormal) S3method(covariance,dist_mixture) S3method(covariance,dist_multinomial) S3method(covariance,dist_mvnorm) S3method(covariance,dist_na) S3method(covariance,dist_negbin) S3method(covariance,dist_normal) S3method(covariance,dist_pareto) S3method(covariance,dist_poisson) S3method(covariance,dist_poisson_inverse_gaussian) S3method(covariance,dist_sample) S3method(covariance,dist_student_t) S3method(covariance,dist_transformed) S3method(covariance,dist_uniform) S3method(covariance,dist_weibull) S3method(covariance,distribution) S3method(covariance,numeric) S3method(density,dist_bernoulli) S3method(density,dist_beta) S3method(density,dist_binomial) S3method(density,dist_burr) S3method(density,dist_categorical) S3method(density,dist_cauchy) S3method(density,dist_chisq) S3method(density,dist_default) S3method(density,dist_degenerate) S3method(density,dist_exponential) 
S3method(density,dist_f) S3method(density,dist_gamma) S3method(density,dist_geometric) S3method(density,dist_gumbel) S3method(density,dist_hypergeometric) S3method(density,dist_inflated) S3method(density,dist_inverse_exponential) S3method(density,dist_inverse_gamma) S3method(density,dist_inverse_gaussian) S3method(density,dist_logarithmic) S3method(density,dist_logistic) S3method(density,dist_lognormal) S3method(density,dist_mixture) S3method(density,dist_multinomial) S3method(density,dist_mvnorm) S3method(density,dist_na) S3method(density,dist_negbin) S3method(density,dist_normal) S3method(density,dist_pareto) S3method(density,dist_poisson) S3method(density,dist_poisson_inverse_gaussian) S3method(density,dist_sample) S3method(density,dist_student_t) S3method(density,dist_transformed) S3method(density,dist_truncated) S3method(density,dist_uniform) S3method(density,dist_weibull) S3method(density,dist_wrap) S3method(density,distribution) S3method(dim,dist_default) S3method(dim,dist_multinomial) S3method(dim,dist_mvnorm) S3method(dimnames,distribution) S3method(family,dist_default) S3method(family,distribution) S3method(format,dist_bernoulli) S3method(format,dist_beta) S3method(format,dist_binomial) S3method(format,dist_burr) S3method(format,dist_categorical) S3method(format,dist_cauchy) S3method(format,dist_chisq) S3method(format,dist_default) S3method(format,dist_degenerate) S3method(format,dist_exponential) S3method(format,dist_f) S3method(format,dist_gamma) S3method(format,dist_geometric) S3method(format,dist_gumbel) S3method(format,dist_hypergeometric) S3method(format,dist_inflated) S3method(format,dist_inverse_exponential) S3method(format,dist_inverse_gamma) S3method(format,dist_inverse_gaussian) S3method(format,dist_logarithmic) S3method(format,dist_logistic) S3method(format,dist_lognormal) S3method(format,dist_mixture) S3method(format,dist_multinomial) S3method(format,dist_mvnorm) S3method(format,dist_na) S3method(format,dist_negbin) 
S3method(format,dist_normal) S3method(format,dist_pareto) S3method(format,dist_percentile) S3method(format,dist_poisson) S3method(format,dist_poisson_inverse_gaussian) S3method(format,dist_sample) S3method(format,dist_student_t) S3method(format,dist_studentized_range) S3method(format,dist_transformed) S3method(format,dist_truncated) S3method(format,dist_uniform) S3method(format,dist_weibull) S3method(format,dist_wrap) S3method(format,distribution) S3method(format,hdr) S3method(format,hilo) S3method(format,support_region) S3method(generate,dist_bernoulli) S3method(generate,dist_beta) S3method(generate,dist_binomial) S3method(generate,dist_burr) S3method(generate,dist_categorical) S3method(generate,dist_cauchy) S3method(generate,dist_chisq) S3method(generate,dist_default) S3method(generate,dist_degenerate) S3method(generate,dist_exponential) S3method(generate,dist_f) S3method(generate,dist_gamma) S3method(generate,dist_geometric) S3method(generate,dist_gumbel) S3method(generate,dist_hypergeometric) S3method(generate,dist_inflated) S3method(generate,dist_inverse_exponential) S3method(generate,dist_inverse_gamma) S3method(generate,dist_inverse_gaussian) S3method(generate,dist_logarithmic) S3method(generate,dist_logistic) S3method(generate,dist_lognormal) S3method(generate,dist_mixture) S3method(generate,dist_multinomial) S3method(generate,dist_mvnorm) S3method(generate,dist_na) S3method(generate,dist_negbin) S3method(generate,dist_normal) S3method(generate,dist_pareto) S3method(generate,dist_percentile) S3method(generate,dist_poisson) S3method(generate,dist_poisson_inverse_gaussian) S3method(generate,dist_sample) S3method(generate,dist_student_t) S3method(generate,dist_transformed) S3method(generate,dist_uniform) S3method(generate,dist_weibull) S3method(generate,dist_wrap) S3method(generate,distribution) S3method(guide_geom,guide_level) S3method(guide_train,level_guide) S3method(hdr,default) S3method(hdr,dist_default) S3method(hdr,distribution) S3method(hilo,default) 
S3method(hilo,dist_default) S3method(hilo,distribution) S3method(is.na,hilo) S3method(kurtosis,dist_bernoulli) S3method(kurtosis,dist_beta) S3method(kurtosis,dist_binomial) S3method(kurtosis,dist_categorical) S3method(kurtosis,dist_cauchy) S3method(kurtosis,dist_chisq) S3method(kurtosis,dist_degenerate) S3method(kurtosis,dist_exponential) S3method(kurtosis,dist_f) S3method(kurtosis,dist_gamma) S3method(kurtosis,dist_geometric) S3method(kurtosis,dist_gumbel) S3method(kurtosis,dist_hypergeometric) S3method(kurtosis,dist_logistic) S3method(kurtosis,dist_lognormal) S3method(kurtosis,dist_na) S3method(kurtosis,dist_negbin) S3method(kurtosis,dist_normal) S3method(kurtosis,dist_poisson) S3method(kurtosis,dist_uniform) S3method(kurtosis,dist_weibull) S3method(kurtosis,distribution) S3method(likelihood,dist_default) S3method(likelihood,distribution) S3method(log_cdf,dist_default) S3method(log_cdf,dist_lognormal) S3method(log_cdf,dist_na) S3method(log_cdf,dist_normal) S3method(log_cdf,distribution) S3method(log_density,dist_bernoulli) S3method(log_density,dist_beta) S3method(log_density,dist_binomial) S3method(log_density,dist_burr) S3method(log_density,dist_cauchy) S3method(log_density,dist_chisq) S3method(log_density,dist_default) S3method(log_density,dist_exponential) S3method(log_density,dist_f) S3method(log_density,dist_gamma) S3method(log_density,dist_geometric) S3method(log_density,dist_gumbel) S3method(log_density,dist_hypergeometric) S3method(log_density,dist_inverse_exponential) S3method(log_density,dist_inverse_gamma) S3method(log_density,dist_inverse_gaussian) S3method(log_density,dist_logarithmic) S3method(log_density,dist_logistic) S3method(log_density,dist_lognormal) S3method(log_density,dist_multinomial) S3method(log_density,dist_mvnorm) S3method(log_density,dist_na) S3method(log_density,dist_negbin) S3method(log_density,dist_normal) S3method(log_density,dist_pareto) S3method(log_density,dist_poisson) S3method(log_density,dist_poisson_inverse_gaussian) 
S3method(log_density,dist_student_t) S3method(log_density,dist_uniform) S3method(log_density,dist_weibull) S3method(log_density,dist_wrap) S3method(log_density,distribution) S3method(log_likelihood,dist_default) S3method(log_likelihood,distribution) S3method(log_quantile,dist_default) S3method(log_quantile,dist_lognormal) S3method(log_quantile,dist_na) S3method(log_quantile,dist_normal) S3method(log_quantile,distribution) S3method(mean,dist_bernoulli) S3method(mean,dist_beta) S3method(mean,dist_binomial) S3method(mean,dist_burr) S3method(mean,dist_categorical) S3method(mean,dist_cauchy) S3method(mean,dist_chisq) S3method(mean,dist_default) S3method(mean,dist_degenerate) S3method(mean,dist_exponential) S3method(mean,dist_f) S3method(mean,dist_gamma) S3method(mean,dist_geometric) S3method(mean,dist_gumbel) S3method(mean,dist_hypergeometric) S3method(mean,dist_inflated) S3method(mean,dist_inverse_exponential) S3method(mean,dist_inverse_gamma) S3method(mean,dist_inverse_gaussian) S3method(mean,dist_logarithmic) S3method(mean,dist_logistic) S3method(mean,dist_lognormal) S3method(mean,dist_mixture) S3method(mean,dist_multinomial) S3method(mean,dist_mvnorm) S3method(mean,dist_na) S3method(mean,dist_negbin) S3method(mean,dist_normal) S3method(mean,dist_pareto) S3method(mean,dist_poisson) S3method(mean,dist_poisson_inverse_gaussian) S3method(mean,dist_sample) S3method(mean,dist_student_t) S3method(mean,dist_transformed) S3method(mean,dist_truncated) S3method(mean,dist_uniform) S3method(mean,dist_weibull) S3method(mean,distribution) S3method(median,dist_default) S3method(median,dist_sample) S3method(median,distribution) S3method(parameters,dist_default) S3method(parameters,dist_wrap) S3method(parameters,distribution) S3method(print,dist_default) S3method(quantile,dist_bernoulli) S3method(quantile,dist_beta) S3method(quantile,dist_binomial) S3method(quantile,dist_burr) S3method(quantile,dist_categorical) S3method(quantile,dist_cauchy) S3method(quantile,dist_chisq) 
S3method(quantile,dist_default) S3method(quantile,dist_degenerate) S3method(quantile,dist_exponential) S3method(quantile,dist_f) S3method(quantile,dist_gamma) S3method(quantile,dist_geometric) S3method(quantile,dist_gumbel) S3method(quantile,dist_hypergeometric) S3method(quantile,dist_inflated) S3method(quantile,dist_inverse_exponential) S3method(quantile,dist_inverse_gamma) S3method(quantile,dist_inverse_gaussian) S3method(quantile,dist_logarithmic) S3method(quantile,dist_logistic) S3method(quantile,dist_lognormal) S3method(quantile,dist_mixture) S3method(quantile,dist_mvnorm) S3method(quantile,dist_na) S3method(quantile,dist_negbin) S3method(quantile,dist_normal) S3method(quantile,dist_pareto) S3method(quantile,dist_percentile) S3method(quantile,dist_poisson) S3method(quantile,dist_poisson_inverse_gaussian) S3method(quantile,dist_sample) S3method(quantile,dist_student_t) S3method(quantile,dist_studentized_range) S3method(quantile,dist_transformed) S3method(quantile,dist_truncated) S3method(quantile,dist_uniform) S3method(quantile,dist_weibull) S3method(quantile,dist_wrap) S3method(quantile,distribution) S3method(scale_type,hilo) S3method(skewness,dist_bernoulli) S3method(skewness,dist_beta) S3method(skewness,dist_binomial) S3method(skewness,dist_categorical) S3method(skewness,dist_cauchy) S3method(skewness,dist_chisq) S3method(skewness,dist_degenerate) S3method(skewness,dist_exponential) S3method(skewness,dist_f) S3method(skewness,dist_gamma) S3method(skewness,dist_geometric) S3method(skewness,dist_gumbel) S3method(skewness,dist_hypergeometric) S3method(skewness,dist_logistic) S3method(skewness,dist_lognormal) S3method(skewness,dist_na) S3method(skewness,dist_negbin) S3method(skewness,dist_normal) S3method(skewness,dist_poisson) S3method(skewness,dist_sample) S3method(skewness,dist_uniform) S3method(skewness,dist_weibull) S3method(skewness,distribution) S3method(sum,distribution) S3method(support,dist_default) S3method(support,distribution) 
S3method(variance,default) S3method(variance,dist_default) S3method(variance,distribution) S3method(variance,matrix) S3method(variance,numeric) S3method(vec_arith,distribution) S3method(vec_arith,hilo) S3method(vec_arith.distribution,default) S3method(vec_arith.numeric,distribution) S3method(vec_arith.numeric,hilo) S3method(vec_cast,character.distribution) S3method(vec_cast,character.hilo) S3method(vec_cast,distribution.distribution) S3method(vec_cast,distribution.double) S3method(vec_cast,distribution.integer) S3method(vec_math,distribution) S3method(vec_math,hilo) S3method(vec_ptype2,distribution.distribution) S3method(vec_ptype2,distribution.double) S3method(vec_ptype2,distribution.integer) S3method(vec_ptype2,double.distribution) S3method(vec_ptype2,hilo.hilo) S3method(vec_ptype2,integer.distribution) S3method(vec_ptype_abbr,distribution) S3method(vec_ptype_abbr,support_region) export(autoplot.distribution) export(cdf) export(covariance) export(dist_bernoulli) export(dist_beta) export(dist_binomial) export(dist_burr) export(dist_categorical) export(dist_cauchy) export(dist_chisq) export(dist_degenerate) export(dist_exponential) export(dist_f) export(dist_gamma) export(dist_geometric) export(dist_gumbel) export(dist_hypergeometric) export(dist_inflated) export(dist_inverse_exponential) export(dist_inverse_gamma) export(dist_inverse_gaussian) export(dist_logarithmic) export(dist_logistic) export(dist_lognormal) export(dist_missing) export(dist_mixture) export(dist_multinomial) export(dist_multivariate_normal) export(dist_negative_binomial) export(dist_normal) export(dist_pareto) export(dist_percentile) export(dist_poisson) export(dist_poisson_inverse_gaussian) export(dist_sample) export(dist_student_t) export(dist_studentized_range) export(dist_transformed) export(dist_truncated) export(dist_uniform) export(dist_weibull) export(dist_wrap) export(generate) export(geom_hilo_linerange) export(geom_hilo_ribbon) export(guide_level) export(hdr) export(hilo) 
export(is_distribution) export(is_hdr) export(is_hilo) export(kurtosis) export(likelihood) export(log_likelihood) export(new_dist) export(new_hdr) export(new_hilo) export(parameters) export(scale_hilo_continuous) export(scale_level_continuous) export(skewness) export(support) export(variance) import(rlang) import(vctrs) importFrom(farver,convert_colour) importFrom(generics,generate) importFrom(ggplot2,discrete_scale) importFrom(ggplot2,guide_colourbar) importFrom(ggplot2,guide_geom) importFrom(ggplot2,guide_legend) importFrom(ggplot2,guide_train) importFrom(ggplot2,scale_type) importFrom(ggplot2,waiver) importFrom(grDevices,col2rgb) importFrom(grDevices,rgb) importFrom(lifecycle,deprecate_soft) importFrom(stats,density) importFrom(stats,family) importFrom(stats,median) importFrom(stats,quantile) importFrom(utils,.DollarNames)