tidyr/0000755000176200001440000000000014554221641011411 5ustar liggesuserstidyr/NAMESPACE0000644000176200001440000000735014360013543012630 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(complete,data.frame) S3method(complete,grouped_df) S3method(complete_,data.frame) S3method(drop_na,data.frame) S3method(drop_na_,data.frame) S3method(expand,data.frame) S3method(expand,grouped_df) S3method(expand_,data.frame) S3method(extract,data.frame) S3method(extract_,data.frame) S3method(fill,data.frame) S3method(fill_,data.frame) S3method(full_seq,Date) S3method(full_seq,POSIXct) S3method(full_seq,numeric) S3method(gather,data.frame) S3method(gather_,data.frame) S3method(nest,data.frame) S3method(nest,grouped_df) S3method(nest,tbl_df) S3method(nest_legacy,data.frame) S3method(nest_legacy,tbl_df) S3method(pivot_longer,data.frame) S3method(pivot_wider,data.frame) S3method(replace_na,data.frame) S3method(replace_na,default) S3method(separate,data.frame) S3method(separate_,data.frame) S3method(separate_rows,data.frame) S3method(separate_rows_,data.frame) S3method(spread,data.frame) S3method(spread_,data.frame) S3method(uncount,data.frame) S3method(unite,data.frame) S3method(unite_,data.frame) S3method(unnest,data.frame) S3method(unnest,rowwise_df) S3method(unnest_legacy,data.frame) export("%>%") export(all_of) export(any_of) export(as_tibble) export(build_longer_spec) export(build_wider_spec) export(check_pivot_spec) export(chop) export(complete) export(complete_) export(contains) export(crossing) export(crossing_) export(drop_na) export(drop_na_) export(ends_with) export(everything) export(expand) export(expand_) export(expand_grid) export(extract) export(extract_) export(extract_numeric) export(fill) export(fill_) export(full_seq) export(gather) export(gather_) export(hoist) export(last_col) export(matches) export(nest) export(nest_) export(nest_legacy) export(nesting) export(nesting_) export(num_range) export(one_of) export(pack) export(pivot_longer) 
export(pivot_longer_spec) export(pivot_wider) export(pivot_wider_spec) export(replace_na) export(separate) export(separate_) export(separate_longer_delim) export(separate_longer_position) export(separate_rows) export(separate_rows_) export(separate_wider_delim) export(separate_wider_position) export(separate_wider_regex) export(spread) export(spread_) export(starts_with) export(tibble) export(tidyr_legacy) export(tribble) export(unchop) export(uncount) export(unite) export(unite_) export(unnest) export(unnest_) export(unnest_auto) export(unnest_legacy) export(unnest_longer) export(unnest_wider) export(unpack) import(rlang) import(vctrs) importFrom(dplyr,tbl_vars) importFrom(glue,glue) importFrom(lifecycle,deprecated) importFrom(magrittr,"%>%") importFrom(purrr,accumulate) importFrom(purrr,discard) importFrom(purrr,every) importFrom(purrr,imap) importFrom(purrr,keep) importFrom(purrr,map) importFrom(purrr,map2) importFrom(purrr,map2_chr) importFrom(purrr,map2_dbl) importFrom(purrr,map2_df) importFrom(purrr,map2_int) importFrom(purrr,map2_lgl) importFrom(purrr,map_at) importFrom(purrr,map_chr) importFrom(purrr,map_dbl) importFrom(purrr,map_df) importFrom(purrr,map_if) importFrom(purrr,map_int) importFrom(purrr,map_lgl) importFrom(purrr,pmap) importFrom(purrr,pmap_chr) importFrom(purrr,pmap_dbl) importFrom(purrr,pmap_df) importFrom(purrr,pmap_int) importFrom(purrr,pmap_lgl) importFrom(purrr,reduce) importFrom(purrr,some) importFrom(purrr,transpose) importFrom(tibble,as_tibble) importFrom(tibble,tibble) importFrom(tibble,tribble) importFrom(tidyselect,all_of) importFrom(tidyselect,any_of) importFrom(tidyselect,contains) importFrom(tidyselect,ends_with) importFrom(tidyselect,everything) importFrom(tidyselect,last_col) importFrom(tidyselect,matches) importFrom(tidyselect,num_range) importFrom(tidyselect,one_of) importFrom(tidyselect,starts_with) importFrom(utils,packageVersion) importFrom(utils,type.convert) useDynLib(tidyr, .registration = TRUE) 
tidyr/LICENSE0000644000176200001440000000005314520546620012414 0ustar liggesusersYEAR: 2023 COPYRIGHT HOLDER: tidyr authors tidyr/README.md0000644000176200001440000001137314553563421012701 0ustar liggesusers # tidyr tidyr website [![CRAN status](https://www.r-pkg.org/badges/version/tidyr)](https://cran.r-project.org/package=tidyr) [![R-CMD-check](https://github.com/tidyverse/tidyr/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/tidyverse/tidyr/actions/workflows/R-CMD-check.yaml) [![Codecov test coverage](https://codecov.io/gh/tidyverse/tidyr/branch/main/graph/badge.svg)](https://app.codecov.io/gh/tidyverse/tidyr?branch=main) ## Overview The goal of tidyr is to help you create **tidy data**. Tidy data is data where: 1. Each variable is a column; each column is a variable. 2. Each observation is a row; each row is an observation. 3. Each value is a cell; each cell is a single value. Tidy data describes a standard way of storing data that is used wherever possible throughout the [tidyverse](https://www.tidyverse.org/). If you ensure that your data is tidy, you’ll spend less time fighting with the tools and more time working on your analysis. Learn more about tidy data in `vignette("tidy-data")`. ## Installation ``` r # The easiest way to get tidyr is to install the whole tidyverse: install.packages("tidyverse") # Alternatively, install just tidyr: install.packages("tidyr") # Or the development version from GitHub: # install.packages("pak") pak::pak("tidyverse/tidyr") ``` ## Cheatsheet ## Getting started ``` r library(tidyr) ``` tidyr functions fall into five main categories: - “Pivoting” which converts between long and wide forms. tidyr 1.0.0 introduces `pivot_longer()` and `pivot_wider()`, replacing the older `spread()` and `gather()` functions. See `vignette("pivot")` for more details. - “Rectangling”, which turns deeply nested lists (as from JSON) into tidy tibbles. 
See `unnest_longer()`, `unnest_wider()`, `hoist()`, and `vignette("rectangle")` for more details. - Nesting converts grouped data to a form where each group becomes a single row containing a nested data frame, and unnesting does the opposite. See `nest()`, `unnest()`, and `vignette("nest")` for more details. - Splitting and combining character columns. Use `separate_wider_delim()`, `separate_wider_position()`, and `separate_wider_regex()` to pull a single character column into multiple columns; use `unite()` to combine multiple columns into a single character column. - Make implicit missing values explicit with `complete()`; make explicit missing values implicit with `drop_na()`; replace missing values with next/previous value with `fill()`, or a known value with `replace_na()`. ## Related work tidyr [supersedes](https://lifecycle.r-lib.org/articles/stages.html#superseded) reshape2 (2010-2014) and reshape (2005-2010). Somewhat counterintuitively, each iteration of the package has done less. tidyr is designed specifically for tidying data, not general reshaping (reshape2), or the general aggregation (reshape). 
[data.table](https://rdatatable.gitlab.io/data.table) provides high-performance implementations of `melt()` and `dcast()` If you’d like to read more about data reshaping from a CS perspective, I’d recommend the following three papers: - [Wrangler: Interactive visual specification of data transformation scripts](http://vis.stanford.edu/papers/wrangler) - [An interactive framework for data cleaning](https://www2.eecs.berkeley.edu/Pubs/TechRpts/2000/CSD-00-1110.pdf) (Potter’s wheel) - [On efficiently implementing SchemaSQL on a SQL database system](https://www.vldb.org/conf/1999/P45.pdf) To guide your reading, here’s a translation between the terminology used in different places: | tidyr 1.0.0 | pivot longer | pivot wider | |----------------|--------------|-------------| | tidyr \< 1.0.0 | gather | spread | | reshape(2) | melt | cast | | spreadsheets | unpivot | pivot | | databases | fold | unfold | ## Getting help If you encounter a clear bug, please file a minimal reproducible example on [github](https://github.com/tidyverse/tidyr/issues). For questions and other discussion, please use [community.rstudio.com](https://community.rstudio.com/). ------------------------------------------------------------------------ Please note that the tidyr project is released with a [Contributor Code of Conduct](https://tidyr.tidyverse.org/CODE_OF_CONDUCT.html). By contributing to this project, you agree to abide by its terms. 
tidyr/data/0000755000176200001440000000000014363516001012315 5ustar liggesuserstidyr/data/table4a.rda0000644000176200001440000000036614360256575014344 0ustar liggesusersBZh91AY&SYoKH@@t |@@瞰@ U?QёM CFOS0S 2@4neN\bvIAkv[^ݷVMsӯZsVjL]*ouq+ͮO]=zܝu{;G鋶Ū.G\n{Wmfw^w'cfiݨ[B[thfm˻+*VݻIڕݝ"wnlhwwvv}m{gۻoF>ﻻgyro}zt>]7;vNn;m{}^oO{W{ymZ[inwmފzhvxvs{tk{ki\wjw{חvݚtsvww7r6&;{i/w[ƻ݀:Yβ\nv:\$5Unzz;uuZ MsVqn!YXwk {vaUJv捪ٽVqJԪۇz㷗mV|n}mۯ}zmzwmSN7]ZWrݵM^ٻ׷Nw{S^n^Ƿ{홽zu.{۵oo{Z^{MVJZvt,}G:]ޯwۯwq^GM <]%{!Zo{Ym} 1@.9W8 LL"H!$緰I K,xő&na^ۆ,l!ϓ֥7i5EIpa&~Ͽ՘~l`62uEDJ7rWݶSHqTJ 7-㾄+D#YrQ 7fv:&0I[ zݕWLkQ~hu@En4ҿ쑖[erM%ftlqbE `;M .YfS,T?*M4U(B=xd{O[lQU-5ں Ln4{7t࿯jgZ43-f^Rݘ4iI7myٓݤ6f]屳ܺH-h%v@f=) F;3bibaC@ȕ [uDP|1ѧV4UN,x]3?>Lx% )VX{өsgܕ%\nwV]Z QZЇHH_::hBMVyMo zTCN3ԧ`nS g$@md-, 51y]AT$"GΗ]ij$Uԕhk(3Vsgl9Y0Fwu[Rxlh9fB e7\ˮx gh ܌%@C/۔syK#(µ)RL>pG.*st=OHh)QbΙ/)[soIDŽkX##I.( -`})y2~b+ʒMQH}`Ik3'RHۭ0+`Tr0#!t؏ A9.N-do+Hrű.)L E: fWjtr[k*M͵OG6& ͋SA Q ܒ#4~)4-?yΧG/ P$ah+Qt~rh)ٌ&DI+Vs3Ѵi_l2cҲk*#uC Ԣm(nhgPE3+@D,P{8%ӂXMwmbYlH$R5P88bDѱʓFC ó`-G(¼v5ufXYq ѧȚ^P2gʤ_ iaWKۓT9zNU~Jw>ea-feW%}Ly0 ̀z[C:"\-`" 9DKr$^2a< A%c0]!lDK=q&YROL\T )P'8]t oJNG[*{=j\DXFmU<'bwZ@DxG*=.[陒LvǎS'螴Yܿz[)IhѪy:?t؛Z[iA^Φfl%0U_ :f)d#Y*?Ug'Ε`KZb!jsI 'VqT=["{6#@L$xW8$T6yK|_|D!=ETXAT U_5%6oUSAy`w;VW*d8H!bh=B\EouX۔8"Rv2f⶟@ ،1#j$VUI`QS7AKԡѮѦJ9ke<+\L%GzQ%/TBJl%~TyQOp踨LiZT>Չ&%yKw,X*SƉ JU6 d.d'mߢ|3;.LhGa^aXCwӅ誯=L)¡ @K6WS6+.w"Y(Dqʡ ֬>ӈeH;~@iUQz(S1vD,R+%)UR@N\Z :n"jDAm[M*ZK"$qM1m11tCWNVڇ\n%mA-q[RI>]Kjy $ u%jb 4d.QKZ8Id9yhb)$R*5xe/ C4#1@{ `p\J`'@^?ZtIBp!Z,܊v[18/ ^qzL~t݀h 2\b6` Xv%U"~1RL7m`TZ5fC.U35Y챈"sʌLKIgx>u}:[/楫?ڞht2A-PfmDhBWi7=hoަ}bz剷h;CS;IZT@__#/Vb9 5JN#=e6_sa<'s#D_ aJG1]Y Q D!=C~n!G#wƅF}yjIz`I7٠8pUkÂeyq XL Q1XkKnaZu }9PC ]kd^;Cw=}(IIz o]@4 j2Yܘ8$`$9]f'*TŹPs*;$e8g(4rT^͹b0u3Åe pᄸgw"M,Cϱ\[Y7W9ڿ 4kA]UI}';2Ls.dRe!uW9׍:*pI`0/7Uz]-YvRYeм!Ts{^&iyU;l>ExV]NdIu?%~o\A (蹹T)VqE2N'HRɽybv]_283Wt35WjGZN*ԲԠ֣v{UeC.<&bpU2`8AK#TJM` {nDA9MW;q*\V$2uk0lMrL$Rfr_ѻ9!L5U5 $d5[.^Z'LNjPC}ԃ_fd1,ؾWk#c}^tS^F! 
&D )hqăD,FDaK2nլ~fJ Q~ᛎs˧#p˶|MM 0;tL Rb[}fXCd Y|k#Rx%2s8l`nX酄򺤔BzF}GluU^j<,<2NKPC{Wtl^CƣoNwuE IY/+)JykH)Poȧ 3nNtIEP!s9nv)c5"vף%A fZSTBҢ0;FTF.\ wL 5ؔm> a1 )pȟ(ck.T*m$`g w3re͈ osu \spYg$먯/#OK&NGsP MԂ' Q:6tL2W9YC+bHbȏM6E@D+&g ΐB]u | ҼЃ̈]و62{ O9S̀̈<Npp&bEU=f[B2LE# v[ (Q &;Y s~YڢL 3Y$\m ÿ$6穊^c%[M]Mjgy(/Ŋo3iKk;S6a]܊Z,[ UH,$@ձՌ+٣(gޗ;!re$B#-=T_B2'cP$7ä 8ay:!!c̕økkMfc2ԛ>WۅYybВ‘kRs>@F13H u͢i_H Ł JhPb+UQ4ʞ g\^^$-Gee0}l];`vZp0'e`fRpqZ*^+ G52Re^ vn M,ADQX )a4/(YBV2 ,Qr :!@ʙ`s-'BqT;FW1h f(vn|:uFIt2ꋲr]TAAfe0[[Tmw4 u Ttf:J D`ld 70s z.,"" mkc8f_W8@W``ϱ. .ٌtx̒cS (F 5f8VU!.LZظ80Ti), |'%X 6l l,9l A@2rQiYXvY5MЬ؂}0tF-Z6F?#spۼ8 ÊL\AE[Q׋ 9A`uj%^ؙ 1Jpn+B(d Xȃ[QT07vF{R=5{b  DBE gU4T@v 53&/QdMCqB$7N$\|Ҽ~ MZsb>,9Wi\.Eɬuqp[8d 9أ>2 S~xlWU#疑'v;׎,{riClZlTQ x볔Kʸ<67w5}1UkVb.utEn V(Kfj᫻V2X\S{U]g%95iCvT莂0@bZ|JREQ.*nq8 9)IW֑f1ʢC,="XŰ/lSYb_cc)B/sX/ڳBEZB6.EZon̆ k Hb|0FC>Z9iPvfAwl8mYb!L,-dn̞ d)ns3/Zz* 5I79CAe]Wo=n1L-t;T0Z#ObXtٲi9,yQ) ^Xf\/0lP2yAo)mewGTqBIbFOI(́V:qkrDA|`Zڍ1CU=̤CQGg9ώ_M5̼u ?%{ 4Y6F|YQa$%;#â>yۅ73'I3S˩3`u0=qu+8D8 |,F!sҩB{ S*JsEB@2S5sw4;Q>wHVR~d@U5cgVM~ܘªl]MmGUWiqלg?wG-⡸Y>4cqJSQ:ȄEGOt2!ڣa;ꖷ|3WR#5Jq@LDY#AP&2`U4; ̠94dbVNtCrإ95ntc䱛eMQ JN5k2A]}v5l x!XBUWjfDpCi~")jF |̣^-c8t'L $$}BȁW 2ɘ NmKAaQ0EZV5¾f^1W &A6;54@fPlYbWlȖ%UeTMܕL"`Jv?n^ B^JIbLjՌº, }_ 99C3} ek$X!K)1'ڹ5C!."kUA "t&H S"<Ōyv*h *;y^#Xd;C,U)UE6^X MvhM2dbPrJˉrPd2JE(PTJ(IbQM@np $%tJnT&Ch6p7\IhAaL m4Mp6D! I90C&"nép4JcT; Rs`Mf%١-}^E/fȼe&_0siJ!q7,+oT8h!;lKԬ\WL6ܭlX[uBe/^VqӋpVU77իJ"ĨS<>, O#fRc0,J6u8OgzB"SMtCr唪I+PCnM ⑅ †<YǏ1$ѓ|T꘩Z]͸ ZQC1o DC {ꋩ7 ϰ_H92lHnq9U)DE*j1 N!jn6Xhɻz66\±DRպ DST6 (lb1ĸc`c-LM44]E:]̉#Ġh 4Cnqa(!+ 萤;lAa ˇ!؀)`NJP6[B Ɓ2ʞP lbB. i MlؒM +Hm$&I I  m(iCLI $&-&`$B!MiIlA&ړĬ P&؛ D H``D4ࡊ6Pb(bl!ccmݵ6&ЛCbl& K 4bhDKH! ЇIlHsh ^4C&҆]0 X jM6@bMlC`M4 0bP(d4%m VP؆2CNM+L)jhC`Ya[Rw "lJC,؉44sYrh,ð +iJBRV[t4Mi6\1NlE bC,5iM$\ eҡa;*ZU4T L,:-2 iE 6ca&`4)v؁YbLU6U`&С[S6 $ĉ&lSi! h`RR`LK"SBP)bB&04!6(bRCL(4b)i%CHQDCm$lRilD imPФĩ`d ȦBp6Jn)dM6tD%C5SAKJM$CE !6slChM` i`A 6"CTI0l 2S$Mi6E! 
-)TСM1RT5&MbISƤKI)0Sh'(S D؛I6JTƓb1!ci)0$ơmchPm1m h6ia6 C` l`m `M  `6ؒll& bMlI6!hI6ImА6@ mbI T Bg` F# <`* ( dPw= *  al) B.  !z@-TA.BHT0) aaH^!XaB]L$\0 ] .L/[ hPApAP\ H$B. ]! 0H &.0T  + Y 0^! ah-H&A|@R]\ArPY \@\zXaB/a`/B[ P\@@-a  . /AvAP@RBal 0)WiO6~ŋ,X9>"!d !4+Es9MܝY|?y?M!!~v! '=B͙j+PC2Ifdz 2^Qkw3嫛ŻB=^qK7/,U&hu0d+aH2ÒLibӈ?|~fVlYk}N|R0j?G9-^̎Ƣ$``CAUE,Is 9@lsܾjx?#:G7SH`xTIMK]wFaS_odbJ7Q Ra6W߽a @ƛ EīmD2 CZI0\<KmXޙ+WėIAXҭ8%ޓ,[I p*XGk R(뤙amj;esI$nW6R#.BwSyg{6f4l-z-31a?p=gο{wg@=86EvIOcxݠlw[=dƉG4UnvS`8x%'eL+tXvD]z Ut6r) C|('r# An @C/ʚNo Rum4XU9_ Y :}SXym<|2/*#-5׳Y8 B -{H?EkCzT!9쟨Mǯ;q/()E;ngNtZ9ݓV|t ܌@ql}#@9θQ$۞p!+Ǩ #ik:OŢGތNA5(}we]z=7;wy}*z˛9Ke?? Ϟҳ{:F[ Xdߵbz,D#mG2c6Ձ0y=GE9{#7}OG4 hM(Z16r~$! I~3g8`5>5>j/+sqzfra0K3b!o-8ﻎGGe/kTL0If Xi Qp T%%07TvU Y~A@Nf0H!l/@ *`ro5~O'f;[I)92E-dX^3q@}a| iZP/~[ v}?}£~/iH L֍&r~wD]J_(9oYixZ^ޱ3Er\͒ _YڻFy]1^A(`nޱ%g(a3: 0gv+F ]|lOv ]GBvplo{Y-LY0Mk'ȳ _}~VpνZ;@_ֳg^݊nXMog%F|O姐? q\2-0oz(|9жf1REF Seǂ?a7\Rpz=.p>i ~:mlџQ$;Rۺ#$ﷳaj N!Pnr}krRְk4< ANpڞW5%;̨߰Z~~FAaIUlߞGaܼ9hٽuq5k}?sw'٘_Cf*cs[Õ0%^hچJ#v]߶ Gs6uuRQf˪E8B KgGʚqPWon{~|^esY{G)sXLO79E@*@ !R \sۇ}\o YU(C=%Ҏtٞ `# gƯovTfD5n1R!ykX w\ IPp(Qȹ fAYHho`o<$aK'._xn˛yZXRrPm"@!9m+ l޼ڴ7C>I7o#?ВJ0kT]E-.oTolg3ݎ5^h%ۿH n܀T'uw[\CJNHP6*VdE86.Ǔ^~w<-L/nї# \[7ƭ{@RjSᏄ$ڌ$x%J%ӐYq C[ 4Xfq [ֲ?wz=OfTܜ%EM_p6\q p֪{S -áM4_ߞ*Ayb n+y=SHpFe8!ph UQ J1)6!G! fɿ}1u3lFEzdr򨯋<3Cm+0D&Vb؀j5t%͕FU1n߹}Yb;Nsf`s@ w`蹐y(?txc"!'0T2܍ԍ]1!3d%_᧰% b@6ra[%jUJ ^ciCfOU0ps */{sfv጖? bDRV~d(|SK24P O^8%@꾦ږ- Ŵ"[>?N%W}W3H=KM*=UIs{PyGѼو^;p-ƍj5 nP!yCS/z?zlׯˋEB鎸ӄuRCEm<#uf7*~+xARwg o1&MA#`=G! FŢ~WY [ٛæHROɒ%5Xԧɯd?!pZ 4A٪xmL C:A y#RqV 5z܍OU\old[{+oJM:fU]G3e0]QDO*) 1w$j' U;o׵Ft_^Yf-ƚ{;N {^ Fm;[Ȟ0BxMRxMk~%,wRǵAN z J. yhOlY_!*8zixSj6 jńmIS| ǥc\=k\AJi.n|=#\aaF~hl^Uu=g:l(k/IvOhXvsC(E@ݲ5 '2K!H!o,mܿŧyԓSvXrwfguM#IEay i7mǍ! e]c{<ƫil4(q|I>-FU'V WY/pxmnO*pwk CXLBuu)ՐUi!޶X(Nny%N=N!cm+T7^dg@Aeq5hlPIU48>;  s |ܷ?bMp^ XG:[C~k9;jCYpۘc@}̍Ky}URˡS)WXk`:BOV8iֶO6?1 !r:כRu:-gG[Dmd]-d"Wd@R N~%8 ]1HERՇO]Zv5zVy<8tW*JNClؽSk)& ;KlV6? 
X  Ņh2;0?4+UtG 9'׹!9`k&+/,ovIԩW}Z]P+Q}rs8C*\7J9$^,n<ܶrF됋e+)ă M_hhIXWgGZ`x%tY W?taxnpG\3_n\ TbG}d*yNĵT $PƎ^d/T%y| {ofKY6V]ާĨ/h?-q§ >kyMA{U0J[aK穪o{oW_(;Xݑp;9ez̡-tNڊjZI:_);znOdE;{P/:ݻӊ|]4J>J{KH5HD6noq/^Ì.Z:je  i4q}$v +QdJ8>u58Ҟj)'S9~cߗ˖Pk=jhXO֋uWݨVvQBW#Ռ݇"9kH}JW*B]^e0 wia3Auѫj8,]#֤NTu/\$mlp5ѶdFkԵB@prǒ"-~-ܑPxVN6p"l)]iqJzRPj>6Ц nCL-0Xk1=.l}4X.B1ъ(p[c{vzXylU=lx5"[O@HN]d3$3WO^gu*Et)r>q,KFR Y׎eDX|N 8?%(fH+ 5$Gڡsscr~4)RI7ɶ89|0,h%xݾt3d"[=VRixs;ة4mDͲ2by0z]AxAڲ/o0/?{aٙ%j?k8ƴgc@u ~3(/}T{ }2DYcgqeX-\>D=SPCNso6`glH jEiTbv>I#cZF|*O`UNw,Ҕ >^@qrN{1:V f!P7>mU|n?wr,UOjsoip8Qb}aSwh8^9RU+a]MEk*\X-*utÙ.F =9vg[3):by`O(TKakz!s] :-DޮCFS"6oѩAg`go3Nir1ԥ|7gR\(َ̹Q9?zQ>:kGuWVެm My>޲iheH]c,-t F 7|CYfhqS"G @4B߿:J%܉7 *;kἄbϓ 0Ǿ zR;7ٻ9ɋl-_, ʹ()FCJ``ξ1fcazNZ0̨yvl1B8*^t?Gw/K~ &IdGʭA\ NEigYE&8Μ׾ѭ{%vQgx,4zAwx~Qݐ+IדNsNpz]- K X0ov#_Π p~N0J;% %iY!Z/C+泷'2)Q`=̻5Xţ4j*%OFϥ-,F+a ?j1 )Gt_h.3oktYӰ0󾯩FXAA 6oZcV0\ KO&mxTddXitJ)u80` D"GN ם7^g? `86s "5Vh"; ! ,#A9~O =O \zk\dl=x6 ooc Iu=v ,DEdJt8[{d`tcͽJl˪TR / 5` fk u{/;!ճ%IǍ8Ps<nOEA|SJ0sM!-b +~=W+uL4&+ǩ!T4Uc5a~1$zE(X ]123ѷմ [C$J!%|=42[{Q]R ܧLr-R7!=`sСbQv sR05IG"QܞVcTdGuO[wrG*D A7N3鎩7CVʧ}wI32`IZ-stV( P T6ݚbˆ31ٟ1&=|&S@/`9jO)@ RXJD*7Di"O%lnkʰ!-f?tzC oRa NiI!_5t6}Qm}D- .` *ѤufTYx3ֹ,_W tv 3?$6f 3?yABd*b!h5_ƕ{vF(kCxfo疠AgdUĥyaԾi @,!HJ2GUG \9aYA$ },\oga0CT.W_S̍4YyXYLTp~-fˀ@O<ӌ @-eGf HT&C+ N\C4jiL$9^mdyJ &ѫ$=b"|abo3dl#xf D!-VSHϷvgUy' ;0瀁 # $p-ܪossUXR gdFO֢j DWXm CԴpQh'եr{qR!ܱq4/׮;wc8y>CtP3ķr[WeB{Ңi5wE.*A3m`fAϾ*Ϲ,'nH)vf:xw)<MKmn_Z+ 7{:8vس3Tup}9|Sڏy)ЋEW4yS̓]%*IUBw}BV;J Rl3Zʐq([G(<+1AӠqx a]"!ć $ku.Vd_4QЀ<È,/T({ȧ08ʺ2m2*5@T\cdLч[$~ڏGwSZ4؆?2oLϳM0<;"9k/V Wsll˥NU*mYf;.P~UB=o6ai)^ EGLorAx&ߤ{s#|'1cbvګmue/2L+ n=V*K^Wve/ ݎ.o.ZPu2:*gx}u8p_;T4㘪 K/?0M7ȶt1JԵ&Zb{1Y-ʫ`WU*&o v}GI8*I|/ +bݩVAjtr ԛaNkԭJD5y|E8#.`@9r%vU o;LH$ J;Y40ZӂgEA924Rf b=jM\Fn[N?ܗN{ TqG!t# ^a UM+I|/4 wӝV5 Ɖy˸ז@ sl0@80Ic 4 jqfUjunoΔBjshsIv_5R |V$MU8nk43!T5J !|: zmʽKQ0EPu=S4yx99wK] EۺWm^',0[j!]A@=0BpDSǡI>k~#+DDGCqqYϖn</^DpUQܶ ׾]f)#h1e_yvɭ Da!!B-Fze)_7['QMDN&nȨKF ,D8O}]*CFo-+qP8fT=#x/ zK{olGY+uTKNz1N'hSƞ< OUXkXҕ'` Q(IT/:c 
'׫z3pO}_4H]~&u'GDN=]o!CizLZw&kTewu$OCe~Zƾ"ZOH={lӅWw յ5xB!;s.WTo܎9X*VKP3Dކޗ((-ї_H WŇxo5 J MeokgF;e^ge-ɿ.5oګpv(Z9M,5\s%Kh~`D)PaN// ;Py(0oUw(5bU㻦⅜|ppS瑦([Xi [S$ "_퇴|~G$M[&qMhҩIR] 8$$OEO45GBf96Ս8t,z#\CD2i|AVUe"$=\.trt iߎkѭvu&D"NSoJ%ОRy5'R@5E yACn[c2>|⥬%-$x*lvP,x&KLZ#${J4>鍤b.t^{-5q{M|92NfcH\MFÆйL;w:PuG11;agCV.(׫Y↩ݨFM%c-`ƺ-3Mi^L-9Ai?S,G*"~l.myKM)-W `_Z QM} 7p^̿TrxyKL̪Gp}ҒCw#;ns O&̧c8m3K%[HK~MVB}.Qڸe0G_(# 9wm%0`ǀPyyAoiƦ;Tw"*EA%|sw%\#\ǨPG0T}CM#MVX_+B3Z3%"yR/P ]z<Am/=wW#JUÙA8QQ$ƥڌȡIO2{D?gGk#gÓ#j$ ]Tk Q9Ie,/ =h yUVcH_.*N/xJI>=d{+2UG?K{zX1 f+T[Z[X/(M"KL"[-rҊj@AbA.;N2N0a0rGRK®Umho%]cՂ!ڗ).j /MCUG}sÜԐG|VWw9>\z;UE<>D:q`j2L$XKJkdd3iv׋(zzbT8gW݁xLYJqe^}a9Y0S]f.C3/w Փjn|+mqemLPM (y*^ZF–Poƍ  R51H?**+m}+P+YKTp_Pfw@ꎼEZd\%Ěn8ξπUnv?2}W c&nFk.ҰZBS+VsC:d}<p&jUpTɑ@}tPfĩ!\})vi7 $8ORPy\J *2' xꯀZvoZ;jn?* ^ZIDH사Ip?wHkyUN!O^YY,<,wtf8U7Tmg6 Ϳ`Slxms_K7Dv\VHコ@pK8yVTxܬʵKLUQu\|Hj(]Wr ί^?kQ$ L3$k /^<4Ճ (p塿՚bBqX6qpv:ƈoT7dEOğ1GD.ڻcUg=7TNf}" RISb.ݥ@ym?c4+[/1T&df7gb%Gxr*JJvtţaWhdt2͐#ZN)u2~1'R@”([ ̵m|6ix_;1@ȫDRpoG >zT97=cdVQ }a_ 2\[Ozބ; .;kF'"ő3(&kb5&a ğU~U-_'{op42Q&Xk.i_kWIe_y:רe6m&뱶CuPʚBŕ a$nƸ&pT_ZxlDqĆ?a`^ (N%7p"I(bwezqd>Ԧy^輌?dقG^7-gnE2kv"ȍ!B)$iCv+ P#XX7C@GِПO-fkj&i}W m0JF-º>I.5hSW6vfm||癶?q0iNXK)U{_SZ$~ O-<5D&Kz m~I $v(@3(Gʸ#I]0&Yd"oD;EW2_^肁l@UV%_pzJJ򑼟/>q(af(R's?2Lvf Y: 5\1n0]9cwJm =T{"vZϠtK鸌ƓsT, ᘬϣb>[%ge%Dk-Xz<͘CtNt;t-"E̿*m3Kϼ L8>5mX xzڋ7( V >ٟ]egL6?L ]4 J9v]^BR^r0k$7.vo<]⚏]a`PL>pIRTG3 Qn(f >YWI,E M { 8~*-tÛTp(%fH Qj=+<4@!c W%t(L4(&iG>_MiGB9^ݝ ŘVe3JsOYh;>%Us;+iWP:ۯ}܎Qj@x=/3u xMDwf]e)vIuSDDe9lW%I\>Ikw%@8/ir_@%#_@C;s&RN#Ivi\WX-!@|=-*݈XFMvRCvQiVDRQ/g. ob? 
nrgRFE,>hlD uGu*c&}35rWM`,l  skgC;)[EWYn#GUYqZUyey^D/L8 x#mH[9S)gI}邹;bZ|g ݕ)Vܸ YKB̰̊v_45n'>Sߨ)bJi >WH oฎEDPI Y>M]zŶ?Z*Ey5Khp8+M <iBDGAOlv*|vuw}A0_m pIZ3 Ӫ@jxPc6 <f^  eI= 9qvu۞N2˻rKG_XF 1~vn,ݍH_yQ+ږmr!O!tO*:Z) }c,=I+ 7Q|*/[0\[`si*ItUw<%/qKz||&-*OT7v츧/) zTp`+hl\KFyz0t({7+;JR3h|egF)ԃNLZǥgZSf!=ko%@nH#]b@=oUyC-hł 3EL}ycL&;v7Ih0rB"ef6#~%& HUeRr*R U[N[S&sIsNÖ2Zh}Yu `tu:AbFnçf+[{?QC"C;e%Jig}Ö́[ giwP K<^3Gv`%[F@kGb i\^E'-b1WwWs 'XE{lXdso󑯝1{I+ %xL+": p.'ek?_M5IˣK4*kHz3Pv-:i(ܙYtݭT߄ɧ~rqsW\~Rc j>{7%an XhH>1>ONa:aZ|^˱mp0zr7zjဃ\q˱g}"x91]}looڜs:qoKqZފ,?o$jY*nծ 'SβἡlQXX7cը:Z wC/̹ؠp:=-8|P}2aO#apdH>Kw褈ZNHHqH i q(CeZ{y撪=A*Ȯjf Bb{+tN+ў6^R;D(-d: .}s|3V5;<@/WdmgǭEr2WtO= |ԯ$ř|ا[zX]Q:P&r.Z2?wNeƭk W'%3k\|טzP:tB͸bCOqV󼗶\/W3I0eZ!ZM0j>$)ib!`69V4B"1% Hu#~%6w$6DBHi!6!؄uL!66" sJLC bJLHBM.ZA`Id4!"a%&xiC= tZ^U=`\ }TߑdD@&o!fd]_P˼g]A|s 5]]/I|bІҜal|䐿 q fC\~P>{ ἶQljhC4o{z6' ;CJW9Fm UkOR,v3c<ȍ\a_dcޅ@ sGH>Rް0 @{y,l7 ?m/?)j~\i],~`*va,A$6uu?J?ž8F7bq$kۡ"Lg6lj~4/ˉņ㋪ 6;5G.xt%Wk) ߤwfG`^HB0~wUmOYl6nͻ"u~L+2B } m6j_/c .mu}e]W#gqUU~_R r^ҥt{1w|B͋fOYvߨ/nXUuʕC1Ow9Gʒteڷ&"kfXx ~BNyKe$YؿO6M^ |x7x]):F&R>=#Z]*ı|ʁi$ۆHW!;}eLnVsodVmfaРE{.(_/0&0>.vr4)8K9roo%@+b"\рW fcä<\ECy>mtز2 M;_X}sm:vw;z=ar]F ԩQHc&h,Xr(/ٗ}|xk!&gbLt2-@ݻK5mYLU5GSO6!/QS_<^zi4 P(PVPya@}p=p+ fB```k(iU4qM~,jFo7ae뷫>ώ"Uh_ɔ=كj_ERZP{s=5EȂ?T-/#nRkYwhw+Q<\1mǛX'3=TdL/L]KVqpȍiqXpl?kR3+>j3ٵ{QG=Mi-WGxry^޵~$D6'NR럡"W?Il/%>Jëz*;[f KdZTFA/K^ή]dJ>*ARDŒˎ" ԿyWc=]M+j Q…&Kǃ*Q ljlG{eZ_ک{aݞKqhdEOSeRw+6d^MM&2zz+4[߼lسͅpDHs '^ʡX5l;ghGkW5Qm9-["î5z]\.i% dG߯?I鋝}frgg-yn)kNݬ?%|o 4o>W=o9'R:,ڭ q 'Y~ QɘK{[;%f-i@;ҕ\F}ۧퟪ0|ݯҁg#Db|S!.4RmVEZ;)0z:-[Yg|mg]&:*іSG})G[\rSyx-pSuhB蠳&Q2?{SJ{wV%9/5zLwGm&G9oJ_uXlŘ.YS,o<.e5J،?1Q<.,WM+Trh%"g[) m~̴qGY6=A,|FDE6`H^\vnd+gugMœZw\ gӑI)!. 
g)L?|nR~*=!Poot^׽o穸xiutO;Oo_7{FTloSV#3JQb9&|vdLvR|>G'eEmWahm2~/^z* 7ݙOGNYMvѝc;Bz {Մ-zxĖFv&ha_smi- HXaO3F͚{\E.PcB[e)Trb)wGJ^&/<ˬYs4Q{tz ҟe(pǁ&NYw[FN14>TƨgAz4Y)eWvR5XxlUa_VCEdzzԦ)Hz _ɞھNMGY3pDd)Tᇝ1f]EW@Lѵ8-0'qkDCWIfI#Q0 JvGy'Ma+,U2Cy2^7dG`g&F[-NQOwCq2էMg!ҤӼ5H_Um'aڋSl}_ ܪ&7fZP:nl5IW%,c|Œ2nyf!kZ(rVoC2'{ MJ׭4Ȇ#-:Ugm\ۥ^VME|U}qK F+z>l 27<ğ]T*b-sIyY:6Z)tD3Z6b"+葼j%,~oVm;@ TW(毇팑C-X/O1a\AErT`l_VTK{Ap* ftחQͶ/*8ɿ5ׄ70#YdwvEjJ*;WtntM^ *c-G,Q1P&B[)KBYsTs5spFq2%:Q$\ݬYxFUGUTѪVPRMEңou#G,DZc! Sߒ\E - v.QԮۄ ^huYh8]XJ2Fܸ.ވw'uw_J)2匘MXxqzZ~~uٛC\$:zauzk?_K^ JhZF'O⚿əz'tݫO2~Vג1_gcAN~b0mtK*q F̤a<ǗU(F7/W GUo8`ٿ|Y\_Ի2 \ަflfBj;+VFzXk*KȨ~[~k}0AD ZE 6*87!SޱEVBe_(:B+kX 7h Z@0O_U?WE8M[3ﵐ gm{+ȸ3%9>ZHu>DOʙ>@2=ujNR)|(\wڀNshcwmFZY :6;)R)zQ ϬR U^۸oE NCS)`V96׿Mdl;SJ_P3J],4KaI+F^{Ӽn%K-GRp=ކ3&X|kb_LQN\{ʥ3k#(dV&l\{EvY4N ?fW\T&ZDKH5]JvaEC6zbU j+]jsgFu{vP,xMBaN1Px=Uu>}W9PLG筣i9#@`H}V ڭߴ3#%av2B%v^x=Y&7sP 5M },[b^O2K݆O:ҜNe޽sݯDƷLی%Nek3,:V+T17m<-aUFl}ߵ= [D'v&ԋa/&k 9T.:׾h~]g)[Io։]`%zkRlzd܏/oB9`qA{F|{[l$ Khbo̐M:P򪣷|)9` %P>'F7y ktx }yP%'2RTpa^+2wh)?I.3*g+S;o֯;$Suв boZWsb@vp۟oJ~Y+bYoh8[E!:ɚ6]F G8 x<6YҊT*2N+9v\y=єYu4%( ?a_zGJ@و`[Aclsh򌯓Iy{jaHi$uQd^e#,{#Q7i,U9fԪ ">~ OM;養ѯ3j%6ЉNmǑ{J5U$?lxtRޣWgk~B(wo2S3{|WsKSjTػyZ2 d97zoʞ.7>c8u2ZvohR뺵%)F9s|xW@NePH(%XzxZu~[ k3JH !mhIBjR΄nEZTP!;\h@X׸j>& ns&g4]~I e\ K.VJ1U%5,^*hT ffݕ\ <3(1sy.qNTXx ); ,1>7+|#Dٻ'?L@HFz/Iv/IA7R@.O+ZN:иdXْVOg{z#V%l|I jTݙ%W`K~̉8Βs+1Nw+2WjfUqW*kޟzy꤯\}Yn#}= ;ys/q s"߱fZ-L_ TA[ߥWa37ϡlo;ʸ[_ _}U;b7tV)rmybK7k顩rvI4P F(8wS k0nmOTrZ/VFAgzn~|#B]JO?v y Gmon Pj/C]Sū4IY)dsb^feq?'P{xMORTC;gBGNmIqO)P"za4=vf:ڭGPsYR['%ߪEw֏3zr\W>GK$e_Qb\gwGkt5DrZdEsJ{/s-ۉ*KVJ[YgF&rn\liنu7@XX_ [d%cJ]Zzq.@ʍxH49iwv骧Wp}J%öry' ӟ;8}B鳹G\B$)|gY"OțmrI>6_LXp-|nBU's`C?l[`3E_| ~Z=cekRhR^6\!AqF+^]E&C׾~vHcKaU.l/o; o̱X +noҖ٠un/"QY nܱ nlnA7sy,5Kκny \ү^.riwu;Ҍ<.EJ=[! 
w `s0U=-U0x !yF*r/z79Դ;8UʶY[oe*Qֶ#F#a&G~*_GI|e>|!hmT9rntW|Sݲ: ĵ g:ăLOM,R )Ǣv`րA wWR6} e S_~͍$ACi7zñ]JYrF7`1GFweFeݟݶNïw7U~OWYWUPǏ5}erq{0E\i)f5XD"-xKTۧy(h)ظ$I9׍;n-,$vM}6ce~E|7 TmTT$B?¸ϫ ͍zx5H)VizUOyM:.AE<qy+:3nUS^ tkm}Eufr/HRW^.h,yo@gڦ.,z$ZFڂ 6jL; yEUp#Ѵ-^HqJ4P+ Q9ܣ"Ĝo ҫܳ.⍄"7/^÷!Xoإx1}`*y'iwb;LnK;$3AE"Y~øĸD4oC ЎB}%&ze;AgѷF\KT8t24GQ԰4:^pW}] n[B[}t64-woqΟKh(fhq7AF+jҶy"htvieʥarl;Ct<zdJg.sό3謨aȗY ӋXü3[%Y@GT0?̰Z€3K`{D0= "z¹q X7Mke 65To0l"ϿWag|4! soWɪ 5ў&mC) 1 ҎwMe8ɞNyxxZ1|*VDžd.%b$n4Yԏm9ٌƚo5DZ[jc<Ay:AҵJe(Ҿ 5-&ML%4Uys`4*,~ ~̣^6 g:QǢ\`4 ^N_~&uĪ 4O@§i)h@i-"{Ry=u@zF]7더cGa {=012̏6T5"'CAc$s" ~*2Ak7&h:8n2^[ Kt̜DVgʮ/L DuiU`ܧ Zj*Uk4fA k(칂/*o?:Z`pvªxh?6ڞU[8FeCI}9BHV**HL׈7VeSZAhe,eJp)[%A\ `T lpaڀMH P*`Ò j >FۨRBOk=+j!nQ܍)4=pn63J&aS^ˋ= 5Y־R+ڧe/^AK#P5 1:B:)~>>ʗ]Ky>Fk ~N"dDAwRgFd6.b%DޜQ7$`ȴ[}d@ 754jԮ}x*iUvDZBr bweŒUeI ہ5)>7Pzn,_gCä`S@qWY;ؠ70auL&2Rϡ*Oqq>UHvI+'B潐d"\|hž|7Zr9+'}YQhC~H0a̅u:p }0_{}o:ö o}+wúVͷj(\D :4l45;,t1#G[D⨲ vƢD8+M&#r:.d)j}v[y_ V%ݐFMff*ʥCܐ&zA$Nrf5KtZP?y+; ,BAr ^rfgqrSYC xzB.1\ZN&wYw8i4wGwS ;kD,c^`܀邘n@ Y nP ^A}R7{vQf?{-}RdvӦq..>MH{pw+*E I2vx%Nox:kxE6$k-wA4!>Krr>]~&5V s9@q'~T& ~1g$ [fE,~E%GDFu"}D1327K a)/6jS\I:uFK1lyxrϋ$H.D(Y[9G8_5~'"?b*k!@-  8;ނ-{)rVgob8n7sHa$ >2ts0Z#n <]g9" E,-$4wD3^qZ8/\^`hwlJaM; g A| I% b_a ߛx|VϜD]tfFaYeH ) #!)w?ǧ2G贐H_Bzj1!V=l*6<4cNC/v-K<)\3T3}%) `쒼Yj<|N;nB E f6\8"gVP'yTbڍԪ+O|͔_NvNj`J&4n17[n"]qE p"z/?tP |-Ӳ27(tYu(/ owqFuvmº]B\b_lȦiNU!HŜL/곝G }λ)baRv;Y -v?x<%K7!INsMkV,Y30x,dmtd^XI@c8rٕ_7h2q =H& s֘ۊ8v@a$v*.0;SsY9(h3uaIoOFΣ7=<(T0hs7 FiO+.Eg𕯆Cl:<Ѧml;=@; ?HYW7(TO#Gc,4U:P|@:Cps n^_PgG@)]9/W CЏVk~Nh3fGW4bǒ3VW .8jchJ&6<\roF̡b''oj;!!RS'@ǡ5mEݍYc/5Ok (.у151qh'/l\Ѱ˰7y O)$\dOUV , {{aTmrݰl(nDu';(10<Ԑ, 3]iM5ep9!beY"pHpe;TB́rG뛽\Zhfߌ{GSMt|("QZߍOG'd%G!`q6ikNئadvoxد ݯhDuVZCbCXEwB4|f ;]4n* ]<_ШΪtXema 87OeT b_BPp Cc'O}ڒQo :$bpԓi^=&)H*Av: /}%gV2Z=;w[|PK= #J?㰈!\-{oQ\G3APa>V|\qj(d1i>w|>=}}'q n?lXc*Q-Rs@40sa AHma?G==D  ="KfNvW/i/޷ bW0fAkX%]~ܐL+ pQPxOF"$[PlVc= 'gr.]rUZR}F@sSl˹U juysFjdGa3%ı7W*z*?<۟weVB_~ L(8A ).mh ]"RȖrg0 .4XjM}j00Xc0{/(c8|>г,N/݁h8NPp|K$=?"اAd RBfO;o; 
Fn!tV`?_8x}߰vpz >8C @phBS@4y9H$B}K-(V1Tкa}4y!R /C>3h% 8!M. /9ͲRP 4 p2{4zmwCʰz\S1s0vm:A?u1cv@$'c6I/G_=+>C Å naPk`8䂀Ó !0h9 Ӯ~?َ\w}>}Ypt;?>tL>?{aT7c\m\M :RZ( S^IU2Q %IԮ}/?]?fVdІT+;-cB( : W! A' @!sn-h[*׫+fQg0恠0V o me:~=1pqTWXܳvߛ+86R23|iЗe>ȋFHɡm7qv'[~,N+6q =|+&:js`Pk>BȨ^R²av—߾zYXdQf's#sn",n[6|6!ה@XlA@H;]@jxoÿ9kIlz~,8n!i,jq~Y<O ֟V,#MBb.,,_Z~c`6ޘpnu*jK@ R-IˆKyyV^ [R*;6W'spNEgB8# j"m8I nHdm4UAjq_LD[8^m!/Y몽$aݟpY4>TK.3D)=vXSNVIVXLHQܥNSM@!"@/?h~{Pz+gH٫xd\ki默;'{.~*ޙyƟU(6"@ B`}^Gliy6RȶZY]nsg#臸.^){z~c#'Vדn]Bq3~b龇+CWJj^YE;g0 ·B+x qFub~A2_pBU,3NRAޑs𾩼ZAiaba;M"'@UӍ΂vwp"[m/5)Mk99 AM[{(^irSټNw=d~ޗ ;0?xhC4 $ Y =aaϙσ.;Eh 0ږ"X|BMLAgt -cB3)+UΣam. ʴ6#o;8>ƞKmu^cCh BpSV0zgA @M5H5&u@k4Pvf;ؤ1afu?~-_{$D)E+UԂC~9A X`*(K^_\ ߭~,cO:e3pۦ`xFgngcFo¢{v E"-7|}oq&mrKQ  x%I$xddܠ4@j91=O+o/B2{hvgAuuP:]T Tx L7~ aa`EKS }fAq}cⰐ' _%PC.l7B0hO3B ? 븜 iGxcRfsr0brSx</nčSC%˛.R3N}!I}(JhUEƜ u Ҧ OMsФ.+ vӇv }~-|N~\J߇~F;!D-VpL :eA*c>~ShqOT{7R2"V"wujI@P&->#ڢvˍELt:c]aaw yWFvY$#["+$=y(g\ˁna /5鄃i0szl:q@ sj ??t=;.f[v Ao6~~WIu/kf7-.\9 t(ya˂²hLLd81rqA7X;.s ~빒 &vp1~GHLq ^"~Tv+1f w)t`AH- =+7ǃ6wVN5%V! gaKE:˲kǦOO_cx2}R{_[{st`ф̀53:܈4a(M/*5φIldEB@F$ 08&%q 812' tOix |V_~Dm]x\d l6p-  τL_ {`7+mf}O3cn0 6`ܟ1O%Nop0&@% j%,4l|Nfp]v߇][IFi @9gXAu`g,kҭXZ(5ڷ`KPLx0:%w$-U_k 3D.zrкհ::|{15ݻاs~sXhAY p <hdh OD$k|z RD@E9xsOιΏr` ~=B >#SUUg`vvm3R+ " 瞈.^.O&ߒǰtֽ I| <ХtW1A0};9~"V/Ў!W??E` ^fqpKܔvqDcUPo4nF'Q0~5_# }^+8;ЂoeeA3 :jE}D6\4$t_}g"~5y Vߧśr,Q`}J \OX-@l |b7G }hDPcc &x8 ~A,6f3?H;_@@-KRBDz]\?n_[0/Qj8?HC6JF'C@v."+HoNK4\Z <˳.F0|*wtBX*eix %Er]qcz(q")b7h&iI1~Ե&D>.˗n i,g GIZi<<@0(ÈKV,hGCgz볆<$Yw_hYH5D N^Bc}8qIyk_>iԴsWf[uY0(Cg$2gri ]'Ǥdc၉AB9D@>{<[vDRp(( #Etw?掀}%ߛ3%"eNsdU.ap7D5 h'Wq11C9dȵj w$A2j*vȽr;A Y>g9K?2DܭBh• k)2rj'@ҊN]= 6;6w9F@** e +=#CU%5Ͷ# lw*i;[~ѝi8]K&,x%_a344D^Gȫ^`w`Rڸ`@, lCkʠ"tm56S)RUh58>͓Yx'%s,9^T0I bKNdv>b;4ߟdx X؈"NE7f–KPT дB v# $fdWTk֌|F+'B a7tsl)Q 3T B3 \9j1M,UBkY MZ+:!? 
ȼ] pi3g+p>}>9Tx9Yڱ[zt[xn$IIw i`seAPK0*Up %kPx8'ffaLD ,2e7VyeAZW@8*$lc섐xcDIp8#9q%j?E)~_Y~Oz~?s0cOk}1o13aupQP3֙ד?+>ur[Xbv)0X]S,x3/5)AZwJ5mb*&L|ju?*JItA۳ܩ!ވfV#n4ф'pbDV +d)]7m_9 !óƖ3N<-#v~rqDl!pHbl4[* CM,ICm>b<:&b55b OvS1ޛ+c&.oSeCҔ4WKڥ.suf0l!ȹrjTuA^ ô~ 16c {siN B, ^Չȿ*M5^CT*S/+QG̏Z\erv~\bIsE>z|\CCӳIT\IC{mVZ /Y_bI'q3XL.vϕH#Ջ_Ą_QF=.!kS-%9΢ 24mP0]rfHC>[Pj~{>ONJ)sdzZG"q@4|8=o[yC@bTX~DPBKax4FyUt'3&Ii|PF82+@4[Gzc"X>RЪ؀VߚyILD(N&>aqr3gf:)h1>T~[jSV=.}DnvS\'{Kh.R}DVo_)=<,lV$x%x?v'rcE 8L~ِ/۶yω\Q? (u 8^Tu|$!v|vT]O?=f9n?SKV6ެ;Nx.;ˆ u)NR4JȚ_ƞsP$Kg3b prYz(HBvMO@5><0 [cp:὜"z(3B.IP鱌 ?+P+i7W l/8DzA8s\vX/SWnesXĎ ɽ05yY}MO00_NANq/0N Gnۯk̖ƞTcOq}\ nUߧ5%f& +飋= YE. V o45|pVIʪSH@6$$ASf~.<0v`.Oofo7Fl&Yl,.G1g\BYP_l(i)|.ԋ[F ~dp*ݚE2`HFDesF'0ł Lh]QA\f0J0-hxc*CVxeKF9;o ]rڑ,C1ڈhf  o3y hgzf}_KRA+%` bsG-YJ[ ]`#gHPh'cO2P nZ6*t7ҟ\Dw:ZIYL]B6P` z{~b`lot;xuaZ Jla$ޠ3%p9" ]5.TB'0XWF oio[VTuaߏ 6-(\o^ |܀Z3_b TOXٰ(-kVFx[5R`-B/]N$qp1[ >os^qIF[Y/Ź(ٝKSR"IE9O[%swIíq }`g8휭tO%MCX̬4!݂᤾em񥙛bFcqkfH~Z LuY#>ǘ9Eݼi,B d.K'U\(bqz'^F 4($%t* }ta!d,R*T(P=ˌwHc^M6p=]6r}hn(} xm ` 9@!G13b;<Mq&,S<(:/v?\ªZÚp+G(x,%/}!(gK-$Ì15(%P~fMaUV[ }t`0$םV+5Es~%lp8Nie7&3x@iQ<< mVwf ޫW.W>Xyl⦉${/K4Ƨ3 by9)~ $D%㪁NmocW;v<Ϲ\vd^nף1x5NBՃ8Hv_rn@9@rh籠jO'@7<@KaRE4p jS</Ƥ6=@t\˜{B Kx/NHf'["rRwV{Maȝ×CJe6h%EuEN_̴emp>`M|*D;.P % bS&ˍ_{ofyC9L2iN dyu|뎧{gaL b &AXϩȾ4{/:D^j 2u&Zlh{1u ;mc[mZB[<է)tJCe>'퍺T`k` Fmu&. h੢uǫA|Itm `c~ATav\C FURl! zX(W*xxmbwjo?JmMU]RD~T>kkP7{gNoOԶfoJ3p)nioATk {l"j,V,U 2޻cHjS@b_d],^Bp'4O-*./ϓD8Ds3LO ? b=[QeD #.pdkAynm#+9ǻF-xWN4*q L~3^XoNcx@`fwx A9kےWu&^h{PDtSּ[%*3mkfֺ@"@߶3Îl8qBnnŲ6S=1P 9a(oB䌾5l՗ tb]83cfk9wQچ^2Wsl\UHvo  $\!@V9ǽ |l{jki?=G'01+C,4@B߀\6b CTj0SJ׺&4FS:Aٮg? k̨ A vxL.Qe_O;҂S:' դ)? (v.p޵:4V?0F#^GS]O+3^HB+:s+j}G] h$l~FNSI ЍNds7cNǸ+lre5e`39y-KsbnKM7`DpAĵq85؜ 'X(֣jMU2fE?,ߛg߷= %SO2,P_FiUqo0oA-|cS,gzt*h|#ϘÑ?#w4O[\"snq1fA6YDpBf脦}~K%W/YglŔ^%\Ýֵ9VLMtFo]u5YG &zò{i IJ~UgB( CĔY?J7ݨg?9k^j-t a.*Iލv5`uWx=yt|ɒn3 OvG< %ǑP=x)EI'2I+7o.m1_!0wob1? 
)?oK~Ht?^(V"ZٟT o|ICp( Vу&`lۚP3r5y8 _!iIBnw|HҥP )i,A+n!V<$[Z,[ZoIJ av~qyAW>I?OJHpq7Lf%!͎Hj3>$+~ g ܋f5G(\1+l{BFT7:Xjݕ&x9lh|Gig0 e:'}[Η2q?z+h#Hj܆8#ن_> *R@WaΒLo9~\" 0,ww#`$xJw+FfoFا9;ݏO!(-xkN3|ӟn-S8=%J?$qӈH/@@cR?1eMionjȶ5cihc(!a *}ǚ2\|ALw}3SB[N)P)BF ]F>jMXCùrBG&,龁PAg%>h`:߯9M-xB \ vS La2t;tbA߯`TXM'!ɳUnpw+<-BL'y$ӘB8?\Y^]B.kb-.n[4OXTfm&޺ez,3 vP~,qm70/'ci+ B\@~Ñ卨u 4o#+3U^IqT_a! l} O5Ӽ(i1;c/WWT"}oݳkQfo=!n8x &~ yR7 fA rʆSku~]wЕ&$.s=%*+,Ne'M;{'^dXza2fUOZ' 6 nA]40Iͬuu (%N=A|YQ''uk5NNJ6"LxT>x;;smb2K62yzD9M>qb9@پ|64G!f_aN$%i|:XwLk"#Z4tk6Fǥ2Ap`PTC!!-54cI`P/YLqZκ2s#,B%#YS17O{F F8,P s O;rDs \20ǮkpC6ˠަ!}y]+<0,Ƿ`;,黃 C 4MY zr ]Cux,L)jNxI#/XF?WVA61A3K@~!p^7kue 1aD 6'4gT3_ W[pw72m`rUCX|y JE^g6ZfH5;nw@qMo1r?ci;㨝TEGBu؄$* >&U1,ql]Mٔ ̗Ь()~wT{P M}*)u $Rl(MM(؉‰B0i%5Z6s4p+⬫+2k$Њ:HLecO&o I5v*?@UY:*b@+:-79uIe%,WjK|q¤GK~=kFDkTYh;}W&{UwE_'e$P0y}d  ˓{(In@*Bs"-hx1dmjp^d3&h燂j(ϠrNAǨ.UQ |4~ WK.bӧ5/vgJߐ9uje _w?򠀱aBTT`fҢ7F0[MB؟zڒ۳px BED;A?J&Ur00;+ZlzGrD>䴨O:3{]xߏ7\ުW5_.Sy;&u]RƯ_k 䒢*^(BA'Sd:#jA)f*0#wݢ{~'t,JҚ|7>\֓nBh'*6J)K5:2-=.uDk'啋@8zˏ;5Қem1/8L[(0MͯaSNYBAрVݞ|o?40p\0^ג{;K$nNG̶<(נ,ÅlJ|TOzVQ[\m2.̰wx8RyD9*D';s0.Łm6,G?t ubKH@o 83 z")5n>W7cw-Γ',-<0%M =Y^Dǖc Dޣm(&*(b_|FLA[@6(4[K@MH6 ܞDOzݺ?N&l'0\7hfD4l?x1':^ɱmicOiO7G=8Kgj~1/l =?#wx ׎~=my(;JVg͟'OZ훣L9h2[6% Cᬹ !vFF_m Fve'E~TbK(Q'3 n`H0՗M1K@:av_Y:4Bxqpm>^lla˩9l*= =[4e-dIͬ'sa:_jgpm-œor1TRf7?Uj"Be TOxEz_ O~!gWvɵjd0E"G}lŎXh﬒orzt9cn%(>?J3\3$'zם#={BMivrzUŬ}QDI#zV,h@V9WBġI`1Sk/j@B"}c{>RqdC 2}b ,qK0B[fQt[Q9\YC=VPQPaLrܻifʼ~{+uxyN~=6VU?ߨ3Ӥ+ݩ?HɿM9b/<̀]^Ps a0{g_ӝs* x3_d)Y ʨ7E&K&Y. T9 \Ykd ?e3'Y&^l̑Ӟޘ>e_.ԞĂ8GC?ยMϔ6-ΝEe!kC FJ~pK?pc{h {w, W5rޟ201ft$O/Z Az[+4h̔^4<&RDn°nKFۿ:wf?qaSA%D}`Bo=Bz$Yñ g *1B˫O\Z !]h~lCt=.!4s".% s (BڰNz4:-6is%/}XǞĬygMџRH52-c\ /Qtm98>DH L{(!@"zpZb3%c >CMF!3-=yR15\N/NX?K&UΦ ~4f{! ?a¬o_<އ絩5m*_ x,XPaa0x(3 >W*H}ҺD0;}BZx͹,+~nxgNMj@V%~%+o埌?CYUA&&HSPi>zOL (D8¡ʈmR}ÒH>\I>+sZ}ͮ.i^Anc] @`~<"KƂ!Ke?MIE#&NJ ;7I_" KWX-u1T]v`FNoMR~Ռ"T[$ \sʶ;8$3_{"7)GA%zT~Ƌ'1c]]_psfY^g>:^@+E5 /II#)fp``19. 
\w+^w@EMvO\g,OJ"t Y3HIT:o2c 4y Nu4{3.nPj;r?x}r~c}&'Ô&S;tlzбBYv~/{&wIt( =,ѐw-Xv;rpgg|kzғH7S_Ȃi?Γ#Y9 N L;܍?yzGmn!)Y"NlT OB JzRYCIo1z`q:L%B41ԧ`qK˵1?kV' l5ǰkKV]Ȁ׷kaAsg5arv"edstJ@ؠ$J Ӎ-}U*3롔֐y..yTzG$&1ڿ\X}w #rrQ8D :"AϭQ@L2ʂ x w-B eD>Vc1i˛q!1xE52.Pʱ=uKo+`nA{jX2;#&9R>4GNb֚fñv)|dŠ&ϐ_~'l]9+4XoҼh)-v}[4~:W) 5Qjv-)#}Qՙ\w0ε[W3\x#KFhoc'GXP=GgOCT؜?$޴BYidE(F.[ ,KBE*UA^~^ P,W~"+lH ]UN(:ɏ\ǰIhЖBT$KbfJb7j0TʲOulgHs%#O`,9n{4P}O[q7qB17hut: ˝6N6Jhgv)/5x#('ϔ֮cl.q%Ꮔ˦Hz(z?WMpljA>T^ +N`֡Oȕ`mx-|?3p :Ρoʢk^9 {IEkQF2svqq91}Mx͌R}w`a\2Ý J_@si*a5,ltݴiikYBvDQ 0exujfE_tq |mI'/DjN=OMTD'%"#K:a]9y!3tbCr:bՁuXE'Z-Pm/z8B5I\F|3I .K4xܨD*'u"5N~0*km܈r]ISGTЙ lЄvRS2C۹x>DO7짡csJ D.!tD~&!a7(:O {n<{}㷉IRuWm`^YY^O3H~Wc Cؔ4R-#Iο+m.%{s8Rh5*#DEIbj.l_R"=?o2 _O~,YHۜuK |Gq@fנQMܗglɹHgcRjT=3m.uս56@vzv(%8s(\lͱW8ΦI|CzzELW>?Vj ťp`K-/֜5wM-:[ El]t_oQ`J˅OSc8Y]]Ub"e64]h/=.VV/*=ϛ>hcsΊ5d~wm11=nPnGcq:5 LyV9EXj(\O2QUi\$4o,Zmmٰz j,_(.6:H6Wm!;e,eN"P4h)KW}Y}SXM4Czc_3~(|Fޮr.Ec [KpCڬ jlB([?,z{y>#p~gz\\O"t|‘oذ?! g JQh.A[>7v{v( Y-]y:Z0?'1Blw$"կ 9I*޴1$渒Af7ݎt&er+"2X 7=x ^" rkD$vaTuy+*XHhKv • #lL ӼfHm'IEwMZۉ*>A?|huPWp)DVJMk罐Be.+ԩ}|>6 5Q`, ?P|J"},0W_Zo!p"|S7//g:>p$Pfއ`}Z9s˄#жMu1a5R!^ 4X)}_sR=2;hF-[iQo u8 |&" E-B-ݫ7qd D ߭|ul#{y@ǶШ躴@ZvRAbo8}u7 ZYYMY3&J<&8aWQeW F8󅱖N|?@iV4# xp}oGh`DyBM4;v#bم_QCހ k$/ՌL vD'ҷyDf4y$%y5k0a0ڄ|Qc&UAipl|he׶5Rz>.L# U\ulX6h, ~9cV$oP(S32Mt#QZ`o@- zVGs+f BWOIvgK:"'b'BU~Sk6ՋD]uCP{W$ZG)>2sܺB JʭsWHz`jwia'buzs0=JzoojZGed#4\_ј$Vuo[JeM0!yn[0_'s2/ϭ62  `Rqd[F6~]wqT\{ |_خһĭK3e $Onkqb%Öb.̢%{JAF}^Af#Jᮊ {Y2vY{=V x,s%K-gÜQ"QnXzt1_IH%!( ʛe5$F0HZF]\'0[;uxA5ˏI]=`$~Y/P蚪?4J&JfKv-I.ziv.گ*tWOGYVG}=frsG_ &*V,}NF̟Yߋ5/yޱxet+jkjGڃ::+Lvbjgsw&_ovb.'HlB[?Wxk%lLa)ܸv?qd0Yc|nkl+f%#qZ_*=ğ{ZSdb,"tvS?^Ua[碽PymX2~n&cQF]w'ȅ勚VיtO::,rM\j%K@>߂6x8isCbIΊT 0@ i!(osFZ~|[H'Ð~L.O NǟR=;5+,ӲwW1).A f;e={%6csNuu&e f~4 bb<ELIEШdD羟A,;bj`};: BJS>yԒr/DƤ['O~yyoqJ@Q Xm]蛃A@eDp`H3\5=zUdvHX![G~jљ!1%jzB8 %+=C[X Y>l-7I)J}Wi̊q(F'mFog Aɮ ;D9awoIaaiTב  !^F]sryC`0b׳'e$ $) b !⏎< |4OC&RI!V ٦gzEP~"tk.:C k~ J'%:zgϏNP}xEؔ\vq4U~7#l-#͛H<Cu @n+5IAqF7!P_˜NhwbX8x aJ qtKBZ2 {VSl'kcǙB٠_6G7R 
x'|Z#K1WXg٥aٮN{W;܇FlՎ&|}&A|b,WLdFz(jZsdG(G~Th1sQXV[\{$:<"SАsö(1:,ƒ)O9Xo Uz[Qv] uU, h4rznĜv7#PR*ZH3Ј {#XT?.,- Ϛ77푒(rdۄZ3!k?nl)ˋ\G5=L6ɋH]('ޤXsg}&b1I`8꥾WKyvwH]镥rvؘkV=yJ2Gt9~x`~l&bǦ}^.+13VG%ޮE'pdFJx W}*=@Ugj||I"*up̠D~>RK3~3Y~Ju''et֞ .A \6IB߭B)8Us 1.K=E㵅Sq9% d͏0hqL8,;d"gQЂb-+UpY'V◗ץC]'ʒ*!rﰔ~J RCR;Gt"⦐.r A(V'4v^]Q2(s92+,~$$ϵ7 !^z$??rQAK32Ǜz~OZ]s!D ;(+\D﫤5qx6|ݼB Ë4PssWp8?KL J`?#ly:mH(Cw̖8`ڻ@MS@Qi8'$F=Tyzk~]<+$fA \GH#A"1uO$Fq~CeBŞ\<"g֌:5KI}7ա-@#mrr}i(3Os ٯcAմ(PX f.ը&}NJʥ ayWcXE|h|6|yl |&^,).Jݰ‡}ī{ٿ~'=XZͤ  ^l(ѹ`m訕ָR \<k;/SCVC^D2=]C&Juvd! ,Zq䃢S#1H1@OB`.%E F5i6h*,&*.|%IﵚVkAւ^)N%0ցGBW VbȮ [aj1ҳ^*Q~̊G2 H;eJHJWYT3¯N%݇Ϻ¨{kNi_>|0{2_Uyݏ m5o3B$(ϙa>LlGTi&_GS 1kPg(5Z# 7 (,x+21O-ORzsk {JO<3@`O fmy& ,fmfPs4sn[ D,D;bX5Uˏ#TZ}8|`ר]ρ}99Gçk1%Y\o*-RmMp 8'uO#upj,ۋ-a/cekYYNaG0EHyajjdl<  6؝-h3}kZXt ^h, hr t`l.# oua=#OE}Kqh &TkץVÜO+ 5tf=ßQoCg*9-tN%3gKZg2Ga)65a +aN[AO] 5e(6)(*a2L_*Q4ADw45 IB Vd#*ONJS bs1JVCmz#cKb ^b঺pSoO٤tLు]"4H"_0dhN9Ke^;O\l!gVc^ uVMtCDUpXC0N%e^T*%KfLc?**M,糵|YA}. emv)E-j:eEԍOW˦cn/g{ӌ7Ypc(U+'7 ђ r<ԃ~kASg#ز\#+7pzSP)a}ۀyD>;pO9ֳ\#ŝWG r\ߍ_OaWy ̳dD+_e!Y jy,}Y Nn5@3qPiM"d} OOkmJAƽ~]j>u١ljRsy6MQ~xg|LfM-GqPmLSHb:-o⸡ZPٌ 9ɰ3rqj>AKsOo= bNZ,WyCO?"vE,ʭ䂲&+(+U:?~rĆ^j$ g=iӓ`rd)d,GmG[`pӈT0f7l b5y-c"Jv^FdrrBF`?3W?3B۽HQƬd6K.ݡ1:3" ,A!dtLYHsP+ V |U'dj/GljhfM>ml1T{6ٟP4{\↼o^ÿ!SgER=MN j0uV]gK8YSM".(&%?s3Sxeyi9α4I_!Zo`zk 16Ebf Ş;ms;},"Y7?eWIfíy!a~ЖkkwlHK[F1hXg@y/͚ua!=/;L(r~|>j?6kBӞX(*rA6,HT8fŁB$eE%I+gAF\ݞC+M,>qYu/> ,&2[:×:ʌMFI TۘK|ĂzˇO٥4)6AJkA{R^Eiٹ:+Tո![j}.u"z7Z8,ɚD&I6o^R9r-k}ny)jTP?G3sD_tH1_#@ȔbWOJ@0)^Kw_aϹuF+I\Ʊ,i ]/DD/ ˆ-1 GT>9@PP-/Yۈ^֭k B)b)qE/p3~(I\evE"vΎD{/JԿMx %n ee<@7$Ps8߷ AҸv@6Uw@".?2qS;-z(NZU:{N[zH\e`['6'HHS y}02a]֣VkbGic`C#NFY!@t'վC 0*Ae;KT On,<=Y6ut$bF쫬rCA|[j'ھ(Ɨ޳R,ש(2Ҕ lN(ǁZUCk{z3FWfoos*>~A厌wry8Y1D@~AJ @Bri (.^73i'# JTC> d} R]:=e2z Gy^MOE#\q/3*q#Pݖ; #ԭo-jK.9>/-%U/tUUl ~;~?r,=Jg3lB)}g^ci)"'Td+)'^q̍) ? 
BquGDpt,=\P|WEfs,( -}>Z!` nnc$ ^7fAhOSGEcunYi41:40+0F\}cӯJgY`t͗"/"k4t&un_e# HH},BWyVLT+Y>6@aSTXq"UL+ 0b4vmR4rܶ=qhKcK 8[cRQy'ÑJ/^FYX+Ɓꕦa N)6y#˹XCUiBI5V {>* žO ťkIhFWj{f䭶<=un{YfF ̸^$ LH@elb7_xRuw`[#"16^Cp$Ϟ3K I'Wo3׬R.ܵWY _@v•39> FKgG{Dz.LӚ*h^ Fiʼn@қ:v[Z ;]6Fu;qTܞ*{kpmbC``صo=ew݊E-".ʫ)pjKC=>N*0+;o1q9i.$Zxv (Q4U]R=T]7%RMgVLKQ<J\8O$h T .D͍*gz {8FQtN6 6d۠[׀@;^3nky$P-#fÜՑTyZ?Sn(BǮ}g L>=BMܼxk\_n,P)~{V2K/SsZ9i2pxגmju ?ɔf}TZ4'=[*C/WӊҨ*APR&5vg?Vr'P듃#;Lc6w[,N$F u2KQ. HȚFvѨU+}}M}%Xu67VR;9]97oѹfM'W450Vħ_rXfIЫe}yA~#># T;g.&n)Zeٷ*p SMNJ,WrE o3%:G{KxIRcH[:էU2{eA|zS)S]n!L/qiTuX0rȒ[%L(?'V Di5MA?bRxB'Ӌ0d\G.kO7#WG!183"fL|\Zݤk9)5u = [s3h+& 7VOCCgWi%_q'Zo9AHv>X1h8ZTu#ԹP" LO>9/ru^dh>de3݋8c9XJS$h;/w { ,W#`Qz㳁a8&&f>ڏC\Ϙ>`>m Ot98wuMڭLDf=o}oH:a/r-MI KܹhOϟ3}G}Q_agWsc__QOq&*=X/DV(p0N71 F[x !yaij &fJYg';U)Qշ IqJ)ͦ֙KYo²~ ӣI$p9( /SWg/ꦛ/4j%tiHFOvҀzȬ^T/4uмy:׍œGR(Çu&AE-LZW$/v.ߗ&"E!" Rzޯ̦eb,32y p o:t w/.竎7Y 698&XL6)ҁmh17Ԝ卧:[f܉ uWKkfc:mTi]'آ`2lKq^eOE7QST}.qJ?ݚgupjPZMDPl P]%26ߏ7:'q4‰C<n3L孿@5Z&5LymrqYP1İI-'UT鼙>ܩheWSQً1;d2܊B,ɞ7uCh*)- LR1>cqƟ|eX|Rq`&s-gf;73NTYEҡxAQ 6m[{\ (6# ,Pg6A,A qSU~ a){.>,(^kUI3lP f.;y/`?T" Q4sC+5"V^#|L>z(ڟlm  p>%cKo?9ƙq͑Ar2 5ZJ>ma/iskFRwDxZ/>Ũʖ̆CgoɌ "{۰ջyN>ҋ|ݸҹ&4gAk7Tɡ)KE$ M[( Fy >%^~ux ,gq>+bz]{ze`)D0ainkJ0MO=2Cw;d/]孯d(E#ئ=/Ty⁤ O1ןH 'G}D!giV@Gy{a*y)_1|3S$jait ŕ/ƻ5"w&$b_ѣ; Wx~t~,U mφyQa<@91#X2fT($D֬ sJc\.aztQJ4s[9 ;ћ.ӂ c .㐀< w)8vh8x+S0VsS.n_ĘPd=rU; @>RzkŵAbNaHѾ|Oe`B0wg bU\  zǁF1}Vq} S| 2~2>=؀(6klngjc:u˗Vq2BB)_П4rQ-e𑼾%Adu6N}V 3W? 
׋abފ)TёmD+rc?fW&$Ci'`z5B`1)GcE„:hCZ1'\}!`E9B+QOXť)\XG =%҅qM>oCBΊz ָG;I=Fj.d_xa c "\`NAЃ00.CSu%7),5W85us?})v\DXaH|=RҽE7r_4%2Fev21F0͛%37xjE.4h +(eAۗ"ɊXX= BV16h̶-%dt[YY$D2f"!$eGxP yEY@ݝk5\UW_X!*]:g@|yxBKP3y 160$Bw1\>wCШf%!p[x1Xe$);0wuHWn-/b3Lܡz!RAϛK Hb\X9MX '!LחN"s& 'CAcȗ5z+[Z=Wb^;3孅>`\\nX2u&ڼLr.XHSǠ+HH%ůK`^+C󷌡YPg!}uzNqd[OSfJ )C-{ (A{; XQHς0H출H8ioy=(2ף)Lr  "3Z#ͥр-3u-lMRt-ޔZz7R m +q$pd#mOdZG+:dv =@a&^X!ƔM@IdCge\jqhV,sJ H#q~C8HHǑaُ~GC.g!\ʚG6n Qt.6Z}d 8gA8y6.E/4ܦ~|qM釵ˬJυho[[9] 1Y]`hL6=r(ژZQmNh]n= {_'Z|M_PTܤ,2eEa=?Lt_n`ǩ-mXA4ԣcjnM8Z4ʗ~ I\@-.']Ÿu?گ->{4U )rb6, 7_;H^ oЫLv0wUYKw͍)a0}j^؟cT J [K.t;.vY/Ћeq$ LJ M%EE$jl1A"*szʬ/\oh? i8]SHiJ i9$ -(FCrk-dDĉF 8@oݍ̷佧:$ӖY]SN=0$Wp_B.I0T{[nG۩ ㊍0?==N[}_)tߢe(PXiYqEM_5Vrp_܂ɳ|tT"[VXߐE"͙`HgUI$Ჴk)sRϚ{ϓcK Z` !-j(EJЉE(KmN;F%?sꌇR5UpXB6^S|,U:D ~ECכ @ޜ1WMHFv*kr*``t'fzo?~Om,χe>c@p2W]ݏ=fܨ|^t"}sT:T@]8밵ߦ@5(X\ߦ < ~F@ Z e3Z4J%w*K?Z cRJ$H߷smr!ګɹa -3%q >GgxUn Te.Ά$g9QGsCz>2C *9l/;uk͹SZ~0u(&k?5t70}_mUe7MrEv2,C" 1]8(|QȨjvԤ,wZћe_Wx]Kœߪuh߸FaChTaq?n'0N. TQCb_\sd[9ZJRDGl#Muzd<8O }{}* _#M3R_Fm@=7Oq@B~Ptwu]"dl_|GEdRPDޕқ|o"{ r.lFxϴ6D_BQb"Zĝ72:ѩVQ₌}HYZ3 ~rlڤ~Z6|08[c& fRmq#PSJSw_H\a sG5NbrjB<OWLjR~~`,jIPH`aJ!zu}k,}mluz*YRK߂vn؈1R /ٲG,72X8U%Lz%8>(̤Icf}<!0ʤ#vhܬ !y6sA^7^CVf@* wo*l%-gG**EoR1۠0Yy_7l<[u`̙/}P)<vȬ0>.wt~5)Bt%!KgiJT%-CvHt\W\T|o~l/3BaȀke:MWt˺&V{$]dU zSi։詈G><޳Tc)B{ 4$sY@ښf9R%;z2#3/gq:ϤBds olR 2 TW+pRpqWIb &B53uW[gra䮦^ xD(_VgEҙ/g0лޫxxl.TB7 2p#4ti c/ZN$zoB#`\9'@6ƌ;!(ΙH՘;/ZP!R>$cPH4;HSi6hIhIWdH;bc`t$ZJi[^ =?sRbA BZ &-)>UKlШhP4ɤ}"}{MiCH g?<iDN(聁2@p޷)GzK{KlZ>WO[ByLvu;b ˀ*!=|_ >Ñs *uHn51<RI(&5,.)H)@a ZBCS,+7q!.m@42T)>I$8J>2<5端4{ىBr{?- h&OTov߉AҔU·&Lmndi|F#n5}}R.?cq|@ァ|XSK'pSi=W704}MpsЭYaXSѳØ&AԂkN4j)/\;yӳC.u8J*sO^+ԫ -\\ME={@euϑ0<^YU\rtQ̗$U!Ņs^4.jnz3dž^~jtZ;>VPQի[|E#Z:mgn=7e]'6nd(l>?Dώ^#A.<]Y\+3nVjs-Ҭ?RՐJ}U=Uz:!0ߟ3\J5̓}Q.zFomF̓8 B"D Ū al L-s}xO;C0grEN=["=XrFZG 9 Xo'a@zL(ay6ְqB&㱆h@f)r|| @$ݶMoZkV?Mzw-A=*%1ю)mO2\ gi+|=xVA0|x݇Zz(Z-(^Y|Jy*ju d` wiB`p&E)&`g9(^{M hَ˩Ӓst{wRS](c٧1\Ҽw$Yړ1# %KF֩J/67La}Z0 | 6BRX(:Oe05M QTJ;&°TJgZo"O#$ Ɵ'hGc[U{ oXP@;GDrőq+:ґ_ 7lG4 @N 
fqRd=qw:li(t7x.;H1wXN)x/GZ#H2  ( =įT_mC>) /y]!57DM(/eP+w;eٽh8H8c ]' RBR$zqD 0Qw5l[1&1gے1<=4Qk [)={ߛ]U?bT#ۧQpOoO(\\> T]H58zF^2Jk;(!3oU?p% @hO ~5ږZz{Y'G~`T|$4]$8 P5Ds- KKimif^F`u -U zQ‹4B 6 Rpq&"_7g: '?"x rwj{!uR[v)~{'U|/dfispxDS A?)cZ_6]whS&%j-'OǙڿk}/o&[.LƁe%^5%0R<$o2Z/yY4$BFL;g^~iQlԼZ-[X_r2. X#AToȭ^IjGߧ}Su|ѩկ_f}‡jͥ ӆ#U#̿wNNgZgGj,W+C ϴ|AzTDu} ̩ A[ duS5u2Gy#AhWoNLѾ}a@Bl"V?bzOb/AaslU SPZߴ D7!'پu$MPh II_sa)F\uj+FmhyG8l7)Xȅ(ޟ,T9_KooioSpv[]_QFȇsYåVl*sd|18 &!GyFK/3[U`]hw{gD\mm I РMA۷0>xgxgdR(oM~)uRހ3BV|=Wbb$$`@ӻh6$pd#w?z_@l xZXb)x2+]LzWw<(X |aGEEv|'eCLlx|BW~gkZOT7,!aR=+ .E/m2 BZ@:&,#H=K33M"m04❯A.GBkQzƴJ3@DՁIPNwЌDՋ2`Oփ\x8S{0VZ];uC@ w?tO};5m/ΤokC=fjpv<'PR˗(.c&.H.GwJ/a%b^QY(n 8Q d8wU}w 6Go->WsW)Ć iBB\h9j.ǺXlcD0z>U չfZMwx;i% "AePj67'vs+} (vhC$5_tm׼ {L@~cSC OViuԐ ;M)Eoޘ _ ՎwYq*x=oT|RKWn*yk?j"N˜*l{?R%NGGz<"j۱A@Dj^nG|PBʥJEO&MTNc?7uՕsM_X#{KAsƍIwvbZc ȏ:Jo0̀WP=syn6)Rrz񧠮~KSs`];t^+2qqccL&nz_'(:-4'.K=Ʌu;XxO5rthw^-tItFLw-2ކ}_n, صCc5t_x~rd錊 r,꣪Z L;l j̾vLZS0 2/.n4R@қ:H%Ĕ@ MjOQilIehCaN2a;Atg )HRcN Bޣ8G]0%,.7;yFPÌK3?y6nuTLV|f1NFkD 4#f[H0 @>ikT>y/!;1ggzu#7R4F9yOB|}"iŬBhe.͹`lE:rlfD&\JJ3l JZlb?qzj~-r 2aJtAxU9Ԗ X[Z-[V~ھN~\N=dƕ.ץ +M$ I3^)J8"mtrOwIǘqcVu[q; #缨3C$6 l)}sݐPIR(_%`t,m߶=΍%>72pe45ƢU ?Z|)L$%j<2@ h4M(LkZ;"`8Th@%pr;beLp+q`Ug(ihg3lϵG՘a/؃8ξ&Pk#k;Ϟ]?~t55]8HWAݎgi03sO;HwwzC/cXC ~Wt=~J $gvxҭJ_igfKם%C\ u3`eE>ףЍ2,P e>t[2׋E@iB܈ P- $WՍyC={PvI*r}*5PD5㬃ح-vRPr%P.oJuQ:(y1`-f(J lton߸c68fz0^0@NݐKxHo0~ɬBWM.yCޫH u|~Št~:J~U7Y!Jpl_e /yq8TCY'u?dZ:._P |)Mmi3>뱋8}=Wq]f1L(c B X1:l# L<~r"s ֪Vf> [3^C}ak$@p֏~!!j̻: q_Ef'Q4]{esN۠y7H@ۇg_"xۅƗ`ꅔ{|w\43 [70O-p<5e4 RL堠潥>ثHxXN¿ ŪĆT9玶;oߧ ծ| E۴FRqOj+IڥXPW}@*$JACMδ03a=.O,Z|]4ca@#Z%l߲+51)7|pi 06g[Fǀ߶ EHee(U[(v;-* q6aP財`6cF3S-kPI` $Eʒ"~sagyN-e C?jg<ă ،*:,zټzGQ͖lqXX=}oI@"Y+Lj`dNF,+>G!Tl7b{o6ݻV35}Qcڛ8a3sG{ E5h1B x]F"®="*2J;%TUJ :.:vO&3%վqe3/ugաr|C[ի JC*ԒPk\"r6 Y{0GRZ*~N} _ rMf+(Fxϰzu6#D <简q6yH9V&Q)&]~-FpˠP=2)ȹHWvfPS֦j۟NSLtCo1׆2E[} ZX@Anwt@+CLX#kK4q+vJO_t"&mj _ h02-Y>D8R  p+d1DXQc7q$pEXO?il66yhx -VjzՏ.< ֜st]A:6p-@,\ɐ4o&w;@zU69UAC$?x%Ff腞 }Bt[RAYk'슟pבؘ,\OPi0TP5[IWm:l5}+Dnж=`۱"V 
LWƋMRgfXaY?=4x$;1G~q3,R~cӨ􍁼j/4צ6,R:MI0E;;wNO5M@G>{|z:|4G<̬0GGdKus)Dod@ VB3pC-$3-t78ۯ Wp~?xUckjYz:LsB`c!md. r-n 3rYf̿+ O~:fMOHc.xq^ۆ`4-9 =H}Rx,s$l̵HU2e E #}^Xa.W. //K˜/lE|~-WsoQdegh2(Zx/s("|_\zvN1ڢI")iNc/i^\2I)K{ޫ#=,yף W^fCf\:Q{S<˩/v)qZp`\mSlrq~E~qc%BQLҭV)aYX.Ve(oE;뀐@?4R~׶26@AE-N 9I/L;f2~`RńsU<>_ܻysyƒ2l~=f4p[DSGw0ߗy6?U17>moWsi,I=f%*wX]XIj;6|D*jd\>%,+]Ώf+蹞C~򓡸g=SVIo4=aY8;8"oc dhu,mP;tԋj m5:FuڐAkwEa:$[V=s/Iy~N,17[Zܤ a-h.HY[aN1- YST#U?N0>VfGDכҾ'T^1%i?3D:BLQװ{t8,!QؘPlTLϯb%({-E'98zv_KF?cQz+[ܣphhA%4V\߂kkrlL>^PZ+ ؚsT@b1``. vۆG"$YCdWݑ5F!wuEzabo/u@e .0D+'=P5>!뇰'&Z_hcVZX_3 eu'i"C]ỳ9:zǢ@>آY9#8Aa 3350h>@wFx)#ξOxa: NX@7oPv\( )~pRR&(ֲ}aH,׬._jc^ripfr/HXy"2O+e+|^ȍ&5.ҧgN.  f~CL#_pAPxB$~W\l<꓍(Ǝ[sbI iьF VGwCwm0UՒuPV6RlUc0s ۱sDyvzS {2y@ # zfe s~I|-,A~upE1~o1׃2I>˞YKh"Jrs)xއh2zkj.~D 3gZi< e\F(N7JcK@q4qsM&$$:_fFA! 9CYF[dgCvfb& XK~ jE.wӷf]%7Y'=#l IFfxN dfב!PM@6Mh6^IYx31k7w=! G3C< H8rKgU%"8'LǙ{t HQhݎ[: ͧQH(^}3gʹ5kڟMϴF6TA$hDcL8VJ3v@Ǹ\7%'%M=]Z rB ,5JQcEnarbZ)|MnB7d eU9p~r;eih|iiϗ LWgKc_ABbJs39"bu̮}hjry#_u'+1 ?GKR{ϮoS\>QPN<3#behP?-7xNqfg^D)=Y4ʨ:7k⼙Sſ&yLTcEmָi(gC(iVʞV[&'~H"sdr_u&(52l#$bA4}:',`yr\A_ؼ"-ŝ'_gXsH͌h(x'n8+nY]^t_&5}=~=|ԚFu QP0Yq .{}j}M2,bpAa_5w+S`]ƙ`rdP*n3kd^`{m%mϼfhkVm||7nScI, 7| 8eh*&2sR_g-n@# *OҽpǬ$6%Ip'NuC`X򨞬g"Y$A,@gqv1hHً!.jWfcY󃰋,'9ug墤! 
tgOqY&1'zZ !͝"ߕE;dwUPR"]m-L ot@bN~3:,֙bsqDŽuM$Q^+Os!zܿR?aX fE^fٸs/ɾ&Hҟi_uX9y%zC uj0f1v0Q*!X^g1?MDVn0  VGz|c&IFqjyR+<O\%sO65 'Q@Wm< p/:yľ^KE@dt:㏁(ޚ֛Έ.z_ Nf >-À\:&Z4+/ln#ٜZf;9J6#f=u%]:[YrD 9ڿi IVQLyZ9qs唥7^/@/Eou2ׂp\ͿK%_6IZZ77S֯v uy~1"/` @Uuw 4#@=ĻӋBaoOٵ\l1TI҃_ge2 MRm7śB Sfx$91:{AJ~3؃)nH=jzu,* @[G0:۟bX:bܦ`n;T4UajϸMRʂLO+ 6V( zBBo~_(,HKg\!7ӼuD7#ѫ:kYƕDb5;@ի˲Xϔ6 gۦNq`6n=OpA^^ޜrˬ5.G 7D4gWs@d "9 c {85B`(2lq:qiӞQEG?뵨l\V5U1h41AW)O:K7P2_oI@&6|&.3s˵YTu|./dNF|.ċT/pjBqAD\X\&=3ppl5'I:n4 Mf6dB(@:1 <ͩ"r@ZPؑs _leY`žPac ᗊ-$ Pؐz/}Iw72^y3IlgUzMb!B=ӂ-e LJ(N"" $k &I%lfkkA4'0&BU+o"QY\Ljb߲,yi; Z pIr%* ڻh:gJq-zUs!?Yڟ ?oUw<||T=8 XUJ$?;) XwHLx~Fy~VM~&}gUO`XY-IDWS8p{ O<]CA@,#K_HG QuAF|HSZyJk݊1%z7m?DHO'ڷ~I 㐏#$;)iY% z4j%ybm=p&Azemݐ66z^Xx4S{E?؇08W4bI+BE M8B({)}-05 杼5oY*HQdMyQax!ZQ=h߻&KQ/q_Y /.7r}s8eךbtruV <% n.P=QgtpQQ8o\Es;qօO{_RxF (89OΘBIHd?b+noJ?Jԩ o?bne  E,7>d(sCpSb!:x}$(jW) r}jMiS@TN!A`OՐ.u웻$m K~ Ex䂯A O3~k7umS-)66}+h:6 vr*8b]NKX;hE%qQvvX03(tWzJ#O8:WsiH*pYF&\(2-}v_ khLJsg^S}yp1C g#lT.%9H>⯅Ru#=ЙVa>:7᷽y/lBLq)kzRtalr'; 2+@!jd౼#ȧM6'C}Y 2u6_^Y ?p/>3mD`r[ZZ%E6֍@Ŷ_Vh`fW³Ubp;/eGsjZ7Hl'  a//zus #{b Ǯ/ 3]Y}. #A77=p^(JNIheЍ6*QH:}6M݁-:HHO͛P)'rDK{>DPl*=R8:ŠO~K6`5*E tbܦב 6?Cn,ݟL@B\f|mSdsVTMnc|V<Bo ;#pBlPM. *> R?rZ:BYZa6M$-wFgj[pbt!4:b.z&1FͅB24L˙|&+R4Y_ʱ:Dߟl+)i׺oyied 0c1B/RAh 2Nob#BGb? ,0Y>ש[J>a[C/?~(E [n 3 ,D Bg:o0 +CVAX"y#%Ph9TxOg}q%KspLH `}>0Y8A%kA|_5ʅN=\qcX\WX6ͿS/_L-eNxʂ.y\ZkoWȩ;WS=áØX@ܡKQ焅_QobJ{If"芭L׭~7%q?OQW,plRB,@k6m\܃OU6W0o!hTG{V<ǷVwCjmOC_^|*3XU؍RpN5Sn @ՂTsa.kPo?Ϻ7:}S gm@߷CvA,fs\i:}= lE:x&@{;o4&9OXtIZ;H.c\6W鋋X[ @~~^)yX09ht0_9?-%k o*"{ Y՛&Xh_8J]Kͧ1 YL7h [iHRԂ>. 
ɛ[)zl$CrJ%LmChj/_fNl-L ^%R6n"& o_Y?t%@ |,/'-7G* AXGW "̯@ 4oVC_ {$wk\=^&rRB_؄XU&`?29k&ijq6_Ö,NPI}ҳR⩈[ͼmH)D@ 4,܌?& bzxK@ xM~۴U~]K4 V xGo=Y5f _s06)B]Ś>@BTH` 7uʉ@w<)*nk*#$-YoyȵHƀqeJ4zR+mCK9ibIm4*čoI& \ AM<{v-?s {esOٲ{;5,Ν2(߷i+Ф02 6p<d{ Bq:~ }qn޶hO5^{I5 M>R= Qf5[<'+0Rz:v2npvZWxӜ*꩓s8riD@ Қm=^V}FǞ8]b^q,zSpᷴ5REe:2_ xI'P_ :ծ yrr-?4a{$9I+j6,o~iN8&>O!g1{ & RTe_b⒃}Թ8x h X ߮Ӊ}@smż+/yZ,!0!%R91]кY| 'P{~qd3l:[Min?x k5 5'fjK]^~MK<&z1y$a{I@LHs_[.txRl1 O] @䅃cke9ISD1fǘx9?OW?n#A.- m#~FRUH"H5lwQ6B7QSd)5w]G\)qY Y"-P/#2 Ymf;wqFuΩ~я=Zᳶ..pݴE J!U.j X֨ěBC/y@P^ӗy]bE%#.p}͎@t %XX כ_e2&Y3*c +alnmE@sM&`[0!;\5:T+Zo[O%mR0& M8 oQ1t}]jUex>;|_f3"fkύiH%]}K$[7|]]dZ N%:5ká* Ȍz=C׊A]N&>/C8W8r/FVSzlBYL;0 AҐ\}{*q`Sղ>M ] >WU-[Qhx$AGHK%[ݘs9f 5V1A&¤X6;ӆs R S%i6~T3~0!X@Y *(lۅ8b{?b3\ff#s?߄F 4nŇ hV} qre1,SBXsF )΅ 4Ξ␶y?Ɖ|$ģ,Cѱo*HR۹}0nluG>i;p6gcZÊÎ Vl2OCE}oT#8Nm&Q,7blԉH !H)}糇yBpZ㟢?iBs0ߓ56-Fs>s jpeɷ.9-6V3LtL<>g\2<깸Iב`!BĘV G?(o5ZX^kHZM22|4 GFW&@DR70^M@LJ4;('BuW=a ƭH`}s^F϶ e!d;Hb:I`{? iC~uӃqj띎/e@ [2 PQ:5M++_ǒW؃`:N~^awjQ-ؿcjZaGma$] 3R~OdJO8ubnԙkw((ȮƮEM/&)%O V;w (^3IQ5JԩroP ԣ AwGI+Qǥ268_`4LG'F5\y ^/u`QU?G611@>NY?\}"Hw?"T|LE )ڍN 0!62SI%cT"sLQw3,?}pèsփ@W<"-IŚԝ5 (_Ύ; _K5_-5<ռ=~Gx6TaÅ h3`' ܆>kH91YB~#'AD0Æk|h:D0 Fa2U, q/0~J ?\oKx.|s&n|: uVж lxdGƻ>p@8ȝ".e >8&aC4?MWLus 11r_n{dF6P#>%QX]2Cm{ ng[rԄGOI&ʻ]7ޱ z:( 5ݬA:)"IWNF \k\|p^{c>ƆWѸ;]m7X1:0Qf&/ˁ  rR_jBhA$uv[zmUhƽ u: ""AuSfƀ:'{Yr^- _%7U5]jQ[;N "ar*gY9Bj LJU.D7E!p^st/u AwJ* . niz ea@&"c g|&}S{w\qHyMU4z8gD xyͳ?2A,}^U.!obMGC^,CmLb^Fhߔ>h*hZǟ%>frrefo5/$JD89g4RcBƪR =Ip0 lX鰂 gS$TAym,ͽSpϵejU1)4nZbL6KG@j ]bj&X<3 SPWUiO벝Icr{7 ?vrpXQv>>P{ϋz@G4KZiTiqJ*}xv^ƚ3놄>QÄ́M.k>۝08[G>~gY Z"n<گY $*cħAğ+".%diOX)PC.ȤDbOoL,% }& i/YUxm:F g[46N\nDc Tu8 [|2!?k g|&`aTRdhvvJ}_R`;+؆u:cs ! 
\W ;]|,ZS|7k0]R|?|z}m-mWԏr IxgE5G0wݩ?^=-lXgEבnu@]Pw\^8#Z+ 6UhR%)u ѳ/#{L ̓xfa,^29 kK_Og Tݱk$h1^; (.5YWY ϓJ"TsTGPoN?NWqpN{5(k5hCt-\mlbDʑt1 r ɩ 0e}nX#0?ESVkE / r& 8wL=Wþ7C +(}45)v<ڞ1֪;ͫG &wquP%B!?6{I-S}@\m:4`r`x+(p3 D)kƁ?s.NDQa(l.P1NIo׺Dm rۿ_IKK KٶMYcbÇuB]vrll,+q("Wm`ʿx21^SiaeWś,ٟzyB|Z ^՛^=\u4b^tQq3' &Oyj,FdӚ͉[ŝzt4j]q~?|;W?0k?T1ß{dy3[ŧ'~ѨwVly (!q12D7֪쒢h4OQGbce{qV^W:)7d1G4D&cIM7vkuVث?3A}O^wWphoFag!3)2~$?Ӹzr3_>oT&%E3I:oRSsL'G G-ap?!/oUV}SַSTơSXYȝ3WMo)exD|7 N>%s2w;W{OsqjgcnKWK$;^9DV#%L/| s1Zf}]\!+^|" 5]?iiaxp$FJ]YǖyS7S_\#+ƭ~yoJ^wo,; @ջPT=Nsy3a5-.|IAiH`}JB&"yğ蜤R.JeV ק )QMBYÄˊMIǶ>OrGusJb׼|S-Zxݖ@ [$<?Ƙfq__,d VΒ<^k nd&3TJY ˣM!%'=m]+鹬^{ap> 5JTrI|)%K$MxRK_V6E甗I|J*Y;t>~v>kOp(.~WѤ{y/‹VwR}}x|LMU*tv8:|Ǧ :hOxU@qIjexam9s rMFm2ƞ-K,?Sthڟ{E ŭKB@ PBH@{E}k~_)^  pc߲'"i+&dSk+|l`U9iܩ4q)N6 1 ̳,ic,,`zrrΕ@)$P$Pab@"(4)m ((  BQ@E  P$(иGM aDih"0D89i.B9hDrLpu":ؤCRy!qHP8"&H B1B 8Al6>2,PQڈdPNS7"nsR+#pQ Q8)QȚh$܏~.I!"nAPohE!mH phj(5)DrF M!F&@4CNpR O1!=$ -bH)́J1T& IN T,JT w.y#X P8HB15%x!@q Pby "|PKp  VOߎCL/FP\; ~ֲŷ٢I21ZlcUQ,GZOD۝d+ I&M쟕-tw!M˱^mA#jn,ROLq[b;PESU+oIV'e.溶2Ȯ؆wj0}8F_7*4YFt$>GRnOg{e+8̭gI4&bzRAF$&dJ* ۊEo66~>'o>Ig? toҏ?)#]ttiF{tmQΎZ#7$otQ4F#јGִu#MDsGWo#$sFGo;[#Gb5:Ѫ8`TiTm4U3j1GZ3Tk΍{~HnQԌ^ZDf#5Ί9Ѣ3ЍvϢ;DfpFrG-ٵQtqFQ;tk#Qb4Ft8Qҍ;}w5B3tmҍFDk9#(j4G QwTmF#dqڏ#XFk͑#N}yG ;H<s938#Fn4Fks#Z6eHiݣThOh#Dry#*9:3DqҍQÂ4|G6xh׻o:Fsے6qFvG G7oJGVRRUO^y Å ,*+M/"RvY}׏SOu) m^w;\~P.ɝ}QEy.^6Lë_G%]?(T1R_~?}^]Q{k6\ %7"}]_!ŎW0sGĨxO MSBB>i-c+B}@ !Bٱ%<4 1ES/ .}z37oUJhR<Qȍ>p ^}_>Sz|+LU/U(;$߲R>S=YhG@/qaT@ - 4|!gkFX򑄏6V,o-E$F3py.< ?NbeL@ p-]+HSآܘ#(*? qE U7 bZNW?/|fn| x~KkP@p 'iutYWP lMR7;mf!1e! 1*^wkxH}?hn!j)gT)+ B>Ppq<OgTDOpW j&`Dl'dׇGq , `苟{ZP v\E̟C{@eeu6wH  sY&%fvku\)OWd]-`^(ۜw=U$}ޫJ _ϴO-xic ݫ! 
f)9*ÃMęunq 1PE+% 1~]vkx>e6fe ȾB>H3x%q(16I:;,Z&J`^!HgIb7ԏ@.ue!fkʱ^EV!.KtK2O[$v>|j'Ke2uYBw+ѥ 0>hsCBB.5-ݕ' !奉_m:҉|ӽvnyk4ݜOVk?9?lﲖWZhp0(EbW$>DD"ĉ- slCpdX ,bAy疁oq@%Yx*G](!(>A8 dIZuzBwtJ7H6ɵ[ڄ{J5$-yZg qoU#FF7WσRlƥ2JA%nHsEUЦ~c95Z`ueӃ ~jq ڣݖ-s/G׿h]QLݼ+2f'Z*9ĆӦ ZE{O"@&:Y)6XDB.+BO}yp!b@ sY9\hX:]IPQmags4c*>"}Ș].Hp g.~C)$8K=f^ܿT5TMo895?v%B?}}_6ƫ 2ܱ H!zWgC#w*LAءp|҅HD<-:BmtU M?U^ȥ%N;Il/u^2 :֫(]KAjOEgL+  9jr_v(@ uۖ~*3h%I]t*߻-2fQA o 1ۛ2 =lQB0M1L2=MSsHWǂa_LXGut6Da_|hpd%͡??du DUo"* Cn[ d>C~|p8Sa'6@h~Bi+,P1cM޸IJAbqD40YUu4{Xw6Ӣno < :,ee4Ɍhs|co@{>mE3_m9nLf=߳sՙk?t%~'" 2%! |{)5ĸV 2hvy`cIݧzI"I9BHqJH1A-i-˖v1xX}w$RF,fC :l3U?@ CWL6գ2vJl S.]g˰fu;mO?YA*(כo%[j+O|~02:(E:3_vg;>.]1s=FrR=wr';N|DŽZ7{ɦ+iPǩiuPsMힷ5DCtve*^s d92{^9qwm'fjꤹϵԎO-%@Я#,%WLm:Yt Q1Etx>Ogaz3s] !V1,ۏ8IY liWIfXmgX{񛆹#=g3^$ ģi2~.QZclM&1f3a!!BeS\ce! A@*#} I BU$*$! \Pޝ*Oh:AI T "ifQ 30̆c23,fVe }}ŦS2&`fij+2Fak,bKYl2M2CH*|*M28*g٨Fe& ֎_Q­^c+FqQLu-ލ[d>=W,*fJ^ Lī9ietr;@Fgå+,H D:!;^!GQjiF:_ã.{ߍѵM  =.v8HRrq6yk-Iz6FvZWv a2YPbC:P{pj9ގz8QUѲ5"BJ&n~Ɏn?_=#uZ[Vry[;+u508W ^,&+T9ޖ8\ >4i;ƓqJX`㣄VnP¨lzj7(IGOST5?Cn}r;OF#3Hr0ŢB֡5C4ra'j)~67?54*Bu 6y*߿\,c W&+t\E BJ 5 JCa}+\CS=ۨ}5n{1ح+T.JC&GG972(M#(h>scﹴconKgs.;7_K\T-*ڏ(֎‹?"\_i0߼ZcﻩF9~E PD /$, 8QUD>mO7*JC07]]]ma7j B&&S~P/Xa+SPCL4 „IBMf =<qhcKϖFw}yMBf aG3tmG<;操(*טuWQST{<5D'!Q"(o ='uUcw>S٘:Xu |noP(Lap|tܣ|GFo</r.o+zݳ0  Tag ҆ ؈OJ`GNK0L1IaV3[1@є x""*J*Ӄ0ϟ^AB\Asa#tq[zͿJF b.#c]&b vX<Nœprߞ{VWuo}v /: O n^E7ݩ 1?rnoeI4`!KjjlRճSN"!q~?K^f=}[xO{QD2|^/Jr=LJ5;3[uqWkϲ pB B1}Bn4PPBj2p ,!:X2x,=h 2ࣻ( hJ4Wңģ5G]ѮQhڍG$z4fIBhPa&J $m K ċ^{~;OhE7=EC9}hڪeaP 9u<cn;w8M2b>G#}4ydrG #m|4F{TLb(Qj3^/s(z}hj7j<*;US£v1J9h(QhQ͈hqPPP~K{'zӥb*OOJ\(Mdy6܌{^cA'Y4w PY倎ӚU(z'㾊| x B= =-G(Q&gvbj;3ʎTte͑9Q3ǑQf#ѦíQn5mBy*2֍7x5}}q(p装GE#^j>bf)T9 (t N}Pq1GX3ivљFڨ^gu;Q7{oOu\>΍{|0mލ4Fytq4(lHѪ77Fq6O-TkkG*5uъ: (窝q[Q㼺8TiFFQ>z~ޏ?GQQGyJ;(z5r4y#FQ̎=F#ƣE G :J#ȣDkFCFze~u\o-7a*h7PxG: :V!V!! k{?ahҍ/'/-gh֎gukFk5F#Ѿ Tb3(JM_ksN8V2I~T1NyZlx3'9 T*3fsIƎ{QR,cH'{@W+ԋsmT<Ӌ}_=5x([%FLqso_ku[T$!  
(E y> 2`x6op}oQW{5{J2x4ytxqZQhUGkãZ8ѥtѢ]УtǕFqQTz*9Ό#f(ލTpn1FQG(G*:ތ(mFd"kFw(ڍ(](G*Z(ҌʌG֎<8љG?Z9Q??+{ .R~KM`h*bC7(PuF#fb<QT#J9|Wh(9Qt(?z/qQݣuQTiqzUNToFiۣj7SiET86Bbc9 s-C;'V[%gnyBT(@= ^hG?]~^qQ"?xQƎbQvQW= z;tsHƍ4U9MaƎ5z4z22ToG E e (SNXLUG$)&j:z/vHǣF#Oh Dn6ݤg*%KM+ZB9U PB$PP='glx_X!Aho"eazl IBɨQ+PB \/phػ$!>]&v-s3ݬj$Mb-)@d}gY;(^T-Sݗi7A䶓f`W7_o~4CTxHQmz9c~7G[ *Q>_AlP2e`g-]ssYoO;P3@`#2J5 E RjBBġJ(H8 6|ݾD`rD~dZr+.8sj8E~ C}tIR6f81y{enAk4d["_uD@Th2;$PW7qPWoГ !^'>@{JP|w+?._'a8IMz!n1).x.|\/lP>,L)G:bc?t٘gkQc+՞ޞYp(xHdܑ>R@泞dTAZVJbǣ 9$[{Ro'ڭUE^QNxiʣMdm L$Ft ſHWxy8up2*HXÆzD9eq)f>;kEV’ 84oma3L߇MOE7~M<(WS[c"R\pVQ`cnF`mN -YYyWy,n :ϫ&#+EM!L}1;hhʘ#>%Pp87xͮ_s?&GL9AM{{EzAJ@y7'_m  ,8z0yPKMZ6iPZ*&TK+1የ2zy7~i뷍bp4dոUqq.;vk>z4ζN%>S\ yځd2`hnOXZoh3Ka# ``kU/l(qFAT.4j$TNf&vG0 K偃#nq+ ёa ^Dޟ}?ϼ_ *g2EhO3P9QJi Yzwk֕Bb,R6y }}p&ژ>ͤwǒia(Cy{2P\_x''},'sk3w|n F't[eÆ'XsΜhv Fפ3Rmlbɝ꿠6,XZYPCPcZ<O]ggv(4waj7nM$4?X *$'eX!׮)E .2: -)%Dԝ ]D;*SB-[z BB]XĴxM!@]!'EvPqK(uMiq@%,KUQg"ϼ)FC"3BB}"C(n{(dfֺ%/='#>8qTc WлRD W_|&B j+Ynq ? w}W#ӛZqsK^,\~1aă#YJ;$7O| x5g.mIzp;h .XX$v͔ėLG>e+u m˛zΐ)~o@3h'Ynm_hbt9A{vBȓWEx"N*[t݁eBRZL;B^}ۏ/b7u~[N|Rk9P#(IQAa+POWm†ڀD>0"=PQ~ ۿͷE$OX>_( ߸~clE1U:+2'$Q*Eۿmg/POn#a`}E‡JALj FX;߽J>Y/靾iR3l~JUomkT+cEI(52ul9z7QK1_y/>4O%$lq1o+BXpOu\(ԕ-ѵ۴ÝK1'غ3uߐz].#~@+/E@In#DRt[^བzbcCn')nbS+=SKZh*ߍBB=ٰ',.>"xa$ӝ.:$[wb=[ōd۵ >y3lŮD;1Sz0Q<ri@Q!, #ԕ-h?@)`Qs>KΛoGH-YW#: =ڥt r,[Kh N<\bM)j79K pSXr{PaNl')̈%>L.\(ۄ!:nDh6 ::E[dc@Ts0|Ư5 dspc|NiP&Y=FgX?~:g:aQW==~_S!H1=}`v髽Pylic/'_-PN\\kkԘLn7rkxI)dWĚ(>C$!c DwoXuCaŝ>j,pb+MDG }=-sF )5I"]} ~ #[QEd +}ȂOdB`Cyؔ,2q0ȇt~II~TYOMqƋ+œ2RٛMdlq M*떉KV*RR B*uց$ʞNνcKRR(ט;7X$6-nc٥Zfʃޥ'd;>K˶Ӟ !B}B;?K`bIjrvB"wkas>Teb~4>hO_z}KOvmL&%Ga]5i|:ܰ h]YL<$eݪ>&rk1i~ݻ,O"V?#_!%& m뭙P9M¶_ Jk:pFMkb&J3c(@}1V77{^t)9=oKѭ4a]l䝵ז1ȷHzKqEnǏ)bdkP(ֹs7ڣƟ'BVO${Ya],4| V(3zG֮~^i=0#Cm:qdlt$.lY:.Fٮf399jn'o/H~rW/t⥌WL(cGLmN}y,qX1+6^_ęqF>Ϣ#o3JC;Wp='5_tgaC-$l٩©N_X>2B299w:ےL Ecorі F cGWNJcF !ˮG=3s`?DcZ8~u_,¡^ ܟX$1O[ {kf9dLh-E@p{ @- dNn \e-= GR;b|h#S't3QRķSgN~2{ayr'kٶK4..3%:%7W.ÅMPC~x$^[j'ņ?ֺusjlJfs-,C%Ɣ!?% PP- '4>S|K&]wVp 
hmI-3yaAAxuyKӲ;Li^&>r/ǺDexz[V(b-ZSYmQ\$vzRi f'uZZO (˴x(o(:uOzy`Z`_MC਒3·{"SR?Fq<YC ȸ ,I8#:(^3Rq$Pe{DV}JA_,ss0S:05h(9be<$9F"o? ɄHshO"29%C& 9Lcs`i9:oL3uS # 5"wSY|Ui;3O&k .Х{\ku]ԫLޣE(FjYMٌhq)DzKFF;* cS+x\?bA)K >R N9F?=}DYV%#Hf]Bk\ 4ӏuȔKH=P'D& DiB|OԽ|!" %$tɢG:BGSѩK3y/xqn =V."SG>Fi''zM$"Ԍ>kTza^T绗ɎOLO:MzNL*v!\ >r_a PX"1ڡ4wZ#'Lk/S7«0؊a#qBኚ2 Q1v 90B!)ب*~8WNǚ? 1svh߲)_7ӯM2p|(1 DșN張i`;+eh}|I'u5ӥKM`tҏO^zWx_N1?Yo}KF8c@9&rMŖB2` C $s\vw?i=F+h}]5OGy]0F$JMD1 !Ʈ_FyZJ:+۰Apaw>뼲q e#`nOh6wD"ʀ$]+`zP '1n!A=k'Jbij=  Cu5=^v\4qP89ѯgڵ'ݞ֗Ov?%F̍*Q.^4=l,mZ <(Sn`g =T`8?b N?0GW+L<Ŗt^-zTC$zrm}wVj+T[~`;cTj=r:`o.,8@h] @ FyLG=+r伊OFܩoa?D&03 R`3DL_ب pYx]7V4O[Apc|G߸6`oRHHnF/THV7hX0xۋJSI$<}nחZ7kֽ sB͘msCau^ K{̈J^Gnܿ J;z%+ԇ> uHY,o3I11_&~#ݑ:ESex/THƳ."(HB0tidyr/data/us_rent_income.rda0000644000176200001440000000275414360256575016044 0ustar liggesusersBZh91AY&SYdNYT`$PU@y^ lmjjL2h0S#P6M̑LS b1 4h=OS#M6xiD i&@C#C& 0 C 0F#&h0F#FFM2hS)OMQ S@d 4442iM hh4d1 M4ɣ4hIbOS4SC z' = 24 騸yd$@I1$^$QmcYwW[i_tRQSDL;h 0z >ZIF-I߱6pZLW2FqBX,BS"&" SY3Vk~ǯ"1GE!sW{JD":7V!)B#˔}1}N߰" +') ;޼gu]F,^UeYe2dɓ&L2dɓ&L2d,,ett'7v$ -kfW,2MyCKQ)D)ӔkM֔\SBb)BA25~z"9"3&!)$D)C(&QqMǷX?(I6۪=*;Y=or=uq9lXbŊ }mYe,C,YrKנ 0# 0`^z-̲,r!օs[Ԯ+֡"|r']@DD5&`XT 5",E@XK1$JHDCut%uK|vg/G;Y\1QeFfj31r +>+GZuYP("hKQ8!~F1вݛc2_ CI<1 HW\-Ÿ,hOjFso?ZgQ]#Qk-3{_cUsf>J沞J]V۟Sj 1K \^ _B~}jֱ/joWaXB7e[m )4ڈij3V9 t|m)<-GW/izU:/3\Uu^bi1Udfܯbzv"6چiqjaAyf_I ukDjֻ DeCHB*gQD-UzDY5pk5weape3/fo^38N z ;&8[ez[)=yaJ[.:UTS4L36UibWB-3{}\JܲVȀ  3B1Wl(G;9U:q^3!s]Uh"YV"Ն@j`y% te+)l =$H䎙XP5$wyg,ShCq >s jJi!2 4`ɑqW*ɗ^|1"(H tidyr/data/table3.rda0000644000176200001440000000045614360256575014202 0ustar liggesusersBZh91AY&SY-(5|@瞰@hCM 44i@6D=LѠlh"I5Ę#M2FLGwOr|A<w@FŦZYWRf M%Hbh d1 5Me5YßDhDE6!qYyzDFa1$RTZ!q-Ih#RWA{ ODbbLD䮡C+rzapPNP(z v/$*j,Nfm*%"?L$Ow$S ҃Ptidyr/data/cms_patient_experience.rda0000644000176200001440000001056714360256575017551 0ustar liggesusersBZh91AY&SY 'H(c1@D1O^C{r#}|J|ȑe_-ߴ_R%_%VWԾn\Vus1:NfS16ͪ}bfbo16LLͧStLN&fөw7Φ&3SI٣/X8TUW_ww܂29|!v.:^4c|F21=M3?Ѫncc8g&q1 
ǹkmLed%r|31g&)rlc11b55vҪw6ʇňics/ޡ@UaU2  E=WG9wyvu̻x,)^ּN.| YN%X[V&%O_P/ /Sj]a}Žyt.asJʰVUE`ďfjLyMf{L1\ֺsy&Kk3\T^m>?H֣8foɶ0phS'tm4m}uU;_듥boѓ9S4{&`yfqOSe90gf{L춙4fXX15w::w^|'ϺJ'S uOS v&ťX[C=hUS3ZYzԸ,Sbt-mY9NJ-t˙m\P. uL+rYN+Kbձa]rag-ԃ|[J=u:k bLKf\Eָ/ N] 2Ļ6JXWP.YvgXYd<&ro?F|jޞ!q4f_k/²a~i'OMLΓu1356Ns:MKꭖys9m9X]u8]Φbjjq9]3n&ɩn[ 2Kָ.+zfS3r˹ڛ-4jfnNө-)fm0MO&MMKw2Z]MNgat9LU{\L*]kM-j\eOju8N'S#bq8N'3]Ζbo;XMNSSq6XKtbe-s\طrV[z/iޙgs0Yy13lMMeu7[,L&wM.3SuOfWSςG/u19ri7Ng3{. {M-󩥉Me;nO&r#ew9.pO)ao8Sz\/')5>S337X^әp+S'sޞO&u:Y-/'m9s4.'3ɕ;Qfl&giZ\s8OHm5<왛Nijs8OK+uu58^Ns:Ciu4.ejo9iy8 u3=]S9fzL' yitS93Io1:O'kes3531:Mӄfs;ym7Vөv:YSvO'z/Syfycz3ȸ]Aȹp\+-"dfPLM$ 4eA9$Tr^+LʖsFDQpR@Dd'9FpašiTI! 3.mxz`AK_YyRAֲ^oLLEbby1OI`idLS3ɐbZ}|sIq$Y.y[0XSS+ڷZ:鬪\ rNSꙟ?::O;-WX,Sw.բ: ]mϮ3i~g8\u@sp,I!a 8&(c:Ys䳙,-kS ܁z_/܊|kĿO=S*LctʬS}N'OR`Ŭk,?hŅ-BMg.u2%0&fffC'3l/,Lc]OmO54?R} Ix̧EprXڻֵmhzXI^/y&ff|g֜&f*X]P~ 2>3+ty;MĻݟLiGOY:YJ+u|KEܼKZr]绐\s,h7-(lz;ʲTK9|lV/ڤ̳G`es2Wq& ,/MqX%̸K\,rZ\˦n\@݌,Uf};?ڟX-'3<&J̨X|˼{rKh =7"31U ?,WRػkyVYy,M[ˇ"+ cK 1fťI̹-RR 'uMz˒&.KE, 7X^%j[mV|$djr=Ebn >|f&Oib`̞|7Pfbeo33:fo8͗ܟoCʼn8Nja:d0؜O̼S%Gb _wj6ͅRt)ذ,Lk"af./e0ܖ-sq8CuU<틺mX5&c",/Mz 9ur^h5^&'3L03LMH4Y)z>K`؜ϥ?Vs9Tfi2^߳O>r[6x-%p^4_\OiS!*gKTWXf|kKZ-&&gY_V+&Vg{ef}y%KX[ dK\̇\ؽΕtan藖\')vnu1??<>'*+ip311??sVg;L㟅w=Uׂy)*I^*'ڰXYf6$q52 l&ʵ1K334M"w1&(iq^=◶iи+\Ifؗ_uqSn[dVk)uKZ¦?Af)WSfݘ_~aqI:,_J̻{s\@:'jv.;tjZp[_Yd+jǶR:1.bEiSm3Ky=ڣ!غb ) Z~ )/bO 6r .W} Njpj~p-}`\6qjjU<_xJd uU0 Z` =P/h\`\TO%  e`)k ,!Wʧ]zUX~(Y!WoX t f,`|  *א.^)M/H7~{`N^*Aͷd VWј-Jb.p_ E<=@Zqz XN h `[a uwopbTae/_DGs^RSdHK)`B B ) E<=!,4R1.O-N"jBT$$#C#S)<.n>~=rE8P 'H(tidyr/data/who2.rda0000644000176200001440000026646514360256575013725 0ustar liggesusersBZh91AY&SY"3yU@`4+%@$HDT蠪DD@T8pw`Ҁ8:4O@7 $<8<@=nO=<o^AR@ yx ({KTRT ATEJ sD̓: ;ӝMgP9q͆ܠqV:-&F444 ɣM2OU?42#ѩ0MSj6L򆧦4bOScM5u0m!6eHN>Z\.ePCL8];'] j?Gie4q&WapH7 PҴ["N'NˉǷvtk ffBB@\swPTDRaҔyG2fF,i™LenfL21Q ˈaE\.\.iȊ'UD/T~0ҙ,ꤙiV%bӑ:*Y ,) 9b 7%zIBٟ<\Z& D}DVF(^tE""Q_;BIdfBDVuN~9zqKzi> 6hԑ4G~=$30LjEi]+7quFE/]؅Aa)0rSw$4QCK:܎x%UH8,K/"2&Mz-VG1Rg(AvStj'^DaxJeD!ò/ 
>^9kMQTKNe8<|@d=8{ޟ_7Wtte'ZfR+FZb$ExőHe򙓾|]su]><빂MڀVDtYUMV EMȵw!q#%G~Y!n]Ք$.CrᤨѪPH"4:kN7R*Ja& ,A)I:6uSiA2 Lh d "N&TUWZMi$IhS [@P5rneGHő).-%*bjaSc.:$^wFٺ&!gUR$K Dt{1+B@%a@!M3jI(-͉*2U싶M1knENI;Ai4\VMW(5T!UhY34Gq`E$B5$BE*pM5RZmt+HJ.) V aʉ\1K4H&eD WMJ(YV$4º(] ;-  E-U/ u[0xpFP).$Anж*5.ڨHBR @(bWF٨Z%˻7$FU)Xʪ$*6w|i6MBPK tP%[! NQIE]@wiSGY໕.wt][". ),]EQIY"Z. j$٢GPPU{gV8q=ܶAd+Dd] Zxa1٧6ʸJiR r'ݪ%*YI+-A.Z,/* óKCc0U &*UU爆.u V]Yd#`C'n27. `rZr [R77V Ch.蔪$Qy 5nEuuDiԹ\=rW=ݲ[*N T%xdUC CPbFCPɢ[vO M DFMryp(D:KUfP&I%U5b)zꘪrluF-B:iIJɩBY.10IҾ_8gjJʋy[0Li V!Zp!*ki:hbJ+8ʗ1i<+kg2nyD*RvV4"iuC`#&E-KsڶyVrzL+zNADC`L1ΒHıbh7xT5Aδ$ph!Ll)X$ F rvT^wU[” V{J+5]kFz(rԮ~ɴi$-:N2A% 30&gQSih b[C2IVx`/ ҩ*a&P3w;4>GXL8(uUS`m3t{quǝ±~8%@SiQ<m; ӗr˝_m^P[8&"*Qߏˮ9MCm[At׶1Ś[cd܇Fs}Q0]Lg:;WM_R}-yuI7nxt+r?M*^3\#n=|' _2+bay~ߕ{^}9]kOC0< Ow@M^}ϻӞ,\kA7׎Z.Ico '׈ZxmnPl#Feuufܧϟ>yPӛϥ^Sq>[({7Or|Z})鑷: }pz 2vepPs˴jSĦ KMrۏNNN;ߦy?m뷟}{KsZu;˗F͇ƺO>9Ϸk}fܲJg`k-ו43˄e@$z]U瘦~VLnΕ_p{SgKoc,qE˜:kunt۔ﭪ L+(enɖl˅/kŬ=wv=m9I.kyoI7x7>cO<; }.<"΁k ro|zatZBKq fнH9\ZjR{d+V(QC83,Ȕ` u6RG ڐ l!o-A˂W:Uc>-"V#[4*`_6U$q1g Nu5J^`\rӗK:ΦrJS~` œ)-[YYroO+PoFG]}[1!|+bH JqVS:~-\9v uYBڔ8~;y\>/\vɚjSw6Mnb6ܓtʆA BdΡ9Cԭ< o $2ZVU tj)iK7l ~ ơskϧZVv9ͣZTuVJ%ӔTy>IQˇ,RվCf΁^}S`C$Nm0AL5 %u7T)|=mVw!ȁ;4ߥhxbm.8)T*!.8jfO9~YXWϸ}fX DȚߜ$)eho#A^ȢrSb/G&t˛ ˙BތMBaMotbZ0ڥ} aA.?G?FLx u6gbxl#c՟'PMhfh5veY"ˊ}нW&}YԏlG/sG.tԫ4j͖)sx21̘(Xbӆ$LeDp"+8X ү}3T}g)(43hO cx& WⅧ G-,7 I&Us>^u(4Ӣ:%P͸<-.e92a] SrT}{ J2*Ji7gvYQ*hȎ!)!ZmZwQIoM-,9UeTMV:@$ޖe.+RW+.aK>nٹƔ鵂 yg>u6|g3kaWiJmc/'`9Y lZ[, (Ƃ =Eyxb|uej73ꥯvE]@)@Nj˂%zRK$G$$xIx%<A%􄗪$$=I{״lIg%/4C ^xK EKgR^IyiєYʎ>Cl PeЯk^ 7}Yi//o?'LQ4gU8Ր{T2X{3&a^ʜfY'OiUs`R<.f['Ŷ}6!GdYN> ;iWv5=y}6G>4syBRDѨ;+2n,}ʜn-L}M{GgCnoGg[LjI⭼LVҗx8=/8=Ga8I_tV_+SI3E'']$<|ŵv].ϋҢO.}íఇ,<|7Rn;:vԹ[}8z6.x3NڗțQ-0/ÙT1ni|͏^>{u:2$"Bi3cWv{ >v?K}q0jR5^)46nr|=NNg]JN{F5ڷ:[Éη?\*'_Rf@oΩx]D<~ ;?zwsv:˝-wSɧ❻>g:qnwgwxQe; SHg41a[ANj*9_C͑8sjnx]~R*MuIv9ߓ̇Kʻx֣7,RKʔv<.?ޭ_dT-;uY_XٞvOG 9ugͱ󽇇/kp0r٬`^ϕG[ٓ3(y=:sg{>򟆭.ݟ}'(YŦn~ϡJ^?oZW'KWW~N'}^Wȓ|.?>l}=C,Ow;_yΟ4_Z_/~o 8+|ݙ* 'Y~?Kw>m/FVgqd~q|A{Z'[}η_x|O{o'g]8w 
#:?_f|O׃ޮȥ~)\k^78^>U^gw7IkOGozu}ϑ=oaW{{ģ쳙MV/>{{ ~gO5_y|MK:tZjsEc|waH|?^os>DzSdx~l]ۭ{Lk[oe΋Wo=]}Wy{°*N} .nw(W˖GS<_:#S}|/ww/O{Bž?ƒC8RztݘkK5OmS=_З_r}o~ǡo}]qqs^O<οȭvy?ok7y?#S?=|3[x~sz?3w~%Wo>OJcw?s-n7_N=x?tltz^*SWۙ5>R?0ޗ>w5rg|KUjcil4šOV_vLY/wۮ{>t߹NOQv't'{~^3 cmx~,sT~k\ @8$@W-24zL2 tЯ۞,z_7pŽB? z= G@-Y: y_AwvQ4v8,賲Jtu4VqLdFiWͰ\Yf< 0`lz?d~kyo*}nq$~iHyR&ZP[+IBRa_Կ["gA-߰zmC.|ymWU8Az G# 02KM6tڪD'GeoæͣiӑRDk t'eM?,oK߰u1Jzwm ~5~擑O~nm7::B:$Iײ$} }:HEXWb\3N.nRA#Fә~i~٘|éct >CM~eU~g'y'HHCsC+ "P)ҿazTea 00BOa '[}QIU|5fD%)\ H5g@Ёq,U|UdP74Fs7kO@ݔSԳP]Ehg xBN$Aw`I"9󲉃s<#aq3S( C>5Z_N訓FV9}TFrZ#<@zڂ(H#m-Ir0xc=vUIDha#Icy3,JIQ%O'gQYdT_9E)C šbJ9J=F-Pi;P;W˦E+N<|=ERռSđWk ;.q&D*ʉ ٭9^sC+;~9NC||}GDW~\h ܗbuPL*st%.ZxV!tGKGxC&To8O7*ceAk6vk$)($1rtI(%'6%j]@\MjJ '/EˊUWxf4ڴVbwqf%Nm1"l,zgmZ44Dc#qؖ7Pu0;I]0klU Hz}W! Ѿp3j 0Gj d;C^ol`$b5#6QRVVOiQ?s~St/"=Ph m7BՖ.vs7VKa hM6!y8GvKj'j)A4RoFNA"4PPspF.¶2 ;89ǽ+T bԜsqeKnw[tfbuۋ|CĂ=H2^YٜA#'3eoTHTp9(VTt<<d)ݞV@琾nB226B,CNgJ q94^[*XcjЉYR%2~e=^kR0j\rR;@•a%YALMP~|cyuaCA0BDBŻqr4Z4IɊ]o9g8}ZIƄ +*BIk9nu^UCR(CF=վwPI˴ɞ/$ґZn b4pcWrDsIg4i 7#}vՌŹSOEx;u{doJ&'CQe!5pĩz@+6X)05#GHx>8E{u]~zf\D!$p n$:z둴VgAk+v277+)\tMAyƮ[Wirtq粤]jڳaw|y "IiF0!Z&=j]&p\;}'*cg CSW PH13:Lj;g6*@YD;"r:\T&r6,"T)E)evZ^)w Yo^%(t*uIP.)g-/Ga}Hr:W} 凌 SH0 @TU@pLBPX$L\ir9]/%++n |]yu|{鎹#+7li^}aE)-"{EDk9#BZ6M: wd ʏSTvA&k3Aʩsik c]fNKݧ"suw_'p"*g z^A- CŘʹ˲f8'&ky<.8Zvw29@ag!  
A)Xל19[AA̬ŪTn AC%&` "ِeKr=[aA1r`Ζj A $ v0͖m:jiB`Q[:S*0h1-2b*irg5*aA\`yhӉ˜J`Mt|kC\eC?F(ki~6ɤ<4s쑯S&ç 3}؉T7[QEڞc[79n܇z_b&cyaRЙ+f2;ba'Ð)seUi:qiVef Xgg%:"t!k;` B2%h( bL"IyNc"J`jP'Z\%y1vz9ޒXGBk0E9%ɴ;B*F dշ8"@ن٘kIH2,2|eO=ǁ$1rF-My\j WSӝ߳Sɇհ2txR i۔Sig$Y~q4璻1쮠Yܞl(b*0(*rRUئbj-F)"lt=jt>{KN1V6s7Bkk9g3:fE4Ε[bqkVBL7$9#J|K;YyN;L-=)rikqaMGc*hizY{{q [P\+v]_,rF;hdVew]\4N}=Pҩj"z,@mbvM*Kkg2]"掲_okIO 8=fIR9s8f]l sճN'yӹc厕τnSiMNe,^ޭ @]3)h!ܻs zNuʬPT(˪\C6.‘Rțv&b\%a8[Xi_o{mCqƯ僟yAi7=3i>z_ [.|_<ڄ[}'R74?cnfw)O]tAѩI7/UǡRk\[,i۠cP1|;v}a_+j\tO])J ->NYJSS!SVw\&Jun^9K]qǗ x^[ż'.<9uQmsUg-ɽh=s_DwK~%/NZK/I[КzuWdt>619iާ^g7º#~ӯn;oX,J䀠ZJSm!0V sCo.&[Y({F{K*kJoI .>1#]ǜq4U8i$X O~x  >/oZh4Y海.Bq.T.28FuߠWP=۾1L-S6qjyJr$̯w3(m6|or,x1bVme\PgS~}bқ`A75p;;e?"ڲ-%9p7Ҩ.nm{:m٧=XPGZڥ|p[P.,t?F˗`);i3p^~߅@cAzrTޜ!:'0bqe>=>شpw|tNSi 7'_X= \C"1m6iW)kH>gzeJzZhG 3U#kTAE Qro_>u/fc8d'_ D<>]ޮ6,MUxӡnt✯Pح|3 4l=\rX=Z7YRyuS~OQt =ϘY[E֚+!8 20lٲm{H4'IsƗs*>~v1NK+K xs?| =| (˜IeY'6냾I>3ڝíu\}vsygf+JrDu+LZ9epmԶڂ%>ٯkQR-6 E шϦCYA= z}RJM;j|}=,x;yރ@<[1zrlWi^W=BbD>b }@(e&(4hPgIy$B(UYMF.q)D:YSJ,o)jLbG:uy \T}9G-Ki}'eJuZ#8@ )DH! ĜӴ 7sۧ~K`[Km#mewC^g;ۇ>Z_`3: #VU G"<쒿n{,NZ<$z%l#2-QEZ@֎9bd"ZC .o" g9XxaDD!'J`N$ pHq7tx!JTvkɿU̕$n La&1@ $GU"N+Iw&0rqs()CK"ZHW{y9A&y*k$tJVxf#T*i]18y9uf&)Ԓ0J*cFs4z"EC1F#W!cmkӖ\n<KW=z~-̋0P]hdrk:up ^VJI9)֜]Tm"T׷1}k>Fgvǔlܤ-'z?5ߊZgOO\C~Oߏź[n7G}eʜ4yg!w&2)}ˀ{7,VBAɃX}p$pm_G;~3t6Ukx:$Un*|rlbaE t@!9?\Ӂ Um"SjU{- \fT k`{^B įN~e\Z&QϩөX!~".B˟L|ɼ`+ÿM9V_Batrh(IVc,a/;.MQ$ V(J!VSSC6#(/)QOuг$ᎀr$W a +´<6e$ :-"@ao1'd!˼7aD?BVSDBm򞌷ڳiϊDZ#@=.}(SCP~ıE1 ȯ'ћ''4mj%X;9kқq^\{dMMAZ07[y"X2S?IS%)tOkVH7Jk_H>SQ < EadAR4M/ |:ā r!6cIB&s#8aӫ5"z8N5jf eB dYab K,ӎHRD| |]WWAQoSw5sS 4$N=͢`|, >[ >iΝ_)_aCjFMȫ(j 紂䑸rN6yVM_X j5UHi `:QKόO TM!h!Rєi&+oYV3nÆ}~E ~v fӚ{~siZ[rG˗w- 0oL1'9ę9Rܽ(FTH pAazXj04]9\) B6x;Jʟ$wuPsqyeR2$NƷYw -3PH8H5yI<9iV(Ю#L_J AFW'ZÍ'_/,}9|J4LA[ "m uIMDH0!()j, Սzh,T)Nqy$0(l* o F# V2;(]yIPMba!#ȅ4FN\&J?”&Äi؍+d(YPFٮm#eD46b_N'APpAYtpGR%>4 ܣQJh肷QH Ro~.#r Om0nY(%謒IEcvOm()Nycx}t3jX#NDsϟ}#!S5O6U{H;9ytP ZJX[CNǹzꗭP֭M%B)+^8w|Ngꏻ!dH=s azQL(2?kԟDЂw{̽H. 
&IoQ tDپpS.8fF<@ny6VF ߄IcjkI'C#u8MMh 0\^9Pz*oaD iTٌ>aOt+6HO\,BB[sd=<@S: +@ēUM"-JBvZpGhh(|sC)X-ɻ]k Há!O][P56goZ+PQbRtaoA=%BPstN%U€hh_Ǧ뵾dȊ,C4& l /Uu[1AQ!hk'4CPxq҄FD`" tBHFa=^f ZHy!*SÛ8 D8XK RC #a*%-j-氇vQtKs,:n+{L;j&`aҷq)'b}[t$FxBj<0 1+  $H#r^3c=<YAKx:|TĤ¯IE$8?wyd'&դ4H'LݗZ `T P=~wyOa-UWˮn`4.Hs 33@2f-lo5ؽHQQ< ̸d]Yg=Kl˅ʡs~;3V',mlGÎp\*56ٱZp[xn6f?H@?/I&FHBUST¤ʀȪʨaY.Їk7 "ʪĘA,ʦ!1ReX0 C%ؕBતWgRPU_sA|?s>AɃqHm>0}`ߔ:Czw\Z D7tp#h4\msZD| AAÂ];oOT܃|T[A׃m8qCh73"L 6n7t;7ڃrpti1 ̃gR0j d}PodvΜA >l>6d0b'Cj4 ՐqC w;Pwp~7kPm9om6A dw`A;3 Pw d8;| APw`sfqq8AAi7n1C } jv jA9npdH pg\ 0iG1|8Gq[WUQiT8!D,BHI4rݪtZbr6DAGC)K,44 U!+2w-f>,-$hH$]Y*5KY7Ͼ/,-|lM l@ [Hɕj@Dgݴ ?̥~o-]V*RM 7y t3 g>M'E/ QWpNUYJedyA9HTRD sK~Q d0ez ) p zZ̋[.G੬` 0_ ů;j̝Mi%(VZ$5o;OI[sk߽A3ml4ab| -g4xL8LCTjع{}mU0jD|gBhDxʻzBF0HN6qغ;JqHwRՓD\t"CD#S'E(/wJ[⹲,4hFSq= H˜, E3k8-]@d#U *>?1@lzdZDc!/!bv5 iIȿkMyJt!Ҭz=ac i.II'RBTEYϙ.X3eJUH'U1Y}M>Pu甁uܟ ;Ed$i|*HW!E!Ϸ٨x%3X=ņҲD,N!"!$6 B ӌ\|[7 ? ݲStpNp!KŎ{MtSkfW)PWf*heky=: y#tU ͥ"R]w$Q)dLQGIX~0cK\O>22kMHzQ*:`2Wb!<xt!-% i !CCiGZ2(i2 5(rq֠obPCCbqC8!̇4ҋ!!j Am݈zD8AЇAñ7!s!؇dCC !:PԇJ!RECh5jH|_qrlmwgxٍW $ _B(+*6_ @FCmGG(&&BXUq1$u_a @qNrxӾ^ZQ>̠FSO/Ӈ(qA(wPdvn.?#E:e*Gx/Կ~jE_^* k 9=,kߗdNa!`G2\{cNba]-=R[QSh2ЀBw< mlSA,RQkrs8p" [ :*Bc[ڪm3@5HnvrMM/j@cEǢ 056Q;@K@,SMZ)"N-i4@t!1 *'0ל=dHIɎ$נ(QCda LRKh1TKW'3 L%GU &m}}78S܋<|CYhD*NSI9eQwk};syM$&s9T$(/X[DyQ$ l}큾?Tn8* Y$Q:/~r#.^{psD^}v],m/NDTD' :g)hzlw%!=k?>FAgLb%JbDF_JWff>l<KtNZs5`ںU2E >tEB8Ή#B|\AQv_ (Muƨ?.`(ˊ۴MvIk݇m{*pGN$ǥX2t[/G[#m՛WL*uJ2U*IM!7(qmS?8aٮz͠AF⑌  f}38%(SM1DDgz݂R{>~^sW~9^w}_m?3LJWcM#~cu%]i A[暼uia3J&XFEgGlJ0^"v/]]x*yv=d]pf* 2 'n17\bY}݆,6ͲvՊAZI{U)BTO2h/,n󙐙fRxEB'"%E%*N|NW))sJ#{ i•;Оj["&8<:~sO\mʑ2@vGOˠl.*{as CӉ(-vٓ:~t3QֆD\Y;X>,~>OHF|']5<^ l$ry~ǿ?|%;.GQ^u7ܷ)b|$,-CQ!Dz[zEXܴ Ag^/,$9~D3h=EaOO<š%Qb@qPxy?D&o~Iɗ3s*>}/up58 a/\pxr@IRoD4#(<"vvEt8n߸8Nhmn콩}NgPe\FɔII>zc۰k dQ"7 (iW@Dza-tο\B0(-h ;$N6 6=wUe~!Jl7¥Jjb$2II!NDxx$Y.Y=o|@.ny9x2x~!9 =N"%y!@^*9}I8|I9'H/3ϝ/]w(sW~/i۫ iLO+lnvU\sa4>ˣ ]ZU<(8Yu,ۨ:+H oz!άRd{$ۿ !4Kqud[0 ZFESNwC4Jii" ˧\;R:,x;>w}V*oxnQ$nww^'W}rxrT.x)K:d>!;)9L) 
Su"$ABLNEE0, b퇬b%=||.E1 V'e(;Ó6]9pW~u+~4ߏyq$I?VΜv$&r$WwBU@'ېT$ 1+q ԄiƐP8\8So-ɂ@U06S*a!r((>2x{y ;.'AxWM tD:LeȀHƐ\K@(٘$Nۜ P]nMvv2헙]$@\(e@\ .QTL#|@c.76]:vʪdEpN\N“8'i^t((/˅+.‡ry,EM2ber" \ N6SBvˌ.VkviEDpeSe™E36I'N.I dpPҪ ntdPs˜qq젫.]|p€@Eh"+SԂ &ʹEW.2Mdp`,EW9qSee ҸGsɂp"ܘv0Dpl)r2G 2@W8¡l.ʠ˱PUR)˲ΔL $.Ar8ҢNҨ޺pz ˰ˉ6 )v%.2)=&3U#Qp e]@$ap(TP˜'(l&ʂPAv6v .aNq>yќQY (T6PScu.˜™T ܊ E\(@ ]C($q0UQt{A3@8P&vP@v9Wi9E4q #ɞcT N1 ).9q.d(ˌ(ePI]XtvW2;eP0iI4IMlKH2r$D'bLigPQftA$!*.BM8\ ts9ErBUu9%.sؒ"I!NrqL2[Kp.]".drǃ Xrg. \.P 28+.U\#,2DN˴VQLIrII'HTBfvhl4.SI&5]@PGNQaЖrIPkiB@P EȬV3Q BD-+Nh$\4ʇEH@Ȳ4 iD$Nˤ$N%TT$:Up)D0_k]lJ검' B*";.ieL9XN &I*[&pjLLeTMM"$D$',RE$"2ZaȪ\HS:P' / \.S9PsRaWME#K,r\rTEr!+)RWE,Bʊ*MQe\##UUW5X". +R,TMR.2,e"UE8vU\.(9EU$-%3*Y" *eA&I &RJʪ vZHI$Qȓ0((ErheDҳd]I $54Nd HQk)UABv%F""f;4 UQG#$FX3 EHT\+D$"DrPRtdvʉQ1%XeՙbJG:(b.dD\(j(rs!&QTWRKhI˳PEI92+TtBLUU5("3 %hBd``W(֑ JTKZrԊ5 aeQU$5Q%EUA)Q,tLʎ\Qp 8Z*2""T(ӅdPN "Yq*#25mS3U M(9W:QDiETa%]290JA$|4LNԁ fsDQVDjE " pHdQQG(aGeG+$\"QTDDB$SR.Ȋ$TIJʵ+$*jVtk-)5 hUM$·L #YQj(Eu(\QLz9UQBU)0(1H$N[Q H@UEP",!stC ҒrU@ *̓Jr*UlȮI" N(B+UPQEZ%DQ˘-Yt ++RʣR*J2.PMsVD(Ld\,*L]3N2PQJK,2g"UL/$Hj rBC2H)DBK +%^tp22-R#TUdFD$.$ j*:E'UDiIU# !2 3GJ LfQ9BE9" L3-B0òYW+!5,9aiBa *GNXK["i#$] M DA\ -QҕFFdB*+*:GIؑFt"쨨DLtG"S+D(*IU*pjHJ.ViHbGauZR,֔Q$(JQZ$BL-2gNҺr5i˅@I'Tjc%JH'(dW Wf!JI˔s12.˜\.%fv(HbE]8EСt 8\"U CPiFFEuD (VDAj ҍF#1$\+YJVf\@je(ND\+64 Ы,A4֢)YL) ҸQg (J"UrT,*! E+]VNPRE2iNFXN,Jɪ4!S.ie ]32,I$TtI8')$t&ВeAd' qJZvGN\.tYL8Y2vUEUhTH4KTU] $pb/r.(N'5QT j I$NBI\ siEؐREYJ(ԑ (.Ҡ$S)" N܆ĘZ:g E $h \eYNT@$YӉG ΨepN¬N2N]5e]$&Ird$RFhr8tZgL6ȉR49d\ N˜'C[I#FSIٕ](qrativrfRMZLΪrevpؐQJbEВJ4bɤ$(#$\vU4 ȒI4rRv\Tar fle0AELCJc5őW5(!( i \At*eD.]*9&!iS eГ*gaI;L( `G*)* ;iQ*Z$AbE5R0է ȦLC*e 4v$* ʊ"He˜]h%B@t&48&Bq"!\ *&L(LJI9feqQ2e;) `QIp:LJ jˉ@Uph2*j"ve˚*RrP424 V˶)fSIp*숫Q6sa 5i$2HI8C[H e0I9MIVBEP\AC$W@RBAPk!E8Pq;e NrpB. #;. 
һB3Uq8S;iˉ$ 9r&`] 2,SC,%B8a*v\)Ɣd؜.4[#vSL@ <FvPHQ&$4:M-HIҡ8' Jl" N!*!8P؊ؓ)P aM%*L&LHI0e% iӅ\$&b bq +*B8]6.MFS)'.pIW()$BtT$BEƐ8evL젻i #i6vU43'bvSi(BAIP&˰.BH)eYWarsK6\I cSJP\)$(e6\.d˜*P :ebAC(DؒLpH]H #ĀNpHU0 )iC)SiF ӂAgi!V[CrdE I )2m ]4&v `PӶY`9sF 00ˁp L PLi4S əeP 18 Ĩh\( *tr\e6!Q$i6D@RIE6P'B +" N'Le(# (iSItˈBv*S\ 2aDΜ6ʢLVbQĒN좪;l@U v']t2; ( 12SH,@ (i )eª.&@QAve$$ˉ e bpQe@QEŠ\jE2'(M6$6̖.rILHH ev]S)ؐPPq٠$Ӱ9@\Šl ei9vE'i](( 6]M6]'br!peC.) IM8P˶$*l ̳kY fUW*Ȓؒ .0€֐2ˁvDeQc`$.0*2\l\ ( S&\")c# m`9C(aN@aEƝ]ID Mvc(aLS Ȁ˜ AMSlaE(8cr(dtBa2&QvA0"TDRVL*\ 8V( M " -ieQR.UȪ*("ETVTQvU29p(.e+UU" ȊUreʪ9" Dʫ`\(3,03"WM*$;h~yHb졤?PAC"D_*~w:u!؇q Cb;i !~'!Ї7Cd<7C!(%_]:s!ևCt=B!2H~h՗@^f7p~FQ8 (:}+~w} ?~^zފS_ԇCܡ؇Cr6=E%Oy(t!6C !j  "H7뻾Pg4:٥FA.bCFO oؒh:@{ Z6 =Ƹv½N512+H[@dD؀a`!7:K^;} !QP|?Ny5ǕT/ևi ߟw$%<y` Q Gqԍjs k]/2q2q! -ijx&0CC!1XCt<9y>@;Hzz(qCHnCXm C騾y^vYU Cd;;pC!C DqC{߷_g/_ RәKjBĿ[WWu:`_i(YRSY B:,ba&HU Sse}/6ycsC^mymn,/u݊?|>ĵwMwvV4aY$)xbǜN!ⅩǑc÷@` m烆]VZۗN~M[ӴZrridMȜ_+_H ȀˈA臹C87CԗiBZ!cZf2@d9>Q|$?%۝G‘$2Q(yhfAnC!!OPC(t!Hu49!HB 5XsD0CC#rLm,>tG̋='ɏ}I0CO6|>N4Iq,)KHBqoHIx$ Q μj 2Sʃ^A;d}YDŃPm5qo!ʆmd"0mP[Av:C!dP52 ΝAf :{Pp߷ 胅95pCm !:?:?a!Cc=B4Cd;hi:CCD?`Zԇ1K?=CmC3;z CrC /RUa^2`ò8Ξ`孠Q Ed8t!0> !(xВv:{yACHa|q*Ci!^,38_ !pCCHxb١!> J$:P!Cd҇y TCCd\*5HwҋCC O}w5^^ӳ9reRI[R=UWW>{40%bPPA C!rC͡}(w 3 t!̆t!Afz(t!G#cv,0rBOP}g[9c˺B}3y;XX`g=i>&1 im$eG!?ľ&KY_yGx|=8 !d4l+CZCЇ10h(Cz$6CD_W^jps3n>CeNyxܠ`A :ot9ph2 9Ao88C"Ah8t9bm lo8dE6C_u !$Qn2z 4kHbAg>BP}u!PCP >Qz_x.8y$1PqdB!!CP䡐whx9!C;D:HCCCCeBl.hzd<<(tCCy@58{AD ԸBZP@l !n^g5rCס Nqd6Am Az8mq[A&bi 6CD>b$86Ct:܇(2 p.9!pCt4*!P!}ʋa7 Ne&sCd?aCHwPi Ad! 
CHi{1 .<hr,C7`^ =Wzo;hAw6b4 Ak mm[y mCM!A 1Exh8Ai bb ~쇁 S 2!ć4C (n(Ct1lC!d2 4CT5Ct8 !4Ct6CHbCHb8 L/M[p%$jwٙ,4R [|D7L=<`adG$> !P/C CJ.~@CCHuiC!ڡ>Jh|"CuCh5uXm v懎"!2u!!>9!ЇC†!׶AyHxw<$?f% !AtIzh-8vi V}4Jdq]Fo>/?a]~/+&]Σ,RnRbJȘΤ~5* 5d :Aڶ)4ʬ$?^$0W'}\>qLtIwHD<_x%DH^.B%">D O'eV;\$,ko=u\vVg]=u"`} ١ԆU7CCd9%KyI߾N(aC&ʹ[#̘7RPJzuMj CÝYsߡȇCLۡrChy(z*-!CD1C34CI !ԇR8!ȇM1TqCA 9>RPmbhCi4P6Cit$ !dgB~u!b9!Ф}ߎsCox7v2PimZiA8ť_$1Jjd|f!̈踾)x ߿ڏDO( y&'9c՘GM5H @i>;IJ|>\Kv,CZ'Z2(thvAnCo+O޻lTdU䟧6(I7 ΰuNm2D=!:wt0hD!ctq9! 2ԺH E[Phx5bCB _4#kCε΂_W"~P=D!'y!`D"^IBvpӇle}?8_y$mh2T\VЇA;=p}xGA}Iw΀9o~q9L'L˅2G>:y⾻Ժ +ͪhY;~n>}[9>:IG.8Bs**r8<to=W.'!SkhCrBHE>ԊEDP,8ULi(a\"(.吕z%AT¢p9 (\Q<+ QP\[iD]p"DEs(SzE]զG;Ex}uW vRq!9"\F$ r( J]H`G TvQDMr I"QGmP )IaȈ GQI T.vDS*QU ]Nh I:IڥI%tɔ˖rD:DB F\(+ZY$f'9(Bf!!ک ek(S%6D((Q,*™A\pBQBIPQdREG%ETH.%3Ese&lu\ FEVK2.QË+4bBQ2(\#ḘYB9QJ)5i(.(DAQA2;*t*L(ʨ*ċ#.r%r(PPEG#r9p2#A* *QLEqÐrS)DQ̋҉( Јpr*j 2Q4MDQUTօ2"&T@sEU"()+.ÜeQUQDW .(S#&\r( vU#UUʪ"QTUEPUDsʊTD ".iI7(}889}Lbv_(8NF.#d{rr㏜GP8$"'ޞ~Ì}Ͻk/[h|UǒO>ttnLʻ(JPS)[sl,,5)8ǎzxG<|Kzu?[I$4 o'z ]P' PQLx\2.@˲ EY\(9> Nˌ ) .\)$*@(\aA e)0)6€N[ RBvjhMšL(.\5&$L6N0$p*p*)a@Se U8 .q&.2v`]Š.\-m88†Pʦ6S(.v ( )(eAE\( (( UE®@QG;ee0"l Lc2&r *r{}pi,i)tjӣ389g'+|!W;nv^]z(LyNOǟYv{22v|M!v>ٖ`oDtM@.1l).]vad :@@C idqE$p.ؐ$\. 
v6`'e 0(lemeMl N$`\lm;l1&P58ZfRDMd%m&eYK0)1 Y1&e,Y(2  ˶2( **숳8 {X>'&ƑU~&cf{ed2"XDk3 ʌS2##Y&bZZ•\|Jp2m\[a l92zYfI8e*Qc!*Ys|Mdr F(ةS2RmeJCZfhцeT3Յk%3$16$6lXrz]¿(okɶ: #z`I/0{<[%˯ɺ,#o)M_}^>o hr>s^_nS-^%h$Ώ xI2"uxB=kqCPO@_ $1ha@^o{~ޑ\d= Eusp)0;B=?=IS3{߿e,~'BG')2 35=G1nw 2."CH4ViR u澝N~>y礓d"V#>uw;}&QGJw|zrۭ%MIAq6o-/v"~[t]v{ +XP;zL%~;==OE^twW|;OR8`xz~uEM6-I~>q_R>?~ D睼pğt|8ԵzO0o{)-1{LJԜTTwGRtVHCHp~"WD_;!uJ>,QDPC,wJ(a SwwjMj c-PL g"M~r8Sǝݎ" 9:SZOTWI&~T* :E&yA( 84$I'$NLuȢ=[B~].xU<<+VT{:BԳ9p'&uoO@ͧ'7ڃBLj{ߤ^G ȦQt=IOzОQU0)r*ʮq_>!P{㱷@$~'LJ&~|twؠ;=T~#sdM?gsOǝI;z@Y 28?bo OHHHA6n]K!Y -P$ZQ'DvHE>'ޕObs򼏧( v;|M xv@Dw)q!)<,\E$!p 4~Hz X]O㰉I$P*ےdЩaϞ^Z5Zt>SU4@(J-ujFVZo 0.UEU†.1 ~b@]8&wM(2N ~ZL*'EI>?_z`8Rgdv$2(V2S~B((u o7`!r c{9EQ |ߛw`X]^}rL+Mo7wc?-|χlW_?w]+>u5,u6~ǥ{8aO.-{Mͳ}_jSW9B4>w,[ӱI4?>8~Gygzf??.a|X^r2tW <n?oz^Mv;oymo/y?|!ҟ77qƜpoRwܿc4dgڪYC'*lA:_10zhT RnQBey~dPnjI ƫ5< BB1 U(14jbVsxzE&B f9dKk]TFS!?<܅reJ ҅8iC2)%!L*,.93@B"59Cioh[-,ʲRBU-jZ\I[Ni]ybd2<\/I.H6`\/3u]ίsʳ6m%0N\ {L%g8eZU&``8`IdgTAV2k0)!ͥ߼b +M*<Ş YcA T'oLbeV&MF 0Iof^J3@o,;`YwUJ<,,$c[Whu;MHk f$ Ia[TjTȮu%I51$&A1,SC&oYn:d)SyV lO3{'c^I)8HsָqI>1]J3BpqOhMr$)]ڑ#kXyI|4|&:g|4#bVQjv&[tRe}}Y՝PN/Ui_Yq\e%;{2%zs]^}rk<:Pt@DyT 5x)+Vðԉs/P{kc4*.*E#N$Iҙf\0}Նü+_nF40 ?5 r$LYc<g\4q$N :AR)}c-l*?6 oU>ٰo&JGMoi”>[I>5gƿlkL h"[qJ q]P mqf"' |$aΆ?]r\ts)&i飕dUB*LIp z& RJj5IQzͬKN% G89@s)r9-Onk-/e |@_-1Uɐ@Byʓs! 
/Ee~yA p\wʇo65xdS S%0^0<9ڇP=7o:Gn:R&-Ж%t@H$ހ7SKsZ1w!%0;IqFGa  vr61\gby#yJf&d$=;\)b u,E0 IBٙ<{9-^csYD' "^ xWadE!ITԹU/}lc420=1S(ĺyĮqd&7k]-΢iCߎ.84sd3^>ljҞױD5d.o#t1ȁވF=eVYK@P:qr2v8;;9w7v/ڔKͻrrנ3[!V$|0Fo=E/BXڊ4^HA2H ٧$z2ݓ\Źwqsh GSԹ$-HXlkGD@ d%w!3VYl Da0$k (f~{pw!RK܅ (-`@46N E\ɖjN S*~hUuTjʗ8ԐN͐B#17,SsG!+@m) m܄5@hͷp!@R2ZWjjG{6a밒]; /M)200#mds m }\kOnk4?;7"o!?#?$B%XoIw.TPM\QKq6MUeVތNo?%>#bҗb\]>hnnDTS0Mbt$Y♌j 6KS\jt&8o2b8Z PPSh5E[u洊.V7PNx ɆYQBzRgaq}0a& '86NPȶ,#Z _2EO/cʊ#!$ x`EXO Qt>>:h~ҁ'ݑitѹxM{|?{rQޗ٠wdPuUEΥgLC!:`z)]`),zx)V\[J̘.̬J00g~~/_E*&QK0+tyz4&igo3}6j,;quo=F}蓢hPf p @@-OmfX|0 V%u.UkJ.4@p GW_ƅ 5` :~+ Z!Ŷg{;"spq2!U Øh{&PoN) t3Di%RLY²| $/_?k#4fɩIR8U|Ą^gɈY:[}tn}74ǕeƢc+w*NIAoUpb$D:o9{^B5ySi~Q5NYuosZ`Wb]ϓZqucy2vGIc! jXb&vAAhs,ݞb 勵תi>Պ`9+>-`>q`f]Ow(OAb>@Sqne`4p;ȕY=Mc#{ OQ=/y8P6(?:z@zQywp|3G!vPUw_H5kzH'ڲ#HpVІ%cwNg9͈2.m;ecP^VRkFLrY(tYDqZ6j&;67KAC6Εf% 5UokyiRB ^t٢`A';]MY, S X ZgKdXZɆYH0L+o[gqfҵHI9 dsE/ VkK '9gUH2z!(զoeR Fcp4,qGrE ni{su?9IVП#TX3&>~g]尭/-+$Ï_}X|;Q Ѱk9Rg\Ymcc~ 6+G r,ImQee' . .?r4@0^LxA߄YZOe-$)X5 Nk8n#n:{4KTCR^G8{ZOXF S։e rBCV20"?P+|$e_a55xm'@0b6:?Zns&]c@NtjCu~_Fge0Aj߁5 *(v9DCJ20ح&Q|L5J7q:(KţBs@ /j@~-zheZhk5wDdm<++9G FJ,ݭ^Fټn~Е^ ϼSluk5^)l9r ֔YZN5[P:9=4E5{[/k2,a<>UCIr 5 9&R 7[ )$&cegEaP~=7Fѳ0~Txǐ՘9gDCy!3(EEiNkhloPɟo8v Vg52ߵZ7 70k4im|GnC?&C0s%za ߄l(/15nS[js;UmseDksVJ%AkbT*ZG#di}ITL6_Ǘ6Q`(~QsS}̒{u6Q>)8S Om -s\٭ *3c@UZs ~q@=}g3zsM0`] !B *7ͼP߆SOWSҝV)*40{txL\h !\ަw/o 7/{ !wof+$wr壐:FNsnW56(Ɗ&F0z b;=Jjy9̠g&d05( 1aҊTww2nd˯G[19,M@g,W`&9/ZGJ]ZSs:V׺6jA9Q$K7~ҌgP,Fģi3஀ş͒19&@G#!IEY`²'62 ,Vj ~dUB2OFpy~;&Ҽ \糟Dmz}a╚Ka+ ρrr>aӴTo&/iE^dti<}Cr0ӎ>*gx OWq`%CP&;e+} tz#D(C&q| {-0%VI4¬%6UӎvB4螨GBLRvZNRbR5ٹ}ňl6{| 9@v;Cf)s&{H0 Qp\ْ?epɩ]g<{g_VdkeoaJ]{۴8k\ -ZW@u7}- =.@V Otܻp$t w{C}vL}cᣚǞ[slv-wqRH1MCh/)#b,   A+$+A-o!XB "a2xPafUC7(Bn$I'_DD~CM"PB})Kh'GxWW3kq}VoޝrJܼ\BcvW`!VG_;Mc* B Uו2:^~{fy?%}woega|z8Ҕ>`"[f;IuBB KkR!q|}3oT|TP TMQpwO@@!4M &h b0&Sjbi虩zj2D= dhڀf1jzh EO 5OT#fi<~bF@zC zP44z4= I OD!$5O44ihI&F 24S*RR&a4h4CM4204d#C&&&@ha0#@bɄdɠQ4M A=dѦF12&d)0h4)l&Lh mOML5=3BMGL؀CڦѤѠ"z`2byOH!)!&5MF @4 
M4@4b412b#CiQP>-_z4:1mc_ l&Mrmbs,K9Y\M4ZVH2ii|lxH=# Rs;+k.Ci*)8<+bЍ[DX^wi!)Tt'aYt[[ vͺ:lx{gkE%oiLJe-H3)5kIF 7K=ZG'c(m6m5{Qڱ-[isr-"ʡY1y|6 .9:}^ݛ.ِghٝ Š+E HFڛ:RQFJbL'~P[GD;ۅ_&7ΐk\h 6\xRM>9NJ\.ɺNJY`smESy[R.bx[FuRo~3]5ێH:S)xPH+mQ((1M$_7HE"zIMCCV-=0jy',5z&|En)J@WM+1c^${kÊ߂uZXMM EUc_V8ow{~g0|?Ka!gccG3`wcwJQq]s2_ź=|4|ݎğ<Α__KZrz}Oz>8n㫳N95zK˗mp8u#mJ~ t^&)Ӝo2Z{K;3[⦳0w ًx~ysVеúSsrt'&W%A|8)k,Q&/Kևb>Uwz{|?|sTZgyVmVf{,Ns+roGYOWEIߌl\4^c徊y^Oq˟*uަnʊW"ʬfY $wL 8siWJ3'P_>~L+>g f&@"0@D Q.L$SfOO+~N 0ez[7Z8VVEߢ0n#ѿ~DOҲ&zD9TIU?)t FRB kw0 EJG]܉Q@@34)XC\Z@COLk(Ā9Vwi^\X*azѼ?%P!/a=&疿}s_cޣnj}?,G|TgK`Xn_8#2ݑ團c=Б 0^Wݹ??-gZ\{$f~2r}} IA%y1j@42hK &ad73p/dLd&sb4F(<27qO8vwyx8x 7.=0< ~k?y,2{K%;΀*]cW32)C` (AH qUI7 U)P"A%Zf#s#p0)0 "Nlc'MYA ,pC]tsl6w$%E["q5n'%OtwzλMImKgl1PFĄ x֫9ּ? '̠Q\4tE$ǎ6u_o D=is^ HNRo=.kylv<:xuAF(L(%#R2HMKLI=D4 ts Q"UG _G;~dqvj,†©+*t3v^z`Z=,V5tZCȮldȪhgerɵ}I A!RбfhPNJTN5)5n:κ/z2)J"!dIbCS7rJX h2I%h|=蜠VrwG+>CUU6bT1-tV^'doK6QүB|Y3]@d__^}= ^9.KĮf`!5ڙHWyDZPJq$Ʋ$21**M"Vk2/GRW p6&g7 JRleGg<鰎9-3hYNʒau Ý& Ye;ޙ<3ZWI, œ'VPhI]m8p&k4'%Ni؊a/))9 Cn.% (j3<+&tj1$]SL&vDA-IIBgWKe)Y逑 tRlY \:ʘKӨs2ԌV'HVicv|,yJϦr5XI=a:.Q=9.l !FTr(&uI)!)1UkPMDQ"`-h]RȬÉS9{iqO4G*otUvj(w[JæhN86 !łL I0DmMzsjGS#r,ص38=9L+IY[zh?O A/C?5wY7c4tmE@i[2d:~#q4/IKEN*sloYPc]9Cf#茀kJ̃o(KMJai`9˳U-LĨ؟wd<) b3]'K4%Is2RD駚TKd|;5ֶٛ."K$r/k1B2ӗ?UrG@ObxU(SjvrbIyjj·{4S= )!IIBuᚱB  {٣W'tkxd]M8\|Uv*?#%B9̭KSb]lܘ06sח*ݕ&8"鎲f{ PҖ{Dam%ӏ\L$E~w0o/dT/.T}}4} !y;\}@{5|_R:ϔ7; 9FMin/0԰=͵SN9*J-.Ӛ(#DRŦuڥ}KGI9ǜ^Hd0-({AJ!\YMk0sw|.eb~I۬0v"b=t(̆oHÜJ }7=&q('|r:cut%f}>"iB\T7;plFUdˈ-:̡7 ]b,0 xIDo4c[Mx>T#[br tֽ)N]ZM7e4r: V`$m˜p`J$ˮOaI+䐢 Cj&DSZ'e)iZRD&x̔9? _ NI @DUGM^Uvw60I&ͅMIkaPX^n| Zff1_RUg#zz_k&2:%?) f*ijRؼ䋧27eTE,2"e¤$s2BNr {]?,+ 8 4]$}LBᠽt Q%ecb)> .(YhEݜ%͛;&"u[mu{t8sat3kFԛ=wL{*Rg$xJԂPw1TmշbkZg1ănzm i I,=q;QGӤ!eJt,`"mz1kPA`l}-`Ĵ 0pL|>.>5I<6g)<[:kZ&`@ !H4-4@4?Z[|bYwy*3$*+"3AN6)Azyq%˙pH 0 Dq co8$dt(/QbA A񀐒;F|5(Abh8c%#,32vI41O֮Oˢ&1ˆ(G7Z휁t) CX!kEimo "! JS،s,,&%:А)4jF=b )т-Hj`SJ!H<~vG!'8*$" , QaAPD #۠MI$wޜHC}ެI$' ;D桶z}b,q}¡#"}? @f'D' ?/h>H= 8|oC O? 
*?@!\]bdyCWO*ʏ) Oͤogo2oI+!O'+iQ=$o{;l'em4"BC  X. Q,UUIS#P`}NWgA_|Ryy4޶K QBfEAݎ̆{*0KܒM `;hmVC*f%DIl(KYT`W  +x/C!:;W;/P=@lz Ri\:uj]!4fxBUؘ +}f  םp x[=iId" ~}unп=HZb:HaHDT'(z<ٞqmظk{[*ܔCzTR<-~1|U$W=/s2SO9u2ΕEY{d^c;HCd I };T\>}; O2Km&/Pwc޿'N? kG;x;: !"<ʓV}OH9Kx]ҲHy.7>ntc|(ogKs}@gDP~|GP5[ ś,Yu ɑÖqtZXIm6> 6{M'su0۝JF5J$yQ&U'* :ժRf5-mސk|z,싰{oEp4=!@'w?x[^07bXll/f$Ds*=e( 4t Ġ@,I]mSe $%Z$$ V8 & w 3Qel$.ee;9N@pH]&H<)$K ̡˸VRW}z? zެ yݺu\}Ϝ?[[7@1s6ޙ[lCl13B0,"$P2HXFF}Adx̎v I>$4rh9q2ųR2:xHAH :섣ld!-[$&@* vU3VLIVBP$-l$f/xL\_ɧ+&Gh1'D4/pW* n  *y)ȉ(@DԧjM`kl(V6*!4DCLIHP,JBQTAjRw$GtWtuҬ䆁*- )vi*8gqGbҔ&4 ֗E"ѣZDtTPJh)@)te +[m%#HҚ]P+4ZWIKDISHд6@ҺR I(W")lK;XeYQ/YuF^7geDq:;::j΀bK-I0t G$)Jӥ wNrw~Hh4ꒀN)9ӍbB# <T |֢ ]*P`;/yD"wG|$iCd>zЍi :i <Ehi)˽yw$xî6@@rtRKB-!NJJ@…#KSH4ZEzM/ JRBrM iWr.NJh(B)Th%"4.\ !H% SBrA)HTJSH -iҁN@i "P0JSpi(R((zxwvG"b̹)C@hS ) МSH E;R B- (PS4!W@WHrMP =y*OSPu@:y E/$9( #J@!KC44R vtR(!*F!UTP(MҦ644 !JQ 7V@:@) DB=( @ Z)(JE)A(JDA@ЃBhEP@@4@ TC&9 !@z C. J籓J-!I{0hyi] O4&z M)BӠдHʔ;s w%dFt#Bp)tE/%J"5(@oj hPxC P#=2(+8QT#N) CN)CMHVT4 qJMiW 1kM*hХ#HiIuc@FP;N}:CT%% \ڷ3Z_(l*u iVW)iy(:@+t"Х]"iZG4C+J ZR %t_m(si: &DD)ZP4(DOUTNHi@(A y Vr^BIJ%' ʋԁԅ( {0aT] R@4"Q 4 B =r F BJHj (\(t-)AtP)9 Bz$B(%:  "(:D U:QC !G"Q^J %R:V@.JCJ@"> Qtiht =g㩮Dk"XwrHaÄ: 1$Pя&UBw}g}4PICfӜęw+H_fY;dQ¡\${aT fe7 GnlXzY6ftg6k`.QTXQ3=!t32i=Oai8|`13v9 8z.JY݁Rʼnڡ5Br .`W("L93Ps٭,I"%2Mu#0sִQr=z.Bilj>8/Bh)0EQHQEôF0@aEL.]qAT+XczBSVNR̔6:t-*h}2r:Z "4o^׌U1;n눍 zp>8.%==A^g%`>vjRu)M4)Я'z":,̸YUWqu^˲:."y:9Pmь٣Nml#ZP $EХU>1LA)z#Ey=2}n$H:]ҧ UYo <^&هAcrK h&4LSMTE<R*Bbs)iIM 'weVtu֊:mq]A#m=W}DXx1]CGʁILNNZij"%=@ =ZJel!R`pkwPiSRHR1%T^_ILL>/{(M#(â=#trCʉIê|+) h#K;_u:@O̫R{,iGki&ox&+R u%zQ(d趏&B7pZFafM<)[5l&o1^ˉn &}SM۶4Gilx}jź&N 4:K7cqnٶ~ÖO X{1UC%]'+Q2oo:߱i߯5HQ[ O&Ɲi޸0&$)7*..˰,z9m)!ZA-i j9N;8}Zڞ`O0\%p yp_WmaA.NqoCdOOz.禗a+"X4b'JݑwFhJC͑4WYY 2u',)HaFKPl- (Ƌxo?0nO?a1%#}n·zO8ww>&c#ԁQ; /K |vOlӠ^Zy\$P8TEs.?|箬Û:EB_';|\wt{]>QꟼZԇ`}vQ:sh?Yr79R> "N$pRf zwd;qwY>^. 
vgq_Y&=}WȜ6Τ>}aiHoxI)f LYjF ̶ug<<_$on4VLiI!pKC14X Ubn)fd?É7wXT|=+3_0ُXjyxp*|h>5qY }`4/$Z|/ɚykxpD`IV/hnIw;-`y0Y񿇾.~N>D7_7tPiŕ=|_lO}7cpjC-ژq 1{! !(O+O) X[v08" =w U2ur_KFQteQ81&ѱvB2ؘNb>iOoԜ~>;{;dߐEPM{[YMK{K5?YJ6wRRkiBp[*ɪ^"d J'ٷ/sNՋOCjRzB=ț( `2yv^.>D~qhLJʟcPvA2m"w l*\;eHTn"` oOqhzH$@dVĆ+PTۺLkScoC %`4̑|2h6,GǠ5+vH+1.Ⱥ3!JnuDa%7qwׇoMB_` zb  S@YW~)*i,Md $!$g**ݥ*4Զ WI%`hRlS_"\'Ъpv*Z 蘽ѵW-Grt[קB*nTQؖhb9;9OUwu.nٮ^ θ=!/hz)()Iq 3=oRNʌ`{`.1=U>78/ ',=0#[c/-"?/ Kz&yt"Zh[`I o8}ޕQ>wXzdڍMIy!v.R+0wx9޿Zg9x1 Xײ=ԥ.§Rd"5μIvt%븺ؿ Cnf1/K"OUtyp4r @-'0yAx>ZZ}F>'¼/ij ѷIW1tA0}nښ a,fޘ:,!W_\1[Ձ#kmvwZ &w:tǯ 2Q/4vۿt'oߌ!?xKm>>m'%9{._ְ<vl-ddZQoqg?[Vn7ls]@g\YL 7>y X tHn4Bq]\?u|ݟ3SJmŵv  :7̎m__so?&p{]O׭ wd9^Nכs'>_S~N]/PVIqr _?W_ߵU7]X@>@>]pt4әoD`< R`B((h ITDPP OO@@!(Q@ƔtM3JDSh  CA0A11a z56=Mi4M&=Mj4iTA")F4H46ڃ 6zjdhh4ѠRH$F`4a@Lh L %?RIMR$##BdbiS`=4R<#OLRm#h=1OS=D ҙ41LzM6yOSIMjdz&`0&рFF& &b`& xe-Y_Ɏ(Wdۭ̆MƜNI-2v%Psy#uIrw5na?,sOДrikeʾ xx6n73ۢ:\]ѵvemSFLI+&Z^,Pg0rQ=4ѭc?do{z\^ᣧsnk.g-OnbG.K2g~gs/'uwۑ1"EEL$n:2=R1)8a"\"rC %g:Sw'3}8.-.'5:Z5U>ÐدMuވ@x /? *Ao{3)ǶpOJyhr*XFrKdS#]1RLf;ԓx;pEfsp%p)^ V3Oi6MHB 5zhA$b9*>;ʠlz&c?'x$+=צ=WQN}>9f_s"v߾_s+KtUzf^sʞ&nZpoѿȒ9D`3^c)B}?Ws}oT׆EGazKS"H{$FIp! p$g }ֹ#0`8^(=ﹷ}I_R>y @HlF\q]f@@D2Q{ " t!x@N"b"Ү8NY-P[Jsdۮ;#ABBu,AL8plӿO # &(H(xaz1E AIAh)i(Pf>g]n&8+[TGu:{F4+X׏:^rk( E~ )ER 8!"D|0ɚ5TJl,!$d[\#AF1q]Zjok4 G3(DCe!OJK. @L̠L)n+ |I xAlptq3#wMi QsެɌTJٔռ,$uf\(@PbfBR$A5(II$4x'-nbAjbt/Hm7b3FӚv:uz0yÆ{2.ŧ7sz'3xҁNehT:Cil^A;c13U4aMɚ9.7ibR*0L3T&Cdƒ.g;!pxbHk#2pʬkSFuSQ *%YI~6Gx|zx0~8ux!8wxOydV_$`F/W59Y(!xxE,IPUkk+UEHx;33=Vo!}hfG rݬOgtDj$1tLh*$T2z ̄)u Ls^1MJ1u-b(^f )1y:'ñKCn"5U;oW^X]n/wk^9<9H/%/ 8X78*N׳7@\x6TjǃXߧ0'=qߟ1ߏ ۉ}W>9CG4IT٧**$8@bռ$hSZCI1kB\ōQD&g0JS?)e@⋩ 4mUɻTINs3D``2WU<[xRJa6M9ܖNI:0ad)56V^Ÿf4ix$9ؒ-LOb $hKqd,40ʄ_[W:h!Yjca1N u611- rzXڟ)NL%98&`I!tRZЇDpJ AXs^ncCrFQ`EhBzUksl1=4*+jؕǥ֋mSV@&U,'eʔf]mQ)J^p`])ٌY^8T`(Dj,RgL VB8O) MƍZӢ"8U63%i|b`Ӱ%5.PI2X RV6R'4':u1YQ->63aYu$,ej}gIN֋S~Iԕ68)`\xi[O,8:e5vV5Bmon7j0ܠI\+PX6 V`^:"QhA:T`+ҟQwO/ק&j:OE0 |jhXbO$"fԚf#%k-hfS%:OW.ﺧ4>8Tv8c'RSZMgM. 
:gr7ʓZJ栤 +ZpuQYt0ci\v"jcU`#A7 GLLXiZ2-Es `lrP(0'?R5KkW&NVjBzbڭ/֋ץMo҅SǠa2"uK۵Ho>L*Rg{)F\ B_d:[7'D߳K55[ͭ˙c4T"gbH#fN?UjMJlSG/~u&DA+l+ ԿCJ$Rt/ޗOdenْ$?#?K?i,0|;gNXd~WQ SEYz|#+j P|'gxI0HN^$h|ŴX ?/<~@J=1_{~ u~hͰdSՠ:4 ƠgFw-[oG޲YYm=$@m0TfIŪ,~gpYfy)Œh_?}q)܅l4jfl [~bwt=,q[_Wlj;-X.vbBy|1S^/$pb^!YSeyq\CٱZ4Űs\senWcf͘7S|З1<#nGXPۋjpW΢F.-L hiqteFb^mͷY!-@wZ]s9{.3_PhPkXUInUȽ6тoG3zh1rɿ1c[XlJ&eVL1yvoNnk}#)RɄ œԻV}񳚓:x9я+q:VCǎ*jn{O;jrzYr*,6Lpԗ  m' p]'6vE]>Ey| 8[]l_-a䞌{lNQÛI"`¯a@ +޳P"ʾޔعOe`ѣ;d ŇF{vf;6fjf<3<:vh58UVr7竩ʶuXPnXs-iu>1ϟv]FͼOBqy뫟yv'#g:KeD$U\Bqrէ.M]ѳvS~73FǓDJ{|X'PzST*h:\|B^c mMJ,i6w/TP=eAC z8G_vo6"+lO6N %u2r@/9Iâ u>٩ 3ٹs}bՂKhn@zlGpX!z100^ Id@A ܙΉyZA~*(```F6SXX (R! (7P]Jcll YZpWÞ]q8c! ADTaDU}"l}:TD9wxeJ~Wi qzpcZ}^M^\~OBT}}{SRI/gCw,^9Z&j.9#<{X>0/{[⅖#fq`oܫχ6~<&Ѯշ1$bZh igd xly5q [ݿUJT`SrZSY+o- !Bo^klr9@y8sC|kޔe+ԭen; 0?N06֯i(QJB$uZk%Rڝ P5 Cz{2~D8ӡ|e4jg6 )! B7mȢ~i P"M *ƟUR:mҒLT3!p!E@I`?hxA9eYAvvqJG^=9i".'x1zy̤EI x)qPx Eh d,ȋ j m ݪ;g8݋ ~y6Fbzd d]1oZI;OjX6s>2@d܈EI$ . FܙKU dD@đ#þ oSi},M,tealk(unh7ϋ4I2NpUP_%sO*;!j-XnFxrNljVZ٨N(h,)(3#cHq-݄UY U+ztUkWWJ1 HOxqy80^[Z(C ĤFɱX9! flaU)LFA9K3O L^M]nx( .f<QrpRE D. .)dWvqxV'VQRrR"o 2Ґup!0͒0(LB52Nd48(THиNkK!8uZ$ࡊbۺj]xgqc9m<׵Y=p9w֥A`ՍE6hL)ʾÚuD.\K[/g5E}PI@q{6gNa 3ĺ!,~pd qJn9? p[wl (Nt$r%(.!S8*4Gc/r|O0{ə6GwHmuY8[l *ao) E/1sEB"|=RY{Vjwu,IEi)B) '=e.o/}FFF#Gh[D ^>j)@X0n4ʲ!ѐvbl6"^BpN:(L(1R//M IPأX"f ǃE iE JsUpQapBD}6 ʼ%u( iz0k ily "x[ =X %hwCc%s.nǒ\`$d-)YmIJdw͉RPΨ C:\DH!MvG9&*hvErCl!6J hJ phiP()HJ6B !N& @&A4#K9)FI@;;(+KJD9)CeI@IHHd)d;d!!Rdl˄&#Jl CN@"2:meqWgĎF;4dl[Jvudw'tuYEՔuTuqpuW@th䫼+v 2U\VP_ h*ȪF"YBUCD4 Hrk!+BC @;( J@!*@4P HR+BdPQH% F@#B B4҅ 4!$@xGnH!v!B @d! %(4J(!@%+J31A$.NlVE+9 ЮJ Pd.Hf)@HP@*(@PP% %"R*ҙ(d P P4P-AB( -" J@ @!COxP(@))Z:@(ZA@Er iU)F R)Pz,[1yu1%݇zϻ4dnIe)ZT() P 60 r2(6 ,,J#<7wUIyw:י|ў< pPGbWŧ<;)%$Ş7N)0T,4isvsuEf뫮8ȍGnvhk4E7|DB 3$bXsȓpJ, /():ɪ. 
ӃJ("&]&I]@R0I( ZRVT( 6M%qu<"wtys~_|OOH3Wj7 =pFằWsj3&XO #~`ATy4vƈD9H/:,WV6 ;Y*xmKЧjDHaN > .)#EAGxPT9 ;lqWI qniXj-{mNL)U 49I9"Sj1s\nwqxV1fQʃ 6sɇy(FO6Sʶ.n'`y&vI99:6(+h cm J((Z(*%^zvo~8s6lev6Ę[U;$N1%'qDA Ru+7p3766])Cg!)lp(2()JJRps K3#*;(;/7]x&AY'sNy1650(8n:T#˼r ДBS諫.TtKz8<:iLw] Pd+wo!jkP׍qyBLk[2kV#j$쳫 meltq a$( [fڂ 0On & ߷<Ff&J +d2&2rZ\*'r+%[OmLK!vMr򠢬0S -*[jśT1ɠB7rzC;uY yІ@ltJPxVld.9sz;sוr({vГDhAXH8BKiqrA, )2 ) <QJGQBQMICHW:*8UDR(ʂyJv<{w6=yvP!\TڎnFBbPv(@/% z}'~2'=N?C滽,5Dʼn"~sig>aQЅyt,^*}n9?]<(,F>g3Lxǯ}v)/{Umsa 3J2NM kOMj>DfX|p½A_2#V>,4`<W#H#Q̹㚔?]?c!c{p𰔗o9FϪt}QOd^{_:^W[v_dhmo?:;NKƨWWn"vTp;>3 MG˃:T6Z拯x v:]4:63-y+r2Xջ֣By8vbًhv'a#ǭQV&KӰrUki3g>)P"y_֣ETK<}閞հÛ`/.C#|];Nt">B_"Yme\W Ș` I!"I|0 ?3$61R/ ( 0uWYKc` 7:v:kv^-mBQ^G@;(FY@9;qg-Ί7x)N|)Bא1g cEP*{AXRirbSA7MO9qDY zK8ny C_ŧZ?V}Jj mK[zQER WִҖΖCBª$#Pk2+VmmulҴ;X}HBmRC@ ^׹9q6 :ӷxv- N\En'q=]ϧHw5a*ʥA9|D}xI%jY)@ P_BP0zXMA"{N 27;EE@`` JPwYiL1u%dm"d E{d+$v!:3`v1gvCR%=^֩nVVbmWwVo{o./ T'/o&R4)dP<0lŷՆIq˦Çx:nd0w;s$'^W~4: vC  wK7O`^rO6;޷hq[wyc<-8s3*U?C< aw筇'!SΣƎRv,~- 1iswGũ_C_O:gmy$GF* /À8+:__Ŵ)b)J77k%ĝyN&uLNa~ ׯ^Oܹ|:Obv/e5E@ S4¢uvӪER*@35(Ƞh )i=&i&SdfM44lSi6#MOSdM&A2dlDڀ PxL2h2 Fl bf6CLj4hɐi@d)5Qi=FČGiOPɣ# 5 4## ` C `L&Jj42i@2d0h 4ѡhAh 1M0`iC&FC@2h0FFCJBOI'=Mhdh 4 M M @4 AAU6>evuGrR(*;Y8T[-^])L H((" H 4H1 PsA?.9ګ1A}p4t~"n1BFF#8+HP!d Dy)66rXalb B0u0xI :JI[L.#Kôכ`1@jUl:' @hU$ UVbq w."Yi6J7p8]k. 
w L9/GҚ?o}z0.8hvG}H,3_k+̗}4\wI|S1tZEp _#HZzAvvl_V!1WSD J] +V n .97uH"Izf\ fGėR5"FI@P02D`v3ofoDFk%b7E3 fppv[2_,pv0J._tz{n,6$A/(+҆`ߞ6UgX̓r8gYXqsfс$,lX|`y+@$P9$}JtQ 5_2sNSx6',|Ӓ¹M" n -cVzJ7(i BǿbIz],ؖ-q.q͐i"I ' dQl s#̢@ЛT7`>WˆQ?l+4wJ Mf Z$ YQE9{%p1zJɻ-Y"mٝ!G9qT6N.5Ogf'.9tr2v?kRv6WQyu7[y mUIՃKQpk&8=M 5u[" EevtÀD%kt+x!Zh oچE{MPJ Z~/X fRzXDiL2!!OXƘ+4Ma!B C0A`ͶW;F)5vuO㧛IYtoZ zA验9*$Ib&(3!hؒpG \+BHK"MUV,aY:l-G,ȹrdR'8B,m"`d ]>E0Ĥ ]ͨs,?߬^1 $v~5`r; q`f|fK )xC&bs4COͼd$M) ;ǞXje'd>[c;Y}z.cx^6=&&<:$-$d8>MoнӻE• 9Fu-N;h`~oTkk_(K-ڎt(uouLϦ; 蚰s PN\WIXpL,P >9maǗ>m5OMM0O6Kg>ՀX% >MY WU(/_n5yy[716vaJ63 hʲk6ְF =So[l/P_*90[6%9aMi:K?4q%j)mk!u19;n)qAnëV1PMceH̳+ӵD$ qz<C\'X %%)39'd+= ޝ$; ]%vxiWeM[N7-ɳgxЄ #5U/^^+xҝٷ^h/,%ɻR)ERɩWu[*QnWhB[$^gë  c)>}T`O;| ,$!Ll,:9WE76u[g'N93+WsUF0?qE` H_& &NMզ#wA*5ǃ%El,iՈ\@9Ns28so\(*SLf%+ =U-T(H6T@@dU  ?|Nõe3^H 5/jT7'ԊDLEqD/qKcn{7Zn]uZ620r3їh8IF`@Xw-c1+n%%\pZ{}dT3(kdrJ.Y'a qI}$+$ =gNoKEVb4{gzkJ {w*'vAYGKDz:i "bLGA@V_WMݼ搡7E5l` @w+p-R< q_ ~zMH />v,fOꞟA,RMSݪTi x )|@O}}J´Z8֬r;q\ \fԈ ީ-4 K.lmI<웏ؒyO#ԗȅ@LK:IF  (Jhhڒ.2D>+NPT$RGЫV!ص,L HXAB "*"B2 TREgQaE* $`{D$@x6l ImV VA*X5i l@A lX ZB Ȳ ԕ(# R)V,eKZ+ YA ł-!h PXVXJB,X)$P T!R $",(ED'i%H,") I$ITV*dbW!^-)bլ+4@U$ta3?rܾL| mw|58ʲ6r8"L@@ E tmEriȸyz)Tlypq'`F^̊4=ms0 t=&glKXW~ct]Z.PEMM4Ѭ3 .)P@" )|6 ӲAww-a`S7oܰQ"uRGt袙cR7-RH DHHE#h/E&@,$ "*%qBVI$2V5Df!u))RDQ$IBVXi(@tµ+(HQX*AdXq̕ B( Pd$dmVH EDDDda$TI$̙A0$8RBT$'A@QQ-:"3X!q !D$"JD$\@*P$T@ka;d97 T04)@TD."dPP01K"Rw WE ⨤(H("4UjUK" ܼT$DH, !P\!\@I vH; V-i*[Ts2V%8VTMT3Uih]%4+]QE&-TqLܼcS6FSYX۩UbWMDkj6iU8%N8Q)5x,WP !5xťy쭱XjnP,)[[Jjmurue(mUh֥4 aCYXW7ZfbzZd+\YCZY8Ǎ ʡ[#4NeA 5-ڢ%-\맡Fu3]v #YbX8S V)v3KcBS.K&XW],U[-J +Qvj,)is]kiu5:%R"M'9a0ֈkU[mzNG[y8mG[r/-]+9[UE핈318#kze/Tb]Kbs:^Zueu*,TcK]jTɗ!ĸD͌\Ro *iZ֨˒Q:uu6A^uG73z*dYuSukmzyfJR-bGsTʪ6ƆJ&8ԩFխKRb/Z*&[UVycJ_٘cª[h-ޙUlS5 =N.@j@N11 91_(yZ>G ue*[r;Y|^,VSSC뗁e_bkO dhf,2Q !ۺn!f+m9}VeZ:o:QC14sS& uYmfjQEpЀq3d`3 'P;iw2zW9> +Y!a!(dQm1dž%Xb'jfԣus&= / " rsr411>5[Tst3Kl̲Li\ҤE1C3Dm]'ry(3C8 #ƭ!N`O@OMDzw3R昽2oSS-P{Н$!R8!>Z|]4KS;N&ۻtm*f"S;п#❪B>یn)kW{$DEsPbZ!#3XjD˚`ëV0i,A 
2e/&ZqR0CThF=@玡"T?1 ,5Ь:vU^ L f3R fiڐLcV$Gs>4w2IES NM~Zj#䧹!aߴ8b qBt gWD4 Z~^$w;]aYu|t>*fQ1خ{4ҜQ$y.Vc 't 2s5قm`;n6:0D֢yA!'џMe9Ga.lp e{hs0c]S֪΂R ؾ꣗MK94bVˈ#8 !R@ ̢$t-^ G>$`C830\f8塳kHJ@ňM\V=6a(6>fh`ZEjT6km`asD<2Rg_Y ٩PKLd%䅁ET[Țҧ"vq$hʰ.p  tidyr/data/cms_patient_care.rda0000644000176200001440000000305114360256575016322 0ustar liggesusersBZh91AY&SYhv1 ?[pQH AH AH%y|>@`ف;ESib444hhɣ20@hd M4`!` ɀ0C@#@@I5 @Lj44@ T!4ɴG3SGѣ@d&2 j44GϿY2d|uIAc*o02 5(P5Bb(ClS L@OAPDB\tA!ŘI=Gf(@ S:$Gxڴ*QEI{[pB;8{Z;(܍p#h# X##(Q2#xQ܎w>w%UXJeFhUXRЪF#DuGOoyQ@/b{-Yc翾qK| bAQEERJPQJZR%K# )d VDYqG]tx>"<$s᣶4GG5(.>:5#%9hH%0#D{##(QF8aK܍FDh#`(>0FQ壽F#R96lGAqHFGmvѩSZ;rѱH磉o#D}e$d98ќ–QlFjhQmFtF܍ŽDtIMdq7?R8Sy#KH##لwHF$awQG 8 MpHG-0G9Ž14db8QJi6DnF4|tQoʽ/,@HO+Q]dlW+ >ϝpV},;$b*&EV4`;tN C *! ൂ *AbK imFX fP[#Gߡs jGb*/@71 TW~o+W+wrN k{;啗1* 0,@B"H*XkYiUb@"Hp|ilڐUs}hh/tXB h ,h,` "N,i+x;J( +2}%U++%ti|okn!6mDTx@TDĢ 3KM#nգG`3nٍ^VHQ+ChHQ5 \]eu_"(H4;tidyr/data/who.rda0000644000176200001440000027671214360256575013637 0ustar liggesusersBZh91AY&SY"5%nV(n&TH諂#FT\ԒӜyX>F|D? ȟ)J%4s+]#Tm Pe2m"*p^1#b MsOӡ,()t*A0hkBTWrF8(e'IN-8QsꍅO_ldZȜdCt(FTtN&RI DXTE)ʘ5j%4Q- )6*qɏ(8*,DXԔAᤈi8o)V&RS5| H]iIA }ԧ݁RW1%2eQ̃),CWtF=ZɧD'Hㄝ7aIW aQZn 5HAhk4b4k Y@bi58 'G$#9Ώ|#%1# )1P͚ȾIG"90|AB6`:R_63y xJŤ&nQ*d1`~I[ZEzUMkV5Fф.f,+Lp<7Z[6F k4ɵr[1X=X p(B/(,J1! W"PXukfP2UH=a% [Rb <1F7! LJ R[ڔac! 
eiw: wVN&߲?FʼnVs:%c b@Eɤ.Ѵ)6Bmـ20n|9+tAWC ; S[NYe@#e2^3ldo`mEm[kNR'Ϫqt jYF6I1Z0H7Q TT92Lem}7_ٺC\7 ^&Zs~7{,g&ZY7yB:J&_3zܬ4leנ%79N|J ).m {3l+8XKM6$!O>-[9oϯfCNZ٧FϘGc WۀGkrˣuk#|zxߩ\O)7qH譱\Α4!쩽5Nn~8MІͼ]5ݻb\Zv u;˼U֏y[c)w<W9 V򫹚09IVzq ڽS-tbߙ.FYxG^Y.sI~MonJu1߮8u-W\ĞPgFbL!fYu).yaߒ8b]B}3&ݷi^1s$3zO-S.3zP;k7n=aͺ׻7m7|cfYZrvLv=V-y*s[_DC*eƪ^_4ys7?ۇ0r~ֶ}aˌkFm'˭/^t[F<3b)=Ә|;4Vgn䶻 Zr O ~: ob'xJxw~YmKN嗪<6W}yi(5DxbE>t/Fp(g7N]=2읦d \A;(m|űuC~}ξ)M~_o(l~[imut)9̒oXP%2be˲d躉6w)ݮɣ10J^H̿1D`Abch =Z#d&Inq#"UkdEmp{k:iݽb'wm$v#rZQRo}zTkUlmU5ke%Tn J-8y:3.ⴊ\;-ʮb`9d Ojdɒ !_j61VJ7Ax܃e޹}AUvkhWxԟFJu!.fjxZ y084=6b&2/aՈuN+r~6IJ7MT:.c-j_>mih}r71UXLKu1n1L+ܑTJnK]F%h-ˉ@RHTN 6LQ vl<iӊ}me'b3Z՝?rbϙwR jÇVDIؒ09ҫ+̚HƼQGΫyzխHO>E˱,>5JN+؈L'[P2~7ӫ~,w.է/5;a{ykn3 :;^n7f:P$!Ckh,g]zsEB-%Ϲ: u0of꼰ZX]Ұku)<ޭ{Bi>ItD&fjhuѐ8[ȗA׭ߵǵn ϾqQGwGlݔېV&Ŷ(.F[uMƄ3޶|IonAqB!ܧ{{ ʥ{/e\2b6b%"mJW{ li0,3%z^ { {ە(/1IN-[H'sZ*ZV4-Kw Z^NIZtB X2Aނ,̊0Ҝ)OxlWinX7IӖRo0p\*UZMZC򹛮Mf0Z]u`iG--!a&&'y3g1RͨMvVՙZ^z[^gI2ӹCȔ-pȱ9Q,'Ƴ^i{vl*kڱ hKLӖTpxGO_Y%~%^IzK}%$|$?$%~Ē{䗯=%]I,zz^%0HK_~Iq߁R/R[%8bc膘 ߊ_Rc=znﱱbԵ^v7Lz^TEn0b?lFA6ծ'TXd%d "j$⠤D'= {Cʶ_Lr>;WF6Yh]s٭7Ic[w$`%IWԓcȘOߑF9$:eעtiTj`HX}iU8<С뛟*=:W?}x]w@[ϫ5W8l^_@Fs;;"e}Wc|W"/q{~;}yow}?se_>^G[|͙_e.ī}R1_QLӲϰZnvq~C7Ηe7yr]{nodDukW]gz1v xf{k?ϻkkǹ4~שQ:/_^FfycNF?~?/tovw_c~/?[\sv}zvnf$Oz>wo\yDZKz~KlOǮ6Ə~.;k,?w:Y=7|=>ao9Fj} ={fWTx7m_%xCkfxҟ_?)(CK=||9[C{F;='cDGM3y'x[֙#F~XO#Coܶ ~/C;W_~?_~w~_5O>+..s_~'.x+ t:|U׵Oq,q@9'8+/ݛ"5W-!eC!HbI#u'GRr3mgib$LuD~kSP4+WwݐG'mWב;tIu~jɮEW$U}XexUwg\sv:祘yw;nE 9!2 oxfH H7R`u9ooJeԩ-ڎXj =be0Ry-i]k#:zĉ r܃qW% 5Q'J{Kyz^V{=>|'|!H<#ߖ$ 7zwC4 5Q `LTxk&GOTdeBrdsby(3 0%F/HI80{ywƃ];rhQ/& ]".Hwxhr+)nA'btda/~|Bwߒ# yBLArՆ hN\=*ULS R!(Dپ?K[_z29x}@|#UG0y4 ӲC"wlc'3~L-mj!r5wڠPqId0b&B#2vJ8=Q πPh5aIu&9%MD zh>ki;V) /),[:bCN쒓?|߉DEAc7%D$e Fi! }_5މ"Oy1:%͂W0̀$*暈;gz{xq4)rJW O!usp Jac`h$̱zOZ}v趍A\fYxvGsc!UPi$2B)#xNQMyA4[*6 ]Nm/ot~vBB^0& /v|DŽ/<^i-m! 
\<[\+~[|Δ]2~~༡_T#ňH6l['l7Bh5ɤj{#Z Hm"M5+.0^7pP=g{s87C8r'ѪZ*8Uge8#ʎ;ray_H<~aY0P@ꨂZ(X%vCfpYhG:"9 ,q Jup:'V7F&t^YM&[Q.S(1dd{Y*VMb ʨSa骯'<͋!#߭N |]jdIz9~UAp7xhj(e2|fs5mÆq%CEvzF,0G}AeiXp,eUIn]K6؅ͻ !b֑aZrSFqACk9Û/ h$ ?dLd MlF*%UQc r֑hAvF8 Uat!El ph K E>͂0H Vp hIDnճA )D&DɌ;Z]u!%(QmvZぬq VѩE4{jlz:yA0wzsx Z\*m`ieX˺&gkX&>*L<Cb2j GZD/5V{dA?R7 xƲGFK*H kxγH uCY nnJk+nW:NCsLSRrxC)q\l鼇O|d]RbGS 69Tu  S0E<]#*noQ{>H0xGvڎ tbpB7b hw}g*ҒY[fv\ 8I"Ӓ/8.77$23M'u͟,|2s,A@P7mBԘ4&<[pC:\n1NnFl0$,a0(; kr"xBe6-' _%kK1u8 g@[q g 47OBX Qma^.BnjV |iY}oFυC̲X3&{jNlC!K${,)GgƎ'h]l^rffɒϕ[lmMl SQַAO"ntɻ$;crwfU!20x(obpXi|CdWfpkN#٥bp/ 9Łt:|ǧ.X;^-tAD:2TXrԜp0 M"ΛaѲcYVX.Xɭ8bw bThW ͍h>[NPsZkߙ}7W>l$Sctگ'y&!T.7;VgN`;4/js,\@d w,b)2v<-)\NoLKX(-ZϤ^Esk5jbc &g+ }k%S`7,-7So1BguX5󼷩KuX'ohC?d-BF'nhL g͐U̴6Ǜ}a"dexz[rBiM5ٙ{@;E*]]:xD&|L^Xq R qkJ}Y/y6|ιe}WOӎ\},9]w5o9vM',^^/a:7 >PE>GJjO#EdM2 iOی3ؐvd~^|sB>Ħ^Zwt]4Mseo=cPXiJ'bB=P5=A{~ U$>zvE4esTza?ybaxߢ_qY=X Gnz`d>E)=Vy[c* "70KҸMzm,eg$w]ePK3g[0uμ(gi?=4`iۊe#$ [˛s%ֺB8oTǗ<-5y>氖(G#=Nze_8l`3E& lh׮ы;ydBD3mɚpu-!G(iZ;@(Ӛy&5|w۳<)+6_/E5cݶ5ޙSP0nT~42}(뿡~ZEцIsxdVZSy['ze_{[}S*wLvHuѾ^w s=o>7o>L!Dw9ޭ<$U}_~^0m ^2ͻyp:SNG;C(kD˧ݨѻdȕBbakؑ }W/F{ax< y[C>hz}\g:ܽ-{WOWBx6ؾez7qܳɽcˀY`8^|4&j׽7\azYM9m5k `VB|0Jf#GnB$7pGAWp}x$,ef!J}=k mw!i%s3 #5 ]k-G:~jq_KۯVbVy&~OHQdd25UH.YGl$gֱR#1XA3VǽG@UZD#{3@* X #3)N'nu\{7˗9e>G"|2  1wlt=<[B2{1}oHҟ\m[=?[ş+~KYg8OFyfNO??F|^B #k>r|;*uA_=նw~Jڏc7I=y$wNl?ͧӯcZ ԮԶE0l|<~;?Y>d2:ygڬ򇽏9~gޮS&:r_?3wnο%2nj䈇sr4~k} QJ6G ut>ccGۤ#~-ŶaB-غ͟ݠ;h #: ߂fP`叭>ذ7ߛ◞Ig eb*!* | {gNuV=٥h-ܐg V_u㑺4.$Oc k5>'|XKY = JtrJTFː]"C%;KJV>_!ڟC: >Y6Ssݾx c/)Uo}7bP$0?SrM]=$F/~z$O1*ݍ}ss}e EBf,sAf)W3ݥa?;Xvu#2D+4m݉%# Hp?5خ$- 6~ YtQ /1N9&"(W43NMEԘ6%{#s?g=u"CT{^J'dD0?! 
/=38,˂ͲaCWccqw:"@d!KD(dhő x; % {LhLyt~.txVt4GK1"R#2IR6THe(1?4U\3ꊟV8ɹ?F<[YR%1^k"k|d+>aEA63+|V|$EipD&z֋|(prWP)~:!jG2 \ YI[-3CJ&`gNZHrdk$h9{dC?6T:id.ET_ebkޝ$̳!#PffxN>Wuμ,l6Ic(!z {m[/M?hG-F 8뛘ޣh̸ M%)l3eBvUrwbh5<̓]<;䭘%7(!'.$ HO"+JxK%k8ma{hٺb7 O۞f"pǚY tբi$gA3"hq4,^N:B_<||(Zufdj;BtQ &i7"L2%@k% _~ bDO_?<^r/,!}g퐗jD8ޤUHѿbTxRӽA{Bdc̞z$ .N8W7tIW8p!,{ꖛ 4D )ELFӅ%ɿe,aMV[׸`,s# ph` v POŸUR Z+ b"3$R%#EaJFj P 0ow=obI(UaF1$XCa UaRbXCC XC JJOTed~zX=LO ~d}.Ҷh8sA~9t'ƃu~1^t7C p탪_lrnvuAٺA;`0o탣 ~=8A=;Pw3tJ;b:3tq=pi9{ x5{ =pi x L px ς كzA 9zPX;Ȇ ߻Px|b yAno9 u;`h8s7v_ 1 ۗ` 􃯒 {svN9t݃H9 ݃:z |vuw`AisBf u`(x M:: 6k݃z AA4惏(dclξ8pGG\om<;; 40qć&mLq { 5N~;z샙 tCpCރ߯D܃NH91 4߃ A都 iolH7!99Pކ5샇vθ3h1؃T8c Pf !AAp샓tqCH9nw|CH;` p =(v۠υx;qC ӞH1.w!ރ gv 3X<3pv!߃;0j?DAդwh1N8g3:t[s wA9; ݘ07 ݮ cmn惧vm,]5 w ^7!6:0gf :=7Ak hH0 Ǐ}F> 1pi .PAe9 ?O Pm9P m`Af Nt``mmwrվ}`僫t63.s`iې q1cD1AH5T:5AF99q A׬cH3:7,A!91L1 qh>(ub tCԆ  =\viA, y 59LT3 0itvjV 31 v W:v9 ,ctt (uX404=b 4:`b |q 1Vrh\(i62؃w`Ӷ0sroH564C0nP'R$ROk?s7I BI 9oK?tT$MdܾTO[`Є)/4Q8ǟ@/O $/RHH9|6x;x9UF{aug_[ofӋ$zy2J܌|! yᴬ_+' ?x(OHW(w5~3rXm֏Ά*eF)1VZ] Xژ_6=[_OHFN*5>-$ i}o!C}$}얡e&Afٙ63ڷ WO;֞J@@SX5Me>ffnZQ!>k< H"A;rȺhMmalń0~!q8af̣5򠵠IWOTӌ Bˬ{[j*K DD I{;3;~tt$!eYjwߐ3_bE2,:)63UHI!,ݹ$QiUTTE5^T:5|֕ ߜI^2N\TR)rU5Z!Hlt9 M0Sc%JdDJy 턭~*Grp`{4S,;e¨}O so^gvly )6ӫxSJOkR@gL!3`ꪨ>,'QE1p^RCZ48ޝ3cmX>Zhgysu\gYu]yk<>ɭy\Ti}ۗ xȿL% MDr .dDTEv#uc5H5R| x5Pc I/Լĥ?S 48HgC 8>c@l%G2PVͺ!1߄qhqI4pZ((v3?P'}þ&cCUbp1̉W4YVu?< ? ;(Esìr(Tf]ˉ>~XkMn4͓שd0Ӈz?. {D=o–10HqC‡b(xP!2CoCrd7qCzHr!9yPT7!?c93ka r!CBnCel!?Ӌd8 !PCD8`iznCZb A:P:CzCRP$:lA=D;pCq 9D4CD+>{PGo{Ah4_L')CLGDc<{!&jU뷓Y^OE ^=Rфߤ>Cx _5K !$ 80a ? ov8cB8w.mޡ^)R]2eǒ:N+gF;BӎI_J4׻H! Akeu+8pd2ΆQʂXwm38H22ЈH0-e[//"B@"Ow``b=)c!F oRsd όϟO1?0 43 y M ʔ$P @Y:^^u3DiihW^-@J+tk@ZsB~aO0dOM:c\cmmY,蜒^^%?s;|k s}g!(sY[[bp!©-!%^Q.žDaAH 4FT PyQNGxyݡic>t9D "H%6Bn$PrW'ZG. 
98G\ЌXDaRΠC>*"zOs$ M>Ǟiwʎd~-|3'& ,XY(#i"qgj^"3(#iжM 1#Im(Bɤ`f2kw>MQBNk]O]KLN5:H1MRѰqAgjukX!yˡÛDٟ&XUB @XY4%-gl":SY7YP=i~>]/2P.Ѵ9[UqVRG1tGyuj-0$Y^ E|6x1jdޝAŒS+d,b94s[cAj9|{_cIJ1`dM<}#Ox5?7 h>7="t`cLlf"4驦d K}{=D]|=sy 5'7PuNXLc6 ɂ1©c-Czٍbjf Vd O{w/5f~C,1`q5-&SQ=䄈$r,C*4tmDzዹ˔#ItP!.nͽ@?{جE}dc-IE4 FV kO %GБ! hn81 DH:MeT}6RKʛ^wUi$apY0FW$ۿP@JAK*qQ4ma% l΍ApiSXT $A-&iB 2{w*6 ЬFUrzkJƚ+,7ZvA(%ͬs$, H"!TFX/ǞwiQ^:aw!r,ȡշ5xtTOxzw:w=ϨAa? ;];* '}o&9AosEC F;W)h>ǎޣC⸆y]McG`do fض!x8+)ͤb[`na#)72 ;.^Gq゙LO1FDwϨ TnMޜxBC˴"۱ M&9,W] ]qDI` Ywn[o!W* ˟#hp Ppm:T(I" Q`$rPPQl\ ApTM6s+ՕrT®l.LL(1^I0e *&AWoMfƩp j mG*P)U (XEL.\T r}*c.:.tP;";.Q8q˗{LJM*, eP\Qc\oI aE 9vQv2v$ *(\ ]FAL$2ȇ8$2 Pʻ(e4 !_]lH 4$IH\$NӔ1 iDr:JIBUrAwQ@RrPRF\0a*LĘ^E˜]PE cNSnLLy\\(( EP]8DEvW*k )&]2ErVI٬"vl1e4It.v\tBB*$/2хRh EbΑsD J(HL:wEE\LesHq&DIL pѧJFJt9d"(`UҤI " LEDB#9J˄EHU-B8oʡ)! Ehk5PDjDEHDKUHO%1΃I yмʢ2 0L ˜"IE{%D$QRDAFW5XeTgNjTAALB*Fp{G( QW eG"³8Re)dljpȅAIEr"Ф& j]J(\`O޺EL q(JPJL-k*.(TDTG+I,g*]Q1%2̠a4@*kO]W(u(.QBW1,LP\'4*BLBMSRR 40Q(AG""YTR( CDHhЈ3(!!Nidu g9T*9]RgL+8YDsQ,QaHZ`AE\+.FAJ#L#-NLɧH "*̢Ԋ5YdQI A"U9\0Πp"NY &™]D\ȤLR"¬"ZT "*J3,C#\Df2"N¢e!Uh-*UC",TTQYrUdPETY9QUURԺΒAqͩ$Er(DUIP]3rjXeFTO aW %auXiED*&AC2$ʨ)e\ -hEQp "'R+*eZtYELVEPfVEDQa3YQ"T$JArʪ,EAe  eFvaȐ#k.]3P9AE!e0BIԲ梥 ":DQ$XEaPLRBp(*(LJ"EE( hj\0V®x6QFtr,DԻ+DEpVQUFg*@je:UT'+j%r . ˜Lt"t2N,:eD M:dWiSGm(i JET#IӤ$h)LYPP3AIđD3"d*\ ! $UUh2쨎EQE$]"ȥ-$mK:r8\ rD Ф02iBS(E$'".\ rh&uXY,9JSI$9EIҫET˚).p%8D&Uq$PR(Dk Ēj"" i \( M8'LVjBr+ABW.bI吐]&'bɧ$\5,Jc)I2mI=j*A\+Zp>LUx.QBhӂgeĄ*]QQAd$\ R.$ʪM&Y:w:Eݓ5#[YHBfBJlYEY:p# Hav%tiu@(q9E%R &tL*BC&aN.@V,0IP\K;#8GI;5 HNHevD%( %q&q$%t☄(\:gi$ a ,3eʹI*4.;HP+L;eTP e0.!J"U9vv%LHI"le$U&QT!LH.· (MXMP\.\ TWiI' @#@4 $ӗHiL!qBEӍ9H (I`RV)$#]& \L2..Up"NPLX Di4 dQ `GeCHM@( ;H%v(dv3A:L$CI H(i eŠH);ABE\i 6Se.țNPiӅHTe xNB f.’)\+CYsBCTACC&\ -aCX4CNFLN MC(.(.WfdLCAvS)i!ˤ2ATI Ȅi We˴N'(Ji'fJ]56YH\S(. .He„T]3˂B!q+2bp\L)L.$( )aLJ&k.Rp}J]\Y1v$QLTU) 8!4“*Lbvi3X](l)v*N$d2'I\"IˀP]Ć]lи$ʹr ()vbM@! 
(Bad',$P'e6'Nlʡi1!*i$vUStU‚(e]J.Ɠ ʤL)E+:dN$‚;dT0D˅Qq &S.S.$v,rUP(*TapJ8 eUtS*RpNL#)qN ! "2BLc bt\0#( )(AM˱;TL)QpQQvӀv$ ˌ.1&8 . RE\( LR$m& G"`QpAq2N\E$Ҹ bq8]Hp(ll,@. L)U]8EP"KBKUFp ) f`S.(4m(v2(le]vU6D0)]v*Ap .\avmؐqL)$sS eˌ aL*\i nqFq8DșE¨UDA̬*eS @f̵4r֑Ym*% (* BrBES.]UAD)UPE0W.QQ\/ˢ }{!(kC?" c= iy`ăLƃH?8 9 އJӆyPCƢ Ca2P2r!!CpC-CP!!ІPP !?PC١Cd?fPD/_ܿ’yC9C> ?ʆ=7j12ʆ!0a C`CrC(aFh4QhCae CCCB42P?nCT4CCT> PKߛzA!;xky`7!  E0r!2r(aT22(ȌCȇxn|4<>"Czȇ*Cd8 P@n=hWp#ăq;?ق^Vo9i$2 .[Z V HmC!$7{7oV4⇻Cݡ9  !Ά2a Pr!PD>X|>u58;vA}C tNy32!;PCt!d:އ2qCD8!0ZHCz(|t5!!xԐ5Gȫ=KB@O @ )35YnCy!0`uU !>2) 񺠿O&t!҇!z{T<({x6Cqݡ>T=csC(r!CD9PC !0`!և*jCd6Qe !0C:PCܡCćCD2}T4CX04"}s.X1=}}DWwSo]}ק^}O鼨<7`ZF~UW;i1Z<2@f M A`8mBPzCT>  qCd5Cd2"Ce 0C! t>mQ'C1PjCP|b֡0C硪_JyP:=5CBhu!D0Ca v!!C!> PPa}B?}O[<~CT5CT<5Ca!ևb?{'-hR {=B=1C{6!!3=CD4QhCD>:(la-2h=R ?(!CRYx{!yHm CʇLA?C0a ܇C k1s!!cE=2: Qb0a !0a !7FP!CD5CT0w!0Ca " CCb=47 ! cHCa:=_ Еԇl1C!Ct>*X=C,>Ca (lPC4CCaC2Cd4C>CrCT?'%4v!PC!6 &BNǞZ.d`;yC !=H? {Dh14CN5 z !>n81C:3q9!l22C1Ԇ2u !CT:i'桂 /C4=T?$~='w!Ct<(w`wf u z:!0CCƆ1沆PC(e !BQeY (Ґ%AkR@!"0 '¤@jgC†pz*M bP!0*!rOʿU~'^ԼE{\}t'SJ'%5=9<ߵBbvݾؔT\l@IB&>C}܆e'a~'|zzfZv! !] 0rnCCC5C >2f"{?k<1lA C!ȇ#ƇBP!ȇ:PCz!0!!i'I ;޳?wYd\gvgm%sUX>Wc6A0%Mf4Ph>n$A|}_F.>uHAǡW+?׹{S=3Ƿ۴(FxIxu޳sc% ;5I.ikYp~ZiNšhԂQض("&F7UVR'@V>LFft!W17,5hse,`'FE ?nMdS,2 IbGVa|?32C!eyK98Pr -PP?YpC Ȗ@:bZo~)pEAjFW/tDs?>NhDxtn y:JV@ZB6'96pD!462m6p88؃?C(+dhW6G)(^C|/3 }_+λQ#u[ud_3߼>t\3v Apgxp|~=n\A ݃0ir b n`kC$4 xpEoCR- 8߂0pP݄: <`\!P&` q:фNx6קv 7cK0f0Cr9!p(C!?`:C 2:pC!d;s!{87Av:R\Ct?^y?C 1ȇD:xP}Eچ A?bs؇j<9=(}x1HnCoC:`AxC]38 v! !`Ї(tOƈC·D:3C ~bHpCd7nCwC:` A!7!CCzC{Ӻ! 
s!?C(zhJ!CdyQ =$56!Ce1}!Ά=[n;0Bˆ@CĆCȇC7A (b Ca P 2@ux$(I @Yr({;~r!wo2C~zkvR%_RBޭ]'?Ƞy&II00Chk%5A3sAE s1ޢ A額h|d4Cq He !$\b]}^JPNܰ|Z0nt8tkʗQqCb*pC(`aPv҇Cb9ކ|;u!v ١P9t?C(w68 *C:}$>yCDoC4;Co Cq ȇb!CƇJMb9l7haE>pC㡔>r΃'H^0c|5Іl(iwoOЃ\A9 PߴA5;pCX8d7ACH6 AA7AiƸ iqCD6!Q}I2Ex(c9Cb衧ԃcd>H~ 9P܇!ЇP A!ܫECow{g0x(a rxhb>Cp9r=8p:Hs!ֆ9PC!ۜHAr!(C"T;`i d]9&AduסCuTo(!7AD4AiكyLAiLaAYCX1A!a nC@>71|.oCR!0(hChuUbC5lPT2Z7!ΆkA a 6Cߡ:l؇u ܢ 7!]Up/t|2C!C(oClz]40u҇AJ4C!҇9(xC$4CzPCbh|9Hl"j*!Csi1 u;xa xoCe/ۏ?Lc룄RSd4^ V~efB*(]VWT/׹^39ʋ':ږ]zϑY=\ϯf du  $#6 .9l!ʆjCCD7KUU_-x 5!ThSz: !J3AahFsq|=ΈDCxCo{,b"{@[R ,%P,w:^,lܞ6 t ȯ?3e'r_W`{5'-zc~:އ7(|7_nsbՀX@\;~qEZ (w`@a![ffٛ4fCf7m 3va).) őI+.[ d*wOp8 y,zL=|'؈ch[h 5 J70l 9(becF$Og ri M CfunK& o=7 %}ܯ?bv{rc!\+xDQumu!c4~q䨒pm!gw8K, @dcQvО;CT7sk"(]O=txL蓓.bpy};IPv5 gSIB>:><'{o.[}q9LNYLGo˼Wӽ';EyrF9~3i.$C@dQEQCH=^t$)AQOErQW*>!F]TTp+Aȋ:PG.^ZW8Er+(".vEO+AEPG.TE9Dr .rwQR;UZPTTTN_r0UE !M\z#\ 㔑ev\" |6$xL1$aM .UUGiU8tɡ)$N$Ȫ r9gHц(Lq1 NBjHB.U˗"M¢YRM"*8LH6Z)rHL*i\I9ECK QNAQ2 &d]!ʋD3j.UQI +ErHʿ;DEsR*a!+(PQAAvsP$"dN+"hr;(¨"E\"2G#&E2L8DGe2*鴙r#C:aÕW"r*l9T.;"aTQ(Ñ (\#2"(*QȎDDETr*.r*TL!+UG\HQ>&Aʇ7`w%@\G'Q>9\cIBglk8}>/FwAxg=K*1^-n8 PzC$e7m)|i.>0*JRe6.66ƤV6`"hMv186aV#/G5A}"G ہX6QoeHnWeM_[{W>]o +Wܤʨ隆ErGvV C $3{u8g@d8$rI@<٢b#A%oz!-b_k iFJa$ ,hD?wOvRmkhTz;_ZxIh- )܉o8W))~OܾW$ $P!( @S,(%7y˪4i%|gEU>~w v3iO ǂX8XYCEѣ/4[e"dh|.`-]@3 v[|u/ʴ UgHgODI\/lDq!%D}Ǖq}>3Om,"L*@L֢,AH!DVt4D?H﫼WX]a;JDФ?9(^='aO -"I´,"hZEsB`ԹIHɱz$D5}NXE#𶺨 nJQJ㾫+7s_w ((}ޕzUw?muiԈ$~׌|P?v9o ~=ys=<ҫruO>ӥUp. 
*./^>:n >H||)|C';M~J{GE1~N@@쯷Zr;@~ŸH٣x>#7'`wdYͨD(@S@MHB/%DΕbU?4)(cc{@cE$COnć'#ܻ~ɳ>*9!vPɝ>^[*xxgN*7$JG̾o- e¯->Je3DhI18nC?8Ohޝ(@\)I_ _m N`18Qp 졷heD=sB>:&WG@26L'l)"*a- u9wAdp"&Qʋ}mndr.Q1翓29,CUw:/>ch^R{5LQX;8#&w|}>.9uޏzOqkAte7`{?$a_~י']QO鿦t|?g;͗lsyv}NE׃}/Oy??3O7Zמ;qzV9|{3  c )G# !`Xfz b4ĴB6L匮\ xT\b7XTxiZմ۔9k;^UȯKgsgLכ(WvOݣ~\iƐUhnqHp#Qdni:MqhX2U U#]rr/ϽY&yRsc-2gO;Ьƒ:\=_p/״Q]2to99r 3HW!B`ɫRYBZgW[&}Yu%s r9ۆIՋcO]-1іӸYo4kt hɲE勞#U&.NΫ,VR)ɡ".U"M`2̢$Gm{Ab<]/ġ&@_O;>=4sozҰSvȃa,P!KxbQ.N_{Qc.,> zU%|Н?R=Iu::,.B*i P̥ƈ>'ZK* ~gɓ ΋~$J @/rz}لK_{?sNީ>5~s뭢ɤ_tPl'ň2A[+/ѵ' 2 o-)u' L?d<\Sݲ}nP..?ICjYڝ " X3̸ %FS8"?\4]IE]݈i(-ikB.~_[f7M6 3}("vzT$MH/hv́+/[hy=A${l - 0{̐!3 YֻrxGxȍzvW4x &Is^$߸kzH &J 0̏lB D?Av߶W1.D=uD}qC!Kd>1EXtdQҥNrHfj.3 <| ilAQ-WuBds!+EҠ v2|EI@q.OâBhaS$& [ Q!P~ D\$UѢ:,6I" Μ `RH?^+iRO?FZ]j} ޾q'YIjlA(_%X49TKL+D$xg>o'σy=ǔK~nj62(bRYUCeHE f0qpeǬpӊZ‰62/Bdp|8zIEECDƓ91hA< -2nj)t0z$ k!B$̄Ia ;9+{lG/b1vNny$A -IdÖt<))CΔƏ\z+Q 9q! '( 0uNOEB<@[[g!Ys=Qb Z^Vs1mw:)x ,&B>JfbR=ކEd {=\>ԿپJ?W}{?433w@8[ 4&<ܩe{ʈY7_7>RX̶/vVU;օlMwδi14\fJ5YWӴzkNuqm^.y%Y*:0k<$z(r^[#hO'Z! HWonBۃ72K镫k<1m/&-a0"jmb+iMt cxѠriw`(=^F[~* L>Stw(P׼0E<80ş!ޙ.("@^ e n'R (Aa/hq+~cI;;u0V[ ,:i銝ξ6/#rydp=svvypFXFQ%8a| }'@ @ !!C $du< ~,p?sa@C=pgɴ @OQGI{,Lt#r'ex`1tKmPϞ~~xɍs<!ɲL x0>QȴCJ} h0©~8X۸ Fc XCY{KHek3߳bºsucc(ny,]1ptd:K3(i.?o_lm!l`)PRdjUk7~>>?G#dJ 6/&M!ں('ֺ3O5"B1dQ-[\"5 fj}"ћJ\'#;y=P28J8a[zS:'ɽGBSPdI+{*W5k2\N,Qo8|k"k(0dI-O (XBWְ=b>uGDO,^{Nyמ?r5&nq63ʼn="j}<1y?_p/A M"_\8>2/bGۢd;P现:ǩgyzxzmtX *Q{1.vrwK=6eg 0Iһx7V؂6"e HR C69Ȕs$5$*@Vyh xHJՏWbOIH %uʑ` 4& I0P4ACk[j1L@QukH^-ђKY$Xƾ̌dƑ֠( t0n?Ea$"RudB1&1tE(82! +,[deVBdۚmGXf"z9B:O(6ˆCM燺Kҡh&$%/EL.l V`Eϱ:0L892kd d}E>d?sdlp( A Vr wV/Oڨ=>W9g^Wrh6>DdfdI'yU^#9^kơ(>e/sݬ~{I %TEDoښhWRT8ddn7AYs9<ϕMPWdd xs6M=1jKN\0. >84S[=Һ=RpM m[3YlT?"J%IjڼB to Nk#&[aС=tbB~E8,攐8;j8=" E$0 wKs?'4nuqȧ*I{C/{< ]Ͽ=O=zQ~BG4($ q1uvsDSWQC3q{ kj, iҭ&1gp/FzSEV3΢Bu軵9Dq{@DAnr\3 1QxiѰAVΰ UƝBG>ϔ,Ҧ#ލ:*Z߂351p<5/0!z Y K:,! 
]ѝd4v7 pgy"uehѮ,@ s煡^` \g>8 ,&b' F"B)6g0ȡG$mPgL1dɗi(%UX}!2O@Ms܋}/) sܘtJ+ Ҋy'bN&[(72T͚EG|Cg=ZL@w?~oo;nt\<܀j!X-fʕFp9v^h^-GVB&W-MlK(@)rR+h u!7*Aoc5w;X[.d6j8hJɲ*p85 c?3^ߐ»u 0a&WPf;ppe6p'^BVh54>Y:Ȏ36:.~P:T#n-EypQ`KBu %:rG`$GDEӥ:Id 0r oלx|IQ%zP= ^}>'`+6yE̟hvεyxN}]o7p6W#-嫞p"b(aӲPdh䋑 BRJ+zHqph)tdޤHA9?| =002/W%[;WoGVipgeə7g2խKtܽ|cSWp}/4Ȫ?!UZ!P|l$LE 5tH~ _[q~=nFVG׿N%8G}I(5*PRR4s 5ݜ(v|XPUXCd֖8Cl~`Kݝ݈3&F'W+!2Sjͪ`C̘zLg5mIN`TY]J;gXb,jO0H/; Vjͱk"øyh`"0݀̒h xJ=wjR4P$u1z7v'~H~PVIHk w$5)BRs*J;A H@ E"*_39vS! Y(AUTP @D!E$ (TT>}ا}"B(T@ BPU(4)JTpn 8vhA<12M0AL#6z#mi2='F!z@ 2i6h&44FA'4IO@Ajl*m#h`F2 шM @4ѓdOD$Iԟ1   њhQ4!#I@4h44h4TJ# Fdh@b@&i4bI!&! &M20QFA ihm$Fd`& $Qڙ=)52z hBR$M2Fiѡh4@PTT@_% ܝq'fa@}{t't@I{PTRSC~BA4ht)rh44JhM68..-  -!JYVuYaxSY6βmgm+*̸MDBGR ,Q:%mh̬{Dۻ(3Q;mXZw"jMیE1 vm5挖KiY[MLvk6d+^[n-[Z"Ͱw5ZhQ-,X nنl+cN *4MЀ^YZ3%6m-mh@xBlmb#G^񵄼hnͯfy,,j1&#iP2m4PHl=v|gjכc+NΟ+=c9]hD 1H}r0{pPb=- HlhiD60nյbu~nbi9qs^# 'HmytQ,Wo 2&7 CM -b#hh)=MQ]AA,Ս2^Sx=0FnĆh$n=##,kV#G[$kT1z/#zmݎ<1et弸/wl0ɑ^Y7]ݨv,^f&\y&8b"LճEAnmn3Y36콁swvڰ37uw2pY̨]{RfnfFSٶf2jކ76Ʊ˸ʹ]72ͬ5yy_4y̳1oGxb+;o.L՘,kvVaz\-QZ3\PxA2H9&z2momC( `)N F =Riԏ.^ %: W7^zf+Ûˆ_2\mw#g/.qh-mZuofً̛w×@˘r\\ՑC2ӹ/'-qF3>nf4S嬽sf^ /7k37:X~#qn9+Bs|kw9yy=@c$!$U@_ fQjh$pqPc5q4DSW2Ea6B 9T:i1qYI03&+RF0V!u052i(PFʹRRF5@PPC1L*5arԙwut+`XmǕWx0tBC6N"q' S&AjF@*Tc ĈIL2tR:7{g|YHZS)y40Yq NY,kQ %T 驲# "ޔtr:S%v7"0qs\0/Kb,?VܞGiT]Ԏvs 峬y*۩E F9'?wP xucՑ.9bZJX9-< A#Mv(졃Y^T¹ħVF t=, u1؈))MAa&u- X*t݆9)[s|ukh_E*Q1,< g91A 2'.b6X6('S..Ce74~Up`6΁_rn' /r:nmҳ]ZI"j6BVu̸옅+\j ft .,Q矲t :wWGhn@ُNaͣ[r#`wl"b1/wSCfǂ"i3>C^><EV=L2Aef>Ԝ;=$m:6k_Ш$ܛW&I&(ՋfmeA:Tbξ6X|E̋j^,rϭ.Ըt曩v]Vûţ.8)qgU|ྭ4!m vvzVm͸Yg-CaKL)&Qg f+~3-f@7mXy0SՋoΟԧ[S۷wc}ZJX9 hrĮLɉJ,ۏo%wKF#%.>EٟAu]Ԫ_mvUq=l}O ;'o|d㷻s~>cֻWPv_hmo^n~^vL?~WxJ|N>Sx&CgWi@^%Rn#c]ކMܢ/G>9%ayoyb}֫'l^w8;kNg7O*O+g=ϋ_N_rr|? >?@x}nS&p?} #o6/u#' }6EEw] UUé湔j&{Ļ? 
,{ͪt|[(2Sw녽{OX%gF% wNWO*\?s]>旡[|OOt߻}3C?y'9{gޥT-^l_Cjt9{}o;ny\sMZzc^ſ*†bj:}>:c'=vx_hbG'<=J@/ +q%/7ryMT"JȈR6${Mm|̬\]ĞLSlǗhr9PerYb3J,e&<-'[jT@\Fk95r6 L0Ol|c}T|OuE?[/`9Y?%7u|˲ ̀ hJKS[t&Zmm:\kkCz>Uw'x!&F]]]9(0QOQrOOcػw{n?Anht0 1 ~t9ndלx~A^9" FϔKr!XtV('BU" ,6wpL$*Eaf~@@%Yx{ $}.^aZ! ^ 2P_3ɆRpWF &G~mEc~& Yx rxܰ tf+zw 6RO/<_@ ,:K;22X"?UL>K=hnaU #nl>2+Tq%}Ӻp:D1Oil0 9 :%4O {@3PS( d&feC&f?A4akٚ")3{o`:Xp SdXҳEA"& wh{1cCI 1z篜?!/bH' .r!_[qْB(F0 =ϧTcox{-oSXPpkJdD3)",)k}F0B&:'?S`b4+T/z~VKZ1Z4-ڋ$m=]2Irm, XRcn ,˸gǭmQ A%˷w"q-dboq9ͽ ?:Zo/:_{bB5>N-p(2*Zi64znHĵkIƄ!vATM <;M9Ubj!![vo>,+\34#Pg Bq6kZ|)b| x3bm:# l䁴I͂yo@ /' h)SOHt\hVc-c ōY#(u]`j:|fxGl@pJ EHT2# Uva2j0'tFbT$%tT862R$Y$̗ପ.kQ1#ShS .mDUfJbOCYiuM9Z! 'w`3w[8Qw0C$l,"v ( jbV`7h%=mNNwi!EHG1S8I£,tAP3ae@#3uEyP0Q~hpk% AkR7!2L $f+Bȡ(VjlòF 5Lrymm(;_T@E,`3,<; !X#肍  U2I]PHat%WTtc$B/vH(@Yu!F8ÂD `7x:2#-ꊐ-Cd3xyb \$\EI,F ǵf«2l2wD-aǏXe`/BBz[߮#swxAR"NEUJa:| Rrjbypp4ɘ A]!bX|`%P5 a=)-~# kVzl A ˃/~mb 3P`NIڧiKB'9$;k|{ndҮ.]dPH@QF[r)+L:O.LO/eCi{-K))/ ~cpw6ꜯySZ$hq-su=X_k % fuL' =Cv&%{ꄛy $Hc.Z Ա@г݊>x6>#{^$ ` g) v/Sxũ2Y_A^yMۣA"cluHӪQpTz_{?}\gfȳo ^z D#a.\tB@1*yVN+mgť߃>ȉ"8}FEfՄJtbl3g*/T1[Wd~D3gA2sϗ&\jdFW>uJĠ zҫŎᬖ3v]m#}s(|wn/aũWWb1|w'n&0` BGA,Y!*pcM1@-LN$x%CpDuItuR׍S $檰I8`%(Kز&9n7K3=0៧oj3HΛJD)#DP]<Ǽ .;|:UJP|veh-S!20qBE'"|;fn|;uITzQi [Pȉ qi10(*`^5]8(L4 4ac D (FgK()VfP:'zL<0!#Vx=0 S@'ݼ[{_/wBfV(pG(H Eu =x[9aH l$R9jXyd2:hY*3CIa>9BJy>*L6M\.9iq(7}1whOγ"KE$`sZ\ИÀpZ8D_p63p/ 0ӑti8bZ2V<RolQhkθP[,q>y>jNӦ""c#P+gdj$ #mPKFPZ&S<v-RhAdZZYA<дU]e%<`^gئhgT<1ۀlbe-1A>/PѐTJŒr jMR1v~;kî?ό|w in:+CdN`8!Y0#&|ٴ1>_tCi'D2OJ.-_}ѹ|4%LQ\,Uj&`-w>mxfx! 
nqeQ|#eSqU %qc ^8ȕ40TDM "`SV!\y9˓.cC\n׈ P$OF*qb1$FD7,4q),vyA# Lj8bĘr'%_r C#QEAlpFY$2W@X&0GLI8+hv47:Aq"Iue0AfG!0dt.Y k|3JV,ȑbR3inб$%a41  !"`Hb$LJ |S$B 3`I3E,cPҶ*= X"ǩk bӻJY#%tr8F MDePAITBM12YK@JQ~5_at,SHiDz"o 3 Z%4ȔFh"d~e*eD:4\p6 JZ!'9-E"8 ?ʗ>8OBhn#>{`~w63[ 1"08#fu ;Ì49qp:Eǩi8*f$P [c#E&$@Ʒ5U'(wa!7l¬*J(TxlA9VCEJ/s-Vϕ9񘏫${#'z*ukɀ<9qX[`RV)XCrQ>$vԇmŚ٭^0R*U˅6d#Ņ sj \:uEj*؝֓)JNةL TE909kF2/oF*itycL 0sVۺ?C:+'{A/¨J(QԻZ{Iv!6Ep@H&hH2 12e EnһZw0Ƈ  r_;adHxLze'bKGCk*t"42ڗ5܆>ydw~ȗcȻOEt?F/GƋ=ּ~ީLK[qK[/P/G׾/ĕLؽEl#?ϼkqDcj(Y'F/c6:8{DԻ:0aMP /N-.9[x"l49{H-]( I$V-~CP/pw߬=Xu3Lwd_S,]xₙM 'Bp"(V'a]W,:xmzc.Yk-M Sj oWPEA1 .O\SD6>Ojs׭Ivف۰a#D刀6d/U)F][E΂ b:|#1I+}~V f0kFR 8:%$ban"7Μ)ѷ,g䉂t HȂ0 rbD/ô{,yuI&)HqY9;R D1laن ӹâ1UÚ[5\VݬkuյѓQD"Fl ;+ٖDAyvRJgYiH*8X5GLBY\j3'X^@oU1Uc$X_/os7]&ɠ"8Mֱ0)!"LJfB 0rx8J.D_^ ւkrU Ilp$S[eB-Qj2:1@ύ~I8#C"C  А$I 35{^{Y(sxf$C'5| 40k[;Mo%Md@5CQZ a'?h{u$0B{%~p਱lNʻ%44KcKUbY@Vzٖi^>V`%'* 45> (1r\mdN,vojएE2k6X-%O9!(≓k+h"u[+ŷ&2ѣ&9jzYZ*\VۼG# !#4)!Ok*bZb$DKv$u'bf>M-\O8L=^nD-/>I[\#@WI뤱'-9$e i!(Yׄ-#3 Q$WIGYq^mpkwGŧ;lYBPx}5[HOxHImVjZv*ДkرH3yyӱkA&}~q3-~mzoۏܾ ˎ3;ai.\Eyyyvxu^L)C:讐bÏ|L:=m2D.uݽ>7]m/d<(;rIa"4a}駲{fF͂y >̚Yo)**I3=!"@鸲翻Z m;wFbqN峈i?:8v%ϕ vr7&JwcWOUoo=rk zs]EGބuJK&sb 63@-zI-}_Isb#jh9L)>!>Ԁ(O 5k@UR_bޕ; ov~>s7g]gi&rZ:nY=wC'CjT7EBS(첽qA%[ς .tuZaܠ|"yAC!:/#f́^)o\S) ~ltZksԼ ~6(^nbn(Fڃظm#MjcI\[[%Cόˈ z2JcmLb@ʱrQ2ѨYPѡX,cK-40QEߋTvHh}=wt *8YHnlj[f]ɶAnjtW9ɰ=eBP`,Ml1YN#(R&QPҒJ]#^flok4'vح6 n{sbKOy]+o9K|FfQ\,)2VO[ZfKDnmru-54")AD`S[ƕel()\d]7HA)e>ۊAMnZ(h:ZgÆCAHr^xN7X9 /64Vt%%=wIk R: llUP@m}kBki]\ MiYOJ*8#$rJInmCzΞM̑z$:EDmT!.#u^˽0^ՔxbMW!w4#M'c>Sd4Rj4۬Ύ;9+ E LwLP uUe&rZ*Ұ KVkx͚(T%wu{Ug7mXE]5{]קI(u"tCZtw&ḱE4P'K;cD iCH:++x"^ݹQAM{uEPXt e&*X>zر)f6ME AmNJ [QMt:IU=ʸ*=4S*"J;<MmeĞm.\iiIlkAhlCHЎT>%76ZR4ZJBD%3r-3#t8 HV #.˶glYSl(;mYݡwEqYYtPYVgt\۪*ҧ#D(swgugGw"D E+Af]t'VvM ;嚠룈M%)r h^K7;aDAWEu#wauܜEDurU@]QqQRW]=P C )$)JJD;...).JDʭ.d͗GtT QZijg:(Z!D"wJ+J(BgGET]E;qqQE%QVwpYhwGq$QtQYVqԕpu R b(h)yd[F'mn8:TA9%%[Z$˯CT!Hw8h㻊.)ӥMAO{4/*ZA[6UEqfǜ.:)hKXEʃ#8H) JZZM @V:"73Jp.,ȣ1&4^G'-;(mĕdqLMn*"8(#-"fLqQn3 ӫzLt 
;^ط87J^>KFӫnϒrptWe+p+vHHа֊Z H@޸%T6FWI#D;H'XI-7)dyIrI*VB?V[ 6ؐX6@P霶XRJvJ6 ZK@!,a$#51 ~Ƃs_'l&ĒZ@QK< JՐjB|~s* R)8-c7>U|'̎zUX.xGT$Mbog"@F[e~)'@$ː;??1-|]O;Q6@ᰒNȡ&+yy,6d!u(y~I$)@-qd<ͣxgmqI3+")2k(@[ $d Y2I+̅P[s# @if\ vBIȜ+%[c. 1C\W{zЪd]|[ZtB9 Sly@ PAfGG1Cl %`BD$3+_?'?%)i1EW;9J\ (N@||~_0مp Hݐ-Pƃnazm'dH@МāHxBx ҏA,yL@'^Rhh4lrzNGRR.xC\N$4Cy[GI&:ʟ!^{[iuw6n\&*vՊb4ܤj=\A:r0T K6gbLeHU##DV2PU Ute):EO l`UuVvxjPunKgExQcXN9sm'-lju:4ih07|B:*⋊NNB.#㣸:9]=ي Ji).dys`P҅& hS#I^ͺ;tGIHG\VRonB\ EJV4.@bkaEIuwg\]aSZ]]eFUfY\Sj8.m,fD ;dI4[:  !ht & 1%:)m$K(h5ӎj#hT @WhД1.;VKI\iܲjh0a^kwWWeh")h壐8 iukƬ:9.#h N89N.$Rr4EHU+u]ew9eɵ媎Hhf)ԗVYABu9ԥ_X!tuYؑq֍Tҡu+I4G0]1 c@!M"Dt1@Ny!- P܀@U]C@AM gBzّ(3u"> zuT !P@dѤ%ھR44IIQG^xR]H<44S:ofk-&/&OqwFDU담Th}bcN`:)N`iz8ZG1:;4 M/I44ā.EAOb8dߨܩeӤ랡Sews-4=zZh5< @[WXz\;utFv<Ɗ䣨bN/]khB{0bݞ;G!DZs_zO&9r>2mIu r|ז;<\acMEM3T4,l04t$ZJSJ:PPꀡȠvȉS) C@i4 ԇ :$ AG ܏!: ? |D=BRSOf@) (VЮ}!UCJR r4#Ԧ:${즑)ZFz?K&) Z9unm.!F44҅ *h]4*P iPPT) `+aCώ n";)yIBCJ: ңB'RD@ Bԣ9*w):+@|@rOQ9 SH%=Hhf@P/4 * %#H䋥CUiPОx @hJ*JШRu/(i))D@)J"iϖTJ.S@% JF4 Pirt MmH)RGP< E TօM!KtB/(P̞ԋSx(_(PR @ *RAB'%NABP^J(ҡD_b=Pt? p="P)Jղ( Q^@S@!FTHUB!J(Ђ4 UhTVnJVt#VQTiFݔm`-R¥,9S{e8I$L5Ⱥܗ͚"8qN*o&QE>{rm"֓[ 4C #clv-fQEbm¯I-+J}?`L:~bs+-)A@iICmSu>Wb4|ӹL62n*%"]MтB$"ʱ]b=li(`v]Nŋh h4J& W״xwo4@t=wEu}vvjǂktLlPP=HMi\Ǐo=ьb٣]F[5Awxֲ὏[sX;7ǁOAњSp?g.'H: Os!F6GIY:{V"8ĝ'pf^wQ@sc4<.bB*GSVC e'N4΃ےz QrDwi wM]k, HB B(T% c%Pcm)HҦؓҴTFOj^#| îa䇞kpASzGPvܪh( ]w]tWDQq]!B6@5c('R؄=xHջ4XS";ͷywh@yPff.!9j%u+{r`pOp $LPUDe0+䘟 ׏1yg}?B66>w\kt?wo.GCilq2:J8j0*h!K¥lz ĎIdž3N qF+BZ8 X5eŁߵ~6(]K 0¨ʠpLu\a)pĴ}`R!%*6AdؔQWt6 e&",oA0lOs͍I܁$wk<4CȠN{/CBBCIdC9ه~DH:xxj]hQoOMzKo i{1ΩX 8 Ԇpo%u$E;u-c=_nms`3ruck':̕[mx"}X'\ɥw9P )&b47ܓ<6? 
62Yw[H/ssQ7X}.e\F}|xv;r=x9~2WS@݀5_7,D?)4M Q2^"?qi Dx/r7:^.7T`]ݓfi *%fS,cU<4$JT `oP\WԘi|yPa9bfDg$.W_3Z XK>QQ D4 nC]] *nDng|Z\Z4 ټ:~d2 4aϗր3fӟʯ{4?ZB[_Bf2×cSӜs UݺCVGdŖ0O7x E]V5>V9ɡʴ2m}Q*x$-{ |_ d]_yzWѥ\_/=#2$-F]ܴi l#8z5^yn5G/BuUdMe!I uBOyJg>!# *&2 PPEsQY%xx,~L"2c3̶ 8}ů~bm/u޳y?Gv_jay~_XeBp\!{ FNPGw;GQR5РvFSCɀ_M{$crB Kj6/(,ˢXn!y~ 3,/C3ez2г5hϽ;^m)kk muEkaOVf[As#V"˷|W`2tn2| -C($.G~,-ϑŻomߖnf ~b%s3мaCO-g}׷{wdcbq!$-{#1C3l(2YyޓpB}XsӃJ#6{5E Ȕ&,/NkgC@ Һ-@A=.ud=o8|{OF:MW~ŦM@YQ{PzDGp#r/ül 8l g{+zQK8ꕥ+ %cJ1/ŵ- Z8r t+.;34FDTB7]zb ȳ>s ,3OxBH!APdDTC2|g<~Z _.b0 `Y YxH ##Z;D=kvi4ԗFI)F)yl'omJFՁmXJa/|?7O>+#|o?qߒH{8|[%'pͥzd^]5,q򖒟Y'QæE+I`[rAVx3Cs͛ȑl^\=A7p%H"BBb.c_0]Jα~Mt4WnzyyXtd)^aXL+d L{lFb"XR̉N6w}_L3ӑh4?V?!'{,gIa{%skJpc#aO1`{%Ci B)d t eTullIEp3<|oxUQoRV6F6xjJ1T6)c [@odھ/p8'棬-J \tF;Иjtc# _(>p!=ɷB'Oh*OmYiEN c&Fr5'Ņ~2=ò.9]F_e# B7h7up3J|>0՞monXvl\ʖ L:}z7ˊp;V>c B@>+)HDE޷8 2flC!=m>o _O:3 Yj;sNSqQh\=fon>./cN ֗4n[$Myfo6A`v ̜QzF,S$ݢ=<8ڜY:ִ=k4X7 we›<v|L2?K.3$[;-L7#^Ðbu8,UUuq2+j+!r7IK#x◱EAR[!hYy,jH+ IZ f0̾x7NJOi{g= 8@aWE"Hh80N}ƾxeAtg6[ɐ:l$:Qq0 m=ń)&5CZdq#nӰgX2Q ڌK 7|E1AJWxI8ɃeGzy:>T>I{MC۶-t%'كH}>?H=/ߓ(2ځFhڍIf#|LǑ;qv\pU`t!jXL\ sɍ?Cfgli'[[l F-;boLH~jȝĵ9}TiјxXuھgl`O;w yb >!9.CkO0ϥL~zG%RLAfۋn&p7cD6ݧ!@I$~'6ڑ^z֨z("?(*`Ћ`0fQҖD+J6Oͻ2?}%볆<]=mA{3qSUUS%1yI "GGuZ=B ;_OH8-j[y?'$]z>{8z;=XY=l$3k:[eBwz[5㣰ω_~ ?bL(9 Wڨ= Sf(ty|@(z ZI !R"|<ʀ(()=JQ"AT"Jm<Sk::q?@MFaOC@h A0O'&5SOOeOSOT𧩡4y@i@h $ CLM4B0 #M46ISM!4ڞA24&4T! 
ނ))dɩb~QzbBy SzOcD =A4 1 MA&TQI ) OIM1)ihOL$7'$1La6PxCA=Ajh=@)4hSM(D5=4#@ё 4QUŬDHw;!y@ d*9 P dR'CjhS 5ad$IؓhVIkH5)% ɒ!SUšFK"9X)$ԋFZ*MZÅ ÛJTU&aĭFb>KBȥ}3&Ԕ,M*KÜ") TʳZtʌ拝T7ZxqBAyx=D+4N$Eaans8sŭ:(RVC4!.\ @ %6p[5( R*  BDF,T)*ZpQMT",L $3i$)F\ԉfaT: j IIu!.%9SmJA HB.Ri1L U2klAtrEÆ؅a(BS8EQ!PؑUQw0éDCJ$U0jbfS~HIihӁYR؀$: HffKI@j&1 n&eѥJ jb*$) 6(8WIB*52 PwÙ(L@v".ݙ>MEFBAgv{2g4H"%T|$IɈhp/5 eBAFe[n{Ϳ\E'q ^C\#{tuK1M9 =gwj7(6it>GWq3 z1pTgY\(j\@{9Ljyu:3ry9Z[zچ{!\.yw\;V{zKtγ~ϭV|k\w%8^4 W3ަֳpg=P8/j¯G\qS^_AHDb < 9?"i?J>$xh2ڃ)B\%cmTM븜 j؃-ŜhښO>;V㜏]VlS:7kem-a5WR5ibb 1M\_LU)m}M8fVZ}Maiǖ"rBIӑ5I#~Ɛ[u469_aѵjqF(?D?;[FmOՇ6,ՓgW6=|H_LZ\pfg}oqr̋8?__}tsMurEOv:=0WW jŋoxvx6[q|+ݻM苗ǹ篦Ѫ\'F&Vfa;6z}oܷf~/^oN;pI5$W}=d3xTeNbkk}CO0w\ IiZldKgo3o,e ZqR^O9En/ݝpTn4+OϻcMJi M0@>:@ze 8'8P_q!zւ-j4][Tz)R(I%!D͘  d%H%nnw ]$ Hz6H0;7Ljy̼YN=J-+9: J ֆ4n=";ҞCԻ!GcnancȰZ5,֕*Yӛ1}*(MAD $!P|% *H S~e*Y?*pFR=T yLItE!"5"ESفJ&>NZoQp1q2Y}f`ĝ@_C&ィ~hqe㌌p7o! ΢9qƛ |J#ú>ݼwFubEUM¡ie)g!K[yˡtiMK.Ț/w5SΉPʘ9#\DUsx\o<ǝ[DqT=s#Z19[uuyutQƨ X0;,x\e B<\UQϡ=3q͵\]/y#[C'Yӝw=`Ĉ4H' 4\ZذbyJ3?FkՎ;qjuLq<7M|y|s q\z8ԍtnSq輾דbws㼧 ߎfh׎^Yaqmeܷs>yF08Dr^s2\ȸ`Ԍ$њQLB[ k=0I-DcY_I55#AAlbeRوcJTr;Iտ&Iw-H1!x%Ɛ:Nްj+8u|to+5Yt3ǥRZH90mkqh*e T tNi\a9`4LT¦vsKRPEqJյrV܆9L_)WFxE`HIeIt˼F)iFtő2r˱%[qzL CRxE4:yA1 C! 018a\(GRk1Rx`P08O@ƴ<bzFL0/:V8̌P|E u+ie჌bk 5u2c!dCQӆGbF(4!19+b YMPSBFVMU;y ʵk)jY}3Y vGΠQ#mGN,B9aj:IZ+KV@U=o WQõ}J,:Hi2@eG6i*:TK 9HNGjH9(䖹wc4<:jH\U8ƒy}`%e bǞ5:H^)E@<1Eu(M"hҊ\rZkB KMN2 51"|:HJ)A s E: L!dr<ข#ldTCŵ Ŧ:1KB8Υ%n UQ˻Xdͤ?yXAE䜤OeH|;hfY;8Mۙ% 'q\!~oߨ8E=_-TWFU;"0(r 1 M}$+3T'"cXo1ߐ)<=8į#e>_CoHGKw C%w9ıuS%FmWd,[I!ħ/tn-r#yGv&0y}$GI{s=<X6-Vfq\ף=ph u8;8^s##҂(}%V|ϜU'T堮MSo&|v|%-wt_z[>Y{sѳC@7'O0헻]|&$[7}ة?C1IGwN l$3Ns>8-&o:s۵Kfχ-~H)p0S}> ^ F0y=XA-Q=sӋf՟rm\Ib1D&o|(f X^Qpx[2+qT ?JFΥƝwdH:'ߟ'gW/VqT=fxϤoѻm5"h*`{\#:>Ǐ}Zӽ=мZ5 WQ9: |]a^Mfuu5fȝc_׻H7{Bo Vg8{aF=%H!cޱGucJ!H ;:(6eǠtj{BSq7)Bnd4^9ky"ӫ[r[o+;0t{;g'.6\͈,)_Xܬ@]jxo Rm. 
Sh]{GۼPfe ehjLFy0L{O3U.=3jz|룬bLABb_% .oHl'dF^RmdM"Bszw՞Ylgq˷U.nVO1>ūgs K[PS25}̛?sM^*^K\ (ndyZ};{;F?+̩X˸~81ї>a/ 0ŵ:=[]7>Lࢪ{TmmӚ*ҶeVP4P皱Һl<6aMDyo9HtMV}b'y\G3` (mtqI&\.NJPb dAg#Bb=30KLP(BDtwF=vu/&lB}֍\^D0B-DbHJ  " . ޝ.-?^&w'NaDy\G#ɐR'y]_ =m6m; "0(XQ@A| +@Qo}9 "QUUw/p=kZ_A@4"xTƲ9<@[XiW6'RVBtUu`Bkct(:g0x=[NIuEJ9,M>AFbaz*)wҔ kCeϠXmϫ@ǣ?<~,x4{0J#̯%ڏZR`]k-l茩:|z\|1gz6kDɃn'@O߼vUK~L=Vcǯ˭ v515ySӾ=OUk$vD@2Y yn+o !A&&QI~CNaNHkpy!U%2,HtI: "HD9YʘIv ;9.P}yyyʒF'F(T N$njf_f\qB(!dH2&MH.8`̖ $[B8*̲ ::#@,:՚$C[+Lfԁ3t"'6Q]rDEH_<׳[ :S= 櫌fc:2xo6k;f,K1vyy^=[9ڮyϟoqS,;+DiB&[Az5 VgA>.t).rVE(5% !4_^(qsQ~V}<{w!~my^z"ü1p_@C-toCmūQ7ojFũr![Ա[qp&&U^ue'4Xy ;y̔htw)Ff(P+`8Li2cA{':BL̮I$S fX2i8M0p(8Y6fOnjCg;kbpyTyx\vpF(Ia.ICuy`N!`C8$pQY"9w61G! %l[\riSC&ZaV 6k^MKcH#k'iaޓ2E e:$/=9n.Ok)TL)t:~-ו-K .Bw%fvJ %)S 2)I )GY)Ⱥu%yҬU*VsYQ RC$," T):Hc;H4YV*(M4Uf%)Vdn& j%qN TK+ 5%>yi֘5$&ʗguկm8Χ9ߺǑOcȯ~Cvb9wF#}Ǿr` !ݞ<}7ϻom#_c"Qe}ay7 W &U2B(``iY_njC/m!Uc$ÚB'c_ĠȂSi&stzd ?S134v(ãU o"%jL+knuY*vyS†%CFz |F(y;rw9̠``24)+y҉,AӛYS(ExV*^U4'ba@C >6ƫiv$z4aA ap !9G^0 ClesGf;'nPe$; `8cw]bxK; V\HR3R'2@q&C;`DR@6AFJDABB%YddД-Ѕ(Ѝ R1@U JDdBIJ J(\`C ZZ$ 2!hC$2iLd i2ke#`aM6 ahi$"Jq$0Ć56]0ؤr( @w1وR,dv2)r\&rB)02JP"DZ))  ɦ tdd]!*D*HS(l !U%DIJd@$vzA8 (q4 J&Bd(rUL(dFh@ai(62r E(\ )\2)6rP)W# ZJ)Z@iBiWƇ !rA)T@WfB¡Ģ$4 JRJDR @C`]:`m\` 8`B @ hZA,(@ Bs1R)Q2JEi(P(D1T)U2QiD%)UBD()Q@)AAvUD>&T2D(PhFB(;f44PMAHJM+H0J+JJ(!H"H(HR䪁@O`vԮ[0ҹ`۵lM!Lr J26q@ZM(olҦr&PD^^(\M'oP#\ 8 c>z};֯>;+Qa`NyUD &K!!$L 6,Bo nQ^nw8b7HpHD@ٚPf*p29 :$Q(b{I5J13EA:LA0VM2_ 2keB%Q ,4 )M GON!& Di"A$R 9*b rL+/+E⹥B:ђe(kp$"@r<"\OZoj GyLuLhd0Xɉ[! 
RDg5@ A%ykIk1Zni ZOuޞ9Y}(VϿ\__mídq-iyq- E5xT3q#n>ޖ?"AӥYiwe8U7:75CBzew('yN| m`> 3KSg&lR"xZ29CMN IC%t渓k9U{pqAA>bSiz4yn'#NwgqFN32VA &i#%I9 Q'Yr|1]:G#NU9{(8Dӽ9ٲG8ht{pH!< '^;S7̫y\)U׊8Qkӛ<3vෳsJGd);x2v(LDXo۽=;J'uN"*;F!0>QpFU>#/JGCqsl ,p#G`i.ٽ/F:nrz||)뎡J Kz]!C97pN7N#0hi((9cunKwuǯmφ=.Qˑ|Iq8IЏ-y%xQE^=hlqdC&E1y/[BL:] c&='^SH8 M-Tp~y/8n9wz O<R (s$8pispL"9 A#= SߒiÉGaE)PzAJPRH" KKMU*-"'i PW@P!E aLmw⢳-𜹀.hwoW6|puO<<8MB>^c>[yӊsB`yN]I("gTܵrq;nB;h8bPJy,W#DW%qT I&EkzֳZL{}6Ԩ৵CiPJ)Dp rUB` 5$RLn}>nG˟/uR!&6BÁ%tV~T D gvTW,m9M=Xg@Fb/2ΆR5s㌸LZ޹3f;zz# R环y[n"ϊdH"$Z46qR))7לV?ʃL :W =P<ʄ%4I`1) )EZ%a'"M-T,Ӫ1؝ԾHn~PU~o;=[˯vCXU9%r\ \$WfZuQZb:#"9j:W قK"~bOԧ~rAenǍ?Y5^55Yf< m߾򟫌o{s{,Gt x>]RW BS2a>B:3, YHH{%<;NFFGw#G#?EcoSf9>>|H0.0@޼$8#Hs6D.b /|Ta8xt Frw"syA|>:w5~{߃{x/^>D h`{VF<6Rn>*yқiXmqw "!FU/tel{UܧXJiIddqk>_S0~W/q+OwkC ; un9]zwǛFG̺ 87beed:fhYcm,2l*BRFx` mPU1ka:S73sO7tW~>qdǵ81{YӵyncI^H }~bqocoeAB4q|k;mK~>WR6Sy{;Leg|}UUSԆB(|8pryH?^wV~_ 0z!Zti.؍+z*G;%)-Z?w{on}Ϩ}TUl0 uSBP@:^{ (E0 1 4`L14<ɣF)?Dުl4=3OHhOҞ'{I hFHQQ2hzTDbdFCM= FF='laz JFZ;r5fCk9n&ֈ @6ʊ!g, :"HdAȳʆ0dD ,t83:}qg1|sƹ1nvqGSnuLޖtkbjqy?L6nSl;HvyQ6h-!wS4-DBD9iUDٮץ$35qGe?6Q.Md*7 |~|gAG꼙';ҸX"YE`ItF65/4H˥)#HX)3繭$K@šW"NwS=۝Q,s5T^! nѤ_YfPᮍp\oiy}Nz)u{°wHg*7Qlۮ+DA44/كa ( qSRkLnw6%Hɻpi.WV\,1іhY.9eGp .nIwFsd,[.c & f ()jL=ōvlש&|l9>/z ^lS>;c|,[LfXK ZrEf|˂;{ QK/J45eŘ ڥͭt5%¥Ԝ =~I}- LF|.Z2ZOg )ꍌZ3MHJI@K{`_#cxXW0%uM~/KZ1MKs"o-1ku٦\׮W*^V;Y(o<(iVMƫ%ۡqaNGܴ MKQ2Ԭ5VZnAv Tq#7NiճsmA3:5m8COEfvUhH!ЭO6 ZDkl6`ˌfu\tէZkZ]K ܭ#?$M`Yڪb?k1?E][zVákhjK:uV۽48 TvXBZ2CZ.^(B7\k9v.+zM: c˜')K:8SUw2Y &gg!mjPK6tԞX Q+b2Ϊ;|.-tA@;_C/s`*ӷi[[Dfe D mkԈPE䥁VzT, /3\ R&fY+&D-@P@`DD@~4YD $*d 䀂)K%duL<(EGв&T;4xFy|ˏ7Q9˩V'iM庢W ϝθP}q]ay]& v|Y_ I\`B ODa^|-_޶~ \7k{XwO->)k@#+07m},Jj/bP|o"ɣ@ 5wi ƿKgjq. 
{ZПJ>T"wH;Nhha"k1P?(k MRH9;;wKW鰚sk.S[ѭcc^yj8~/B/^ M*x=z|,PpbxvCpux`Ҫ +4 ctڢ63R!ՍU\\Xc;2sq8p޸eə!2UxKM̓+mMP\ڛ~2FnsYnxLA#mʷ0-4:w:8P7\:9P\JJiݺ/rN%Nܜh@7LM8dK +k[ͅF2 0v69ȲcN'%D L2i$Qh:Lo%.qѥ=t>V4*%8lȃYat "߲T_/^Gm[1tY[ڻOXUFTBE#A0%*B=4[bjƥQ~a lK=1ral"TK2[)rVA/K/]}p.FtaIƇ?=q{p?6vy>g<(ӟ^ϛD|OߛT&߀@]D3B?+5EO=qT7,9e0O*o\'ZԖ[ME TP\bv">*/* 6iв;`cʧHjw38+cQ[Ԓ9Xu=\ir-MsHoΈ @C=9k9T`V,cl<;wy;? 3fˀx{͒"KlW\^[3kLӡZ2Ca-/zV" Mg_uK|}SMl|N"KO<0תn@@gSr}G& 2O6@e5NPOEWW_VdL`Ehd vM lCO)=5Qjf=LLhzAM P2dOȚdЍ5SɔS=&@A@4hz4 @ &ȅZrG5gݘjVpX]3y) ;6VS; A$p)_7ꭥnΛfVcP\L_Hx[TI^_c HmM[h7-(U-yg %-}_4yݢ\Dm&ufZcIߕs[uFxbB ҩoX=)eH̰-j ~ [!p1ܚUIBt^w@^-j55lic vy$ p  )bꞬ`@P/!D@/4AAN#P48'ƥ A@Sn R0T_KTe"( |\ - ,nbPr .x-DqdT$}W-;T&2kyvIv̛>$ &`"( FKHXĒI]`V`[X]rI%@@aP ͠5$>׃/[DYWo^~wW@0wLanIFS55I풫^K@ܕY6K^[HU1 9dBXh+y&B \u-\!T\G"-ArZjjeVç>eEj+QZVhO]^ZcW.UlsDҭb_"H/yC&n ADHT @ITAJEDF I\+Dtlnлf]v@K 6*&K" 2EB3"\ pXˁuĻnATQ&Cl"9WRWl"3"ɍ0l)$i* M˔KaL nڱiK,.\,!2$*K02%KR7vK*] )%)$AE@@ I@)T  ]nV I@[bPTeBsg?|Z2\8mkzlԣYl?m"ZδYؠ]# /E ;M![O‰imY-aFAD@'Vc(I!9: 'ObISI'gͪw3:}==-o&U(9F^DS{l5o|5פHUL(cWumqz7ޘ"(H(5 tidyr/data/fish_encounters.rda0000644000176200001440000000062414360256575016223 0ustar liggesusersBZh91AY&SYZR>&g@)O5=TOM2O"j40Qz4``0DSCD 4#F`T.Ť`҅eT!$G&L(I-1%62P2FI"(dFa 41M@f+ ɘ?|mQMeKu(種FziUvjs2YJAF, )A9@I H  H#H \@@B@џbuyfخ*KkX. AvE ȭd 1_^?=!ӿնwixI4)T_'f!)ڤ Ɲc"Y7kqRD_.p!tidyr/data/household.rda0000644000176200001440000000050614360256575015016 0ustar liggesusersBZh91AY&SYeJJ8LX@ޠ@ LCSI=LAm!@ 4e@h@h )hl /W) \Q1J+ mjQ$L ,,c1u 4TAס}WQt-m˼wM(i@66 D%1޸3O,1Qbn= >1]( /+st,8; ̘=0sTJ8cΧ-wX MH$@Y@õMEk/TD*/b/ e!]BBQ-(tidyr/data/billboard.rda0000644000176200001440000002725414360256575014767 0ustar liggesusersBZh91AY&SY즩#xu"D$HoD$H#=w U(U7 ]z}JQ%U$EDRJT(d>:4:4.T)%':x@z@CFCA!i)馦hjzLO$@OD&THThiL2 M04TB)!<$@4  @i4d GSA4h h ?*1%b$줨HN4h. 
͖ eedqBVjbĬgB,6iJX%d-\H3.8M5CKdh %:"CV˅ZUU;)Tg;KU 9jS4&]"E`Zs:8ɷIsZk2ьiJ!$YA bݑeݝYZY*f!ȺaҪM ò3mمEIĮ\1mCaYi-D3̔E ѶGf;G.bje$U+DK9r$QY2p1ҙJ@#Xkv%-%KBJ#&Q&:#t6Yj́55;!lֳfY͸BEeQpڨ5[3lani*ĊȴM2Z.}^?oyeKl6TYgJͦiҟp!uT4 jm:sa ,Q#@shI)*k( g,Ȳx)بF*U'L×[hXc;k 9(39K+c],Ye͚U\2MЪ4l!RT% )H29a]-e*p|zy}ůwϭIV꼼hPޜJCc,'TǏsL?_'|M3PT?_#|?!e?;H䃟4P5f}k|)5D{1_iP; o̓% hvFT $+s(mw(k%J] \?O @ޖjym&i mT*cӠG&KBluFQamսQ>͞*W1Z%xLܽX7q! S#gV#jKGsJulvAT[Os;c>}!zձ_23"R$x„V3Zbͻ$ryexd#jǢiSRlx+?׺Һ鴙e9§Fs}Xe-`[`9͜6FP(zuJ05{\{";=wLSD܄̜D}z-/wSn(fkxgV7N6HTPcN-xJn٨JN㭒a9D4X!e(gcbNA0 }+l;wsEVQn(ŨU-L*Uo]fg\ߎ!;hQ1b;V޻ Q$yRt;A Y9S& A{ngX86aH?..+:b^ӦϰoɄvݪ}ðr*ꥆ5fg-#֚prA}2a%?X;^T5lA8H|:.:$-[9@" |v+*{-&b.RA`yuCcDimg t‚iŐ-hԁ:MDY;Jg ;o̓9 s_1ښ S-\ㇴ9nyP 5:­UPQAmU7:R,w`oVBsYzsM8KG_ER^zdG;\KY1?#4%,j/H`&2kNu썱jj#-XWg(Npā༪%B:lL.3 B8zGnٟF8 qbƹi)kY1Q[!+S)޺8Qx&΄w//&E2f3ϣ΍xl :c0j EEmM.tЄR.APNhIEo!dL.6YPtԉ)vtZD\D+*zU|FîUA)[{njJw򞩗1NӌhD(+& YTn T"&?ntUIoU.[Kxu;?:Ö`Iz@[&)PU\y| k)V2{Zֵ?@YeY~,6k[fP  kZ3 ZֵXT!)JRH@ 9qjR~Q;" 0;v{(b_g$D]]g#鈤+" Y H-'FF7x3 C4W4X8aEi(5%:Muj1 QbSq 0h dC9(3a92I3f(inBoICNSQ*DrRJwZ;PC 4™(Paeyc%J+Ùth!bn`:L # UhH24%Yљ٤$I&%H2y8ȠMwqIRT+*0ywmc'E 3HH9)8 \,v8шBYCn4[nr:5'v.&br$8PALJ;f:Qs4!+fv\%;Ddu+hr*LEie[1,ɶ[At &Uf \! YSf6f⎑dE )XT%I`iMnR53e& S6sCRI.VR(* P99A*MSt804X-xmAnKk-l8E;3jbE'H 1euZ@-K16NlJNFZBs(Kj5,x܇<4i2:#[nZY8̞Z,(j0[qUԒW8PWڲPdeLQX+M(U*5j\&-K5v rvZdN(!TEڵh65YfmLD:͋Yݍ;;*F3+fU];3ZPNRHr)39FjM ÈY&Vlf'FH4fȃSLKT'./6,DfMagYYE- MZYCaY3R\3!xJQ `d-k4ȘEgn8&Z#;fNs4(s*8jbʰUFfA&td67N fc &msx/5 49 98Rb-D҂DY"*EQP jxǚlҵn# K%FkNkY3ZܳrZE۳6MHAНmno7nm4K2c(ҠHC"MHqD(0YsD8,&-j6VvZskNA,CLC;D7bD[kde[c'Ck]5Cnm-M$1k tb#yyBŰI\dۃ3H"&\YJK-H6ɵk##ՖQْ[AT[#-HpsnZIK6y9Q̊dj#1#T +ynٛB1%ƈٙa[j"LĶ8Hն ݩA!MQCKG:wBt4.ȹQh%;"s3l͇[9 LS &X3GM(vKIf9HQDJ"Rjk5U2TwWy޺q$'#!ԧ%ĐAć9NN#!D}`3tyQ"j?T9"}/P~~"xŽ$F#}@.1$^P/DX`;Nh?5T)ź FDv컟 |C~-+^kEK͟Oy + Yyq_eʚXS+4డE/IM:Q!`=>Qw?~IObz!$qF4A$'a^?Xl7<Dik^gxE LohiLys"ƒ>}]9q~%C il,?'"('P7~V _&uX*YBB4yB֢fu,9@CR]Y@.CMpd K _[klX$iW88Ƭ:,U߿;KeC)TEtT4YPa1Xa*|EYg [n/4zkSZfy z՜U]07Yʃ0H. 
b1X븸Ŵ"m[MXL+J&Zʱ?/e6e΂/) HK@/lo`_|gOn2IE:mFC#>iO.?G^vi`;;t\PGȃGSw@t^#/A(-j_qR$/xM*ǧ|^e'iaקT^lC0 @yF +"tR$c( )?&r`jP(PAΤN`K8yG%@t 'Cd!U#̌v̍'9M}dm#U#I :Z^kۯaCCM'EH.!C#YtVgٶ#_'j+ߗd0R4bonx)]@ u3?(XoLR|h%ʏG_Tj+AE:?B)PW~vUJz<Ω1/)b0Woi+@t]; nSR;Pu4xD2 %w;>)-gdp=9] 9AUOȇ3荱 :Rp,qd$":k A]H$ Y}8Tk`8Lj0\<_H5);7Hb]8erQuoCC* ġC8$*U$_o~;UBYak\BVj@P"J1o>ra"z|^v3ĉH}{*pv-LPzC8 J1)!M @.6y Br^SvVH vׂ"CI؈w4pTSvR4"  Ş&VnI.BRCU!/` PJE!A!VJsQ4bb JY1U\¹ Bwe 1GY!Ep@U()􏍗]a"a1J>+s <@`R_sXҰ>Y8㡗2>Yg+RXT@J>`{{_)>^iD |d_Gjte)++|: @Jڜ5J!JixG_].eij)% w3Jg\jeB &I/ct_QHa3V{4[ɥ]ٲL3Ć]ʞ d(z~QQAo< !ȟX PMyǖqLF& $L$ os|}RpfgBu]Cj6]HsFnݬ,m tr :ʓx2yP鈟ET3hUz0ZU"aᱤD eJ ƚDiǿ4 [㤥 |-f2ȳGHj0a8"E_MtQZz 3lаőgTLT@$XJ::i^i-$e9,0](\ҨsU~y _ 96sF19ZAp"tVa ӥmV\Z+:;`+W*,(TXzD^aɰ9Õ:j NYL:q PbjTZXBb [Y ; $ȠP:pH)2")\׃q2hN2±:,GiM4^yY[^tZL2 乌{KK=i5|/8my_4Qz5 izKXpx6NW֣\vkӈo٩PtSۧ]ͭ=a o|x]q;wz4ae{k\d7^8nJ0m?^zqQ|uҋzRONQ̺WZxҸf8jno]wymϞ{e4myzqjĖ-P[Іw\:v~ϢʺrWѻ-Lqo2e9Vl4/O6\;g1ɚg%pX2\u /m0\3S'.m|j{ N眭V=amvksӮϧ:akLxY|y83A}~6׶˗:j>wE8sҸcM;j'qӯFя^\MWOSQNGF%0ʪVlfy4+&' rDn&MI;)h.2fY"FCv"JdI;C4':Pu4 VJ2(qFu F )3HČL(,(&$@i]d]CCoFf3Uph2gp'T2k$VzBMHu+F5rHVzR (&]^H4ճVhl{3eSPQoU`V}U)„e5Htidyr/man/0000755000176200001440000000000014553565525012177 5ustar liggesuserstidyr/man/separate_longer_delim.Rd0000644000176200001440000000427414332223160016776 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/separate-longer.R \name{separate_longer_delim} \alias{separate_longer_delim} \alias{separate_longer_position} \title{Split a string into rows} \usage{ separate_longer_delim(data, cols, delim, ...) separate_longer_position(data, cols, width, ..., keep_empty = FALSE) } \arguments{ \item{data}{A data frame.} \item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to separate.} \item{delim}{For \code{separate_longer_delim()}, a string giving the delimiter between values. 
By default, it is interpreted as a fixed string; use \code{\link[stringr:modifiers]{stringr::regex()}} and friends to split in other ways.} \item{...}{These dots are for future extensions and must be empty.} \item{width}{For \code{separate_longer_position()}, an integer giving the number of characters to split by.} \item{keep_empty}{By default, you'll get \code{ceiling(nchar(x) / width)} rows for each observation. If \code{nchar(x)} is zero, this means the entire input row will be dropped from the output. If you want to preserve all rows, use \code{keep_empty = TRUE} to replace size-0 elements with a missing value.} } \value{ A data frame based on \code{data}. It has the same columns, but different rows. } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}} Each of these functions takes a string and splits it into multiple rows: \itemize{ \item \code{separate_longer_delim()} splits by a delimiter. \item \code{separate_longer_position()} splits by a fixed width. 
} } \examples{ df <- tibble(id = 1:4, x = c("x", "x y", "x y z", NA)) df \%>\% separate_longer_delim(x, delim = " ") # You can separate multiple columns at once if they have the same structure df <- tibble(id = 1:3, x = c("x", "x y", "x y z"), y = c("a", "a b", "a b c")) df \%>\% separate_longer_delim(c(x, y), delim = " ") # Or instead split by a fixed length df <- tibble(id = 1:3, x = c("ab", "def", "")) df \%>\% separate_longer_position(x, 1) df \%>\% separate_longer_position(x, 2) df \%>\% separate_longer_position(x, 2, keep_empty = TRUE) } tidyr/man/unite.Rd0000644000176200001440000000301014321316017013563 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unite.R \name{unite} \alias{unite} \title{Unite multiple columns into one by pasting strings together} \usage{ unite(data, col, ..., sep = "_", remove = TRUE, na.rm = FALSE) } \arguments{ \item{data}{A data frame.} \item{col}{The name of the new column, as a string or symbol. This argument is passed by expression and supports \link[rlang:topic-inject]{quasiquotation} (you can unquote strings and symbols). The name is captured from the expression with \code{\link[rlang:defusing-advanced]{rlang::ensym()}} (note that this kind of interface where symbols do not represent actual objects is now discouraged in the tidyverse; we support it here for backward compatibility).} \item{...}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to unite} \item{sep}{Separator to use between values.} \item{remove}{If \code{TRUE}, remove input columns from output data frame.} \item{na.rm}{If \code{TRUE}, missing values will be removed prior to uniting each value.} } \description{ Convenience function to paste together multiple columns into one. 
} \examples{ df <- expand_grid(x = c("a", NA), y = c("b", NA)) df df \%>\% unite("z", x:y, remove = FALSE) # To remove missing values: df \%>\% unite("z", x:y, na.rm = TRUE, remove = FALSE) # Separate is almost the complement of unite df \%>\% unite("xy", x:y) \%>\% separate(xy, c("x", "y")) # (but note `x` and `y` contain now "NA" not NA) } \seealso{ \code{\link[=separate]{separate()}}, the complement. } tidyr/man/expand.Rd0000644000176200001440000001126714363516001013734 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expand.R \name{expand} \alias{expand} \alias{crossing} \alias{nesting} \title{Expand data frame to include all possible combinations of values} \usage{ expand(data, ..., .name_repair = "check_unique") crossing(..., .name_repair = "check_unique") nesting(..., .name_repair = "check_unique") } \arguments{ \item{data}{A data frame.} \item{...}{<\code{\link[=tidyr_data_masking]{data-masking}}> Specification of columns to expand or complete. Columns can be atomic vectors or lists. \itemize{ \item To find all unique combinations of \code{x}, \code{y} and \code{z}, including those not present in the data, supply each variable as a separate argument: \code{expand(df, x, y, z)} or \code{complete(df, x, y, z)}. \item To find only the combinations that occur in the data, use \code{nesting}: \code{expand(df, nesting(x, y, z))}. \item You can combine the two forms. For example, \code{expand(df, nesting(school_id, student_id), date)} would produce a row for each present school-student combination for all possible dates. } When used with factors, \code{\link[=expand]{expand()}} and \code{\link[=complete]{complete()}} use the full set of levels, not just those that appear in the data. If you want to use only the values seen in the data, use \code{forcats::fct_drop()}. 
When used with continuous variables, you may need to fill in values that do not appear in the data: to do so use expressions like \code{year = 2010:2020} or \code{year = full_seq(year,1)}.} \item{.name_repair}{Treatment of problematic column names: \itemize{ \item \code{"minimal"}: No name repair or checks, beyond basic existence, \item \code{"unique"}: Make sure names are unique and not empty, \item \code{"check_unique"}: (default value), no name repair, but check they are \code{unique}, \item \code{"universal"}: Make the names \code{unique} and syntactic \item a function: apply custom name repair (e.g., \code{.name_repair = make.names} for names in the style of base R). \item A purrr-style anonymous function, see \code{\link[rlang:as_function]{rlang::as_function()}} } This argument is passed on as \code{repair} to \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}}. See there for more details on these terms and the strategies used to enforce them.} } \description{ \code{expand()} generates all combination of variables found in a dataset. It is paired with \code{nesting()} and \code{crossing()} helpers. \code{crossing()} is a wrapper around \code{\link[=expand_grid]{expand_grid()}} that de-duplicates and sorts its inputs; \code{nesting()} is a helper that only finds combinations already present in the data. \code{expand()} is often useful in conjunction with joins: \itemize{ \item use it with \code{right_join()} to convert implicit missing values to explicit missing values (e.g., fill in gaps in your data frame). \item use it with \code{anti_join()} to figure out which combinations are missing (e.g., identify gaps in your data frame). } } \section{Grouped data frames}{ With grouped data frames created by \code{\link[dplyr:group_by]{dplyr::group_by()}}, \code{expand()} operates \emph{within} each group. Because of this, you cannot expand on a grouping column. 
} \examples{ # Finding combinations ------------------------------------------------------ fruits <- tibble( type = c("apple", "orange", "apple", "orange", "orange", "orange"), year = c(2010, 2010, 2012, 2010, 2011, 2012), size = factor( c("XS", "S", "M", "S", "S", "M"), levels = c("XS", "S", "M", "L") ), weights = rnorm(6, as.numeric(size) + 2) ) # All combinations, including factor levels that are not used fruits \%>\% expand(type) fruits \%>\% expand(size) fruits \%>\% expand(type, size) fruits \%>\% expand(type, size, year) # Only combinations that already appear in the data fruits \%>\% expand(nesting(type)) fruits \%>\% expand(nesting(size)) fruits \%>\% expand(nesting(type, size)) fruits \%>\% expand(nesting(type, size, year)) # Other uses ---------------------------------------------------------------- # Use with `full_seq()` to fill in values of continuous variables fruits \%>\% expand(type, size, full_seq(year, 1)) fruits \%>\% expand(type, size, 2010:2013) # Use `anti_join()` to determine which observations are missing all <- fruits \%>\% expand(type, size, year) all all \%>\% dplyr::anti_join(fruits) # Use with `right_join()` to fill in missing rows (like `complete()`) fruits \%>\% dplyr::right_join(all) # Use with `group_by()` to expand within each group fruits \%>\% dplyr::group_by(type) \%>\% expand(year, size) } \seealso{ \code{\link[=complete]{complete()}} to expand list objects. \code{\link[=expand_grid]{expand_grid()}} to input vectors rather than a data frame. 
} tidyr/man/fill.Rd0000644000176200001440000000667614357015307013421 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fill.R \name{fill} \alias{fill} \title{Fill in missing values with previous or next value} \usage{ fill(data, ..., .direction = c("down", "up", "downup", "updown")) } \arguments{ \item{data}{A data frame.} \item{...}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to fill.} \item{.direction}{Direction in which to fill missing values. Currently either "down" (the default), "up", "downup" (i.e. first down and then up) or "updown" (first up and then down).} } \description{ Fills missing values in selected columns using the next or previous entry. This is useful in the common output format where values are not repeated, and are only recorded when they change. } \details{ Missing values are replaced in atomic vectors; \code{NULL}s are replaced in lists. } \section{Grouped data frames}{ With grouped data frames created by \code{\link[dplyr:group_by]{dplyr::group_by()}}, \code{fill()} will be applied \emph{within} each group, meaning that it won't fill across group boundaries. 
} \examples{ # direction = "down" -------------------------------------------------------- # Value (year) is recorded only when it changes sales <- tibble::tribble( ~quarter, ~year, ~sales, "Q1", 2000, 66013, "Q2", NA, 69182, "Q3", NA, 53175, "Q4", NA, 21001, "Q1", 2001, 46036, "Q2", NA, 58842, "Q3", NA, 44568, "Q4", NA, 50197, "Q1", 2002, 39113, "Q2", NA, 41668, "Q3", NA, 30144, "Q4", NA, 52897, "Q1", 2004, 32129, "Q2", NA, 67686, "Q3", NA, 31768, "Q4", NA, 49094 ) # `fill()` defaults to replacing missing data from top to bottom sales \%>\% fill(year) # direction = "up" ---------------------------------------------------------- # Value (pet_type) is missing above tidy_pets <- tibble::tribble( ~rank, ~pet_type, ~breed, 1L, NA, "Boston Terrier", 2L, NA, "Retrievers (Labrador)", 3L, NA, "Retrievers (Golden)", 4L, NA, "French Bulldogs", 5L, NA, "Bulldogs", 6L, "Dog", "Beagles", 1L, NA, "Persian", 2L, NA, "Maine Coon", 3L, NA, "Ragdoll", 4L, NA, "Exotic", 5L, NA, "Siamese", 6L, "Cat", "American Short" ) # For values that are missing above you can use `.direction = "up"` tidy_pets \%>\% fill(pet_type, .direction = "up") # direction = "downup" ------------------------------------------------------ # Value (n_squirrels) is missing above and below within a group squirrels <- tibble::tribble( ~group, ~name, ~role, ~n_squirrels, 1, "Sam", "Observer", NA, 1, "Mara", "Scorekeeper", 8, 1, "Jesse", "Observer", NA, 1, "Tom", "Observer", NA, 2, "Mike", "Observer", NA, 2, "Rachael", "Observer", NA, 2, "Sydekea", "Scorekeeper", 14, 2, "Gabriela", "Observer", NA, 3, "Derrick", "Observer", NA, 3, "Kara", "Scorekeeper", 9, 3, "Emily", "Observer", NA, 3, "Danielle", "Observer", NA ) # The values are inconsistently missing by position within the group # Use .direction = "downup" to fill missing values in both directions squirrels \%>\% dplyr::group_by(group) \%>\% fill(n_squirrels, .direction = "downup") \%>\% dplyr::ungroup() # Using `.direction = "updown"` accomplishes the same goal in 
this example } tidyr/man/nest.Rd0000644000176200001440000001227114363516001013422 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nest.R \name{nest} \alias{nest} \title{Nest rows into a list-column of data frames} \usage{ nest(.data, ..., .by = NULL, .key = NULL, .names_sep = NULL) } \arguments{ \item{.data}{A data frame.} \item{...}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to nest; these will appear in the inner data frames. Specified using name-variable pairs of the form \code{new_col = c(col1, col2, col3)}. The right hand side can be any valid tidyselect expression. If not supplied, then \code{...} is derived as all columns \emph{not} selected by \code{.by}, and will use the column name from \code{.key}. \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}: previously you could write \code{df \%>\% nest(x, y, z)}. Convert to \code{df \%>\% nest(data = c(x, y, z))}.} \item{.by}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to nest \emph{by}; these will remain in the outer data frame. \code{.by} can be used in place of or in conjunction with columns supplied through \code{...}. If not supplied, then \code{.by} is derived as all columns \emph{not} selected by \code{...}.} \item{.key}{The name of the resulting nested column. Only applicable when \code{...} isn't specified, i.e. in the case of \code{df \%>\% nest(.by = x)}. If \code{NULL}, then \code{"data"} will be used by default.} \item{.names_sep}{If \code{NULL}, the default, the inner names will come from the former outer names. If a string, the new inner names will use the outer names with \code{names_sep} automatically stripped. This makes \code{names_sep} roughly symmetric between nesting and unnesting.} } \description{ Nesting creates a list-column of data frames; unnesting flattens it back out into regular columns. 
Nesting is implicitly a summarising operation: you get one row for each group defined by the non-nested columns. This is useful in conjunction with other summaries that work with whole datasets, most notably models. Learn more in \code{vignette("nest")}. } \details{ If neither \code{...} nor \code{.by} are supplied, \code{nest()} will nest all variables, and will use the column name supplied through \code{.key}. } \section{New syntax}{ tidyr 1.0.0 introduced a new syntax for \code{nest()} and \code{unnest()} that's designed to be more similar to other functions. Converting to the new syntax should be straightforward (guided by the message you'll receive) but if you just need to run an old analysis, you can easily revert to the previous behaviour using \code{\link[=nest_legacy]{nest_legacy()}} and \code{\link[=unnest_legacy]{unnest_legacy()}} as follows: \if{html}{\out{
}}\preformatted{library(tidyr) nest <- nest_legacy unnest <- unnest_legacy }\if{html}{\out{
}} } \section{Grouped data frames}{ \code{df \%>\% nest(data = c(x, y))} specifies the columns to be nested; i.e. the columns that will appear in the inner data frame. \code{df \%>\% nest(.by = c(x, y))} specifies the columns to nest \emph{by}; i.e. the columns that will remain in the outer data frame. An alternative way to achieve the latter is to \code{nest()} a grouped data frame created by \code{\link[dplyr:group_by]{dplyr::group_by()}}. The grouping variables remain in the outer data frame and the others are nested. The result preserves the grouping of the input. Variables supplied to \code{nest()} will override grouping variables so that \code{df \%>\% group_by(x, y) \%>\% nest(data = !z)} will be equivalent to \code{df \%>\% nest(data = !z)}. You can't supply \code{.by} with a grouped data frame, as the groups already represent what you are nesting by. } \examples{ df <- tibble(x = c(1, 1, 1, 2, 2, 3), y = 1:6, z = 6:1) # Specify variables to nest using name-variable pairs. # Note that we get one row of output for each unique combination of # non-nested variables. df \%>\% nest(data = c(y, z)) # Specify variables to nest by (rather than variables to nest) using `.by` df \%>\% nest(.by = x) # In this case, since `...` isn't used you can specify the resulting column # name with `.key` df \%>\% nest(.by = x, .key = "cols") # Use tidyselect syntax and helpers, just like in `dplyr::select()` df \%>\% nest(data = any_of(c("y", "z"))) # `...` and `.by` can be used together to drop columns you no longer need, # or to include the columns you are nesting by in the inner data frame too. 
# This drops `z`: df \%>\% nest(data = y, .by = x) # This includes `x` in the inner data frame: df \%>\% nest(data = everything(), .by = x) # Multiple nesting structures can be specified at once iris \%>\% nest(petal = starts_with("Petal"), sepal = starts_with("Sepal")) iris \%>\% nest(width = contains("Width"), length = contains("Length")) # Nesting a grouped data frame nests all variables apart from the group vars fish_encounters \%>\% dplyr::group_by(fish) \%>\% nest() # That is similar to `nest(.by = )`, except here the result isn't grouped fish_encounters \%>\% nest(.by = fish) # Nesting is often useful for creating per group models mtcars \%>\% nest(.by = cyl) \%>\% dplyr::mutate(models = lapply(data, function(df) lm(mpg ~ wt, data = df))) } tidyr/man/fish_encounters.Rd0000644000176200001440000000154614013466035015655 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{fish_encounters} \alias{fish_encounters} \title{Fish encounters} \format{ A dataset with variables: \describe{ \item{fish}{Fish identifier} \item{station}{Measurement station} \item{seen}{Was the fish seen? (1 if yes, and true for all rows)} } } \source{ Dataset provided by Myfanwy Johnston; more details at \url{https://fishsciences.github.io/post/visualizing-fish-encounter-histories/} } \usage{ fish_encounters } \description{ Information about fish swimming down a river: each station represents an autonomous monitor that records if a tagged fish was seen at that location. Fish travel in one direction (migrating downstream). Information about misses is just as important as hits, but is not directly recorded in this form of the data. 
} \keyword{datasets} tidyr/man/separate_rows.Rd0000644000176200001440000000270214332223160015322 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/separate-rows.R \name{separate_rows} \alias{separate_rows} \title{Separate a collapsed column into multiple rows} \usage{ separate_rows(data, ..., sep = "[^[:alnum:].]+", convert = FALSE) } \arguments{ \item{data}{A data frame.} \item{...}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to separate across multiple rows} \item{sep}{Separator delimiting collapsed values.} \item{convert}{If \code{TRUE} will automatically run \code{\link[=type.convert]{type.convert()}} on the key column. This is useful if the column types are actually numeric, integer, or logical.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} \code{separate_rows()} has been superseded in favour of \code{\link[=separate_longer_delim]{separate_longer_delim()}} because it has a more consistent API with other separate functions. Superseded functions will not go away, but will only receive critical bug fixes. If a variable contains observations with multiple delimited values, \code{separate_rows()} separates the values and places each one in its own row. 
} \examples{ df <- tibble( x = 1:3, y = c("a", "d,e,f", "g,h"), z = c("1", "2,3,4", "5,6") ) separate_rows(df, y, z, convert = TRUE) # Now recommended df \%>\% separate_longer_delim(c(y, z), delim = ",") } tidyr/man/unnest_wider.Rd0000644000176200001440000001210114363516001015147 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unnest-wider.R \name{unnest_wider} \alias{unnest_wider} \title{Unnest a list-column into columns} \usage{ unnest_wider( data, col, names_sep = NULL, simplify = TRUE, strict = FALSE, names_repair = "check_unique", ptype = NULL, transform = NULL ) } \arguments{ \item{data}{A data frame.} \item{col}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> List-column(s) to unnest. When selecting multiple columns, values from the same row will be recycled to their common size.} \item{names_sep}{If \code{NULL}, the default, the names will be left as is. If a string, the outer and inner names will be pasted together using \code{names_sep} as a separator. If any values being unnested are unnamed, then \code{names_sep} must be supplied, otherwise an error is thrown. When \code{names_sep} is supplied, names are automatically generated for unnamed values as an increasing sequence of integers.} \item{simplify}{If \code{TRUE}, will attempt to simplify lists of length-1 vectors to an atomic vector. Can also be a named list containing \code{TRUE} or \code{FALSE} declaring whether or not to attempt to simplify a particular column. If a named list is provided, the default for any unspecified columns is \code{TRUE}.} \item{strict}{A single logical specifying whether or not to apply strict vctrs typing rules. If \code{FALSE}, typed empty values (like \code{list()} or \code{integer()}) nested within list-columns will be treated like \code{NULL} and will not contribute to the type of the unnested column. 
This is useful when working with JSON, where empty values tend to lose their type information and show up as \code{list()}.} \item{names_repair}{Used to check that output data frame has valid names. Must be one of the following options: \itemize{ \item \verb{"minimal}": no name repair or checks, beyond basic existence, \item \verb{"unique}": make sure names are unique and not empty, \item \verb{"check_unique}": (the default), no name repair, but check they are unique, \item \verb{"universal}": make the names unique and syntactic \item a function: apply custom name repair. \item \link{tidyr_legacy}: use the name repair from tidyr 0.8. \item a formula: a purrr-style anonymous function (see \code{\link[rlang:as_function]{rlang::as_function()}}) } See \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}} for more details on these terms and the strategies used to enforce them.} \item{ptype}{Optionally, a named list of prototypes declaring the desired output type of each component. Alternatively, a single empty prototype can be supplied, which will be applied to all components. Use this argument if you want to check that each element has the type you expect when simplifying. If a \code{ptype} has been specified, but \code{simplify = FALSE} or simplification isn't possible, then a \link[vctrs:list_of]{list-of} column will be returned and each element will have type \code{ptype}.} \item{transform}{Optionally, a named list of transformation functions applied to each component. Alternatively, a single function can be supplied, which will be applied to all components. Use this argument if you want to transform or parse individual elements as they are extracted. When both \code{ptype} and \code{transform} are supplied, the \code{transform} is applied before the \code{ptype}.} } \description{ \code{unnest_wider()} turns each element of a list-column into a column. 
It is most naturally suited to list-columns where every element is named, and the names are consistent from row-to-row. \code{unnest_wider()} preserves the rows of \code{x} while modifying the columns. Learn more in \code{vignette("rectangle")}. } \examples{ df <- tibble( character = c("Toothless", "Dory"), metadata = list( list( species = "dragon", color = "black", films = c( "How to Train Your Dragon", "How to Train Your Dragon 2", "How to Train Your Dragon: The Hidden World" ) ), list( species = "blue tang", color = "blue", films = c("Finding Nemo", "Finding Dory") ) ) ) df # Turn all components of metadata into columns df \%>\% unnest_wider(metadata) # Choose not to simplify list-cols of length-1 elements df \%>\% unnest_wider(metadata, simplify = FALSE) df \%>\% unnest_wider(metadata, simplify = list(color = FALSE)) # You can also widen unnamed list-cols: df <- tibble( x = 1:3, y = list(NULL, 1:3, 4:5) ) # but you must supply `names_sep` to do so, which generates automatic names: df \%>\% unnest_wider(y, names_sep = "_") # 0-length elements --------------------------------------------------------- # The defaults of `unnest_wider()` treat empty types (like `list()`) as `NULL`. 
json <- list( list(x = 1:2, y = 1:2), list(x = list(), y = 3:4), list(x = 3L, y = list()) ) df <- tibble(json = json) df \%>\% unnest_wider(json) # To instead enforce strict vctrs typing rules, use `strict` df \%>\% unnest_wider(json, strict = TRUE) } \seealso{ Other rectangling: \code{\link{hoist}()}, \code{\link{unnest_longer}()}, \code{\link{unnest}()} } \concept{rectangling} tidyr/man/relig_income.Rd0000644000176200001440000000110414363604046015105 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{relig_income} \alias{relig_income} \title{Pew religion and income survey} \format{ A dataset with variables: \describe{ \item{religion}{Name of religion} \item{\verb{<$10k}-\verb{Don\\'t know/refused}}{Number of respondees with income range in column name} } } \source{ Downloaded from \url{https://www.pewresearch.org/religion/religious-landscape-study/} (downloaded November 2009) } \usage{ relig_income } \description{ Pew religion and income survey } \keyword{datasets} tidyr/man/construction.Rd0000644000176200001440000000132114013466035015200 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{construction} \alias{construction} \title{Completed construction in the US in 2018} \format{ A dataset with variables: \describe{ \item{Year,Month}{Record date} \item{\verb{1 unit}, \verb{2 to 4 units}, \verb{5 units or mote}}{Number of completed units of each size} \item{Northeast,Midwest,South,West}{Number of completed units in each region} } } \source{ Completions of "New Residential Construction" found in Table 5 at \url{https://www.census.gov/construction/nrc/xls/newresconst.xls} (downloaded March 2019) } \usage{ construction } \description{ Completed construction in the US in 2018 } \keyword{datasets} tidyr/man/full_seq.Rd0000644000176200001440000000124314013466035014263 0ustar liggesusers% Generated by roxygen2: do not edit by 
hand % Please edit documentation in R/seq.R \name{full_seq} \alias{full_seq} \title{Create the full sequence of values in a vector} \usage{ full_seq(x, period, tol = 1e-06) } \arguments{ \item{x}{A numeric vector.} \item{period}{Gap between each observation. The existing data will be checked to ensure that it is actually of this periodicity.} \item{tol}{Numerical tolerance for checking periodicity.} } \description{ This is useful if you want to fill in missing values that should have been observed but weren't. For example, \code{full_seq(c(1, 2, 4, 6), 1)} will return \code{1:6}. } \examples{ full_seq(c(1, 2, 4, 5, 10), 1) } tidyr/man/cms_patient_experience.Rd0000644000176200001440000000402214315413441017162 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{cms_patient_experience} \alias{cms_patient_experience} \alias{cms_patient_care} \title{Data from the Centers for Medicare & Medicaid Services} \format{ \code{cms_patient_experience} is a data frame with 500 observations and five variables: \describe{ \item{org_pac_id,org_nm}{Organisation ID and name} \item{measure_cd,measure_title}{Measure code and title} \item{prf_rate}{Measure performance rate} } \code{cms_patient_care} is a data frame with 252 observations and five variables: \describe{ \item{ccn,facility_name}{Facility ID and name} \item{measure_abbr}{Abbreviated measurement title, suitable for use as variable name} \item{score}{Measure score} \item{type}{Whether score refers to the rating out of 100 ("observed"), or the maximum possible value of the raw score ("denominator")} } } \usage{ cms_patient_experience cms_patient_care } \description{ Two datasets from public data provided the Centers for Medicare & Medicaid Services, \url{https://data.cms.gov}. 
\itemize{ \item \code{cms_patient_experience} contains some lightly cleaned data from "Hospice - Provider Data", which provides a list of hospice agencies along with some data on quality of patient care, \url{https://data.cms.gov/provider-data/dataset/252m-zfp9}. \item \code{cms_patient_care} contains data from "Doctors and Clinicians Quality Payment Program PY 2020 Virtual Group Public Reporting", \url{https://data.cms.gov/provider-data/dataset/8c70-d353} } } \examples{ cms_patient_experience \%>\% dplyr::distinct(measure_cd, measure_title) cms_patient_experience \%>\% pivot_wider( id_cols = starts_with("org"), names_from = measure_cd, values_from = prf_rate ) cms_patient_care \%>\% pivot_wider( names_from = type, values_from = score ) cms_patient_care \%>\% pivot_wider( names_from = measure_abbr, values_from = score ) cms_patient_care \%>\% pivot_wider( names_from = c(measure_abbr, type), values_from = score ) } \keyword{datasets} tidyr/man/drop_na.Rd0000644000176200001440000000156314325573777014116 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/drop-na.R \name{drop_na} \alias{drop_na} \title{Drop rows containing missing values} \usage{ drop_na(data, ...) } \arguments{ \item{data}{A data frame.} \item{...}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to inspect for missing values. If empty, all columns are used.} } \description{ \code{drop_na()} drops rows where any column specified by \code{...} contains a missing value. } \details{ Another way to interpret \code{drop_na()} is that it only keeps the "complete" rows (where no values are missing). Internally, this completeness is computed through \code{\link[vctrs:vec_detect_complete]{vctrs::vec_detect_complete()}}.
} \examples{ df <- tibble(x = c(1, 2, NA), y = c("a", NA, "b")) df \%>\% drop_na() df \%>\% drop_na(x) vars <- "y" df \%>\% drop_na(x, any_of(vars)) } tidyr/man/chop.Rd0000644000176200001440000000732114360013543013402 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/chop.R \name{chop} \alias{chop} \alias{unchop} \title{Chop and unchop} \usage{ chop(data, cols, ..., error_call = current_env()) unchop( data, cols, ..., keep_empty = FALSE, ptype = NULL, error_call = current_env() ) } \arguments{ \item{data}{A data frame.} \item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to chop or unchop. For \code{unchop()}, each column should be a list-column containing generalised vectors (e.g. any mix of \code{NULL}s, atomic vectors, S3 vectors, lists, or data frames).} \item{...}{These dots are for future extensions and must be empty.} \item{error_call}{The execution environment of a currently running function, e.g. \code{caller_env()}. The function will be mentioned in error messages as the source of the error. See the \code{call} argument of \code{\link[rlang:abort]{abort()}} for more information.} \item{keep_empty}{By default, you get one row of output for each element of the list that you are unchopping/unnesting. This means that if there's a size-0 element (like \code{NULL} or an empty data frame or vector), then that entire row will be dropped from the output. If you want to preserve all rows, use \code{keep_empty = TRUE} to replace size-0 elements with a single row of missing values.} \item{ptype}{Optionally, a named list of column name-prototype pairs to coerce \code{cols} to, overriding the default that will be guessed from combining the individual values. Alternatively, a single empty ptype can be supplied, which will be applied to all \code{cols}.} } \description{ Chopping and unchopping preserve the width of a data frame, changing its length.
\code{chop()} makes \code{df} shorter by converting rows within each group into list-columns. \code{unchop()} makes \code{df} longer by expanding list-columns so that each element of the list-column gets its own row in the output. \code{chop()} and \code{unchop()} are building blocks for more complicated functions (like \code{\link[=unnest]{unnest()}}, \code{\link[=unnest_longer]{unnest_longer()}}, and \code{\link[=unnest_wider]{unnest_wider()}}) and are generally more suitable for programming than interactive data analysis. } \details{ Generally, unchopping is more useful than chopping because it simplifies a complex data structure, and \code{\link[=nest]{nest()}}ing is usually more appropriate than \code{chop()}ing since it better preserves the connections between observations. \code{chop()} creates list-columns of class \code{\link[vctrs:list_of]{vctrs::list_of()}} to ensure consistent behaviour when the chopped data frame is emptied. For instance this helps getting back the original column types after the roundtrip chop and unchop. Because \verb{} keeps tracks of the type of its elements, \code{unchop()} is able to reconstitute the correct vector type even for empty list-columns. 
} \examples{ # Chop ---------------------------------------------------------------------- df <- tibble(x = c(1, 1, 1, 2, 2, 3), y = 1:6, z = 6:1) # Note that we get one row of output for each unique combination of # non-chopped variables df \%>\% chop(c(y, z)) # cf nest df \%>\% nest(data = c(y, z)) # Unchop -------------------------------------------------------------------- df <- tibble(x = 1:4, y = list(integer(), 1L, 1:2, 1:3)) df \%>\% unchop(y) df \%>\% unchop(y, keep_empty = TRUE) # unchop will error if the types are not compatible: df <- tibble(x = 1:2, y = list("1", 1:3)) try(df \%>\% unchop(y)) # Unchopping a list-col of data frames must generate a df-col because # unchop leaves the column names unchanged df <- tibble(x = 1:3, y = list(NULL, tibble(x = 1), tibble(y = 1:2))) df \%>\% unchop(y) df \%>\% unchop(y, keep_empty = TRUE) } tidyr/man/extract.Rd0000644000176200001440000000421614332223160014120 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract.R \name{extract} \alias{extract} \title{Extract a character column into multiple columns using regular expression groups} \usage{ extract( data, col, into, regex = "([[:alnum:]]+)", remove = TRUE, convert = FALSE, ... ) } \arguments{ \item{data}{A data frame.} \item{col}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Column to expand.} \item{into}{Names of new variables to create as character vector. Use \code{NA} to omit the variable in the output.} \item{regex}{A string representing a regular expression used to extract the desired values. There should be one group (defined by \verb{()}) for each element of \code{into}.} \item{remove}{If \code{TRUE}, remove input column from output data frame.} \item{convert}{If \code{TRUE}, will run \code{\link[=type.convert]{type.convert()}} with \code{as.is = TRUE} on new columns. This is useful if the component columns are integer, numeric or logical. 
NB: this will cause string \code{"NA"}s to be converted to \code{NA}s.} \item{...}{Additional arguments passed on to methods.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} \code{extract()} has been superseded in favour of \code{\link[=separate_wider_regex]{separate_wider_regex()}} because it has a more polished API and better handling of problems. Superseded functions will not go away, but will only receive critical bug fixes. Given a regular expression with capturing groups, \code{extract()} turns each group into a new column. If the groups don't match, or the input is NA, the output will be NA. } \examples{ df <- tibble(x = c(NA, "a-b", "a-d", "b-c", "d-e")) df \%>\% extract(x, "A") df \%>\% extract(x, c("A", "B"), "([[:alnum:]]+)-([[:alnum:]]+)") # Now recommended df \%>\% separate_wider_regex( x, patterns = c(A = "[[:alnum:]]+", "-", B = "[[:alnum:]]+") ) # If no match, NA: df \%>\% extract(x, c("A", "B"), "([a-d]+)-([a-d]+)") } \seealso{ \code{\link[=separate]{separate()}} to split up by a separator. 
} tidyr/man/pivot_longer.Rd0000644000176200001440000001515714357015307015174 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pivot-long.R \name{pivot_longer} \alias{pivot_longer} \title{Pivot data from wide to long} \usage{ pivot_longer( data, cols, ..., cols_vary = "fastest", names_to = "name", names_prefix = NULL, names_sep = NULL, names_pattern = NULL, names_ptypes = NULL, names_transform = NULL, names_repair = "check_unique", values_to = "value", values_drop_na = FALSE, values_ptypes = NULL, values_transform = NULL ) } \arguments{ \item{data}{A data frame to pivot.} \item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to pivot into longer format.} \item{...}{Additional arguments passed on to methods.} \item{cols_vary}{When pivoting \code{cols} into longer format, how should the output rows be arranged relative to their original row number? \itemize{ \item \code{"fastest"}, the default, keeps individual rows from \code{cols} close together in the output. This often produces intuitively ordered output when you have at least one key column from \code{data} that is not involved in the pivoting process. \item \code{"slowest"} keeps individual columns from \code{cols} close together in the output. This often produces intuitively ordered output when you utilize all of the columns from \code{data} in the pivoting process. }} \item{names_to}{A character vector specifying the new column or columns to create from the information stored in the column names of \code{data} specified by \code{cols}. \itemize{ \item If length 0, or if \code{NULL} is supplied, no columns will be created. \item If length 1, a single column will be created which will contain the column names specified by \code{cols}. \item If length >1, multiple columns will be created. In this case, one of \code{names_sep} or \code{names_pattern} must be supplied to specify how the column names should be split. 
There are also two additional character values you can take advantage of: \itemize{ \item \code{NA} will discard the corresponding component of the column name. \item \code{".value"} indicates that the corresponding component of the column name defines the name of the output column containing the cell values, overriding \code{values_to} entirely. } }} \item{names_prefix}{A regular expression used to remove matching text from the start of each variable name.} \item{names_sep, names_pattern}{If \code{names_to} contains multiple values, these arguments control how the column name is broken up. \code{names_sep} takes the same specification as \code{\link[=separate]{separate()}}, and can either be a numeric vector (specifying positions to break on), or a single string (specifying a regular expression to split on). \code{names_pattern} takes the same specification as \code{\link[=extract]{extract()}}, a regular expression containing matching groups (\verb{()}). If these arguments do not give you enough control, use \code{pivot_longer_spec()} to create a spec object and process manually as needed.} \item{names_ptypes, values_ptypes}{Optionally, a list of column name-prototype pairs. Alternatively, a single empty prototype can be supplied, which will be applied to all columns. A prototype (or ptype for short) is a zero-length vector (like \code{integer()} or \code{numeric()}) that defines the type, class, and attributes of a vector. Use these arguments if you want to confirm that the created columns are the types that you expect. Note that if you want to change (instead of confirm) the types of specific columns, you should use \code{names_transform} or \code{values_transform} instead.} \item{names_transform, values_transform}{Optionally, a list of column name-function pairs. Alternatively, a single function can be supplied, which will be applied to all columns. Use these arguments if you need to change the types of specific columns. 
For example, \code{names_transform = list(week = as.integer)} would convert a character variable called \code{week} to an integer. If not specified, the type of the columns generated from \code{names_to} will be character, and the type of the variables generated from \code{values_to} will be the common type of the input columns used to generate them.} \item{names_repair}{What happens if the output has invalid column names? The default, \code{"check_unique"} is to error if the columns are duplicated. Use \code{"minimal"} to allow duplicates in the output, or \code{"unique"} to de-duplicate by adding numeric suffixes. See \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}} for more options.} \item{values_to}{A string specifying the name of the column to create from the data stored in cell values. If \code{names_to} is a character containing the special \code{.value} sentinel, this value will be ignored, and the name of the value column will be derived from part of the existing column names.} \item{values_drop_na}{If \code{TRUE}, will drop rows that contain only \code{NA}s in the \code{values_to} column. This effectively converts explicit missing values to implicit missing values, and should generally be used only when missing values in \code{data} were created by its structure.} } \description{ \code{pivot_longer()} "lengthens" data, increasing the number of rows and decreasing the number of columns. The inverse transformation is \code{\link[=pivot_wider]{pivot_wider()}}. Learn more in \code{vignette("pivot")}. } \details{ \code{pivot_longer()} is an updated approach to \code{\link[=gather]{gather()}}, designed to be both simpler to use and to handle more use cases. We recommend you use \code{pivot_longer()} for new code; \code{gather()} isn't going away but is no longer under active development.
} \examples{ # See vignette("pivot") for examples and explanation # Simplest case where column names are character data relig_income relig_income \%>\% pivot_longer(!religion, names_to = "income", values_to = "count") # Slightly more complex case where columns have common prefix, # and missing values are structural so should be dropped. billboard billboard \%>\% pivot_longer( cols = starts_with("wk"), names_to = "week", names_prefix = "wk", values_to = "rank", values_drop_na = TRUE ) # Multiple variables stored in column names who \%>\% pivot_longer( cols = new_sp_m014:newrel_f65, names_to = c("diagnosis", "gender", "age"), names_pattern = "new_?(.*)_(.)(.*)", values_to = "count" ) # Multiple observations per row. Since all columns are used in the pivoting # process, we'll use `cols_vary` to keep values from the original columns # close together in the output. anscombe anscombe \%>\% pivot_longer( everything(), cols_vary = "slowest", names_to = c(".value", "set"), names_pattern = "(.)(.)" ) } tidyr/man/tidyr_data_masking.Rd0000644000176200001440000000527114325573777016325 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/doc-params.R \name{tidyr_data_masking} \alias{tidyr_data_masking} \title{Argument type: data-masking} \description{ This page describes the \verb{<data-masking>} argument modifier which indicates that the argument uses \strong{data masking}, a sub-type of tidy evaluation. If you've never heard of tidy evaluation before, start with the practical introduction in \url{https://r4ds.hadley.nz/functions.html#data-frame-functions} and then read more about the underlying theory in \url{https://rlang.r-lib.org/reference/topic-data-mask.html}. } \section{Key techniques}{ \itemize{ \item To allow the user to supply the column name in a function argument, embrace the argument, e.g. \code{filter(df, {{ var }})}. \if{html}{\out{
}}\preformatted{dist_summary <- function(df, var) \{ df \%>\% summarise(n = n(), min = min(\{\{ var \}\}), max = max(\{\{ var \}\})) \} mtcars \%>\% dist_summary(mpg) mtcars \%>\% group_by(cyl) \%>\% dist_summary(mpg) }\if{html}{\out{
}} \item To work with a column name recorded as a string, use the \code{.data} pronoun, e.g. \code{summarise(df, mean = mean(.data[[var]]))}. \if{html}{\out{
}}\preformatted{for (var in names(mtcars)) \{ mtcars \%>\% count(.data[[var]]) \%>\% print() \} lapply(names(mtcars), function(var) mtcars \%>\% count(.data[[var]])) }\if{html}{\out{
}} \item To suppress \verb{R CMD check} \code{NOTE}s about unknown variables use \code{.data$var} instead of \code{var}: \if{html}{\out{
}}\preformatted{# has NOTE df \%>\% mutate(z = x + y) # no NOTE df \%>\% mutate(z = .data$x + .data$y) }\if{html}{\out{
}} You'll also need to import \code{.data} from rlang with (e.g.) \verb{@importFrom rlang .data}. } } \section{Dot-dot-dot (...)}{ \code{...} automatically provides indirection, so you can use it as is (i.e. without embracing) inside a function: \if{html}{\out{
}}\preformatted{grouped_mean <- function(df, var, ...) \{ df \%>\% group_by(...) \%>\% summarise(mean = mean(\{\{ var \}\})) \} }\if{html}{\out{
}} You can also use \verb{:=} instead of \code{=} to enable a glue-like syntax for creating variables from user supplied data: \if{html}{\out{
}}\preformatted{var_name <- "l100km" mtcars \%>\% mutate("\{var_name\}" := 235 / mpg) summarise_mean <- function(df, var) \{ df \%>\% summarise("mean_of_\{\{var\}\}" := mean(\{\{ var \}\})) \} mtcars \%>\% group_by(cyl) \%>\% summarise_mean(mpg) }\if{html}{\out{
}} Learn more in \url{https://rlang.r-lib.org/reference/topic-data-mask-programming.html}. } \keyword{internal} tidyr/man/us_rent_income.Rd0000644000176200001440000000111014013466035015453 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{us_rent_income} \alias{us_rent_income} \title{US rent and income data} \format{ A dataset with variables: \describe{ \item{GEOID}{FIP state identifier} \item{NAME}{Name of state} \item{variable}{Variable name: income = median yearly income, rent = median monthly rent} \item{estimate}{Estimated value} \item{moe}{90\% margin of error} } } \usage{ us_rent_income } \description{ Captured from the 2017 American Community Survey using the tidycensus package. } \keyword{datasets} tidyr/man/complete.Rd0000644000176200001440000000706614363516001014267 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/complete.R \name{complete} \alias{complete} \title{Complete a data frame with missing combinations of data} \usage{ complete(data, ..., fill = list(), explicit = TRUE) } \arguments{ \item{data}{A data frame.} \item{...}{<\code{\link[=tidyr_data_masking]{data-masking}}> Specification of columns to expand or complete. Columns can be atomic vectors or lists. \itemize{ \item To find all unique combinations of \code{x}, \code{y} and \code{z}, including those not present in the data, supply each variable as a separate argument: \code{expand(df, x, y, z)} or \code{complete(df, x, y, z)}. \item To find only the combinations that occur in the data, use \code{nesting}: \code{expand(df, nesting(x, y, z))}. \item You can combine the two forms. For example, \code{expand(df, nesting(school_id, student_id), date)} would produce a row for each present school-student combination for all possible dates. 
} When used with factors, \code{\link[=expand]{expand()}} and \code{\link[=complete]{complete()}} use the full set of levels, not just those that appear in the data. If you want to use only the values seen in the data, use \code{forcats::fct_drop()}. When used with continuous variables, you may need to fill in values that do not appear in the data: to do so use expressions like \code{year = 2010:2020} or \code{year = full_seq(year,1)}.} \item{fill}{A named list that for each variable supplies a single value to use instead of \code{NA} for missing combinations.} \item{explicit}{Should both implicit (newly created) and explicit (pre-existing) missing values be filled by \code{fill}? By default, this is \code{TRUE}, but if set to \code{FALSE} this will limit the fill to only implicit missing values.} } \description{ Turns implicit missing values into explicit missing values. This is a wrapper around \code{\link[=expand]{expand()}}, \code{\link[dplyr:mutate-joins]{dplyr::full_join()}} and \code{\link[=replace_na]{replace_na()}} that's useful for completing missing combinations of data. } \section{Grouped data frames}{ With grouped data frames created by \code{\link[dplyr:group_by]{dplyr::group_by()}}, \code{complete()} operates \emph{within} each group. Because of this, you cannot complete a grouping column. 
} \examples{ df <- tibble( group = c(1:2, 1, 2), item_id = c(1:2, 2, 3), item_name = c("a", "a", "b", "b"), value1 = c(1, NA, 3, 4), value2 = 4:7 ) df # Combinations -------------------------------------------------------------- # Generate all possible combinations of `group`, `item_id`, and `item_name` # (whether or not they appear in the data) df \%>\% complete(group, item_id, item_name) # Cross all possible `group` values with the unique pairs of # `(item_id, item_name)` that already exist in the data df \%>\% complete(group, nesting(item_id, item_name)) # Within each `group`, generate all possible combinations of # `item_id` and `item_name` that occur in that group df \%>\% dplyr::group_by(group) \%>\% complete(item_id, item_name) # Supplying values for new rows --------------------------------------------- # Use `fill` to replace NAs with some value. By default, affects both new # (implicit) and pre-existing (explicit) missing values. df \%>\% complete( group, nesting(item_id, item_name), fill = list(value1 = 0, value2 = 99) ) # Limit the fill to only the newly created (i.e. previously implicit) # missing values with `explicit = FALSE` df \%>\% complete( group, nesting(item_id, item_name), fill = list(value1 = 0, value2 = 99), explicit = FALSE ) } tidyr/man/who.Rd0000644000176200001440000000353114315413441013246 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{who} \alias{who} \alias{who2} \alias{population} \title{World Health Organization TB data} \format{ \subsection{\code{who}}{ A data frame with 7,240 rows and 60 columns: \describe{ \item{country}{Country name} \item{iso2, iso3}{2 & 3 letter ISO country codes} \item{year}{Year} \item{new_sp_m014 - new_rel_f65}{Counts of new TB cases recorded by group. Column names encode three variables that describe the group.} } } \subsection{\code{who2}}{ A data frame with 7,240 rows and 58 columns. 
} \subsection{\code{population}}{ A data frame with 4,060 rows and three columns: \describe{ \item{country}{Country name} \item{year}{Year} \item{population}{Population} } } } \source{ \url{https://www.who.int/teams/global-tuberculosis-programme/data} } \usage{ who who2 population } \description{ A subset of data from the World Health Organization Global Tuberculosis Report, and accompanying global populations. \code{who} uses the original codes from the World Health Organization. The column names for columns 5 through 60 are made by combining \code{new_} with: \itemize{ \item the method of diagnosis (\code{rel} = relapse, \code{sn} = negative pulmonary smear, \code{sp} = positive pulmonary smear, \code{ep} = extrapulmonary), \item gender (\code{f} = female, \code{m} = male), and \item age group (\code{014} = 0-14 yrs of age, \code{1524} = 15-24, \code{2534} = 25-34, \code{3544} = 35-44 years of age, \code{4554} = 45-54, \code{5564} = 55-64, \code{65} = 65 years or older). } \code{who2} is a lightly modified version that makes teaching the basics easier by tweaking the variables to be slightly more consistent and dropping \code{iso2} and \code{iso3}. \code{newrel} is replaced by \code{new_rel}, and a \verb{_} is added after the gender. 
} \keyword{datasets} tidyr/man/world_bank_pop.Rd0000644000176200001440000000130414520546617015457 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{world_bank_pop} \alias{world_bank_pop} \title{Population data from the World Bank} \format{ A dataset with variables: \describe{ \item{country}{Three letter country code} \item{indicator}{Indicator name: \code{SP.POP.GROW} = population growth, \code{SP.POP.TOTL} = total population, \code{SP.URB.GROW} = urban population growth, \code{SP.URB.TOTL} = total urban population} \item{2000-2018}{Value for each year} } } \source{ Dataset from the World Bank data bank: \url{https://data.worldbank.org} } \usage{ world_bank_pop } \description{ Data about population from the World Bank. } \keyword{datasets} tidyr/man/replace_na.Rd0000644000176200001440000000305014315413441014536 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/replace_na.R \name{replace_na} \alias{replace_na} \title{Replace NAs with specified values} \usage{ replace_na(data, replace, ...) } \arguments{ \item{data}{A data frame or vector.} \item{replace}{If \code{data} is a data frame, \code{replace} takes a named list of values, with one value for each column that has missing values to be replaced. Each value in \code{replace} will be cast to the type of the column in \code{data} that it being used as a replacement in. If \code{data} is a vector, \code{replace} takes a single value. This single value replaces all of the missing values in the vector. \code{replace} will be cast to the type of \code{data}.} \item{...}{Additional arguments for methods. Currently unused.} } \value{ \code{replace_na()} returns an object with the same type as \code{data}. 
\code{\link[dplyr:coalesce]{dplyr::coalesce()}} to replace \code{NA}s with values from other vectors.
\item{values_drop_na}{If \code{TRUE}, will drop rows that contain only \code{NA}s in the \code{values_to} column.
\itemize{ \item If length 0, or if \code{NULL} is supplied, no columns will be created. \item If length 1, a single column will be created which will contain the column names specified by \code{cols}. \item If length >1, multiple columns will be created. In this case, one of \code{names_sep} or \code{names_pattern} must be supplied to specify how the column names should be split. There are also two additional character values you can take advantage of: \itemize{ \item \code{NA} will discard the corresponding component of the column name. \item \code{".value"} indicates that the corresponding component of the column name defines the name of the output column containing the cell values, overriding \code{values_to} entirely. } }} \item{values_to}{A string specifying the name of the column to create from the data stored in cell values. If \code{names_to} is a character containing the special \code{.value} sentinel, this value will be ignored, and the name of the value column will be derived from part of the existing column names.} \item{names_prefix}{A regular expression used to remove matching text from the start of each variable name.} \item{names_sep, names_pattern}{If \code{names_to} contains multiple values, these arguments control how the column name is broken up. \code{names_sep} takes the same specification as \code{\link[=separate]{separate()}}, and can either be a numeric vector (specifying positions to break on), or a single string (specifying a regular expression to split on). \code{names_pattern} takes the same specification as \code{\link[=extract]{extract()}}, a regular expression containing matching groups (\verb{()}). If these arguments do not give you enough control, use \code{pivot_longer_spec()} to create a spec object and process manually as needed.} \item{names_ptypes, values_ptypes}{Optionally, a list of column name-prototype pairs. Alternatively, a single empty prototype can be supplied, which will be applied to all columns. 
A prototype (or ptype for short) is a zero-length vector (like \code{integer()} or \code{numeric()}) that defines the type, class, and attributes of a vector. Use these arguments if you want to confirm that the created columns are the types that you expect. Note that if you want to change (instead of confirm) the types of specific columns, you should use \code{names_transform} or \code{values_transform} instead.} \item{names_transform, values_transform}{Optionally, a list of column name-function pairs. Alternatively, a single function can be supplied, which will be applied to all columns. Use these arguments if you need to change the types of specific columns. For example, \code{names_transform = list(week = as.integer)} would convert a character variable called \code{week} to an integer. If not specified, the type of the columns generated from \code{names_to} will be character, and the type of the variables generated from \code{values_to} will be the common type of the input columns used to generate them.} } \description{ This is a low level interface to pivoting, inspired by the cdata package, that allows you to describe pivoting with a data frame. } \examples{ # See vignette("pivot") for examples and explanation # Use `build_longer_spec()` to build `spec` using similar syntax to `pivot_longer()` # and run `pivot_longer_spec()` based on `spec`. 
spec <- relig_income \%>\% build_longer_spec( cols = !religion, names_to = "income", values_to = "count" ) spec pivot_longer_spec(relig_income, spec) # Is equivalent to: relig_income \%>\% pivot_longer( cols = !religion, names_to = "income", values_to = "count" ) } \keyword{internal} tidyr/man/deprecated-se.Rd0000644000176200001440000001271714553565525015203 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dep-lazyeval.R \name{deprecated-se} \alias{deprecated-se} \alias{complete_} \alias{drop_na_} \alias{expand_} \alias{crossing_} \alias{nesting_} \alias{extract_} \alias{fill_} \alias{gather_} \alias{nest_} \alias{separate_rows_} \alias{separate_} \alias{spread_} \alias{unite_} \alias{unnest_} \title{Deprecated SE versions of main verbs} \usage{ complete_(data, cols, fill = list(), ...) drop_na_(data, vars) expand_(data, dots, ...) crossing_(x) nesting_(x) extract_( data, col, into, regex = "([[:alnum:]]+)", remove = TRUE, convert = FALSE, ... ) fill_(data, fill_cols, .direction = c("down", "up")) gather_( data, key_col, value_col, gather_cols, na.rm = FALSE, convert = FALSE, factor_key = FALSE ) nest_(...) separate_rows_(data, cols, sep = "[^[:alnum:].]+", convert = FALSE) separate_( data, col, into, sep = "[^[:alnum:]]+", remove = TRUE, convert = FALSE, extra = "warn", fill = "warn", ... ) spread_( data, key_col, value_col, fill = NA, convert = FALSE, drop = TRUE, sep = NULL ) unite_(data, col, from, sep = "_", remove = TRUE) unnest_(...) } \arguments{ \item{data}{A data frame} \item{fill}{A named list that for each variable supplies a single value to use instead of \code{NA} for missing combinations.} \item{...}{<\code{\link[=tidyr_data_masking]{data-masking}}> Specification of columns to expand or complete. Columns can be atomic vectors or lists. 
to do so use expressions like \code{year = 2010:2020} or \code{year = full_seq(year, 1)}.}
first down and then up) or "updown" (first up and then down).} \item{key_col, value_col}{Strings giving names of key and value cols.} \item{gather_cols}{Character vector giving column names to be gathered into pair of key-value columns.} \item{na.rm}{If \code{TRUE}, will remove rows from output where the value column is \code{NA}.} \item{factor_key}{If \code{FALSE}, the default, the key values will be stored as a character vector. If \code{TRUE}, will be stored as a factor, which preserves the original ordering of the columns.} \item{sep}{Separator delimiting collapsed values.} \item{extra}{If \code{sep} is a character vector, this controls what happens when there are too many pieces. There are three valid options: \itemize{ \item \code{"warn"} (the default): emit a warning and drop extra values. \item \code{"drop"}: drop any extra values without a warning. \item \code{"merge"}: only splits at most \code{length(into)} times }} \item{drop}{If \code{FALSE}, will keep factor levels that don't appear in the data, filling in missing combinations with \code{fill}.} \item{from}{Names of existing columns as character vector} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} tidyr used to offer twin versions of each verb suffixed with an underscore. These versions had standard evaluation (SE) semantics: rather than taking arguments by code, like NSE verbs, they took arguments by value. Their purpose was to make it possible to program with tidyr. However, tidyr now uses tidy evaluation semantics. NSE verbs still capture their arguments, but you can now unquote parts of these arguments. This offers full programmability with NSE verbs. Thus, the underscored versions are now superfluous. Unquoting triggers immediate evaluation of its operand and inlines the result within the captured expression. 
This result can be a value or an expression to be evaluated later with the rest of the argument. See \code{vignette("programming", "dplyr")} for more information. } \keyword{internal} tidyr/man/separate_wider_delim.Rd0000644000176200001440000002023314335231102016611 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/separate-wider.R \name{separate_wider_delim} \alias{separate_wider_delim} \alias{separate_wider_position} \alias{separate_wider_regex} \title{Split a string into columns} \usage{ separate_wider_delim( data, cols, delim, ..., names = NULL, names_sep = NULL, names_repair = "check_unique", too_few = c("error", "debug", "align_start", "align_end"), too_many = c("error", "debug", "drop", "merge"), cols_remove = TRUE ) separate_wider_position( data, cols, widths, ..., names_sep = NULL, names_repair = "check_unique", too_few = c("error", "debug", "align_start"), too_many = c("error", "debug", "drop"), cols_remove = TRUE ) separate_wider_regex( data, cols, patterns, ..., names_sep = NULL, names_repair = "check_unique", too_few = c("error", "debug", "align_start"), cols_remove = TRUE ) } \arguments{ \item{data}{A data frame.} \item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to separate.} \item{delim}{For \code{separate_wider_delim()}, a string giving the delimiter between values. By default, it is interpreted as a fixed string; use \code{\link[stringr:modifiers]{stringr::regex()}} and friends to split in other ways.} \item{...}{These dots are for future extensions and must be empty.} \item{names}{For \code{separate_wider_delim()}, a character vector of output column names. Use \code{NA} if there are components that you don't want to appear in the output; the number of non-\code{NA} elements determines the number of new columns in the result.} \item{names_sep}{If supplied, output names will be composed of the input column name followed by the separator followed by the new column name. 
\item \code{"debug"} adds additional columns to the output to help you locate and resolve the underlying problem. This option is intended to help you debug and address the issue, and should not generally remain in your final code.
\item \code{"merge"} (\code{separate_wider_delim()} only) will merge together any additional pieces. }} \item{cols_remove}{Should the input \code{cols} be removed from the output? Always \code{FALSE} if \code{too_few} or \code{too_many} are set to \code{"debug"}.} \item{widths}{A named numeric vector where the names become column names, and the values specify the column width. Unnamed components will match, but not be included in the output.} \item{patterns}{A named character vector where the names become column names and the values are regular expressions that match the contents of the vector. Unnamed components will match, but not be included in the output.} } \value{ A data frame based on \code{data}. It has the same rows, but different columns: \itemize{ \item The primary purpose of the functions are to create new columns from components of the string. For \code{separate_wider_delim()} the names of new columns come from \code{names}. For \code{separate_wider_position()} the names come from the names of \code{widths}. For \code{separate_wider_regex()} the names come from the names of \code{patterns}. \item If \code{too_few} or \code{too_many} is \code{"debug"}, the output will contain additional columns useful for debugging: \itemize{ \item \verb{\{col\}_ok}: a logical vector which tells you if the input was ok or not. Use to quickly find the problematic rows. \item \verb{\{col\}_remainder}: any text remaining after separation. \item \verb{\{col\}_pieces}, \verb{\{col\}_width}, \verb{\{col\}_matches}: number of pieces, number of characters, and number of matches for \code{separate_wider_delim()}, \code{separate_wider_position()} and \code{separate_regexp_wider()} respectively. } \item If \code{cols_remove = TRUE} (the default), the input \code{cols} will be removed from the output. 
} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}} Each of these functions takes a string column and splits it into multiple new columns: \itemize{ \item \code{separate_wider_delim()} splits by delimiter. \item \code{separate_wider_position()} splits at fixed widths. \item \code{separate_wider_regex()} splits with regular expression matches. } These functions are equivalent to \code{\link[=separate]{separate()}} and \code{\link[=extract]{extract()}}, but use \href{https://stringr.tidyverse.org/}{stringr} as the underlying string manipulation engine, and their interfaces reflect what we've learned from \code{\link[=unnest_wider]{unnest_wider()}} and \code{\link[=unnest_longer]{unnest_longer()}}. } \examples{ df <- tibble(id = 1:3, x = c("m-123", "f-455", "f-123")) # There are three basic ways to split up a string into pieces: # 1. with a delimiter df \%>\% separate_wider_delim(x, delim = "-", names = c("gender", "unit")) # 2. by length df \%>\% separate_wider_position(x, c(gender = 1, 1, unit = 3)) # 3. defining each component with a regular expression df \%>\% separate_wider_regex(x, c(gender = ".", ".", unit = "\\\\d+")) # Sometimes you split on the "last" delimiter df <- tibble(var = c("race_1", "race_2", "age_bucket_1", "age_bucket_2")) # _delim won't help because it always splits on the first delimiter try(df \%>\% separate_wider_delim(var, "_", names = c("var1", "var2"))) df \%>\% separate_wider_delim(var, "_", names = c("var1", "var2"), too_many = "merge") # Instead, you can use _regex df \%>\% separate_wider_regex(var, c(var1 = ".*", "_", var2 = ".*")) # this works because * is greedy; you can mimic the _delim behaviour with .*? 
df \%>\% separate_wider_regex(var, c(var1 = ".*?", "_", var2 = ".*")) # If the number of components varies, it's most natural to split into rows df <- tibble(id = 1:4, x = c("x", "x y", "x y z", NA)) df \%>\% separate_longer_delim(x, delim = " ") # But separate_wider_delim() provides some tools to deal with the problem # The default behaviour tells you that there's a problem try(df \%>\% separate_wider_delim(x, delim = " ", names = c("a", "b"))) # You can get additional insight by using the debug options df \%>\% separate_wider_delim( x, delim = " ", names = c("a", "b"), too_few = "debug", too_many = "debug" ) # But you can suppress the warnings df \%>\% separate_wider_delim( x, delim = " ", names = c("a", "b"), too_few = "align_start", too_many = "merge" ) # Or choose to automatically name the columns, producing as many as needed df \%>\% separate_wider_delim(x, delim = " ", names_sep = "", too_few = "align_start") } tidyr/man/pivot_wider_spec.Rd0000644000176200001440000001571614357015307016033 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pivot-wide.R \name{pivot_wider_spec} \alias{pivot_wider_spec} \alias{build_wider_spec} \title{Pivot data from long to wide using a spec} \usage{ pivot_wider_spec( data, spec, ..., names_repair = "check_unique", id_cols = NULL, id_expand = FALSE, values_fill = NULL, values_fn = NULL, unused_fn = NULL, error_call = current_env() ) build_wider_spec( data, ..., names_from = name, values_from = value, names_prefix = "", names_sep = "_", names_glue = NULL, names_sort = FALSE, names_vary = "fastest", names_expand = FALSE, error_call = current_env() ) } \arguments{ \item{data}{A data frame to pivot.} \item{spec}{A specification data frame. This is useful for more complex pivots because it gives you greater control on how metadata stored in the columns become column names in the result. Must be a data frame containing character \code{.name} and \code{.value} columns. 
Use \code{"minimal"} to allow duplicates in the output, or \code{"unique"} to de-duplicate by adding numeric suffixes.
If \code{values_from} contains multiple values, the value will be added to the front of the output column name.
This is particularly useful if \code{names_from} is a numeric vector and you want to create syntactic variable names.} \item{names_sep}{If \code{names_from} or \code{values_from} contains multiple variables, this will be used to join their values together into a single string to use as a column name.} \item{names_glue}{Instead of \code{names_sep} and \code{names_prefix}, you can supply a glue specification that uses the \code{names_from} columns (and special \code{.value}) to create custom column names.} \item{names_sort}{Should the column names be sorted? If \code{FALSE}, the default, column names are ordered by first appearance.} \item{names_vary}{When \code{names_from} identifies a column (or columns) with multiple unique values, and multiple \code{values_from} columns are provided, in what order should the resulting column names be combined? \itemize{ \item \code{"fastest"} varies \code{names_from} values fastest, resulting in a column naming scheme of the form: \verb{value1_name1, value1_name2, value2_name1, value2_name2}. This is the default. \item \code{"slowest"} varies \code{names_from} values slowest, resulting in a column naming scheme of the form: \verb{value1_name1, value2_name1, value1_name2, value2_name2}. }} \item{names_expand}{Should the values in the \code{names_from} columns be expanded by \code{\link[=expand]{expand()}} before pivoting? This results in more columns, the output will contain column names corresponding to a complete expansion of all possible values in \code{names_from}. Implicit factor levels that aren't represented in the data will become explicit. Additionally, the column names will be sorted, identical to what \code{names_sort} would produce.} } \description{ This is a low level interface to pivoting, inspired by the cdata package, that allows you to describe pivoting with a data frame. 
\item{prefix}{Prefix to use for unnamed columns}
Only use this function if you want to preserve the naming strategy; otherwise you're better off adopting the new tidyverse standard with \code{name_repair = "universal"}
This is most useful if the list column is named.} \item{.sep}{If non-\code{NULL}, the names of unnested data frame columns will combine the name of the original list-col with the names from the nested data frame, separated by \code{.sep}.} \item{.preserve}{Optionally, list-columns to preserve in the output. These will be duplicated in the same way as atomic vectors. This has \code{\link[dplyr:select]{dplyr::select()}} semantics so you can preserve multiple variables with \code{.preserve = c(x, y)} or \code{.preserve = starts_with("list")}.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} tidyr 1.0.0 introduced a new syntax for \code{\link[=nest]{nest()}} and \code{\link[=unnest]{unnest()}}. The majority of existing usage should be automatically translated to the new syntax with a warning. However, if you need to quickly roll back to the previous behaviour, these functions provide the previous interface. To make old code work as is, add the following code to the top of your script: \if{html}{\out{
}}\preformatted{library(tidyr) nest <- nest_legacy unnest <- unnest_legacy }\if{html}{\out{
}} } \examples{ # Nest and unnest are inverses df <- tibble(x = c(1, 1, 2), y = 3:1) df \%>\% nest_legacy(y) df \%>\% nest_legacy(y) \%>\% unnest_legacy() # nesting ------------------------------------------------------------------- as_tibble(iris) \%>\% nest_legacy(!Species) as_tibble(chickwts) \%>\% nest_legacy(weight) # unnesting ----------------------------------------------------------------- df <- tibble( x = 1:2, y = list( tibble(z = 1), tibble(z = 3:4) ) ) df \%>\% unnest_legacy(y) # You can also unnest multiple columns simultaneously df <- tibble( a = list(c("a", "b"), "c"), b = list(1:2, 3), c = c(11, 22) ) df \%>\% unnest_legacy(a, b) # If you omit the column names, it'll unnest all list-cols df \%>\% unnest_legacy() } tidyr/man/smiths.Rd0000644000176200001440000000050614013466035013761 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{smiths} \alias{smiths} \title{Some data about the Smith family} \format{ A data frame with 2 rows and 5 columns. } \usage{ smiths } \description{ A small demo dataset describing John and Mary Smith. } \keyword{datasets} tidyr/man/pack.Rd0000644000176200001440000000754514360013543013377 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pack.R \name{pack} \alias{pack} \alias{unpack} \title{Pack and unpack} \usage{ pack(.data, ..., .names_sep = NULL, .error_call = current_env()) unpack( data, cols, ..., names_sep = NULL, names_repair = "check_unique", error_call = current_env() ) } \arguments{ \item{...}{For \code{pack()}, <\code{\link[=tidyr_tidy_select]{tidy-select}}> columns to pack, specified using name-variable pairs of the form \code{new_col = c(col1, col2, col3)}. The right hand side can be any valid tidy select expression. 
For \code{unpack()}, these dots are for future extensions and must be empty.} \item{data, .data}{A data frame.} \item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to unpack.} \item{names_sep, .names_sep}{If \code{NULL}, the default, the names will be left as is. In \code{pack()}, inner names will come from the former outer names; in \code{unpack()}, the new outer names will come from the inner names. If a string, the inner and outer names will be used together. In \code{unpack()}, the names of the new outer columns will be formed by pasting together the outer and the inner column names, separated by \code{names_sep}. In \code{pack()}, the new inner names will have the outer names + \code{names_sep} automatically stripped. This makes \code{names_sep} roughly symmetric between packing and unpacking.} \item{names_repair}{Used to check that output data frame has valid names. Must be one of the following options: \itemize{ \item \verb{"minimal}": no name repair or checks, beyond basic existence, \item \verb{"unique}": make sure names are unique and not empty, \item \verb{"check_unique}": (the default), no name repair, but check they are unique, \item \verb{"universal}": make the names unique and syntactic \item a function: apply custom name repair. \item \link{tidyr_legacy}: use the name repair from tidyr 0.8. \item a formula: a purrr-style anonymous function (see \code{\link[rlang:as_function]{rlang::as_function()}}) } See \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}} for more details on these terms and the strategies used to enforce them.} \item{error_call, .error_call}{The execution environment of a currently running function, e.g. \code{caller_env()}. The function will be mentioned in error messages as the source of the error. See the \code{call} argument of \code{\link[rlang:abort]{abort()}} for more information.} } \description{ Packing and unpacking preserve the length of a data frame, changing its width. 
\code{pack()} makes \code{df} narrow by collapsing a set of columns into a single df-column. \code{unpack()} makes \code{data} wider by expanding df-columns back out into individual columns. } \details{ Generally, unpacking is more useful than packing because it simplifies a complex data structure. Currently, few functions work with df-cols, and they are mostly a curiosity, but seem worth exploring further because they mimic the nested column headers that are so popular in Excel. } \examples{ # Packing ------------------------------------------------------------------- # It's not currently clear why you would ever want to pack columns # since few functions work with this sort of data. df <- tibble(x1 = 1:3, x2 = 4:6, x3 = 7:9, y = 1:3) df df \%>\% pack(x = starts_with("x")) df \%>\% pack(x = c(x1, x2, x3), y = y) # .names_sep allows you to strip off common prefixes; this # acts as a natural inverse to name_sep in unpack() iris \%>\% as_tibble() \%>\% pack( Sepal = starts_with("Sepal"), Petal = starts_with("Petal"), .names_sep = "." 
) # Unpacking ----------------------------------------------------------------- df <- tibble( x = 1:3, y = tibble(a = 1:3, b = 3:1), z = tibble(X = c("a", "b", "c"), Y = runif(3), Z = c(TRUE, FALSE, NA)) ) df df \%>\% unpack(y) df \%>\% unpack(c(y, z)) df \%>\% unpack(c(y, z), names_sep = "_") } tidyr/man/figures/0000755000176200001440000000000014520546620013630 5ustar liggesuserstidyr/man/figures/lifecycle-defunct.svg0000644000176200001440000000170414013466035017736 0ustar liggesuserslifecyclelifecycledefunctdefunct tidyr/man/figures/lifecycle-maturing.svg0000644000176200001440000000170614013466035020136 0ustar liggesuserslifecyclelifecyclematuringmaturing tidyr/man/figures/logo.png0000644000176200001440000013212214520546620015277 0ustar liggesusersPNG  IHDRޫh cHRMz&u0`:pQ<bKGDtIME !||IDATxwxu ܍F7r̙3$3IDe˲mYNdI#~^7Q$˲ײl+XV&'Mf&DyBwn$gģ aoFm/{ӵ>vn W`Q]n a| 8p*ްv41WJ۵>v~m;u`偛 ܌3߰vî5 xf ̭n {[a+nIs/ݰ7VW -~-a7솭6_X;f͡ۯ߰FZO\=f-a7`N}@kbvnXS[A=h"h1]h1 a7,e]Ƹ@]k -&tݰkmhs\{`u,Z~nدШOQ5KJzQo [khoQ5W+|-W"߰[1XlĩFQ<ɵ: 7dkn74Y !ロD^Co@?Z|Vnƶxq]wo><BҗZtLLL\%yS؍xum?Z殻?۷oj*RJ`xxx-Oކ2~?pZ/n0jVʀwpx6V;wrA:::P)%RJH)1ʹsqJMV?wttcjjꋕ6333/@UUK^gZ(ºu{ٺu+V)(*^sPk'O|9)s:tzpvWmD˴~'w޽{q݆7Lxz c7\XOp뭷rwPSScʀIyX~=frrrtFkk+)f߳!;w;zd7\9˷q0v綵-sE!p}YFGF:::p~zzzDYCCx |;1B9wO=/_FU5]$!555tuuQ]]M4e``>"UUUtuuQSS,8d2<|M c7XK['oujew}x^y5uS"J)gĉD5_r:;;ilԸ*CCC0??OYY۷oH gpp0Eg3_n8U@m)f3y{8p6h4Juuro>&''9s b1, BO<=kHvVڰLOO.q:5Eb1]*V| -UI~<#k~}oLwRJEE6 UUX,sba֭{lܸz@J ٳgfوe 9sATU5z3껪299Iww7333֣cރVҥKe'nX3F{p244M7hϦئMG?jbf>Rbhhhʕ+kHBdIJxnBTTTEmm-&S&o~~FGGI&kiӦM?URÇR_i 0ZFGGI$FպDV(0ո\.F~zL&#]msݴ܌(,N[܌fY0 DJ:l3NRʻ4:t2={Ѹ˻V ø\.TUUvWp8RR^^Nee&%heB<_Elގ`vv0LtttNYְR?x<|2^R:!Ŀ|̯"lAr0o&W)%n80==M}}=RJv;uuuڮr177k1LՅ% q9Iee8wzz&''KhACCN/l E)kiiy+_ڧݯ#U64߯2P($H)Zsҥ?3H0::֭[sSSSA<CWUUtvvRWWG2{<nJ}}=%>>I$%nB[[f+ǝnRJ ?l_F' W oA$###޽[DVuu58qbYwwdd$CЀd"L2?? 
^Hd]\.iiil6311Aww7v6mDKK;b8fpp&:::z\mF07:7;ˀwrg###$Iw*N={pA, W\afff# N'~h48]]]ZR"jDGG 3BZ[[蠼Ȍs-yB;~mmmz,p뤑5_>O>~YlngAwՄaN1;;jd2Z~v;RJ<~9Yzi)KUVVDx"b $@ǹ%4hoox9sc.djjʈ5}H9cl,Ο9_ad~W333|>#Ǥ$I\.& UUls%?SOdf$ʼnRS>KI)0d~nM6ظ( 144Trjqs2<<\-l}swO߅e`o&נ 8P шIr davQT6166͛nB!3v\v8FkZ6l@kk@F87 6늊 ..G" HvVb2 u˗7hn\2d2;w6lbB]=[CCUc~~Y#p8\2 .2|2?&&&Y_***̶Z.՛SxZطmtBv?. \gjh ee~km&͛7(A:>>N$12Ք-҅ zMI  311AWWAor._\̯5q:KŹqN/,.+]`Ѐ+%:>,>s/I=0`oTw~d~WZZZ8x 7tÈAzKgV-x<`ǃ%Q}Y^}U"0B*GX,Ff[gi.u0ش F}ցCw7 /`gsۻſC3I> qFp(+f3d`Qee%o"`9 SSSFH{zzr~oF!>̑#G2btoIS:;;**΍F WV k@NopjzDt5V4ZCF64ìрi̖-[صk?Ou`s=477G2+nS__Oooھ},ذa"{Ǐ4F(с1d~CCC$ee~===LOOLx3Ohmmm=11am׍]OŷϺ0LX ½B 8rZYV:d7|>|{(HOOd|>SSS̪awX,ƙ3gxꩧ)//GUUv;'N̨tjZn7466.%36=SSSJBv۝.|Z{VL ܹ lV2\\S$FWqWI&LNnb0|/Z|p]oCk h_KK wccc:rQ{9.\,"Anܸ1x3gOsʕEI7HDPsO?4N"SYYI,3(.%8f edd [F@ (?##w!>x?C!rj%JŅa:~tc(83:m`>Ц-5Z>б(++{ocǎe$.]ĥK455q/%JU~pɼuxyl$ )nx΁p8}$zvjocccX,::: ,gΜaF7 8.d~(y˓pƤJ,^]>a2Vs k B//gbZo JK*6Mh|凹J2Y~LL xǍϨbQ%Gȑ#8pH$¥Kxg"pikk#X,(bjt+BipMڗǓ @N#G {VzU鱨>pPSS0P @HH9Njjjd"HzTTTH$?B!Kݍr-ccc qACYY"z^cZ`,3z755!Ȑ\.ַr=>9uO<cW]q.g X,wBPTYh|NYl3|=ԡ 0Jss3 h;єN_.[Z@I`*}~  H^d> 1}]zY#^955 //"̌!e~ƮYdK.SOq٢|2BB%ZZ4FH,-|B /^'(ֲC+ZbiVN򟥔{ȑ#%JlCc<6ZC2,3y)og)g]/Lgffjc={2OEQ=zg}L&SF˚l_}}1.L.=G))Qo9U٨%u,Z(L8W$T>H珎R}z5epb|EnsY1%lQs.1Ke~VZujX$1z&tR}6 { Ab6,$ҳ9s=z `{nvnϖj\R^ p9Z0)4.\ |0Dx< g6ҁp Μ9Cww76l0Ӽ KcZ3Zm3HQzzz fRh啦& 6.3)pNKT GǨ8J,f[B ,OpAwƢʖU{y@+ oXcGciy )?so[ݪHJq2ͥa-L(%_sx5ċl.E(扉ECJ$l1d~łD.rA>ͯb6SSSKND" ׷".x(EW̰T B`Eix?9o` ȹݚMwZŭ܆()˿Z+ /g4/.EA:A&LEz&[ZtmYwU`d~2'Ξ=kgQ]]j[,3;wΨ#2?XD8F[4A2?7hjRB$gQjNM祴y]TH<[T20lZzbʃPM'x154a3h6jPȠK644˗%~ѷϱc `e~4h4jL@馇z2G5s5he TKqe!A4m??I0vovk3hYzӉa箭6~xZTK+]^Ϋq:҂ݮKWH9 3Z1#\i~WB=t>;L288d'… 534ESVKW mX,+j.H]ke]ΖZ_C^_Z yq,ՐprحfvCJQHx5٥(UWJn3.6/~VQoOyĹY8f||vr ~gbE2i~VgJ026-/tIWp"p?uxBJ:t#DA. m 7m-^L&3 B3mQqZ!^Mv)J nO{ S!\ ? 
*LJŋٳgOCLl6Jgg!x!M b8==!۷oϔLOkrwh=(]B\ ck"k?R` !I /Z J-yAv) 4A)})7uh @ 8y-}Cڵ D0ĉ< gb\ $ :66i~RxJ%s_`$6ģ i_8tO>WQ] L@"9Ssw3ts!εOiyuڞxܭ\u!(fgŬ9bFũ l/漷 V?e<"rQ6E7lBU8?L珎Q{|ᡫ&+g$-szyTN^~Wr'BIz9Wo^/:::ol*QlIl]x;}WE8(v#esnpӜAH|tI΃ 5sCy52K/Ny)#\2dSW_X0e.lxD af{ 6??_R1v-mRVnP:CY*6ib&dH[@ 1 w"JX*-ૄEnay? =.˗/(\ftt:+"(nc; YIe⯜ 0LFl333SjL3 M lB&f2kgum1`W vL$%τ(U3 Wjv"KJ"A:t3|m+qk^M]Y~1uQA~ۨciw92ȣ8OF2? ̯;}o5혣Ň<-} 1x* C?D'}[%#G wK>G黎e~-׼ ǹ` b{"X~ y.|9dHna2d~P_J뮁`,lJeG MKj,|wܤ\ca8גxS?+(?YJEOױ0^̯[J얼U7/Ȯ<%t,8f>+6c;29_˧g^eoju0===/On+9hw<[`w.*^#ޱ0\USpx/r0+ !gg.R淜v sqeLBf[qFFGC#+&>%+10S? - y7%p].ܰX,LNNun '+6hCVܖ(ƃVn(<dqH;5 ~l_wI;.2Xo)ˮ$4ϠZsُDEp$J/"|,%+S(SPZnTy3<".3>od~BNmunwՙ^kOsRr6R2[i;p *R?8xHy%%9+ !@$!afd=6(Z "^+ׁQx"}NHo q~+ ip, "p}bwC!UNKWLϴXm!$[ADp$h v<<`sdɬ %ʰ^q?D+Ty<"QN8rzW]2d~Jmк v ZO%ZdQI.&0A,\-68hzsp.O|RgSTYIJ0W__)<їm:fG^ӡ 2 ڬnN'`'I{bWf/cBp |V<ȃlV4J-'|oƥX'ɍx<Ε+W |YDW%llyvkeXT/GE <}?C8(R)#m d(Kt m s=Q iDZFfpq5p N󭧎O|[5$#5!Q ߶mJUh 7k.TkQx*#eV3뀛rgUx)T2,M_#}_@`d*<ض@y#nj#ƃ *]6G4A&TŽ_O{((, N<',vHn8E2EO]\~аI`R2儔* ]X,TV2e:tQ 'cܗZ.MJCy;!Bp >&i-XyŪFCKvnvq QUED/Mֱ`Y ws ,3r/ɧ./}Yλt1r$H,)[.ظn0{?v.݊ie0iɦ\ mn־_*;t 2DFM|^`] ߝv~}]%8]N#J&ڄI$_4D$?9c|Jϯr-E5.&/5,?n9Og^A,F'h&}T.twyV"0]B* =* YE5M$9~ 1d"p lW4C-}*pO,h꟔y[O9@MXw9,c<"zff3tww333`n4\YDm߆ 7p *%'cZv7k\]}׭5v87ᨖIt@ IZ,c*jNө=^(a#2CKi.8:8!vx}X.j,':|}lZinђم&IM{ 2u̯wQ?^ {Ga;SwWsu%OTLYse] Ks5_Cw9 ~`,5Ot9_WdF$Lj[*O$3%a؅S(,dbPNy)])V[[۵(ƛٌ%naoia hmo%FI,,h@~cmP^}}e-E2<#0,d9UژVm(i| \ ݁jRKQ %`hNݙ‰I"xD>c!V$W?[2i<3eD p:]X"FkWqr$O/Yw6WҁLS %O^nÝv* Ťjkr!eN< [g]сI i3`%RP3ŢzmXbYT8_O-^ajVhwI+\=5>uf,v+VmQfY"$qs.¡3 {5^ծcἆ#ZyeVMtn־^IN 87 +d~>>kU V*@!nJ)]ͫ|Y>Ņ+t]PD"k `܈D"\t~¯5*wYY&LFI5FF/08F}:H̶ eyURh|eTYhGTRs_jŹLXXUW̯V>!eزPL&xyD>s\$ ,|륕]Rlpm-VtbcM]HLw5b]Ltw9&%S%BթmvuN/ U-9tVZ*IE}FK{ mk\<ͯ7OoRJe~VL6D<l(E^|Q>8oYT٦+҇§wd~uױa ;P UUx-}IcQ.9!ŢpZf63~>o:p2?CeK¢e9$HDH&Eebck#E̖>cf{'S߻ Nu#BDoL6[2Q:]`$OG4BFh-k[H#ۯlReA"W.784?T@,3N98ѳFVET[Gҩ1U>h4B< dp_xXgctqgmA8= c)@"Ѭ14n<+XyrB]l13"Xٔ=]D(k ЅTYlyrn ]z=+ A۪)$?؈k y,RKQJIL9X`Q=!v)."c(_Zz<=GTft^[V76Foe%Tg- P&TvO kKymI.Bj1*ͭ.^M^M빜 
\8+,dNF!qnB"TAԝ+i&O9H/%ۇ-EU_j^>G|Ze6PC=s8;UsΞ=[̯*+0x9haf3n(\>y#Νy " M9n)Lw}(<%z.wX4.8Dm: _Us]SZD'B [ 87Wmv5΢˧0XTǹڈ<"Igug줦fQ(JusJmsU .'% z{{]V旑=37~<H& |y*-+Ŧ\X,(t;HF^QRøe\~=WUlZe}_Ho"TSe.g_ *T6aU/4f/e:O[%.,_xYJ뵜U?58<{re2n-[@UxWJ8&z*N8)D ٌn%\؍!F! gׄgbW:,d/Qr٦Z̙ڤTBOͮ0HEp[pN8o._ɺbkL9ínjnO|DՔ-^+E#JVl6U?OC)5OIE=xǼYĊ 8xfa5LOWvMto~GTT܅I8FKލҫ:U~NB/.S`[,=0j"j]N8H, ˕x)+F旮Rf3ZƢP**<<"E.Q>HOA ;dCvN ',N2?ޘKd>O % '., uiAZ!rq\cƎ2l t>1!= #i8BO@lƯfYh Bb4mX!]bnjZ1l,* (YO_={)Vh.̯UU7xlٲ%#[>_b})f3^Ea &;n5Mpl'STrL@jgzY(,5Sh~-ۂ%dﵔ x RlYGN()ļ^141:v_l3200PQ2nY.BpaǷZafM<~9C1[Fa7[h*bJ\z=f|w xjۊ6Cqꅗb9(YJAZ4~DVլ`L/g>ȏ***2s;YzLP4IC[z+tui 3U(  I&beRelDR,-Z[x~{+V5۬f t8,VWbQ}*EU xfF2]ޞ1^_ :9E}WueEEE7reϖ6 Iℙ ? {F\[T!>UW-vipujO87,tKP@<ѵחjYn,7WS^^n4eȖ M3 wϊKfQé%1Fd~B: 8&x9(tyǎ+{(~慾Sk\^!>\s'MPZ… 319ɏ@+{r^O.] q}x,լ`#^T,E5!&/5,=L"Cd~^+!8blQ i|hD؊(t0ʞ-oy?NfC-DguG,/Y 7 V3P H$ L,B]9B ͪ\^ K67żVՐ^mh,s$Ģ (Y+j~T3})BH.̺ˍDJIϔi~̊] v$R@"ʩyŽ/a-a` \_:hsaW:ZYhM|Gq>,˲JE[%P(X+fVC槳D<ȁYTd/Oc\#q}8kVZȈEKZ "rIJky^RJzwR1؞gC I%|EXq!紊 ػWsREiA, pπq7y,/_4sɵ깼ږm{M KXr^6!󃕳tjD/lDI2?=(VU 8w6bUTT)26)HnҼ`ܼݎw``K碻-(uN_Lg /3p Ιȥ 4o}fBh;k IUc4p7wNs-\˫miÍX"b+΢j+E(9X%WC'@hrE1} lkY]~^0Z\s)͎0 گ E5!94ERv]AL&uڪ]dWG{6x 9c+}Vۤ'Fq87ΦPz ?a[R旮Y +`+:bE^LrDՔK]9')&! rxC\$Ne'φp:l۶rLnE5a҄ R$ YL7tAبŹ6iL'&a8sFہspdY:P'<,R@sN~z{5d~+!h,*م`QjQR_z6z*a{bW*$Tz8:$ǛUUikkhGT0cznZ邴,@Y+(x5CyS'OѣZjf6i3# Oj ֠M?Dͣ5 20X̯N.݋P*H?5QRQQatEfC#*Ζu$Xc{?g i7'$5COcccF;p8͎ GО&׼ۥe~K nuu}&p^}eΎNWʘ+Aĥ;E^Tڈ,#J\.n8*0<̕@4TǼtpE0E|#D !gTVVk׮z݄B;p>#/NYLE-:pU8e(mr.wog>t|](vQz?015Q̯XNWXY$_OGq \2l+[m`OoȊ_VPU*%lz;jsxOp vܭ[nt8}Z#eg&-A^kn a(H,',8Ib&[ Rd~o_QR//Z\E!s}_`3')3'QRUV힎Z$f%Hj`OoG{.fxK*{]̏ݻXn0(C'2T${e*u7 'q3Yd+f`e2?HwbT #J5lDR2|DoDgRT!_y*U<   !x)1s,#$ F"p-iVU޿_2b!4Z;0d%mQ|į Hp})S%3?Y ϊpgQ,*sk<$/ټZ2?IgmA>/Ϝׇ/axhQ^!Owww M I?Wokk3__z;2…U-:_ݭnQ' V,%|Hd+f鏥̧{XTr{^X%Pƹ+iީJc /#קH}dv***z^429Ch4y׮]JVAoƤr:3YTZIA Ʀݦ)vM#%CHOf+f`e2?:S~&|29䓜fpM&}^)2b5H4*eFcⰹgCmwv;[n]vY__f=CY̯,f̯*F8|X#ehf8P c^tmCI.6T "՟SWO-o+X8Nr,p_戒_Or(_Z xVxc%Hu1+)GF+c$ HVTOfFd2I{{(}޶1lMd~6hrsV&ҁHhW^h%z@3&C} kwm߁\E*'z2?txw~(-y\vP/}J?.Z! 
e8&T٦HG`P(bł9{o$tETvHS\eY.w]QMppP0[}F*!f ;\%|0_1X@ NK7-}.3&Kyvu(Lۥ%:C"4\ đH?ǃf3ƈf0vޝSUpCߪW,qP4Ⰱ zh\!h,%0=t'r#GV$,ĻR2H,V9mX6>-ghXUTy3<".\h]Z:̓0%Bj$ \.NøD" `Rk؊2sMp*k(&p; V#訇z+ '%J(hn"yz+!m= 'M$ߝ\]8=="ρWfܵY d6apwYO~ #F,嚞UUӹ.% _oj[exFL& v;9UN~dR̅b6=%I!TD3#N\o~<-niV=롽NY\ ^aiڣ 1II*QRj6imJs;s6SO 'M5`8GjQNx3HU"-y˥7uN{ _{:uxQU=,BJDRaWf)LJ(& b60<&ݕ|D1UUnVn9l¤-N0P2̬gjVРwfOLh;2*d~L2Yՠv8 Iq/E#Ŗ29Y%d~!جL6j3k dt+Ll$ZayDWb+' H))/wcۍ% BX,p8pϚd6g9if>r=&Kn2ǯJ>vyNjqnr(C w9U!h\H-W7Bl;&VFHwOHKpuNs,-xT jT xQ2RM>[ĆZ/ZQUH$B PbL [DVaWᘈ'*;yhP;˚pFL&8Wz=\RFe|ҼsA㖓] :hB}LZ>.8_z`>Β":ǣpvRsL>p.>ڮ}~JS'J#\.k#JQ2F#J6=|&jF"A-l]T&tR[WEJ+M+բosa$ yвV'kD tݔ5YpH-\iG2rjqnh]ŠGc<9uM#YZo`dA%ËI$~PrE?9|63u#~?-34Qf6GUDf |IZ::Μ'9{5. 5$(|;v1Dd2Qk;[m.*Q͛լt2{r"Uv5v\gs%0ywKm &X6|y"(vʊQ6#gsgV^X 2?#]֔BN,}R[:%In"J&7_og&{t7E#$y;Ot0:OҚ,lUI π^::RoW*cmAӥe`p28Ost6TWW2>\"`cgN'e敤@XBJYhuᘊ ۅ3؇}}rgm=7S(TUUp8rs2~υPWF~l9\V *zbk[&MJ7ܿ&N*xx,H{Kt#y#HERwӵ]zw]Ip'[iqh]e^xwS|.5;fiRMuP:(w`yNՊ%s,-,L`$y Uk>73˶*nolN()I$AmU5ՠs2~"B4GcwmiXr>ߕ+J2y} N:{u(!nHX@$R}8ZrW@KX!1ELt>ibee4a嘅o5>Nyu9BY4w(*ўI9?2_ϬوB%2d~1| ?@3N̑$ 3[-H%OJ|I{(IuӱXaFGFhu.:R&UB>cDVL(b1|~?`djFRPy&CX:/ S}3--Zn݂O5bce6r"{ f/,@Dy>R/&T;" T^fz*+Q ՏUh'U*LQ'Ȕ*[)*"ƈ&#J5="d~6rvBhYs 6:Tr©޸b{ldȚPx{s+NVb s:v`z) 32;Dff Ȥ "&NFNq.//F5IWP a!m˵Xw8"s'8O l߿?UYVj]b$n". t9|4V &X'Z |o4j",`eف˙T/RXTq)>^ eCN_*# %׮)JL "M(lÐ}_؇=}}$C!Ա܃W *~f&f[5XS;Qz{ W $E< 1GK; b(ʁ^y-ছD\$SkuUeMٽ2wjHɴɾZ$˃Ksh 4PK!1L4ĺl ' .󃕳4|Ca`bFRe~z,lg)*)c&87! 0;5 t4ҨhYd2A0b~~q2QSIݞ9z;9=EwO,v`oe9J,݈ 2?rɹt%2 _^Ʈ ! A%Ĵ<8VxmKt/ywAOgiz S܄*Tfg f)GE^6 0?Jѿ qZ(IA! 1BKedda~u+TF4c.+ÚESdu ]V`za+0X/Ʀ7JQ}!4HZM%$7vӖڅPyI6pR`|bRPrt>։gЃT$d~L>["*)U^"yD`DI2?ӫCbj!A*IMbݣ~i$}UpJq-ΝuwaGps44cӭ@jU<25/)rHr4w$;6, !! 
$qvP@Y_P+y%%"ͩLu t_T#kAI*D+lOy?{KbQu *y5`Q- /'K.VSX"?fW_jTS- bc̤0T5~9EJs8P6LكhAd.>Fwc[i?կ]NЦhi ĥ—;yFmH/׼ {0j ˳2wR,rȢmd~Uig-58$=Q]KbB2$܉d  X Y쳼m;@iA6'n%rOmӭԡY %wԂݪ_27<; JBiYÔjezj"o|966Vʹc)խ%N<5Q.ӛgǹ:o $ FGGj彭lڰqn$od4˜E Kyy8w%!権\G{ e1TK:_)+i5/ 0ͮDM7w$]xjnRD`;q.HQV"v.nJcQ޺K)a)4kzPgCjׇ%zvܔID,#1 2j2PRYygE!hl5!f hE8]Υ7W-}XTjtk|AektROt9/$xKYs`,X ~Wr'MpwyB#Jz;-\P#4bmQ򬿟<;[Iu1ǘjd]-fuoJsuP)Ct`. w(bhz)+\v6޿>U*L@ 6? ZK%~k=Sc̯ f>#ghK1 1~bQBwZ^_C=99i ס9Zi/!,ssGMm&3T33^%P4Zx,/p!B ڗY*I[mp[ / ]wNp+kyb4ͯ5 ;295<1Eـ:l./DӗZmJa;R*m brREY̯ \4ѯp_|ؕo+Zu3d~e~`91$Su5uXlERswR^F4P, ].aV@FͨgQ_@BQ3wٲqnCnL"> n e.nNT8[ڛ-}|luF.w֝Ջ22fd9"YTTy<"1^_Uqn._.;eΚC2XFT2d~====N^2d~՘*c(3B@Ue'b-2D f?/߰BZqrP7 %m\k po0/,|#lk&Y.lu#(#J:Ck9!WKxL;.n-ˢ"|o2SLVEz.֤Yzd:-oone݉C|LC:Z1y+|V[ ܑ,T x\$_nC=ތ شhii9o%CLSg*K1͊]iq NNx|Z[!$렫z)rZ TYi&xd^/{nM?ieY)Յ5`Q- 9PKg^ ট HHY<2i~EN^l6(s g0у8*D'H܎/v\%`޼ys^_jz FbXpr .bޒ!91T:n)nP JvԿ9%MҭXNBzEW1šbCz{{z_l]׻s Be~j`0Xo5t3L7ޅAQn=W-&j]D0s5,TKFJ̳ހXŦ I2'$_`[4n[0Q~jeZeJ 'rH-M<, 3i: .//sͦ]r2?Ǥ.k2Nj0'{U -}hrQO7"Ö,w9uJ4UTΗ9PYI՞ef?B BroN,:iI ^9_ͻv ]?S@1{M7xK^_#f>)[ZĄ盼W3k¢hz^/__.54Y)̯ˌ0r*Q| 9X]e恁WM e O]~pj$mĠ#B#gh޾~)v6@g=ŹJz{j~OUz ^OVU12~ӻ2,{#ϖR2?ioUD4kmc B1|_v Huyw9q)TIew#TJ]*Iey2%yz kL҅=.Bҫ)RԚ޽>/Wv٫6jeU*DQؾثaiT 208=Y]Cb/2.*(TYh),ZYhЫs!cPb=0.Sh;4x-W޽rj u^=(!<,\!j%PνOexa3|5Ej)֪[:`YqIfT۬Y[F]II45o*a"X<'qe~n6 D)G,4 jϮŹ%WwnzճebE:)R&)(*]ZwB{J=WΎe* /+ ma^B܌ w8W9G`_!2P(d<:7o͆_ YUHfUH&r{i.ƅbFQJlY Jaf9Z,9^ĺ)tD-$՘?Djl[advd7@8Asq3-xnOE`y ^0VjoPy~VJW?_~b]͖V{Mw]!U4L7+Ui@A6u(q >̯wCe([ZHTieȄ"wY|O&TNNjI$Ka2f5XwctטlT1kX LWAWfi®qyVs*B.[V*^afՌ /.U`H)0+;g\D B!M! KR稅T[-U#XuE:d~{[¿_;5i_$00G^d鑔nr(Ɣz2]r`f%5N]VUs{w$lzL4}k!Ӫ$6re]#YTC,t5 -Dʩ U`Kh3c"NL =n ʏrE$D_ ZL(.adҠMNN漙{qdK(aI;5:fXֿsW?%5/{V'R$@&I̦}J*5J!TY\ 1Ge &OqSFJp+rΞFP~- V;QPPPdG!LdXQ#kYh$$*8*Զx\2J$h™`ܧMn6Պjj.{0e .r G>&QN߅k@ВZy9RPAJBe~v2? 
[dVVD~11jB-FG3<;v򲔘 ˄8uh@VrYHסЩʁZ}#-1c&1É?G(@5TKJY7۫Ecnd>?Y,N'6͈դe  >w_;RpBP+mZ|V?|+#H$ Vk0]fٲsuf !̒o42OuAh.UG?PyFcg [Nv?݈c[reGU {+HЉ&_ w$-FkIZ8fs|3\U&޲Ձ֜y(ApzL% ᰍ H|8/b2t8p8/H$B(BM+m3Qp?|pSa-|@r=N/+VMBeȕtT槃VUyQvg 6ϰe4樨`VAUk*v++4XMt@&&*Bǎ|%[&77P\$J;nBf[uSkz|7ˑc +] nmM{7t:ͥ> WVwߎ*%}II .3CZ BDQBM?dXa4" TYi)aG@.kmm\oH^̯e5.wOy-~ܞ(!2k!᯺;4Zjm^<3wz0AZ]648k@A`I֜9YxgPȊzK U, bNBb.Źc1e≔j"ʻw9Pg@8&Z1r]VNnnrߌbj_sPH$(시]("ZI7LYIJp^'yf&誟|2EYψgSI(5IUM sl1źZMK( 'hWVh|cf=נ|>#[*zF#]S,:*CE] )$qM%q͍`KIJ^!./.. !N/EO%nuނIrW#>NO\!E[oܼN,q鲒!tΚ]C_f}lQ9mӬ23]-f\;.|[,jXӲjhrMF zQفz5jYȖQx\Nk̻w;aeēC1~p"̑(#l* h;UQU᧧ @ }V̤OKI\#Q?!2;)$?[7JE#К}ox޻gYZ,a)re5)8}wPk\I%jgcss}šfۨN+Hev* 2?)$)NLy"1bV6+dGYIF) AnS8[o6\h5gOO^,bNm d@ *Tzy-PqCvt_?|5yHEləMֺK7~ACoy]I+L_@83sc ̻w9ˎ.Hpv$Ox'Jd+fUU5:[1K%WNj%o$HLl?̿^T- \<[?6>Sx*Ġ%M::eb-8kTe }Şf!2@7F127jM5e ms-vjZdx.τy\PNh s=P|bbœnbFs]Oˏ[䩦)-d+ǥA+I_"uO?_sK*~v @g 6lljvRE2?T#?FTj*cwrUmFQ`>(?>&ݍ&߳#jkkSSS87[1c2RjSWVc4ղ:¦<)B ]h൥~@2* (%V曱:gjVit|𝽼ؒE[UϞĿ 5.h$YwŒPE/;ɺ3IDATϤf+`wYKN !NŗT-!L`Ϥ}ƍٸq#(%rYVմ ( ܷLͰL"2JmNI2S?{:uDZácfDu,ff(JqMV$4;"<>QC a^{'!v=J1͒U inNL\&EU&~sy qj>bp67p8Xn[lHgKQFJ 1BQ;w wmK%HΔl%)C&0](cK‚2bCkNj N$~u{ukbC#Օk[VJ_1% oA2>ep6MTx %kh}I;ѳa7|WYiVu\7Ms?]2;H@h iJJW'\p'vInc<-[0-;8[@@sy;\2D)~ep_vGoCͯZ0 oz;\2Əq)]gۺlϬ?bw =?\v0~^K555=KI5z'O_CusVPς;,l^NjeIc}}}X*z75 Q|*^_^685؅]Ճ&hZe/]WAy|A槦d~gd~[-g 2qD1KyK FX4s x衇 䔷k"B~I _ؾ"TBKsg)%k |vWr?Lզ5LṸ:+n,,A*(K7KxU,d=)8M 2?9湔/>ainfB"X$o{3ƆQȫ{5eJA44{'$7d_bQɃKwIg`pI}_:ZLi~$:w#v$ urG%;9>!0]]X {Fa˃01g3d~ns(K0?=SmK FgyjjUYq=ZO'qӦqB7YEb@vQ,̎yb彭lҧ*i~#ggC&$GбLDmk Kfd~V*B) y}E2f޳߹8?82?X~~s.ţ_o$2F1ܴhD O?V"ؽEV q3. JnJe칷}QUmM!%d` ǘZd]-c\nO_U//e?h=mk"UKe33i24Nybp]ˍ)t;Rr'_J_VKJ^Jrͻ:h7Y0II2$㛟g"OM_4Ok#?ym',W#ZxtW,|U*I2?UL_dc͐ݽ;wB*\d~CsK ܸbM|Maak.WmWd60SOe% o /qL66)J$13rho`p8L?9=M q5A0kOo e% A$8e~S)Oy2̯֔I#}l2|:d5oxMbxtGkkϝkq<\Ǖ ~2,J]^VNͻ %ײ$HX$I\#̯;c̍d޲[کK"%YFލ?R{Cnq^ ǹ:::2T %{]TwZjC;ţ>xwٝ*3MYi6. 
đ:h,c:bi'P‹g[].u+^>rlS1RH\՜~32?;-wݭ)_Brb@sO I.#YM{CXozՕw_$1jl6* .QUACuGh h l'PfSR/ˑ*YO\Y1DM5Q5|VV7va=_mM׵8 XUv1۫Lc7(w(*tytdgy޺U[kiu^m06& ;`š cu23d2'ədI Hc/xnw[ZKI})U{-Rus{Ks{y,?KiH7k4rZO`p+=^a#7?IJ.O6RyØ wa¶{xGq@2?oԬH4Œ1AX5_n>__5zq{CqǞI r^bߒCy6e~W kϴMl6Z[[*`a)x8r.GsX|!AׂPdAd͝*dbiGS|uMkkkaf2)Ο?hɁ{EAQպ7ŋD"!UXfoXɴJ&+sYe뭏a HH$6c oWq4QAc6uB!z{{wmt:9taf G e!Y>ku$y:g"eߛ(,Q) 9Mpj,~I_K͑#G8t^wP,p.]*9rYTUo4|=7?SVHl_᥁JnB^T+-(ܗ&yg.orM=V,iZaJ!;Aricc/HYB&ze[nR Saax2ͿMd~hJU6 ^% Bti I4>UW=M7`F?ʑxmZIAF<7-׉l\LQiƇ/6?q((1kdCp8\x8TWAnyryI_y~4U'hg*;qE&H722Rҙs05@AUk~f^Q)7ɛEW].Wsubk7 (G~o~(F2?7aa;zdPWx$_!ZWVe"< o|/b1b)'&&x:;;(n4o83˜ /j\[!!O5#:ǙΝ;G(*]'ϧ`TTмHOWȲ ϟgff& K I,m\y8rt;$HX$AsE>û6uxvz#{ d~CCCPx#O0ze 08WUVDRM}yBP!)ꢽñ;H9a'=s-Eac A汣̦(E#}}}_'ljLj i$QQQA!*dH4J__~+D45?[̔cTT쭎،U /+D;BZ ]{!f`u8Ν۴Uu1y|Ԧ [! 0F6 TU7Q+[)Q?.ܶoC7?Ffm0'yżOOXP4ISg][S$H,G4âHl8a|"ołraXuX4ʤbCkhXӯ٪ <n1J 4=0uP"FGpXtLWG?xm_+ x;x2?w+2u/7`2&kh@0空[_T ʽXlk;MB:C4ہ|ܼEq8x2 EAa#~ ]vZ-HWTr>:') %yˮ z7kj{LNLP୭m2?1JQ;ݷDaI2EϝX,12Yv<vFIlv;UUUXV2%UC!,?PlYw|/e~`F`q5Ç?II#e~_U*^7:da6)'! 
욝od5FVɖV' p xk}#*OIe~?p}+S5V~NaltdgPRՊt"2ňbX,`Ԇ RBW+$N%-|rgx-Uqk0]3|rhyx-Dۆ>2-+E$6盩yH$Iieddނb7ȎrmGڍ7XP}Yf*fOh4~1P]uTF.,0`Yg _CcIȲ`6*_fbblH|>hkCe@I$,,,dx<|>Vcb>sjgAfw锅~ZT& ?m 4Vnpw`&$>* W~ao=F <x~~]ADb̙3FGGkl!CKK˺* 0@Ւ$PU+W  1:cyѧqYrI\ 5"̯',Deumwb9d-022kYvfYFYŘ, XVM$iKEEE---wqey9a#bf{r&a~In1S>>30v8jó.t TdJc`?>!jj6QDdD~=W GB!9r?Hzn~U*,.7Ob&dzD&ɭh[Z+x^I?"7K<4.w^I^#?jAF٨WI>1v +I(" xgf&'o%PIzn~UAj,v{}nس8G,bֵD\EW<~l92 Ƈ89y"7ʼ[C):Vú~c /䙟7',=@W.\evֿ؉pޫ.DpsvaTʗ֎Upϊخ}$$\'DgƁP kι'Nri\P%p=' 7?%Dox߻~Kgbd&z8|5ˉu!+yhoog$B?մO...g>NUYAwo#ls\"vs^n$=k'NtE4yhBbh.$xb`c7?f>G9aBr_rM oM3{W xc0F}e;BM坷{һ:vٗUQjjjhkk3CH:ZZZp8>&YcBœG/2@"@E6?у#a^gUrԸlVcýn~͍fx{-Dtz7s3qK۔ApxYm݆2 ܛH$>244TE)xNMMZ#;Ejn0U$.|-3qxGA0q 3Ӝ9IZpޅ"K2u \3 4k$5o0zY>ZA_d}+OT03H E2w#32Z¶ak8lh)+7 2r7>ĉkUFJ(?^ oy",a$W9F&$Y`B]`aSw^WAwUHufb˵<(PH ^Yފ_r̯߯\xGbMC*`y)oBҁ4u,PtNWKT1XX2+-gŪY͊, 0y2,ZbCߠIllYMpa*}r$MDqِdىED2\Q6( ,+^ؓElCCMmmm)3J6W}0E I&!d BӘ) bPtcB&iDQfs$3Bs~aj\ƒ},óI~v!b2q2Uֶ¿Xz7$ЅD"# hZC޹5&+^@Ӵ?z?:}',o?~+B b{4cի2UaE8d/Ht$Ip[, $!:էNa]ZB(ԟ@X=X4 M҉fK$ \*Wh Xp|h8p,"kyCm'lΐNʼn+1 orirO/a@W. 
l"HFG1Z4j<Ό`OQf Bs9"ha*oSyhʈ̒,%Ht5c:),hE~ rsƊZҿ[%:$9Xo/e 9/$ەzo I^f2ra레Vv>wꮜ`3Hr!962.P)XcRJgd^c2 /!K?DB{#c5?Kn?7o"Kj|\q<ɲ̯l# VEc.|J[XB#pb&O2E!-iJ4VɬrXc4#1#nHZY&=1Du '~K\vEet_yeU}o|C,a~;)n\-`*3ə'5>\N+2Qd~["D.OSTm)/fǐ]*I_ '6PDd -l&[\r`#My薂聾T$$Uyuə{9k 嘍 ̴jQzV@Y]nYC гd~>w""ہ;1Ƿbs>r).F99"3eLWc鈝SF:u ^(8].| 8D(SUPPh_2>wU ~ XrN \`49S' ͇t#yws Ó7 Im k]Awnn`R&n~J>w nqS0le~˻h]h/y~0N} hw$a-2,&tO3;0HZGYʿO=?Ч]$,-G; pZ$RY Jpv<̯xzS0W 2D0_U.+ \FYx`T]y4u88p&Wy@0>>^N7G1eV>ˊ}M"?3`4oj^\Jn~gY-^K>ˌ>]M*+8DFKe$QUUE{{d~v_N12}_8|vݻS_]ޗ]e(֛n`09!Bu'2=)F]w_w `"5!D&$< [OG?C$dY.FFF6A[)Gk5Mcvv2Y Wؗ]'5U/^ We~,+IWn.0do%{aWp\x_za lSj&%5W>^89(+%<>v p FT7c~5bJpػl/`x%>ac;  4\}4:1s,7ѳ|j}cCvCtEXtSoftwareAdobe ImageReadyqe<IENDB`tidyr/man/figures/lifecycle-archived.svg0000644000176200001440000000170714013466035020076 0ustar liggesusers lifecyclelifecyclearchivedarchived tidyr/man/figures/lifecycle-soft-deprecated.svg0000644000176200001440000000172614013466035021363 0ustar liggesuserslifecyclelifecyclesoft-deprecatedsoft-deprecated tidyr/man/figures/lifecycle-questioning.svg0000644000176200001440000000171414013466035020654 0ustar liggesuserslifecyclelifecyclequestioningquestioning tidyr/man/figures/lifecycle-superseded.svg0000644000176200001440000000171314013473313020446 0ustar liggesusers lifecyclelifecyclesupersededsuperseded tidyr/man/figures/lifecycle-stable.svg0000644000176200001440000000167414013466035017566 0ustar liggesuserslifecyclelifecyclestablestable tidyr/man/figures/lifecycle-experimental.svg0000644000176200001440000000171614013466035021006 0ustar liggesuserslifecyclelifecycleexperimentalexperimental tidyr/man/figures/lifecycle-deprecated.svg0000644000176200001440000000171214013466035020405 0ustar liggesuserslifecyclelifecycledeprecateddeprecated tidyr/man/expand_grid.Rd0000644000176200001440000000404714325573777014764 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expand.R \name{expand_grid} \alias{expand_grid} \title{Create a tibble from all 
combinations of inputs} \usage{ expand_grid(..., .name_repair = "check_unique") } \arguments{ \item{...}{Name-value pairs. The name will become the column name in the output.} \item{.name_repair}{Treatment of problematic column names: \itemize{ \item \code{"minimal"}: No name repair or checks, beyond basic existence, \item \code{"unique"}: Make sure names are unique and not empty, \item \code{"check_unique"}: (default value), no name repair, but check they are \code{unique}, \item \code{"universal"}: Make the names \code{unique} and syntactic \item a function: apply custom name repair (e.g., \code{.name_repair = make.names} for names in the style of base R). \item A purrr-style anonymous function, see \code{\link[rlang:as_function]{rlang::as_function()}} } This argument is passed on as \code{repair} to \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}}. See there for more details on these terms and the strategies used to enforce them.} } \value{ A tibble with one column for each input in \code{...}. The output will have one row for each combination of the inputs, i.e. the size be equal to the product of the sizes of the inputs. This implies that if any input has length 0, the output will have zero rows. } \description{ \code{expand_grid()} is heavily motivated by \code{\link[=expand.grid]{expand.grid()}}. Compared to \code{expand.grid()}, it: \itemize{ \item Produces sorted output (by varying the first column the slowest, rather than the fastest). \item Returns a tibble, not a data frame. \item Never converts strings to factors. \item Does not add any additional attributes. \item Can expand any generalised vector, including data frames. 
} } \examples{ expand_grid(x = 1:3, y = 1:2) expand_grid(l1 = letters, l2 = LETTERS) # Can also expand data frames expand_grid(df = tibble(x = 1:2, y = c(2, 1)), z = 1:3) # And matrices expand_grid(x1 = matrix(1:4, nrow = 2), x2 = matrix(5:8, nrow = 2)) } tidyr/man/reexports.Rd0000644000176200001440000000215214165476376014525 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidyr.R \docType{import} \name{reexports} \alias{reexports} \alias{tribble} \alias{tibble} \alias{as_tibble} \alias{all_of} \alias{select_helpers} \alias{any_of} \alias{contains} \alias{ends_with} \alias{everything} \alias{last_col} \alias{matches} \alias{num_range} \alias{one_of} \alias{starts_with} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{tibble}{\code{\link[tibble]{as_tibble}}, \code{\link[tibble]{tibble}}, \code{\link[tibble]{tribble}}} \item{tidyselect}{\code{\link[tidyselect]{all_of}}, \code{\link[tidyselect:all_of]{any_of}}, \code{\link[tidyselect:starts_with]{contains}}, \code{\link[tidyselect:starts_with]{ends_with}}, \code{\link[tidyselect]{everything}}, \code{\link[tidyselect:everything]{last_col}}, \code{\link[tidyselect:starts_with]{matches}}, \code{\link[tidyselect:starts_with]{num_range}}, \code{\link[tidyselect]{one_of}}, \code{\link[tidyselect]{starts_with}}} }} tidyr/man/table1.Rd0000644000176200001440000000174214017432641013625 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{table1} \alias{table1} \alias{table2} \alias{table3} \alias{table4a} \alias{table4b} \alias{table5} \title{Example tabular representations} \source{ \url{https://www.who.int/teams/global-tuberculosis-programme/data} } \usage{ table1 table2 table3 table4a table4b table5 } \description{ Data sets that demonstrate multiple ways to layout 
the same tabular data. } \details{ \code{table1}, \code{table2}, \code{table3}, \code{table4a}, \code{table4b}, and \code{table5} all display the number of TB cases documented by the World Health Organization in Afghanistan, Brazil, and China between 1999 and 2000. The data contains values associated with four variables (country, year, cases, and population), but each table organizes the values in a different layout. The data is a subset of the data contained in the World Health Organization Global Tuberculosis Report } \keyword{datasets} tidyr/man/uncount.Rd0000644000176200001440000000176714321316017014153 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/uncount.R \name{uncount} \alias{uncount} \title{"Uncount" a data frame} \usage{ uncount(data, weights, ..., .remove = TRUE, .id = NULL) } \arguments{ \item{data}{A data frame, tibble, or grouped tibble.} \item{weights}{A vector of weights. Evaluated in the context of \code{data}; supports quasiquotation.} \item{...}{Additional arguments passed on to methods.} \item{.remove}{If \code{TRUE}, and \code{weights} is the name of a column in \code{data}, then this column is removed.} \item{.id}{Supply a string to create a new variable which gives a unique identifier for each created row.} } \description{ Performs the opposite operation to \code{\link[dplyr:count]{dplyr::count()}}, duplicating rows according to a weighting variable (or expression). 
} \examples{ df <- tibble(x = c("a", "b"), n = c(1, 2)) uncount(df, n) uncount(df, n, .id = "id") # You can also use constants uncount(df, 2) # Or expressions uncount(df, 2 / n) } tidyr/man/hoist.Rd0000644000176200001440000000663314325573777013631 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hoist.R \name{hoist} \alias{hoist} \title{Hoist values out of list-columns} \usage{ hoist( .data, .col, ..., .remove = TRUE, .simplify = TRUE, .ptype = NULL, .transform = NULL ) } \arguments{ \item{.data}{A data frame.} \item{.col}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> List-column to extract components from.} \item{...}{<\code{\link[rlang:dyn-dots]{dynamic-dots}}> Components of \code{.col} to turn into columns in the form \code{col_name = "pluck_specification"}. You can pluck by name with a character vector, by position with an integer vector, or with a combination of the two with a list. See \code{\link[purrr:pluck]{purrr::pluck()}} for details. The column names must be unique in a call to \code{hoist()}, although existing columns with the same name will be overwritten. When plucking with a single string you can choose to omit the name, i.e. \code{hoist(df, col, "x")} is short-hand for \code{hoist(df, col, x = "x")}.} \item{.remove}{If \code{TRUE}, the default, will remove extracted components from \code{.col}. This ensures that each value lives only in one place. If all components are removed from \code{.col}, then \code{.col} will be removed from the result entirely.} \item{.simplify}{If \code{TRUE}, will attempt to simplify lists of length-1 vectors to an atomic vector. Can also be a named list containing \code{TRUE} or \code{FALSE} declaring whether or not to attempt to simplify a particular column. If a named list is provided, the default for any unspecified columns is \code{TRUE}.} \item{.ptype}{Optionally, a named list of prototypes declaring the desired output type of each component. 
Alternatively, a single empty prototype can be supplied, which will be applied to all components. Use this argument if you want to check that each element has the type you expect when simplifying. If a \code{ptype} has been specified, but \code{simplify = FALSE} or simplification isn't possible, then a \link[vctrs:list_of]{list-of} column will be returned and each element will have type \code{ptype}.} \item{.transform}{Optionally, a named list of transformation functions applied to each component. Alternatively, a single function can be supplied, which will be applied to all components. Use this argument if you want to transform or parse individual elements as they are extracted. When both \code{ptype} and \code{transform} are supplied, the \code{transform} is applied before the \code{ptype}.} } \description{ \code{hoist()} allows you to selectively pull components of a list-column into their own top-level columns, using the same syntax as \code{\link[purrr:pluck]{purrr::pluck()}}. Learn more in \code{vignette("rectangle")}. 
} \examples{ df <- tibble( character = c("Toothless", "Dory"), metadata = list( list( species = "dragon", color = "black", films = c( "How to Train Your Dragon", "How to Train Your Dragon 2", "How to Train Your Dragon: The Hidden World" ) ), list( species = "blue tang", color = "blue", films = c("Finding Nemo", "Finding Dory") ) ) ) df # Extract only specified components df \%>\% hoist(metadata, "species", first_film = list("films", 1L), third_film = list("films", 3L) ) } \seealso{ Other rectangling: \code{\link{unnest_longer}()}, \code{\link{unnest_wider}()}, \code{\link{unnest}()} } \concept{rectangling} tidyr/man/check_pivot_spec.Rd0000644000176200001440000000363214350635333015770 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pivot.R \name{check_pivot_spec} \alias{check_pivot_spec} \title{Check assumptions about a pivot \code{spec}} \usage{ check_pivot_spec(spec, call = caller_env()) } \arguments{ \item{spec}{A specification data frame. This is useful for more complex pivots because it gives you greater control on how metadata stored in the columns become column names in the result. Must be a data frame containing character \code{.name} and \code{.value} columns. Additional columns in \code{spec} should be named to match columns in the long format of the dataset and contain values corresponding to columns pivoted from the wide format. The special \code{.seq} variable is used to disambiguate rows internally; it is automatically removed after pivoting.} } \description{ \code{check_pivot_spec()} is a developer facing helper function for validating the pivot spec used in \code{\link[=pivot_longer_spec]{pivot_longer_spec()}} or \code{\link[=pivot_wider_spec]{pivot_wider_spec()}}. It is only useful if you are extending \code{\link[=pivot_longer]{pivot_longer()}} or \code{\link[=pivot_wider]{pivot_wider()}} with new S3 methods. 
\code{check_pivot_spec()} makes the following assertions: \itemize{ \item \code{spec} must be a data frame. \item \code{spec} must have a character column named \code{.name}. \item \code{spec} must have a character column named \code{.value}. \item The \code{.name} column must be unique. \item The \code{.name} and \code{.value} columns must be the first two columns in the data frame, and will be reordered if that is not true. } } \examples{ # A valid spec spec <- tibble(.name = "a", .value = "b", foo = 1) check_pivot_spec(spec) spec <- tibble(.name = "a") try(check_pivot_spec(spec)) # `.name` and `.value` are forced to be the first two columns spec <- tibble(foo = 1, .value = "b", .name = "a") check_pivot_spec(spec) } \keyword{internal} tidyr/man/pipe.Rd0000644000176200001440000000036614013466035013413 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{\%>\%} \alias{\%>\%} \title{Pipe operator} \usage{ lhs \%>\% rhs } \description{ See \code{\link[magrittr]{\%>\%}} for more details. } \keyword{internal} tidyr/man/unnest.Rd0000644000176200001440000001244014363516001013763 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unnest.R \name{unnest} \alias{unnest} \title{Unnest a list-column of data frames into rows and columns} \usage{ unnest( data, cols, ..., keep_empty = FALSE, ptype = NULL, names_sep = NULL, names_repair = "check_unique", .drop = deprecated(), .id = deprecated(), .sep = deprecated(), .preserve = deprecated() ) } \arguments{ \item{data}{A data frame.} \item{cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> List-columns to unnest. 
When selecting multiple columns, values from the same row will be recycled to their common size.} \item{...}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}: previously you could write \code{df \%>\% unnest(x, y, z)}. Convert to \code{df \%>\% unnest(c(x, y, z))}. If you previously created a new variable in \code{unnest()} you'll now need to do it explicitly with \code{mutate()}. Convert \code{df \%>\% unnest(y = fun(x, y, z))} to \code{df \%>\% mutate(y = fun(x, y, z)) \%>\% unnest(y)}.} \item{keep_empty}{By default, you get one row of output for each element of the list that you are unchopping/unnesting. This means that if there's a size-0 element (like \code{NULL} or an empty data frame or vector), then that entire row will be dropped from the output. If you want to preserve all rows, use \code{keep_empty = TRUE} to replace size-0 elements with a single row of missing values.} \item{ptype}{Optionally, a named list of column name-prototype pairs to coerce \code{cols} to, overriding the default that will be guessed from combining the individual values. Alternatively, a single empty ptype can be supplied, which will be applied to all \code{cols}.} \item{names_sep}{If \code{NULL}, the default, the outer names will come from the inner names. If a string, the outer names will be formed by pasting together the outer and the inner column names, separated by \code{names_sep}.} \item{names_repair}{Used to check that output data frame has valid names. Must be one of the following options: \itemize{ \item \verb{"minimal}": no name repair or checks, beyond basic existence, \item \verb{"unique}": make sure names are unique and not empty, \item \verb{"check_unique}": (the default), no name repair, but check they are unique, \item \verb{"universal}": make the names unique and syntactic \item a function: apply custom name repair. 
\item \link{tidyr_legacy}: use the name repair from tidyr 0.8. \item a formula: a purrr-style anonymous function (see \code{\link[rlang:as_function]{rlang::as_function()}}) } See \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}} for more details on these terms and the strategies used to enforce them.} \item{.drop, .preserve}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}: all list-columns are now preserved; If there are any that you don't want in the output use \code{select()} to remove them prior to unnesting.} \item{.id}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}: convert \code{df \%>\% unnest(x, .id = "id")} to \verb{df \%>\% mutate(id = names(x)) \%>\% unnest(x))}.} \item{.sep}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}: use \code{names_sep} instead.} } \description{ Unnest expands a list-column containing data frames into rows and columns. } \section{New syntax}{ tidyr 1.0.0 introduced a new syntax for \code{nest()} and \code{unnest()} that's designed to be more similar to other functions. Converting to the new syntax should be straightforward (guided by the message you'll receive) but if you just need to run an old analysis, you can easily revert to the previous behaviour using \code{\link[=nest_legacy]{nest_legacy()}} and \code{\link[=unnest_legacy]{unnest_legacy()}} as follows: \if{html}{\out{
}}\preformatted{library(tidyr) nest <- nest_legacy unnest <- unnest_legacy }\if{html}{\out{
}} } \examples{ # unnest() is designed to work with lists of data frames df <- tibble( x = 1:3, y = list( NULL, tibble(a = 1, b = 2), tibble(a = 1:3, b = 3:1, c = 4) ) ) # unnest() recycles input rows for each row of the list-column # and adds a column for each column df \%>\% unnest(y) # input rows with 0 rows in the list-column will usually disappear, # but you can keep them (generating NAs) with keep_empty = TRUE: df \%>\% unnest(y, keep_empty = TRUE) # Multiple columns ---------------------------------------------------------- # You can unnest multiple columns simultaneously df <- tibble( x = 1:2, y = list( tibble(a = 1, b = 2), tibble(a = 3:4, b = 5:6) ), z = list( tibble(c = 1, d = 2), tibble(c = 3:4, d = 5:6) ) ) df \%>\% unnest(c(y, z)) # Compare with unnesting one column at a time, which generates # the Cartesian product df \%>\% unnest(y) \%>\% unnest(z) } \seealso{ Other rectangling: \code{\link{hoist}()}, \code{\link{unnest_longer}()}, \code{\link{unnest_wider}()} } \concept{rectangling} tidyr/man/tidyr-package.Rd0000644000176200001440000000237114520546620015202 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidyr.R \docType{package} \name{tidyr-package} \alias{tidyr} \alias{tidyr-package} \title{tidyr: Tidy Messy Data} \description{ \if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}} Tools to help to create tidy data, where each column is a variable, each row is an observation, and each cell contains a single value. 'tidyr' contains tools for changing the shape (pivoting) and hierarchy (nesting and 'unnesting') of a dataset, turning deeply nested lists into rectangular data frames ('rectangling'), and extracting values out of string columns. It also includes tools for working with missing values (both implicit and explicit). 
} \seealso{ Useful links: \itemize{ \item \url{https://tidyr.tidyverse.org} \item \url{https://github.com/tidyverse/tidyr} \item Report bugs at \url{https://github.com/tidyverse/tidyr/issues} } } \author{ \strong{Maintainer}: Hadley Wickham \email{hadley@posit.co} Authors: \itemize{ \item Davis Vaughan \email{davis@posit.co} \item Maximilian Girlich } Other contributors: \itemize{ \item Kevin Ushey \email{kevin@posit.co} [contributor] \item Posit Software, PBC [copyright holder, funder] } } \keyword{internal} tidyr/man/tidyr_tidy_select.Rd0000644000176200001440000000612114553563425016206 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/doc-params.R \name{tidyr_tidy_select} \alias{tidyr_tidy_select} \title{Argument type: tidy-select} \description{ This page describes the \verb{<tidy-select>} argument modifier which indicates that the argument uses \strong{tidy selection}, a sub-type of tidy evaluation. If you've never heard of tidy evaluation before, start with the practical introduction in \url{https://r4ds.hadley.nz/functions.html#data-frame-functions} and then read more about the underlying theory in \url{https://rlang.r-lib.org/reference/topic-data-mask.html}. } \section{Overview of selection features}{ tidyselect implements a DSL for selecting variables. It provides helpers for selecting variables: \itemize{ \item \code{var1:var10}: variables lying between \code{var1} on the left and \code{var10} on the right. } \itemize{ \item \code{\link[tidyselect:starts_with]{starts_with("a")}}: names that start with \code{"a"}. \item \code{\link[tidyselect:starts_with]{ends_with("z")}}: names that end with \code{"z"}. \item \code{\link[tidyselect:starts_with]{contains("b")}}: names that contain \code{"b"}. \item \code{\link[tidyselect:starts_with]{matches("x.y")}}: names that match regular expression \code{x.y}.
\item \code{\link[tidyselect:starts_with]{num_range(x, 1:4)}}: names following the pattern, \code{x1}, \code{x2}, ..., \code{x4}. \item \code{\link[tidyselect:all_of]{all_of(vars)}}/\code{\link[tidyselect:all_of]{any_of(vars)}}: matches names stored in the character vector \code{vars}. \code{all_of(vars)} will error if the variables aren't present; \code{any_of(var)} will match just the variables that exist. \item \code{\link[tidyselect:everything]{everything()}}: all variables. \item \code{\link[tidyselect:everything]{last_col()}}: furthest column on the right. \item \code{\link[tidyselect:where]{where(is.numeric)}}: all variables where \code{is.numeric()} returns \code{TRUE}. } As well as operators for combining those selections: \itemize{ \item \code{!selection}: only variables that don't match \code{selection}. \item \code{selection1 & selection2}: only variables included in both \code{selection1} and \code{selection2}. \item \code{selection1 | selection2}: all variables that match either \code{selection1} or \code{selection2}. } } \section{Key techniques}{ \itemize{ \item If you want the user to supply a tidyselect specification in a function argument, you need to tunnel the selection through the function argument. This is done by embracing the function argument \code{{{ }}}, e.g \code{unnest(df, {{ vars }})}. \item If you have a character vector of column names, use \code{all_of()} or \code{any_of()}, depending on whether or not you want unknown variable names to cause an error, e.g \code{unnest(df, all_of(vars))}, \code{unnest(df, !any_of(vars))}. \item To suppress \verb{R CMD check} \code{NOTE}s about unknown variables use \code{"var"} instead of \code{var}: } \if{html}{\out{
}}\preformatted{# has NOTE df \%>\% select(x, y, z) # no NOTE df \%>\% select("x", "y", "z") }\if{html}{\out{
}} } \keyword{internal} tidyr/man/unnest_longer.Rd0000644000176200001440000001300214357024447015337 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unnest-longer.R \name{unnest_longer} \alias{unnest_longer} \title{Unnest a list-column into rows} \usage{ unnest_longer( data, col, values_to = NULL, indices_to = NULL, indices_include = NULL, keep_empty = FALSE, names_repair = "check_unique", simplify = TRUE, ptype = NULL, transform = NULL ) } \arguments{ \item{data}{A data frame.} \item{col}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> List-column(s) to unnest. When selecting multiple columns, values from the same row will be recycled to their common size.} \item{values_to}{A string giving the column name (or names) to store the unnested values in. If multiple columns are specified in \code{col}, this can also be a glue string containing \code{"{col}"} to provide a template for the column names. The default, \code{NULL}, gives the output columns the same names as the input columns.} \item{indices_to}{A string giving the column name (or names) to store the inner names or positions (if not named) of the values. If multiple columns are specified in \code{col}, this can also be a glue string containing \code{"{col}"} to provide a template for the column names. The default, \code{NULL}, gives the output columns the same names as \code{values_to}, but suffixed with \code{"_id"}.} \item{indices_include}{A single logical value specifying whether or not to add an index column. If any value has inner names, the index column will be a character vector of those names, otherwise it will be an integer vector of positions. If \code{NULL}, defaults to \code{TRUE} if any value has inner names or if \code{indices_to} is provided. If \code{indices_to} is provided, then \code{indices_include} can't be \code{FALSE}.} \item{keep_empty}{By default, you get one row of output for each element of the list that you are unchopping/unnesting. 
This means that if there's a size-0 element (like \code{NULL} or an empty data frame or vector), then that entire row will be dropped from the output. If you want to preserve all rows, use \code{keep_empty = TRUE} to replace size-0 elements with a single row of missing values.} \item{names_repair}{Used to check that output data frame has valid names. Must be one of the following options: \itemize{ \item \verb{"minimal}": no name repair or checks, beyond basic existence, \item \verb{"unique}": make sure names are unique and not empty, \item \verb{"check_unique}": (the default), no name repair, but check they are unique, \item \verb{"universal}": make the names unique and syntactic \item a function: apply custom name repair. \item \link{tidyr_legacy}: use the name repair from tidyr 0.8. \item a formula: a purrr-style anonymous function (see \code{\link[rlang:as_function]{rlang::as_function()}}) } See \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}} for more details on these terms and the strategies used to enforce them.} \item{simplify}{If \code{TRUE}, will attempt to simplify lists of length-1 vectors to an atomic vector. Can also be a named list containing \code{TRUE} or \code{FALSE} declaring whether or not to attempt to simplify a particular column. If a named list is provided, the default for any unspecified columns is \code{TRUE}.} \item{ptype}{Optionally, a named list of prototypes declaring the desired output type of each component. Alternatively, a single empty prototype can be supplied, which will be applied to all components. Use this argument if you want to check that each element has the type you expect when simplifying. If a \code{ptype} has been specified, but \code{simplify = FALSE} or simplification isn't possible, then a \link[vctrs:list_of]{list-of} column will be returned and each element will have type \code{ptype}.} \item{transform}{Optionally, a named list of transformation functions applied to each component. 
Alternatively, a single function can be supplied, which will be applied to all components. Use this argument if you want to transform or parse individual elements as they are extracted. When both \code{ptype} and \code{transform} are supplied, the \code{transform} is applied before the \code{ptype}.} } \description{ \code{unnest_longer()} turns each element of a list-column into a row. It is most naturally suited to list-columns where the elements are unnamed and the length of each element varies from row to row. \code{unnest_longer()} generally preserves the number of columns of \code{x} while modifying the number of rows. Learn more in \code{vignette("rectangle")}. } \examples{ # `unnest_longer()` is useful when each component of the list should # form a row df <- tibble( x = 1:4, y = list(NULL, 1:3, 4:5, integer()) ) df \%>\% unnest_longer(y) # Note that empty values like `NULL` and `integer()` are dropped by # default. If you'd like to keep them, set `keep_empty = TRUE`. df \%>\% unnest_longer(y, keep_empty = TRUE) # If the inner vectors are named, the names are copied to an `_id` column df <- tibble( x = 1:2, y = list(c(a = 1, b = 2), c(a = 10, b = 11, c = 12)) ) df \%>\% unnest_longer(y) # Multiple columns ---------------------------------------------------------- # If columns are aligned, you can unnest simultaneously df <- tibble( x = 1:2, y = list(1:2, 3:4), z = list(5:6, 7:8) ) df \%>\% unnest_longer(c(y, z)) # This is important because sequential unnesting would generate the # Cartesian product of the rows df \%>\% unnest_longer(y) \%>\% unnest_longer(z) } \seealso{ Other rectangling: \code{\link{hoist}()}, \code{\link{unnest_wider}()}, \code{\link{unnest}()} } \concept{rectangling} tidyr/man/billboard.Rd0000644000176200001440000000117614013473313014405 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{billboard} \alias{billboard} \title{Song rankings for Billboard top 100 in the 
year 2000} \format{ A dataset with variables: \describe{ \item{artist}{Artist name} \item{track}{Song name} \item{date.entered}{Date the song entered the top 100} \item{wk1 -- wk76}{Rank of the song in each week after it entered} } } \source{ The "Whitburn" project, \url{https://waxy.org/2008/05/the_whitburn_project/}, (downloaded April 2008) } \usage{ billboard } \description{ Song rankings for Billboard top 100 in the year 2000 } \keyword{datasets} tidyr/man/extract_numeric.Rd0000644000176200001440000000056114013466035015647 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dep-extract.R \name{extract_numeric} \alias{extract_numeric} \title{Extract numeric component of variable.} \usage{ extract_numeric(x) } \arguments{ \item{x}{A character vector (or a factor).} } \description{ DEPRECATED: please use \code{readr::parse_number()} instead. } \keyword{internal} tidyr/man/separate.Rd0000644000176200001440000001064114335231102014250 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/separate.R \name{separate} \alias{separate} \title{Separate a character column into multiple columns with a regular expression or numeric locations} \usage{ separate( data, col, into, sep = "[^[:alnum:]]+", remove = TRUE, convert = FALSE, extra = "warn", fill = "warn", ... ) } \arguments{ \item{data}{A data frame.} \item{col}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Column to expand.} \item{into}{Names of new variables to create as character vector. Use \code{NA} to omit the variable in the output.} \item{sep}{Separator between columns. If character, \code{sep} is interpreted as a regular expression. The default value is a regular expression that matches any sequence of non-alphanumeric values. If numeric, \code{sep} is interpreted as character positions to split at. Positive values start at 1 at the far-left of the string; negative values start at -1 at the far-right of the string.
The length of \code{sep} should be one less than \code{into}.} \item{remove}{If \code{TRUE}, remove input column from output data frame.} \item{convert}{If \code{TRUE}, will run \code{\link[=type.convert]{type.convert()}} with \code{as.is = TRUE} on new columns. This is useful if the component columns are integer, numeric or logical. NB: this will cause string \code{"NA"}s to be converted to \code{NA}s.} \item{extra}{If \code{sep} is a character vector, this controls what happens when there are too many pieces. There are three valid options: \itemize{ \item \code{"warn"} (the default): emit a warning and drop extra values. \item \code{"drop"}: drop any extra values without a warning. \item \code{"merge"}: only splits at most \code{length(into)} times }} \item{fill}{If \code{sep} is a character vector, this controls what happens when there are not enough pieces. There are three valid options: \itemize{ \item \code{"warn"} (the default): emit a warning and fill from the right \item \code{"right"}: fill with missing values on the right \item \code{"left"}: fill with missing values on the left }} \item{...}{Additional arguments passed on to methods.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} \code{separate()} has been superseded in favour of \code{\link[=separate_wider_position]{separate_wider_position()}} and \code{\link[=separate_wider_delim]{separate_wider_delim()}} because the two functions make the two uses more obvious, the API is more polished, and the handling of problems is better. Superseded functions will not go away, but will only receive critical bug fixes. Given either a regular expression or a vector of character positions, \code{separate()} turns a single character column into multiple columns. 
} \examples{ # If you want to split by any non-alphanumeric value (the default): df <- tibble(x = c(NA, "x.y", "x.z", "y.z")) df \%>\% separate(x, c("A", "B")) # If you just want the second variable: df \%>\% separate(x, c(NA, "B")) # We now recommend separate_wider_delim() instead: df \%>\% separate_wider_delim(x, ".", names = c("A", "B")) df \%>\% separate_wider_delim(x, ".", names = c(NA, "B")) # Controlling uneven splits ------------------------------------------------- # If every row doesn't split into the same number of pieces, use # the extra and fill arguments to control what happens: df <- tibble(x = c("x", "x y", "x y z", NA)) df \%>\% separate(x, c("a", "b")) # The same behaviour as previous, but drops the c without warnings: df \%>\% separate(x, c("a", "b"), extra = "drop", fill = "right") # Opposite of previous, keeping the c and filling left: df \%>\% separate(x, c("a", "b"), extra = "merge", fill = "left") # Or you can keep all three: df \%>\% separate(x, c("a", "b", "c")) # To only split a specified number of times use extra = "merge": df <- tibble(x = c("x: 123", "y: error: 7")) df \%>\% separate(x, c("key", "value"), ": ", extra = "merge") # Controlling column types -------------------------------------------------- # convert = TRUE detects column classes: df <- tibble(x = c("x:1", "x:2", "y:4", "z", NA)) df \%>\% separate(x, c("key", "value"), ":") \%>\% str() df \%>\% separate(x, c("key", "value"), ":", convert = TRUE) \%>\% str() } \seealso{ \code{\link[=unite]{unite()}}, the complement, \code{\link[=extract]{extract()}} which uses regular expression capturing groups. 
} tidyr/man/household.Rd0000644000176200001440000000111514315413441014437 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{household} \alias{household} \title{Household data} \format{ A data frame with 5 rows and 5 columns: \describe{ \item{family}{Family identifier} \item{dob_child1}{Date of birth of first child} \item{dob_child2}{Date of birth of second child} \item{name_child1}{Name of first child} \item{name_child2}{Name of second child} } } \usage{ household } \description{ This dataset is based on an example in \code{vignette("datatable-reshape", package = "data.table")} } \keyword{datasets} tidyr/man/gather.Rd0000644000176200001440000000771114325573777013743 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gather.R \name{gather} \alias{gather} \title{Gather columns into key-value pairs} \usage{ gather( data, key = "key", value = "value", ..., na.rm = FALSE, convert = FALSE, factor_key = FALSE ) } \arguments{ \item{data}{A data frame.} \item{key, value}{Names of new key and value columns, as strings or symbols. This argument is passed by expression and supports \link[rlang:topic-inject]{quasiquotation} (you can unquote strings and symbols). The name is captured from the expression with \code{\link[rlang:defusing-advanced]{rlang::ensym()}} (note that this kind of interface where symbols do not represent actual objects is now discouraged in the tidyverse; we support it here for backward compatibility).} \item{...}{A selection of columns. If empty, all variables are selected. You can supply bare variable names, select all variables between x and z with \code{x:z}, exclude y with \code{-y}. For more options, see the \code{\link[dplyr:select]{dplyr::select()}} documentation.
See also the section on selection rules below.} \item{na.rm}{If \code{TRUE}, will remove rows from output where the value column is \code{NA}.} \item{convert}{If \code{TRUE} will automatically run \code{\link[=type.convert]{type.convert()}} on the key column. This is useful if the column types are actually numeric, integer, or logical.} \item{factor_key}{If \code{FALSE}, the default, the key values will be stored as a character vector. If \code{TRUE}, will be stored as a factor, which preserves the original ordering of the columns.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} Development on \code{gather()} is complete, and for new code we recommend switching to \code{pivot_longer()}, which is easier to use, more featureful, and still under active development. \code{df \%>\% gather("key", "value", x, y, z)} is equivalent to \code{df \%>\% pivot_longer(c(x, y, z), names_to = "key", values_to = "value")} See more details in \code{vignette("pivot")}. } \section{Rules for selection}{ Arguments for selecting columns are passed to \code{\link[tidyselect:vars_select]{tidyselect::vars_select()}} and are treated specially. Unlike other verbs, selecting functions make a strict distinction between data expressions and context expressions. \itemize{ \item A data expression is either a bare name like \code{x} or an expression like \code{x:y} or \code{c(x, y)}. In a data expression, you can only refer to columns from the data frame. \item Everything else is a context expression in which you can only refer to objects that you have defined with \verb{<-}. } For instance, \code{col1:col3} is a data expression that refers to data columns, while \code{seq(start, end)} is a context expression that refers to objects from the contexts. If you need to refer to contextual objects from a data expression, you can use \code{all_of()} or \code{any_of()}. 
These functions are used to select data-variables whose names are stored in a env-variable. For instance, \code{all_of(a)} selects the variables listed in the character vector \code{a}. For more details, see the \code{\link[tidyselect:language]{tidyselect::select_helpers()}} documentation. } \examples{ # From https://stackoverflow.com/questions/1181060 stocks <- tibble( time = as.Date("2009-01-01") + 0:9, X = rnorm(10, 0, 1), Y = rnorm(10, 0, 2), Z = rnorm(10, 0, 4) ) gather(stocks, "stock", "price", -time) stocks \%>\% gather("stock", "price", -time) # get first observation for each Species in iris data -- base R mini_iris <- iris[c(1, 51, 101), ] # gather Sepal.Length, Sepal.Width, Petal.Length, Petal.Width gather(mini_iris, key = "flower_att", value = "measurement", Sepal.Length, Sepal.Width, Petal.Length, Petal.Width) # same result but less verbose gather(mini_iris, key = "flower_att", value = "measurement", -Species) } tidyr/man/rmd/0000755000176200001440000000000014325573777012766 5ustar liggesuserstidyr/man/rmd/overview.Rmd0000644000176200001440000000253114325573777015301 0ustar liggesusers tidyselect implements a DSL for selecting variables. It provides helpers for selecting variables: - `var1:var10`: variables lying between `var1` on the left and `var10` on the right. * [`starts_with("a")`][tidyselect::starts_with]: names that start with `"a"`. * [`ends_with("z")`][tidyselect::ends_with]: names that end with `"z"`. * [`contains("b")`][tidyselect::contains]: names that contain `"b"`. * [`matches("x.y")`][tidyselect::matches]: names that match regular expression `x.y`. * [`num_range(x, 1:4)`][tidyselect::num_range]: names following the pattern, `x1`, `x2`, ..., `x4`. * [`all_of(vars)`][tidyselect::all_of]/[`any_of(vars)`][tidyselect::any_of()]: matches names stored in the character vector `vars`. `all_of(vars)` will error if the variables aren't present; `any_of(var)` will match just the variables that exist. 
* [`everything()`][tidyselect::everything]: all variables. * [`last_col()`][tidyselect::last_col]: furthest column on the right. * [`where(is.numeric)`][tidyselect::where]: all variables where `is.numeric()` returns `TRUE`. As well as operators for combining those selections: - `!selection`: only variables that don't match `selection`. - `selection1 & selection2`: only variables included in both `selection1` and `selection2`. - `selection1 | selection2`: all variables that match either `selection1` or `selection2`. tidyr/man/spread.Rd0000644000176200001440000000523114325573777013750 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spread.R \name{spread} \alias{spread} \title{Spread a key-value pair across multiple columns} \usage{ spread(data, key, value, fill = NA, convert = FALSE, drop = TRUE, sep = NULL) } \arguments{ \item{data}{A data frame.} \item{key, value}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> Columns to use for \code{key} and \code{value}.} \item{fill}{If set, missing values will be replaced with this value. Note that there are two types of missingness in the input: explicit missing values (i.e. \code{NA}), and implicit missings, rows that simply aren't present. Both types of missing value will be replaced by \code{fill}.} \item{convert}{If \code{TRUE}, \code{\link[=type.convert]{type.convert()}} with \code{as.is = TRUE} will be run on each of the new columns. This is useful if the value column was a mix of variables that was coerced to a string. If the class of the value column was factor or date, note that will not be true of the new columns that are produced, which are coerced to character before type conversion.} \item{drop}{If \code{FALSE}, will keep factor levels that don't appear in the data, filling in missing combinations with \code{fill}.} \item{sep}{If \code{NULL}, the column names will be taken from the values of \code{key} variable.
If non-\code{NULL}, the column names will be given by \code{"<key_name><sep><key_value>"}.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} Development on \code{spread()} is complete, and for new code we recommend switching to \code{pivot_wider()}, which is easier to use, more featureful, and still under active development. \code{df \%>\% spread(key, value)} is equivalent to \code{df \%>\% pivot_wider(names_from = key, values_from = value)} See more details in \code{vignette("pivot")}. } \examples{ stocks <- tibble( time = as.Date("2009-01-01") + 0:9, X = rnorm(10, 0, 1), Y = rnorm(10, 0, 2), Z = rnorm(10, 0, 4) ) stocksm <- stocks \%>\% gather(stock, price, -time) stocksm \%>\% spread(stock, price) stocksm \%>\% spread(time, price) # Spread and gather are complements df <- tibble(x = c("a", "b"), y = c(3, 4), z = c(5, 6)) df \%>\% spread(x, y) \%>\% gather("x", "y", a:b, na.rm = TRUE) # Use 'convert = TRUE' to produce variables of mixed type df <- tibble( row = rep(c(1, 51), each = 3), var = rep(c("Sepal.Length", "Species", "Species_num"), 2), value = c(5.1, "setosa", 1, 7.0, "versicolor", 2) ) df \%>\% spread(var, value) \%>\% str() df \%>\% spread(var, value, convert = TRUE) \%>\% str() } tidyr/man/unnest_auto.Rd0000644000176200001440000000175314325573777015035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unnest-auto.R \name{unnest_auto} \alias{unnest_auto} \title{Automatically call \code{unnest_wider()} or \code{unnest_longer()}} \usage{ unnest_auto(data, col) } \arguments{ \item{data}{A data frame.} \item{col}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> List-column to unnest.} } \description{ \code{unnest_auto()} picks between \code{unnest_wider()} or \code{unnest_longer()} by inspecting the inner names of the list-col: \itemize{ \item If all elements are unnamed, it uses
\code{unnest_longer(indices_include = FALSE)}. \item If all elements are named, and there's at least one name in common across all components, it uses \code{unnest_wider()}. \item Otherwise, it falls back to \code{unnest_longer(indices_include = TRUE)}. } It's handy for very rapid interactive exploration but I don't recommend using it in scripts, because it will succeed even if the underlying data radically changes. } \keyword{internal} tidyr/man/pivot_wider.Rd0000644000176200001440000001654114350635333015016 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pivot-wide.R \name{pivot_wider} \alias{pivot_wider} \title{Pivot data from long to wide} \usage{ pivot_wider( data, ..., id_cols = NULL, id_expand = FALSE, names_from = name, names_prefix = "", names_sep = "_", names_glue = NULL, names_sort = FALSE, names_vary = "fastest", names_expand = FALSE, names_repair = "check_unique", values_from = value, values_fill = NULL, values_fn = NULL, unused_fn = NULL ) } \arguments{ \item{data}{A data frame to pivot.} \item{...}{Additional arguments passed on to methods.} \item{id_cols}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> A set of columns that uniquely identify each observation. Typically used when you have redundant variables, i.e. variables whose values are perfectly correlated with existing variables. Defaults to all columns in \code{data} except for the columns specified through \code{names_from} and \code{values_from}. If a tidyselect expression is supplied, it will be evaluated on \code{data} after removing the columns specified through \code{names_from} and \code{values_from}.} \item{id_expand}{Should the values in the \code{id_cols} columns be expanded by \code{\link[=expand]{expand()}} before pivoting? This results in more rows, the output will contain a complete expansion of all possible values in \code{id_cols}. Implicit factor levels that aren't represented in the data will become explicit. 
Additionally, the row values corresponding to the expanded \code{id_cols} will be sorted.} \item{names_from, values_from}{<\code{\link[=tidyr_tidy_select]{tidy-select}}> A pair of arguments describing which column (or columns) to get the name of the output column (\code{names_from}), and which column (or columns) to get the cell values from (\code{values_from}). If \code{values_from} contains multiple values, the value will be added to the front of the output column.} \item{names_prefix}{String added to the start of every variable name. This is particularly useful if \code{names_from} is a numeric vector and you want to create syntactic variable names.} \item{names_sep}{If \code{names_from} or \code{values_from} contains multiple variables, this will be used to join their values together into a single string to use as a column name.} \item{names_glue}{Instead of \code{names_sep} and \code{names_prefix}, you can supply a glue specification that uses the \code{names_from} columns (and special \code{.value}) to create custom column names.} \item{names_sort}{Should the column names be sorted? If \code{FALSE}, the default, column names are ordered by first appearance.} \item{names_vary}{When \code{names_from} identifies a column (or columns) with multiple unique values, and multiple \code{values_from} columns are provided, in what order should the resulting column names be combined? \itemize{ \item \code{"fastest"} varies \code{names_from} values fastest, resulting in a column naming scheme of the form: \verb{value1_name1, value1_name2, value2_name1, value2_name2}. This is the default. \item \code{"slowest"} varies \code{names_from} values slowest, resulting in a column naming scheme of the form: \verb{value1_name1, value2_name1, value1_name2, value2_name2}. }} \item{names_expand}{Should the values in the \code{names_from} columns be expanded by \code{\link[=expand]{expand()}} before pivoting? 
This results in more columns, the output will contain column names corresponding to a complete expansion of all possible values in \code{names_from}. Implicit factor levels that aren't represented in the data will become explicit. Additionally, the column names will be sorted, identical to what \code{names_sort} would produce.} \item{names_repair}{What happens if the output has invalid column names? The default, \code{"check_unique"} is to error if the columns are duplicated. Use \code{"minimal"} to allow duplicates in the output, or \code{"unique"} to de-duplicated by adding numeric suffixes. See \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}} for more options.} \item{values_fill}{Optionally, a (scalar) value that specifies what each \code{value} should be filled in with when missing. This can be a named list if you want to apply different fill values to different value columns.} \item{values_fn}{Optionally, a function applied to the value in each cell in the output. You will typically use this when the combination of \code{id_cols} and \code{names_from} columns does not uniquely identify an observation. This can be a named list if you want to apply different aggregations to different \code{values_from} columns.} \item{unused_fn}{Optionally, a function applied to summarize the values from the unused columns (i.e. columns not identified by \code{id_cols}, \code{names_from}, or \code{values_from}). The default drops all unused columns from the result. This can be a named list if you want to apply different aggregations to different unused columns. \code{id_cols} must be supplied for \code{unused_fn} to be useful, since otherwise all unspecified columns will be considered \code{id_cols}. This is similar to grouping by the \code{id_cols} then summarizing the unused columns using \code{unused_fn}.} } \description{ \code{pivot_wider()} "widens" data, increasing the number of columns and decreasing the number of rows. 
The inverse transformation is \code{\link[=pivot_longer]{pivot_longer()}}. Learn more in \code{vignette("pivot")}. } \details{ \code{pivot_wider()} is an updated approach to \code{\link[=spread]{spread()}}, designed to be both simpler to use and to handle more use cases. We recommend you use \code{pivot_wider()} for new code; \code{spread()} isn't going away but is no longer under active development. } \examples{ # See vignette("pivot") for examples and explanation fish_encounters fish_encounters \%>\% pivot_wider(names_from = station, values_from = seen) # Fill in missing values fish_encounters \%>\% pivot_wider(names_from = station, values_from = seen, values_fill = 0) # Generate column names from multiple variables us_rent_income us_rent_income \%>\% pivot_wider( names_from = variable, values_from = c(estimate, moe) ) # You can control whether `names_from` values vary fastest or slowest # relative to the `values_from` column names using `names_vary`. us_rent_income \%>\% pivot_wider( names_from = variable, values_from = c(estimate, moe), names_vary = "slowest" ) # When there are multiple `names_from` or `values_from`, you can use # use `names_sep` or `names_glue` to control the output variable names us_rent_income \%>\% pivot_wider( names_from = variable, names_sep = ".", values_from = c(estimate, moe) ) us_rent_income \%>\% pivot_wider( names_from = variable, names_glue = "{variable}_{.value}", values_from = c(estimate, moe) ) # Can perform aggregation with `values_fn` warpbreaks <- as_tibble(warpbreaks[c("wool", "tension", "breaks")]) warpbreaks warpbreaks \%>\% pivot_wider( names_from = wool, values_from = breaks, values_fn = mean ) # Can pass an anonymous function to `values_fn` when you # need to supply additional arguments warpbreaks$breaks[1] <- NA warpbreaks \%>\% pivot_wider( names_from = wool, values_from = breaks, values_fn = ~ mean(.x, na.rm = TRUE) ) } \seealso{ \code{\link[=pivot_wider_spec]{pivot_wider_spec()}} to pivot "by hand" with a data frame 
that defines a pivoting specification. } tidyr/DESCRIPTION0000644000176200001440000000351414554221641013122 0ustar liggesusersPackage: tidyr Title: Tidy Messy Data Version: 1.3.1 Authors@R: c( person("Hadley", "Wickham", , "hadley@posit.co", role = c("aut", "cre")), person("Davis", "Vaughan", , "davis@posit.co", role = "aut"), person("Maximilian", "Girlich", role = "aut"), person("Kevin", "Ushey", , "kevin@posit.co", role = "ctb"), person("Posit Software, PBC", role = c("cph", "fnd")) ) Description: Tools to help to create tidy data, where each column is a variable, each row is an observation, and each cell contains a single value. 'tidyr' contains tools for changing the shape (pivoting) and hierarchy (nesting and 'unnesting') of a dataset, turning deeply nested lists into rectangular data frames ('rectangling'), and extracting values out of string columns. It also includes tools for working with missing values (both implicit and explicit). License: MIT + file LICENSE URL: https://tidyr.tidyverse.org, https://github.com/tidyverse/tidyr BugReports: https://github.com/tidyverse/tidyr/issues Depends: R (>= 3.6) Imports: cli (>= 3.4.1), dplyr (>= 1.0.10), glue, lifecycle (>= 1.0.3), magrittr, purrr (>= 1.0.1), rlang (>= 1.1.1), stringr (>= 1.5.0), tibble (>= 2.1.1), tidyselect (>= 1.2.0), utils, vctrs (>= 0.5.2) Suggests: covr, data.table, knitr, readr, repurrrsive (>= 1.1.0), rmarkdown, testthat (>= 3.0.0) LinkingTo: cpp11 (>= 0.4.0) VignetteBuilder: knitr Config/Needs/website: tidyverse/tidytemplate Config/testthat/edition: 3 Encoding: UTF-8 LazyData: true RoxygenNote: 7.3.0 NeedsCompilation: yes Packaged: 2024-01-23 14:27:23 UTC; hadleywickham Author: Hadley Wickham [aut, cre], Davis Vaughan [aut], Maximilian Girlich [aut], Kevin Ushey [ctb], Posit Software, PBC [cph, fnd] Maintainer: Hadley Wickham Repository: CRAN Date/Publication: 2024-01-24 14:50:09 UTC tidyr/build/0000755000176200001440000000000014553746313012517 5ustar 
liggesuserstidyr/build/vignette.rds0000644000176200001440000000050214553746313015053 0ustar liggesusersRn0tR@QRz%V ct!ް=15by,=`ZH5`!k6-y).,R3h(nuSj^R We%(idqt = H ec6˧4tx #|'4+, Dt0 ̀sSXFK,= xs?R$}? <ʍ5tٽy\vfPճ8 (>a뷣:B2zlOpuHtidyr/tests/0000755000176200001440000000000014013466035012551 5ustar liggesuserstidyr/tests/testthat/0000755000176200001440000000000014554221641014413 5ustar liggesuserstidyr/tests/testthat/test-pivot-wide.R0000644000176200001440000005516514553563421017622 0ustar liggesuserstest_that("can pivot all cols to wide", { df <- tibble(key = c("x", "y", "z"), val = 1:3) pv <- pivot_wider(df, names_from = key, values_from = val) expect_named(pv, c("x", "y", "z")) expect_equal(nrow(pv), 1) }) test_that("non-pivoted cols are preserved", { df <- tibble(a = 1, key = c("x", "y"), val = 1:2) pv <- pivot_wider(df, names_from = key, values_from = val) expect_named(pv, c("a", "x", "y")) expect_equal(nrow(pv), 1) }) test_that("implicit missings turn into explicit missings", { df <- tibble(a = 1:2, key = c("x", "y"), val = 1:2) pv <- pivot_wider(df, names_from = key, values_from = val) expect_equal(pv$a, c(1, 2)) expect_equal(pv$x, c(1, NA)) expect_equal(pv$y, c(NA, 2)) }) test_that("error when overwriting existing column", { df <- tibble( a = c(1, 1), key = c("a", "b"), val = c(1, 2) ) expect_snapshot( (expect_error(pivot_wider(df, names_from = key, values_from = val))) ) expect_snapshot( out <- pivot_wider(df, names_from = key, values_from = val, names_repair = "unique") ) expect_named(out, c("a...1", "a...2", "b")) }) test_that("`names_repair` happens after spec column reorganization (#1107)", { df <- tibble( test = c("a", "b"), name = c("test", "test2"), value = c(1, 2) ) out <- pivot_wider(df, names_repair = ~ make.unique(.x)) expect_identical(out$test, c("a", "b")) expect_identical(out$test.1, c(1, NA)) expect_identical(out$test2, c(NA, 2)) }) test_that("minimal `names_repair` doesn't overwrite a value column that collides with key column (#1107)", { df <- 
tibble( test = c("a", "b"), name = c("test", "test2"), value = c(1, 2) ) out <- pivot_wider(df, names_repair = "minimal") expect_identical(out[[1]], c("a", "b")) expect_identical(out[[2]], c(1, NA)) expect_identical(out[[3]], c(NA, 2)) }) test_that("grouping is preserved", { df <- tibble(g = 1, k = "x", v = 2) out <- df %>% dplyr::group_by(g) %>% pivot_wider(names_from = k, values_from = v) expect_equal(dplyr::group_vars(out), "g") }) # https://github.com/tidyverse/tidyr/issues/804 test_that("column with `...j` name can be used as `names_from`", { df <- tibble(...8 = c("x", "y", "z"), val = 1:3) pv <- pivot_wider(df, names_from = ...8, values_from = val) expect_named(pv, c("x", "y", "z")) expect_equal(nrow(pv), 1) }) test_that("data frame columns pivot correctly", { df <- tibble( i = c(1, 2, 1, 2), g = c("a", "a", "b", "b"), d = tibble(x = 1:4, y = 5:8) ) out <- pivot_wider(df, names_from = g, values_from = d) expect_equal(out$a$x, 1:2) expect_equal(out$b$y, 7:8) }) test_that("works with data.table and empty key_vars", { skip_if_not_installed("data.table") dt <- data.table::data.table(n = "a", v = 1) expect_equal( pivot_wider(dt, names_from = n, values_from = v), tibble(a = 1) ) }) test_that("`names_from` must be supplied if `name` isn't in `data` (#1240)", { df <- tibble(key = "x", val = 1) expect_snapshot((expect_error(pivot_wider(df, values_from = val)))) }) test_that("`values_from` must be supplied if `value` isn't in `data` (#1240)", { df <- tibble(key = "x", val = 1) expect_snapshot((expect_error(pivot_wider(df, names_from = key)))) }) test_that("`names_from` must identify at least 1 column (#1240)", { df <- tibble(key = "x", val = 1) expect_snapshot( (expect_error(pivot_wider(df, names_from = starts_with("foo"), values_from = val))) ) }) test_that("`values_from` must identify at least 1 column (#1240)", { df <- tibble(key = "x", val = 1) expect_snapshot( (expect_error(pivot_wider(df, names_from = key, values_from = starts_with("foo")))) ) }) 
test_that("`values_fn` emits an informative error when it doesn't result in unique values (#1238)", { df <- tibble(name = c("a", "a"), value = c(1, 2)) expect_snapshot( (expect_error(pivot_wider(df, values_fn = list(value = ~.x)))) ) }) test_that("can pivot a manual spec with spec columns that don't identify any rows (#1250)", { # Looking for `x = 1L` spec <- tibble(.name = "name", .value = "value", x = 1L) # But that doesn't exist here... df <- tibble(key = "a", value = 1L, x = 2L) expect_identical( pivot_wider_spec(df, spec, id_cols = key), tibble(key = "a", name = NA_integer_) ) # ...or here df <- tibble(key = character(), value = integer(), x = integer()) expect_identical( pivot_wider_spec(df, spec, id_cols = key), tibble(key = character(), name = integer()) ) }) test_that("pivoting with a manual spec and zero rows results in zero rows (#1252)", { spec <- tibble(.name = "name", .value = "value", x = 1L) df <- tibble(value = integer(), x = integer()) expect_identical(pivot_wider_spec(df, spec), tibble(name = integer())) }) test_that("can use `names_expand` to get sorted and expanded column names (#770)", { name1 <- factor(c(NA, "x"), levels = c("x", "y")) df <- tibble(name1 = name1, name2 = c("c", "d"), value = c(1, 2)) na <- NA_real_ expect_identical( pivot_wider(df, names_from = c(name1, name2), names_expand = TRUE), tibble(x_c = na, x_d = 2, y_c = na, y_d = na, NA_c = 1, NA_d = na) ) }) test_that("can fill only implicit missings from `names_expand`", { name1 <- factor(c(NA, "x"), levels = c("x", "y")) df <- tibble(name1 = name1, name2 = c("c", "d"), value = c(1, NA)) res <- pivot_wider( data = df, names_from = c(name1, name2), names_expand = TRUE, values_fill = list(value = 0) ) # But not the explicit missing! 
expect_identical( res, tibble(x_c = 0, x_d = NA_real_, y_c = 0, y_d = 0, NA_c = 1, NA_d = 0) ) }) test_that("expansion with `id_expand` and `names_expand` works with zero row data frames", { df <- tibble( id = factor(levels = c("b", "a")), name = factor(levels = c("a", "b")), value = integer() ) res <- pivot_wider(df, names_expand = TRUE, id_expand = TRUE) expect_identical(res$id, factor(c("b", "a"), levels = c("b", "a"))) expect_identical(res$a, c(NA_integer_, NA_integer_)) expect_identical(res$b, c(NA_integer_, NA_integer_)) }) test_that("`build_wider_spec()` requires empty dots", { df <- tibble(name = c("x", "y", "z"), value = 1:3) expect_snapshot({ (expect_error(build_wider_spec(df, 1))) (expect_error(build_wider_spec(df, name_prefix = ""))) }) }) test_that("`pivot_wider_spec()` requires empty dots", { df <- tibble(name = c("x", "y", "z"), value = 1:3) spec <- build_wider_spec(df) expect_snapshot({ (expect_error(pivot_wider_spec(df, spec, 1))) (expect_error(pivot_wider_spec(df, spec, name_repair = "check_unique"))) }) }) # column names ------------------------------------------------------------- test_that("names_glue affects output names", { df <- tibble( x = c("X", "Y"), y = 1:2, a = 1:2, b = 1:2 ) spec <- build_wider_spec( df, names_from = x:y, values_from = a:b, names_glue = "{x}{y}_{.value}" ) expect_equal(spec$.name, c("X1_a", "Y2_a", "X1_b", "Y2_b")) }) test_that("can sort column names", { df <- tibble( int = c(1, 3, 2), fac = factor(int, levels = 1:3, labels = c("Mon", "Tue", "Wed")), ) spec <- build_wider_spec(df, names_from = fac, values_from = int, names_sort = TRUE ) expect_equal(spec$.name, levels(df$fac)) }) test_that("can vary `names_from` values slowest (#839)", { df <- tibble( name = c("name1", "name2"), value1 = c(1, 2), value2 = c(4, 5) ) spec <- build_wider_spec(df, names_from = name, values_from = c(value1, value2)) expect_identical( spec$.name, c("value1_name1", "value1_name2", "value2_name1", "value2_name2") ) spec <- build_wider_spec(df, 
names_from = name, values_from = c(value1, value2), names_vary = "slowest") expect_identical( spec$.name, c("value1_name1", "value2_name1", "value1_name2", "value2_name2") ) }) test_that("`names_vary` is validated", { df <- tibble(name = c("a", "b"), value = c(1, 2)) expect_snapshot({ (expect_error(build_wider_spec(df, names_vary = 1))) (expect_error(build_wider_spec(df, names_vary = "x"))) }) }) test_that("`names_expand` generates sorted column names even if no expansion is done", { df <- tibble(name = c(2, 1), value = c(1, 2)) spec <- build_wider_spec(df, names_expand = TRUE) expect_identical(spec$.name, c("1", "2")) }) test_that("`names_expand` does a cartesian expansion of `names_from` columns (#770)", { df <- tibble(name1 = c("a", "b"), name2 = c("c", "d"), value = c(1, 2)) spec <- build_wider_spec(df, names_from = c(name1, name2), names_expand = TRUE) expect_identical(spec$.name, c("a_c", "a_d", "b_c", "b_d")) }) test_that("`names_expand` expands all levels of a factor `names_from` column (#770)", { name1 <- factor(c(NA, "x"), levels = c("x", "y")) df <- tibble(name1 = name1, name2 = c("c", "d"), value = c(1, 2)) spec <- build_wider_spec(df, names_from = c(name1, name2), names_expand = TRUE) expect_identical(spec$.name, c("x_c", "x_d", "y_c", "y_d", "NA_c", "NA_d")) }) test_that("`names_expand` is validated", { df <- tibble(name = c("a", "b"), value = c(1, 2)) expect_snapshot({ (expect_error(build_wider_spec(df, names_expand = 1))) (expect_error(build_wider_spec(df, names_expand = "x"))) }) }) # keys --------------------------------------------------------- test_that("can override default keys", { df <- tribble( ~row, ~name, ~var, ~value, 1, "Sam", "age", 10, 2, "Sam", "height", 1.5, 3, "Bob", "age", 20, ) pv <- df %>% pivot_wider(id_cols = name, names_from = var, values_from = value) expect_equal(nrow(pv), 2) }) test_that("`id_cols = everything()` excludes `names_from` and `values_from`", { df <- tibble(key = "x", name = "a", value = 1L) expect_identical( 
pivot_wider(df, id_cols = everything()), tibble(key = "x", a = 1L) ) spec <- build_wider_spec(df) expect_identical( pivot_wider_spec(df, spec, id_cols = everything()), tibble(key = "x", a = 1L) ) }) test_that("`id_cols` can't select columns from `names_from` or `values_from` (#1318)", { df <- tibble(name = c("x", "y"), value = c(1, 2)) # And gives a nice error message! expect_snapshot({ (expect_error(pivot_wider(df, id_cols = name, names_from = name, values_from = value))) (expect_error(pivot_wider(df, id_cols = value, names_from = name, values_from = value))) }) }) test_that("`id_cols` returns a tidyselect error if a column selection is OOB (#1318)", { df <- tibble(name = c("x", "y"), value = c(1, 2)) expect_snapshot( (expect_error(pivot_wider(df, id_cols = foo))) ) }) test_that("named `id_cols` gives clear error (#1104)", { df <- tibble(name = c("x", "y"), value = c(1, 2), x = 1, y = 2) expect_snapshot(pivot_wider(df, id_cols = c(z = x)), error = TRUE) }) test_that("pivoting a zero row data frame drops `names_from` and `values_from` (#1249)", { df <- tibble(key = character(), name = character(), value = integer()) expect_identical( pivot_wider(df, names_from = name, values_from = value), tibble(key = character()) ) }) test_that("known bug - building a wider spec with a zero row data frame loses `values_from` info (#1249)", { # We can't currently change this behavior in `pivot_wider_spec()`, # for fear of breaking backwards compatibility df <- tibble(key = character(), name = character(), value = integer()) # Building the spec loses the fact that `value` was specified as `values_from`, # which would normally be in the `spec$.value` column spec <- build_wider_spec(df, names_from = name, values_from = value) # So pivoting with this spec accidentally keeps `value` around expect_identical( pivot_wider_spec(df, spec), tibble(key = character(), value = integer()) ) # If you specify `id_cols` to be the `key` column, it works right expect_identical( pivot_wider_spec(df, 
spec, id_cols = key), tibble(key = character()) ) # But `id_cols = everything()` won't work as intended, because we can't know # to remove `value` from `names(data)` before computing the tidy-selection expect_identical( pivot_wider_spec(df, spec, id_cols = everything()), tibble(key = character(), value = integer()) ) }) test_that("`id_expand` generates sorted rows even if no expansion is done", { df <- tibble(id = c(2, 1), name = c("a", "b"), value = c(1, 2)) res <- pivot_wider(df, id_expand = TRUE) expect_identical(res$id, c(1, 2)) }) test_that("`id_expand` does a cartesian expansion of `id_cols` columns (#770)", { df <- tibble(id1 = c(1, 2), id2 = c(3, 4), name = c("a", "b"), value = c(1, 2)) expect_identical( pivot_wider(df, id_expand = TRUE), tibble( id1 = c(1, 1, 2, 2), id2 = c(3, 4, 3, 4), a = c(1, NA, NA, NA), b = c(NA, NA, NA, 2), ) ) }) test_that("`id_expand` expands all levels of a factor `id_cols` column (#770)", { id1 <- factor(c(NA, "x"), levels = c("x", "y")) df <- tibble(id1 = id1, id2 = c(1, 2), name = c("a", "b"), value = c(1, 2)) res <- pivot_wider(df, id_expand = TRUE) expect_identical(res$id1, factor(c("x", "x", "y", "y", NA, NA))) expect_identical(res$id2, c(1, 2, 1, 2, 1, 2)) }) test_that("`id_expand` with `values_fill` only fills implicit missings", { id1 <- factor(c("x", "x"), levels = c("x", "y")) df <- tibble(id1 = id1, id2 = c(1, 2), name = c("a", "b"), value = c(1, NA)) res <- pivot_wider(df, id_expand = TRUE, values_fill = 0) expect_identical(res$a, c(1, 0, 0, 0)) expect_identical(res$b, c(0, NA, 0, 0)) }) test_that("`id_expand` with `values_fill` can't accidentally fill missings in `id_cols`", { id1 <- factor(c(NA, "x"), levels = c("x", "y")) df <- tibble(id1 = id1, id2 = c(1, 2), name = c("a", "b"), value = c(1, 2)) res <- pivot_wider(df, id_expand = TRUE, values_fill = list(id1 = 0)) # Still has NAs! 
Both implicit (new combination) and explicit (pre-existing combination) expect_identical(res$id1, factor(c("x", "x", "y", "y", NA, NA))) }) test_that("`id_expand` is validated", { df <- tibble(name = c("a", "b"), value = c(1, 2)) expect_snapshot({ (expect_error(pivot_wider(df, id_expand = 1))) (expect_error(pivot_wider(df, id_expand = "x"))) }) }) # non-unique keys --------------------------------------------------------- test_that("duplicated keys produce list column with warning", { df <- tibble(a = c(1, 1, 2), key = c("x", "x", "x"), val = 1:3) expect_snapshot(pv <- pivot_wider(df, names_from = key, values_from = val)) expect_equal(pv$a, c(1, 2)) expect_equal(as.list(pv$x), list(c(1L, 2L), 3L)) }) test_that("duplicated key warning mentions every applicable column", { df <- tibble( key = c("x", "x"), a = c(1, 2), b = c(3, 4), c = c(5, 6) ) expect_snapshot( pivot_wider( df, names_from = key, values_from = c(a, b, c) ) ) expect_snapshot( pivot_wider( df, names_from = key, values_from = c(a, b, c), values_fn = list(b = sum) ) ) }) test_that("duplicated key warning backticks non-syntactic names", { df <- tibble( `a 1` = c(1, 1, 2), a2 = c(1, 1, 2), `the-key` = c("x", "x", "x"), val = 1:3 ) expect_snapshot(pv <- pivot_wider(df, names_from = `the-key`, values_from = val)) }) test_that("warning suppressed by supplying values_fn", { df <- tibble(a = c(1, 1, 2), key = c("x", "x", "x"), val = 1:3) expect_warning( pv <- pivot_wider(df, names_from = key, values_from = val, values_fn = list(val = list) ), NA ) expect_equal(pv$a, c(1, 2)) expect_equal(as.list(pv$x), list(c(1L, 2L), 3L)) }) test_that("values_fn can be a single function", { df <- tibble(a = c(1, 1, 2), key = c("x", "x", "x"), val = c(1, 10, 100)) pv <- pivot_wider(df, names_from = key, values_from = val, values_fn = sum) expect_equal(pv$x, c(11, 100)) }) test_that("values_fn can be an anonymous function (#1114)", { df <- tibble(a = c(1, 1, 2), key = c("x", "x", "x"), val = c(1, 10, 100)) pv <- pivot_wider(df, 
names_from = key, values_from = val, values_fn = ~ sum(.x)) expect_equal(pv$x, c(11, 100)) }) test_that("values_fn applied even when no-duplicates", { df <- tibble(a = c(1, 2), key = c("x", "x"), val = 1:2) pv <- pivot_wider(df, names_from = key, values_from = val, values_fn = list(val = list) ) expect_equal(pv$a, c(1, 2)) expect_equal(as.list(pv$x), list(1L, 2L)) }) test_that("values_fn is validated", { df <- tibble(name = "x", value = 1L) expect_snapshot( (expect_error(pivot_wider(df, values_fn = 1))) ) }) # can fill missing cells -------------------------------------------------- test_that("can fill in missing cells", { df <- tibble(g = c(1, 2), var = c("x", "y"), val = c(1, 2)) widen <- function(...) { df %>% pivot_wider(names_from = var, values_from = val, ...) } expect_equal(widen()$x, c(1, NA)) expect_equal(widen(values_fill = 0)$x, c(1, 0)) expect_equal(widen(values_fill = list(val = 0))$x, c(1, 0)) }) test_that("values_fill only affects missing cells", { df <- tibble(g = c(1, 2), names = c("x", "y"), value = c(1, NA)) out <- pivot_wider(df, names_from = names, values_from = value, values_fill = 0) expect_equal(out$y, c(0, NA)) }) test_that("values_fill works with data frame fill values", { df <- tibble( id = c(1L, 2L), name = c("x", "y"), value = tibble(a = 1:2, b = 2:3) ) out <- pivot_wider(df, values_fill = tibble(a = 0L, b = 0L)) expect_identical(out$x, tibble(a = c(1L, 0L), b = c(2L, 0L))) expect_identical(out$y, tibble(a = c(0L, 2L), b = c(0L, 3L))) }) # multiple values ---------------------------------------------------------- test_that("can pivot from multiple measure cols", { df <- tibble(row = 1, var = c("x", "y"), a = 1:2, b = 3:4) sp <- build_wider_spec(df, names_from = var, values_from = c(a, b)) pv <- pivot_wider_spec(df, sp) expect_named(pv, c("row", "a_x", "a_y", "b_x", "b_y")) expect_equal(pv$a_x, 1) expect_equal(pv$b_y, 4) }) test_that("can pivot from multiple measure cols using all keys", { df <- tibble(var = c("x", "y"), a = 1:2, b = 
3:4) sp <- build_wider_spec(df, names_from = var, values_from = c(a, b)) pv <- pivot_wider_spec(df, sp) expect_named(pv, c("a_x", "a_y", "b_x", "b_y")) expect_equal(pv$a_x, 1) expect_equal(pv$b_y, 4) }) test_that("column order in output matches spec", { df <- tribble( ~hw, ~name, ~mark, ~pr, "hw1", "anna", 95, "ok", "hw2", "anna", 70, "meh", ) # deliberately create weird order sp <- tribble( ~hw, ~.value, ~.name, "hw1", "mark", "hw1_mark", "hw1", "pr", "hw1_pr", "hw2", "pr", "hw2_pr", "hw2", "mark", "hw2_mark", ) pv <- pivot_wider_spec(df, sp) expect_named(pv, c("name", sp$.name)) }) # unused ------------------------------------------------------------------- test_that("`unused_fn` can summarize unused columns (#990)", { df <- tibble( id = c(1, 1, 2, 2), unused1 = c(1, 2, 4, 3), unused2 = c(1, 2, 4, 3), name = c("a", "b", "a", "b"), value = c(1, 2, 3, 4) ) # By name res <- pivot_wider(df, id_cols = id, unused_fn = list(unused1 = max)) expect_named(res, c("id", "a", "b", "unused1")) expect_identical(res$unused1, c(2, 4)) # Globally res <- pivot_wider(df, id_cols = id, unused_fn = list) expect_named(res, c("id", "a", "b", "unused1", "unused2")) expect_identical(res$unused1, list(c(1, 2), c(4, 3))) expect_identical(res$unused2, list(c(1, 2), c(4, 3))) }) test_that("`unused_fn` works with anonymous functions", { df <- tibble( id = c(1, 1, 2, 2), unused = c(1, NA, 4, 3), name = c("a", "b", "a", "b"), value = c(1, 2, 3, 4) ) res <- pivot_wider(df, id_cols = id, unused_fn = ~ mean(.x, na.rm = TRUE)) expect_identical(res$unused, c(1, 3.5)) }) test_that("`unused_fn` must result in single summary values", { df <- tibble( id = c(1, 1, 2, 2), unused = c(1, 2, 4, 3), name = c("a", "b", "a", "b"), value = c(1, 2, 3, 4) ) expect_snapshot( (expect_error(pivot_wider(df, id_cols = id, unused_fn = identity))) ) }) test_that("`unused_fn` works with expanded key from `id_expand`", { df <- tibble( id = factor(c(1, 1, 2, 2), levels = 1:3), unused = c(1, 2, 4, 3), name = c("a", "b", "a", 
"b"), value = c(1, 2, 3, 4) ) res <- pivot_wider(df, id_cols = id, id_expand = TRUE, unused_fn = max) expect_identical(res$id, factor(1:3)) expect_identical(res$unused, c(2, 4, NA)) res <- pivot_wider(df, id_cols = id, id_expand = TRUE, unused_fn = ~ sum(is.na(.x))) expect_identical(res$unused, c(0L, 0L, 1L)) }) test_that("can't fill implicit missings in unused column with `values_fill`", { # (in theory this would need `unused_fill`, but it would only be used when # `id_expand = TRUE`, which doesn't feel that useful) df <- tibble( id = factor(c(1, 1, 2, 2), levels = 1:3), unused = c(1, 2, 4, 3), name = c("a", "b", "a", "b"), value = c(1, 2, 3, 4) ) res <- pivot_wider( data = df, id_cols = id, id_expand = TRUE, unused_fn = list, values_fill = 0 ) expect_identical(res$a, c(1, 3, 0)) expect_identical(res$b, c(2, 4, 0)) expect_identical(res$unused, list(c(1, 2), c(4, 3), NA_real_)) res <- pivot_wider( data = df, id_cols = id, id_expand = TRUE, unused_fn = list, values_fill = list(unused = 0) ) expect_identical(res$unused, list(c(1, 2), c(4, 3), NA_real_)) }) test_that("`values_fill` is validated", { df <- tibble(name = "a", value = 1) expect_snapshot( (expect_error(pivot_wider(df, values_fill = 1:2))) ) }) test_that("`unused_fn` is validated", { df <- tibble(id = 1, unused = 1, name = "a", value = 1) expect_snapshot( (expect_error(pivot_wider(df, id_cols = id, unused_fn = 1))) ) }) # deprecated --------------------------------------------------------------- test_that("`id_cols` has noisy compat behavior (#1353)", { df <- tibble( id = c(1, 2), id2 = c(3, 4), name = c("a", "b"), value = c(5, 6) ) # Noisy expect_snapshot({ out <- pivot_wider(df, id) }) # Silent expect_snapshot({ expect <- pivot_wider(df, id_cols = id) }) expect_identical(out, expect) }) test_that("`id_cols` compat behavior doesn't trigger if `id_cols` is specified too", { df <- tibble( id = c(1, 2), id2 = c(3, 4), name = c("a", "b"), value = c(5, 6) ) expect_snapshot(error = TRUE, { pivot_wider(df, id, 
id_cols = id2) }) }) test_that("`id_cols` compat behavior doesn't trigger if multiple `...` are supplied", { df <- tibble( id = c(1, 2), id2 = c(3, 4), name = c("a", "b"), value = c(5, 6) ) expect_snapshot(error = TRUE, { pivot_wider(df, id, id2) }) }) test_that("`id_cols` compat behavior doesn't trigger if named `...` are supplied", { df <- tibble( id = c(1, 2), id2 = c(3, 4), name = c("a", "b"), value = c(5, 6) ) expect_snapshot(error = TRUE, { pivot_wider(df, ids = id) }) }) tidyr/tests/testthat/test-complete.R0000644000176200001440000001206514323620576017333 0ustar liggesuserstest_that("complete with no variables returns data as is", { expect_equal(complete(mtcars), mtcars) }) test_that("basic invocation works", { df <- tibble(x = 1:2, y = 1:2, z = 3:4) out <- complete(df, x, y) expect_equal(nrow(out), 4) expect_equal(out$z, c(3, NA, NA, 4)) }) test_that("will complete within each group (#396)", { levels <- c("a", "b", "c") df <- tibble( g = c("a", "b", "a"), a = c(1L, 1L, 2L), b = factor(c("a", "a", "b"), levels = levels), c = c(4, 5, 6) ) gdf <- dplyr::group_by(df, g) out <- complete(gdf, a, b) # Still grouped expect_identical(dplyr::group_vars(out), "g") out <- nest(out, data = -g) expect_identical( out$data[[1]], tibble( a = vec_rep_each(c(1L, 2L), times = 3), b = factor(vec_rep(c("a", "b", "c"), times = 2)), c = c(4, NA, NA, NA, 6, NA) ) ) expect_identical( out$data[[2]], tibble( a = 1L, b = factor(c("a", "b", "c")), c = c(5, NA, NA) ) ) }) test_that("complete does not allow expansion on grouping variable (#1299)", { df <- tibble( g = "x", a = 1L ) gdf <- dplyr::group_by(df, g) # This is a dplyr error that we don't own expect_error(complete(gdf, g)) }) test_that("can use `.drop = FALSE` with complete (#1299)", { levels <- c("a", "b", "c") df <- tibble( g = factor(c("a", "b", "a"), levels = levels), a = c(1L, 1L, 2L), b = factor(c("a", "a", "b"), levels = levels) ) gdf <- dplyr::group_by(df, g, .drop = FALSE) # No data in group "c" for `a`, so we don't get 
that in the result expect_identical( complete(gdf, a), vec_sort(gdf) ) expect <- crossing(g = factor(levels = levels), b = factor(levels = levels)) expect <- dplyr::group_by(expect, g, .drop = FALSE) expect <- dplyr::full_join(expect, df, c("g", "b")) # Levels of empty vector in `b` are expanded for group "c" expect_identical(complete(gdf, b), expect) }) test_that("complete moves the grouping and completing variables to the front", { df <- tibble( a = 1L, g = "x", b = 2L ) gdf <- dplyr::group_by(df, g) expect_named(complete(gdf, b), c("g", "b", "a")) }) test_that("expands empty factors", { f <- factor(levels = c("a", "b", "c")) df <- tibble(one = f, two = f) expect_equal(nrow(complete(df, one, two)), 9) expect_equal(ncol(complete(df, one, two)), 2) }) test_that("empty expansion returns original", { df <- tibble(x = character()) rs <- complete(df, y = NULL) expect_equal(rs, df) df <- tibble(x = 1:4) rs <- complete(df, y = NULL) expect_equal(rs, df) }) test_that("not drop unspecified levels in complete", { df <- tibble(x = 1:3, y = 1:3, z = c("a", "b", "c")) df2 <- df %>% complete(z = c("a", "b")) expect <- df[c("z", "x", "y")] expect_equal(df2, expect) }) test_that("complete() with empty nesting() / crossing() calls 'ignores' them (#1258)", { df <- tibble(x = factor(c("a", "c"), letters[1:3])) expect_identical(complete(df, x), complete(df, x, nesting())) expect_identical(complete(df, x), complete(df, x, crossing())) expect_identical(complete(df, x), complete(df, x, nesting(NULL))) expect_identical(complete(df, x), complete(df, x, crossing(NULL))) }) test_that("complete() fills missing values even when there are no `...` (#1272)", { df <- tibble(a = c(1, NA, 3)) expect_identical( complete(df, fill = list(a = 0)), tibble(a = c(1, 0, 3)) ) }) test_that("both implicit and explicit missing values are filled by default", { df <- tibble( x = factor(1:2, levels = 1:3), a = c(1, NA) ) expect_identical( complete(df, x, fill = list(a = 0)), tibble(x = factor(1:3), a = c(1, 0, 
0))
  )
})

test_that("can limit the fill to only implicit missing values with `explicit` (#1270)", {
  df <- tibble(
    x = factor(1:2, levels = 1:3),
    a = c(1, NA)
  )

  # `explicit = FALSE`: the pre-existing `NA` at x = 2 is kept; only the
  # new row created for the unused level x = 3 is filled.
  expect_identical(
    complete(df, x, fill = list(a = 0), explicit = FALSE),
    tibble(x = factor(1:3), a = c(1, NA, 0))
  )
})

test_that("can't fill a grouping column", {
  df <- tibble(
    g = c(1, NA),
    x = factor(1:2, levels = 1:3)
  )
  gdf <- dplyr::group_by(df, g)

  # Silently ignore it
  out <- complete(gdf, x, fill = list(g = 0))
  expect_identical(out$g, c(1, 1, 1, NA, NA, NA))
})

test_that("if the completing variables have missings, `fill` will fill them after expansion", {
  # This behavior is admittedly a little weird, but should not be common
  # because you rarely specify a completing variable in `fill`
  df <- tibble(
    x = c(1, NA),
    y = c(NA, 1)
  )

  # Expanded combinations that involved missings get filled
  expect_identical(
    complete(df, x, y, fill = list(x = 0, y = 0)),
    tibble(x = c(1, 1, 0, 0), y = c(1, 0, 1, 0))
  )

  # Can limit the fill to only the "new" combinations that weren't in the
  # original data. Here, the `x = NA, y = NA` combination that gets created.
expect_identical( complete(df, x, y, fill = list(x = 0, y = 0), explicit = FALSE), tibble(x = c(1, 1, NA, 0), y = c(1, NA, 1, 0)) ) }) test_that("validates its inputs", { expect_snapshot(error = TRUE, { complete(mtcars, explicit = 1) }) }) tidyr/tests/testthat/test-append.R0000644000176200001440000000442014360013543016755 0ustar liggesuserstest_that("columns in y replace those in x", { df1 <- data.frame(x = 1) df2 <- data.frame(x = 2) expect_equal(df_append(df1, df2), df2) }) test_that("replaced columns retain the correct ordering (#1444)", { df1 <- data.frame( x = 1, y = 2, z = 3 ) df2 <- data.frame(x = 4) expect_identical( df_append(df1, df2, after = 0L), data.frame(x = 4, y = 2, z = 3) ) expect_identical( df_append(df1, df2, after = 1L), data.frame(x = 4, y = 2, z = 3) ) expect_identical( df_append(df1, df2, after = 2L), data.frame(y = 2, x = 4, z = 3) ) }) test_that("after must be integer or character", { df1 <- data.frame(x = 1) df2 <- data.frame(x = 2) expect_snapshot((expect_error(df_append(df1, df2, after = 1.5)))) }) test_that("always returns a bare data frame", { df1 <- tibble(x = 1) df2 <- tibble(y = 2) expect_identical(df_append(df1, df2), data.frame(x = 1, y = 2)) }) test_that("retains row names of data.frame `x` (#1454)", { # These can't be restored by `reconstruct_tibble()`, so it is reasonable to # retain them. `dplyr:::dplyr_col_modify()` works similarly. 
df <- data.frame(x = 1:2, row.names = c("a", "b")) cols <- list(y = 3:4, z = 5:6) expect_identical(row.names(df_append(df, cols)), c("a", "b")) expect_identical(row.names(df_append(df, cols, after = 0)), c("a", "b")) expect_identical(row.names(df_append(df, cols, remove = TRUE)), c("a", "b")) }) test_that("can append at any integer position", { df1 <- data.frame(x = 1, y = 2) df2 <- data.frame(a = 1) expect_named(df_append(df1, df2, 0L), c("a", "x", "y")) expect_named(df_append(df1, df2, 1L), c("x", "a", "y")) expect_named(df_append(df1, df2, 2L), c("x", "y", "a")) }) test_that("can append at any character position", { df1 <- data.frame(x = 1, y = 2) df2 <- data.frame(a = 1) expect_named(df_append(df1, df2, "x"), c("x", "a", "y")) expect_named(df_append(df1, df2, "y"), c("x", "y", "a")) }) test_that("can replace at any character position ", { df1 <- data.frame(x = 1, y = 2, z = 3) df2 <- data.frame(a = 1) expect_named(df_append(df1, df2, "x", remove = TRUE), c("a", "y", "z")) expect_named(df_append(df1, df2, "y", remove = TRUE), c("x", "a", "z")) expect_named(df_append(df1, df2, "z", remove = TRUE), c("x", "y", "a")) }) tidyr/tests/testthat/test-fill.R0000644000176200001440000000641414323620576016452 0ustar liggesuserstest_that("all missings left unchanged", { df <- tibble( lgl = c(NA, NA), int = c(NA_integer_, NA), dbl = c(NA_real_, NA), chr = c(NA_character_, NA) ) down <- fill(df, lgl, int, dbl, chr) up <- fill(df, lgl, int, dbl, chr, .direction = "up") expect_identical(down, df) expect_identical(up, df) }) test_that("missings are filled correctly", { # filled down from last non-missing df <- tibble(x = c(NA, 1, NA, 2, NA, NA)) out <- fill(df, x) expect_equal(out$x, c(NA, 1, 1, 2, 2, 2)) out <- fill(df, x, .direction = "up") expect_equal(out$x, c(1, 1, 2, 2, NA, NA)) out <- fill(df, x, .direction = "downup") expect_equal(out$x, c(1, 1, 1, 2, 2, 2)) out <- fill(df, x, .direction = "updown") expect_equal(out$x, c(1, 1, 2, 2, 2, 2)) }) test_that("missings filled 
down for each atomic vector", {
  df <- tibble(
    # `TRUE` rather than `T`: `T` is an ordinary binding that can be
    # reassigned, so tests should always spell out `TRUE`/`FALSE`.
    lgl = c(TRUE, NA),
    int = c(1L, NA),
    dbl = c(1, NA),
    chr = c("a", NA),
    lst = list(1:5, NULL)
  )
  out <- fill(df, tidyselect::everything())
  expect_equal(out$lgl, c(TRUE, TRUE))
  expect_equal(out$int, c(1L, 1L))
  expect_equal(out$dbl, c(1, 1))
  expect_equal(out$chr, c("a", "a"))
  expect_equal(out$lst, list(1:5, 1:5))
})

test_that("missings filled up for each vector", {
  df <- tibble(
    lgl = c(NA, TRUE),
    int = c(NA, 1L),
    dbl = c(NA, 1),
    chr = c(NA, "a"),
    lst = list(NULL, 1:5)
  )
  out <- fill(df, tidyselect::everything(), .direction = "up")
  expect_equal(out$lgl, c(TRUE, TRUE))
  expect_equal(out$int, c(1L, 1L))
  expect_equal(out$dbl, c(1, 1))
  expect_equal(out$chr, c("a", "a"))
  expect_equal(out$lst, list(1:5, 1:5))
})

test_that("NaN is treated as missing (#982)", {
  df <- tibble(x = c(1, NaN, 2))

  out <- fill(df, x)
  expect_identical(out$x, c(1, 1, 2))

  out <- fill(df, x, .direction = "up")
  expect_identical(out$x, c(1, 2, 2))
})

test_that("can fill rcrd types", {
  col <- new_rcrd(list(x = c(1, NA, NA), y = c("x", NA, "y")))
  df <- tibble(x = col)

  # Only the fully-missing row 2 is filled; row 3 is partially missing
  out <- fill(df, x)
  expect_identical(field(out$x, "x"), c(1, 1, NA))
  expect_identical(field(out$x, "y"), c("x", "x", "y"))
})

test_that("can fill df-cols", {
  # Uses vctrs missingness rules, so partially missing rows aren't filled
  df <- tibble(x = tibble(a = c(1, NA, NA), b = c("x", NA, "y")))

  out <- fill(df, x)
  expect_identical(out$x$a, c(1, 1, NA))
  expect_identical(out$x$b, c("x", "x", "y"))

  out <- fill(df, x, .direction = "up")
  expect_identical(out$x$a, c(1, NA, NA))
  expect_identical(out$x$b, c("x", "y", "y"))
})

test_that("fill preserves attributes", {
  df <- tibble(x = factor(c(NA, "a", NA)))

  out_d <- fill(df, x)
  out_u <- fill(df, x, .direction = "up")

  expect_equal(attributes(out_d$x), attributes(df$x))
  expect_equal(attributes(out_u$x), attributes(df$x))
})

test_that("fill respects grouping", {
  df <- tibble(x = c(1, 1, 2), y = c(1, NA, NA))
  out <- df %>% dplyr::group_by(x) %>% fill(y)
  expect_equal(out$y,
c(1, 1, NA))
})

test_that("works when there is a column named `.direction` in the data (#1319)", {
  # A data column called `.direction` must not be matched against
  # `fill()`'s own `.direction` argument.
  df <- tibble(x = c(1, NA, 2), .direction = 1:3)
  expect_error(out <- fill(df, x), NA)
  expect_identical(out$x, c(1, 1, 2))
})

test_that("validates its inputs", {
  df <- tibble(x = c(1, NA, 2))

  expect_snapshot(error = TRUE, {
    df %>% fill(x, .direction = "foo")
  })
})
tidyr/tests/testthat/test-pivot.R0000644000176200001440000000122514165476376016673 0ustar liggesuserstest_that("basic sanity checks for spec occur", {
  expect_snapshot({
    (expect_error(check_pivot_spec(1)))
    (expect_error(check_pivot_spec(mtcars)))
  })
})

test_that("`.name` column must be a character vector", {
  df <- tibble(.name = 1:2, .value = c("a", "b"))
  expect_snapshot((expect_error(check_pivot_spec(df))))
})

test_that("`.value` column must be a character vector", {
  df <- tibble(.name = c("x", "y"), .value = 1:2)
  expect_snapshot((expect_error(check_pivot_spec(df))))
})

test_that("`.name` column must be unique", {
  df <- tibble(.name = c("x", "x"), .value = c("a", "b"))
  expect_snapshot((expect_error(check_pivot_spec(df))))
})
tidyr/tests/testthat/test-pack.R0000644000176200001440000001102114360013543016421 0ustar liggesusers# pack --------------------------------------------------------------------

test_that("can pack multiple columns", {
  df <- tibble(a1 = 1, a2 = 2, b1 = 1, b2 = 2)
  out <- df %>% pack(a = c(a1, a2), b = c(b1, b2))

  expect_named(out, c("a", "b"))
  expect_equal(out$a, df[c("a1", "a2")])
  expect_equal(out$b, df[c("b1", "b2")])
})

test_that("packing no columns returns input", {
  df <- tibble(a1 = 1, a2 = 2, b1 = 1, b2 = 2)
  expect_equal(pack(df), df)
})

test_that("can strip outer names from inner names", {
  df <- tibble(ax = 1, ay = 2)
  out <- pack(df, a = c(ax, ay), .names_sep = "")
  expect_named(out$a, c("x", "y"))
})

test_that("grouping is preserved", {
  df <- tibble(g1 = 1, g2 = 1, g3 = 1)
  # `g2` is packed away, so only `g1` can remain a grouping variable
  out <- df %>% dplyr::group_by(g1, g2) %>% pack(g = c(g2, g3))
  expect_equal(dplyr::group_vars(out), "g1")
})
test_that("pack disallows renaming", {
  df <- tibble(x = 1, y = 2)

  expect_snapshot(error = TRUE, {
    pack(df, data = c(a = x))
  })
  expect_snapshot(error = TRUE, {
    pack(df, data1 = x, data2 = c(a = y))
  })
})

test_that("pack validates its inputs", {
  df <- tibble(a1 = 1, a2 = 2, b1 = 1, b2 = 2)

  expect_snapshot(error = TRUE, {
    pack(1)
    pack(df, c(a1, a2), c(b1, b2))
    pack(df, a = c(a1, a2), c(b1, b2))
    pack(df, a = c(a1, a2), .names_sep = 1)
  })
})

# unpack ------------------------------------------------------------------

test_that("grouping is preserved", {
  df <- tibble(g = 1, x = tibble(y = 1))
  out <- df %>% dplyr::group_by(g) %>% unpack(x)
  expect_equal(dplyr::group_vars(out), "g")
})

test_that("non-df-cols are skipped (#1153)", {
  df <- tibble(x = 1:2, y = tibble(a = 1:2, b = 1:2))
  expect_identical(unpack(df, x), df)
  expect_identical(unpack(df, everything()), unpack(df, y))
})

test_that("empty columns that aren't data frames aren't unpacked (#1191)", {
  df <- tibble(x = integer())
  expect_identical(unpack(df, x), df)
})

test_that("df-cols are directly unpacked", {
  df <- tibble(x = 1:3, y = tibble(a = 1:3, b = 3:1))
  out <- df %>% unpack(y)
  expect_named(out, c("x", "a", "b"))
  expect_equal(out[c("a", "b")], df$y)
})

test_that("can unpack 0-col dataframe", {
  df <- tibble(x = 1:3, y = tibble(.rows = 3))
  out <- df %>% unpack(y)
  expect_named(out, c("x"))
})

test_that("can unpack 0-row dataframe", {
  df <- tibble(x = integer(), y = tibble(a = integer()))
  out <- df %>% unpack(y)
  expect_named(out, c("x", "a"))
})

# NOTE(review): fixed typo in the test description ("separtor" -> "separator")
test_that("can choose to add separator", {
  df <- tibble(x = 1, y = tibble(a = 2), z = tibble(a = 3))
  out <- df %>% unpack(c(y, z), names_sep = "_")
  expect_named(out, c("x", "y_a", "z_a"))
})

test_that("can unpack 1-row but 0-col dataframe (#1189)", {
  df <- tibble(x = tibble(.rows = 1))
  expect_identical(unpack(df, x), tibble::new_tibble(list(), nrow = 1L))
})

test_that("catches across inner name duplication (#1425)", {
  df <- tibble(
    x = tibble(a = 3, b = 4),
    y = tibble(b = 5),
    z =
tibble(a = 6, b = 6) ) expect_snapshot(error = TRUE, { unpack(df, c(x, y)) }) expect_snapshot(error = TRUE, { unpack(df, c(x, y, z)) }) }) test_that("catches outer / inner name duplication (#1367)", { df <- tibble( a = 1, b = 2, c = 3, d = tibble(a = 4), e = tibble(d = 5), f = tibble(b = 6, c = 7, g = 8) ) expect_snapshot(error = TRUE, { unpack(df, d) }) expect_snapshot(error = TRUE, { unpack(df, c(d, e, f)) }) }) test_that("duplication error isn't triggered on the names you are unpacking", { df <- tibble(x = tibble(x = 1)) expect_identical(unpack(df, x), tibble(x = 1)) }) test_that("duplication errors aren't triggered if `names_sep` is specified", { df1 <- tibble( x = 1, y = tibble(x = 2) ) df2 <- tibble( x = tibble(a = 1), y = tibble(a = 2) ) expect_identical( unpack(df1, y, names_sep = "_"), tibble(x = 1, y_x = 2) ) expect_identical( unpack(df2, c(x, y), names_sep = "_"), tibble(x_a = 1, y_a = 2) ) }) test_that("duplication errors aren't triggered on duplicates within a single column you are unpacking", { df <- tibble( x = tibble(a = 1, a = 2, .name_repair = "minimal") ) # `vec_as_names()` handles this one expect_snapshot(error = TRUE, { unpack(df, x) }) }) test_that("unpack disallows renaming", { df <- tibble(x = tibble(a = 1)) expect_snapshot(error = TRUE, { unpack(df, c(y = x)) }) }) test_that("unpack() validates its inputs", { df <- tibble(x = 1:2, y = tibble(a = 1:2, b = 1:2)) expect_snapshot(error = TRUE, { unpack(1) unpack(df) unpack(df, y, names_sep = 1) }) }) tidyr/tests/testthat/test-unnest-helper.R0000644000176200001440000001407714321316017020307 0ustar liggesusers# df_simplify ------------------------------------------------------------ test_that("`simplify` is validated", { expect_snapshot({ (expect_error(df_simplify(data.frame(), simplify = 1))) (expect_error(df_simplify(data.frame(), simplify = NA))) (expect_error(df_simplify(data.frame(), simplify = c(TRUE, FALSE)))) (expect_error(df_simplify(data.frame(), simplify = list(1)))) 
(expect_error(df_simplify(data.frame(), simplify = list(x = 1, x = 1)))) }) }) test_that("`ptype` is validated", { expect_snapshot({ (expect_error(df_simplify(data.frame(), ptype = 1))) (expect_error(df_simplify(data.frame(), ptype = list(1)))) (expect_error(df_simplify(data.frame(), ptype = list(x = 1, x = 1)))) }) }) test_that("`transform` is validated", { expect_snapshot({ (expect_error(df_simplify(data.frame(), transform = list(~.x)))) (expect_error(df_simplify(data.frame(x = 1), transform = 1))) (expect_error(df_simplify(data.frame(), transform = list(x = 1)))) (expect_error(df_simplify(data.frame(), transform = list(x = 1, x = 1)))) }) }) test_that("`simplify` can be a named list (#995)", { df <- tibble(x = list(1), y = list("a")) expect_identical( df_simplify(df, simplify = list(x = FALSE)), data_frame(x = list(1), y = "a") ) expect_identical( df_simplify(df, simplify = list(x = TRUE, y = FALSE)), data_frame(x = 1, y = list("a")) ) }) test_that("`simplify` elements are ignored if they don't correspond to a column", { df <- tibble(x = list(1), y = list("a")) expect_identical( df_simplify(df, simplify = list(z = FALSE)), data_frame(x = 1, y = "a") ) }) test_that("`ptype` is allowed to be a single empty ptype (#1284)", { df <- tibble(x = list(1), y = list(1)) expect_identical( df_simplify(df, ptype = integer()), data_frame(x = 1L, y = 1L) ) }) test_that("`transform` is allowed to be a single function (#1284)", { df <- tibble(x = list("1"), y = list("1")) expect_identical( df_simplify(df, transform = ~ as.integer(.x)), data_frame(x = 1L, y = 1L) ) }) # col_simplify ----------------------------------------------------------- test_that("non-list isn't simplified", { expect_identical(col_simplify(1:5), 1:5) }) test_that("transform is applied to entire non-list", { expect_identical(col_simplify(1:5, transform = function(x) x + 1L), 2:6) }) test_that("transform is applied to list elements individually", { expect_identical( col_simplify(list(1, 2), transform = 
length), c(1L, 1L) ) }) test_that("transform is applied even if you can't simplify", { expect_identical( col_simplify(list(1:2, 2L), transform = ~ .x + 1L), list(2:3, 3L) ) }) test_that("transform can result in simplification", { expect_identical( col_simplify(list(1:2, 2L), transform = sum), c(3L, 2L) ) }) test_that("lose list-of status after applying transform", { x <- list_of(1L, 1:2) expect_identical( col_simplify(x, transform = ~ .x + 1), list(2, c(2, 3)) ) x <- list_of(NULL, .ptype = integer()) # Not `NA_integer_` expect_identical( col_simplify(x, transform = ~.x), NA ) }) test_that("ptype is applied to entire non-list", { expect_identical(col_simplify(1:5, ptype = double()), as.double(1:5)) }) test_that("ptype is applied to list elements individually", { expect_identical( col_simplify(list(1, 2, 3), ptype = integer()), c(1L, 2L, 3L) ) }) test_that("ptype is applied even if you can't simplify - and results in a list-of!", { expect_identical( col_simplify(list(c(1, 2), 2, 3), ptype = integer()), list_of(1:2, 2L, 3L) ) }) test_that("ptype is applied after transform", { expect_identical( col_simplify(list(1, 2, 3), ptype = integer(), transform = ~ .x + 1), c(2L, 3L, 4L) ) expect_snapshot((expect_error( col_simplify(list(1, 2, 3), ptype = integer(), transform = ~ .x + 1.5) ))) }) test_that("lists of lists aren't simplified", { x <- list(list(1), list(2)) expect_identical(col_simplify(x), x) x <- list_of(list(1), list(2)) expect_identical(col_simplify(x), x) }) test_that("lists of non-vectors aren't simplified", { x <- list(sym("x"), sym("y")) expect_identical(col_simplify(x), x) }) test_that("lists with length >1 vectors aren't simplified", { x <- list(1, 1:2, 3) expect_identical(col_simplify(x), x) x <- list_of(1L, 1:2, 3L) expect_identical(col_simplify(x), x) }) test_that("Empty elements are retained if we can't simplify", { x <- list(1, NULL, 1:2, integer()) expect_identical(col_simplify(x), x) }) test_that("`NULL` are initialized to size 1 equivalent", { x <- 
list(1, NULL, 2) expect_identical(col_simplify(x), c(1, NA, 2)) expect_identical(col_simplify(x, ptype = integer()), c(1L, NA, 2L)) x <- list_of(1, NULL, 2) expect_identical(col_simplify(x), c(1, NA, 2)) }) test_that("`NULL` is handled correctly when it is the only element", { x <- list(NULL) expect_identical(col_simplify(x), NA) expect_identical(col_simplify(x, ptype = integer()), NA_integer_) x <- list_of(NULL, .ptype = integer()) expect_identical(col_simplify(x), NA_integer_) expect_identical(col_simplify(x, ptype = double()), NA_real_) }) test_that("empty typed elements are initialized to size 1 equivalent", { x <- list(integer(), 1L, integer()) expect_identical(col_simplify(x), c(NA, 1L, NA)) x <- list_of(integer(), 1L) expect_identical(col_simplify(x), c(NA, 1L)) }) test_that("empty typed element is handled correctly if it is the only element", { x <- list(integer()) expect_identical(col_simplify(x), NA_integer_) expect_identical(col_simplify(x, ptype = double()), NA_real_) x <- list_of(integer()) expect_identical(col_simplify(x), NA_integer_) expect_identical(col_simplify(x, ptype = double()), NA_real_) }) test_that("can simplify record style objects (#999)", { rcrd <- new_rcrd(list(x = 1, y = 2)) x <- list(rcrd, rcrd) expect_identical(col_simplify(x), vec_c(rcrd, rcrd)) }) test_that("can simplify one row data frames (#1034)", { x <- list(tibble(x = 1, y = 2), tibble(x = 2, y = 3)) expect_identical(col_simplify(x), vec_c(x[[1]], x[[2]])) }) tidyr/tests/testthat/test-spread.R0000644000176200001440000002052314321316017016765 0ustar liggesuserstest_that("order doesn't matter", { df1 <- tibble(x = factor(c("a", "b")), y = 1:2) df2 <- tibble(x = factor(c("b", "a")), y = 2:1) one <- spread(df1, x, y) two <- spread(df2, x, y) %>% dplyr::select(a, b) %>% dplyr::arrange(a, b) expect_identical(one, two) df1 <- tibble(z = factor(c("b", "a")), x = factor(c("a", "b")), y = 1:2) df2 <- tibble(z = factor(c("a", "b")), x = factor(c("b", "a")), y = 2:1) one <- spread(df1, x, 
y) %>% dplyr::arrange(z)
  two <- spread(df2, x, y)
  expect_identical(one, two)
})

test_that("convert turns strings into integers", {
  df <- tibble(key = "a", value = "1")
  out <- spread(df, key, value, convert = TRUE)
  expect_type(out$a, "integer")
})

test_that("duplicate values for one key is an error", {
  df <- tibble(x = factor(c("a", "b", "b")), y = c(1, 2, 2), z = c(1, 2, 2))
  expect_snapshot((expect_error(spread(df, x, y))))
})

test_that("factors are spread into columns (#35)", {
  data <- tibble(
    x = factor(c("a", "a", "b", "b")),
    y = factor(c("c", "d", "c", "d")),
    z = factor(c("w", "x", "y", "z"))
  )

  out <- data %>% spread(x, z)

  expect_equal(names(out), c("y", "a", "b"))
  # The new columns keep the factor type and full level set of `z`
  expect_true(all(vapply(out, is.factor, logical(1))))
  expect_identical(levels(out$a), levels(data$z))
  expect_identical(levels(out$b), levels(data$z))
})

test_that("drop = FALSE keeps missing combinations (#25)", {
  df <- tibble(
    x = factor("a", levels = c("a", "b")),
    y = factor("b", levels = c("a", "b")),
    z = 1
  )
  out <- df %>% spread(x, z, drop = FALSE)
  expect_equal(nrow(out), 2)
  expect_equal(ncol(out), 3)
  expect_equal(out$a[2], 1)
})

test_that("drop = FALSE keeps missing combinations of 0-length factors (#56)", {
  df <- tibble(
    x = factor(, levels = c("a", "b")),
    y = factor(, levels = c("a", "b")),
    z = logical()
  )
  out <- df %>% spread(x, z, drop = FALSE)
  expect_equal(nrow(out), 2)
  expect_equal(ncol(out), 3)
  expect_equal(out$a, c(NA, NA))
  expect_equal(out$b, c(NA, NA))
})

test_that("drop = FALSE spread all levels including NA (#254)", {
  l <- c("a", "b", "c", "d")
  df <- tibble(
    x = factor(c("a", "b", "c", NA), levels = l),
    y = factor(c("a", "b", "c", "d")),
    z = factor(c("a", "b", "a", "b"))
  )
  out <- df %>% spread(x, y, drop = FALSE)
  expect_equal(nrow(out), 2)
  expect_equal(ncol(out), 6)
  expect_equal(out$d, factor(c(NA, NA), levels = l))
  # The `NA` key becomes a column whose name is the empty string
  expect_equal(out[[""]], factor(c(NA, "d"), levels = l))
})

test_that("spread preserves class of tibbles", {
  dat <- tibble(
    x = factor(c("a", "a", "b", "b")),
    y =
factor(c("c", "d", "c", "d")), z = factor(c("w", "x", "y", "z")) ) dat %>% spread(x, z) %>% expect_s3_class("tbl_df") }) test_that("dates are spread into columns (#62)", { df <- tibble( id = c("a", "a", "b", "b"), key = c("begin", "end", "begin", "end"), date = Sys.Date() + 0:3 ) out <- spread(df, key, date) expect_identical(names(out), c("id", "begin", "end")) expect_s3_class(out$begin, "Date") expect_s3_class(out$end, "Date") }) test_that("spread can produce mixed variable types (#118)", { df <- tibble( row = rep(1:2, 3), column = rep(1:3, each = 2), cell_contents = as.character(c( rep("Argentina", 2), 62.485, 64.399, 1952, 1957 )) ) out <- spread(df, column, cell_contents, convert = TRUE) expect_equal( unname(vapply(out, class, "")), c("integer", "character", "numeric", "integer") ) }) test_that("factors can be used with convert = TRUE to produce mixed types", { df <- tibble( row = c(1, 2, 1, 2, 1, 2), column = c("f", "f", "g", "g", "h", "h"), contents = c("aa", "bb", "1", "2", "TRUE", "FALSE") ) out <- df %>% spread(column, contents, convert = TRUE) expect_type(out$f, "character") expect_type(out$g, "integer") expect_type(out$h, "logical") }) test_that("dates can be used with convert = TRUE", { df <- tibble( id = c("a", "a", "b", "b"), key = c("begin", "end", "begin", "end"), date = Sys.Date() + 0:3 ) out <- spread(df, key, date, convert = TRUE) expect_type(out$begin, "character") expect_type(out$end, "character") }) test_that("vars that are all NA are logical if convert = TRUE (#118)", { df <- tibble( row = c(1, 2, 1, 2), column = c("f", "f", "g", "g"), contents = c("aa", "bb", NA, NA) ) out <- df %>% spread(column, contents, convert = TRUE) expect_type(out$g, "logical") }) test_that("complex values are preserved (#134)", { df <- expand.grid(id = 1:2, key = letters[1:2], stringsAsFactors = TRUE) %>% dplyr::mutate(value = 1:4 + 1i) out1 <- spread(df, key, value, convert = FALSE) out2 <- spread(df, key, value, convert = TRUE) expect_equal(out1$a, 1:2 + 1i) 
expect_equal(out2$a, 1:2 + 1i) expect_equal(out1$b, 3:4 + 1i) expect_equal(out2$b, 3:4 + 1i) }) test_that("can spread with nested columns", { df <- tibble(x = c("a", "a"), y = 1:2, z = list(1:2, 3:5)) out <- spread(df, x, y) expect_equal(out$a, 1:2) expect_equal(out$z, df$z) }) test_that("spreading empty data frame gives empty data frame", { df <- tibble(x = character(), y = numeric(), z = character()) rs <- spread(df, x, y) expect_equal(nrow(rs), 0) expect_named(rs, "z") df <- tibble(x = character(), y = numeric()) rs <- spread(df, x, y) expect_equal(nrow(rs), 0) expect_equal(ncol(rs), 0) }) test_that("spread gives one column when no existing non-spread vars", { df <- tibble( key = c("a", "b", "c"), value = c(1, 2, 3) ) expect_equal(df %>% spread(key, value), tibble(a = 1, b = 2, c = 3)) }) test_that("grouping vars are kept where possible", { # Can keep df <- tibble(x = 1:2, key = factor(c("a", "b")), value = 1:2) out <- df %>% dplyr::group_by(x) %>% spread(key, value) expect_equal(dplyr::groups(out), list(quote(x))) # Can't keep df <- tibble(key = factor(c("a", "b")), value = 1:2) out <- df %>% dplyr::group_by(key) %>% spread(key, value) expect_equal(out, tibble(a = 1L, b = 2L)) }) test_that("col names never contains NA", { df <- tibble(x = c(1, NA), y = 1:2) df %>% spread(x, y) %>% expect_named(c("1", "")) df %>% spread(x, y, sep = "_") %>% expect_named(c("x_1", "x_NA")) }) test_that("never has row names (#305)", { df <- tibble(id = 1:2, x = letters[1:2], y = 1:2) expect_false( df %>% spread(x, y) %>% tibble::has_rownames() ) }) test_that("overwrites existing columns", { df <- tibble(x = 1:2, y = 2:1, key = c("x", "x"), value = 3:4) rs <- df %>% spread(key, value) expect_named(rs, c("y", "x")) expect_equal(rs$x, 3:4) }) test_that("spread doesn't convert data frames into tibbles", { df <- data.frame(x = c("a", "b"), y = 1:2) expect_equal(class(spread(df, x, y)), "data.frame") }) test_that("spread with fill replaces explicit missing values", { df <- tibble(key = 
factor("a"), value = NA) out <- spread(df, key, value, fill = 1) expect_equal(out, tibble(a = 1)) }) test_that("spread with fill replaces implicit missing values", { # Missing keys in some groups df <- tibble( x = factor(c("G1", "G2")), key = factor(c("a", "b")), value = c(1, 1) ) out <- spread(df, key, value, fill = 2) expect_equal(out, tibble(x = factor(c("G1", "G2")), a = c(1, 2), b = c(2, 1))) # Missing factor levels in key with drop = FALSE df <- tibble(key = factor("a", levels = c("a", "b")), value = 1) out <- spread(df, key, value, fill = 2, drop = FALSE) expect_equal(out, tibble(a = 1, b = 2)) }) test_that("ulevels preserves original factor levels", { x_na_lev <- factor(c("a", NA), exclude = NULL) expect_equal(levels(ulevels(x_na_lev)), c("a", NA)) x_na_lev_extra <- factor(c("a", NA), levels = c("a", "b", NA), exclude = NULL) expect_equal(levels(ulevels(x_na_lev_extra)), c("a", "b", NA)) x_no_na_lev <- factor(c("a", NA)) expect_equal(levels(ulevels(x_no_na_lev)), "a") x_no_na_lev_extra <- factor(c("a", NA), levels = c("a", "b")) expect_equal(levels(ulevels(x_no_na_lev_extra)), c("a", "b")) }) test_that("ulevels returns unique elements of a list for a list input", { test_list <- list(a = 1:6, b = 1:6) expect_equal(ulevels(test_list), unique(test_list)) }) test_that("spread works when id column has names (#525)", { df <- tibble( key = factor(c("a", "b", "c"), levels = letters[1:5]), out = 1:3, id = c(a = 1, b = 2, c = 3) ) res <- spread(df, key, out, drop = FALSE) expect_equal(names(res), c("id", letters[1:5])) }) tidyr/tests/testthat/test-seq.R0000644000176200001440000000176414323620576016317 0ustar liggesuserstest_that("full_seq with tol > 0 allows sequences to fall short of period", { expect_equal(full_seq(c(0, 10, 20), 11, tol = 2), c(0, 11, 22)) }) test_that("full_seq pads length correctly for tol > 0", { expect_equal(full_seq(c(0, 10, 16), 11, tol = 5), c(0, 11)) }) test_that("sequences don't have to start at zero", { expect_equal(full_seq(c(1, 5), 2), 
c(1, 3, 5))
})

test_that("full_seq fills in gaps", {
  expect_equal(full_seq(c(1, 3), 1), c(1, 2, 3))
})

test_that("preserves attributes", {
  x1 <- as.Date("2001-01-01") + c(0, 2)
  x2 <- as.POSIXct(x1)

  # Date steps in days; POSIXct steps in seconds (86400 s = 1 day)
  expect_s3_class(full_seq(x1, 2), "Date")
  expect_s3_class(full_seq(x2, 86400), c("POSIXct", "POSIXt"))
})

test_that("full_seq errors if sequence isn't regular", {
  expect_snapshot({
    (expect_error(full_seq(c(1, 3, 4), 2)))
    (expect_error(full_seq(c(0, 10, 20), 11, tol = 1.8)))
  })
})

test_that("validates inputs", {
  x <- 1:5

  expect_snapshot(error = TRUE, {
    full_seq(x, period = "a")
    full_seq(x, 1, tol = "a")
  })
})
tidyr/tests/testthat/test-unnest-wider.R0000644000176200001440000002472314363516001020142 0ustar liggesusers
test_that("number of rows is preserved", {
  df <- tibble(
    x = 1:3,
    y = list(NULL, c(a = 1), c(a = 1, b = 2))
  )
  out <- df %>% unnest_wider(y)
  expect_equal(nrow(out), 3)
})

test_that("simplifies length-1 lists", {
  df <- tibble(
    x = 1:2,
    y = list(
      list(a = 1, b = 2, c = c(1, 2)),
      list(a = 3)
    )
  )
  out <- df %>% unnest_wider(y)
  expect_equal(out$a, c(1, 3))
  expect_equal(out$b, c(2, NA))
  # `c` has a length-2 element, so it stays a list-col
  expect_equal(out$c, list(c(1, 2), NULL))

  # Works when casting too
  out <- df %>% unnest_wider(y, ptype = list(a = integer(), b = integer()) )
  expect_equal(out$a, c(1L, 3L))
  expect_equal(out$b, c(2L, NA))
  expect_equal(out$c, list(c(1, 2), NULL))
})

test_that("treats data frames like lists where we have type info about each element", {
  df <- tibble(x = 1:2, y = list(tibble(a = 1:2)))
  out <- df %>% unnest_wider(y)
  expect_named(out, c("x", "a"))
  expect_equal(nrow(out), 2)
  # We know the types of this, even though we can't simplify it
  expect_identical(attr(out$a, "ptype"), integer())

  df <- tibble(x = 1:2, y = list(list(a = 1:2)))
  out <- df %>% unnest_wider(y)
  expect_named(out, c("x", "a"))
  expect_equal(nrow(out), 2)
  # We don't know the types of this
  expect_identical(class(out$a), "list")
})

test_that("unnest_wider - bad inputs generate errors", {
  df <- tibble(x = 1, y = list(mean))
expect_snapshot((expect_error( unnest_wider(df, y) ))) }) test_that("list of 0-length vectors yields no new columns", { df <- tibble(x = 1:2, y = list(integer(), integer())) expect_named(unnest_wider(df, y), "x") # similarly when empty df <- tibble(x = integer(), y = list()) expect_named(unnest_wider(df, y), "x") # similarly when using list_of() with 0-length elements df <- tibble(x = 1:2, y = list_of(integer(), integer())) expect_named(unnest_wider(df, y), "x") }) test_that("list-col with only `NULL` works (#1186)", { df <- tibble(x = list(NULL)) expect_identical(unnest_wider(df, x), tibble::new_tibble(list(), nrow = 1L)) }) test_that("empty list yields no new columns", { df <- tibble(x = list()) expect_identical(unnest_wider(df, x), tibble()) }) test_that("list_of columns can be unnested", { df <- tibble(x = 1:2, y = list_of(c(a = 1L), c(a = 1L, b = 2L))) expect_named(unnest_wider(df, y), c("x", "a", "b")) df <- tibble(x = 1:2, y = list_of(c(a = 1L), c(b = 1:2))) expect_named(unnest_wider(df, y), c("x", "a", "b1", "b2")) }) test_that("names_sep creates unique names", { df <- tibble( x = list("a", c("a", "b", "c")), y = list(c(a = 1), c(b = 2, a = 1)) ) expect_warning(out <- unnest_wider(df, x, names_sep = "_"), NA) expect_named(out, c("x_1", "x_2", "x_3", "y")) expect_warning(out <- unnest_wider(df, y, names_sep = "_"), NA) expect_named(out, c("x", "y_a", "y_b")) expect_equal(out$y_a, c(1, 1)) }) test_that("`names_sep` works with empty elements (#1185)", { df <- tibble(x = list(c(a = 1L), c(a = integer()))) out <- unnest_wider(df, x, names_sep = "_") expect_identical(out, tibble(x_a = c(1L, NA))) }) test_that("`names_sep` works with data frame columns", { df <- tibble(x = tibble(a = 1, b = 2)) out <- unnest_wider(df, x, names_sep = "_") expect_named(out, c("x_a", "x_b")) }) test_that("`names_sep` works with named non-list atomic vectors", { # Equivalent to `df <- tibble(x = list_of(c(a = 1), c(b = 2)))` df <- tibble(x = c(a = 1, b = 2)) out <- unnest_wider(df, x, 
names_sep = "_") expect_named(out, c("x_a", "x_b")) }) test_that("df-cols can be unnested (#1188)", { df <- tibble(a = 1:3, b = tibble(x = 1:3, y = 1:3)) out <- unnest_wider(df, b) expect_identical(out, unpack(df, b)) }) test_that("df-cols result in list-ofs when `simplify = FALSE`", { df <- tibble(a = 1:3, b = tibble(x = 1:3, y = 1:3)) out <- unnest_wider(df, b, simplify = FALSE) expect_identical(out, tibble(a = 1:3, x = list_of(1L, 2L, 3L), y = list_of(1L, 2L, 3L))) }) test_that("unnesting mixed empty types retains the column (#1125)", { df <- tibble(col = list(list(a = list()), list(a = integer()))) expect_identical(unnest_wider(df, col), tibble(a = c(NA, NA))) }) test_that("can unnest mixed empty types with `strict = FALSE`", { df <- tibble(col = list( list(a = "x"), list(a = list()), list(a = integer()) )) expect_identical( unnest_wider(df, col)$a, c("x", NA, NA) ) # They are replaced with `NULL` before simplification expect_identical( unnest_wider(df, col, simplify = FALSE)$a, list("x", NULL, NULL) ) }) test_that("can't unnest mixed empty types when in strict mode", { df <- tibble(col = list(list(a = list()), list(a = 1L))) # Not strict, useful for JSON data but doesn't follow vctrs type rules expect_identical(unnest_wider(df, col), tibble(a = c(NA, 1L))) # Strict means they can't be combined expect_identical( unnest_wider(df, col, strict = TRUE), tibble(a = list(list(), 1L)) ) }) test_that("can unnest multiple columns wider at once (#740)", { df <- tibble( x = list(list(a = 1), list(a = 2)), y = list(list(b = 1, c = "x"), list(b = 2, c = "y")) ) expect_identical( unnest_wider(df, c(x, y)), tibble(a = c(1, 2), b = c(1, 2), c = c("x", "y")) ) }) test_that("can unnest a vector with a mix of named/unnamed elements (#1200 comment, #1367)", { df <- tibble(x = c(a = 1L, 2L)) out <- unnest_wider(df, x, names_sep = "_") expect_identical(out$x_a, c(1L, NA)) expect_identical(out$x_1, c(NA, 2L)) }) test_that("can unnest a list with a mix of named/unnamed elements (#1200 
comment)", { df <- tibble(x = list(a = 1:2, 3:4)) out <- unnest_wider(df, x, names_sep = "_") expect_identical(out$x_1, c(1L, 3L)) expect_identical(out$x_2, c(2L, 4L)) }) test_that("integer names are generated before applying `names_sep` (#1200 comment, #1367)", { df <- tibble(col = list(set_names(1, ""))) out <- unnest_wider(df, col, names_sep = "_") expect_named(out, "col_1") df <- tibble(col = list(set_names(1:2, c("", "")))) out <- unnest_wider(df, col, names_sep = "_") expect_named(out, c("col_1", "col_2")) }) test_that("integer names are generated for partially named vectors (#1367)", { df <- tibble(col = list(set_names(1:4, c("x", "", "z", "")))) out <- unnest_wider(df, col, names_sep = "_") expect_named(out, c("col_x", "col_2", "col_z", "col_4")) df <- tibble(col = list( set_names(1:4, c("x", "", "z", "")), set_names(5:8, c("", "", "z", "")) )) out <- unnest_wider(df, col, names_sep = "_") expect_named(out, c("col_x", "col_2", "col_z", "col_4", "col_1")) expect_identical(out$col_x, c(1L, NA)) expect_identical(out$col_1, c(NA, 5L)) }) test_that("`NA_character_` name is treated like the empty string (#1200 comment)", { col <- list( set_names(1, "a"), set_names(1, NA_character_), set_names(1, "") ) df <- tibble(col = col) out <- unnest_wider(df, col, names_sep = "_") expect_identical(out$col_a, c(1, NA, NA)) expect_identical(out$col_1, c(NA, 1, 1)) }) test_that("can combine ` + >`", { df <- tibble(col = list(list(a = 1:2), list_of(a = 1L))) out <- unnest_wider(df, col) expect_identical(out$a, list(1:2, 1L)) }) test_that("can't unnest unnamed elements without `names_sep` (#1367)", { df <- tibble(col = list(1)) expect_snapshot(error = TRUE, { unnest_wider(df, col) }) df <- tibble(col = list(set_names(1, ""))) expect_snapshot(error = TRUE, { unnest_wider(df, col) }) df <- tibble(col = list(set_names(1, NA_character_))) expect_snapshot(error = TRUE, { unnest_wider(df, col) }) # Partially missing within an element df <- tibble(col = list(c(a = 1), c(a = 1, 2))) 
expect_snapshot(error = TRUE, { unnest_wider(df, col) }) }) test_that("catches duplicate inner names in the same vector", { df <- tibble(col = list(c(a = 1, a = 2))) expect_snapshot(error = TRUE, { unnest_wider(df, col) }) expect_snapshot({ out <- unnest_wider(df, col, names_repair = "unique") }) expect_named(out, c("a...1", "a...2")) }) test_that("unnest_wider() advises on outer / inner name duplication (#1367)", { df <- tibble(x = 1, y = list(list(x = 2))) expect_snapshot(error = TRUE, { unnest_wider(df, y) }) }) test_that("unnest_wider() advises on inner / inner name duplication (#1367)", { df <- tibble(x = 1, y = list(list(a = 2)), z = list(list(a = 3))) expect_snapshot(error = TRUE, { unnest_wider(df, c(y, z)) }) }) test_that("unnest_wider() works with foreign lists recognized by `vec_is_list()` (#1327)", { new_foo <- function(...) { structure(list(...), class = c("foo", "list")) } # With empty types df <- tibble(x = new_foo(new_foo(a = 1, b = integer()))) expect_identical(unnest_wider(df, x, strict = TRUE), tibble(a = 1, b = NA_integer_)) # With `NULL`s df <- tibble(x = new_foo(new_foo(a = 1, b = NULL))) expect_identical(unnest_wider(df, x), tibble(a = 1, b = NA)) }) test_that("unnest_wider() validates its inputs", { df <- tibble(x = list(a = 1:2, b = 3:4)) expect_snapshot(error = TRUE, { unnest_wider(1) unnest_wider(df) unnest_wider(df, x, names_sep = 1) unnest_wider(df, x, strict = 1) }) }) test_that("invariant - final number of columns depends on element sizes", { df <- tibble(x = list_of(.ptype = integer())) expect_identical(dim(unnest_wider(df, x)), c(0L, 0L)) df <- tibble(x = list_of(NULL, .ptype = integer())) expect_identical(dim(unnest_wider(df, x)), c(1L, 0L)) df <- tibble(x = list_of(integer())) expect_identical(dim(unnest_wider(df, x)), c(1L, 0L)) df <- tibble(x = list_of(c(a = 1L))) expect_identical(dim(unnest_wider(df, x)), c(1L, 1L)) df <- tibble(x = list_of(c(a = 1L), c(a = 1L, b = 2L, c = 3L))) expect_identical(dim(unnest_wider(df, x)), c(2L, 
3L)) df <- tibble(x = list_of(c(a = 1L, c = 3L), c(a = 1L, b = 2L))) expect_identical(dim(unnest_wider(df, x)), c(2L, 3L)) }) test_that("invariant - for list_of special case, final number of columns and types comes from ptype columns (#1187)", { df <- tibble(x = list_of(.ptype = tibble(a = integer()))) expect_identical(unnest_wider(df, x), tibble(a = integer())) df <- tibble(x = list_of(NULL, .ptype = tibble(a = integer()))) expect_identical(unnest_wider(df, x), tibble(a = NA_integer_)) df <- tibble(x = list_of(tibble(a = integer()))) expect_identical(unnest_wider(df, x), tibble(a = NA_integer_)) df <- tibble(x = list_of(tibble(a = 1L))) expect_identical(unnest_wider(df, x), tibble(a = 1L)) df <- tibble(x = list_of(tibble(a = 1:2))) expect_identical(unnest_wider(df, x), tibble(a = list_of(1:2))) }) tidyr/tests/testthat/test-hoist.R0000644000176200001440000001610114360013543016633 0ustar liggesusers test_that("hoist extracts named elements", { df <- tibble(x = list(list(1, b = "b"))) out <- df %>% hoist(x, a = 1, b = "b") expect_equal(out, tibble(a = 1, b = "b")) out <- df %>% hoist(x, a = 1, b = "b", .simplify = FALSE) expect_identical(out, tibble(a = list(1), b = list("b"))) }) test_that("can hoist named non-list elements at the deepest level", { df <- tibble(x = list(list(a = c(aa = 1, bb = 2)))) out <- hoist(df, x, bb = list("a", "bb")) expect_identical(out$bb, 2) }) test_that("can check check/transform values", { df <- tibble(x = list( list(a = 1), list(a = "a") )) expect_error( df %>% hoist(x, a = "a", .ptype = list(a = character())), class = "vctrs_error_incompatible_type" ) out <- df %>% hoist(x, a = "a", .transform = list(a = as.character)) expect_equal(out, tibble(a = c("1", "a"))) }) test_that("nested lists generate a cast error if they can't be cast to the ptype", { df <- tibble(x = list(list(b = list(1)))) expect_snapshot((expect_error( hoist(df, x, "b", .ptype = list(b = double())) ))) }) test_that("non-vectors generate a cast error if a ptype is 
supplied", { df <- tibble(x = list(list(b = quote(a)))) expect_snapshot((expect_error( hoist(df, x, "b", .ptype = list(b = integer())) ))) }) test_that("a ptype generates a list-of if the col can't be simplified (#998)", { df <- tibble(x = list(list(a = 1:2), list(a = 1), list(a = 1))) ptype <- list(a = integer()) out <- hoist(df, x, "a", .ptype = ptype) expect_identical(out$a, list_of(1:2, 1L, 1L, .ptype = integer())) }) test_that("doesn't simplify uneven lengths", { df <- tibble(x = list( list(a = 1), list(a = 2:3) )) out <- df %>% hoist(x, a = "a") expect_identical(out$a, list(1, 2:3)) }) test_that("doesn't simplify lists of lists", { df <- tibble(x = list( list(a = list(1)), list(a = list(2)) )) out <- df %>% hoist(x, a = "a") expect_identical(out$a, list(list(1), list(2))) }) test_that("doesn't simplify non-vectors", { df <- tibble(x = list( list(a = quote(a)), list(a = quote(b)) )) out <- df %>% hoist(x, a = "a") expect_identical(out$a, list(quote(a), quote(b))) }) test_that("can hoist out scalars", { df <- tibble( x = 1:2, y = list( list(mod = lm(mpg ~ wt, data = mtcars)), list(mod = lm(mpg ~ wt, data = mtcars)) ) ) out <- hoist(df, y, "mod") expect_identical(out$mod, list(df$y[[1]]$mod, df$y[[2]]$mod)) }) test_that("input validation catches problems", { df <- tibble(x = list(list(1, b = "b")), y = 1) expect_snapshot({ (expect_error(df %>% hoist(y))) (expect_error(df %>% hoist(x, 1))) (expect_error(df %>% hoist(x, a = "a", a = "b"))) }) }) test_that("string pluckers are automatically named", { out <- check_pluckers("x", y = "x", z = 1) expect_named(out, c("x", "y", "z")) }) test_that("can't hoist() from a data frame column", { df <- tibble(a = tibble(x = 1)) expect_snapshot((expect_error( hoist(df, a, xx = 1) ))) }) test_that("can hoist() without any pluckers", { df <- tibble(a = list(1)) expect_identical(hoist(df, a), df) }) test_that("can use a character vector for deep hoisting", { df <- tibble(x = list(list(b = list(a = 1)))) out <- hoist(df, x, ba = 
c("b", "a")) expect_identical(out$ba, 1) }) test_that("can use a numeric vector for deep hoisting", { df <- tibble(x = list(list(b = list(a = 1, b = 2)))) out <- hoist(df, x, bb = c(1, 2)) expect_identical(out$bb, 2) }) test_that("can maintain type stability with empty elements (#1203)", { df <- tibble( col = list( list(a = integer()), list(a = integer()) ) ) out <- hoist(df, col, "a") expect_identical(out$a, c(NA_integer_, NA_integer_)) }) test_that("can hoist out a rcrd style column (#999)", { x <- new_rcrd(list(x = 1, y = 2)) df <- tibble(a = list(list(x = x), list(x = x))) out <- hoist(df, a, "x") expect_identical(out$x, vec_c(x, x)) }) test_that("hoist() validates its inputs (#1224)", { df <- tibble(a = list(1)) expect_snapshot(error = TRUE, { hoist(1) hoist(df) hoist(df, a, .remove = 1) hoist(df, a, .ptype = 1) hoist(df, a, .transform = 1) hoist(df, a, .simplify = 1) }) }) test_that("hoist() can simplify on a per column basis (#995)", { df <- tibble( x = list( list(a = 1, b = 1), list(a = 2, b = 2) ) ) expect_identical( hoist(df, x, a = "a", b = "b", .simplify = list(a = FALSE)), tibble(a = list(1, 2), b = c(1, 2)) ) }) test_that("hoist() retrieves first of duplicated names and leaves the rest alone (#1259)", { elt <- list(x = 1, y = 2, x = 3, z = 2) df <- tibble(col = list(elt)) expect_identical( hoist(df, col, "x"), tibble(x = 1, col = list(list(y = 2, x = 3, z = 2))) ) expect_identical( hoist(df, col, "y"), tibble(y = 2, col = list(list(x = 1, x = 3, z = 2))) ) }) test_that("hoist() retains grouped data frame class (#1316)", { df <- tibble( g = c("x", "x", "z"), data = list( list(a = 1:2), list(a = 2:3), list(a = 3:4) ) ) gdf <- dplyr::group_by(df, g) expect_identical( hoist(gdf, data, "a"), dplyr::group_by(hoist(df, data, "a"), g) ) }) test_that("hoist() retains bare data.frame class", { df <- vctrs::data_frame( data = list( list(a = 1:2), list(a = 2:3), list(a = 3:4) ) ) expect_identical( hoist(df, data, "a"), vctrs::data_frame(a = list(1:2, 2:3, 3:4)) ) 
}) test_that("known bug - hoist() doesn't strike after each pluck (related to #1259)", { # All pluckers operate on the same initial list-col. # We don't currently strike after each pluck, so the repeated plucks pull the # first of the duplicated `x` names each time. But then the strike() loop # removes both of them, because it strikes with `"x"` twice in a row. # Fixing this probably requires significant work and likely isn't worth it. elt <- list(x = 1, x = 3, z = 2) df <- tibble(col = list(elt)) # Ideally we'd get `x1 = 1, x2 = 3` and no mention of `x` in `col` expect_identical( hoist(df, col, x1 = "x", x2 = "x"), tibble(x1 = 1, x2 = 1, col = list(list(z = 2))) ) }) # strike ------------------------------------------------------------------ test_that("strike can remove using a list", { x <- list(a = list(), b = list(a = 1, b = 2), c = "c") expect_equal(strike(x, list(1)), x[c(2, 3)]) expect_equal(strike(x, list("a")), x[c(2, 3)]) deep <- strike(x, list("b", 2)) expect_equal(deep, list(a = list(), b = list(a = 1), c = "c")) }) test_that("strike returns input if idx not present", { x <- list(a = list(), b = list(a = 1, b = 2), c = "c") expect_equal(strike(x, list(4)), x) expect_equal(strike(x, list("d")), x) expect_equal(strike(x, list("b", 3)), x) expect_equal(strike(x, list("d", 3)), x) expect_equal(strike(x, list("b", "c")), x) expect_equal(strike(x, list(3, "b")), x) expect_equal(strike(x, list(4, "b")), x) }) test_that("ignores weird inputs", { x <- list(a = list(), b = list(a = 1, b = 2), c = "c") expect_equal(strike(x, list()), x) expect_equal(strike(x, list(mean, mean)), x) }) tidyr/tests/testthat/test-separate-wider.R0000644000176200001440000002002514360013543020421 0ustar liggesusers# separate_wider_delim -------------------------------------------------------- test_that("separate_wider_delim() can create column names", { df <- tibble(x = c("a b", "x y")) out <- df %>% separate_wider_delim(x, " ", names_sep = "") expect_equal(out$x1, c("a", "x")) 
expect_equal(out$x2, c("b", "y")) }) test_that("separate_wider_delim() errors about too few/too many values", { df <- tibble(x = c("x", "x y", "x y z")) expect_snapshot(error = TRUE, df %>% separate_wider_delim(x, " ", names = c("a", "b")) ) }) test_that("separate_wider_delim() can ignore problems", { df <- tibble(x = c("x", "x y", "x y z")) out <- df %>% separate_wider_delim(x, " ", names = c("a", "b"), too_few = "align_start", too_many = "drop", ) expect_equal(out[1, ], tibble(a = "x", b = NA_character_)) expect_equal(out[3, ], tibble(a = "x", b = "y")) out <- df %>% separate_wider_delim(x, " ", names = c("a", "b"), too_few = "align_end", too_many = "merge", ) expect_equal(out[1, ], tibble(a = NA_character_, b = "x")) expect_equal(out[3, ], tibble(a = "x", b = "y z")) }) test_that("separate_wider_delim() can diagnose problems", { df <- tibble(x = c(NA, "x", "x y", "x y z")) expect_snapshot( out <- df %>% separate_wider_delim(x, " ", names = c("a", "b"), too_few = "debug", too_many = "debug", ) ) expect_equal(out$x, df$x) expect_equal(out$x_ok, c(TRUE, FALSE, TRUE, FALSE)) expect_equal(out$x_pieces, c(NA, 1, 2, 3)) expect_equal(out$x_remainder, c(NA, "", "", " z")) # And can do so selectively suppressWarnings( out <- df %>% separate_wider_delim(x, " ", names = c("a", "b"), too_few = "align_start", too_many = "debug", ) ) expect_equal(out$x_ok, c(TRUE, TRUE, TRUE, FALSE)) }) test_that("separate_wider_delim() doesn't count NA input as problem", { df <- tibble(x = NA) expect_equal( df %>% separate_wider_delim(x, ",", names = c("a", "b")), tibble(a = NA_character_, b = NA_character_) ) }) test_that("separate_wider_delim() works with empty data frames", { df <- tibble(x = character()) out <- separate_wider_delim(df, x, delim = ",", names = c("y", "z")) expect_equal(out, tibble(y = character(), z = character())) out <- separate_wider_delim(df, x, delim = ",", names_sep = "_") expect_equal(out, tibble()) }) test_that("separate_wider_delim() validates its inputs", { df <- 
tibble(x = "x") expect_snapshot(error = TRUE, { df %>% separate_wider_delim() df %>% separate_wider_delim(x) df %>% separate_wider_delim(x, 1) df %>% separate_wider_delim(x, "") df %>% separate_wider_delim(x, "-") df %>% separate_wider_delim(x, "-", names = 1) df %>% separate_wider_delim(x, "-", names = c(x = "x")) df %>% separate_wider_delim(x, "-", names_sep = "_", too_many = "merge") }) }) # separate_wider_position ------------------------------------------------------- test_that("separate_wider_position() errors if lengths are inconsistent", { df <- tibble(x = c("ab", "abc", "abcd")) expect_snapshot(error = TRUE, df %>% separate_wider_position(x, widths = c("a" = 2, "b" = 1)) ) }) test_that("separate_wider_position() can ignore problems", { df <- tibble(x = c("ab", "abc", "abcd")) out <- df %>% separate_wider_position( x, widths = c("a" = 2, "b" = 1), too_few = "align_start", too_many = "drop" ) expect_equal(out[1, ], tibble(a = "ab", b = NA_character_)) expect_equal(out[2, ], tibble(a = "ab", b = "c")) expect_equal(out[3, ], tibble(a = "ab", b = "c")) }) test_that("separate_wider_position() can diagnose problems", { df <- tibble(x = c(NA, "ab", "abc", "abcd")) expect_snapshot( out <- df %>% separate_wider_position( x, widths = c("a" = 2, "b" = 1), too_few = "debug", too_many = "debug" ) ) expect_equal(out$x, df$x) expect_equal(out$x_ok, c(TRUE, FALSE, TRUE, FALSE)) expect_equal(out$x_width, c(NA, 2, 3, 4)) expect_equal(out$x_remainder, c(NA, "", "", "d")) }) test_that("separate_wider_posiion() doesn't count NA input as problem", { df <- tibble(x = NA) expect_equal( df %>% separate_wider_position(x, widths = c(a = 1, b = 2)), tibble(a = NA_character_, b = NA_character_) ) }) test_that("separate_wider_position() can drop values", { df <- tibble(x = "a-b") out <- df %>% separate_wider_position(x, widths = c("a" = 1, 1, "b" = 1)) expect_equal(out, tibble(a = "a", b = "b")) }) test_that("separate_wider_position() works with empty data frames", { df <- tibble(x = 
character()) out <- separate_wider_position(df, x, widths = c(y = 1, z = 2)) expect_equal(out, tibble(y = character(), z = character())) }) test_that("separate_wider_position() validates its inputs", { df <- tibble(x = "x") expect_snapshot(error = TRUE, { df %>% separate_wider_position() df %>% separate_wider_position(x) df %>% separate_wider_position(x, widths = 1.5) df %>% separate_wider_position(x, widths = 1L) df %>% separate_wider_position(x, widths = c(x = 0)) }) }) # separate_wider_regex ---------------------------------------------------- test_that("separate_wider_regex() can extract columns", { df <- tibble(x = "a123") out <- df %>% separate_wider_regex(x, c("a" = ".", "b" = "\\d+")) expect_equal(out, tibble(a = "a", b = "123")) }) test_that("separate_wider_regex() errors if match fails", { df <- tibble(x = c("a-123", "b_123")) expect_snapshot(error = TRUE, { df %>% separate_wider_regex(x, c("a" = ".", "-", "b" = "\\d+")) }) }) test_that("separate_wider_regex() can silence errors", { df <- tibble(x = c("a-123", "b_123")) out <- df %>% separate_wider_regex( x, c("a" = ".", "-", "b" = "\\d+"), too_few = "align_start" ) expect_equal(out$a, c("a", "b")) expect_equal(out$b, c("123", NA)) }) test_that("separate_wider_regex() can diagnose errors", { df <- tibble(x = c(NA, "a-123", "b_123", "c-123x", "XXXX")) expect_snapshot({ out <- df %>% separate_wider_regex( x, c("a" = "[a-z]", "-", "b" = "\\d+"), too_few = "debug" ) }) expect_equal(out$x, df$x) expect_equal(out$x_ok, c(TRUE, TRUE, FALSE, FALSE, FALSE)) expect_equal(out$x_matches, c(NA, 3, 1, 3, 0)) expect_equal(out$x_remainder, c(NA, "", "_123", "x", "XXXX")) }) test_that("separate_wider_regex() doesn't count NA input as problem", { df <- tibble(x = NA) expect_equal( df %>% separate_wider_regex(x, patterns = c(a = ".", b = ".")), tibble(a = NA_character_, b = NA_character_) ) }) test_that("separate_wider_regex() can drop values", { df <- tibble(x = "ab123") out <- df %>% separate_wider_regex(x, c("a" = ".", 
".", "b" = "\\d+")) expect_equal(out, tibble(a = "a", b = "123")) }) test_that("separate_wider_regex() can use odd names", { df <- tibble(x = "ab123") out <- df %>% separate_wider_regex(x, c("_" = ".", ".", "." = "\\d+")) expect_equal(out, tibble(`_` = "a", `.` = "123")) }) test_that("separate_wider_regex() gives informative error if () used", { df <- tibble(x = "x") expect_snapshot(error = TRUE, { df %>% separate_wider_regex(x, c("_" = "(.)")) }) }) test_that("separate_wider_regex() works with empty data frames", { df <- tibble(x = character()) out <- separate_wider_regex(df, x, patterns = c(y = ".", z = ".")) expect_equal(out, tibble(y = character(), z = character())) }) test_that("separate_wider_regex() advises on outer / inner name duplication (#1425)", { df <- tibble(x = 1, y = "g1") expect_snapshot(error = TRUE, { separate_wider_regex(df, y, patterns = c(x = ".", value = ".")) }) }) test_that("separate_wider_regex() advises on inner / inner name duplication (#1425)", { df <- tibble(x = "g1", y = "m2") expect_snapshot(error = TRUE, { separate_wider_regex(df, c(x, y), patterns = c(gender = ".", value = ".")) }) }) test_that("separate_wider_regex() validates its inputs", { df <- tibble(x = "x") expect_snapshot(error = TRUE, { df %>% separate_wider_regex() df %>% separate_wider_regex(x) df %>% separate_wider_regex(y, patterns = c(x = "-")) df %>% separate_wider_regex(x, patterns = ".") }) }) tidyr/tests/testthat/test-unnest-auto.R0000644000176200001440000000227114321316017017771 0ustar liggesusers # unnest_auto ------------------------------------------------------------- test_that("unnamed becomes longer", { df <- tibble(x = 1:2, y = list(1, 2:3)) expect_message(out <- df %>% unnest_auto(y), "unnest_longer") expect_equal(out$y, c(1, 2, 3)) }) test_that("common name becomes wider", { df <- tibble(x = 1:2, y = list(c(a = 1), c(a = 2))) expect_message(out <- df %>% unnest_auto(y), "unnest_wider") expect_named(out, c("x", "a")) }) test_that("no common name falls 
back to longer with index", { df <- tibble(x = 1:2, y = list(c(a = 1), c(b = 2))) expect_message(out <- df %>% unnest_auto(y), "unnest_longer") expect_named(out, c("x", "y", "y_id")) }) test_that("mix of named and unnamed becomes longer", { df <- tibble(x = 1:2, y = list(c(a = 1), 2)) expect_message(out <- df %>% unnest_auto(y), "unnest_longer") expect_named(out, c("x", "y")) }) # https://github.com/tidyverse/tidyr/issues/959 test_that("works with an input that has column named `col`", { df <- tibble( col = 1L, list_col = list(list(x = "a", y = "b"), list(x = "c", y = "d")) ) expect_message(out <- df %>% unnest_auto(list_col), "unnest_wider") expect_named(out, c("col", "x", "y")) }) tidyr/tests/testthat/test-unite.R0000644000176200001440000000377014360013543016641 0ustar liggesuserstest_that("unite pastes columns together & removes old col", { df <- tibble(x = "a", y = "b") out <- unite(df, z, x:y) expect_equal(names(out), "z") expect_equal(out$z, "a_b") }) test_that("unite does not remove new col in case of name clash", { df <- tibble(x = "a", y = "b") out <- unite(df, x, x:y) expect_equal(names(out), "x") expect_equal(out$x, "a_b") }) test_that("unite preserves grouping", { df <- tibble(g = 1, x = "a") %>% dplyr::group_by(g) rs <- df %>% unite(x, x) expect_equal(df, rs) expect_equal(class(df), class(rs)) expect_equal(dplyr::group_vars(df), dplyr::group_vars(rs)) }) test_that("drops grouping when needed", { df <- tibble(g = 1, x = "a") %>% dplyr::group_by(g) rs <- df %>% unite(gx, g, x) expect_equal(rs$gx, "1_a") expect_equal(dplyr::group_vars(rs), character()) }) test_that("preserves row names of data.frames (#1454)", { df <- data.frame(x = c("1", "2"), y = c("3", "4"), row.names = c("a", "b")) expect_identical(row.names(unite(df, "xy", x, y)), c("a", "b")) }) test_that("empty var spec uses all vars", { df <- tibble(x = "a", y = "b") expect_equal(unite(df, "z"), tibble(z = "a_b")) }) test_that("can remove missing vars on request", { df <- expand_grid(x = c("a", 
NA), y = c("b", NA)) out <- unite(df, "z", x:y, na.rm = TRUE) expect_equal(out$z, c("a_b", "a", "b", "")) }) test_that("regardless of the type of the NA", { vec_unite <- function(df, vars) { unite(df, "out", any_of(vars), na.rm = TRUE)$out } df <- tibble( x = c("x", "y", "z"), lgl = NA, dbl = NA_real_, chr = NA_character_ ) expect_equal(vec_unite(df, c("x", "lgl")), c("x", "y", "z")) expect_equal(vec_unite(df, c("x", "dbl")), c("x", "y", "z")) expect_equal(vec_unite(df, c("x", "chr")), c("x", "y", "z")) }) test_that("validates its inputs", { df <- tibble(x = "a", y = "b") expect_snapshot(error = TRUE, { unite(df) unite(df, "z", x:y, sep = 1) unite(df, "z", x:y, remove = 1) unite(df, "z", x:y, na.rm = 1) }) }) tidyr/tests/testthat/test-pivot-long.R0000644000176200001440000003302314357015307017613 0ustar liggesuserstest_that("can pivot all cols to long", { df <- tibble(x = 1:2, y = 3:4) pv <- pivot_longer(df, x:y) expect_named(pv, c("name", "value")) expect_equal(pv$name, rep(names(df), 2)) expect_equal(pv$value, c(1, 3, 2, 4)) }) test_that("values interleaved correctly", { df <- tibble( x = c(1, 2), y = c(10, 20), z = c(100, 200), ) pv <- pivot_longer(df, 1:3) expect_equal(pv$value, c(1, 10, 100, 2, 20, 200)) }) test_that("can add multiple columns from spec", { df <- tibble(x = 1:2, y = 3:4) sp <- tibble(.name = c("x", "y"), .value = "v", a = 1, b = 2) pv <- pivot_longer_spec(df, spec = sp) expect_named(pv, c("a", "b", "v")) }) test_that("preserves original keys", { df <- tibble(x = 1:2, y = 2, z = 1:2) pv <- pivot_longer(df, y:z) expect_named(pv, c("x", "name", "value")) expect_equal(pv$x, rep(df$x, each = 2)) }) test_that("can drop missing values", { df <- data.frame(x = c(1, NA), y = c(NA, 2)) pv <- pivot_longer(df, x:y, values_drop_na = TRUE) expect_equal(pv$name, c("x", "y")) expect_equal(pv$value, c(1, 2)) }) test_that("can handle missing combinations", { df <- tribble( ~id, ~x_1, ~x_2, ~y_2, "A", 1, 2, "a", "B", 3, 4, "b", ) pv <- pivot_longer(df, -id, 
names_to = c(".value", "n"), names_sep = "_") expect_named(pv, c("id", "n", "x", "y")) expect_equal(pv$x, 1:4) expect_equal(pv$y, c(NA, "a", NA, "b")) }) test_that("mixed columns are automatically coerced", { df <- data.frame(x = factor("a"), y = factor("b")) pv <- pivot_longer(df, x:y) expect_equal(pv$value, factor(c("a", "b"))) }) test_that("can override default output column type", { df <- tibble(x = "x", y = 1L) pv <- pivot_longer(df, x:y, values_transform = list(value = as.list)) expect_equal(pv$value, list("x", 1)) }) test_that("can pivot to multiple measure cols", { df <- tibble(x = "x", y = 1) sp <- tribble( ~.name, ~.value, ~row, "x", "X", 1, "y", "Y", 1, ) pv <- pivot_longer_spec(df, sp) expect_named(pv, c("row", "X", "Y")) expect_equal(pv$X, "x") expect_equal(pv$Y, 1) }) test_that("original col order is preserved", { df <- tribble( ~id, ~z_1, ~y_1, ~x_1, ~z_2, ~y_2, ~x_2, "A", 1, 2, 3, 4, 5, 6, "B", 7, 8, 9, 10, 11, 12, ) pv <- pivot_longer(df, -id, names_to = c(".value", "n"), names_sep = "_") expect_named(pv, c("id", "n", "z", "y", "x")) }) test_that("handles duplicated column names", { df <- tibble(x = 1, a = 1, a = 2, b = 3, b = 4, .name_repair = "minimal") pv <- pivot_longer(df, -x) expect_named(pv, c("x", "name", "value")) expect_equal(pv$name, c("a", "a", "b", "b")) expect_equal(pv$value, 1:4) }) test_that("can pivot duplicated names to .value", { df <- tibble(x = 1, a_1 = 1, a_2 = 2, b_1 = 3, b_2 = 4) pv1 <- pivot_longer(df, -x, names_to = c(".value", NA), names_sep = "_") pv2 <- pivot_longer(df, -x, names_to = c(".value", NA), names_pattern = "(.)_(.)") pv3 <- pivot_longer(df, -x, names_to = ".value", names_pattern = "(.)_.") expect_named(pv1, c("x", "a", "b")) expect_equal(pv1$a, c(1, 2)) expect_equal(pv2, pv1) expect_equal(pv3, pv1) }) test_that(".value can be at any position in `names_to`", { samp <- tibble( i = 1:4, y_t1 = rnorm(4), y_t2 = rnorm(4), z_t1 = rep(3, 4), z_t2 = rep(-2, 4), ) value_first <- pivot_longer(samp, -i, names_to = 
c(".value", "time"), names_sep = "_") samp2 <- dplyr::rename(samp, t1_y = y_t1, t2_y = y_t2, t1_z = z_t1, t2_z = z_t2) value_second <- pivot_longer(samp2, -i, names_to = c("time", ".value"), names_sep = "_") expect_identical(value_first, value_second) }) test_that("type error message use variable names", { df <- data.frame(abc = 1, xyz = "b") err <- capture_error(pivot_longer(df, everything())) expect_s3_class(err, "vctrs_error_incompatible_type") expect_equal(err$x_arg, "abc") expect_equal(err$y_arg, "xyz") }) test_that("when `values_ptypes` is provided, the type error uses variable names (#1364)", { df <- tibble(x = 1) expect_snapshot({ (expect_error(pivot_longer(df, x, values_ptypes = character()))) }) }) test_that("when `names_ptypes` is provided, the type error uses `names_to` names (#1364)", { df <- tibble(x = 1) expect_snapshot({ (expect_error({ pivot_longer( df, cols = x, names_to = "name", names_ptypes = double() ) })) }) }) test_that("error when overwriting existing column", { df <- tibble(x = 1, y = 2) expect_snapshot( (expect_error(pivot_longer(df, y, names_to = "x"))) ) expect_snapshot( out <- pivot_longer(df, y, names_to = "x", names_repair = "unique") ) expect_named(out, c("x...1", "x...2", "value")) }) test_that("grouping is preserved", { df <- tibble(g = 1, x1 = 1, x2 = 2) out <- df %>% dplyr::group_by(g) %>% pivot_longer(x1:x2, names_to = "x", values_to = "v") expect_equal(dplyr::group_vars(out), "g") }) test_that("zero row data frame works", { df <- tibble(x = integer(), y = integer()) pv <- pivot_longer(df, x:y) expect_named(pv, c("name", "value")) expect_equal(pv$name, character()) expect_equal(pv$value, integer()) }) test_that("`cols_vary` can adjust the resulting row ordering (#1312)", { df <- tibble(x = c(1, 2), y = c(3, 4)) expect_identical( pivot_longer(df, c(x, y), cols_vary = "fastest"), tibble(name = c("x", "y", "x", "y"), value = c(1, 3, 2, 4)) ) expect_identical( pivot_longer(df, c(x, y), cols_vary = "slowest"), tibble(name = c("x", 
"x", "y", "y"), value = c(1, 2, 3, 4)) ) }) test_that("`cols_vary` works with id columns not part of the pivoting process", { df <- tibble(id = c("a", "b"), x = c(1, 2), y = c(3, 4)) out <- pivot_longer(df, c(x, y), cols_vary = "fastest") expect_identical(out$id, c("a", "a", "b", "b")) expect_identical( out[c("name", "value")], pivot_longer(df[c("x", "y")], c(x, y), cols_vary = "fastest") ) out <- pivot_longer(df, c(x, y), cols_vary = "slowest") expect_identical(out$id, c("a", "b", "a", "b")) expect_identical( out[c("name", "value")], pivot_longer(df[c("x", "y")], c(x, y), cols_vary = "slowest") ) }) test_that("adjusting `cols_vary` works fine with `values_drop_na`", { df <- tibble(id = c("a", "b"), x = c(1, NA), y = c(3, 4)) expect_identical( pivot_longer(df, c(x, y), cols_vary = "slowest", values_drop_na = TRUE), tibble( id = c("a", "a", "b"), name = c("x", "y", "y"), value = c(1, 3, 4) ) ) }) # spec -------------------------------------------------------------------- test_that("validates inputs", { df <- tibble(x = 1) expect_error(build_longer_spec(df, x, values_to = letters[1:2]), class = "vctrs_error_assert" ) }) test_that("no names doesn't generate names (#1120)", { df <- tibble(x = 1) expect_identical( colnames(build_longer_spec(df, x, names_to = character())), c(".name", ".value") ) expect_identical( colnames(build_longer_spec(df, x, names_to = NULL)), c(".name", ".value") ) }) test_that("multiple names requires names_sep/names_pattern", { df <- tibble(x_y = 1) expect_snapshot({ (expect_error(build_longer_spec(df, x_y, names_to = c("a", "b")))) (expect_error( build_longer_spec( df, x_y, names_to = c("a", "b"), names_sep = "x", names_pattern = "x" ) )) }) }) test_that("names_sep generates correct spec", { df <- tibble(x_y = 1) sp <- build_longer_spec(df, x_y, names_to = c("a", "b"), names_sep = "_") expect_equal(sp$a, "x") expect_equal(sp$b, "y") }) test_that("names_sep fails with single name", { df <- tibble(x_y = 1) expect_snapshot({ 
(expect_error(build_longer_spec(df, x_y, names_to = "x", names_sep = "_"))) }) }) test_that("names_pattern generates correct spec", { df <- tibble(zx_y = 1) sp <- build_longer_spec(df, zx_y, names_to = c("a", "b"), names_pattern = "z(.)_(.)") expect_equal(sp$a, "x") expect_equal(sp$b, "y") sp <- build_longer_spec(df, zx_y, names_to = "a", names_pattern = "z(.)") expect_equal(sp$a, "x") }) test_that("names_to can override value_to", { df <- tibble(x_y = 1) sp <- build_longer_spec(df, x_y, names_to = c("a", ".value"), names_sep = "_") expect_equal(sp$.value, "y") }) test_that("names_prefix strips off from beginning", { df <- tibble(zzyz = 1) sp <- build_longer_spec(df, zzyz, names_prefix = "z") expect_equal(sp$name, "zyz") }) test_that("can cast to custom type", { df <- tibble(w1 = 1) sp <- build_longer_spec(df, w1, names_prefix = "w", names_transform = list(name = as.integer) ) expect_equal(sp$name, 1L) }) test_that("transform is applied before cast (#1233)", { df <- tibble(w1 = 1) sp <- build_longer_spec( df, w1, names_prefix = "w", names_ptypes = list(name = integer()), names_transform = list(name = as.numeric) ) expect_identical(sp$name, 1L) }) test_that("`names_ptypes` and `names_transform` work with single values (#1284)", { df <- tibble(`1x2` = 1) res <- build_longer_spec( data = df, cols = `1x2`, names_to = c("one", "two"), names_sep = "x", names_transform = as.numeric ) expect_identical(res$one, 1) expect_identical(res$two, 2) res <- build_longer_spec( data = df, cols = `1x2`, names_to = c("one", "two"), names_sep = "x", names_transform = as.numeric, names_ptypes = integer() ) expect_identical(res$one, 1L) expect_identical(res$two, 2L) }) test_that("`names_ptypes = list()` is interpreted as recycling for all name columns (#1296)", { df <- tibble(`1x2` = 1) res <- build_longer_spec( data = df, cols = `1x2`, names_to = c("one", "two"), names_sep = "x", names_transform = as.list, names_ptypes = list() ) expect_identical(res$one, list("1")) 
expect_identical(res$two, list("2")) }) test_that("`values_ptypes` works with single empty ptypes (#1284)", { df <- tibble(x_1 = 1, y_1 = 2) res <- pivot_longer( data = df, cols = everything(), names_to = c(".value", "set"), names_sep = "_", values_ptypes = integer() ) expect_identical(res$x, 1L) expect_identical(res$y, 2L) }) test_that("`values_ptypes = list()` is interpreted as recycling for all value columns (#1296)", { df <- tibble(x_1 = list_of(1L, 2:3, 4L), y_1 = list_of(2:3, 4L, 5:6)) res <- pivot_longer( data = df, cols = everything(), names_to = c(".value", "set"), names_sep = "_", values_ptypes = list() ) expect_identical(res$x, vec_cast(df$x_1, list())) expect_identical(res$y, vec_cast(df$y_1, list())) }) test_that("`values_transform` works with single functions (#1284)", { df <- tibble(x_1 = 1, y_1 = 2) res <- pivot_longer( data = df, cols = everything(), names_to = c(".value", "set"), names_sep = "_", values_transform = as.character ) expect_identical(res$x, "1") expect_identical(res$y, "2") }) test_that("Error if the `col` can't be selected.", { expect_snapshot({ (expect_error(pivot_longer(iris, matches("foo")))) }) }) test_that("named `cols` gives clear error (#1104)", { df <- data.frame(x = 1, y = 2) expect_snapshot(pivot_longer(df, c(z = y)), error = TRUE) }) test_that("`names_to` is validated", { df <- tibble(x = 1) expect_snapshot({ (expect_error(build_longer_spec(df, x, names_to = 1))) (expect_error(build_longer_spec(df, x, names_to = c("x", "y")))) (expect_error(build_longer_spec(df, x, names_to = c("x", "y"), names_sep = "_", names_pattern = "x"))) }) }) test_that("`names_ptypes` is validated", { df <- tibble(x = 1) expect_snapshot({ (expect_error(build_longer_spec(df, x, names_ptypes = 1))) (expect_error(build_longer_spec(df, x, names_ptypes = list(integer())))) }) }) test_that("`names_transform` is validated", { df <- tibble(x = 1) expect_snapshot({ (expect_error(build_longer_spec(df, x, names_transform = 1))) 
(expect_error(build_longer_spec(df, x, names_transform = list(~.x)))) }) }) test_that("`values_ptypes` is validated", { df <- tibble(x = 1) expect_snapshot({ (expect_error(pivot_longer(df, x, values_ptypes = 1))) (expect_error(pivot_longer(df, x, values_ptypes = list(integer())))) }) }) test_that("`values_transform` is validated", { df <- tibble(x = 1) expect_snapshot({ (expect_error(pivot_longer(df, x, values_transform = 1))) (expect_error(pivot_longer(df, x, values_transform = list(~.x)))) }) }) test_that("`cols_vary` is validated", { df <- tibble(x = 1) expect_snapshot({ (expect_error(pivot_longer(df, x, cols_vary = "fast"))) (expect_error(pivot_longer(df, x, cols_vary = 1))) }) }) test_that("`pivot_longer()` catches unused input passed through the dots", { df <- tibble(id = c("a", "b"), x = c(1, 2), y = c(3, 4)) expect_snapshot({ (expect_error(pivot_longer(df, c(x, y), 1))) (expect_error(pivot_longer(df, c(x, y), col_vary = "slowest"))) }) }) test_that("`build_longer_spec()` requires empty dots", { df <- tibble(id = c("a", "b"), x = c(1, 2), y = c(3, 4)) expect_snapshot({ (expect_error(build_longer_spec(df, c(x, y), 1))) (expect_error(build_longer_spec(df, c(x, y), name_to = "name"))) }) }) test_that("`pivot_longer_spec()` requires empty dots", { df <- tibble(id = c("a", "b"), x = c(1, 2), y = c(3, 4)) spec <- build_longer_spec(df, c(x, y)) expect_snapshot({ (expect_error(pivot_longer_spec(df, spec, 1))) (expect_error(pivot_longer_spec(df, spec, col_vary = "slowest"))) }) }) tidyr/tests/testthat/test-unnest.R0000644000176200001440000001771314360013543017033 0ustar liggesuserstest_that("can keep empty rows", { df <- tibble(x = 1:3, y = list(NULL, tibble(), tibble(a = 1))) out1 <- df %>% unnest(y) expect_equal(nrow(out1), 1) out2 <- df %>% unnest(y, keep_empty = TRUE) expect_equal(nrow(out2), 3) expect_equal(out2$a, c(NA, NA, 1)) }) test_that("empty rows still affect output type", { df <- tibble( x = 1:2, data = list( tibble(y = character(0)), tibble(z = 
integer(0)) ) )
  out <- unnest(df, data)
  expect_equal(out, tibble(x = integer(), y = character(), z = integer()))
})

test_that("bad inputs generate errors", {
  # A bare function (`mean`) is not unnestable; error text is pinned by snapshot.
  df <- tibble(x = 1, y = list(mean))
  expect_snapshot((expect_error(unnest(df, y))))
})

# TYPO FIX: description read "unesting"; renamed to "unnesting". This test
# records no snapshot, so changing its name cannot orphan a snapshot entry.
test_that("unnesting combines augmented vectors", {
  df <- tibble(x = as.list(as.factor(letters[1:3])))
  expect_equal(unnest(df, x)$x, factor(letters[1:3]))
})

test_that("vector unnest preserves names", {
  df <- tibble(x = list(1, 2:3), y = list("a", c("b", "c")))
  out <- unnest(df, x)
  expect_named(out, c("x", "y"))
})

test_that("rows and cols of nested-dfs are expanded", {
  df <- tibble(x = 1:2, y = list(tibble(a = 1), tibble(b = 1:2)))
  out <- df %>% unnest(y)
  expect_named(out, c("x", "a", "b"))
  expect_equal(nrow(out), 3)
})

test_that("can unnest nested lists", {
  df <- tibble(
    x = 1:2,
    y = list(list("a"), list("b"))
  )
  rs <- unnest(df, y)
  expect_identical(rs, tibble(x = 1:2, y = list("a", "b")))
})

# TYPO FIX: description read "mixture of name and unnamed"; renamed to
# "named and unnamed". No snapshot attached, so the rename is safe.
test_that("can unnest mixture of named and unnamed lists of same length", {
  df <- tibble(
    x = c("a"),
    y = list(y = 1:2),
    z = list(1:2)
  )
  expect_identical(
    unnest(df, c(y, z)),
    tibble(x = c("a", "a"), y = c(1:2), z = c(1:2))
  )
})

test_that("can unnest list_of", {
  df <- tibble(
    x = 1:2,
    y = vctrs::list_of(1:3, 4:9)
  )
  expect_equal(
    unnest(df, y),
    tibble(x = rep(1:2, c(3, 6)), y = 1:9)
  )
})

test_that("can combine NULL with vectors or data frames", {
  df1 <- tibble(x = 1:2, y = list(NULL, tibble(z = 1)))
  out <- unnest(df1, y)
  expect_named(out, c("x", "z"))
  expect_equal(out$z, 1)

  df2 <- tibble(x = 1:2, y = list(NULL, 1))
  out <- unnest(df2, y)
  expect_named(out, c("x", "y"))
  expect_equal(out$y, 1)
})

test_that("vectors become columns", {
  df <- tibble(x = 1:2, y = list(1, 1:2))
  out <- unnest(df, y)
  expect_equal(out$y, c(1L, 1:2))
})

test_that("multiple columns must be same length", {
  df <- tibble(x = list(1:2), y = list(1:3))
  expect_snapshot((expect_error(unnest(df, c(x, y)))))

  df <- tibble(x = list(1:2), y = list(tibble(y = 1:3)))
expect_snapshot((expect_error(unnest(df, c(x, y))))) }) test_that("can use non-syntactic names", { out <- tibble("foo bar" = list(1:2, 3)) %>% unnest(`foo bar`) expect_named(out, "foo bar") }) test_that("unpacks df-cols (#1112)", { df <- tibble(x = 1, y = tibble(a = 1, b = 2)) expect_identical(unnest(df, y), tibble(x = 1, a = 1, b = 2)) }) test_that("unnesting column of mixed vector / data frame input is an error", { df <- tibble(x = list(1, tibble(a = 1))) expect_snapshot((expect_error(unnest(df, x)))) }) test_that("unnest() advises on outer / inner name duplication", { df <- tibble(x = 1, y = list(tibble(x = 2))) expect_snapshot(error = TRUE, { unnest(df, y) }) }) test_that("unnest() advises on inner / inner name duplication", { df <- tibble( x = list(tibble(a = 1)), y = list(tibble(a = 2)) ) expect_snapshot(error = TRUE, { unnest(df, c(x, y)) }) }) test_that("unnest() disallows renaming", { df <- tibble(x = list(tibble(a = 1))) expect_snapshot(error = TRUE, { unnest(df, c(y = x)) }) }) test_that("unnest() works on foreign list types recognized by `vec_is_list()` (#1327)", { new_foo <- function(...) 
{ structure(list(...), class = c("foo", "list")) } df <- tibble(x = new_foo(tibble(a = 1L), tibble(a = 2:3))) expect_identical(unnest(df, x), tibble(a = 1:3)) # With empty list df <- tibble(x = new_foo()) expect_identical(unnest(df, x), tibble(x = unspecified())) # With empty types df <- tibble(x = new_foo(tibble(a = 1L), tibble(a = integer()))) expect_identical(unnest(df, x), tibble(a = 1L)) expect_identical(unnest(df, x, keep_empty = TRUE), tibble(a = c(1L, NA))) # With `NULL`s df <- tibble(x = new_foo(tibble(a = 1L), NULL)) expect_identical(unnest(df, x), tibble(a = 1L)) expect_identical(unnest(df, x, keep_empty = TRUE), tibble(a = c(1L, NA))) }) # other methods ----------------------------------------------------------------- test_that("rowwise_df becomes grouped_df", { skip_if_not_installed("dplyr", "0.8.99") df <- tibble(g = 1, x = list(1:3)) %>% dplyr::rowwise(g) rs <- df %>% unnest(x) expect_s3_class(rs, "grouped_df") expect_equal(dplyr::group_vars(rs), "g") }) test_that("grouping is preserved", { df <- tibble(g = 1, x = list(1:3)) %>% dplyr::group_by(g) rs <- df %>% unnest(x) expect_s3_class(rs, "grouped_df") expect_equal(dplyr::group_vars(rs), "g") }) # Empty inputs ------------------------------------------------------------ test_that("can unnest empty data frame", { df <- tibble(x = integer(), y = list()) out <- unnest(df, y) expect_equal(out, tibble(x = integer(), y = unspecified())) }) test_that("unnesting bare lists of NULLs is equivalent to unnesting empty lists", { df <- tibble(x = 1L, y = list(NULL)) out <- unnest(df, y) expect_identical(out, tibble(x = integer(), y = unspecified())) }) test_that("unnest() preserves ptype", { tbl <- tibble(x = integer(), y = list_of(ptype = tibble(a = integer()))) res <- unnest(tbl, y) expect_equal(res, tibble(x = integer(), a = integer())) }) test_that("unnesting typed lists of NULLs retains ptype", { df <- tibble(x = 1L, y = list_of(NULL, .ptype = tibble(a = integer()))) out <- unnest(df, y) 
expect_identical(out, tibble(x = integer(), a = integer())) }) test_that("ptype can be overriden manually (#1158)", { df <- tibble( a = list("a", c("b", "c")), b = list(1, c(2, 3)), ) ptype <- list(b = integer()) out <- unnest(df, c(a, b), ptype = ptype) expect_type(out$b, "integer") expect_identical(out$b, c(1L, 2L, 3L)) }) test_that("ptype works with nested data frames", { df <- tibble( a = list("a", "b"), b = list(tibble(x = 1, y = 2L), tibble(x = 2, y = 3L)), ) # x: double -> integer ptype <- list(b = tibble(x = integer(), y = integer())) out <- unnest(df, c(a, b), ptype = ptype) expect_identical(out$x, c(1L, 2L)) expect_identical(out$y, c(2L, 3L)) }) test_that("skips over vector columns", { df <- tibble(x = integer(), y = list()) expect_identical(unnest(df, x), df) }) test_that("unnest keeps list cols", { df <- tibble(x = 1:2, y = list(3, 4), z = list(5, 6:7)) out <- df %>% unnest(y) expect_equal(names(out), c("x", "y", "z")) }) # Deprecated behaviours --------------------------------------------------- test_that("cols must go in cols", { df <- tibble(x = list(3, 4), y = list("a", "b")) expect_snapshot(unnest(df, x, y)) }) test_that("need supply column names", { df <- tibble(x = 1:2, y = list("a", "b")) expect_snapshot(unnest(df)) }) test_that("sep combines column names", { local_options(lifecycle_verbosity = "warning") df <- tibble(x = list(tibble(x = 1)), y = list(tibble(x = 1))) expect_snapshot(out <- df %>% unnest(c(x, y), .sep = "_")) expect_named(out, c("x_x", "y_x")) }) test_that("unnest has mutate semantics", { df <- tibble(x = 1:3, y = list(1, 2:3, 4)) expect_snapshot(out <- df %>% unnest(z = map(y, `+`, 1))) expect_equal(out$z, 2:5) }) test_that(".drop and .preserve are deprecated", { local_options(lifecycle_verbosity = "warning") df <- tibble(x = list(3, 4), y = list("a", "b")) expect_snapshot(df %>% unnest(x, .preserve = y)) df <- tibble(x = list(3, 4), y = list("a", "b")) expect_snapshot(df %>% unnest(x, .drop = FALSE)) }) test_that(".id creates 
vector of names for vector unnest", { local_options(lifecycle_verbosity = "warning") df <- tibble(x = 1:2, y = list(a = 1, b = 1:2)) expect_snapshot(out <- unnest(df, y, .id = "name")) expect_equal(out$name, c("a", "b", "b")) }) tidyr/tests/testthat/test-separate-rows.R0000644000176200001440000000451014323620576020313 0ustar liggesuserstest_that("can handle collapsed rows", { df <- tibble(x = 1:3, y = c("a", "d,e,f", "g,h")) expect_equal(separate_rows(df, y)$y, unlist(strsplit(df$y, "\\,"))) }) test_that("can handle empty data frames (#308)", { df <- tibble(a = character(), b = character()) rs <- separate_rows(df, b) expect_equal(rs, tibble(a = character(), b = unspecified())) }) test_that("default pattern does not split decimals in nested strings", { df <- dplyr::tibble(x = 1:3, y = c("1", "1.0,1.1", "2.1")) expect_equal(separate_rows(df, y)$y, unlist(strsplit(df$y, ","))) }) test_that("preserves grouping", { df <- tibble(g = 1, x = "a:b") %>% dplyr::group_by(g) rs <- df %>% separate_rows(x) expect_equal(class(df), class(rs)) expect_equal(dplyr::group_vars(df), dplyr::group_vars(rs)) }) test_that("drops grouping when needed", { df <- tibble(x = 1, y = "a:b") %>% dplyr::group_by(x, y) out <- df %>% separate_rows(y) expect_equal(out$y, c("a", "b")) expect_equal(dplyr::group_vars(out), "x") out <- df %>% dplyr::group_by(y) %>% separate_rows(y) expect_equal(dplyr::group_vars(out), character()) }) test_that("drops grouping on zero row data frames when needed (#886)", { df <- tibble(x = numeric(), y = character()) %>% dplyr::group_by(y) out <- df %>% separate_rows(y) expect_equal(dplyr::group_vars(out), character()) }) test_that("convert produces integers etc", { df <- tibble(x = "1,2,3", y = "T,F,T", z = "a,b,c") out <- separate_rows(df, x, y, z, convert = TRUE) expect_equal(class(out$x), "integer") expect_equal(class(out$y), "logical") expect_equal(class(out$z), "character") }) test_that("leaves list columns intact (#300)", { df <- tibble(x = "1,2,3", y = list(1)) out 
<- separate_rows(df, x) # Can't compare tibbles with list columns directly expect_equal(names(out), c("x", "y")) expect_equal(out$x, as.character(1:3)) expect_equal(out$y, rep(list(1), 3)) }) test_that("does not silently drop blank values (#1014)", { df <- tibble(x = 1:3, y = c("a", "d,e,f", "")) out <- separate_rows(df, y) expect_equal( out, tibble(x = c(1, 2, 2, 2, 3), y = c("a", "d", "e", "f", "")) ) }) test_that("it validates its inputs", { df <- tibble(x = 1:3, y = c("a", "d,e,f", "")) expect_snapshot(error = TRUE, { separate_rows(df, x, sep = 1) separate_rows(df, x, convert = 1) }) }) tidyr/tests/testthat/test-nest.R0000644000176200001440000002051214363516001016457 0ustar liggesuserstest_that("nest turns grouped values into one list-df", { df <- tibble(x = c(1, 1, 1), y = 1:3) out <- nest(df, data = y) expect_equal(out$x, 1) expect_equal(length(out$data), 1L) expect_equal(out$data[[1L]], tibble(y = 1:3)) }) test_that("nest uses grouping vars if present", { df <- tibble(x = c(1, 1, 1), y = 1:3) out <- nest(dplyr::group_by(df, x)) expect_s3_class(out, "grouped_df") expect_equal(out$data[[1]], tibble(y = 1:3)) }) test_that("provided grouping vars override grouped defaults", { df <- tibble(x = 1, y = 2, z = 3) %>% dplyr::group_by(x) out <- df %>% nest(data = y) expect_s3_class(out, "grouped_df") expect_named(out, c("x", "z", "data")) expect_named(out$data[[1]], "y") }) test_that("puts data into the correct row", { df <- tibble(x = 1:3, y = c("B", "A", "A")) out <- nest(df, data = x) %>% dplyr::filter(y == "B") expect_equal(out$data[[1]]$x, 1) }) test_that("nesting everything yields a simple data frame", { df <- tibble(x = 1:3, y = c("B", "A", "A")) out <- nest(df, data = c(x, y)) expect_equal(length(out$data), 1L) expect_equal(out$data[[1L]], df) }) test_that("nest preserves order of data", { df <- tibble(x = c(1, 3, 2, 3, 2), y = 1:5) out <- nest(df, data = y) expect_equal(out$x, c(1, 3, 2)) }) test_that("can strip names", { df <- tibble(x = c(1, 1, 1), ya = 
1:3, yb = 4:6) out <- nest(df, y = starts_with("y"), .names_sep = "") expect_named(out$y[[1]], c("a", "b")) }) test_that("`.names_sep` is passed through with bare data.frames (#1174)", { df <- data.frame(x = c(1, 1, 1), ya = 1:3, yb = 4:6) out <- nest(df, y = starts_with("y"), .names_sep = "") expect_named(out$y[[1]], c("a", "b")) }) test_that("empty factor levels don't affect nest", { df <- tibble( x = factor(c("z", "a"), levels = letters), y = 1:2 ) out <- nest(df, data = y) expect_equal(out$x, df$x) }) test_that("nesting works for empty data frames", { df <- tibble(x = integer(), y = character()) out <- nest(df, data = x) expect_named(out, c("y", "data")) expect_equal(nrow(out), 0L) out <- nest(df, data = c(x, y)) expect_named(out, "data") expect_equal(nrow(out), 0L) }) test_that("tibble conversion occurs in the `nest.data.frame()` method", { df <- data.frame(x = 1, y = 1:2) out <- df %>% nest(data = y) expect_s3_class(out, "tbl_df") expect_s3_class(out$data[[1L]], "tbl_df") }) test_that("can nest multiple columns", { df <- tibble(x = 1, a1 = 1, a2 = 2, b1 = 1, b2 = 2) out <- df %>% nest(a = c(a1, a2), b = c(b1, b2)) expect_named(out, c("x", "a", "b")) expect_equal(as.list(out$a), list(df[c("a1", "a2")])) expect_equal(as.list(out$b), list(df[c("b1", "b2")])) }) test_that("nesting no columns nests all inputs", { df <- tibble(a1 = 1, a2 = 2, b1 = 1, b2 = 2) out <- nest(df) expect_named(out, "data") expect_equal(out$data[[1]], df) }) test_that("can control output column name when nesting all inputs", { df <- tibble(a1 = 1, a2 = 2, b1 = 1, b2 = 2) out <- nest(df, .key = "foo") expect_named(out, "foo") expect_equal(out$foo[[1]], df) }) test_that("can control output column name when only supplying `.by`", { df <- tibble(a1 = 1, a2 = 2, b1 = 1, b2 = 2) out <- nest(df, .by = a2, .key = "foo") expect_named(out, c("a2", "foo")) expect_equal(out$foo[[1]], df[c("a1", "b1", "b2")]) }) test_that("can control output column name when nesting by groups", { df <- 
dplyr::group_by(tibble(x = c(1, 1, 1), y = 1:3), x) out <- nest(df, .key = "y") expect_named(out, c("x", "y")) }) test_that("can nest `.by` columns", { df <- tibble(x = c(1, 1, 1, 2, 2), y = c(2, 1, 2, 3, 4), z = 1:5) expect_identical( nest(df, .by = c(x, y)), nest(df, data = z) ) }) test_that("can combine `.by` with `...`", { df <- tibble(x = c(1, 1, 1, 2, 2), y = c(2, 1, 2, 3, 4), z = 1:5) expect_identical( nest(df, data = x, .by = y), nest(dplyr::select(df, -z), data = x) ) }) test_that("nest disallows renaming", { df <- tibble(x = 1) expect_snapshot(error = TRUE, { nest(df, data = c(a = x)) }) expect_snapshot(error = TRUE, { nest(df, .by = c(a = x)) }) }) test_that("catches when `...` overwrites an existing column", { df <- tibble(x = 1, y = 2) # Hardcoded as an error. # Name repair would likely break internal usage of `chop()`. expect_snapshot(error = TRUE, { nest(df, x = y) }) }) test_that("validates its inputs", { df <- tibble(x = c(1, 1, 1), ya = 1:3, yb = 4:6) expect_snapshot(error = TRUE, { nest(df, y = ya:yb, .names_sep = 1) }) expect_snapshot(error = TRUE, { nest(df, y = ya:yb, .key = 1) }) }) # nest_info() / .by ------------------------------------------------------- test_that("Supplied `...` + No `.by` works", { df <- tibble(x = 1, y = 2, z = 3) out <- nest_info(df, data = c(x, z), data2 = x) expect_identical(out$inner, c("x", "z")) expect_identical(out$outer, "y") expect_identical(quo_get_expr(out$cols$data), expr(all_of(!!c("x", "z")))) expect_identical(quo_get_expr(out$cols$data2), expr(all_of(!!"x"))) }) test_that("Supplied `...` + Supplied `.by` works", { df <- tibble(x = 1, y = 2, z = 3) out <- nest_info(df, data = c(x, y), .by = y) expect_identical(out$inner, c("x", "y")) expect_identical(out$outer, "y") }) test_that("No `...` + Supplied `.by` works", { df <- tibble(x = 1, y = 2, z = 3) out <- nest_info(df, .by = y) expect_identical(out$inner, c("x", "z")) expect_identical(out$outer, "y") }) test_that("No `...` + No `.by` works", { df <- 
tibble(x = 1, y = 2, z = 3) # We define this to mean "nest everything" rather than "nest by everything", # as the former is generally more useful and is backwards compatible out <- nest_info(df) expect_identical(out$inner, c("x", "y", "z")) expect_identical(out$outer, character()) }) test_that("`everything()` always selects from full data", { df <- tibble(x = 1, y = 2, z = 3) out <- nest_info(df, data = everything(), .by = everything()) expect_identical(out$inner, c("x", "y", "z")) expect_identical(out$outer, c("x", "y", "z")) }) test_that("`.key` can alter the implied inner name", { df <- tibble(x = 1, y = 2, z = 3) out <- nest_info(df, .key = "foo") expect_named(out$cols, "foo") out <- nest_info(df, .by = x, .key = "foo") expect_named(out$cols, "foo") }) test_that("warns if `.key` is supplied alongside `...`", { df <- tibble(x = 1, y = 2) expect_snapshot(out <- nest_info(df, data = 2, .key = "foo")) expect_named(out$cols, "data") # Checking for warning at top level too expect_snapshot(out <- nest(df, data = 2, .key = "foo")) }) test_that("`.by` isn't allowed for grouped data frames", { df <- tibble(g = 1, x = 2) df <- dplyr::group_by(df, g) expect_snapshot(error = TRUE, { nest(df, .by = x) }) }) # Deprecated behaviours --------------------------------------------------- test_that("warn about old style interface", { df <- tibble(x = c(1, 1, 1), y = 1:3) expect_snapshot(out <- nest(df, y)) expect_named(out, c("x", "data")) expect_snapshot(out <- nest(df, -y)) expect_named(out, c("y", "data")) }) test_that("can use `.by` with old style interface", { df <- tibble(x = c(1, 1, 1), y = 1:3, z = 1:3) expect_snapshot(out <- nest(df, y, .by = x)) expect_identical(out, nest(df, data = y, .by = x)) # Notably, no warning about using `...` and `.key` together expect_snapshot(out <- nest(df, y, .by = x, .key = "foo")) expect_identical(out, nest(df, foo = y, .by = x)) }) test_that("only warn about unnamed inputs (#1175)", { df <- tibble(x = 1:3, y = 1:3, z = 1:3) 
expect_snapshot(out <- nest(df, x, y, foo = z))
  expect_named(out, c("foo", "data"))
})

test_that("unnamed expressions are kept in the warning", {
  df <- tibble(x = 1:3, z = 1:3)
  expect_snapshot(out <- nest(df, x, starts_with("z")))
  expect_named(out, "data")
})

test_that("can control output column name", {
  df <- tibble(x = c(1, 1, 1), y = 1:3)
  expect_snapshot(out <- nest(df, y, .key = "y"))
  expect_named(out, c("x", "y"))
})

test_that(".key gets warning with new interface", {
  df <- tibble(x = c(1, 1, 1), y = 1:3)
  expect_snapshot(out <- nest(df, y = y, .key = "foo"))
  # BUG FIX: the original asserted `expect_named(df, c("x", "y"))`, i.e. it
  # re-checked the untouched *input*, so `out` was never verified and the
  # assertion could not fail. `.key` is ignored (with the warning captured by
  # the snapshot above) when `...` is supplied, so the nested column keeps the
  # name `y`; assert that on the result.
  expect_named(out, c("x", "y"))
})

test_that("old usage of `.key = deprecated()` is translated to `.key = NULL`", {
  # For `nest()` S3 method authors that did this
  df <- tibble(x = c(1, 1, 2))
  expect_identical(
    nest(df, data = x, .key = deprecated()),
    nest(df, data = x)
  )
})

tidyr/tests/testthat/test-expand.R0000644000176200001440000003074114315413441016773 0ustar liggesuserstest_that("expand completes all values", {
  df <- data.frame(x = 1:2, y = 1:2)
  out <- expand(df, x, y)
  expect_equal(nrow(out), 4)
})

test_that("multiple variables in one arg doesn't expand", {
  df <- data.frame(x = 1:2, y = 1:2)
  out <- expand(df, c(x, y))
  expect_equal(nrow(out), 2)
})

test_that("expand with nesting doesn't expand values", {
  df <- tibble(x = 1:2, y = 1:2)
  expect_equal(expand(df, nesting(x, y)), df)
})

test_that("unnamed data frames are flattened", {
  df <- data.frame(x = 1:2, y = 1:2)
  out <- expand(df, nesting(x, y))
  expect_equal(out$x, df$x)

  out <- crossing(df)
  expect_equal(out$x, df$x)
})

test_that("named data frames are not flattened", {
  df <- tibble(x = 1:2, y = 1:2)
  out <- expand(df, x = nesting(x, y))
  expect_equal(out$x, df)

  out <- crossing(x = df)
  expect_equal(out$x, df)
})

test_that("expand works with non-standard col names", {
  df <- tibble(` x ` = 1:2, `/y` = 1:2)
  out <- expand(df, ` x `, `/y`)
  expect_equal(nrow(out), 4)
})

test_that("expand accepts expressions", {
  df <- expand(data.frame(), x = 1:3, y = 3:1)
expect_equal(df, crossing(x = 1:3, y = 3:1))
})

test_that("expand will expand within each group (#396)", {
  df <- tibble(
    g = c("a", "b", "a"),
    a = c(1L, 1L, 2L),
    b = factor(c("a", "a", "b"), levels = c("a", "b", "c"))
  )
  gdf <- dplyr::group_by(df, g)

  out <- expand(gdf, a, b)

  # Still grouped
  expect_identical(dplyr::group_vars(out), "g")

  out <- nest(out, data = -g)
  expect_identical(out$data[[1]], crossing(a = 1:2, b = factor(levels = c("a", "b", "c"))))
  expect_identical(out$data[[2]], crossing(a = 1L, b = factor(levels = c("a", "b", "c"))))
})

test_that("expand does not allow expansion on grouping variable (#1299)", {
  df <- tibble(
    g = "x",
    a = 1L
  )
  gdf <- dplyr::group_by(df, g)

  # This is a dplyr error that we don't own
  expect_error(expand(gdf, g))
})

test_that("can use `.drop = FALSE` with expand (#1299)", {
  levels <- c("a", "b", "c")

  df <- tibble(
    g = factor(c("a", "b", "a"), levels = levels),
    a = c(1L, 1L, 2L),
    b = factor(c("a", "a", "b"), levels = levels)
  )
  gdf <- dplyr::group_by(df, g, .drop = FALSE)

  # No data in group "c" for `a`, so we don't get that in the result
  expect_identical(
    expand(gdf, a),
    vec_sort(gdf[c("g", "a")])
  )

  expect <- crossing(g = factor(levels = levels), b = factor(levels = levels))
  expect <- dplyr::group_by(expect, g, .drop = FALSE)

  # Levels of empty vector in `b` are expanded for group "c"
  expect_identical(expand(gdf, b), expect)
})

test_that("expand moves the grouping variables to the front", {
  df <- tibble(
    a = 1L,
    g = "x"
  )
  gdf <- dplyr::group_by(df, g)
  expect_named(expand(gdf, a), c("g", "a"))
})

test_that("preserves ordered factors", {
  df <- tibble(a = ordered("a"))
  out <- expand(df, a)
  # BUG FIX: the original asserted `expect_equal(df$a, ordered("a"))`, i.e. it
  # compared the *input* to itself and left `out` unused, so the test could
  # never detect a regression. Assert on the expanded result instead.
  expect_equal(out$a, ordered("a"))
})

test_that("NULL inputs", {
  tb <- tibble(x = 1:5)
  expect_equal(expand(tb, x, y = NULL), tb)
})

test_that("zero length input gives zero length output", {
  tb <- tibble(x = character())
  expect_equal(expand(tb, x), tb)
})

test_that("no input results in 1 row data frame", {
  tb <- tibble(x = "a")
  expect_identical(expand(tb),
tibble(.rows = 1L)) expect_identical(expand(tb, y = NULL), tibble(.rows = 1L)) }) test_that("expand & crossing expand missing factor leves; nesting does not", { tb <- tibble( x = 1:3, f = factor("a", levels = c("a", "b")) ) expect_equal(nrow(expand(tb, x, f)), 6) expect_equal(nrow(crossing(!!!tb)), 6) expect_equal(nrow(nesting(!!!tb)), nrow(tb)) }) test_that("expand() reconstructs input dots is empty", { expect_s3_class(expand(mtcars), "data.frame") expect_s3_class(expand(as_tibble(mtcars)), "tbl_df") }) test_that("expand() with no inputs returns 1 row", { expect_identical(expand(tibble()), tibble(.rows = 1L)) }) test_that("expand() with empty nesting() / crossing() calls 'ignores' them (#1258)", { df <- tibble(x = factor(c("a", "c"), letters[1:3])) expect_identical(expand(df), expand(df, nesting())) expect_identical(expand(df), expand(df, crossing())) expect_identical(expand(df, x), expand(df, x, nesting())) expect_identical(expand(df, x), expand(df, x, crossing())) expect_identical(expand(df, x), expand(df, x, nesting(NULL))) expect_identical(expand(df, x), expand(df, x, crossing(NULL))) }) test_that("expand() retains `NA` data in factors (#1275)", { df <- tibble(x = factor(c(NA, "x"), levels = "x")) expect_identical( expand(df, x), tibble(x = factor(c("x", NA), levels = "x")) ) }) # ------------------------------------------------------------------------------ test_that("crossing checks for bad inputs", { expect_snapshot((expect_error(crossing(x = 1:10, y = quote(a))))) }) test_that("preserves NAs", { x <- c("A", "B", NA) expect_equal(crossing(x)$x, x) expect_equal(nesting(x)$x, x) }) test_that("crossing() preserves factor levels", { x_na_lev_extra <- factor(c("a", NA), levels = c("a", "b", NA), exclude = NULL) expect_equal(levels(crossing(x = x_na_lev_extra)$x), c("a", "b", NA)) }) test_that("NULL inputs", { tb <- tibble(x = 1:5) expect_equal(nesting(x = tb$x, y = NULL), tb) expect_equal(crossing(x = tb$x, y = NULL), tb) }) test_that("crossing handles list 
columns", { x <- 1:2 y <- list(1, 1:2) out <- crossing(x, y) expect_equal(nrow(out), 4) expect_s3_class(out, "tbl_df") expect_equal(out$x, rep(x, each = 2)) expect_equal(out$y, rep(y, 2)) }) test_that("expand() respects `.name_repair`", { x <- 1:2 df <- tibble(x) expect_snapshot( out <- df %>% expand(x = x, x = x, .name_repair = "unique") ) expect_named(out, c("x...1", "x...2")) }) test_that("crossing() / nesting() respect `.name_repair`", { x <- 1:2 expect_snapshot( out <- crossing(x = x, x = x, .name_repair = "unique") ) expect_named(out, c("x...1", "x...2")) expect_snapshot( out <- nesting(x = x, x = x, .name_repair = "unique") ) expect_named(out, c("x...1", "x...2")) }) test_that("crossing() / nesting() silently uniquely repairs names of unnamed inputs", { x <- 1:2 expect_silent(out <- crossing(x, x)) expect_named(out, c("x...1", "x...2")) expect_silent(out <- nesting(x, x)) expect_named(out, c("x...1", "x...2")) }) test_that("crossing() / nesting() works with very long inlined unnamed inputs (#1037)", { df1 <- tibble(a = c("a", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), b = c(1, 2)) df2 <- tibble(c = c("b", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), d = c(3, 4)) out <- crossing( tibble(a = c("a", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), b = c(1, 2)), tibble(c = c("b", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), d = c(3, 4)) ) expect_identical(out$a, vec_rep_each(df1$a, 2)) expect_identical(out$b, vec_rep_each(df1$b, 2)) expect_identical(out$c, vec_rep(df2$c, 2)) expect_identical(out$d, vec_rep(df2$d, 2)) out <- nesting( tibble(a = c("a", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), b = c(1, 2)), tibble(c = c("b", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), d = c(3, 4)) ) expect_identical(out$a, df1$a) expect_identical(out$b, df1$b) expect_identical(out$c, df2$c) expect_identical(out$d, df2$d) }) test_that("crossing() / nesting() doesn't overwrite after auto naming (#1092)", { x <- list(0:1, 2:3) expect_silent(out <- crossing(!!!x)) expect_identical(out[[1]], c(0L, 0L, 1L, 1L)) expect_identical(out[[2]], c(2L, 
3L, 2L, 3L)) expect_silent(out <- nesting(!!!x)) expect_identical(out[[1]], c(0L, 1L)) expect_identical(out[[2]], c(2L, 3L)) }) test_that("crossing() with no inputs returns a 1 row data frame", { # Because it uses expand_grid(), which respects `prod() == 1L` expect_identical(crossing(), tibble(.rows = 1L)) expect_identical(crossing(NULL), tibble(.rows = 1L)) }) test_that("nesting() with no inputs returns a 1 row data frame", { # Because computations involving the "number of combinations" of an empty # set return 1 expect_identical(nesting(), tibble(.rows = 1L)) expect_identical(nesting(NULL), tibble(.rows = 1L)) }) test_that("can use `do.call()` or `reduce()` with `crossing()` (#992)", { x <- list(tibble(a = 1:2), tibble(b = 2:4), tibble(c = 5:6)) expect_identical( crossing(x[[1]], x[[2]], x[[3]]), do.call(crossing, x) ) expect_identical( crossing(crossing(x[[1]], x[[2]]), x[[3]]), purrr::reduce(x, crossing) ) }) test_that("crossing() / nesting() retain `NA` data in factors (#1275)", { x <- factor(c(NA, "x"), levels = "x") expect_identical( crossing(x), tibble(x = factor(c("x", NA), levels = "x")) ) expect_identical( nesting(x), tibble(x = factor(c("x", NA), levels = "x")) ) }) # ------------------------------------------------------------------------------ test_that("expand_grid() can control name_repair", { x <- 1:2 expect_snapshot((expect_error(expand_grid(x = x, x = x)))) expect_snapshot( out <- expand_grid(x = x, x = x, .name_repair = "unique") ) expect_named(out, c("x...1", "x...2")) out <- expand_grid(x = x, x = x, .name_repair = "minimal") expect_named(out, c("x", "x")) }) test_that("zero length input gives zero length output", { expect_equal( expand_grid(x = integer(), y = 1:3), tibble(x = integer(), y = integer()) ) }) test_that("no input results in 1 row data frame", { # Because `prod() == 1L` by definition expect_identical(expand_grid(), tibble(.rows = 1L)) expect_identical(expand_grid(NULL), tibble(.rows = 1L)) }) test_that("unnamed data frames are 
flattened", { df <- tibble(x = 1:2, y = 1:2) col <- 3:4 expect_identical( expand_grid(df, col), tibble(x = c(1L, 1L, 2L, 2L), y = c(1L, 1L, 2L, 2L), col = c(3L, 4L, 3L, 4L)) ) }) test_that("packed and unpacked data frames are expanded identically", { df <- tibble(x = 1:2, y = 1:2) col <- 3:4 expect_identical( expand_grid(df, col), unpack(expand_grid(df = df, col), df) ) }) test_that("expand_grid() works with unnamed inlined tibbles with long expressions (#1116)", { df <- expand_grid( dplyr::tibble(fruit = c("Apple", "Banana"), fruit_id = c("a", "b")), dplyr::tibble(status_id = c("c", "d"), status = c("cut_neatly", "devoured")) ) expect <- vec_cbind( vec_slice(tibble(fruit = c("Apple", "Banana"), fruit_id = c("a", "b")), c(1, 1, 2, 2)), vec_slice(tibble(status_id = c("c", "d"), status = c("cut_neatly", "devoured")), c(1, 2, 1, 2)) ) expect_identical(df, expect) }) test_that("expand_grid() works with 0 col tibbles (#1189)", { df <- tibble(.rows = 1) expect_identical(expand_grid(df), df) expect_identical(expand_grid(df, x = 1:2), tibble(x = 1:2)) }) test_that("expand_grid() works with 0 row tibbles", { df <- tibble(.rows = 0) expect_identical(expand_grid(df), df) expect_identical(expand_grid(df, x = 1:2), tibble(x = integer())) }) # ------------------------------------------------------------------------------ # grid_dots() test_that("grid_dots() silently repairs auto-names", { x <- 1 expect_named(grid_dots(x, x), c("x...1", "x...2")) expect_named(grid_dots(1, 1), c("1...1", "1...2")) }) test_that("grid_dots() doesn't repair duplicate supplied names", { expect_named(grid_dots(x = 1, x = 1), c("x", "x")) }) test_that("grid_dots() evaluates each expression in turn", { out <- grid_dots(x = seq(-2, 2), y = x) expect_equal(out$x, out$y) }) test_that("grid_dots() uses most recent override of column in iterative expressions", { out <- grid_dots(x = 1:2, x = 3:4, y = x) expect_identical(out, list(x = 1:2, x = 3:4, y = 3:4)) }) test_that("grid_dots() adds unnamed data frame 
columns into the mask", { out <- grid_dots(x = 1:2, data.frame(x = 3:4, y = 5:6), a = x, b = y) expect_identical(out$x, 1:2) expect_identical(out$a, 3:4) expect_identical(out$b, 5:6) expect_identical(out[[2]], data.frame(x = 3:4, y = 5:6)) expect_named(out, c("x", "", "a", "b")) }) test_that("grid_dots() drops `NULL`s", { expect_identical( grid_dots(NULL, x = 1L, y = NULL, y = 1:2), list(x = 1L, y = 1:2) ) }) test_that("grid_dots() reject non-vector input", { expect_snapshot((expect_error(grid_dots(lm(1 ~ 1))))) }) # ------------------------------------------------------------------------------ # fct_unique() test_that("fct_unique() retains `NA` at the end even if it isn't a level", { x <- factor(c(NA, "x")) expect_identical(fct_unique(x), factor(c("x", NA))) expect_identical(levels(fct_unique(x)), "x") }) test_that("fct_unique() doesn't alter level order if `NA` is an existing level", { x <- factor(c(NA, "x"), levels = c(NA, "x"), exclude = NULL) expect_identical(fct_unique(x), x) expect_identical(levels(fct_unique(x)), c(NA, "x")) }) tidyr/tests/testthat/test-separate-longer.R0000644000176200001440000000270014332223160020572 0ustar liggesuserstest_that("separate_longer_delim() creates rows", { df <- tibble(id = 1:2, x = c("x", "y,z")) out <- separate_longer_delim(df, x, delim = ",") expect_equal(out$id, c(1, 2, 2)) expect_equal(out$x, c("x", "y", "z")) }) test_that("separate_longer_delim() validates its inputs", { df <- tibble(x = "x") expect_snapshot(error = TRUE, { df %>% separate_longer_delim() df %>% separate_longer_delim(x, sep = 1) }) }) test_that("separate_longer_position() creates rows", { df <- tibble(id = 1:2, x = c("x", "yz")) out <- separate_longer_position(df, x, width = 1) expect_equal(out$id, c(1, 2, 2)) expect_equal(out$x, c("x", "y", "z")) }) test_that("separate_longer_position() can keep empty rows", { df <- tibble(id = 1:2, x = c("", "x")) out <- separate_longer_position(df, x, width = 1) expect_equal(out$id, 2) expect_equal(out$x, "x") out <- 
separate_longer_position(df, x, width = 1, keep_empty = TRUE) expect_equal(out$id, c(1, 2)) expect_equal(out$x, c(NA, "x")) }) test_that("works with zero-row data frame", { df <- tibble(x = character()) expect_equal(separate_longer_position(df, x, 1), df) expect_equal(separate_longer_delim(df, x, ","), df) }) test_that("separate_longer_position() validates its inputs", { df <- tibble(x = "x") expect_snapshot(error = TRUE, { df %>% separate_longer_position() df %>% separate_longer_position(y, width = 1) df %>% separate_longer_position(x, width = 1.5) }) }) tidyr/tests/testthat/test-unnest-longer.R0000644000176200001440000003062214360013543020311 0ustar liggesuserstest_that("uses input for default column names", { df <- tibble(x = 1:2, y = list(1, 1:2)) out <- df %>% unnest_longer(y) expect_named(out, c("x", "y")) }) test_that("can adjust the column name with `values_to`", { df <- tibble(x = 1:2, y = list(1, 1:2)) out <- df %>% unnest_longer(y, values_to = "y2") expect_named(out, c("x", "y2")) }) test_that("automatically adds id col if named", { df <- tibble(x = 1:2, y = list(c(a = 1), c(b = 2))) out <- df %>% unnest_longer(y) expect_named(out, c("x", "y", "y_id")) }) test_that("can force integer indexes", { df <- tibble(x = 1:2, y = list(1, 2)) out <- df %>% unnest_longer(y, indices_include = TRUE) expect_named(out, c("x", "y", "y_id")) out <- df %>% unnest_longer(y, indices_to = "y2") expect_named(out, c("x", "y", "y2")) }) test_that("can handle data frames consistently with vectors", { df <- tibble(x = 1:2, y = list(tibble(a = 1:2, b = 2:3))) out <- df %>% unnest_longer(y) expect_named(out, c("x", "y")) expect_equal(nrow(out), 4) }) test_that("can unnest dates", { x <- as.Date(c("2019-08-01", "2019-12-01")) df <- tibble(x = as.list(x)) out <- df %>% unnest_longer(x) expect_equal(out$x, x) }) test_that("unnest_longer - bad inputs generate errors", { df <- tibble(x = 1, y = list(mean)) expect_snapshot((expect_error( unnest_longer(df, y) ))) }) test_that("list_of 
columns can be unnested", { df <- tibble(x = 1:2, y = list_of(1L, 1:2)) out <- unnest_longer(df, y) expect_named(out, c("x", "y")) expect_equal(nrow(out), 3) # With id column df <- tibble(x = 1:2, y = list_of(c(a = 1L), c(b = 1:2))) expect_named(unnest_longer(df, y), c("x", "y", "y_id")) }) test_that("drops empty rows by default (#1363, #1339)", { df <- tibble( x = 1:4, y = list(NULL, NULL, 1, double()) ) out <- unnest_longer(df, y) expect_identical(out, tibble(x = 3L, y = 1)) }) test_that("can keep empty rows with `keep_empty` (#1339)", { df <- tibble( x = 1:4, y = list(NULL, NULL, 1, double()) ) out <- unnest_longer(df, y, keep_empty = TRUE) expect_identical(out, tibble(x = 1:4, y = c(NA, NA, 1, NA))) }) test_that("keeping empty rows uses `NA` as the index", { df <- tibble( x = 1:3, y = list(NULL, 1:2, integer()) ) # Integer case out <- unnest_longer(df, y, keep_empty = TRUE, indices_include = TRUE) expect_identical(out$y_id, c(NA, 1L, 2L, NA)) # Character case # Trigger names to be generated df$y[[2]] <- set_names(df$y[[2]], c("a", "b")) out <- unnest_longer(df, y, keep_empty = TRUE) expect_identical(out$y_id, c(NA, "a", "b", NA)) }) test_that("named empty vectors force an index column regardless of `keep_empty`", { df <- tibble( x = 1:2, y = list(1:2, set_names(integer(), character())) ) # Empty row is dropped, but names are still forced out <- unnest_longer(df, y) expect_identical(out$y_id, c("", "")) out <- unnest_longer(df, y, keep_empty = TRUE) expect_identical(out$y_id, c("", "", NA)) }) test_that("mix of unnamed and named can be unnested (#1029)", { df <- tibble(x = 1:4, y = list(1, c(b = 2), NULL, double())) out <- unnest_longer(df, y, indices_include = NULL) expect_identical(out$y_id, c("", "b")) out <- unnest_longer(df, y, indices_include = TRUE) expect_identical(out$y_id, c("", "b")) out <- unnest_longer(df, y, indices_include = NULL, keep_empty = TRUE) expect_identical(out$y_id, c("", "b", NA, NA)) out <- unnest_longer(df, y, indices_include = TRUE, 
keep_empty = TRUE) expect_identical(out$y_id, c("", "b", NA, NA)) }) test_that("unnesting empty typed column is a no-op and retains column (#1199) (#1196)", { df <- tibble(x = integer()) expect_identical(unnest_longer(df, x), df) df <- tibble(x = tibble()) expect_identical(unnest_longer(df, x), df) df <- tibble(x = tibble(a = integer())) expect_identical(unnest_longer(df, x), df) # In particular, #1196 df <- tibble(a = tibble(x = 1:2, y = 3:4, z = 5:6)) expect_identical(unnest_longer(df, a), df) }) test_that("unnesting empty list retains logical column (#1199)", { # Really an unspecified column that `vec_cast_common()` finalizes to logical df <- tibble(x = list()) expect_identical(unnest_longer(df, x), tibble(x = logical())) }) test_that("unnesting empty list with indices uses integer indices", { df <- tibble(x = list()) out <- unnest_longer(df, x, indices_include = TRUE) expect_identical(out$x_id, integer()) out <- unnest_longer(df, x, indices_include = TRUE, keep_empty = TRUE) expect_identical(out$x_id, integer()) }) test_that("unnesting empty list-of retains ptype (#1199)", { df <- tibble(x = list_of(.ptype = integer())) expect_identical(unnest_longer(df, x), tibble(x = integer())) }) test_that("unnesting list of data frames utilizes `values_to` (#1195)", { df <- tibble(x = list(tibble(a = 1:2), tibble(a = 3:4))) expect_identical( unnest_longer(df, x, values_to = "foo"), tibble(foo = tibble(a = 1:4)) ) }) test_that("unnesting list of data frames utilizes `indices_include` (#1194)", { df <- tibble(x = list(tibble(a = 1:2), tibble(a = 3:4))) expect_identical( unnest_longer(df, x, indices_include = TRUE), tibble(x = tibble(a = 1:4), x_id = c(1L, 2L, 1L, 2L)) ) }) test_that("can unnest a column with just `list(NULL)` or `list_of(NULL)` (#1193, #1363)", { df <- tibble(x = list(NULL)) expect_identical( unnest_longer(df, x), tibble(x = logical()) ) expect_identical( unnest_longer(df, x, keep_empty = TRUE), tibble(x = NA) ) df <- tibble(x = list_of(NULL, .ptype = 
integer())) expect_identical( unnest_longer(df, x), tibble(x = integer()) ) expect_identical( unnest_longer(df, x, keep_empty = TRUE), tibble(x = NA_integer_) ) }) test_that("can unnest a column with just `list(integer())`", { df <- tibble(x = list(integer())) expect_identical( unnest_longer(df, x), tibble(x = integer()) ) expect_identical( unnest_longer(df, x, keep_empty = TRUE), tibble(x = NA_integer_) ) }) test_that("unnesting `list(NULL)` with indices uses integer indices", { df <- tibble(x = list(NULL)) out <- unnest_longer(df, x, indices_include = TRUE) expect_identical(out$x_id, integer()) out <- unnest_longer(df, x, indices_include = TRUE, keep_empty = TRUE) expect_identical(out$x_id, NA_integer_) }) test_that("can unnest one row data frames (#1034)", { col <- list(tibble(x = 1, y = 2), tibble(x = 2, y = 3)) df <- tibble(col = col) expect_identical( unnest_longer(df, col), tibble(col = tibble(x = c(1, 2), y = c(2, 3))) ) }) test_that("named vectors are converted to lists with `vec_chop()`", { # Equivalent to `df <- tibble(x = list_of(c(a = 1), c(b = 2)))` df <- tibble(x = c(a = 1, b = 2)) out <- unnest_longer(df, x) expect_identical(out$x, c(a = 1, b = 2)) expect_identical(out$x_id, c("a", "b")) }) test_that("can unnest multiple columns (#740)", { df <- tibble(a = list(1:2, 3:4), b = list(1:2, 3:4)) expect_identical(unnest_longer(df, c(a, b)), unchop(df, c(a, b))) }) test_that("tidyverse recycling rules are applied when unnesting multiple cols", { df <- tibble(a = list(1L, 3:4), b = list(1:2, 4L)) out <- unnest_longer(df, c(a, b)) expect_identical(out$a, c(1L, 1L, 3L, 4L)) expect_identical(out$b, c(1L, 2L, 4L, 4L)) }) test_that("tidyverse recycling rules are applied after `keep_empty`", { df <- tibble(a = list(NULL, 3:4), b = list(1:2, 4L)) expect_snapshot(error = TRUE, { unnest_longer(df, c(a, b)) }) out <- unnest_longer(df, c(a, b), keep_empty = TRUE, indices_include = TRUE) expect_identical(out$a, c(NA, NA, 3L, 4L)) expect_identical(out$a_id, c(NA, NA, 
1L, 2L)) expect_identical(out$b, c(1L, 2L, 4L, 4L)) expect_identical(out$b_id, c(1L, 2L, 1L, 1L)) }) test_that("unnesting multiple columns uses independent indices", { df <- tibble(a = list(c(x = 1), NULL), b = list(1, 2:3)) out <- unnest_longer(df, c(a, b), keep_empty = TRUE) expect_identical(out$a_id, c("x", NA, NA)) expect_named(out, c("a", "a_id", "b")) }) test_that("unnesting multiple columns works with `indices_include = TRUE`", { df <- tibble(a = list(c(x = 1), NULL), b = list(1, 2:3)) out <- unnest_longer(df, c(a, b), keep_empty = TRUE, indices_include = TRUE) expect_identical(out$a_id, c("x", NA, NA)) expect_identical(out$b_id, c(1L, 1L, 2L)) }) test_that("can use glue to name multiple `values_to` cols", { df <- tibble(a = list(1, 2:3), b = list(1, 2:3)) expect_named( unnest_longer(df, c(a, b), values_to = "{col}_"), c("a_", "b_") ) }) test_that("can use glue to name multiple `indices_to` cols", { df <- tibble(a = list(1, 2:3), b = list(1, 2:3)) expect_named( unnest_longer(df, c(a, b), indices_to = "{col}_name"), c("a", "a_name", "b", "b_name") ) }) test_that("default `indices_to` is based on `values_to` (#1201)", { df <- tibble(a = list(c(x = 1), 2)) expect_named( unnest_longer(df, a, values_to = "aa"), c("aa", "aa_id") ) }) test_that("can unnest a vector with a mix of named/unnamed elements (#1200 comment)", { df <- tibble(x = c(a = 1L, 2L)) out <- unnest_longer(df, x) expect_identical(out$x, df$x) expect_identical(out$x_id, c("a", "")) }) test_that("can unnest a list with a mix of named/unnamed elements (#1200 comment)", { df <- tibble(x = list(a = 1:2, 3:4)) out <- unnest_longer(df, x) expect_identical(out$x, 1:4) }) test_that("names are preserved when simplification isn't done and a ptype is supplied", { df <- tibble(x = list(list(a = 1L), list(b = 1L))) ptype <- list(x = integer()) # Explicit request not to simplify out <- unnest_longer(df, x, indices_include = TRUE, ptype = ptype, simplify = FALSE) expect_named(out$x, c("a", "b")) 
expect_identical(out$x_id, c("a", "b")) df <- tibble(x = list(list(a = 1:2), list(b = 1L))) ptype <- list(x = integer()) # Automatically can't simplify out <- unnest_longer(df, x, indices_include = TRUE, ptype = ptype) expect_named(out$x, c("a", "b")) expect_identical(out$x_id, c("a", "b")) }) test_that("works with foreign lists recognized by `vec_is_list()` (#1327)", { new_foo <- function(...) { structure(list(...), class = c("foo", "list")) } # With empty types df <- tibble(x = new_foo(1:2, integer())) expect_identical(unnest_longer(df, x), tibble(x = 1:2)) expect_identical(unnest_longer(df, x, keep_empty = TRUE), tibble(x = c(1:2, NA))) # With `NULL`s df <- tibble(x = new_foo(1:2, NULL)) expect_identical(unnest_longer(df, x), tibble(x = 1:2)) expect_identical(unnest_longer(df, x, keep_empty = TRUE), tibble(x = c(1:2, NA))) }) test_that("can't currently retain names when simplification isn't done and a ptype is supplied if there is a mix of named/unnamed elements (#1212)", { df <- tibble(x = list(list(a = 1L), list(1L))) ptype <- list(x = integer()) out <- unnest_longer(df, x, indices_include = TRUE, ptype = ptype, simplify = FALSE) expect_named(out$x, c("a", "")) expect_identical(out$x_id, c("a", "")) }) test_that("can't mix `indices_to` with `indices_include = FALSE`", { expect_snapshot((expect_error( unnest_longer(mtcars, mpg, indices_to = "x", indices_include = FALSE) ))) }) test_that("unnest_longer() validates its inputs", { df <- tibble(x = list(list(a = 1L), list(b = 1L))) expect_snapshot(error = TRUE, { unnest_longer(1) unnest_longer(df) unnest_longer(df, x, indices_to = "") unnest_longer(df, x, indices_include = 1) unnest_longer(df, x, values_to = "") }) }) test_that("`values_to` and `indices_to` glue can't reach into surrounding env", { x <- "foo" expect_error(unnest_longer(mtcars, mpg, indices_to = "{x}")) expect_error(unnest_longer(mtcars, mpg, values_to = "{x}")) }) test_that("`values_to` is validated", { expect_snapshot({ 
(expect_error(unnest_longer(mtcars, mpg, values_to = 1))) (expect_error(unnest_longer(mtcars, mpg, values_to = c("x", "y")))) }) }) test_that("`indices_to` is validated", { expect_snapshot({ (expect_error(unnest_longer(mtcars, mpg, indices_to = 1))) (expect_error(unnest_longer(mtcars, mpg, indices_to = c("x", "y")))) }) }) test_that("`indices_include` is validated", { expect_snapshot({ (expect_error(unnest_longer(mtcars, mpg, indices_include = 1))) (expect_error(unnest_longer(mtcars, mpg, indices_include = c(TRUE, FALSE)))) }) }) test_that("`keep_empty` is validated", { expect_snapshot({ (expect_error(unnest_longer(mtcars, mpg, keep_empty = 1))) (expect_error(unnest_longer(mtcars, mpg, keep_empty = c(TRUE, FALSE)))) }) }) tidyr/tests/testthat/test-separate.R0000644000176200001440000001054414323620576017327 0ustar liggesuserstest_that("missing values in input are missing in output", { df <- tibble(x = c(NA, "a b")) out <- separate(df, x, c("x", "y")) expect_equal(out$x, c(NA, "a")) expect_equal(out$y, c(NA, "b")) }) test_that("positive integer values specific position between characters", { df <- tibble(x = c(NA, "ab", "cd")) out <- separate(df, x, c("x", "y"), 1) expect_equal(out$x, c(NA, "a", "c")) expect_equal(out$y, c(NA, "b", "d")) }) test_that("negative integer values specific position between characters", { df <- tibble(x = c(NA, "ab", "cd")) out <- separate(df, x, c("x", "y"), -1) expect_equal(out$x, c(NA, "a", "c")) expect_equal(out$y, c(NA, "b", "d")) }) test_that("extreme integer values handled sensibly", { df <- tibble(x = c(NA, "a", "bc", "def")) out <- separate(df, x, c("x", "y"), 3) expect_equal(out$x, c(NA, "a", "bc", "def")) expect_equal(out$y, c(NA, "", "", "")) out <- separate(df, x, c("x", "y"), -3) expect_equal(out$x, c(NA, "", "", "")) expect_equal(out$y, c(NA, "a", "bc", "def")) }) test_that("convert produces integers etc", { df <- tibble(x = "1-1.5-FALSE") out <- separate(df, x, c("x", "y", "z"), "-", convert = TRUE) expect_equal(out$x, 1L) 
expect_equal(out$y, 1.5) expect_equal(out$z, FALSE) }) test_that("convert keeps characters as character", { df <- tibble(x = "X-1") out <- separate(df, x, c("x", "y"), "-", convert = TRUE) expect_equal(out$x, "X") expect_equal(out$y, 1L) }) test_that("too many pieces dealt with as requested", { df <- tibble(x = c("a b", "a b c")) expect_snapshot(separate(df, x, c("x", "y"))) merge <- separate(df, x, c("x", "y"), extra = "merge") expect_equal(merge[[1]], c("a", "a")) expect_equal(merge[[2]], c("b", "b c")) drop <- separate(df, x, c("x", "y"), extra = "drop") expect_equal(drop[[1]], c("a", "a")) expect_equal(drop[[2]], c("b", "b")) expect_snapshot(separate(df, x, c("x", "y"), extra = "error")) }) test_that("too few pieces dealt with as requested", { df <- tibble(x = c("a b", "a b c")) expect_snapshot(separate(df, x, c("x", "y", "z"))) left <- separate(df, x, c("x", "y", "z"), fill = "left") expect_equal(left$x, c(NA, "a")) expect_equal(left$y, c("a", "b")) expect_equal(left$z, c("b", "c")) right <- separate(df, x, c("x", "y", "z"), fill = "right") expect_equal(right$z, c(NA, "c")) }) test_that("preserves grouping", { df <- tibble(g = 1, x = "a:b") %>% dplyr::group_by(g) rs <- df %>% separate(x, c("a", "b")) expect_equal(class(df), class(rs)) expect_equal(dplyr::group_vars(df), dplyr::group_vars(rs)) }) test_that("drops grouping when needed", { df <- tibble(x = "a:b") %>% dplyr::group_by(x) rs <- df %>% separate(x, c("a", "b")) expect_equal(rs$a, "a") expect_equal(dplyr::group_vars(rs), character()) }) test_that("overwrites existing columns", { df <- tibble(x = "a:b") rs <- df %>% separate(x, c("x", "y")) expect_named(rs, c("x", "y")) expect_equal(rs$x, "a") }) test_that("drops NA columns", { df <- tibble(x = c(NA, "ab", "cd")) out <- separate(df, x, c(NA, "y"), 1) expect_equal(names(out), "y") expect_equal(out$y, c(NA, "b", "d")) }) test_that("validates inputs", { df <- tibble(x = "a:b") expect_snapshot(error = TRUE, { separate(df) separate(df, x, into = 1) 
separate(df, x, into = "x", sep = c("a", "b")) separate(df, x, into = "x", remove = 1) separate(df, x, into = "x", convert = 1) }) }) test_that("informative error if using stringr modifier functions (#693)", { df <- tibble(x = "a") sep <- structure("a", class = "pattern") expect_snapshot((expect_error(separate(df, x, "x", sep = sep)))) }) # helpers ----------------------------------------------------------------- test_that("str_split_n can cap number of splits", { expect_equal(str_split_n(c("x,x"), ",", 1), list("x,x")) expect_equal(str_split_n(c("x,x"), ",", 2), list(c("x", "x"))) expect_equal(str_split_n(c("x,x"), ",", 3), list(c("x", "x"))) }) test_that("str_split_n handles edge cases", { expect_equal(str_split_n(character(), ",", 1), list()) expect_equal(str_split_n(NA, ",", 1), list(NA_character_)) }) test_that("str_split_n handles factors", { expect_equal(str_split_n(factor(), ",", 1), list()) expect_equal(str_split_n(factor("x,x"), ",", 2), list(c("x", "x"))) }) test_that("list_indices truncates long warnings", { expect_equal(list_indices(letters, max = 3), "a, b, c, ...") }) tidyr/tests/testthat/test-nest-legacy.R0000644000176200001440000001767414315413441017741 0ustar liggesusers# nest -------------------------------------------------------------------- test_that("nest turns grouped values into one list-df", { df <- tibble(x = c(1, 1, 1), y = 1:3) out <- nest_legacy(df, y) expect_equal(out$x, 1) expect_equal(length(out$data), 1L) expect_equal(out$data[[1L]], tibble(y = 1:3)) }) test_that("nest works with data frames too", { df <- data.frame(x = c(1, 1, 1), y = 1:3) out <- nest_legacy(df, y) expect_equal(out$x, 1) expect_equal(length(out$data), 1L) expect_equal(out$data[[1L]], tibble(y = 1:3)) }) test_that("can control output column name", { df <- tibble(x = c(1, 1, 1), y = 1:3) out <- nest_legacy(df, y, .key = y) expect_equal(names(out), c("x", "y")) out <- nest_legacy(df, y, .key = "y") expect_equal(names(out), c("x", "y")) }) test_that("nest doesn't 
include grouping vars in nested data", { df <- tibble(x = c(1, 1, 1), y = 1:3) out <- df %>% dplyr::group_by(x) %>% nest_legacy() expect_equal(out$data[[1]], tibble(y = 1:3)) }) test_that("can restrict variables in grouped nest", { df <- tibble(x = 1, y = 2, z = 3) %>% dplyr::group_by(x) out <- df %>% nest_legacy(y) expect_equal(names(out$data[[1]]), "y") }) test_that("puts data into the correct row", { df <- tibble(x = 1:3, y = c("B", "A", "A")) out <- nest_legacy(df, x) %>% dplyr::filter(y == "B") expect_equal(out$data[[1]]$x, 1) }) test_that("nesting everything yields a simple data frame", { df <- tibble(x = 1:3, y = c("B", "A", "A")) out <- nest_legacy(df, x, y) expect_equal(length(out$data), 1L) expect_equal(out$data[[1L]], df) }) test_that("nest preserves order of data", { df <- tibble(x = c(1, 3, 2, 3, 2), y = 1:5) out <- nest_legacy(df, y) expect_equal(out$x, c(1, 3, 2)) }) test_that("empty factor levels don't affect nest", { df <- tibble( x = factor(c("z", "a"), levels = letters), y = 1:2 ) out <- nest_legacy(df, y) expect_equal(out$x, df$x) }) test_that("nesting works for empty data frames", { df <- tibble(x = 1:3, y = c("B", "A", "A"))[0, ] out <- nest_legacy(df, x) expect_equal(names(out), c("y", "data")) expect_equal(nrow(out), 0L) expect_equal(length(out$data), 0L) out <- nest_legacy(df, x, y) expect_equal(length(out$data), 1L) expect_equal(out$data[[1L]], df) }) test_that("tibble conversion occurs in the `nest.data.frame()` method", { tbl <- mtcars %>% nest_legacy(-am, -cyl) expect_s3_class(tbl, "tbl_df") expect_s3_class(tbl$data[[1L]], "tbl_df") }) test_that("nest_legacy() does not preserve grouping", { df <- tibble(x = c(1, 1, 2), y = 1:3) %>% dplyr::group_by(x) out <- nest_legacy(df) expect_false(inherits(out, "grouped_df")) }) # unnest ------------------------------------------------------------------ test_that("unnesting combines atomic vectors", { df <- tibble(x = list(1, 2:3, 4:10)) expect_equal(unnest_legacy(df)$x, 1:10) }) 
test_that("unesting combines augmented vectors", { df <- tibble::tibble(x = as.list(as.factor(letters[1:3]))) expect_equal(unnest_legacy(df)$x, factor(letters[1:3])) }) test_that("vector unnest preserves names", { df <- tibble(x = list(1, 2:3), y = list("a", c("b", "c"))) out <- unnest_legacy(df) expect_named(out, c("x", "y")) }) test_that("unnesting row binds data frames", { df <- tibble(x = list( tibble(x = 1:5), tibble(x = 6:10) )) expect_equal(unnest_legacy(df)$x, 1:10) }) test_that("can unnest nested lists", { df <- tibble( x = 1:2, y = list(list("a"), list("b")) ) rs <- unnest_legacy(df, y) expect_identical(rs, tibble(x = 1:2, y = list("a", "b"))) }) test_that("can unnest mixture of name and unnamed lists of same length", { df <- tibble( x = c("a"), y = list(y = 1:2), z = list(1:2) ) expect_identical(unnest_legacy(df), tibble(x = c("a", "a"), y = c(1:2), z = c(1:2))) }) test_that("elements must all be of same type", { df <- tibble(x = list(1, "a")) expect_error(unnest_legacy(df), class = "vctrs_error_incompatible_type") }) test_that("can't combine vectors and data frames", { df <- tibble(x = list(1, tibble(1))) expect_snapshot((expect_error(unnest_legacy(df)))) }) test_that("multiple columns must be same length", { df <- tibble(x = list(1), y = list(1:2)) expect_snapshot((expect_error(unnest_legacy(df)))) df <- tibble(x = list(1), y = list(tibble(x = 1:2))) expect_snapshot((expect_error(unnest_legacy(df)))) }) test_that("nested is split as a list (#84)", { df <- tibble(x = 1:3, y = list(1, 2:3, 4), z = list(5, 6:7, 8)) expect_warning(out <- unnest_legacy(df, y, z), NA) expect_equal(out$x, c(1, 2, 2, 3)) expect_equal(out$y, unlist(df$y)) expect_equal(out$z, unlist(df$z)) }) test_that("unnest has mutate semantics", { df <- tibble(x = 1:3, y = list(1, 2:3, 4)) out <- df %>% unnest_legacy(z = map(y, `+`, 1)) expect_equal(out$z, 2:5) }) test_that(".id creates vector of names for vector unnest", { df <- tibble(x = 1:2, y = list(a = 1, b = 1:2)) out <- 
unnest_legacy(df, .id = "name") expect_equal(out$name, c("a", "b", "b")) }) test_that(".id creates vector of names for grouped vector unnest", { df <- tibble(x = 1:2, y = list(a = 1, b = 1:2)) %>% dplyr::group_by(x) out <- unnest_legacy(df, .id = "name") expect_equal(out$name, c("a", "b", "b")) }) test_that(".id creates vector of names for data frame unnest", { df <- tibble(x = 1:2, y = list( a = tibble(y = 1), b = tibble(y = 1:2) )) out <- unnest_legacy(df, .id = "name") expect_equal(out$name, c("a", "b", "b")) }) test_that(".id creates vector of names for grouped data frame unnest", { df <- tibble(x = 1:2, y = list( a = tibble(y = 1), b = tibble(y = 1:2) )) %>% dplyr::group_by(x) out <- unnest_legacy(df, .id = "name") expect_equal(out$name, c("a", "b", "b")) }) test_that("can use non-syntactic names", { out <- tibble("foo bar" = list(1:2, 3)) %>% unnest_legacy() expect_named(out, "foo bar") }) test_that("sep combines column names", { ldf <- list(tibble(x = 1)) tibble(x = ldf, y = ldf) %>% unnest_legacy(.sep = "_") %>% expect_named(c("x_x", "y_x")) }) test_that("can unnest empty data frame", { df <- tibble(x = integer(), y = list()) out <- unnest_legacy(df, y) expect_equal(out, tibble(x = integer())) }) test_that("empty ... 
returns df if no list-cols", { df <- tibble(x = integer(), y = integer()) expect_equal(unnest_legacy(df), df) }) test_that("can optional preserve list cols", { df <- tibble(x = list(3, 4), y = list("a", "b")) rs <- df %>% unnest_legacy(x, .preserve = y) expect_identical(rs, tibble(y = df$y, x = c(3, 4))) df <- tibble(x = list(c("d", "e")), y = list(1:2)) rs <- df %>% unnest_legacy(.preserve = y) expect_identical(rs, tibble(y = rep(list(1:2), 2), x = c("d", "e"))) }) test_that("unnest drops list cols if expanding", { df <- tibble(x = 1:2, y = list(3, 4), z = list(5, 6:7)) out <- df %>% unnest_legacy(z) expect_equal(names(out), c("x", "z")) }) test_that("unnest keeps list cols if not expanding", { df <- tibble(x = 1:2, y = list(3, 4), z = list(5, 6:7)) out <- df %>% unnest_legacy(y) expect_equal(names(out), c("x", "z", "y")) }) test_that("unnest respects .drop_lists", { df <- tibble(x = 1:2, y = list(3, 4), z = list(5, 6:7)) expect_equal(df %>% unnest_legacy(y, .drop = TRUE) %>% names(), c("x", "y")) expect_equal(df %>% unnest_legacy(z, .drop = FALSE) %>% names(), c("x", "y", "z")) }) test_that("grouping is preserved", { df <- tibble(g = 1, x = list(1:3)) %>% dplyr::group_by(g) rs <- df %>% unnest_legacy(x) expect_equal(rs$x, 1:3) expect_equal(class(df), class(rs)) expect_equal(dplyr::group_vars(df), dplyr::group_vars(rs)) }) test_that("unnesting zero row column preserves names", { df <- tibble(a = character(), b = character()) expect_equal(df %>% unnest_legacy(b), tibble(a = character(), b = character())) }) test_that("unnest_legacy() recognize ptype", { tbl <- tibble(x = integer(), y = structure(list(), ptype = double())) res <- unnest_legacy(tbl) expect_equal(res, tibble(x = integer(), y = double())) }) tidyr/tests/testthat/test-id.R0000644000176200001440000000103014315413441016075 0ustar liggesuserstest_that("drop preserves count of factor levels", { x <- factor(levels = c("a", "b")) expect_equal(id_var(x), structure(integer(), n = 2)) 
expect_equal(id(data.frame(x)), structure(integer(), n = 2)) }) test_that("id works with dimensions beyond integer range", { df <- data.frame(matrix(c(1, 2), nrow = 2, ncol = 32)) expect_equal(id(df), structure(c(1, 2), n = 2^32)) }) test_that("id_var() handles named vectors (#525)", { res <- id_var(c(a = 5, b = 3, c = 5)) expect_equal(res, structure(c(2L, 1L, 2L), n = 2L)) }) tidyr/tests/testthat/test-drop-na.R0000644000176200001440000000506114315413441017051 0ustar liggesuserstest_that("empty call drops every row", { df <- tibble(x = c(1, 2, NA), y = c("a", NA, "b")) exp <- tibble(x = 1, y = "a") res <- drop_na(df) expect_identical(res, exp) }) test_that("tidyselection that selects no columns doesn't drop any rows (#1227)", { df <- tibble(x = c(1, 2, NA), y = c("a", NA, "b")) expect_identical(drop_na(df, starts_with("foo")), df) }) test_that("specifying (a) variables considers only that variable(s)", { df <- tibble(x = c(1, 2, NA), y = c("a", NA, "b")) exp <- tibble(x = c(1, 2), y = c("a", NA)) res <- drop_na(df, x) expect_identical(res, exp) exp <- tibble(x = c(1), y = c("a")) res <- drop_na(df, x:y) expect_identical(res, exp) }) test_that("groups are preserved", { df <- tibble(g = c("A", "A", "B"), x = c(1, 2, NA), y = c("a", NA, "b")) exp <- tibble(g = c("A", "B"), x = c(1, NA), y = c("a", "b")) gdf <- dplyr::group_by(df, "g") gexp <- dplyr::group_by(exp, "g") res <- drop_na(gdf, y) expect_identical(res, gexp) expect_identical(dplyr::group_vars(res), dplyr::group_vars(gexp)) }) test_that("errors are raised", { df <- tibble(x = c(1, 2, NA), y = c("a", NA, "b")) expect_snapshot((expect_error(drop_na(df, list())))) expect_snapshot((expect_error(drop_na(df, "z")))) }) test_that("single variable data.frame doesn't lose dimension", { df <- data.frame(x = c(1, 2, NA)) res <- drop_na(df, "x") exp <- data.frame(x = c(1, 2)) expect_identical(res, exp) }) test_that("works with list-cols", { df <- tibble(x = list(1L, NULL, 3L), y = c(1L, 2L, NA)) rs <- drop_na(df) 
expect_identical(rs, tibble(x = list(1L), y = 1L)) }) test_that("doesn't drop empty atomic elements of list-cols (#1228)", { df <- tibble(x = list(1L, NULL, integer())) expect_identical(drop_na(df), df[c(1, 3), ]) }) test_that("preserves attributes", { df <- tibble(x = structure(c(1, NA), attr = "!")) rs <- drop_na(df) expect_identical(rs$x, structure(1, attr = "!")) }) test_that("works with df-cols", { # if any packed row contains a missing value, it is incomplete df <- tibble(a = tibble(x = c(1, 1, NA, NA), y = c(1, NA, 1, NA))) expect_identical(drop_na(df, a), tibble(a = tibble(x = 1, y = 1))) }) test_that("works with rcrd cols", { skip_if( packageVersion("vctrs") <= "0.3.8", "vec_detect_complete() treated rcrds differently" ) # if any rcrd field contains a missing value, it is incomplete col <- new_rcrd(list(x = c(1, 1, NA, NA), y = c(1, NA, 1, NA))) df <- tibble(col = col) expect_identical( drop_na(df, col), tibble(col = new_rcrd(list(x = 1, y = 1))) ) }) tidyr/tests/testthat/_snaps/0000755000176200001440000000000014553563421015702 5ustar liggesuserstidyr/tests/testthat/_snaps/nest.md0000644000176200001440000000665014363516001017173 0ustar liggesusers# nest disallows renaming Code nest(df, data = c(a = x)) Condition Error in `nest()`: ! In expression named `data`: Caused by error: ! Can't rename variables in this context. --- Code nest(df, .by = c(a = x)) Condition Error in `nest()`: ! Can't rename variables in this context. # catches when `...` overwrites an existing column Code nest(df, x = y) Condition Error in `nest()`: ! Names must be unique. x These names are duplicated: * "x" at locations 1 and 2. # validates its inputs Code nest(df, y = ya:yb, .names_sep = 1) Condition Error in `nest()`: ! `.names_sep` must be a single string or `NULL`, not the number 1. --- Code nest(df, y = ya:yb, .key = 1) Condition Error in `nest()`: ! `.key` must be a single string, not the number 1. 
# warns if `.key` is supplied alongside `...` Code out <- nest_info(df, data = 2, .key = "foo") Condition Warning: Can't supply both `.key` and `...`. i `.key` will be ignored. --- Code out <- nest(df, data = 2, .key = "foo") Condition Warning in `nest()`: Can't supply both `.key` and `...`. i `.key` will be ignored. # `.by` isn't allowed for grouped data frames Code nest(df, .by = x) Condition Error in `nest()`: ! Can't supply `.by` when `.data` is a grouped data frame. # warn about old style interface Code out <- nest(df, y) Condition Warning: Supplying `...` without names was deprecated in tidyr 1.0.0. i Please specify a name for each selection. i Did you want `data = y`? --- Code out <- nest(df, -y) Condition Warning: Supplying `...` without names was deprecated in tidyr 1.0.0. i Please specify a name for each selection. i Did you want `data = -y`? # can use `.by` with old style interface Code out <- nest(df, y, .by = x) Condition Warning: Supplying `...` without names was deprecated in tidyr 1.0.0. i Please specify a name for each selection. i Did you want `data = y`? --- Code out <- nest(df, y, .by = x, .key = "foo") Condition Warning: Supplying `...` without names was deprecated in tidyr 1.0.0. i Please specify a name for each selection. i Did you want `foo = y`? # only warn about unnamed inputs (#1175) Code out <- nest(df, x, y, foo = z) Condition Warning: Supplying `...` without names was deprecated in tidyr 1.0.0. i Please specify a name for each selection. i Did you want `data = c(x, y)`? # unnamed expressions are kept in the warning Code out <- nest(df, x, starts_with("z")) Condition Warning: Supplying `...` without names was deprecated in tidyr 1.0.0. i Please specify a name for each selection. i Did you want `data = c(x, starts_with("z"))`? # can control output column name Code out <- nest(df, y, .key = "y") Condition Warning: Supplying `...` without names was deprecated in tidyr 1.0.0. i Please specify a name for each selection. 
i Did you want `y = y`? # .key gets warning with new interface Code out <- nest(df, y = y, .key = "foo") Condition Warning in `nest()`: Can't supply both `.key` and `...`. i `.key` will be ignored. tidyr/tests/testthat/_snaps/separate-rows.md0000644000176200001440000000052214350636231021012 0ustar liggesusers# it validates its inputs Code separate_rows(df, x, sep = 1) Condition Error in `separate_rows()`: ! `sep` must be a single string, not the number 1. Code separate_rows(df, x, convert = 1) Condition Error in `separate_rows()`: ! `convert` must be `TRUE` or `FALSE`, not the number 1. tidyr/tests/testthat/_snaps/chop.md0000644000176200001440000000270414360013543017147 0ustar liggesusers# chop() validates its input `cols` (#1205) Code chop(df$x) Condition Error in `chop()`: ! `data` must be a data frame, not an integer vector. Code chop(df) Condition Error in `chop()`: ! `cols` is absent but must be supplied. # incompatible sizes are caught Code (expect_error(unchop(df, c(x, y)))) Output Error in `unchop()`: ! In row 1, can't recycle input of size 2 to size 3. # empty typed inputs are considered in common size, but NULLs aren't Code (expect_error(unchop(df, c(x, y)))) Output Error in `unchop()`: ! In row 1, can't recycle input of size 0 to size 2. # unchop disallows renaming Code unchop(df, c(y = x)) Condition Error in `unchop()`: ! Can't rename variables in this context. # unchop validates its inputs Code unchop(1:10) Condition Error in `unchop()`: ! `data` must be a data frame, not an integer vector. Code unchop(df) Condition Error in `unchop()`: ! `cols` is absent but must be supplied. Code unchop(df, col, keep_empty = 1) Condition Error in `unchop()`: ! `keep_empty` must be `TRUE` or `FALSE`, not the number 1. Code unchop(df, col, ptype = 1) Condition Error in `unchop()`: ! `ptype` must be `NULL`, an empty ptype, or a named list of ptypes. 
tidyr/tests/testthat/_snaps/uncount.md0000644000176200001440000000113414350636232017712 0ustar liggesusers# validates inputs Code uncount(df, y) Condition Error in `uncount()`: ! Can't convert `weights` to . Code uncount(df, w) Condition Error in `uncount()`: ! `weights` must be a vector of positive numbers. Location 1 is negative. Code uncount(df, x, .remove = 1) Condition Error in `uncount()`: ! `.remove` must be `TRUE` or `FALSE`, not the number 1. Code uncount(df, x, .id = "") Condition Error in `uncount()`: ! `.id` must be a valid name or `NULL`, not the empty string "". tidyr/tests/testthat/_snaps/append.md0000644000176200001440000000067614520546617017506 0ustar liggesusers# after must be integer or character Code (expect_error(df_append(df1, df2, after = 1.5))) Output Error in `df_append()`: ! `after` must be a whole number, not the number 1.5. i This is an internal error that was detected in the tidyr package. Please report it at with a reprex () and the full backtrace. tidyr/tests/testthat/_snaps/replace_na.md0000644000176200001440000000171014350636231020307 0ustar liggesusers# can only be length 0 Code (expect_error(replace_na(1, 1:10))) Output Error in `replace_na()`: ! Replacement for `data` must be length 1, not length 10. # replacement must be castable to `data` Code (expect_error(replace_na(x, 1.5))) Output Error in `vec_assign()`: ! Can't convert from `replace` to `data` due to loss of precision. * Locations: 1 # replacement must be castable to corresponding column Code (expect_error(replace_na(df, list(a = 1.5)))) Output Error in `vec_assign()`: ! Can't convert from `replace$a` to `data$a` due to loss of precision. * Locations: 1 # validates its inputs Code replace_na(df, replace = 1) Condition Error in `replace_na()`: ! `replace` must be a list, not a number. 
tidyr/tests/testthat/_snaps/separate-wider.md0000644000176200001440000001505314520546617021146 0ustar liggesusers# separate_wider_delim() errors about too few/too many values Code df %>% separate_wider_delim(x, " ", names = c("a", "b")) Condition Error in `separate_wider_delim()`: ! Expected 2 pieces in each element of `x`. ! 1 value was too short. i Use `too_few = "debug"` to diagnose the problem. i Use `too_few = "align_start"/"align_end"` to silence this message. ! 1 value was too long. i Use `too_many = "debug"` to diagnose the problem. i Use `too_many = "drop"/"merge"` to silence this message. # separate_wider_delim() can diagnose problems Code out <- df %>% separate_wider_delim(x, " ", names = c("a", "b"), too_few = "debug", too_many = "debug", ) Condition Warning: Debug mode activated: adding variables `x_ok`, `x_pieces`, and `x_remainder`. # separate_wider_delim() validates its inputs Code df %>% separate_wider_delim() Condition Error in `separate_wider_delim()`: ! `cols` is absent but must be supplied. Code df %>% separate_wider_delim(x) Condition Error in `separate_wider_delim()`: ! `delim` must be a single string, not absent. Code df %>% separate_wider_delim(x, 1) Condition Error in `separate_wider_delim()`: ! `delim` must be a single string, not the number 1. Code df %>% separate_wider_delim(x, "") Condition Error in `separate_wider_delim()`: ! `delim` must be a single string, not the empty string "". Code df %>% separate_wider_delim(x, "-") Condition Error in `separate_wider_delim()`: ! Must specify at least one of `names` or `names_sep`. Code df %>% separate_wider_delim(x, "-", names = 1) Condition Error in `separate_wider_delim()`: ! `names` must be a character vector or `NULL`, not the number 1. Code df %>% separate_wider_delim(x, "-", names = c(x = "x")) Condition Error in `separate_wider_delim()`: ! `names` must be an unnamed character vector. 
Code df %>% separate_wider_delim(x, "-", names_sep = "_", too_many = "merge") Condition Error in `separate_wider_delim()`: ! Must provide `names` when `too_many = "merge"`. # separate_wider_position() errors if lengths are inconsistent Code df %>% separate_wider_position(x, widths = c(a = 2, b = 1)) Condition Error in `separate_wider_position()`: ! Expected 3 characters in each element of `x`. ! 1 value was too short. i Use `too_few = "debug"` to diagnose the problem. i Use `too_few = "align_start"` to silence this message. ! 1 value was too long. i Use `too_many = "debug"` to diagnose the problem. i Use `too_many = "drop"` to silence this message. # separate_wider_position() can diagnose problems Code out <- df %>% separate_wider_position(x, widths = c(a = 2, b = 1), too_few = "debug", too_many = "debug") Condition Warning: Debug mode activated: adding variables `x_ok`, `x_width`, and `x_remainder`. # separate_wider_position() validates its inputs Code df %>% separate_wider_position() Condition Error in `separate_wider_position()`: ! `cols` is absent but must be supplied. Code df %>% separate_wider_position(x) Condition Error in `separate_wider_position()`: ! `widths` is absent but must be supplied. Code df %>% separate_wider_position(x, widths = 1.5) Condition Error in `separate_wider_position()`: ! `widths` must be a (partially) named integer vector. Code df %>% separate_wider_position(x, widths = 1L) Condition Error in `separate_wider_position()`: ! `widths` must be a (partially) named integer vector. Code df %>% separate_wider_position(x, widths = c(x = 0)) Condition Error in `separate_wider_position()`: ! All values of `widths` must be positive. # separate_wider_regex() errors if match fails Code df %>% separate_wider_regex(x, c(a = ".", "-", b = "\\d+")) Condition Error in `separate_wider_regex()`: ! Expected each value of `x` to match the pattern, the whole pattern, and nothing but the pattern. ! 1 value has problem. 
i Use `too_few = "debug"` to diagnose the problem. i Use `too_few = "align_start"` to silence this message. # separate_wider_regex() can diagnose errors Code out <- df %>% separate_wider_regex(x, c(a = "[a-z]", "-", b = "\\d+"), too_few = "debug") Condition Warning: Debug mode activated: adding variables `x_ok`, `x_matches`, and `x_remainder`. # separate_wider_regex() gives informative error if () used Code df %>% separate_wider_regex(x, c(`_` = "(.)")) Condition Error in `separate_wider_regex()`: ! Invalid number of groups. i Did you use "()" instead of "(?:)" inside `patterns`? # separate_wider_regex() advises on outer / inner name duplication (#1425) Code separate_wider_regex(df, y, patterns = c(x = ".", value = ".")) Condition Error in `separate_wider_regex()`: ! Can't duplicate names between the affected columns and the original data. x These names are duplicated: i `x`, from `y`. i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. # separate_wider_regex() advises on inner / inner name duplication (#1425) Code separate_wider_regex(df, c(x, y), patterns = c(gender = ".", value = ".")) Condition Error in `separate_wider_regex()`: ! Can't duplicate names within the affected columns. x These names are duplicated: i `gender`, within `x` and `y`. i `value`, within `x` and `y`. i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. # separate_wider_regex() validates its inputs Code df %>% separate_wider_regex() Condition Error in `separate_wider_regex()`: ! `cols` is absent but must be supplied. Code df %>% separate_wider_regex(x) Condition Error in `separate_wider_regex()`: ! `patterns` must be a character vector, not absent. Code df %>% separate_wider_regex(y, patterns = c(x = "-")) Condition Error in `separate_wider_regex()`: ! Can't subset columns that don't exist. x Column `y` doesn't exist. 
Code df %>% separate_wider_regex(x, patterns = ".") Condition Error in `separate_wider_regex()`: ! `patterns` must be a named character vector. tidyr/tests/testthat/_snaps/unnest-wider.md0000644000176200001440000000633414363516001020645 0ustar liggesusers# unnest_wider - bad inputs generate errors Code (expect_error(unnest_wider(df, y))) Output Error in `unnest_wider()`: i In column: `y`. i In row: 1. Caused by error: ! List-column must only contain vectors. # can't unnest unnamed elements without `names_sep` (#1367) Code unnest_wider(df, col) Condition Error in `unnest_wider()`: i In column: `col`. i In row: 1. Caused by error: ! Can't unnest elements with missing names. i Supply `names_sep` to generate automatic names. --- Code unnest_wider(df, col) Condition Error in `unnest_wider()`: i In column: `col`. i In row: 1. Caused by error: ! Can't unnest elements with missing names. i Supply `names_sep` to generate automatic names. --- Code unnest_wider(df, col) Condition Error in `unnest_wider()`: i In column: `col`. i In row: 1. Caused by error: ! Can't unnest elements with missing names. i Supply `names_sep` to generate automatic names. --- Code unnest_wider(df, col) Condition Error in `unnest_wider()`: i In column: `col`. i In row: 2. Caused by error: ! Can't unnest elements with missing names. i Supply `names_sep` to generate automatic names. # catches duplicate inner names in the same vector Code unnest_wider(df, col) Condition Error in `unnest_wider()`: ! Names must be unique. x These names are duplicated: * "a" at locations 1 and 2. i Use argument `names_repair` to specify repair strategy. --- Code out <- unnest_wider(df, col, names_repair = "unique") Message New names: * `a` -> `a...1` * `a` -> `a...2` # unnest_wider() advises on outer / inner name duplication (#1367) Code unnest_wider(df, y) Condition Error in `unnest_wider()`: ! Can't duplicate names between the affected columns and the original data. x These names are duplicated: i `x`, from `y`. 
i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. # unnest_wider() advises on inner / inner name duplication (#1367) Code unnest_wider(df, c(y, z)) Condition Error in `unnest_wider()`: ! Can't duplicate names within the affected columns. x These names are duplicated: i `a`, within `y` and `z`. i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. # unnest_wider() validates its inputs Code unnest_wider(1) Condition Error in `unnest_wider()`: ! `data` must be a data frame, not a number. Code unnest_wider(df) Condition Error in `unnest_wider()`: ! `col` is absent but must be supplied. Code unnest_wider(df, x, names_sep = 1) Condition Error in `unnest_wider()`: ! `names_sep` must be a single string or `NULL`, not the number 1. Code unnest_wider(df, x, strict = 1) Condition Error in `unnest_wider()`: ! `strict` must be `TRUE` or `FALSE`, not the number 1. tidyr/tests/testthat/_snaps/pack.md0000644000176200001440000000655314363516001017142 0ustar liggesusers# pack disallows renaming Code pack(df, data = c(a = x)) Condition Error in `pack()`: ! In expression named `data`: Caused by error: ! Can't rename variables in this context. --- Code pack(df, data1 = x, data2 = c(a = y)) Condition Error in `pack()`: ! In expression named `data2`: Caused by error: ! Can't rename variables in this context. # pack validates its inputs Code pack(1) Condition Error in `pack()`: ! `.data` must be a data frame, not a number. Code pack(df, c(a1, a2), c(b1, b2)) Condition Error in `pack()`: ! All elements of `...` must be named. Code pack(df, a = c(a1, a2), c(b1, b2)) Condition Error in `pack()`: ! All elements of `...` must be named. Code pack(df, a = c(a1, a2), .names_sep = 1) Condition Error in `pack()`: ! `.names_sep` must be a single string or `NULL`, not the number 1. 
# catches across inner name duplication (#1425) Code unpack(df, c(x, y)) Condition Error in `unpack()`: ! Can't duplicate names within the affected columns. x These names are duplicated: i `b`, within `x` and `y`. i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. --- Code unpack(df, c(x, y, z)) Condition Error in `unpack()`: ! Can't duplicate names within the affected columns. x These names are duplicated: i `a`, within `x` and `z`. i `b`, within `x`, `y`, and `z`. i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. # catches outer / inner name duplication (#1367) Code unpack(df, d) Condition Error in `unpack()`: ! Can't duplicate names between the affected columns and the original data. x These names are duplicated: i `a`, from `d`. i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. --- Code unpack(df, c(d, e, f)) Condition Error in `unpack()`: ! Can't duplicate names between the affected columns and the original data. x These names are duplicated: i `a`, from `d`. i `b` and `c`, from `f`. i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. # duplication errors aren't triggered on duplicates within a single column you are unpacking Code unpack(df, x) Condition Error in `unpack()`: ! Names must be unique. x These names are duplicated: * "a" at locations 1 and 2. i Use argument `names_repair` to specify repair strategy. # unpack disallows renaming Code unpack(df, c(y = x)) Condition Error in `unpack()`: ! Can't rename variables in this context. # unpack() validates its inputs Code unpack(1) Condition Error in `unpack()`: ! `data` must be a data frame, not a number. Code unpack(df) Condition Error in `unpack()`: ! `cols` is absent but must be supplied. Code unpack(df, y, names_sep = 1) Condition Error in `unpack()`: ! 
`names_sep` must be a single string or `NULL`, not the number 1. tidyr/tests/testthat/_snaps/expand.md0000644000176200001440000000267514350636224017512 0ustar liggesusers# crossing checks for bad inputs Code (expect_error(crossing(x = 1:10, y = quote(a)))) Output Error in `crossing()`: ! `..2` must be a vector, not a symbol. # expand() respects `.name_repair` Code out <- df %>% expand(x = x, x = x, .name_repair = "unique") Message New names: * `x` -> `x...1` * `x` -> `x...2` # crossing() / nesting() respect `.name_repair` Code out <- crossing(x = x, x = x, .name_repair = "unique") Message New names: * `x` -> `x...1` * `x` -> `x...2` --- Code out <- nesting(x = x, x = x, .name_repair = "unique") Message New names: * `x` -> `x...1` * `x` -> `x...2` # expand_grid() can control name_repair Code (expect_error(expand_grid(x = x, x = x))) Output Error in `expand_grid()`: ! Names must be unique. x These names are duplicated: * "x" at locations 1 and 2. i Use argument `.name_repair` to specify repair strategy. --- Code out <- expand_grid(x = x, x = x, .name_repair = "unique") Message New names: * `x` -> `x...1` * `x` -> `x...2` # grid_dots() reject non-vector input Code (expect_error(grid_dots(lm(1 ~ 1)))) Output Error: ! `..1` must be a vector, not a object. tidyr/tests/testthat/_snaps/spread.md0000644000176200001440000000043414350636232017477 0ustar liggesusers# duplicate values for one key is an error Code (expect_error(spread(df, x, y))) Output Error in `spread()`: ! Each row of output must be identified by a unique combination of keys. i Keys are shared for 2 rows * 2, 3 tidyr/tests/testthat/_snaps/unnest-longer.md0000644000176200001440000000670514360013543021023 0ustar liggesusers# unnest_longer - bad inputs generate errors Code (expect_error(unnest_longer(df, y))) Output Error in `unnest_longer()`: ! List-column `y` must contain only vectors or `NULL`. 
# tidyverse recycling rules are applied after `keep_empty` Code unnest_longer(df, c(a, b)) Condition Error in `unnest_longer()`: ! In row 1, can't recycle input of size 0 to size 2. # can't mix `indices_to` with `indices_include = FALSE` Code (expect_error(unnest_longer(mtcars, mpg, indices_to = "x", indices_include = FALSE)) ) Output Error in `unnest_longer()`: ! Can't use `indices_include = FALSE` when `indices_to` is supplied. # unnest_longer() validates its inputs Code unnest_longer(1) Condition Error in `unnest_longer()`: ! `data` must be a data frame, not a number. Code unnest_longer(df) Condition Error in `unnest_longer()`: ! `col` is absent but must be supplied. Code unnest_longer(df, x, indices_to = "") Condition Error in `unnest_longer()`: ! `indices_to` must be a valid name or `NULL`, not the empty string "". Code unnest_longer(df, x, indices_include = 1) Condition Error in `unnest_longer()`: ! `indices_include` must be `TRUE`, `FALSE`, or `NULL`, not the number 1. Code unnest_longer(df, x, values_to = "") Condition Error in `unnest_longer()`: ! `values_to` must be a valid name or `NULL`, not the empty string "". # `values_to` is validated Code (expect_error(unnest_longer(mtcars, mpg, values_to = 1))) Output Error in `unnest_longer()`: ! `values_to` must be a valid name or `NULL`, not the number 1. Code (expect_error(unnest_longer(mtcars, mpg, values_to = c("x", "y")))) Output Error in `unnest_longer()`: ! `values_to` must be a valid name or `NULL`, not a character vector. # `indices_to` is validated Code (expect_error(unnest_longer(mtcars, mpg, indices_to = 1))) Output Error in `unnest_longer()`: ! `indices_to` must be a valid name or `NULL`, not the number 1. Code (expect_error(unnest_longer(mtcars, mpg, indices_to = c("x", "y")))) Output Error in `unnest_longer()`: ! `indices_to` must be a valid name or `NULL`, not a character vector. 
# `indices_include` is validated Code (expect_error(unnest_longer(mtcars, mpg, indices_include = 1))) Output Error in `unnest_longer()`: ! `indices_include` must be `TRUE`, `FALSE`, or `NULL`, not the number 1. Code (expect_error(unnest_longer(mtcars, mpg, indices_include = c(TRUE, FALSE)))) Output Error in `unnest_longer()`: ! `indices_include` must be `TRUE`, `FALSE`, or `NULL`, not a logical vector. # `keep_empty` is validated Code (expect_error(unnest_longer(mtcars, mpg, keep_empty = 1))) Output Error in `unnest_longer()`: ! `keep_empty` must be `TRUE` or `FALSE`, not the number 1. Code (expect_error(unnest_longer(mtcars, mpg, keep_empty = c(TRUE, FALSE)))) Output Error in `unnest_longer()`: ! `keep_empty` must be `TRUE` or `FALSE`, not a logical vector. tidyr/tests/testthat/_snaps/pivot.md0000644000176200001440000000170714350636231017365 0ustar liggesusers# basic sanity checks for spec occur Code (expect_error(check_pivot_spec(1))) Output Error: ! `spec` must be a data frame, not a number. Code (expect_error(check_pivot_spec(mtcars))) Output Error: ! `spec` must have `.name` and `.value` columns. # `.name` column must be a character vector Code (expect_error(check_pivot_spec(df))) Output Error: ! `spec$.name` must be a character vector, not an integer vector. # `.value` column must be a character vector Code (expect_error(check_pivot_spec(df))) Output Error: ! `spec$.value` must be a character vector, not an integer vector. # `.name` column must be unique Code (expect_error(check_pivot_spec(df))) Output Error: ! `spec$.name` must be unique. tidyr/tests/testthat/_snaps/separate.md0000644000176200001440000000375214350636232020033 0ustar liggesusers# too many pieces dealt with as requested Code separate(df, x, c("x", "y")) Condition Warning: Expected 2 pieces. Additional pieces discarded in 1 rows [2]. Output # A tibble: 2 x 2 x y 1 a b 2 a b --- Code separate(df, x, c("x", "y"), extra = "error") Condition Warning: `extra = "error"` is deprecated. 
Please use `extra = "warn"` instead Warning: Expected 2 pieces. Additional pieces discarded in 1 rows [2]. Output # A tibble: 2 x 2 x y 1 a b 2 a b # too few pieces dealt with as requested Code separate(df, x, c("x", "y", "z")) Condition Warning: Expected 3 pieces. Missing pieces filled with `NA` in 1 rows [1]. Output # A tibble: 2 x 3 x y z 1 a b 2 a b c # validates inputs Code separate(df) Condition Error in `separate()`: ! `col` is absent but must be supplied. Code separate(df, x, into = 1) Condition Error in `separate()`: ! `into` must be a character vector, not the number 1. Code separate(df, x, into = "x", sep = c("a", "b")) Condition Error in `separate()`: ! `sep` must be a string or numeric vector, not a character vector Code separate(df, x, into = "x", remove = 1) Condition Error in `separate()`: ! `remove` must be `TRUE` or `FALSE`, not the number 1. Code separate(df, x, into = "x", convert = 1) Condition Error in `separate()`: ! `convert` must be `TRUE` or `FALSE`, not the number 1. # informative error if using stringr modifier functions (#693) Code (expect_error(separate(df, x, "x", sep = sep))) Output Error in `separate()`: ! `sep` can't use modifiers from stringr. tidyr/tests/testthat/_snaps/hoist.md0000644000176200001440000000420514520546617017355 0ustar liggesusers# nested lists generate a cast error if they can't be cast to the ptype Code (expect_error(hoist(df, x, "b", .ptype = list(b = double())))) Output Error in `hoist()`: ! Can't convert `..1` to . # non-vectors generate a cast error if a ptype is supplied Code (expect_error(hoist(df, x, "b", .ptype = list(b = integer())))) Output Error in `hoist()`: ! `..1` must be a vector, not a symbol. # input validation catches problems Code (expect_error(df %>% hoist(y))) Output Error in `hoist()`: ! `.data[[.col]]` must be a list, not the number 1. Code (expect_error(df %>% hoist(x, 1))) Output Error in `hoist()`: ! All elements of `...` must be named. 
Code (expect_error(df %>% hoist(x, a = "a", a = "b"))) Output Error in `hoist()`: ! The names of `...` must be unique. # can't hoist() from a data frame column Code (expect_error(hoist(df, a, xx = 1))) Output Error in `hoist()`: ! `.data[[.col]]` must be a list, not a object. # hoist() validates its inputs (#1224) Code hoist(1) Condition Error in `hoist()`: ! `.data` must be a data frame, not a number. Code hoist(df) Condition Error in `hoist()`: ! `.col` is absent but must be supplied. Code hoist(df, a, .remove = 1) Condition Error in `hoist()`: ! `.remove` must be `TRUE` or `FALSE`, not the number 1. Code hoist(df, a, .ptype = 1) Condition Error in `hoist()`: ! `.ptype` must be `NULL`, an empty ptype, or a named list of ptypes. Code hoist(df, a, .transform = 1) Condition Error in `hoist()`: ! `.transform` must be `NULL`, a function, or a named list of functions. Code hoist(df, a, .simplify = 1) Condition Error in `hoist()`: ! `.simplify` must be a list or a single `TRUE` or `FALSE`. tidyr/tests/testthat/_snaps/pivot-wide.md0000644000176200001440000002430114553563421020313 0ustar liggesusers# error when overwriting existing column Code (expect_error(pivot_wider(df, names_from = key, values_from = val))) Output Error in `pivot_wider()`: ! Names must be unique. x These names are duplicated: * "a" at locations 1 and 2. i Use argument `names_repair` to specify repair strategy. --- Code out <- pivot_wider(df, names_from = key, values_from = val, names_repair = "unique") Message New names: * `a` -> `a...1` * `a` -> `a...2` # `names_from` must be supplied if `name` isn't in `data` (#1240) Code (expect_error(pivot_wider(df, values_from = val))) Output Error in `pivot_wider()`: ! Can't subset columns that don't exist. x Column `name` doesn't exist. # `values_from` must be supplied if `value` isn't in `data` (#1240) Code (expect_error(pivot_wider(df, names_from = key))) Output Error in `pivot_wider()`: ! Can't subset columns that don't exist. x Column `value` doesn't exist. 
# `names_from` must identify at least 1 column (#1240) Code (expect_error(pivot_wider(df, names_from = starts_with("foo"), values_from = val)) ) Output Error in `pivot_wider()`: ! Must select at least one item. # `values_from` must identify at least 1 column (#1240) Code (expect_error(pivot_wider(df, names_from = key, values_from = starts_with("foo"))) ) Output Error in `pivot_wider()`: ! Must select at least one item. # `values_fn` emits an informative error when it doesn't result in unique values (#1238) Code (expect_error(pivot_wider(df, values_fn = list(value = ~.x)))) Output Error in `pivot_wider()`: ! Applying `values_fn` to `value` must result in a single summary value per key. i Applying `values_fn` resulted in a vector of length 2. # `build_wider_spec()` requires empty dots Code (expect_error(build_wider_spec(df, 1))) Output Error in `build_wider_spec()`: ! `...` must be empty. x Problematic argument: * ..1 = 1 i Did you forget to name an argument? Code (expect_error(build_wider_spec(df, name_prefix = ""))) Output Error in `build_wider_spec()`: ! `...` must be empty. x Problematic argument: * name_prefix = "" # `pivot_wider_spec()` requires empty dots Code (expect_error(pivot_wider_spec(df, spec, 1))) Output Error in `pivot_wider_spec()`: ! `...` must be empty. x Problematic argument: * ..1 = 1 i Did you forget to name an argument? Code (expect_error(pivot_wider_spec(df, spec, name_repair = "check_unique"))) Output Error in `pivot_wider_spec()`: ! `...` must be empty. x Problematic argument: * name_repair = "check_unique" # `names_vary` is validated Code (expect_error(build_wider_spec(df, names_vary = 1))) Output Error in `build_wider_spec()`: ! `names_vary` must be a string or character vector. Code (expect_error(build_wider_spec(df, names_vary = "x"))) Output Error in `build_wider_spec()`: ! `names_vary` must be one of "fastest" or "slowest", not "x". 
# `names_expand` is validated Code (expect_error(build_wider_spec(df, names_expand = 1))) Output Error in `build_wider_spec()`: ! `names_expand` must be `TRUE` or `FALSE`, not the number 1. Code (expect_error(build_wider_spec(df, names_expand = "x"))) Output Error in `build_wider_spec()`: ! `names_expand` must be `TRUE` or `FALSE`, not the string "x". # `id_cols` can't select columns from `names_from` or `values_from` (#1318) Code (expect_error(pivot_wider(df, id_cols = name, names_from = name, values_from = value)) ) Output Error in `pivot_wider()`: ! `id_cols` can't select a column already selected by `names_from`. i Column `name` has already been selected. Code (expect_error(pivot_wider(df, id_cols = value, names_from = name, values_from = value)) ) Output Error in `pivot_wider()`: ! `id_cols` can't select a column already selected by `values_from`. i Column `value` has already been selected. # `id_cols` returns a tidyselect error if a column selection is OOB (#1318) Code (expect_error(pivot_wider(df, id_cols = foo))) Output Error in `pivot_wider()`: ! Can't subset columns that don't exist. x Column `foo` doesn't exist. # named `id_cols` gives clear error (#1104) Code pivot_wider(df, id_cols = c(z = x)) Condition Error in `pivot_wider()`: ! Can't rename variables in this context. # `id_expand` is validated Code (expect_error(pivot_wider(df, id_expand = 1))) Output Error in `pivot_wider()`: ! `id_expand` must be `TRUE` or `FALSE`, not the number 1. Code (expect_error(pivot_wider(df, id_expand = "x"))) Output Error in `pivot_wider()`: ! `id_expand` must be `TRUE` or `FALSE`, not the string "x". # duplicated keys produce list column with warning Code pv <- pivot_wider(df, names_from = key, values_from = val) Condition Warning: Values from `val` are not uniquely identified; output will contain list-cols. * Use `values_fn = list` to suppress this warning. * Use `values_fn = {summary_fun}` to summarise duplicates. * Use the following dplyr code to identify duplicates. 
{data} |> dplyr::summarise(n = dplyr::n(), .by = c(a, key)) |> dplyr::filter(n > 1L) # duplicated key warning mentions every applicable column Code pivot_wider(df, names_from = key, values_from = c(a, b, c)) Condition Warning: Values from `a`, `b` and `c` are not uniquely identified; output will contain list-cols. * Use `values_fn = list` to suppress this warning. * Use `values_fn = {summary_fun}` to summarise duplicates. * Use the following dplyr code to identify duplicates. {data} |> dplyr::summarise(n = dplyr::n(), .by = c(key)) |> dplyr::filter(n > 1L) Output # A tibble: 1 x 3 a_x b_x c_x 1 --- Code pivot_wider(df, names_from = key, values_from = c(a, b, c), values_fn = list(b = sum)) Condition Warning: Values from `a` and `c` are not uniquely identified; output will contain list-cols. * Use `values_fn = list` to suppress this warning. * Use `values_fn = {summary_fun}` to summarise duplicates. * Use the following dplyr code to identify duplicates. {data} |> dplyr::summarise(n = dplyr::n(), .by = c(key)) |> dplyr::filter(n > 1L) Output # A tibble: 1 x 3 a_x b_x c_x 1 7 # duplicated key warning backticks non-syntactic names Code pv <- pivot_wider(df, names_from = `the-key`, values_from = val) Condition Warning: Values from `val` are not uniquely identified; output will contain list-cols. * Use `values_fn = list` to suppress this warning. * Use `values_fn = {summary_fun}` to summarise duplicates. * Use the following dplyr code to identify duplicates. {data} |> dplyr::summarise(n = dplyr::n(), .by = c(`a 1`, a2, `the-key`)) |> dplyr::filter(n > 1L) # values_fn is validated Code (expect_error(pivot_wider(df, values_fn = 1))) Output Error in `pivot_wider()`: ! `values_fn` must be `NULL`, a function, or a named list of functions. # `unused_fn` must result in single summary values Code (expect_error(pivot_wider(df, id_cols = id, unused_fn = identity))) Output Error in `pivot_wider()`: ! Applying `unused_fn` to `unused` must result in a single summary value per key. 
i Applying `unused_fn` resulted in a vector of length 2. # `values_fill` is validated Code (expect_error(pivot_wider(df, values_fill = 1:2))) Output Error in `pivot_wider()`: ! `values_fill` must be `NULL`, a scalar, or a named list, not an integer vector. # `unused_fn` is validated Code (expect_error(pivot_wider(df, id_cols = id, unused_fn = 1))) Output Error in `pivot_wider()`: ! `unused_fn` must be `NULL`, a function, or a named list of functions. # `id_cols` has noisy compat behavior (#1353) Code out <- pivot_wider(df, id) Condition Warning: Specifying the `id_cols` argument by position was deprecated in tidyr 1.3.0. i Please explicitly name `id_cols`, like `id_cols = id`. --- Code expect <- pivot_wider(df, id_cols = id) # `id_cols` compat behavior doesn't trigger if `id_cols` is specified too Code pivot_wider(df, id, id_cols = id2) Condition Error in `pivot_wider()`: ! `...` must be empty. x Problematic argument: * ..1 = id i Did you forget to name an argument? # `id_cols` compat behavior doesn't trigger if multiple `...` are supplied Code pivot_wider(df, id, id2) Condition Error in `pivot_wider()`: ! `...` must be empty. x Problematic arguments: * ..1 = id * ..2 = id2 i Did you forget to name an argument? # `id_cols` compat behavior doesn't trigger if named `...` are supplied Code pivot_wider(df, ids = id) Condition Error in `pivot_wider()`: ! `...` must be empty. x Problematic argument: * ids = id tidyr/tests/testthat/_snaps/seq.md0000644000176200001440000000124414350636232017011 0ustar liggesusers# full_seq errors if sequence isn't regular Code (expect_error(full_seq(c(1, 3, 4), 2))) Output Error in `full_seq()`: ! `x` is not a regular sequence. Code (expect_error(full_seq(c(0, 10, 20), 11, tol = 1.8))) Output Error in `full_seq()`: ! `x` is not a regular sequence. # validates inputs Code full_seq(x, period = "a") Condition Error in `full_seq()`: ! `period` must be a number, not the string "a". 
Code full_seq(x, 1, tol = "a") Condition Error in `full_seq()`: ! `tol` must be a number, not the string "a". tidyr/tests/testthat/_snaps/gather.md0000644000176200001440000000263014357015307017474 0ustar liggesusers# gather throws error for POSIXlt Code (expect_error(gather(df, key, val, -x))) Output Code (expect_error(gather(df, key, val, -y))) Output # gather throws error for weird objects Code (expect_error(gather(df, key, val, -x))) Output Code (expect_error(gather(df, key, val, -y))) Output --- Code (expect_error(gather(df, key, val, -x))) Output Code (expect_error(gather(df, key, val, -y))) Output # factors coerced to characters, not integers Code out <- gather(df, k, v) Condition Warning: attributes are not identical across measure variables; they will be dropped # varying attributes are dropped with a warning Code gather(df, k, v) Condition Warning: attributes are not identical across measure variables; they will be dropped Output k v 1 date1 1546300800 2 date2 17897 tidyr/tests/testthat/_snaps/separate-longer.md0000644000176200001440000000167114350636231021314 0ustar liggesusers# separate_longer_delim() validates its inputs Code df %>% separate_longer_delim() Condition Error in `separate_longer_delim()`: ! `cols` is absent but must be supplied. Code df %>% separate_longer_delim(x, sep = 1) Condition Error in `separate_longer_delim()`: ! `delim` must be a single string, not absent. # separate_longer_position() validates its inputs Code df %>% separate_longer_position() Condition Error in `separate_longer_position()`: ! `cols` is absent but must be supplied. Code df %>% separate_longer_position(y, width = 1) Condition Error in `separate_longer_position()`: ! Can't subset columns that don't exist. x Column `y` doesn't exist. Code df %>% separate_longer_position(x, width = 1.5) Condition Error in `separate_longer_position()`: ! `width` must be a whole number, not the number 1.5. 
tidyr/tests/testthat/_snaps/complete.md0000644000176200001440000000026214350636223020030 0ustar liggesusers# validates its inputs Code complete(mtcars, explicit = 1) Condition Error in `complete()`: ! `explicit` must be `TRUE` or `FALSE`, not the number 1. tidyr/tests/testthat/_snaps/fill.md0000644000176200001440000000030614350636224017146 0ustar liggesusers# validates its inputs Code df %>% fill(x, .direction = "foo") Condition Error in `fill()`: ! `.direction` must be one of "down", "up", "downup", or "updown", not "foo". tidyr/tests/testthat/_snaps/nest-legacy.md0000644000176200001440000000125514350636225020440 0ustar liggesusers# can't combine vectors and data frames Code (expect_error(unnest_legacy(df))) Output Error in `unnest_legacy()`: ! Each column must either be a list of vectors or a list of data frames. i Problems in: `x` # multiple columns must be same length Code (expect_error(unnest_legacy(df))) Output Error in `unnest_legacy()`: ! All nested columns must have the same number of elements. --- Code (expect_error(unnest_legacy(df))) Output Error in `unnest_legacy()`: ! All nested columns must have the same number of elements. tidyr/tests/testthat/_snaps/drop-na.md0000644000176200001440000000073514350636224017566 0ustar liggesusers# errors are raised Code (expect_error(drop_na(df, list()))) Output Error in `drop_na()`: ! Can't subset columns with `list()`. x `list()` must be numeric or character, not an empty list. --- Code (expect_error(drop_na(df, "z"))) Output Error in `drop_na()`: ! Can't subset columns that don't exist. x Column `z` doesn't exist. tidyr/tests/testthat/_snaps/unite.md0000644000176200001440000000107514350636233017350 0ustar liggesusers# validates its inputs Code unite(df) Condition Error in `unite()`: ! `col` is absent but must be supplied. Code unite(df, "z", x:y, sep = 1) Condition Error in `unite()`: ! `sep` must be a single string, not the number 1. Code unite(df, "z", x:y, remove = 1) Condition Error in `unite()`: ! 
`remove` must be `TRUE` or `FALSE`, not the number 1. Code unite(df, "z", x:y, na.rm = 1) Condition Error in `unite()`: ! `na.rm` must be `TRUE` or `FALSE`, not the number 1. tidyr/tests/testthat/_snaps/extract.md0000644000176200001440000000235014350636224017673 0ustar liggesusers# informative error message if wrong number of groups Code (expect_error(extract(df, x, "y", "."))) Output Error in `extract()`: ! `regex` should define 1 groups; 0 found. Code (expect_error(extract(df, x, c("y", "z"), "."))) Output Error in `extract()`: ! `regex` should define 2 groups; 0 found. # informative error if using stringr modifier functions (#693) Code (expect_error(extract(df, x, "x", regex = regex))) Output Error in `extract()`: ! `regex` can't use modifiers from stringr. # validates its inputs Code df %>% extract() Condition Error in `extract()`: ! `col` is absent but must be supplied. Code df %>% extract(x, regex = 1) Condition Error in `extract()`: ! `regex` must be a single string, not the number 1. Code df %>% extract(x, into = 1:3) Condition Error in `extract()`: ! `into` must be a character vector, not an integer vector. Code df %>% extract(x, into = "x", convert = 1) Condition Error in `extract()`: ! `convert` must be `TRUE` or `FALSE`, not the number 1. tidyr/tests/testthat/_snaps/pivot-long.md0000644000176200001440000001640714520546617020334 0ustar liggesusers# when `values_ptypes` is provided, the type error uses variable names (#1364) Code (expect_error(pivot_longer(df, x, values_ptypes = character()))) Output Error in `pivot_longer()`: ! Can't convert `x` to . # when `names_ptypes` is provided, the type error uses `names_to` names (#1364) Code (expect_error({ pivot_longer(df, cols = x, names_to = "name", names_ptypes = double()) })) Output Error in `pivot_longer()`: ! Can't convert `name` to . # error when overwriting existing column Code (expect_error(pivot_longer(df, y, names_to = "x"))) Output Error in `pivot_longer()`: ! Names must be unique. 
x These names are duplicated: * "x" at locations 1 and 2. i Use argument `names_repair` to specify repair strategy. --- Code out <- pivot_longer(df, y, names_to = "x", names_repair = "unique") Message New names: * `x` -> `x...1` * `x` -> `x...2` # multiple names requires names_sep/names_pattern Code (expect_error(build_longer_spec(df, x_y, names_to = c("a", "b")))) Output Error in `build_longer_spec()`: ! If you supply multiple names in `names_to` you must also supply one of `names_sep` or `names_pattern`. Code (expect_error(build_longer_spec(df, x_y, names_to = c("a", "b"), names_sep = "x", names_pattern = "x"))) Output Error in `build_longer_spec()`: ! If you supply multiple names in `names_to` you must also supply one of `names_sep` or `names_pattern`. # names_sep fails with single name Code (expect_error(build_longer_spec(df, x_y, names_to = "x", names_sep = "_"))) Output Error in `build_longer_spec()`: ! `names_sep` can't be used with a length 1 `names_to`. # Error if the `col` can't be selected. Code (expect_error(pivot_longer(iris, matches("foo")))) Output Error in `pivot_longer()`: ! `cols` must select at least one column. # named `cols` gives clear error (#1104) Code pivot_longer(df, c(z = y)) Condition Error in `pivot_longer()`: ! Can't rename variables in this context. # `names_to` is validated Code (expect_error(build_longer_spec(df, x, names_to = 1))) Output Error in `build_longer_spec()`: ! `names_to` must be a character vector or `NULL`, not the number 1. Code (expect_error(build_longer_spec(df, x, names_to = c("x", "y")))) Output Error in `build_longer_spec()`: ! If you supply multiple names in `names_to` you must also supply one of `names_sep` or `names_pattern`. Code (expect_error(build_longer_spec(df, x, names_to = c("x", "y"), names_sep = "_", names_pattern = "x"))) Output Error in `build_longer_spec()`: ! If you supply multiple names in `names_to` you must also supply one of `names_sep` or `names_pattern`. 
# `names_ptypes` is validated Code (expect_error(build_longer_spec(df, x, names_ptypes = 1))) Output Error in `build_longer_spec()`: ! `names_ptypes` must be `NULL`, an empty ptype, or a named list of ptypes. Code (expect_error(build_longer_spec(df, x, names_ptypes = list(integer())))) Output Error in `build_longer_spec()`: ! All elements of `names_ptypes` must be named. # `names_transform` is validated Code (expect_error(build_longer_spec(df, x, names_transform = 1))) Output Error in `build_longer_spec()`: ! `names_transform` must be `NULL`, a function, or a named list of functions. Code (expect_error(build_longer_spec(df, x, names_transform = list(~.x)))) Output Error in `build_longer_spec()`: ! All elements of `names_transform` must be named. # `values_ptypes` is validated Code (expect_error(pivot_longer(df, x, values_ptypes = 1))) Output Error in `pivot_longer()`: ! `values_ptypes` must be `NULL`, an empty ptype, or a named list of ptypes. Code (expect_error(pivot_longer(df, x, values_ptypes = list(integer())))) Output Error in `pivot_longer()`: ! All elements of `values_ptypes` must be named. # `values_transform` is validated Code (expect_error(pivot_longer(df, x, values_transform = 1))) Output Error in `pivot_longer()`: ! `values_transform` must be `NULL`, a function, or a named list of functions. Code (expect_error(pivot_longer(df, x, values_transform = list(~.x)))) Output Error in `pivot_longer()`: ! All elements of `values_transform` must be named. # `cols_vary` is validated Code (expect_error(pivot_longer(df, x, cols_vary = "fast"))) Output Error in `pivot_longer()`: ! `cols_vary` must be one of "fastest" or "slowest", not "fast". i Did you mean "fastest"? Code (expect_error(pivot_longer(df, x, cols_vary = 1))) Output Error in `pivot_longer()`: ! `cols_vary` must be a string or character vector. # `pivot_longer()` catches unused input passed through the dots Code (expect_error(pivot_longer(df, c(x, y), 1))) Output Error in `pivot_longer()`: ! 
Arguments in `...` must be used. x Problematic argument: * ..1 = 1 i Did you misspell an argument name? Code (expect_error(pivot_longer(df, c(x, y), col_vary = "slowest"))) Output Error in `pivot_longer()`: ! Arguments in `...` must be used. x Problematic argument: * col_vary = "slowest" i Did you misspell an argument name? # `build_longer_spec()` requires empty dots Code (expect_error(build_longer_spec(df, c(x, y), 1))) Output Error in `build_longer_spec()`: ! `...` must be empty. x Problematic argument: * ..1 = 1 i Did you forget to name an argument? Code (expect_error(build_longer_spec(df, c(x, y), name_to = "name"))) Output Error in `build_longer_spec()`: ! `...` must be empty. x Problematic argument: * name_to = "name" # `pivot_longer_spec()` requires empty dots Code (expect_error(pivot_longer_spec(df, spec, 1))) Output Error in `pivot_longer_spec()`: ! `...` must be empty. x Problematic argument: * ..1 = 1 i Did you forget to name an argument? Code (expect_error(pivot_longer_spec(df, spec, col_vary = "slowest"))) Output Error in `pivot_longer_spec()`: ! `...` must be empty. x Problematic argument: * col_vary = "slowest" tidyr/tests/testthat/_snaps/unnest.md0000644000176200001440000000735614360013543017542 0ustar liggesusers# bad inputs generate errors Code (expect_error(unnest(df, y))) Output Error in `list_sizes()`: ! `x[[1]]` must be a vector, not a function. # multiple columns must be same length Code (expect_error(unnest(df, c(x, y)))) Output Error in `unnest()`: ! In row 1, can't recycle input of size 2 to size 3. --- Code (expect_error(unnest(df, c(x, y)))) Output Error in `unnest()`: ! In row 1, can't recycle input of size 2 to size 3. # unnesting column of mixed vector / data frame input is an error Code (expect_error(unnest(df, x))) Output Error in `list_unchop()`: ! Can't combine `x[[1]]` and `x[[2]]` . # unnest() advises on outer / inner name duplication Code unnest(df, y) Condition Error in `unnest()`: ! 
Can't duplicate names between the affected columns and the original data. x These names are duplicated: i `x`, from `y`. i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. # unnest() advises on inner / inner name duplication Code unnest(df, c(x, y)) Condition Error in `unnest()`: ! Can't duplicate names within the affected columns. x These names are duplicated: i `a`, within `x` and `y`. i Use `names_sep` to disambiguate using the column name. i Or use `names_repair` to specify a repair strategy. # unnest() disallows renaming Code unnest(df, c(y = x)) Condition Error in `unnest()`: ! Can't rename variables in this context. # cols must go in cols Code unnest(df, x, y) Condition Warning: `unnest()` has a new interface. See `?unnest` for details. i Try `df %>% unnest(c(x, y))`, with `mutate()` if needed. Output # A tibble: 2 x 2 x y 1 3 a 2 4 b # need supply column names Code unnest(df) Condition Warning: `cols` is now required when using `unnest()`. i Please use `cols = c(y)`. Output # A tibble: 2 x 2 x y 1 1 a 2 2 b # sep combines column names Code out <- df %>% unnest(c(x, y), .sep = "_") Condition Warning: The `.sep` argument of `unnest()` is deprecated as of tidyr 1.0.0. i Use `names_sep = '_'` instead. # unnest has mutate semantics Code out <- df %>% unnest(z = map(y, `+`, 1)) Condition Warning: `unnest()` has a new interface. See `?unnest` for details. i Try `df %>% unnest(c(z))`, with `mutate()` if needed. # .drop and .preserve are deprecated Code df %>% unnest(x, .preserve = y) Condition Warning: The `.preserve` argument of `unnest()` is deprecated as of tidyr 1.0.0. i All list-columns are now preserved Output # A tibble: 2 x 2 x y 1 3 2 4 --- Code df %>% unnest(x, .drop = FALSE) Condition Warning: The `.drop` argument of `unnest()` is deprecated as of tidyr 1.0.0. i All list-columns are now preserved. 
Output # A tibble: 2 x 2 x y 1 3 2 4 # .id creates vector of names for vector unnest Code out <- unnest(df, y, .id = "name") Condition Warning: The `.id` argument of `unnest()` is deprecated as of tidyr 1.0.0. i Manually create column of names instead. tidyr/tests/testthat/_snaps/unnest-helper.md0000644000176200001440000000512614520546617021023 0ustar liggesusers# `simplify` is validated Code (expect_error(df_simplify(data.frame(), simplify = 1))) Output Error: ! `simplify` must be a list or a single `TRUE` or `FALSE`. Code (expect_error(df_simplify(data.frame(), simplify = NA))) Output Error: ! `simplify` must be a list or a single `TRUE` or `FALSE`. Code (expect_error(df_simplify(data.frame(), simplify = c(TRUE, FALSE)))) Output Error: ! `simplify` must be a list or a single `TRUE` or `FALSE`. Code (expect_error(df_simplify(data.frame(), simplify = list(1)))) Output Error: ! All elements of `simplify` must be named. Code (expect_error(df_simplify(data.frame(), simplify = list(x = 1, x = 1)))) Output Error: ! The names of `simplify` must be unique. # `ptype` is validated Code (expect_error(df_simplify(data.frame(), ptype = 1))) Output Error: ! `ptype` must be `NULL`, an empty ptype, or a named list of ptypes. Code (expect_error(df_simplify(data.frame(), ptype = list(1)))) Output Error: ! All elements of `ptype` must be named. Code (expect_error(df_simplify(data.frame(), ptype = list(x = 1, x = 1)))) Output Error: ! The names of `ptype` must be unique. # `transform` is validated Code (expect_error(df_simplify(data.frame(), transform = list(~.x)))) Output Error: ! All elements of `transform` must be named. Code (expect_error(df_simplify(data.frame(x = 1), transform = 1))) Output Error: ! `transform` must be `NULL`, a function, or a named list of functions. Code (expect_error(df_simplify(data.frame(), transform = list(x = 1)))) Output Error: ! Can't convert `transform$x`, a double vector, to a function. 
Code (expect_error(df_simplify(data.frame(), transform = list(x = 1, x = 1)))) Output Error: ! The names of `transform` must be unique. # ptype is applied after transform Code (expect_error(col_simplify(list(1, 2, 3), ptype = integer(), transform = ~ .x + 1.5))) Output Error: ! Can't convert from `..1` to due to loss of precision. * Locations: 1 tidyr/tests/testthat/test-chop.R0000644000176200001440000002415414360013543016445 0ustar liggesusers# chop -------------------------------------------------------------------- test_that("can chop multiple columns", { df <- tibble(x = c(1, 1, 2), a = 1:3, b = 1:3) out <- df %>% chop(c(a, b)) expect_named(out, c("x", "a", "b")) expect_equal(out$a, list_of(1:2, 3L)) expect_equal(out$b, list_of(1:2, 3L)) }) test_that("chopping no columns returns input", { df <- tibble(a1 = 1, a2 = 2, b1 = 1, b2 = 2) expect_equal(chop(df, c()), df) }) test_that("grouping is preserved", { df <- tibble(g = c(1, 1), x = 1:2) out <- df %>% dplyr::group_by(g) %>% chop(x) expect_equal(dplyr::group_vars(out), "g") }) test_that("chop() validates its input `cols` (#1205)", { df <- tibble(x = 1:2) expect_snapshot(error = TRUE, { chop(df$x) chop(df) }) }) test_that("can chop empty data frame (#1206)", { df <- tibble(x = integer(), y = integer()) expect_identical( chop(df, y), tibble(x = integer(), y = list_of(.ptype = integer())) ) expect_identical( chop(df, x), tibble(y = integer(), x = list_of(.ptype = integer())) ) expect_identical( chop(df, c(x, y)), tibble(x = list_of(.ptype = integer()), y = list_of(.ptype = integer())) ) }) # unchop ------------------------------------------------------------------ test_that("extends into rows", { df <- tibble(x = 1:2, y = list(NULL, 1:4)) out <- df %>% unchop(y) expect_equal(out$x, rep(2, 4)) expect_equal(out$y, 1:4) }) test_that("can unchop multiple cols", { df <- tibble(x = 1:2, y = list(1, 2:3), z = list(4, 5:6)) out <- df %>% unchop(c(y, z)) expect_equal(out$x, c(1, 2, 2)) expect_equal(out$y, 1:3) 
expect_equal(out$z, 4:6) }) test_that("unchopping nothing leaves input unchanged", { df <- tibble(x = 1:3, y = 4:6) expect_equal(unchop(df, integer()), df) }) test_that("unchopping vectors is a no-op", { df <- tibble(x = 1:3, y = 4:6) expect_identical(unchop(df, c(x, y)), df) }) test_that("NULL inputs are automatically dropped", { df <- tibble(x = 1:4, y = list(NULL, 1:2, 4, NULL), z = list(NULL, 1:2, NULL, 5)) out <- df %>% unchop(c(y, z)) expect_equal(out$x, c(2, 2, 3, 4)) expect_equal(out$y, c(1, 2, 4, NA)) expect_equal(out$z, c(1, 2, NA, 5)) }) test_that("empty typed inputs are automatically dropped", { df <- tibble( x = 1:4, y = list(integer(), 1:2, 4L, integer()), z = list(integer(), 1:2, integer(), 5L) ) out <- unchop(df, c(y, z)) expect_identical(out$x, c(2L, 2L)) expect_identical(out$y, c(1L, 2L)) expect_identical(out$z, c(1L, 2L)) }) test_that("optionally keep empty rows", { df <- tibble(x = 1:2, y = list(NULL, 1:2), z = list(tibble(x = integer()), tibble(x = 1:2))) out <- df %>% unchop(y, keep_empty = TRUE) expect_equal(out$x, c(1, 2, 2)) expect_equal(out$y, c(NA, 1, 2)) out <- df %>% unchop(z, keep_empty = TRUE) expect_equal(out$x, c(1, 2, 2)) expect_equal(out$z, tibble(x = c(NA, 1L, 2L))) }) test_that("mixing vectors with lists prevents NULLs from being dropped", { df <- tibble(x = 1:2, y = list(NULL, 1)) expect_identical(unchop(df, c(x, y)), tibble(x = 1:2, y = c(NA, 1))) }) test_that("preserves columns of empty inputs", { df <- tibble(x = integer(), y = list(), z = list()) expect_named(df %>% unchop(y), c("x", "y", "z")) expect_named(df %>% unchop(c(y, z)), c("x", "y", "z")) }) test_that("respects list_of types", { df <- tibble(x = integer(), y = list_of(.ptype = integer())) expect_equal(unchop(df, y), tibble(x = integer(), y = integer())) expect_equal(unchop(df, y, keep_empty = TRUE), tibble(x = integer(), y = integer())) df <- tibble(x = 1L, y = list_of(NULL, .ptype = integer())) expect_equal(unchop(df, y), tibble(x = integer(), y = integer())) 
expect_equal(unchop(df, y, keep_empty = TRUE), tibble(x = 1L, y = NA_integer_)) }) test_that("grouping is preserved", { df <- tibble(g = 1, x = list(1, 2)) out <- df %>% dplyr::group_by(g) %>% unchop(x) expect_equal(dplyr::group_vars(out), "g") }) test_that("unchop() only creates unspecified vectors for empty lists", { df <- data.frame(x = integer(), y = integer()) expect_identical(unchop(df, y)$y, integer()) df <- tibble(x = integer(), y = data.frame(z = integer())) expect_identical(unchop(df, y)$y, data.frame(z = integer())) }) test_that("correctly performs tidy recycling with size 1 inputs", { df <- tibble(x = list(1, 2:3), y = list(2:3, 1)) expect <- tibble(x = c(1, 1, 2, 3), y = c(2, 3, 1, 1)) expect_identical(unchop(df, c(x, y)), expect) }) test_that("nonexistent `ptype` columns are ignored", { df <- tibble(x = 1, y = list(1, 2)) ptype <- list(y = numeric(), z = numeric()) expect_identical(unchop(df, y, ptype = ptype), unchop(df, y)) }) test_that("can specify a ptype to force an output type", { df <- tibble(x = list(1L, 2L)) ptype <- list(x = numeric()) expect_identical(unchop(df, x, ptype = ptype), tibble(x = c(1, 2))) }) test_that("ptype overrides unspecified() result", { df <- tibble(x = list()) expect_identical( unchop(df, x, ptype = list(x = integer())), tibble(x = integer()) ) df <- tibble(x = list(NULL, NULL)) expect_identical( unchop(df, x, ptype = list(x = integer())), tibble(x = integer()) ) expect_identical( unchop(df, x, ptype = list(x = integer()), keep_empty = TRUE), tibble(x = c(NA_integer_, NA_integer_)) ) }) test_that("ptype overrides list-of ptype", { df <- tibble(x = list_of(1L, 2:3)) expect_identical( unchop(df, x, ptype = list(x = double())), tibble(x = c(1, 2, 3)) ) }) test_that("ptype is utilized on non-list columns (#1211)", { df <- tibble(x = 1) expect_identical( unchop(df, x, ptype = list(x = integer())), tibble(x = 1L) ) }) test_that("`ptype` is allowed to be an empty ptype (#1284)", { df <- tibble(x = list(1), y = list(1)) 
expect_identical( unchop(df, c(x, y), ptype = integer()), tibble(x = 1L, y = 1L) ) }) test_that("data frame ptype works", { df <- tibble(x = tibble(a = 1)) expect_identical( unchop(df, x, ptype = tibble(a = integer())), tibble(x = tibble(a = 1L)) ) }) test_that("`ptype = list()` uses list ptype", { df <- tibble(x = list(list(1))) expect_identical( unchop(df, x, ptype = list()), tibble(x = list(1)) ) }) test_that("unchopping a bare empty list results in unspecified()", { df <- tibble(x = integer(), y = list()) expect <- tibble(x = integer(), y = unspecified()) expect_identical(unchop(df, y), expect) expect_identical(unchop(df, y, keep_empty = TRUE), expect) }) test_that("unchopping a bare fully `NULL` list results in unspecified()", { df <- tibble(x = 1:2, y = list(NULL, NULL), z = list(NULL, NULL)) expect <- tibble(x = integer(), y = unspecified(), z = unspecified()) expect_identical(unchop(df, c(y, z)), expect) }) test_that("unchopping a bare fully `NULL` list with `keep_empty = TRUE` results in logical missings", { df <- tibble(x = 1:2, y = list(NULL, NULL), z = list(NULL, NULL)) expect <- tibble(x = 1:2, y = c(NA, NA), z = c(NA, NA)) expect_identical(unchop(df, c(y, z), keep_empty = TRUE), expect) }) test_that("unchopping list of empty types retains type", { df <- tibble(x = 1:2, y = list(integer(), double())) expect <- tibble(x = integer(), y = double()) expect_identical(unchop(df, y), expect) expect <- tibble(x = 1:2, y = c(NA_real_, NA_real_)) expect_identical(unchop(df, y, keep_empty = TRUE), expect) }) test_that("unchop retrieves correct types with emptied chopped df", { chopped <- chop(tibble(x = 1:3, y = 4:6), y) empty <- vec_slice(chopped, 0L) expect_identical(unchop(empty, y), tibble(x = integer(), y = integer())) }) test_that("unchop works with data frame columns (treating them like vectors) (#1128)", { df <- tibble(x = tibble(a = 1:2, b = "a"), y = list(3:4)) expect_identical(unchop(df, c(x, y)), unchop(df, y)) }) test_that("unchop works with record 
columns (treating them like vectors)", { df <- tibble(x = list(1:2, 1), y = new_rcrd(list(x = 1:2))) expect_identical( unchop(df, c(x, y)), tibble(x = c(1, 2, 1), y = new_rcrd(list(x = c(1L, 1L, 2L)))) ) }) test_that("incompatible sizes are caught", { df <- tibble(x = list(1:2), y = list(1:3)) expect_snapshot((expect_error(unchop(df, c(x, y))))) }) test_that("empty typed inputs are considered in common size, but NULLs aren't", { df <- tibble(x = list(NULL), y = list(1:2)) expect_error(unchop(df, c(x, y)), NA) df <- tibble(x = list(integer()), y = list(1:2)) expect_snapshot((expect_error(unchop(df, c(x, y))))) }) test_that("unchopping retains inner names from tibble elements", { df <- tibble(x = list(tibble(col = list(NAMED = "x")))) out <- unchop(df, x) expect_named(out$x$col, "NAMED") }) test_that("unchopping retains inner names from atomic elements (#1154)", { df <- tibble(x = list(c(a = 1), c(b = 2))) out <- unchop(df, x) expect_named(out$x, c("a", "b")) }) test_that("unchopping drops outer names", { df <- tibble(col = list(a = 1, b = 2:3)) out <- unchop(df, col) expect_named(out$col, NULL) }) test_that("unchop disallows renaming", { df <- tibble(x = list(1)) expect_snapshot(error = TRUE, { unchop(df, c(y = x)) }) }) test_that("unchop works on foreign list types recognized by `vec_is_list()` (#1327)", { new_foo <- function(...) 
{ structure(list(...), class = c("foo", "list")) } df <- tibble(x = new_foo(1L, 2:3)) expect_identical(unchop(df, x), tibble(x = 1:3)) # With empty list df <- tibble(x = new_foo()) expect_identical(unchop(df, x), tibble(x = unspecified())) # With empty types df <- tibble(x = new_foo(1L, integer())) expect_identical(unchop(df, x), tibble(x = 1L)) expect_identical(unchop(df, x, keep_empty = TRUE), tibble(x = c(1L, NA))) # With `NULL`s df <- tibble(x = new_foo(1L, NULL)) expect_identical(unchop(df, x), tibble(x = 1L)) expect_identical(unchop(df, x, keep_empty = TRUE), tibble(x = c(1L, NA))) # With custom `ptype` df <- tibble(x = new_foo(1, 3L)) expect_identical(unchop(df, x, ptype = integer()), tibble(x = c(1L, 3L))) }) test_that("unchop validates its inputs", { df <- tibble(col = list(a = 1, b = 2:3)) expect_snapshot(error = TRUE, { unchop(1:10) unchop(df) unchop(df, col, keep_empty = 1) unchop(df, col, ptype = 1) }) }) tidyr/tests/testthat/test-utils.R0000644000176200001440000000067414013473313016655 0ustar liggesuserstest_that("tidyr_legacy copies old approach", { expect_equal(tidyr_legacy(c()), character()) expect_equal(tidyr_legacy(c("x", "x", "y")), c("x", "x1", "y")) expect_equal(tidyr_legacy(c("", "", "")), c("V1", "V2", "V3")) }) test_that("reconstruct doesn't repair names", { # This ensures that name repair elsewhere isn't overridden df <- tibble(x = 1, x = 2, .name_repair = "minimal") expect_equal(reconstruct_tibble(df, df), df) }) tidyr/tests/testthat/test-uncount.R0000644000176200001440000000245614323620576017221 0ustar liggesuserstest_that("symbols weights are dropped in output", { df <- tibble(x = 1, w = 1) expect_equal(uncount(df, w), tibble(x = 1)) }) test_that("can request to preserve symbols", { df <- tibble(x = 1, w = 1) expect_equal(uncount(df, w, .remove = FALSE), df) }) test_that("unique identifiers created on request", { df <- tibble(w = 1:3) expect_equal(uncount(df, w, .id = "id"), tibble(id = c(1L, 1:2, 1:3))) }) test_that("expands constants 
and expressions", { df <- tibble(x = 1, w = 2) expect_equal(uncount(df, 2), df[c(1, 1), ]) expect_equal(uncount(df, 1 + 1), df[c(1, 1), ]) }) test_that("works with groups", { df <- tibble(g = 1, x = 1, w = 1) %>% dplyr::group_by(g) expect_equal(uncount(df, w), df %>% dplyr::select(-w)) }) test_that("must evaluate to integer", { df <- tibble(x = 1, w = 1 / 2) expect_error(uncount(df, w), class = "vctrs_error_cast_lossy") df <- tibble(x = 1) expect_error(uncount(df, "W"), class = "vctrs_error_incompatible_type") }) test_that("works with 0 weights", { df <- tibble(x = 1:2, w = c(0, 1)) expect_equal(uncount(df, w), tibble(x = 2)) }) test_that("validates inputs", { df <- tibble(x = 1, y = "a", w = -1) expect_snapshot(error = TRUE, { uncount(df, y) uncount(df, w) uncount(df, x, .remove = 1) uncount(df, x, .id = "") }) }) tidyr/tests/testthat/test-extract.R0000644000176200001440000000514014323620576017171 0ustar liggesuserstest_that("default returns first alpha group", { df <- data.frame(x = c("a.b", "a.d", "b.c")) out <- df %>% extract(x, "A") expect_equal(out$A, c("a", "a", "b")) }) test_that("can match multiple groups", { df <- data.frame(x = c("a.b", "a.d", "b.c")) out <- df %>% extract(x, c("A", "B"), "([[:alnum:]]+)\\.([[:alnum:]]+)") expect_equal(out$A, c("a", "a", "b")) expect_equal(out$B, c("b", "d", "c")) }) test_that("can drop groups", { df <- data.frame(x = c("a.b.e", "a.d.f", "b.c.g")) out <- df %>% extract(x, c("x", NA, "y"), "([a-z])\\.([a-z])\\.([a-z])") expect_named(out, c("x", "y")) expect_equal(out$y, c("e", "f", "g")) }) test_that("match failures give NAs", { df <- data.frame(x = c("a.b", "a")) out <- df %>% extract(x, "a", "(b)") expect_equal(out$a, c("b", NA)) }) test_that("extract keeps characters as character", { df <- tibble(x = "X-1") out <- extract(df, x, c("x", "y"), "(.)-(.)", convert = TRUE) expect_equal(out$x, "X") expect_equal(out$y, 1L) }) test_that("can combine into multiple columns", { df <- tibble(x = "abcd") out <- extract(df, x, 
c("a", "b", "a", "b"), "(.)(.)(.)(.)", convert = TRUE) expect_equal(out, tibble(a = "ac", b = "bd")) }) test_that("groups are preserved", { df <- tibble(g = 1, x = "X1") %>% dplyr::group_by(g) rs <- df %>% extract(x, c("x", "y"), "(.)(.)") expect_equal(class(df), class(rs)) expect_equal(dplyr::group_vars(df), dplyr::group_vars(rs)) }) test_that("informative error message if wrong number of groups", { df <- tibble(x = "a") expect_snapshot({ (expect_error(extract(df, x, "y", "."))) (expect_error(extract(df, x, c("y", "z"), "."))) }) }) test_that("informative error if using stringr modifier functions (#693)", { df <- tibble(x = "a") regex <- structure("a", class = "pattern") expect_snapshot((expect_error(extract(df, x, "x", regex = regex)))) }) test_that("str_match_first handles edge cases", { expect_identical( str_match_first(c("r-2", "d-2-3-4"), "(.)-(.)"), list(c("r", "d"), c("2", "2")) ) expect_identical( str_match_first(NA, "test"), list() ) expect_equal( str_match_first(c("", " "), "^(.*)$"), list(c("", " ")) ) expect_equal( str_match_first("", "(.)-(.)"), list(NA_character_, NA_character_) ) expect_equal( str_match_first(character(), "(.)-(.)"), list(character(), character()) ) }) test_that("validates its inputs", { df <- data.frame(x = letters) expect_snapshot(error = TRUE, { df %>% extract() df %>% extract(x, regex = 1) df %>% extract(x, into = 1:3) df %>% extract(x, into = "x", convert = 1) }) }) tidyr/tests/testthat/test-gather.R0000644000176200001440000001242414323546567017003 0ustar liggesuserstest_that("gather all columns when ... 
is empty", { df <- data.frame( x = 1:5, y = 6:10 ) out <- gather(df, key, val) expect_equal(nrow(out), 10) expect_equal(names(out), c("key", "val")) }) test_that("gather returns input if no columns gathered", { df <- data.frame(x = 1:2, y = 1:2) out <- gather(df, a, b, -x, -y) expect_equal(df, out) }) test_that("if not supply, key and value default to key and value", { df <- data.frame(x = 1:2) out <- gather(df) expect_equal(nrow(out), 2) expect_equal(names(out), c("key", "value")) }) test_that("Missing values removed when na.rm = TRUE", { df <- data.frame(x = c(1, NA)) out <- gather(df, k, v) expect_equal(out$v, df$x) out <- gather(df, k, v, na.rm = TRUE) expect_equal(out$v, 1) }) test_that("key converted to character by default", { df <- data.frame(y = 1, x = 2) out <- gather(df, k, v) expect_equal(out$k, c("y", "x")) }) test_that("covert will generate integers if needed", { df <- tibble(`1` = 1, `2` = 2) out <- gather(df, convert = TRUE) expect_identical(out$key, c(1L, 2L)) }) test_that("key preserves column ordering when factor_key = TRUE", { df <- data.frame(y = 1, x = 2) out <- gather(df, k, v, factor_key = TRUE) expect_equal(out$k, factor(c("y", "x"), levels = c("y", "x"))) }) test_that("preserve class of input", { dat <- data.frame(x = 1:2) dat %>% as_tibble() %>% gather() %>% expect_s3_class("tbl_df") }) test_that("additional inputs control which columns to gather", { data <- tibble(a = 1, b1 = 1, b2 = 2, b3 = 3) out <- gather(data, key, val, b1:b3) expect_equal(names(out), c("a", "key", "val")) expect_equal(out$val, 1:3) }) test_that("group_vars are kept where possible", { df <- tibble(x = 1, y = 1, z = 1) # Can't keep out <- df %>% dplyr::group_by(x) %>% gather(key, val, x:z) expect_equal(out, tibble(key = c("x", "y", "z"), val = 1)) # Can keep out <- df %>% dplyr::group_by(x) %>% gather(key, val, y:z) expect_equal(dplyr::group_vars(out), "x") }) test_that("overwrites existing vars", { df <- data.frame( X = 1, Y = 1, Z = 2 ) rs <- gather(df, key = 
"name", value = "Y") expect_named(rs, c("name", "Y")) expect_equal(rs$Y, c(1, 2)) }) # Column types ------------------------------------------------------------ test_that("can gather all atomic vectors", { df1 <- data.frame(x = 1, y = FALSE) df2 <- data.frame(x = 1, y = 1L) df3 <- data.frame(x = 1, y = 1) df4 <- data.frame(x = 1, y = "a", stringsAsFactors = FALSE) df5 <- data.frame(x = 1, y = 1 + 1i, stringsAsFactors = FALSE) gathered_val <- function(val) { data.frame(x = 1, key = "y", val = val, stringsAsFactors = FALSE) } gathered_key <- function(key) { data.frame(y = key, key = "x", val = 1, stringsAsFactors = FALSE) } expect_equal(gather(df1, key, val, -x), gathered_val(FALSE)) expect_equal(gather(df2, key, val, -x), gathered_val(1L)) expect_equal(gather(df3, key, val, -x), gathered_val(1)) expect_equal(gather(df4, key, val, -x), gathered_val("a")) expect_equal(gather(df5, key, val, -x), gathered_val(1 + 1i)) expect_equal(gather(df1, key, val, -y), gathered_key(FALSE)) expect_equal(gather(df2, key, val, -y), gathered_key(1L)) expect_equal(gather(df3, key, val, -y), gathered_key(1)) expect_equal(gather(df4, key, val, -y), gathered_key("a")) expect_equal(gather(df5, key, val, -y), gathered_key(1 + 1i)) }) test_that("gather throws error for POSIXlt", { df <- data.frame(y = 1) df$x <- as.POSIXlt(Sys.time()) expect_snapshot({ (expect_error(gather(df, key, val, -x))) (expect_error(gather(df, key, val, -y))) }) }) test_that("gather throws error for weird objects", { df <- data.frame(y = 1) df$x <- expression(x) expect_snapshot({ (expect_error(gather(df, key, val, -x))) (expect_error(gather(df, key, val, -y))) }) e <- new.env(parent = emptyenv()) e$x <- 1 df <- data.frame(y = 1) df$x <- e expect_snapshot({ (expect_error(gather(df, key, val, -x))) (expect_error(gather(df, key, val, -y))) }) }) test_that("factors coerced to characters, not integers", { df <- data.frame( v1 = 1:3, v2 = factor(letters[1:3]) ) expect_snapshot(out <- gather(df, k, v)) expect_equal(out$v, 
c(1:3, letters[1:3])) }) test_that("attributes of id variables are preserved", { df <- data.frame(x = factor(1:3), y = 1:3, z = 3:1) out <- gather(df, key, val, -x) expect_equal(attributes(df$x), attributes(out$x)) }) test_that("common attributes are preserved", { df <- data.frame(date1 = Sys.Date(), date2 = Sys.Date() + 10) out <- gather(df, k, v) expect_s3_class(out$v, "Date") }) test_that("varying attributes are dropped with a warning", { df <- data.frame( date1 = as.POSIXct("2019-01-01", tz = "UTC"), date2 = as.Date("2019-01-01") ) expect_snapshot(gather(df, k, v)) }) test_that("gather preserves OBJECT bit on e.g. POSIXct", { df <- data.frame(now = Sys.time()) out <- gather(df, k, v) expect_true(is.object(out$v)) }) test_that("can handle list-columns", { df <- tibble(x = 1:2, y = list("a", TRUE)) out <- gather(df, k, v, -y) expect_identical(out$y, df$y) }) test_that("can gather list-columns", { df <- tibble(x = 1:2, y = list(1, 2), z = list(3, 4)) out <- gather(df, k, v, y:z) expect_equal(out$v, list(1, 2, 3, 4)) }) tidyr/tests/testthat/test-replace_na.R0000644000176200001440000000523714323620576017617 0ustar liggesusers# vector ------------------------------------------------------------------ test_that("empty call does nothing", { x <- c(1, NA) expect_equal(replace_na(x), x) }) test_that("missing values are replaced", { x <- c(1, NA) expect_equal(replace_na(x, 0), c(1, 0)) }) test_that("can only be length 0", { expect_snapshot((expect_error(replace_na(1, 1:10)))) }) test_that("can replace missing rows in arrays", { x <- matrix(c(NA, NA, NA, 6), nrow = 2) replace <- matrix(c(-1, -2), nrow = 1) expect <- matrix(c(-1, NA, -2, 6), nrow = 2) expect_identical(replace_na(x, replace), expect) }) test_that("can replace missing values in rcrds", { x <- new_rcrd(list(x = c(1, NA, NA), y = c(1, NA, 2))) expect <- new_rcrd(list(x = c(1, 0, NA), y = c(1, 0, 2))) expect_identical( replace_na(x, new_rcrd(list(x = 0, y = 0))), expect ) }) test_that("replacement must be 
castable to `data`", { x <- c(1L, NA) expect_snapshot((expect_error(replace_na(x, 1.5)))) }) test_that("empty atomic elements are not replaced in lists (#1168)", { x <- list(character(), NULL) expect_identical( replace_na(x, replace = list("foo")), list(character(), "foo") ) }) test_that("can replace value in `NULL` (#1292)", { expect_identical(replace_na(NULL, replace = "NA"), NULL) expect_identical(replace_na(NULL, replace = 1L), NULL) }) # data frame ------------------------------------------------------------- test_that("empty call does nothing", { df <- tibble(x = c(1, NA)) out <- replace_na(df) expect_equal(out, df) }) test_that("missing values are replaced", { df <- tibble(x = c(1, NA)) out <- replace_na(df, list(x = 0)) expect_equal(out$x, c(1, 0)) }) test_that("don't complain about variables that don't exist", { df <- tibble(a = c(1, NA)) out <- replace_na(df, list(a = 100, b = 0)) expect_equal(out, tibble(a = c(1, 100))) }) test_that("can replace NULLs in list-column", { df <- tibble(x = list(1, NULL)) rs <- replace_na(df, list(x = list(1:5))) expect_identical(rs, tibble(x = list(1, 1:5))) }) test_that("df-col rows must be completely missing to be replaceable", { col <- tibble(x = c(1, NA, NA), y = c(1, 2, NA)) df <- tibble(a = col) col <- tibble(x = c(1, NA, -1), y = c(1, 2, -2)) expect <- tibble(a = col) replace <- tibble(x = -1, y = -2) expect_identical( replace_na(df, list(a = replace)), expect ) }) test_that("replacement must be castable to corresponding column", { df <- tibble(a = c(1L, NA)) expect_snapshot((expect_error(replace_na(df, list(a = 1.5))))) }) test_that("validates its inputs", { df <- tibble(a = c(1L, NA)) expect_snapshot(error = TRUE, { replace_na(df, replace = 1) }) }) tidyr/tests/testthat.R0000644000176200001440000000006614013466035014536 0ustar liggesuserslibrary(testthat) library(tidyr) test_check("tidyr") tidyr/src/0000755000176200001440000000000014553746313012207 5ustar 
liggesuserstidyr/src/melt.cpp0000644000176200001440000002056514013466035013653 0ustar liggesusers#include #include "cpp11/R.hpp" #include "cpp11/protect.hpp" #include "cpp11/sexp.hpp" #include "cpp11/integers.hpp" #include "cpp11/strings.hpp" #include "cpp11/list.hpp" #include "cpp11/data_frame.hpp" #if R_VERSION < R_Version(3, 5, 0) void* DATAPTR(SEXP x) { switch(TYPEOF(x)) { case STRSXP: return (char*) CHAR(x); break; case LGLSXP: return (char*) LOGICAL(x); case INTSXP: return (char*) INTEGER(x); case RAWSXP: return (char*) RAW(x); case CPLXSXP: return (char*) COMPLEX(x); case REALSXP: return (char*) REAL(x); default: cpp11::stop("Invalid type %s", Rf_type2char(TYPEOF(x))); } return nullptr; } #endif // A debug macro -- change to 'debug(x) x' for debug output #define debug(x) // An optimized rep #define DO_REP(RTYPE, CTYPE, ACCESSOR) \ { \ for (int i = 0; i < n; ++i) { \ memcpy((char*)ACCESSOR(output) + i * xn * sizeof(CTYPE), \ (char*)ACCESSOR(x), \ sizeof(CTYPE) * xn); \ } \ } SEXP rep_(SEXP x, int n, std::string var_name) { if (!Rf_isVectorAtomic(x) && TYPEOF(x) != VECSXP) { cpp11::stop("All columns must be atomic vectors or lists. Problem with '%s'", var_name.c_str()); } if (Rf_inherits(x, "POSIXlt")) { cpp11::stop("'%s' is a POSIXlt. 
Please convert to POSIXct.", var_name.c_str()); } int xn = Rf_length(x); int nout = xn * n; cpp11::sexp output(Rf_allocVector(TYPEOF(x), nout)); switch (TYPEOF(x)) { case INTSXP: DO_REP(INTSXP, int, INTEGER); break; case REALSXP: DO_REP(REALSXP, double, REAL); break; case LGLSXP: DO_REP(LGLSXP, int, LOGICAL); break; case CPLXSXP: DO_REP(CPLXSXP, Rcomplex, COMPLEX); break; case RAWSXP: DO_REP(RAWSXP, Rbyte, RAW); break; case STRSXP: { int counter = 0; for (int i = 0; i < n; ++i) { for (int j = 0; j < xn; ++j) { SET_STRING_ELT(output, counter, STRING_ELT(x, j)); ++counter; } } break; } case VECSXP: { int counter = 0; for (int i = 0; i < n; ++i) { for (int j = 0; j < xn; ++j) { SET_VECTOR_ELT(output, counter, VECTOR_ELT(x, j)); ++counter; } } break; } default: { cpp11::stop("Unhandled RTYPE in '%s'", var_name.c_str()); return R_NilValue; } } Rf_copyMostAttrib(x, output); return output; } // Optimized factor routine for the case where we want to make // a factor from a vector of names -- used for generating the // 'variable' column in the melted data.frame cpp11::integers make_variable_column_factor(cpp11::strings x, int nrow) { cpp11::writable::integers output(x.size() * nrow); int idx = 0; for (int i = 0; i < x.size(); ++i) for (int j = 0; j < nrow; ++j) output[idx++] = i + 1; output.attr("levels") = x; output.attr("class") = "factor"; return output; } cpp11::strings make_variable_column_character(cpp11::strings x, int nrow) { cpp11::writable::strings output(x.size() * nrow); int idx = 0; for (int i = 0; i < x.size(); ++i) for (int j = 0; j < nrow; ++j) output[idx++] = x[i]; return output; } // Concatenate vectors for the 'value' column #define DO_CONCATENATE(CTYPE) \ { \ memcpy((char*)DATAPTR(output) + i* nrow * sizeof(CTYPE), \ (char*)DATAPTR(tmp), \ nrow * sizeof(CTYPE)); \ break; \ } SEXP concatenate(const cpp11::data_frame& x, cpp11::integers ind, bool factorsAsStrings) { int nrow = x.nrow(); int n_ind = ind.size(); // We coerce up to the 'max type' if 
necessary, using the fact // that R's SEXPTYPEs are also ordered in terms of 'precision' // Note: we convert factors to characters if necessary int max_type = 0; int ctype = 0; for (int i = 0; i < n_ind; ++i) { if (Rf_isFactor(x[ind[i]]) and factorsAsStrings) { ctype = STRSXP; } else { ctype = TYPEOF(x[ind[i]]); } max_type = ctype > max_type ? ctype : max_type; } debug(printf("Max type of value variables is %s\n", Rf_type2char(max_type))); cpp11::sexp tmp; cpp11::sexp output(Rf_allocVector(max_type, nrow * n_ind)); for (int i = 0; i < n_ind; ++i) { SEXP col = x[ind[i]]; if (Rf_inherits(col, "POSIXlt")) { cpp11::stop("Column %i is a POSIXlt. Please convert to POSIXct.", i + 1); } // a 'tmp' pointer to the current column being iterated over, or // a coerced version if necessary if (TYPEOF(col) == max_type) { tmp = col; } else if (Rf_isFactor(col) and factorsAsStrings) { tmp = Rf_asCharacterFactor(col); } else { tmp = Rf_coerceVector(col, max_type); } switch (max_type) { case INTSXP: DO_CONCATENATE(int); case REALSXP: DO_CONCATENATE(double); case LGLSXP: DO_CONCATENATE(int); case CPLXSXP: DO_CONCATENATE(Rcomplex); case STRSXP: { for (int j = 0; j < nrow; ++j) { SET_STRING_ELT(output, i * nrow + j, STRING_ELT(tmp, j)); } break; } case VECSXP: { for (int j = 0; j < nrow; ++j) { SET_VECTOR_ELT(output, i * nrow + j, VECTOR_ELT(tmp, j)); } break; } default: cpp11::stop("All columns be atomic vectors or lists (not %s)", Rf_type2char(max_type)); } } return output; } [[cpp11::register]] cpp11::list melt_dataframe(cpp11::data_frame data, const cpp11::integers& id_ind, const cpp11::integers& measure_ind, cpp11::strings variable_name, cpp11::strings value_name, cpp11::sexp attrTemplate, bool factorsAsStrings, bool valueAsFactor, bool variableAsFactor) { int nrow = data.nrow(); cpp11::strings data_names(data.attr("names")); int n_id = id_ind.size(); debug(Rprintf("n_id == %i\n", n_id)); int n_measure = measure_ind.size(); debug(Rprintf("n_measure == %i\n", n_measure)); // Don't 
melt if the value variables are non-atomic for (int i = 0; i < n_measure; ++i) { if (!Rf_isVector(data[measure_ind[i]]) || Rf_inherits(data[measure_ind[i]], "data.frame")) { cpp11::stop("All columns must be atomic vectors or lists. Problem with column %i.", measure_ind[i] + 1); } } // The output should be a data.frame with: // number of columns == number of id vars + 'variable' + 'value', // with number of rows == data.nrow() * number of value vars cpp11::writable::list output(n_id + 2); // First, allocate the ID variables // we repeat each ID vector n_measure times for (int i = 0; i < n_id; ++i) { SEXP object = data[id_ind[i]]; std::string var_name = std::string(data_names[id_ind[i]]); output[i] = rep_(object, n_measure, var_name); } // Now, we assign the 'variable' and 'value' columns // 'variable' is made up of repeating the names of the 'measure' variables, // each nrow times. We want this to be a factor as well. cpp11::writable::strings id_names(n_measure); for (int i = 0; i < n_measure; ++i) { id_names[i] = data_names[measure_ind[i]]; } if (variableAsFactor) { output[n_id] = make_variable_column_factor(id_names, nrow); } else { output[n_id] = make_variable_column_character(id_names, nrow); } // 'value' is made by concatenating each of the 'value' variables output[n_id + 1] = concatenate(data, measure_ind, factorsAsStrings); if (!Rf_isNull(attrTemplate)) { Rf_copyMostAttrib(attrTemplate, output[n_id + 1]); } // Make the List more data.frame like // Set the row names output.attr("row.names") = {NA_INTEGER, -(nrow * n_measure)}; // Set the names cpp11::writable::strings out_names(n_id + 2); for (int i = 0; i < n_id; ++i) { out_names[i] = data_names[id_ind[i]]; } out_names[n_id] = variable_name[0]; out_names[n_id + 1] = value_name[0]; output.attr("names") = out_names; // Set the class output.attr("class") = "data.frame"; return output; } tidyr/src/cpp11.cpp0000644000176200001440000000371714323611637013642 0ustar liggesusers// Generated by cpp11: do not edit by 
hand // clang-format off #include "cpp11/declarations.hpp" #include // melt.cpp cpp11::list melt_dataframe(cpp11::data_frame data, const cpp11::integers& id_ind, const cpp11::integers& measure_ind, cpp11::strings variable_name, cpp11::strings value_name, cpp11::sexp attrTemplate, bool factorsAsStrings, bool valueAsFactor, bool variableAsFactor); extern "C" SEXP _tidyr_melt_dataframe(SEXP data, SEXP id_ind, SEXP measure_ind, SEXP variable_name, SEXP value_name, SEXP attrTemplate, SEXP factorsAsStrings, SEXP valueAsFactor, SEXP variableAsFactor) { BEGIN_CPP11 return cpp11::as_sexp(melt_dataframe(cpp11::as_cpp>(data), cpp11::as_cpp>(id_ind), cpp11::as_cpp>(measure_ind), cpp11::as_cpp>(variable_name), cpp11::as_cpp>(value_name), cpp11::as_cpp>(attrTemplate), cpp11::as_cpp>(factorsAsStrings), cpp11::as_cpp>(valueAsFactor), cpp11::as_cpp>(variableAsFactor))); END_CPP11 } // simplifyPieces.cpp cpp11::list simplifyPieces(cpp11::list pieces, int p, bool fillLeft); extern "C" SEXP _tidyr_simplifyPieces(SEXP pieces, SEXP p, SEXP fillLeft) { BEGIN_CPP11 return cpp11::as_sexp(simplifyPieces(cpp11::as_cpp>(pieces), cpp11::as_cpp>(p), cpp11::as_cpp>(fillLeft))); END_CPP11 } extern "C" { static const R_CallMethodDef CallEntries[] = { {"_tidyr_melt_dataframe", (DL_FUNC) &_tidyr_melt_dataframe, 9}, {"_tidyr_simplifyPieces", (DL_FUNC) &_tidyr_simplifyPieces, 3}, {NULL, NULL, 0} }; } extern "C" attribute_visible void R_init_tidyr(DllInfo* dll){ R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); R_forceSymbols(dll, TRUE); } tidyr/src/simplifyPieces.cpp0000644000176200001440000000257014013466035015673 0ustar liggesusers#include "cpp11/list.hpp" #include "cpp11/strings.hpp" #include "cpp11/as.hpp" #include [[cpp11::register]] cpp11::list simplifyPieces(cpp11::list pieces, int p, bool fillLeft = true) { std::vector tooSml, tooBig; int n = pieces.size(); cpp11::writable::list list(p); for (int j = 0; j < p; ++j) list[j] = cpp11::writable::strings(n); 
cpp11::writable::list out(list); for (int i = 0; i < n; ++i) { cpp11::strings x(pieces[i]); if (x.size() == 1 && x[0] == NA_STRING) { for (int j = 0; j < p; ++j) SET_STRING_ELT(out[j], i, NA_STRING); } else if (x.size() > p) { // too big tooBig.push_back(i + 1); for (int j = 0; j < p; ++j) SET_STRING_ELT(out[j], i, x[j]); } else if (x.size() < p) { // too small tooSml.push_back(i + 1); int gap = p - x.size(); for (int j = 0; j < p; ++j) { if (fillLeft) { SET_STRING_ELT(out[j], i, (j >= gap) ? static_cast(x[j - gap]) : NA_STRING); } else { SET_STRING_ELT(out[j], i, (j < x.size()) ? static_cast(x[j]) : NA_STRING); } } } else { for (int j = 0; j < p; ++j) SET_STRING_ELT(out[j], i, x[j]); } } using namespace cpp11::literals; return cpp11::writable::list({ "strings"_nm = out, "too_big"_nm = tooBig, "too_sml"_nm = tooSml} ); } tidyr/vignettes/0000755000176200001440000000000014553746313013430 5ustar liggesuserstidyr/vignettes/classroom2.csv0000644000176200001440000000016214013466035016217 0ustar liggesusers"assessment","Billy","Suzy","Lionel","Jenny" "quiz1",NA,"F","B","A" "quiz2","D",NA,"C","A" "test1","C",NA,"B","B" tidyr/vignettes/tidy-data.Rmd0000644000176200001440000005352314553563421015761 0ustar liggesusers--- title: "Tidy data" output: rmarkdown::html_vignette description: | A tidy dataset has variables in columns, observations in rows, and one value in each cell. This vignette introduces the theory of "tidy data" and shows you how it saves you time during data analysis. vignette: > %\VignetteIndexEntry{Tidy data} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, echo = FALSE} knitr::opts_chunk$set(collapse = TRUE, comment = "#>") set.seed(1014) options(dplyr.print_max = 10) ``` (This is an informal and code heavy version of the full [tidy data paper](https://vita.had.co.nz/papers/tidy-data.html). Please refer to that for more details.) ## Data tidying It is often said that 80% of data analysis is spent on the cleaning and preparing data. 
And it's not just a first step, but it must be repeated many times over the course of analysis as new problems come to light or new data is collected. To get a handle on the problem, this paper focuses on a small, but important, aspect of data cleaning that I call data **tidying**: structuring datasets to facilitate analysis. The principles of tidy data provide a standard way to organise data values within a dataset. A standard makes initial data cleaning easier because you don't need to start from scratch and reinvent the wheel every time. The tidy data standard has been designed to facilitate initial exploration and analysis of the data, and to simplify the development of data analysis tools that work well together. Current tools often require translation. You have to spend time munging the output from one tool so you can input it into another. Tidy datasets and tidy tools work hand in hand to make data analysis easier, allowing you to focus on the interesting domain problem, not on the uninteresting logistics of data. ## Defining tidy data {#defining} > Happy families are all alike; every unhappy family is unhappy in its own way > --- Leo Tolstoy Like families, tidy datasets are all alike but every messy dataset is messy in its own way. Tidy datasets provide a standardized way to link the structure of a dataset (its physical layout) with its semantics (its meaning). In this section, I'll provide some standard vocabulary for describing the structure and semantics of a dataset, and then use those definitions to define tidy data. ### Data structure Most statistical datasets are data frames made up of **rows** and **columns**. The columns are almost always labeled and the rows are sometimes labeled. The following code provides some data about an imaginary classroom in a format commonly seen in the wild. The table has three columns and four rows, and both rows and columns are labeled. 
```{r} library(tibble) classroom <- tribble( ~name, ~quiz1, ~quiz2, ~test1, "Billy", NA, "D", "C", "Suzy", "F", NA, NA, "Lionel", "B", "C", "B", "Jenny", "A", "A", "B" ) classroom ``` There are many ways to structure the same underlying data. The following table shows the same data as above, but the rows and columns have been transposed. ```{r} tribble( ~assessment, ~Billy, ~Suzy, ~Lionel, ~Jenny, "quiz1", NA, "F", "B", "A", "quiz2", "D", NA, "C", "A", "test1", "C", NA, "B", "B" ) ``` The data is the same, but the layout is different. Our vocabulary of rows and columns is simply not rich enough to describe why the two tables represent the same data. In addition to appearance, we need a way to describe the underlying semantics, or meaning, of the values displayed in the table. ### Data semantics A dataset is a collection of **values**, usually either numbers (if quantitative) or strings (if qualitative). Values are organised in two ways. Every value belongs to a **variable** and an **observation**. A variable contains all values that measure the same underlying attribute (like height, temperature, duration) across units. An observation contains all values measured on the same unit (like a person, or a day, or a race) across attributes. A tidy version of the classroom data looks like this: (you'll learn how the functions work a little later) ```{r setup, message = FALSE} library(tidyr) library(dplyr) ``` ```{r} classroom2 <- classroom %>% pivot_longer(quiz1:test1, names_to = "assessment", values_to = "grade") %>% arrange(name, assessment) classroom2 ``` This makes the values, variables, and observations more clear. The dataset contains 36 values representing three variables and 12 observations. The variables are: 1. `name`, with four possible values (Billy, Suzy, Lionel, and Jenny). 2. `assessment`, with three possible values (quiz1, quiz2, and test1). 3. 
`grade`, with five or six values depending on how you think of the missing value (`r sort(unique(classroom2$grade), na.last = TRUE)`). The tidy data frame explicitly tells us the definition of an observation. In this classroom, every combination of `name` and `assessment` is a single measured observation. The dataset also informs us of missing values, which can and do have meaning. Billy was absent for the first quiz, but tried to salvage his grade. Suzy failed the first quiz, so she decided to drop the class. To calculate Billy's final grade, we might replace this missing value with an F (or he might get a second chance to take the quiz). However, if we want to know the class average for Test 1, dropping Suzy's structural missing value would be more appropriate than imputing a new value. For a given dataset, it's usually easy to figure out what are observations and what are variables, but it is surprisingly difficult to precisely define variables and observations in general. For example, if the columns in the classroom data were `height` and `weight` we would have been happy to call them variables. If the columns were `height` and `width`, it would be less clear cut, as we might think of height and width as values of a `dimension` variable. If the columns were `home phone` and `work phone`, we could treat these as two variables, but in a fraud detection environment we might want variables `phone number` and `number type` because the use of one phone number for multiple people might suggest fraud. A general rule of thumb is that it is easier to describe functional relationships between variables (e.g., `z` is a linear combination of `x` and `y`, `density` is the ratio of `weight` to `volume`) than between rows, and it is easier to make comparisons between groups of observations (e.g., average of group a vs. average of group b) than between groups of columns. In a given analysis, there may be multiple levels of observation. 
For example, in a trial of new allergy medication we might have three observational types: demographic data collected from each person (`age`, `sex`, `race`), medical data collected from each person on each day (`number of sneezes`, `redness of eyes`), and meteorological data collected on each day (`temperature`, `pollen count`). Variables may change over the course of analysis. Often the variables in the raw data are very fine grained, and may add extra modelling complexity for little explanatory gain. For example, many surveys ask variations on the same question to better get at an underlying trait. In early stages of analysis, variables correspond to questions. In later stages, you change focus to traits, computed by averaging together multiple questions. This considerably simplifies analysis because you don't need a hierarchical model, and you can often pretend that the data is continuous, not discrete. ### Tidy data Tidy data is a standard way of mapping the meaning of a dataset to its structure. A dataset is messy or tidy depending on how rows, columns and tables are matched up with observations, variables and types. In **tidy data**: 1. Each variable is a column; each column is a variable. 2. Each observation is a row; each row is an observation. 3. Each value is a cell; each cell is a single value. This is Codd's 3rd normal form, but with the constraints framed in statistical language, and the focus put on a single dataset rather than the many connected datasets common in relational databases. **Messy data** is any other arrangement of the data. Tidy data makes it easy for an analyst or a computer to extract needed variables because it provides a standard way of structuring a dataset. Compare the different versions of the classroom data: in the messy version you need to use different strategies to extract different variables. This slows analysis and invites errors. 
If you consider how many data analysis operations involve all of the values in a variable (every aggregation function), you can see how important it is to extract these values in a simple, standard way. Tidy data is particularly well suited for vectorised programming languages like R, because the layout ensures that values of different variables from the same observation are always paired. While the order of variables and observations does not affect analysis, a good ordering makes it easier to scan the raw values. One way of organising variables is by their role in the analysis: are values fixed by the design of the data collection, or are they measured during the course of the experiment? Fixed variables describe the experimental design and are known in advance. Computer scientists often call fixed variables dimensions, and statisticians usually denote them with subscripts on random variables. Measured variables are what we actually measure in the study. Fixed variables should come first, followed by measured variables, each ordered so that related variables are contiguous. Rows can then be ordered by the first variable, breaking ties with the second and subsequent (fixed) variables. This is the convention adopted by all tabular displays in this paper. ## Tidying messy datasets {#tidying} Real datasets can, and often do, violate the three precepts of tidy data in almost every way imaginable. While occasionally you do get a dataset that you can start analysing immediately, this is the exception, not the rule. This section describes the five most common problems with messy datasets, along with their remedies: - Column headers are values, not variable names. - Multiple variables are stored in one column. - Variables are stored in both rows and columns. - Multiple types of observational units are stored in the same table. - A single observational unit is stored in multiple tables. 
Surprisingly, most messy datasets, including types of messiness not explicitly described above, can be tidied with a small set of tools: pivoting (longer and wider) and separating. The following sections illustrate each problem with a real dataset that I have encountered, and show how to tidy them. ### Column headers are values, not variable names A common type of messy dataset is tabular data designed for presentation, where variables form both the rows and columns, and column headers are values, not variable names. While I would call this arrangement messy, in some cases it can be extremely useful. It provides efficient storage for completely crossed designs, and it can lead to extremely efficient computation if desired operations can be expressed as matrix operations. The following code shows a subset of a typical dataset of this form. This dataset explores the relationship between income and religion in the US. It comes from a report produced by the Pew Research Center, an American think-tank that collects data on attitudes to topics ranging from religion to the internet, and produces many reports that contain datasets in this format. ```{r} relig_income ``` This dataset has three variables, `religion`, `income` and `frequency`. To tidy it, we need to **pivot** the non-variable columns into a two-column key-value pair. This action is often described as making a wide dataset longer (or taller). When pivoting variables, we need to provide the name of the new key-value columns to create. After defining the columns to pivot (every column except for religion), you will need the name of the key column, which is the name of the variable defined by the values of the column headings. In this case, it's `income`. The second argument is the name of the value column, `frequency`. 
```{r} relig_income %>% pivot_longer(-religion, names_to = "income", values_to = "frequency") ``` This form is tidy because each column represents a variable and each row represents an observation, in this case a demographic unit corresponding to a combination of `religion` and `income`. This format is also used to record regularly spaced observations over time. For example, the Billboard dataset shown below records the date a song first entered the billboard top 100. It has variables for `artist`, `track`, `date.entered`, `rank` and `week`. The rank in each week after it enters the top 100 is recorded in 75 columns, `wk1` to `wk75`. This form of storage is not tidy, but it is useful for data entry. It reduces duplication since otherwise each song in each week would need its own row, and song metadata like title and artist would need to be repeated. This will be discussed in more depth in [multiple types](#multiple-types). ```{r} billboard ``` To tidy this dataset, we first use `pivot_longer()` to make the dataset longer. We transform the columns from `wk1` to `wk76`, making a new column for their names, `week`, and a new value for their values, `rank`: ```{r} billboard2 <- billboard %>% pivot_longer( wk1:wk76, names_to = "week", values_to = "rank", values_drop_na = TRUE ) billboard2 ``` Here we use `values_drop_na = TRUE` to drop any missing values from the rank column. In this data, missing values represent weeks that the song wasn't in the charts, so can be safely dropped. In this case it's also nice to do a little cleaning, converting the week variable to a number, and figuring out the date corresponding to each week on the charts: ```{r} billboard3 <- billboard2 %>% mutate( week = as.integer(gsub("wk", "", week)), date = as.Date(date.entered) + 7 * (week - 1), date.entered = NULL ) billboard3 ``` Finally, it's always a good idea to sort the data. 
We could do it by artist, track and week: ```{r} billboard3 %>% arrange(artist, track, week) ``` Or by date and rank: ```{r} billboard3 %>% arrange(date, rank) ``` ### Multiple variables stored in one column After pivoting columns, the key column is sometimes a combination of multiple underlying variable names. This happens in the `tb` (tuberculosis) dataset, shown below. This dataset comes from the World Health Organisation, and records the counts of confirmed tuberculosis cases by `country`, `year`, and demographic group. The demographic groups are broken down by `sex` (m, f) and `age` (0-14, 15-25, 25-34, 35-44, 45-54, 55-64, unknown). ```{r} tb <- as_tibble(read.csv("tb.csv", stringsAsFactors = FALSE)) tb ``` First we use `pivot_longer()` to gather up the non-variable columns: ```{r} tb2 <- tb %>% pivot_longer( !c(iso2, year), names_to = "demo", values_to = "n", values_drop_na = TRUE ) tb2 ``` Column headers in this format are often separated by a non-alphanumeric character (e.g. `.`, `-`, `_`, `:`), or have a fixed width format, like in this dataset. `separate()` makes it easy to split a compound variable into individual variables. You can either pass it a regular expression to split on (the default is to split on non-alphanumeric characters), or a vector of character positions. In this case we want to split after the first character: ```{r} tb3 <- tb2 %>% separate(demo, c("sex", "age"), 1) tb3 ``` Storing the values in this form resolves a problem in the original data. We want to compare rates, not counts, which means we need to know the population. In the original format, there is no easy way to add a population variable. It has to be stored in a separate table, which makes it hard to correctly match populations to counts. In tidy form, adding variables for population and rate is easy because they're just additional columns.
In this case, we could also do the transformation in a single step by supplying multiple column names to `names_to` and also supplying a grouped regular expression to `names_pattern`: ```{r} tb %>% pivot_longer( !c(iso2, year), names_to = c("sex", "age"), names_pattern = "(.)(.+)", values_to = "n", values_drop_na = TRUE ) ``` ### Variables are stored in both rows and columns The most complicated form of messy data occurs when variables are stored in both rows and columns. The code below loads daily weather data from the Global Historical Climatology Network for one weather station (MX17004) in Mexico for five months in 2010. ```{r} weather <- as_tibble(read.csv("weather.csv", stringsAsFactors = FALSE)) weather ``` It has variables in individual columns (`id`, `year`, `month`), spread across columns (`day`, d1-d31) and across rows (`tmin`, `tmax`) (minimum and maximum temperature). Months with fewer than 31 days have structural missing values for the last day(s) of the month. To tidy this dataset we first use pivot_longer to gather the day columns: ```{r} weather2 <- weather %>% pivot_longer( d1:d31, names_to = "day", values_to = "value", values_drop_na = TRUE ) weather2 ``` For presentation, I've dropped the missing values, making them implicit rather than explicit. This is ok because we know how many days are in each month and can easily reconstruct the explicit missing values. We'll also do a little cleaning: ```{r} weather3 <- weather2 %>% mutate(day = as.integer(gsub("d", "", day))) %>% select(id, year, month, day, element, value) weather3 ``` This dataset is mostly tidy, but the `element` column is not a variable; it stores the names of variables. (Not shown in this example are the other meteorological variables `prcp` (precipitation) and `snow` (snowfall)). 
Fixing this requires widening the data: `pivot_wider()` is the inverse of `pivot_longer()`, pivoting `element` and `value` back out across multiple columns:
### One type in multiple tables It's also common to find data values about a single type of observational unit spread out over multiple tables or files. These tables and files are often split up by another variable, so that each represents a single year, person, or location. As long as the format for individual records is consistent, this is an easy problem to fix: 1. Read the files into a list of tables. 2. For each table, add a new column that records the original file name (the file name is often the value of an important variable). 3. Combine all tables into a single table. Purrr makes this straightforward in R. The following code generates a vector of file names in a directory (`data/`) which match a regular expression (ends in `.csv`). Next we name each element of the vector with the name of the file. We do this because will preserve the names in the following step, ensuring that each row in the final data frame is labeled with its source. Finally, `map_dfr()` loops over each path, reading in the csv file and combining the results into a single data frame. ```{r, eval = FALSE} library(purrr) paths <- dir("data", pattern = "\\.csv$", full.names = TRUE) names(paths) <- basename(paths) map_dfr(paths, read.csv, stringsAsFactors = FALSE, .id = "filename") ``` Once you have a single table, you can perform additional tidying as needed. An example of this type of cleaning can be found at which takes 129 yearly baby name tables provided by the US Social Security Administration and combines them into a single file. A more complicated situation occurs when the dataset structure changes over time. For example, the datasets may contain different variables, the same variables with different names, different file formats, or different conventions for missing values. This may require you to tidy each file to individually (or, if you're lucky, in small groups) and then combine them once tidied. 
An example of this type of tidying is illustrated in <https://github.com/hadley/fueleconomy>, which shows the tidying of EPA fuel economy data
We'll see shortly this is particularly convenient when you have other per-group objects. The opposite of `nest()` is `unnest()`. You give it the name of a list-column containing data frames, and it row-binds the data frames together, repeating the outer columns the right number of times to line up. ```{r} df1 %>% unnest(data) ``` ## Nested data and models Nested data is a great fit for problems where you have one of _something_ for each group. A common place this arises is when you're fitting multiple models. ```{r} mtcars_nested <- mtcars %>% group_by(cyl) %>% nest() mtcars_nested ``` Once you have a list of data frames, it's very natural to produce a list of models: ```{r} mtcars_nested <- mtcars_nested %>% mutate(model = map(data, function(df) lm(mpg ~ wt, data = df))) mtcars_nested ``` And then you could even produce a list of predictions: ```{r} mtcars_nested <- mtcars_nested %>% mutate(model = map(model, predict)) mtcars_nested ``` This workflow works particularly well in conjunction with [broom](https://broom.tidymodels.org/), which makes it easy to turn models into tidy data frames which can then be `unnest()`ed to get back to flat data frames. You can see a bigger example in the [broom and dplyr vignette](https://broom.tidymodels.org/articles/broom_and_dplyr.html). tidyr/vignettes/pivot.Rmd0000644000176200001440000006441314553565751015252 0ustar liggesusers--- title: "Pivoting" output: rmarkdown::html_vignette description: Learn how use the new `pivot_longer()` and `pivot_wider()` functions which change the representation of a dataset without changing the data it contains. vignette: > %\VignetteIndexEntry{Pivoting} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(tibble.print_max = 10) ``` ## Introduction This vignette describes the use of the new `pivot_longer()` and `pivot_wider()` functions. 
Their goal is to improve the usability of `gather()` and `spread()`, and incorporate state-of-the-art features found in other packages. For some time, it's been obvious that there is something fundamentally wrong with the design of `spread()` and `gather()`. Many people don't find the names intuitive and find it hard to remember which direction corresponds to spreading and which to gathering. It also seems surprisingly hard to remember the arguments to these functions, meaning that many people (including me!) have to consult the documentation every time. There are two important new features inspired by other R packages that have been advancing reshaping in R: * `pivot_longer()` can work with multiple value variables that may have different types, inspired by the enhanced `melt()` and `dcast()` functions provided by the [data.table][data.table] package by Matt Dowle and Arun Srinivasan. * `pivot_longer()` and `pivot_wider()` can take a data frame that specifies precisely how metadata stored in column names becomes data variables (and vice versa), inspired by the [cdata][cdata] package by John Mount and Nina Zumel. In this vignette, you'll learn the key ideas behind `pivot_longer()` and `pivot_wider()` as you see them used to solve a variety of data reshaping challenges ranging from simple to complex. To begin we'll load some needed packages. In real analysis code, I'd imagine you'd do with the `library(tidyverse)`, but I can't do that here since this vignette is embedded in a package. ```{r setup, message = FALSE} library(tidyr) library(dplyr) library(readr) ``` ## Longer `pivot_longer()` makes datasets __longer__ by increasing the number of rows and decreasing the number of columns. I don't believe it makes sense to describe a dataset as being in "long form". Length is a relative term, and you can only say (e.g.) that dataset A is longer than dataset B. 
`pivot_longer()` is commonly needed to tidy wild-caught datasets as they often optimise for ease of data entry or ease of comparison rather than ease of analysis. The following sections show how to use `pivot_longer()` for a wide range of realistic datasets. ### String data in column names {#pew} The `relig_income` dataset stores counts based on a survey which (among other things) asked people about their religion and annual income: ```{r} relig_income ``` This dataset contains three variables: * `religion`, stored in the rows, * `income` spread across the column names, and * `count` stored in the cell values. To tidy it we use `pivot_longer()`: ```{r} relig_income %>% pivot_longer( cols = !religion, names_to = "income", values_to = "count" ) ``` * The first argument is the dataset to reshape, `relig_income`. * `cols` describes which columns need to be reshaped. In this case, it's every column apart from `religion`. * `names_to` gives the name of the variable that will be created from the data stored in the column names, i.e. `income`. * `values_to` gives the name of the variable that will be created from the data stored in the cell value, i.e. `count`. Neither the `names_to` nor the `values_to` column exists in `relig_income`, so we provide them as strings surrounded by quotes. ### Numeric data in column names {#billboard} The `billboard` dataset records the billboard rank of songs in the year 2000. It has a form similar to the `relig_income` data, but the data encoded in the column names is really a number, not a string. ```{r} billboard ``` We can start with the same basic specification as for the `relig_income` dataset. Here we want the names to become a variable called `week`, and the values to become a variable called `rank`. I also use `values_drop_na` to drop rows that correspond to missing values. Not every song stays in the charts for all 76 weeks, so the structure of the input data force the creation of unnecessary explicit `NA`s. 
```{r} billboard %>% pivot_longer( cols = starts_with("wk"), names_to = "week", values_to = "rank", values_drop_na = TRUE ) ``` It would be nice to easily determine how long each song stayed in the charts, but to do that, we'll need to convert the `week` variable to an integer. We can do that by using two additional arguments: `names_prefix` strips off the `wk` prefix, and `names_transform` converts `week` into an integer: ```{r, eval = FALSE} billboard %>% pivot_longer( cols = starts_with("wk"), names_to = "week", names_prefix = "wk", names_transform = as.integer, values_to = "rank", values_drop_na = TRUE, ) ``` Alternatively, you could do this with a single argument by using `readr::parse_number()` which automatically strips non-numeric components: ```{r, eval = FALSE} billboard %>% pivot_longer( cols = starts_with("wk"), names_to = "week", names_transform = readr::parse_number, values_to = "rank", values_drop_na = TRUE, ) ``` ### Many variables in column names A more challenging situation occurs when you have multiple variables crammed into the column names. For example, take the `who` dataset: ```{r} who ``` `country`, `iso2`, `iso3`, and `year` are already variables, so they can be left as is. But the columns from `new_sp_m014` to `newrel_f65` encode four variables in their names: * The `new_`/`new` prefix indicates these are counts of new cases. This dataset only contains new cases, so we'll ignore it here because it's constant. * `sp`/`rel`/`ep` describe how the case was diagnosed. * `m`/`f` gives the gender. * `014`/`1524`/`2535`/`3544`/`4554`/`65` supplies the age range. We can break these variables up by specifying multiple column names in `names_to`, and then either providing `names_sep` or `names_pattern`. Here `names_pattern` is the most natural fit. It has a similar interface to `extract`: you give it a regular expression containing groups (defined by `()`) and it puts each group in a column. 
```{r} who %>% pivot_longer( cols = new_sp_m014:newrel_f65, names_to = c("diagnosis", "gender", "age"), names_pattern = "new_?(.*)_(.)(.*)", values_to = "count" ) ``` We could go one step further use readr functions to convert the gender and age to factors. I think this is good practice when you have categorical variables with a known set of values. ```{r, eval = FALSE} who %>% pivot_longer( cols = new_sp_m014:newrel_f65, names_to = c("diagnosis", "gender", "age"), names_pattern = "new_?(.*)_(.)(.*)", names_transform = list( gender = ~ readr::parse_factor(.x, levels = c("f", "m")), age = ~ readr::parse_factor( .x, levels = c("014", "1524", "2534", "3544", "4554", "5564", "65"), ordered = TRUE ) ), values_to = "count", ) ``` Doing it this way is a little more efficient than doing a mutate after the fact, `pivot_longer()` only has to transform one occurence of each name where a `mutate()` would need to transform many repetitions. ### Multiple observations per row So far, we have been working with data frames that have one observation per row, but many important pivoting problems involve multiple observations per row. You can usually recognise this case because name of the column that you want to appear in the output is part of the column name in the input. In this section, you'll learn how to pivot this sort of data. The following example is adapted from the [data.table vignette](https://CRAN.R-project.org/package=data.table/vignettes/datatable-reshape.html), as inspiration for tidyr's solution to this problem. ```{r} household ``` Note that we have two pieces of information (or values) for each child: their `name` and their `dob` (date of birth). These need to go into separate columns in the result. Again we supply multiple variables to `names_to`, using `names_sep` to split up each variable name. 
Note the special name `.value`: this tells `pivot_longer()` that that part of the column name specifies the "value" being measured (which will become a variable in the output). ```{r} household %>% pivot_longer( cols = !family, names_to = c(".value", "child"), names_sep = "_", values_drop_na = TRUE ) ``` Note the use of `values_drop_na = TRUE`: the input shape forces the creation of explicit missing variables for observations that don't exist. A similar problem problem also exists in the `anscombe` dataset built in to base R: ```{r} anscombe ``` This dataset contains four pairs of variables (`x1` and `y1`, `x2` and `y2`, etc) that underlie Anscombe's quartet, a collection of four datasets that have the same summary statistics (mean, sd, correlation etc), but have quite different data. We want to produce a dataset with columns `set`, `x` and `y`. ```{r} anscombe %>% pivot_longer( cols = everything(), cols_vary = "slowest", names_to = c(".value", "set"), names_pattern = "(.)(.)" ) ``` Setting `cols_vary` to `"slowest"` groups the values from columns `x1` and `y1` together in the rows of the output before moving on to `x2` and `y2`. This argument often produces more intuitively ordered output when you are pivoting every column in your dataset. A similar situation can arise with panel data. For example, take this example dataset provided by [Thomas Leeper](https://github.com/gesistsa/rio/issues/193). We can tidy it using the same approach as for `anscombe`: ```{r} pnl <- tibble( x = 1:4, a = c(1, 1,0, 0), b = c(0, 1, 1, 1), y1 = rnorm(4), y2 = rnorm(4), z1 = rep(3, 4), z2 = rep(-2, 4), ) pnl %>% pivot_longer( cols = !c(x, a, b), names_to = c(".value", "time"), names_pattern = "(.)(.)" ) ``` ## Wider `pivot_wider()` is the opposite of `pivot_longer()`: it makes a dataset __wider__ by increasing the number of columns and decreasing the number of rows. 
It's relatively rare to need `pivot_wider()` to make tidy data, but it's often useful for creating summary tables for presentation, or data in a format needed by other tools. ### Capture-recapture data The `fish_encounters` dataset, contributed by [Myfanwy Johnston](https://fishsciences.github.io/post/visualizing-fish-encounter-histories/), describes when fish swimming down a river are detected by automatic monitoring stations: ```{r} fish_encounters ``` Many tools used to analyse this data need it in a form where each station is a column: ```{r} fish_encounters %>% pivot_wider( names_from = station, values_from = seen ) ``` This dataset only records when a fish was detected by the station - it doesn't record when it wasn't detected (this is common with this type of data). That means the output data is filled with `NA`s. However, in this case we know that the absence of a record means that the fish was not `seen`, so we can ask `pivot_wider()` to fill these missing values in with zeros: ```{r} fish_encounters %>% pivot_wider( names_from = station, values_from = seen, values_fill = 0 ) ``` ### Aggregation You can also use `pivot_wider()` to perform simple aggregation. For example, take the `warpbreaks` dataset built in to base R (converted to a tibble for the better print method): ```{r} warpbreaks <- warpbreaks %>% as_tibble() %>% select(wool, tension, breaks) warpbreaks ``` This is a designed experiment with nine replicates for every combination of `wool` (`A` and `B`) and `tension` (`L`, `M`, `H`): ```{r} warpbreaks %>% count(wool, tension) ``` What happens if we attempt to pivot the levels of `wool` into the columns? ```{r} warpbreaks %>% pivot_wider( names_from = wool, values_from = breaks ) ``` We get a warning that each cell in the output corresponds to multiple cells in the input. The default behaviour produces list-columns, which contain all the individual values. A more useful output would be summary statistics, e.g. 
`mean` breaks for each combination of wool and tension: ```{r} warpbreaks %>% pivot_wider( names_from = wool, values_from = breaks, values_fn = mean ) ``` For more complex summary operations, I recommend summarising before reshaping, but for simple cases it's often convenient to summarise within `pivot_wider()`. ### Generate column name from multiple variables Imagine, as in , that we have information containing the combination of product, country, and year. In tidy form it might look like this: ```{r} production <- expand_grid( product = c("A", "B"), country = c("AI", "EI"), year = 2000:2014 ) %>% filter((product == "A" & country == "AI") | product == "B") %>% mutate(production = rnorm(nrow(.))) production ``` We want to widen the data so we have one column for each combination of `product` and `country`. The key is to specify multiple variables for `names_from`: ```{r} production %>% pivot_wider( names_from = c(product, country), values_from = production ) ``` When either `names_from` or `values_from` select multiple variables, you can control how the column names in the output constructed with `names_sep` and `names_prefix`, or the workhorse `names_glue`: ```{r} production %>% pivot_wider( names_from = c(product, country), values_from = production, names_sep = ".", names_prefix = "prod." ) production %>% pivot_wider( names_from = c(product, country), values_from = production, names_glue = "prod_{product}_{country}" ) ``` ### Tidy census The `us_rent_income` dataset contains information about median income and rent for each state in the US for 2017 (from the American Community Survey, retrieved with the [tidycensus][tidycensus] package). ```{r} us_rent_income ``` Here both `estimate` and `moe` are values columns, so we can supply them to `values_from`: ```{r} us_rent_income %>% pivot_wider( names_from = variable, values_from = c(estimate, moe) ) ``` Note that the name of the variable is automatically appended to the output columns. 
### Implicit missing values Occasionally, you'll come across data where your names variable is encoded as a factor, but not all of the data will be represented. ```{r} weekdays <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") daily <- tibble( day = factor(c("Tue", "Thu", "Fri", "Mon"), levels = weekdays), value = c(2, 3, 1, 5) ) daily ``` `pivot_wider()` defaults to generating columns from the values that are actually represented in the data, but you might want to include a column for each possible level in case the data changes in the future. ```{r} daily %>% pivot_wider( names_from = day, values_from = value ) ``` The `names_expand` argument will turn implicit factor levels into explicit ones, forcing them to be represented in the result. It also sorts the column names using the level order, which produces more intuitive results in this case. ```{r} daily %>% pivot_wider( names_from = day, values_from = value, names_expand = TRUE ) ``` If multiple `names_from` columns are provided, `names_expand` will generate a Cartesian product of all possible combinations of the `names_from` values. Notice that the following data has omitted some rows where the percentage value would be `0`. `names_expand` allows us to make those explicit during the pivot. ```{r} percentages <- tibble( year = c(2018, 2019, 2020, 2020), type = factor(c("A", "B", "A", "B"), levels = c("A", "B")), percentage = c(100, 100, 40, 60) ) percentages percentages %>% pivot_wider( names_from = c(year, type), values_from = percentage, names_expand = TRUE, values_fill = 0 ) ``` A related problem can occur when there are implicit missing factor levels or combinations in the `id_cols`. In this case, there are missing rows (rather than columns) that you'd like to explicitly represent. For this example, we'll modify our `daily` data with a `type` column, and pivot on that instead, keeping `day` as an id column. 
```{r} daily <- mutate(daily, type = factor(c("A", "B", "B", "A"))) daily ``` All of our `type` levels are represented in the columns, but we are missing some rows related to the unrepresented `day` factor levels. ```{r} daily %>% pivot_wider( names_from = type, values_from = value, values_fill = 0 ) ``` We can use `id_expand` in the same way that we used `names_expand`, which will expand out (and sort) the implicit missing rows in the `id_cols`. ```{r} daily %>% pivot_wider( names_from = type, values_from = value, values_fill = 0, id_expand = TRUE ) ``` ### Unused columns Imagine you've found yourself in a situation where you have columns in your data that are completely unrelated to the pivoting process, but you'd still like to retain their information somehow. For example, in `updates` we'd like to pivot on the `system` column to create one row summaries of each county's system updates. ```{r} updates <- tibble( county = c("Wake", "Wake", "Wake", "Guilford", "Guilford"), date = c(as.Date("2020-01-01") + 0:2, as.Date("2020-01-03") + 0:1), system = c("A", "B", "C", "A", "C"), value = c(3.2, 4, 5.5, 2, 1.2) ) updates ``` We could do that with a typical `pivot_wider()` call, but we completely lose all information about the `date` column. ```{r} updates %>% pivot_wider( id_cols = county, names_from = system, values_from = value ) ``` For this example, we'd like to retain the most recent update date across all systems in a particular county. To accomplish that we can use the `unused_fn` argument, which allows us to summarize values from the columns not utilized in the pivoting process. ```{r} updates %>% pivot_wider( id_cols = county, names_from = system, values_from = value, unused_fn = list(date = max) ) ``` You can also retain the data but delay the aggregation entirely by using `list()` as the summary function. 
```{r} updates %>% pivot_wider( id_cols = county, names_from = system, values_from = value, unused_fn = list(date = list) ) ``` ### Contact list A final challenge is inspired by [Jiena Gu](https://github.com/jienagu/tidyverse_examples/blob/master/example_long_wide.R). Imagine you have a contact list that you've copied and pasted from a website: ```{r} contacts <- tribble( ~field, ~value, "name", "Jiena McLellan", "company", "Toyota", "name", "John Smith", "company", "google", "email", "john@google.com", "name", "Huxley Ratcliffe" ) ``` This is challenging because there's no variable that identifies which observations belong together. We can fix this by noting that every contact starts with a name, so we can create a unique id by counting every time we see "name" as the `field`: ```{r} contacts <- contacts %>% mutate( person_id = cumsum(field == "name") ) contacts ``` Now that we have a unique identifier for each person, we can pivot `field` and `value` into the columns: ```{r} contacts %>% pivot_wider( names_from = field, values_from = value ) ``` ## Longer, then wider Some problems can't be solved by pivoting in a single direction. The examples in this section show how you might combine `pivot_longer()` and `pivot_wider()` to solve more complex problems. ### World bank `world_bank_pop` contains data from the World Bank about population per country from 2000 to 2018. ```{r} world_bank_pop ``` My goal is to produce a tidy dataset where each variable is in a column. It's not obvious exactly what steps are needed yet, but I'll start with the most obvious problem: year is spread across multiple columns. ```{r} pop2 <- world_bank_pop %>% pivot_longer( cols = `2000`:`2017`, names_to = "year", values_to = "value" ) pop2 ``` Next we need to consider the `indicator` variable: ```{r} pop2 %>% count(indicator) ``` Here `SP.POP.GROW` is population growth, `SP.POP.TOTL` is total population, and `SP.URB.*` are the same but only for urban areas. 
Let's split this up into two variables: `area` (total or urban) and the actual variable (population or growth): ```{r} pop3 <- pop2 %>% separate(indicator, c(NA, "area", "variable")) pop3 ``` Now we can complete the tidying by pivoting `variable` and `value` to make `TOTL` and `GROW` columns: ```{r} pop3 %>% pivot_wider( names_from = variable, values_from = value ) ``` ### Multi-choice Based on a suggestion by [Maxime Wack](https://github.com/MaximeWack), ), the final example shows how to deal with a common way of recording multiple choice data. Often you will get such data as follows: ```{r} multi <- tribble( ~id, ~choice1, ~choice2, ~choice3, 1, "A", "B", "C", 2, "C", "B", NA, 3, "D", NA, NA, 4, "B", "D", NA ) ``` But the actual order isn't important, and you'd prefer to have the individual questions in the columns. You can achieve the desired transformation in two steps. First, you make the data longer, eliminating the explicit `NA`s, and adding a column to indicate that this choice was chosen: ```{r} multi2 <- multi %>% pivot_longer( cols = !id, values_drop_na = TRUE ) %>% mutate(checked = TRUE) multi2 ``` Then you make the data wider, filling in the missing observations with `FALSE`: ```{r} multi2 %>% pivot_wider( id_cols = id, names_from = value, values_from = checked, values_fill = FALSE ) ``` ## Manual specs The arguments to `pivot_longer()` and `pivot_wider()` allow you to pivot a wide range of datasets. But the creativity that people apply to their data structures is seemingly endless, so it's quite possible that you will encounter a dataset that you can't immediately see how to reshape with `pivot_longer()` and `pivot_wider()`. To gain more control over pivoting, you can instead create a "spec" data frame that describes exactly how data stored in the column names becomes variables (and vice versa). This section introduces you to the spec data structure, and show you how to use it when `pivot_longer()` and `pivot_wider()` are insufficient. 
### Longer To see how this works, lets return to the simplest case of pivoting applied to the `relig_income` dataset. Now pivoting happens in two steps: we first create a spec object (using `build_longer_spec()`) then use that to describe the pivoting operation: ```{r} spec <- relig_income %>% build_longer_spec( cols = !religion, names_to = "income", values_to = "count" ) pivot_longer_spec(relig_income, spec) ``` (This gives the same result as before, just with more code. There's no need to use it here, it is presented as a simple example for using `spec`.) What does `spec` look like? It's a data frame with one row for each column in the wide format version of the data that is not present in the long format, and two special columns that start with `.`: * `.name` gives the name of the column. * `.value` gives the name of the column that the values in the cells will go into. There is also one column in `spec` for each column present in the long format of the data that is not present in the wide format of the data. This corresponds to the `names_to` argument in `pivot_longer()` and `build_longer_spec()` and the `names_from` argument in `pivot_wider()` and `build_wider_spec()`. In this example, the income column is a character vector of the names of columns being pivoted. ```{r} spec ``` ### Wider Below we widen `us_rent_income` with `pivot_wider()`. The result is ok, but I think it could be improved: ```{r} us_rent_income %>% pivot_wider( names_from = variable, values_from = c(estimate, moe) ) ``` I think it would be better to have columns `income`, `rent`, `income_moe`, and `rent_moe`, which we can achieve with a manual spec. 
The current spec looks like this: ```{r} spec1 <- us_rent_income %>% build_wider_spec( names_from = variable, values_from = c(estimate, moe) ) spec1 ``` For this case, we mutate `spec` to carefully construct the column names: ```{r} spec2 <- spec1 %>% mutate( .name = paste0(variable, ifelse(.value == "moe", "_moe", "")) ) spec2 ``` Supplying this spec to `pivot_wider()` gives us the result we're looking for: ```{r} us_rent_income %>% pivot_wider_spec(spec2) ``` ### By hand Sometimes it's not possible (or not convenient) to compute the spec, and instead it's more convenient to construct the spec "by hand". For example, take this `construction` data, which is lightly modified from Table 5 "completions" found at : ```{r} construction ``` This sort of data is not uncommon from government agencies: the column names actually belong to different variables, and here we have summaries for number of units (1, 2-4, 5+) and regions of the country (NE, NW, midwest, S, W). We can most easily describe that with a tibble: ```{r} spec <- tribble( ~.name, ~.value, ~units, ~region, "1 unit", "n", "1", NA, "2 to 4 units", "n", "2-4", NA, "5 units or more", "n", "5+", NA, "Northeast", "n", NA, "Northeast", "Midwest", "n", NA, "Midwest", "South", "n", NA, "South", "West", "n", NA, "West", ) ``` Which yields the following longer form: ```{r} construction %>% pivot_longer_spec(spec) ``` Note that there is no overlap between the `units` and `region` variables; here the data would really be most naturally described in two independent tables. ### Theory One neat property of the `spec` is that you need the same spec for `pivot_longer()` and `pivot_wider()`. 
This makes it very clear that the two operations are symmetric: ```{r} construction %>% pivot_longer_spec(spec) %>% pivot_wider_spec(spec) ``` The pivoting spec allows us to be more precise about exactly how `pivot_longer(df, spec = spec)` changes the shape of `df`: it will have `nrow(df) * nrow(spec)` rows, and `ncol(df) - nrow(spec) + ncol(spec) - 2` columns. [cdata]: https://winvector.github.io/cdata/ [data.table]: https://github.com/Rdatatable/data.table/wiki [tidycensus]: https://walker-data.com/tidycensus/ tidyr/vignettes/classroom.csv0000644000176200001440000000015414013466035016136 0ustar liggesusers"name","quiz1","quiz2","test1" "Billy",NA,"D","C" "Suzy","F",NA,NA "Lionel","B","C","B" "Jenny","A","A","B" tidyr/vignettes/programming.Rmd0000644000176200001440000001173614553565525016432 0ustar liggesusers--- title: "Programming with tidyr" output: rmarkdown::html_vignette description: | Notes on programming with tidy evaluation as it relates to tidyr. vignette: > %\VignetteIndexEntry{Programming with tidyr} %\VignetteEngine{knitr::rmarkdown} %\usepackage[utf8]{inputenc} --- ```{r setup, echo = FALSE, message = FALSE} knitr::opts_chunk$set(collapse = TRUE, comment = "#>") options(tibble.print_min = 6L, tibble.print_max = 6L) set.seed(1014) # Manually "import"; only needed for old dplyr which uses old tidyselect # which doesn't attach automatically in tidy-select contexts all_of <- tidyselect::all_of ``` ## Introduction Most tidyr verbs use **tidy evaluation** to make interactive data exploration fast and fluid. Tidy evaluation is a special type of non-standard evaluation used throughout the tidyverse. Here's some typical tidyr code: ```{r} library(tidyr) iris %>% nest(data = !Species) ``` Tidy evaluation is why we can use `!Species` to say "all the columns except `Species`", without having to quote the column name (`"Species"`) or refer to the enclosing data frame (`iris$Species`). 
Two basic forms of tidy evaluation are used in tidyr: * **Tidy selection**: `drop_na()`, `fill()`, `pivot_longer()`/`pivot_wider()`, `nest()`/`unnest()`, `separate()`/`extract()`, and `unite()` let you select variables based on position, name, or type (e.g. `1:3`, `starts_with("x")`, or `is.numeric`). Literally, you can use all the same techniques as with `dplyr::select()`. * **Data masking**: `expand()`, `crossing()` and `nesting()` let you use data variables as if they were variables in the environment (i.e. you write `my_variable` not `df$my_variable`). We focus on tidy selection here, since it's the most common. You can learn more about data masking in the equivalent vignette in dplyr: <https://dplyr.tidyverse.org/articles/programming.html>. For other considerations when writing tidyr code in packages, please see `vignette("in-packages")`. We've pointed out that tidyr's tidy evaluation interface is optimized for interactive exploration. The flip side is that this adds some challenges to indirect use, i.e. when you're working inside a `for` loop or a function. This vignette shows you how to overcome those challenges. We'll first go over the basics of tidy selection and data masking, talk about how to use them indirectly, and then show you a number of recipes to solve common problems. Before we go on, we reveal the version of tidyr we're using and make a small dataset to use in examples. ```{r} packageVersion("tidyr") mini_iris <- as_tibble(iris)[c(1, 2, 51, 52, 101, 102), ] mini_iris ``` ## Tidy selection Underneath all functions that use tidy selection is the [tidyselect](https://tidyselect.r-lib.org/) package. It provides a miniature domain specific language that makes it easy to select columns by name, position, or type. For example: * `select(df, 1)` selects the first column; `select(df, last_col())` selects the last column. * `select(df, c(a, b, c))` selects columns `a`, `b`, and `c`. 
* `select(df, starts_with("a"))` selects all columns whose name starts with "a"; `select(df, ends_with("z"))` selects all columns whose name ends with "z". * `select(df, where(is.numeric))` selects all numeric columns. You can see more details in `?tidyr_tidy_select`. ### Indirection Tidy selection makes a common task easier at the cost of making a less common task harder. When you want to use tidy select indirectly with the column specification stored in an intermediate variable, you'll need to learn some new tools. There are three main cases where this comes up: * When you have the tidy-select specification in a function argument, you must **embrace** the argument by surrounding it in doubled braces. ```{r} nest_egg <- function(df, cols) { nest(df, egg = {{ cols }}) } nest_egg(mini_iris, !Species) ``` * When you have a character vector of variable names, you must use `all_of()` or `any_of()` depending on whether you want the function to error if a variable is not found. These functions allow you to write for loops or a function that takes variable names as a character vector. ```{r} nest_egg <- function(df, cols) { nest(df, egg = all_of(cols)) } vars <- c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width") nest_egg(mini_iris, vars) ``` * In more complicated cases, you might want to use tidyselect directly: ```{r} sel_vars <- function(df, cols) { tidyselect::eval_select(rlang::enquo(cols), df) } sel_vars(mini_iris, !Species) ``` Learn more in `vignette("tidyselect")`. Note that many tidyr functions use `...` so you can easily select many variables, e.g. `fill(df, x, y, z)`. I now believe that the disadvantages of this approach outweigh the benefits, and that this interface would have been better as `fill(df, c(x, y, z))`. For new functions that select columns, please just use a single argument and not `...`. 
tidyr/vignettes/tb.csv0000755000176200001440000143643014013466035014557 0ustar liggesusers"iso2","year","m04","m514","m014","m1524","m2534","m3544","m4554","m5564","m65","mu","f04","f514","f014","f1524","f2534","f3544","f4554","f5564","f65","fu" "AD",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AD",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AD",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AD",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AD",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AD",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AD",1996,NA,NA,0,0,0,4,1,0,0,NA,NA,NA,0,1,1,0,0,1,0,NA "AD",1997,NA,NA,0,0,1,2,2,1,6,NA,NA,NA,0,1,2,3,0,0,1,NA "AD",1998,NA,NA,0,0,0,1,0,0,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AD",1999,NA,NA,0,0,0,1,1,0,0,NA,NA,NA,0,0,0,1,0,0,0,NA "AD",2000,NA,NA,0,0,1,0,0,0,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AD",2001,NA,NA,0,NA,NA,2,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AD",2002,NA,NA,0,0,0,1,0,0,0,NA,NA,NA,0,1,0,0,0,0,0,NA "AD",2003,NA,NA,0,0,0,1,2,0,0,NA,NA,NA,0,1,1,1,0,0,0,NA "AD",2004,NA,NA,0,0,0,1,1,0,0,NA,NA,NA,0,0,1,0,0,0,0,NA "AD",2005,0,0,0,0,1,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0 "AD",2006,0,0,0,1,1,2,0,1,1,0,0,0,0,0,1,0,1,0,0,0 "AD",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,1,NA,NA,NA "AD",2008,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,0 "AE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",1999,NA,NA,4,9,3,2,4,6,5,NA,NA,NA,9,11,5,3,0,3,2,NA "AE",2000,NA,NA,2,4,4,6,5,12,10,NA,NA,NA,3,16,1,3,0,0,4,NA "AE",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",2002,NA,NA,1,2,0,6,6,10,0,NA,NA,NA,3,3,8,3,4,10,1,NA "AE",2003,NA,NA,2,10,8,12,3,2,10,NA,NA,NA,4,9,5,3,3,2,4,NA "AE",2004,NA,NA,1,7,6,7,3,1,7,NA,NA,NA,3,6,2,7,2,2,3,NA "AE",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AE",2006,0,0,0,5,3,7,3,1,4,NA,0,2,2,6,4,5,3,4,5,NA "AE",2007,0,2,2,5,6,3,4,3,10,NA,0,1,1,8,6,3,2,0,0,NA "AE",2008,0,0,0,6,1,7,5,3,6,0,0,0,0,10,4,1,1,3,3,0 "AF",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AF",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AF",1997,NA,NA,0,10,6,3,5,2,0,NA,NA,NA,5,38,36,14,8,0,1,NA "AF",1998,NA,NA,30,129,128,90,89,64,41,NA,NA,NA,45,350,419,194,118,61,20,NA "AF",1999,NA,NA,8,55,55,47,34,21,8,NA,NA,NA,25,139,160,110,50,25,8,NA "AF",2000,NA,NA,52,228,183,149,129,94,80,NA,NA,NA,93,414,565,339,205,99,36,NA "AF",2001,NA,NA,129,379,349,274,204,139,103,NA,NA,NA,146,799,888,586,375,179,89,NA "AF",2002,NA,NA,90,476,481,368,246,241,189,NA,NA,NA,192,1119,1251,792,526,320,218,NA "AF",2003,NA,NA,127,511,436,284,256,288,203,NA,NA,NA,245,1152,1287,814,462,305,158,NA "AF",2004,NA,NA,139,537,568,360,358,386,310,NA,NA,NA,256,1360,1561,1096,645,413,256,NA "AF",2005,NA,NA,151,606,560,472,453,470,419,NA,NA,NA,320,1651,1959,1302,869,471,246,NA "AF",2006,NA,NA,193,837,791,574,572,572,410,NA,NA,NA,442,2139,2340,1654,1006,630,309,NA "AF",2007,NA,NA,186,856,840,597,566,630,507,NA,NA,NA,475,2224,2357,1708,1143,771,353,NA "AF",2008,NA,NA,187,941,773,545,570,630,575,NA,NA,NA,428,2094,2449,1614,1149,817,364,NA "AG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AG",1996,NA,NA,0,0,0,0,1,0,0,NA,NA,NA,0,0,0,1,0,0,0,NA "AG",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",2000,NA,NA,0,0,0,0,0,0,1,NA,NA,NA,1,1,1,0,0,0,0,NA "AG",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,1,0,0,0,0,0,NA "AG",2002,NA,NA,0,0,1,0,0,0,0,NA,NA,NA,2,0,1,0,0,0,0,NA "AG",2003,NA,NA,0,0,1,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "AG",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",2005,NA,NA,NA,NA,NA,1,1,NA,NA,NA,NA,NA,NA,2,2,NA,NA,NA,NA,NA "AG",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AG",2007,0,0,0,0,0,0,1,1,0,NA,0,0,0,0,0,0,0,0,0,NA "AG",2008,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AI",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA "AI",2004,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "AI",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AI",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1995,NA,NA,0,0,0,0,19,40,30,NA,NA,NA,0,1,0,0,13,20,16,NA "AL",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AL",1997,NA,NA,0,23,43,33,25,21,19,NA,NA,NA,1,16,19,14,9,10,8,NA "AL",1998,NA,NA,1,17,21,24,18,26,24,NA,NA,NA,2,19,11,12,13,11,13,NA "AL",1999,NA,NA,0,13,23,25,19,15,15,NA,NA,NA,0,5,13,11,5,8,16,NA "AL",2000,NA,NA,2,19,21,14,24,19,16,NA,NA,NA,3,11,10,8,8,5,11,NA "AL",2001,NA,NA,3,13,18,17,19,20,30,NA,NA,NA,1,12,10,5,7,7,9,NA "AL",2002,NA,NA,0,21,27,29,19,23,25,NA,NA,NA,2,20,19,9,6,8,17,NA "AL",2003,NA,NA,0,28,19,32,16,22,19,NA,NA,NA,2,13,8,6,14,12,20,NA "AL",2004,NA,NA,5,12,19,21,24,23,20,NA,NA,NA,2,12,12,8,11,10,22,NA 
"AL",2005,0,0,0,26,21,16,31,20,37,0,0,0,0,3,9,5,5,5,18,0 "AL",2006,1,4,5,24,19,22,21,19,20,0,0,2,2,12,8,7,7,7,13,0 "AL",2007,0,0,0,19,13,16,24,16,19,0,0,2,2,13,9,7,7,11,9,0 "AL",2008,1,0,1,23,26,13,19,13,16,NA,0,1,1,20,10,8,5,5,10,NA "AM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AM",1995,NA,NA,1,18,16,11,10,8,1,NA,NA,NA,1,1,7,2,1,1,NA,NA "AM",1996,NA,NA,2,53,100,51,43,30,7,NA,NA,NA,0,9,11,9,8,4,0,NA "AM",1997,NA,NA,2,85,59,77,51,34,12,NA,NA,NA,3,16,22,17,7,14,1,NA "AM",1998,NA,NA,2,159,90,79,39,17,15,NA,NA,NA,1,21,20,11,10,5,6,NA "AM",1999,NA,NA,4,151,88,115,76,37,20,NA,NA,NA,6,22,15,20,6,9,7,NA "AM",2000,NA,NA,2,152,130,131,63,26,21,NA,NA,NA,1,24,27,24,8,8,4,NA "AM",2001,NA,NA,2,154,120,93,54,24,9,NA,NA,NA,1,40,31,22,10,7,5,NA "AM",2002,NA,NA,1,95,63,109,93,21,14,NA,NA,NA,2,16,24,32,36,4,1,NA "AM",2003,NA,NA,12,120,98,75,104,49,18,NA,NA,NA,2,29,31,13,12,9,3,NA "AM",2004,NA,NA,2,130,72,76,62,25,24,NA,NA,NA,1,24,17,12,10,3,3,NA 
"AM",2005,1,2,3,170,104,83,84,30,24,0,0,3,3,27,21,10,11,4,7,0 "AM",2006,0,0,0,113,116,96,98,38,17,0,0,3,3,28,29,16,15,7,4,0 "AM",2007,0,1,1,81,87,100,92,29,20,0,0,2,2,31,22,11,7,7,7,0 "AM",2008,0,0,0,53,103,74,87,37,15,0,0,4,4,44,25,10,14,10,11,0 "AN",1996,NA,NA,0,0,0,0,1,1,1,NA,NA,NA,0,0,0,1,0,0,0,NA "AN",1997,NA,NA,0,0,1,1,0,1,3,NA,NA,NA,0,0,2,2,1,1,1,NA "AN",1998,NA,NA,0,0,0,0,0,1,2,NA,NA,NA,0,1,0,2,1,0,0,NA "AN",1999,NA,NA,0,0,1,0,1,0,0,NA,NA,NA,0,1,1,0,0,0,0,NA "AN",2000,NA,NA,0,0,1,2,0,0,0,NA,NA,NA,0,0,1,0,0,1,0,NA "AN",2001,NA,NA,0,0,1,5,0,0,0,NA,NA,NA,0,0,1,0,0,1,0,NA "AN",2002,NA,NA,0,1,1,3,2,3,1,NA,NA,NA,0,1,0,0,1,0,1,NA "AN",2003,NA,NA,0,0,2,1,0,0,3,NA,NA,NA,0,0,1,1,0,1,0,NA "AN",2004,NA,NA,1,1,0,4,3,0,1,NA,NA,NA,0,0,NA,1,0,0,0,NA "AN",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AN",2006,0,0,0,0,0,2,1,1,0,NA,0,1,1,0,0,0,0,0,0,NA "AN",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AN",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AO",1995,NA,NA,386,724,562,346,224,155,14,NA,NA,NA,371,707,443,264,248,130,18,NA "AO",1996,NA,NA,360,1036,926,1027,469,272,169,NA,NA,NA,388,1066,963,615,370,242,113,NA "AO",1997,NA,NA,419,913,1026,819,507,304,193,NA,NA,NA,431,1165,1007,627,411,234,145,NA "AO",1998,NA,NA,240,915,950,839,470,219,135,NA,NA,NA,297,1084,970,623,319,169,92,NA "AO",1999,NA,NA,391,1134,1237,973,518,314,234,NA,NA,NA,459,1197,1157,743,474,217,194,NA "AO",2000,NA,NA,186,999,1003,912,482,312,194,NA,NA,NA,247,1142,1091,844,417,200,120,NA "AO",2001,NA,NA,230,892,752,648,420,197,173,NA,NA,NA,279,993,869,647,323,200,182,NA "AO",2002,NA,NA,435,2223,2292,1915,1187,624,444,NA,NA,NA,640,2610,2208,1600,972,533,305,NA "AO",2003,NA,NA,409,2355,2598,1908,1090,512,361,NA,NA,NA,591,3078,2641,1747,1157,395,129,NA "AO",2004,NA,NA,554,2684,2659,1998,1196,561,321,NA,NA,NA,733,3198,2772,1854,1029,505,269,NA "AO",2005,NA,NA,520,2549,2797,1918,1255,665,461,NA,NA,NA,704,2926,2682,1797,1138,581,417,NA "AO",2006,NA,NA,540,2632,3049,2182,1397,729,428,NA,NA,NA,689,2851,2892,1990,1223,583,314,NA "AO",2007,NA,NA,484,2824,3197,2255,1357,699,465,NA,NA,NA,703,2943,2721,1812,1041,554,367,NA "AO",2008,NA,NA,367,2970,3493,2418,1480,733,420,0,NA,NA,512,3199,2786,2082,1209,556,337,0 "AR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AR",1996,NA,NA,113,611,535,338,276,230,286,NA,NA,NA,107,643,591,528,546,520,463,NA "AR",1997,NA,NA,96,580,600,523,489,417,393,NA,NA,NA,133,566,511,336,240,199,234,NA "AR",1998,NA,NA,84,578,571,463,476,387,370,NA,NA,NA,94,558,498,271,229,207,276,NA "AR",1999,NA,NA,90,490,546,435,464,384,380,NA,NA,NA,108,502,442,292,210,177,249,NA "AR",2000,NA,NA,97,278,594,402,419,368,330,NA,NA,NA,121,544,479,262,230,179,216,NA "AR",2001,NA,NA,78,682,611,495,471,404,436,NA,NA,NA,85,674,561,303,242,215,249,NA "AR",2002,NA,NA,70,612,658,463,477,389,399,NA,NA,NA,117,622,580,301,237,203,255,NA "AR",2003,NA,NA,89,574,565,413,461,366,405,NA,NA,NA,99,516,513,294,241,192,231,NA "AR",2004,NA,NA,64,588,543,399,387,332,345,NA,NA,NA,98,519,477,271,237,169,237,NA "AR",2005,NA,NA,64,621,530,358,384,340,348,NA,NA,NA,90,530,474,290,198,169,240,NA "AR",2006,19,48,67,519,484,360,351,346,321,NA,18,56,74,438,437,235,197,173,213,NA "AR",2007,14,63,77,656,623,401,415,389,324,NA,14,56,70,558,500,246,217,172,246,NA "AR",2008,11,58,69,633,611,390,416,364,295,30,5,61,66,536,506,252,221,157,204,8 "AS",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AS",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1997,NA,NA,1,0,0,1,1,1,1,NA,NA,NA,0,0,0,0,1,0,0,NA "AS",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",2000,NA,NA,NA,NA,NA,NA,1,1,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA "AS",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,1,NA,NA "AS",2002,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",2003,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA "AS",2004,NA,NA,NA,NA,NA,NA,NA,2,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA "AS",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,2,NA,NA,NA "AS",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,2,NA,NA,NA "AS",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AS",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AT",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1995,NA,NA,4,37,95,82,89,71,73,NA,NA,NA,6,22,52,32,21,18,59,NA "AT",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1997,NA,NA,24,59,117,170,168,122,185,NA,NA,NA,33,41,93,62,52,52,172,NA "AT",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AT",1999,NA,NA,0,13,40,54,52,37,49,NA,NA,NA,0,10,16,18,6,2,26,NA "AT",2000,NA,NA,1,17,30,59,42,23,41,NA,NA,NA,1,11,22,12,11,6,22,NA "AT",2001,NA,NA,1,15,27,39,37,37,27,NA,NA,NA,1,8,13,15,4,6,18,NA "AT",2002,NA,NA,1,8,14,32,43,20,25,NA,NA,NA,0,8,13,7,5,7,21,NA "AT",2003,NA,NA,0,19,31,37,43,19,28,NA,NA,NA,2,10,25,12,7,4,12,NA "AT",2004,NA,NA,1,19,19,38,27,24,21,NA,NA,NA,0,12,15,9,3,3,16,NA "AT",2005,1,0,1,32,23,22,41,24,30,0,0,0,0,13,11,8,3,5,10,0 "AT",2006,0,1,1,9,25,36,39,19,19,0,1,1,2,12,12,16,5,3,15,0 "AT",2007,NA,1,1,12,15,27,26,18,25,NA,1,1,2,10,14,7,11,2,19,NA "AT",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AU",1997,NA,NA,1,8,24,18,13,17,28,NA,NA,NA,0,10,15,9,5,10,12,NA "AU",1998,NA,NA,0,11,22,18,13,15,31,NA,NA,NA,2,19,24,15,8,2,24,NA "AU",1999,NA,NA,0,13,40,54,52,37,49,NA,NA,NA,0,10,16,18,6,2,26,NA "AU",2000,NA,NA,3,16,35,25,24,19,49,NA,NA,NA,0,15,19,12,15,5,14,NA "AU",2001,NA,NA,1,23,20,18,18,13,35,NA,NA,NA,1,21,27,16,7,8,20,NA "AU",2002,NA,NA,1,15,20,26,19,13,34,NA,NA,NA,0,15,21,15,6,4,23,NA "AU",2003,NA,NA,0,14,10,2,11,5,30,NA,NA,NA,0,9,13,3,5,4,7,NA "AU",2004,NA,NA,0,18,16,17,15,11,32,NA,NA,NA,0,6,17,5,7,3,19,NA "AU",2005,NA,NA,0,32,27,23,11,12,30,NA,NA,NA,2,18,26,11,10,6,14,NA "AU",2006,1,1,1,33,35,23,21,16,43,NA,1,1,2,18,27,14,7,9,21,NA "AU",2007,0,3,3,30,33,20,15,14,37,NA,0,4,4,26,37,20,12,7,23,NA "AU",2008,NA,NA,2,46,33,20,27,23,42,NA,NA,NA,3,27,32,14,6,11,10,NA "AZ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"AZ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",1995,NA,NA,0,13,29,14,6,4,1,NA,NA,NA,0,5,18,0,0,0,0,NA "AZ",1996,NA,NA,0,57,302,231,47,101,16,NA,NA,NA,5,20,154,86,16,13,10,NA "AZ",1997,NA,NA,0,120,244,194,89,12,3,NA,NA,NA,0,42,70,178,23,0,0,NA "AZ",1998,NA,NA,0,44,47,24,9,4,2,NA,NA,NA,0,7,10,7,6,1,0,NA "AZ",1999,NA,NA,0,96,46,217,184,49,0,NA,NA,NA,0,4,17,73,58,19,0,NA "AZ",2000,NA,NA,0,9,24,33,42,30,0,NA,NA,NA,0,3,3,6,3,0,0,NA "AZ",2001,NA,NA,3,1,3,NA,NA,NA,NA,NA,NA,NA,2,NA,1,NA,NA,NA,NA,NA "AZ",2002,NA,NA,6,290,433,359,190,72,16,NA,NA,NA,5,48,88,80,34,19,21,NA "AZ",2003,NA,NA,3,212,258,215,113,66,20,NA,NA,NA,1,72,60,62,48,26,5,NA "AZ",2004,NA,NA,8,248,311,222,167,92,83,NA,NA,NA,8,120,65,57,34,33,24,NA "AZ",2005,2,75,77,109,297,215,209,187,88,0,2,88,90,64,98,47,32,24,24,0 "AZ",2006,1,5,6,241,362,365,120,78,30,0,0,2,2,51,66,66,44,15,8,0 "AZ",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "AZ",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"BA",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BA",1995,NA,NA,0,15,61,90,140,139,100,NA,NA,NA,0,40,67,64,49,77,23,NA "BA",1996,NA,NA,5,30,48,62,51,60,39,NA,NA,NA,0,23,28,39,16,27,39,NA "BA",1997,NA,NA,5,48,84,99,66,36,74,NA,NA,NA,11,53,42,50,35,65,83,NA "BA",1998,NA,NA,1,28,75,85,75,55,75,NA,NA,NA,3,37,42,27,22,30,85,NA "BA",1999,NA,NA,2,44,76,113,89,60,68,NA,NA,NA,6,49,59,34,24,38,87,NA "BA",2000,NA,NA,4,56,82,99,66,58,77,NA,NA,NA,4,30,46,29,29,48,124,NA "BA",2001,NA,NA,6,39,70,110,89,53,99,NA,NA,NA,7,45,50,34,17,50,127,NA "BA",2002,NA,NA,1,36,48,70,69,33,63,NA,NA,NA,2,22,33,18,19,31,81,NA "BA",2003,NA,NA,4,32,42,49,52,45,50,NA,NA,NA,4,27,37,34,14,30,73,NA "BA",2004,NA,NA,3,66,79,95,82,77,115,NA,NA,NA,3,45,67,43,51,59,102,NA "BA",2005,0,1,1,22,58,61,78,44,80,1,0,2,2,35,39,33,28,28,130,0 "BA",2006,0,0,0,40,58,47,53,42,66,2,0,0,0,41,50,24,29,20,88,2 "BA",2007,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 "BA",2008,0,0,0,20,35,38,84,49,79,0,0,3,3,26,26,20,11,6,112,0 "BB",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"BB",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1996,NA,NA,0,0,1,0,1,1,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",2000,NA,NA,0,0,0,2,0,0,0,NA,NA,NA,0,0,1,0,0,0,0,NA "BB",2001,NA,NA,0,1,1,1,0,0,1,NA,NA,NA,0,0,0,1,1,0,0,NA "BB",2002,NA,NA,NA,2,NA,NA,1,NA,1,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA "BB",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",2004,NA,NA,NA,NA,2,2,6,NA,4,NA,NA,NA,1,NA,2,1,1,NA,1,NA "BB",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BB",2006,NA,NA,NA,NA,2,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,1,NA "BB",2007,0,0,0,0,0,0,5,0,0,NA,0,0,0,0,0,0,3,0,0,NA "BB",2008,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 "BD",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"BD",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BD",1995,NA,NA,29,505,983,1001,748,648,424,NA,NA,NA,64,309,546,360,236,132,38,NA "BD",1996,NA,NA,45,723,1322,1264,908,737,472,NA,NA,NA,89,434,663,434,268,151,41,NA "BD",1997,NA,NA,201,2639,5013,5226,3926,2901,2003,NA,NA,NA,328,1883,2634,1748,1029,527,236,NA "BD",1998,NA,NA,254,3516,5988,6242,4771,3522,2569,NA,NA,NA,438,2663,3319,2156,1212,649,324,NA "BD",1999,NA,NA,259,3504,5777,6143,4748,3568,2701,NA,NA,NA,405,2753,3276,2180,1315,712,333,NA "BD",2000,NA,NA,256,3640,5643,5750,4718,3667,2837,NA,NA,NA,495,3029,3238,2247,1315,778,370,NA "BD",2001,NA,NA,283,3976,5834,6257,5172,3682,3039,NA,NA,NA,428,3392,3538,2260,1492,763,371,NA "BD",2002,NA,NA,449,4490,6288,7038,5981,4493,3682,NA,NA,NA,575,3104,3926,2791,2101,988,865,NA "BD",2003,NA,NA,320,5166,7275,8058,6947,5501,4142,NA,NA,NA,544,4298,4282,3258,2086,1150,591,NA "BD",2004,NA,NA,420,6171,8281,8914,8327,6276,5144,NA,NA,NA,589,5081,4869,3758,2518,1434,718,NA "BD",2005,NA,NA,524,8170,10443,11423,11038,8476,7453,NA,NA,NA,751,6776,6785,5538,3960,2281,1230,NA "BD",2006,NA,NA,607,9937,12166,12889,13378,10283,9513,NA,NA,NA,850,8164,8048,6395,5020,2982,1735,NA "BD",2007,NA,NA,523,10210,12442,13003,13307,10653,9830,NA,NA,NA,829,8562,8164,6678,5220,3057,1818,NA "BD",2008,NA,NA,422,10618,12926,12761,13355,10772,10386,0,NA,NA,764,9189,8389,6465,5206,3173,1947,0 "BE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"BE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BE",1995,NA,NA,3,23,49,63,52,54,102,NA,NA,NA,3,12,24,32,17,10,34,NA "BE",1996,NA,NA,1,20,43,49,45,32,59,NA,NA,NA,4,9,30,25,11,10,26,NA "BE",1997,NA,NA,3,18,45,56,43,41,115,NA,NA,NA,2,11,26,22,13,11,28,NA "BE",1998,NA,NA,3,22,50,58,48,36,78,NA,NA,NA,2,6,30,20,17,13,35,NA "BE",1999,NA,NA,2,18,40,49,46,38,83,NA,NA,NA,4,20,38,19,22,8,16,NA "BE",2000,NA,NA,3,20,57,39,55,32,56,NA,NA,NA,6,15,15,19,4,13,27,NA "BE",2001,NA,NA,8,31,40,47,44,23,54,NA,NA,NA,6,14,21,24,15,7,18,NA "BE",2002,NA,NA,1,19,56,52,33,19,58,NA,NA,NA,6,21,19,16,9,16,16,NA "BE",2003,NA,NA,6,27,33,30,40,17,35,NA,NA,NA,5,20,17,15,11,2,7,NA "BE",2004,NA,NA,1,26,55,30,37,24,48,NA,NA,NA,6,18,16,20,3,8,14,NA "BE",2005,1,0,1,26,50,32,27,15,47,0,1,1,2,27,31,15,12,4,23,0 "BE",2006,3,1,4,26,52,38,45,27,42,0,3,3,6,25,25,18,6,7,22,0 "BE",2007,1,1,2,23,55,35,38,18,43,NA,3,1,4,13,31,23,7,8,22,NA "BE",2008,2,1,3,23,40,43,32,24,41,0,2,1,3,17,40,22,8,3,12,0 "BF",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"BF",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BF",1995,NA,NA,4,67,133,124,62,48,29,NA,NA,NA,7,76,53,39,26,11,10,NA "BF",1996,NA,NA,3,47,161,148,115,65,36,NA,NA,NA,10,51,97,67,58,31,10,NA "BF",1997,NA,NA,3,47,161,148,115,65,36,NA,NA,NA,10,51,97,67,58,31,10,NA "BF",1998,NA,NA,4,104,270,236,159,89,65,NA,NA,NA,9,83,98,86,82,32,14,NA "BF",1999,NA,NA,13,85,247,216,118,83,56,NA,NA,NA,8,67,141,92,63,39,20,NA "BF",2000,NA,NA,12,91,274,252,133,68,65,NA,NA,NA,7,59,128,101,45,38,14,NA "BF",2001,NA,NA,7,124,283,279,168,122,70,NA,NA,NA,17,80,155,100,49,32,32,NA "BF",2002,NA,NA,6,123,273,266,156,124,83,NA,NA,NA,12,85,159,104,80,30,25,NA "BF",2003,NA,NA,14,148,313,321,162,129,80,NA,NA,NA,19,102,131,132,70,46,36,NA "BF",2004,NA,NA,10,155,375,308,204,138,102,NA,NA,NA,22,109,196,148,72,54,33,NA "BF",2005,NA,NA,18,181,430,370,273,144,113,NA,NA,NA,15,125,248,174,109,54,40,NA "BF",2006,NA,NA,13,227,473,433,307,183,140,NA,NA,NA,33,155,252,198,99,99,47,NA "BF",2007,2,6,8,233,442,429,303,176,145,NA,4,25,29,157,243,187,129,88,45,NA "BF",2008,NA,NA,8,225,555,448,314,174,146,0,NA,NA,33,143,250,180,116,107,57,0 
"BG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BG",2000,NA,NA,0,13,16,20,3,9,10,NA,NA,NA,0,11,14,7,3,4,6,NA "BG",2001,NA,NA,1,15,20,23,23,18,13,NA,NA,NA,1,11,16,13,5,2,9,NA "BG",2002,NA,NA,2,62,86,116,132,58,56,NA,NA,NA,6,48,73,45,19,9,30,NA "BG",2003,NA,NA,3,99,169,178,200,121,89,NA,NA,NA,7,85,106,63,44,32,58,NA "BG",2004,NA,NA,10,97,156,166,204,153,111,NA,NA,NA,4,84,111,64,49,35,71,NA "BG",2005,0,9,9,98,150,195,195,150,136,0,0,9,9,90,111,59,29,37,70,0 "BG",2006,NA,NA,6,86,146,170,184,133,123,NA,NA,NA,12,76,96,86,34,24,59,NA "BG",2007,2,5,7,63,122,181,176,131,90,NA,2,2,4,63,77,53,38,29,46,NA 
"BG",2008,1,1,2,80,145,151,159,127,68,0,1,0,1,57,78,54,31,23,44,0 "BH",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BH",1995,NA,NA,0,0,1,2,3,1,3,NA,NA,NA,0,1,1,2,0,1,1,NA "BH",1996,NA,NA,0,8,25,24,16,7,6,NA,NA,NA,1,10,11,7,0,3,3,NA "BH",1997,NA,NA,1,11,32,19,10,4,10,NA,NA,NA,0,4,11,4,2,1,1,NA "BH",1998,NA,NA,0,3,40,36,20,13,22,NA,NA,NA,3,4,9,7,5,2,6,NA "BH",1999,NA,NA,0,12,19,14,13,5,3,NA,NA,NA,0,6,11,6,0,1,3,NA "BH",2000,NA,NA,0,0,3,2,5,3,4,NA,NA,NA,0,1,2,0,1,1,1,NA "BH",2001,NA,NA,0,1,2,2,6,1,6,NA,NA,NA,0,1,2,0,2,0,0,NA "BH",2002,NA,NA,0,1,1,2,2,1,5,NA,NA,NA,0,1,1,1,1,0,1,NA "BH",2003,NA,NA,0,2,2,1,1,3,4,NA,NA,NA,0,1,0,0,1,1,0,NA "BH",2004,NA,NA,0,0,0,2,2,0,1,NA,NA,NA,0,1,1,1,0,1,2,NA "BH",2005,NA,NA,0,0,0,2,3,0,4,NA,NA,NA,1,1,0,3,1,0,0,NA "BH",2006,NA,NA,0,10,25,11,18,1,1,NA,NA,NA,0,7,14,4,5,2,0,NA "BH",2007,NA,NA,0,8,26,15,8,4,3,NA,NA,NA,1,10,15,5,3,0,1,NA "BH",2008,0,0,0,17,48,27,8,2,3,0,0,0,0,12,16,8,1,0,0,0 "BI",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"BI",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",1995,NA,NA,5,128,238,224,73,32,19,NA,NA,NA,19,109,124,89,33,12,4,NA "BI",1996,NA,NA,16,217,283,274,116,41,17,NA,NA,NA,18,132,203,112,53,16,7,NA "BI",1997,NA,NA,21,208,446,431,198,79,32,NA,NA,NA,30,189,265,198,71,39,19,NA "BI",1998,NA,NA,45,301,527,530,319,102,33,NA,NA,NA,49,265,321,240,126,47,16,NA "BI",1999,NA,NA,64,349,566,492,281,102,57,NA,NA,NA,66,291,253,236,109,30,28,NA "BI",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BI",2001,NA,NA,34,344,559,469,238,75,39,NA,NA,NA,81,369,364,337,86,30,15,NA "BI",2002,NA,NA,16,310,470,520,270,97,52,NA,NA,NA,48,243,242,324,152,24,23,NA "BI",2003,NA,NA,32,348,572,488,260,106,35,NA,NA,NA,75,308,361,276,119,27,10,NA "BI",2004,NA,NA,24,352,674,468,292,78,48,NA,NA,NA,51,325,370,232,114,40,19,NA "BI",2005,NA,NA,34,352,591,525,372,111,55,NA,NA,NA,46,298,399,288,122,36,33,NA "BI",2006,NA,NA,30,347,600,488,320,114,64,NA,NA,NA,41,296,367,242,140,56,14,NA "BI",2007,0,0,26,425,637,542,372,177,88,NA,0,0,55,360,392,276,140,67,38,NA "BI",2008,NA,NA,30,430,684,526,459,175,80,NA,NA,NA,38,335,340,264,139,72,38,NA 
"BJ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",1995,NA,NA,14,186,352,306,176,101,92,NA,NA,NA,26,148,197,118,69,32,22,NA "BJ",1996,NA,NA,17,215,348,277,166,102,77,NA,NA,NA,34,169,218,136,46,42,21,NA "BJ",1997,NA,NA,20,215,376,306,180,76,107,NA,NA,NA,26,169,226,124,64,21,26,NA "BJ",1998,NA,NA,20,233,367,251,205,113,71,NA,NA,NA,15,189,242,159,65,39,19,NA "BJ",1999,NA,NA,14,250,444,293,207,124,85,NA,NA,NA,28,207,254,153,74,39,30,NA "BJ",2000,NA,NA,19,277,428,327,213,103,74,NA,NA,NA,36,239,275,149,76,45,25,NA "BJ",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",2002,NA,NA,16,248,489,304,231,125,94,NA,NA,NA,35,255,298,159,86,47,24,NA "BJ",2003,NA,NA,20,266,504,370,188,117,92,NA,NA,NA,32,226,304,150,93,47,25,NA "BJ",2004,NA,NA,16,308,529,344,229,125,82,NA,NA,NA,43,263,354,147,73,45,24,NA "BJ",2005,NA,NA,21,306,595,396,270,135,87,NA,NA,NA,25,249,331,145,89,51,39,NA "BJ",2006,NA,NA,18,298,624,465,247,124,106,NA,NA,NA,32,310,371,158,111,38,41,NA 
"BJ",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BJ",2008,NA,NA,20,333,604,439,284,163,100,0,NA,NA,38,245,386,173,99,52,30,0 "BM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",2004,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "BM",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BM",2006,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA "BM",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"BM",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",1999,NA,NA,0,16,42,30,11,25,31,NA,NA,NA,1,16,36,16,16,13,14,NA "BN",2000,NA,NA,0,6,4,15,5,7,15,NA,NA,NA,0,4,6,9,6,3,4,NA "BN",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BN",2002,NA,NA,2,15,15,7,6,8,14,NA,NA,NA,0,11,9,8,5,5,7,NA "BN",2003,NA,NA,0,5,25,17,8,8,9,NA,NA,NA,0,9,14,11,4,5,6,NA "BN",2004,NA,NA,0,10,13,12,16,11,10,NA,NA,NA,0,6,11,12,8,2,4,NA "BN",2005,NA,NA,0,9,19,19,12,9,0,NA,NA,NA,0,9,11,8,3,2,0,NA "BN",2006,NA,NA,2,10,11,12,13,10,11,NA,NA,NA,1,5,11,8,11,4,9,NA "BN",2007,0,0,0,5,10,15,21,10,17,NA,0,2,0,6,6,12,15,9,2,NA "BN",2008,0,0,0,10,10,12,21,6,23,0,0,1,1,6,11,8,7,7,10,0 "BO",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"BO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BO",1996,NA,NA,178,1359,887,636,525,321,324,NA,NA,NA,172,841,676,422,244,189,175,NA "BO",1997,NA,NA,150,1214,792,579,485,334,318,NA,NA,NA,202,792,548,380,285,192,187,NA "BO",1998,NA,NA,167,1254,885,579,488,364,353,NA,NA,NA,202,777,587,274,264,157,230,NA "BO",1999,NA,NA,222,1182,862,527,482,379,400,NA,NA,NA,225,798,554,352,263,205,222,NA "BO",2000,NA,NA,166,1182,797,518,466,340,366,NA,NA,NA,191,831,588,334,254,192,233,NA "BO",2001,NA,NA,165,1235,761,483,489,359,355,NA,NA,NA,241,915,664,302,226,194,283,NA "BO",2002,NA,NA,231,1235,787,492,417,356,386,NA,NA,NA,281,938,630,358,238,185,295,NA "BO",2003,NA,NA,156,1164,742,501,438,336,442,NA,NA,NA,167,811,549,313,224,196,305,NA "BO",2004,NA,NA,161,1205,750,505,431,319,399,NA,NA,NA,151,793,555,272,205,186,281,NA "BO",2005,NA,NA,157,1320,725,439,391,346,415,NA,NA,NA,160,846,533,276,226,182,262,NA "BO",2006,NA,NA,127,1147,699,471,390,333,398,NA,NA,NA,179,764,461,253,177,148,241,NA "BO",2007,NA,NA,116,1100,604,379,348,328,354,NA,NA,NA,125,736,453,243,193,162,259,NA 
"BO",2008,NA,NA,128,1253,734,412,391,363,429,NA,NA,NA,147,850,482,237,193,178,251,NA "BR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BR",1999,NA,NA,301,3662,5401,5827,4630,2634,2121,NA,NA,NA,372,2909,3450,2621,1661,1042,1106,NA "BR",2000,NA,NA,1894,7268,11568,11906,8623,5085,4494,NA,NA,NA,1859,6719,7215,5395,3582,2384,2496,NA "BR",2001,NA,NA,468,4455,5536,5184,4285,2353,1694,NA,NA,NA,516,3632,3303,2296,1701,1050,1018,NA "BR",2002,NA,NA,344,4695,5890,6325,4834,2738,2080,NA,NA,NA,380,3715,3584,2817,1755,1031,535,NA "BR",2003,NA,NA,382,4485,5709,6034,4863,2589,2057,NA,NA,NA,401,3582,3542,2540,1676,1001,1022,NA "BR",2004,NA,NA,337,5041,6321,6481,5157,2716,2169,NA,NA,NA,375,3684,3763,2742,1865,1041,1189,NA 
"BR",2005,NA,NA,317,5074,6119,6128,5259,2803,2140,NA,NA,NA,355,3496,3663,2626,1897,1112,1104,NA "BR",2006,136,207,343,4783,6098,6050,5042,2885,2221,NA,65,278,343,3132,3506,2569,1885,1121,1139,NA "BR",2007,175,196,371,4399,5990,5456,4878,2726,2075,NA,78,266,344,2952,3250,2327,1727,977,972,NA "BR",2008,115,183,298,4436,6173,5305,4854,2650,1905,0,69,287,356,2709,3233,2266,1669,964,879,0 "BS",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",1995,NA,NA,3,3,5,7,4,2,2,NA,NA,NA,0,1,7,2,0,0,1,NA "BS",1996,NA,NA,0,1,4,4,5,0,0,NA,NA,NA,0,2,4,4,0,1,1,NA "BS",1997,NA,NA,0,2,14,11,5,3,2,NA,NA,NA,0,2,5,7,4,0,2,NA "BS",1998,NA,NA,0,3,2,7,5,3,0,NA,NA,NA,1,1,3,5,1,0,0,NA "BS",1999,NA,NA,1,0,10,8,6,0,0,NA,NA,NA,0,1,3,4,2,0,0,NA "BS",2000,NA,NA,1,2,7,9,4,3,2,NA,NA,NA,2,5,7,8,2,3,1,NA "BS",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",2002,NA,NA,2,2,2,7,7,3,2,NA,NA,NA,4,1,6,3,3,1,1,NA "BS",2003,NA,NA,0,2,5,6,3,1,3,NA,NA,NA,2,2,2,4,3,2,3,NA 
"BS",2004,NA,NA,3,3,6,14,9,0,1,NA,NA,NA,0,3,4,1,0,3,0,NA "BS",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BS",2007,0,0,0,3,3,9,4,1,0,NA,0,0,0,3,4,3,1,1,0,NA "BS",2008,0,0,0,2,6,5,7,2,0,0,0,0,0,1,4,4,0,0,0,0 "BT",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BT",1995,NA,NA,2,42,65,36,35,24,11,NA,NA,NA,12,43,44,25,12,9,8,NA "BT",1996,NA,NA,4,51,45,43,22,12,8,NA,NA,NA,8,42,41,22,14,6,6,NA "BT",1997,NA,NA,4,39,42,36,21,24,8,NA,NA,NA,4,43,34,16,7,6,3,NA "BT",1998,NA,NA,3,45,39,24,24,22,9,NA,NA,NA,7,45,26,25,8,11,6,NA "BT",1999,NA,NA,10,27,42,31,29,22,14,NA,NA,NA,9,33,34,23,18,14,9,NA "BT",2000,NA,NA,6,65,41,30,24,12,2,NA,NA,NA,7,57,34,31,23,3,2,NA "BT",2001,NA,NA,3,51,50,37,32,10,16,NA,NA,NA,6,58,45,20,12,11,8,NA "BT",2002,NA,NA,5,54,51,32,26,22,19,NA,NA,NA,6,54,38,22,20,5,10,NA "BT",2003,NA,NA,9,62,50,20,25,20,13,NA,NA,NA,14,57,39,17,13,15,6,NA "BT",2004,NA,NA,1,54,52,28,27,18,23,NA,NA,NA,8,54,35,33,10,8,5,NA 
"BT",2005,NA,NA,1,47,58,26,23,14,12,NA,NA,NA,9,45,38,13,11,9,2,NA "BT",2006,NA,NA,0,65,55,22,20,12,11,NA,NA,NA,5,61,27,10,9,6,9,NA "BT",2007,NA,NA,2,60,44,29,26,17,13,NA,NA,NA,3,59,28,21,10,10,6,NA "BT",2008,NA,NA,3,85,46,12,18,13,14,NA,NA,NA,9,151,77,32,33,23,23,NA "BW",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1997,NA,NA,29,193,509,422,244,143,95,NA,NA,NA,28,291,359,181,97,60,35,NA "BW",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BW",1999,NA,NA,18,177,526,492,274,139,93,NA,NA,NA,46,274,434,225,90,30,37,NA "BW",2000,NA,NA,25,185,605,488,267,135,96,NA,NA,NA,37,335,469,262,98,57,36,NA "BW",2001,NA,NA,15,190,539,490,288,116,73,NA,NA,NA,33,328,493,309,116,46,23,NA "BW",2002,NA,NA,17,226,595,517,244,136,84,NA,NA,NA,45,393,566,290,144,54,26,NA "BW",2003,NA,NA,22,203,552,446,244,136,78,NA,NA,NA,32,338,524,276,104,52,43,NA 
"BW",2004,NA,NA,29,245,490,436,271,122,96,NA,NA,NA,49,358,544,290,110,52,36,NA "BW",2005,NA,NA,27,260,563,506,272,135,97,NA,NA,NA,45,321,491,253,97,55,48,NA "BW",2006,NA,NA,36,262,577,490,289,122,104,NA,NA,NA,54,326,507,259,133,55,38,NA "BW",2007,NA,NA,25,251,535,442,263,120,82,NA,NA,NA,46,347,430,254,123,47,37,NA "BW",2008,4,23,27,254,528,384,220,113,85,0,5,37,42,347,474,226,135,51,30,0 "BY",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BY",2001,NA,NA,2,NA,NA,NA,NA,NA,NA,NA,NA,NA,4,NA,NA,NA,NA,NA,NA,NA "BY",2002,NA,NA,0,66,133,217,159,75,51,NA,NA,NA,0,12,22,28,17,17,41,NA 
"BY",2003,NA,NA,0,67,134,243,226,96,60,NA,NA,NA,0,18,39,43,26,21,45,NA "BY",2004,NA,NA,NA,84,170,260,235,83,56,NA,NA,NA,1,31,38,38,35,11,67,NA "BY",2005,NA,NA,NA,71,180,273,287,118,62,NA,NA,NA,NA,25,53,50,43,11,62,NA "BY",2006,NA,NA,NA,61,134,217,260,96,71,NA,NA,1,1,32,38,43,43,18,58,NA "BY",2007,NA,NA,NA,57,142,205,244,110,56,NA,NA,NA,NA,28,58,41,35,17,58,NA "BY",2008,NA,NA,NA,44,149,207,261,106,69,NA,NA,NA,NA,26,43,36,34,20,65,NA "BZ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1995,NA,NA,1,1,2,4,0,1,1,NA,NA,NA,0,6,2,0,1,1,2,NA "BZ",1996,NA,NA,1,3,2,2,2,3,2,NA,NA,NA,0,1,4,0,0,0,0,NA "BZ",1997,NA,NA,2,2,6,3,10,3,6,NA,NA,NA,2,3,2,2,0,4,3,NA "BZ",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "BZ",2000,NA,NA,2,5,7,2,6,3,5,NA,NA,NA,0,2,1,2,4,1,4,NA "BZ",2001,NA,NA,0,0,5,9,7,9,11,NA,NA,NA,0,0,3,4,1,2,2,NA "BZ",2002,NA,NA,4,7,5,7,11,4,4,NA,NA,NA,3,5,6,3,4,4,4,NA 
"BZ",2003,NA,NA,1,4,8,10,3,2,8,NA,NA,NA,2,6,3,3,2,5,5,NA "BZ",2004,NA,NA,0,4,6,8,6,3,2,NA,NA,NA,1,0,1,1,2,0,0,NA "BZ",2005,NA,NA,0,8,8,6,8,5,3,NA,NA,NA,0,4,4,4,3,2,4,NA "BZ",2006,0,3,3,4,4,7,5,1,3,NA,0,2,2,6,5,3,5,6,6,NA "BZ",2007,1,0,1,6,8,8,7,6,3,NA,0,0,0,8,2,5,2,2,3,NA "BZ",2008,0,1,1,8,12,15,9,5,8,0,0,0,0,5,0,6,4,3,8,0 "CA",1980,NA,NA,12,54,75,83,100,108,186,NA,NA,NA,18,62,51,34,31,33,104,NA "CA",1981,NA,NA,8,49,61,64,87,103,141,NA,NA,NA,6,46,57,26,28,35,92,NA "CA",1982,NA,NA,6,52,66,69,90,91,150,NA,NA,NA,7,51,57,30,25,38,80,NA "CA",1983,NA,NA,9,47,63,62,90,92,123,NA,NA,NA,11,50,50,29,24,35,86,NA "CA",1984,NA,NA,3,44,75,58,68,83,169,NA,NA,NA,9,51,59,28,28,36,100,NA "CA",1985,NA,NA,11,42,70,59,77,81,168,NA,NA,NA,5,30,56,19,28,48,97,NA "CA",1986,NA,NA,9,58,73,62,59,73,147,NA,NA,NA,10,33,54,33,20,26,95,NA "CA",1987,NA,NA,9,40,71,60,49,64,129,NA,NA,NA,8,39,48,29,17,26,79,NA "CA",1988,NA,NA,4,43,73,62,52,68,131,NA,NA,NA,6,38,56,27,16,26,80,NA "CA",1989,NA,NA,10,45,56,60,54,62,122,NA,NA,NA,6,37,51,23,24,21,81,NA "CA",1990,NA,NA,3,35,70,55,40,42,100,NA,NA,NA,1,30,38,26,17,20,72,NA "CA",1991,NA,NA,7,37,79,53,37,36,110,NA,NA,NA,4,23,37,31,9,20,60,NA "CA",1992,NA,NA,6,42,47,58,41,51,79,NA,NA,NA,2,27,28,21,11,15,78,NA "CA",1993,NA,NA,8,33,47,53,43,33,74,NA,NA,NA,6,22,50,22,21,21,55,NA "CA",1994,NA,NA,2,42,54,42,43,34,87,NA,NA,NA,3,37,37,19,11,13,59,NA "CA",1995,NA,NA,1,28,31,60,34,41,70,NA,NA,NA,7,33,28,22,12,18,51,NA "CA",1996,NA,NA,3,28,49,48,31,34,70,NA,NA,NA,2,23,34,28,14,16,50,NA "CA",1997,NA,NA,0,21,55,44,30,44,90,NA,NA,NA,1,36,44,26,13,16,53,NA "CA",1998,NA,NA,4,33,43,51,31,26,80,NA,NA,NA,1,26,31,26,14,18,54,NA "CA",1999,NA,NA,0,23,47,51,36,33,94,NA,NA,NA,4,33,31,28,13,11,51,NA "CA",2000,NA,NA,5,34,45,46,41,32,79,NA,NA,NA,4,33,40,30,25,12,66,NA "CA",2001,NA,NA,6,24,49,56,40,22,76,NA,NA,NA,5,23,41,33,16,14,53,NA "CA",2002,NA,NA,0,25,34,50,34,27,64,NA,NA,NA,6,32,31,26,17,17,45,NA "CA",2003,NA,NA,1,26,36,37,32,21,42,NA,NA,NA,3,21,28,25,15,9,36,NA 
"CA",2004,NA,NA,2,25,34,38,32,31,64,NA,NA,NA,0,34,55,34,19,22,48,NA "CA",2005,NA,NA,3,37,45,44,40,20,68,NA,NA,NA,6,28,40,27,24,13,37,NA "CA",2006,1,1,2,34,34,33,42,26,64,NA,0,4,4,39,30,25,16,6,52,NA "CA",2007,4,1,5,31,41,51,50,35,75,NA,0,2,2,32,33,33,11,13,51,NA "CA",2008,0,2,2,39,36,49,53,38,62,0,0,3,3,36,39,39,27,20,45,0 "CD",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CD",1995,NA,NA,373,1572,2382,1890,1184,634,289,NA,NA,NA,331,1223,1532,1232,863,427,137,NA "CD",1996,NA,NA,228,1040,1627,1492,998,548,285,NA,NA,NA,292,1153,1528,1142,728,377,149,NA "CD",1997,NA,NA,259,1401,1996,1599,996,614,276,NA,NA,NA,321,1376,1874,1271,723,386,150,NA "CD",1998,NA,NA,455,3684,5073,3578,2002,997,518,NA,NA,NA,651,4074,4536,2716,1295,722,272,NA "CD",1999,NA,NA,474,4061,5886,4191,2250,1279,626,NA,NA,NA,708,4472,4991,3117,1725,836,305,NA "CD",2000,NA,NA,485,4048,5833,4151,2549,1295,602,NA,NA,NA,718,4422,5146,3309,1724,855,351,NA 
"CD",2001,NA,NA,581,4651,6794,4817,2876,1384,724,NA,NA,NA,842,4922,5586,3704,2057,1042,470,NA "CD",2002,NA,NA,649,4965,7414,4994,3065,1388,791,NA,NA,NA,874,5378,6230,3939,2262,1055,476,NA "CD",2003,NA,NA,854,5885,8427,6193,3776,1836,1047,NA,NA,NA,1233,6630,7711,4826,2866,1457,592,NA "CD",2004,NA,NA,1195,7007,9467,7114,4442,2376,1229,NA,NA,NA,1679,7630,8540,5529,3413,1850,721,NA "CD",2005,NA,NA,1321,6675,9808,7577,5022,2637,1499,NA,NA,NA,1695,7570,8501,5832,3898,2054,951,NA "CD",2006,NA,NA,1122,6391,9486,7321,5011,2657,1504,NA,NA,NA,1517,7236,8522,5621,3762,2019,975,NA "CD",2007,NA,NA,1343,6485,9548,7925,5341,2801,1752,NA,NA,NA,1842,7130,8415,5939,4127,2352,1099,NA "CD",2008,NA,NA,1515,6497,9988,8552,5756,3131,1686,NA,NA,NA,1828,7304,8995,6393,4104,2516,1212,NA "CF",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",1995,NA,NA,38,162,356,206,120,40,18,NA,NA,NA,39,233,350,145,57,21,9,NA "CF",1996,NA,NA,46,192,385,234,94,57,15,NA,NA,NA,52,273,346,177,62,45,6,NA "CF",1997,NA,NA,54,211,403,282,144,65,26,NA,NA,NA,53,301,394,207,100,23,10,NA "CF",1998,NA,NA,28,205,482,328,157,76,46,NA,NA,NA,67,353,476,217,115,61,26,NA "CF",1999,NA,NA,28,224,529,367,123,67,65,NA,NA,NA,72,376,498,196,86,52,42,NA 
"CF",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",2001,NA,NA,15,127,279,171,78,45,16,NA,NA,NA,25,179,236,123,64,23,1,NA "CF",2002,NA,NA,76,264,462,414,154,82,22,NA,NA,NA,66,315,402,262,139,82,18,NA "CF",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",2004,NA,NA,12,58,694,575,241,30,14,NA,NA,NA,14,60,430,559,181,46,9,NA "CF",2005,NA,NA,29,40,1136,160,26,35,15,NA,NA,NA,30,32,420,145,30,40,15,NA "CF",2006,NA,NA,48,409,770,923,152,83,30,NA,NA,NA,52,538,613,647,126,42,16,NA "CF",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CF",2008,NA,NA,68,466,643,515,276,160,81,0,NA,NA,102,481,673,378,196,136,57,0 "CG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1995,NA,NA,16,265,409,221,73,44,15,NA,NA,NA,17,296,353,167,61,38,11,NA "CG",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"CG",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",1999,NA,NA,17,272,407,229,99,39,27,NA,NA,NA,25,297,348,143,83,24,22,NA "CG",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",2001,NA,NA,31,557,756,437,174,85,65,NA,NA,NA,53,554,706,377,177,85,107,NA "CG",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",2004,NA,NA,9,602,887,451,251,78,32,NA,NA,NA,38,310,800,373,156,88,44,NA "CG",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CG",2006,NA,NA,32,371,656,392,174,69,51,NA,NA,NA,44,384,500,247,138,79,54,NA "CG",2007,NA,NA,28,351,635,482,233,78,63,NA,NA,NA,45,411,608,334,153,71,60,NA "CG",2008,NA,NA,31,417,606,469,195,68,49,NA,NA,NA,56,396,505,308,135,85,51,NA "CH",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CH",1995,NA,NA,0,12,23,26,23,13,27,NA,NA,NA,1,13,20,9,1,2,15,NA "CH",1996,NA,NA,1,12,28,27,17,17,22,NA,NA,NA,0,6,18,4,4,4,12,NA 
"CH",1997,NA,NA,0,16,20,15,11,7,25,NA,NA,NA,0,14,14,6,2,2,12,NA "CH",1998,NA,NA,0,15,30,26,12,10,23,NA,NA,NA,1,11,15,6,4,1,11,NA "CH",1999,NA,NA,1,12,15,17,6,6,5,NA,NA,NA,0,5,16,7,0,2,6,NA "CH",2000,NA,NA,0,5,18,10,7,5,8,NA,NA,NA,1,9,12,8,2,1,6,NA "CH",2001,NA,NA,0,9,18,13,7,10,7,NA,NA,NA,0,3,9,3,3,1,7,NA "CH",2002,NA,NA,0,9,16,11,16,5,8,NA,NA,NA,0,11,13,7,4,1,6,NA "CH",2003,NA,NA,0,11,7,19,10,4,11,NA,NA,NA,1,10,10,4,0,3,3,NA "CH",2004,NA,NA,0,10,14,8,11,6,11,NA,NA,NA,0,6,11,7,6,4,5,NA "CH",2005,0,1,1,10,9,13,12,2,7,0,0,0,0,6,11,8,3,2,4,0 "CH",2006,0,1,1,11,15,11,8,7,12,0,0,1,1,10,16,11,5,1,2,0 "CH",2007,0,0,0,11,10,11,7,5,11,0,0,1,1,9,17,3,0,3,7,0 "CH",2008,0,0,0,8,13,5,4,3,4,0,0,1,1,3,13,6,2,1,1,0 "CI",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",1995,NA,NA,41,989,2092,1344,759,283,130,NA,NA,NA,99,810,813,497,273,105,19,NA "CI",1996,NA,NA,118,903,1670,1107,535,262,178,NA,NA,NA,139,803,836,409,194,158,75,NA 
"CI",1997,NA,NA,87,1140,1850,1326,662,398,260,NA,NA,NA,118,955,1123,548,291,184,99,NA "CI",1998,NA,NA,72,1173,1747,1471,795,433,273,NA,NA,NA,104,955,1087,703,347,126,105,NA "CI",1999,NA,NA,98,1069,1794,1240,629,378,251,NA,NA,NA,132,1022,1137,644,260,186,112,NA "CI",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CI",2001,NA,NA,108,1205,1818,1378,686,393,302,NA,NA,NA,127,1111,1345,735,342,239,112,NA "CI",2002,NA,NA,102,1271,2194,1490,833,385,307,NA,NA,NA,135,1151,1620,827,358,210,142,NA "CI",2003,NA,NA,116,1232,2075,1517,818,416,366,NA,NA,NA,154,1193,1617,878,443,222,151,NA "CI",2004,NA,NA,114,1418,2323,1530,875,474,387,NA,NA,NA,160,1266,1734,916,472,273,194,NA "CI",2005,NA,NA,128,1346,2449,1606,888,422,385,NA,NA,NA,193,1280,1756,989,528,232,201,NA "CI",2006,NA,NA,171,1467,2476,1614,915,564,368,NA,NA,NA,191,1327,1776,1069,445,275,209,NA "CI",2007,NA,NA,173,1576,2705,1817,981,532,429,NA,NA,NA,225,1349,1973,1126,596,354,235,NA "CI",2008,NA,NA,261,1764,2944,1842,1121,649,482,NA,NA,NA,277,1477,2085,1171,641,326,254,NA "CK",1980,NA,NA,0,2,0,1,1,0,0,NA,NA,NA,0,3,0,0,1,0,0,NA "CK",1981,NA,NA,0,0,1,0,0,0,0,NA,NA,NA,0,0,0,0,0,1,0,NA "CK",1982,NA,NA,0,0,0,1,2,3,1,NA,NA,NA,0,2,0,3,0,0,0,NA "CK",1983,NA,NA,0,2,1,0,0,1,1,NA,NA,NA,0,4,0,3,1,2,0,NA "CK",1984,NA,NA,0,0,1,0,1,0,1,NA,NA,NA,0,0,0,0,0,0,0,NA "CK",1985,NA,NA,1,0,0,1,2,1,0,NA,NA,NA,0,1,1,0,0,1,0,NA "CK",1986,NA,NA,0,1,0,0,0,0,0,NA,NA,NA,1,0,0,0,0,0,0,NA "CK",1987,NA,NA,0,0,0,1,0,0,0,NA,NA,NA,1,0,0,0,0,0,0,NA "CK",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CK",1989,NA,NA,0,0,0,0,0,0,1,NA,NA,NA,1,0,0,0,0,0,0,NA "CK",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CK",1991,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,1,0,0,0,0,0,NA "CK",1992,NA,NA,0,0,0,0,1,2,0,NA,NA,NA,0,0,0,1,0,1,1,NA "CK",1993,NA,NA,0,0,0,0,2,0,0,NA,NA,NA,0,0,1,0,0,1,0,NA "CK",1994,NA,NA,0,0,0,0,0,3,1,NA,NA,NA,1,0,0,0,0,0,0,NA "CK",1995,NA,NA,0,0,0,0,0,1,0,NA,NA,NA,0,0,0,0,1,0,0,NA 
"CK",1996,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,1,0,NA "CK",1997,NA,NA,0,0,0,0,0,1,0,NA,NA,NA,0,0,0,1,0,0,0,NA "CK",1998,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "CK",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CK",2000,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "CK",2001,NA,NA,0,0,0,0,0,1,1,NA,NA,NA,0,0,0,0,0,0,0,NA "CK",2002,NA,NA,0,0,0,0,0,1,0,NA,NA,NA,0,0,0,0,0,0,0,NA "CK",2003,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "CK",2004,NA,NA,0,0,0,0,0,0,1,NA,NA,NA,0,0,0,0,0,0,0,NA "CK",2005,NA,NA,0,1,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "CK",2006,0,0,0,0,0,0,0,0,0,NA,0,0,0,0,0,0,0,0,0,NA "CK",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CK",2008,NA,NA,NA,NA,NA,1,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1995,NA,NA,24,148,182,204,155,141,163,NA,NA,NA,24,100,120,108,75,73,107,NA "CL",1996,NA,NA,8,123,201,207,207,125,139,NA,NA,NA,11,88,117,72,63,47,72,NA 
"CL",1997,NA,NA,11,107,182,224,165,153,163,NA,NA,NA,11,92,121,80,66,60,88,NA "CL",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CL",1999,NA,NA,4,118,173,204,206,132,132,NA,NA,NA,9,87,109,97,52,76,98,NA "CL",2000,NA,NA,6,81,160,198,150,132,126,NA,NA,NA,10,66,96,70,54,58,83,NA "CL",2001,NA,NA,2,78,183,213,190,116,138,NA,NA,NA,9,69,85,76,58,55,83,NA "CL",2002,NA,NA,6,87,163,196,193,144,160,NA,NA,NA,7,64,91,82,76,54,89,NA "CL",2003,NA,NA,1,77,131,181,183,150,136,NA,NA,NA,8,59,106,81,42,41,80,NA "CL",2004,NA,NA,3,87,148,179,187,124,168,NA,NA,NA,5,58,74,76,57,57,74,NA "CL",2005,NA,NA,3,74,128,179,162,115,133,NA,NA,NA,4,55,78,60,56,36,93,NA "CL",2006,5,7,12,107,140,176,197,179,199,NA,0,7,7,70,91,74,95,64,122,NA "CL",2007,0,3,3,86,137,140,169,139,121,NA,0,8,8,59,75,63,49,39,78,NA "CL",2008,1,6,7,86,131,148,167,135,118,NA,2,1,3,52,86,49,37,30,65,NA "CM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"CM",1995,NA,NA,20,208,569,323,287,204,164,NA,NA,NA,9,185,313,223,153,106,93,NA "CM",1996,NA,NA,34,151,735,291,178,38,17,NA,NA,NA,21,123,388,202,103,24,8,NA "CM",1997,NA,NA,36,321,1011,387,269,79,24,NA,NA,NA,25,277,522,341,179,63,14,NA "CM",1998,NA,NA,15,651,1006,787,262,87,35,NA,NA,NA,30,443,595,268,112,53,30,NA "CM",1999,NA,NA,49,602,1595,736,433,166,59,NA,NA,NA,47,506,783,505,235,95,21,NA "CM",2000,NA,NA,41,518,842,584,284,130,75,NA,NA,NA,63,368,530,293,139,60,33,NA "CM",2001,NA,NA,24,643,1000,732,322,154,86,NA,NA,NA,49,482,609,328,155,62,50,NA "CM",2002,NA,NA,66,818,1335,1117,619,258,125,NA,NA,NA,59,950,1053,545,236,140,44,NA "CM",2003,NA,NA,100,1176,2274,1516,788,330,160,NA,NA,NA,136,1273,1542,745,363,217,72,NA "CM",2004,NA,NA,127,1312,2147,1575,928,408,259,NA,NA,NA,181,1310,1449,756,412,214,140,NA "CM",2005,NA,NA,134,1472,2482,1766,1035,463,289,NA,NA,NA,226,1467,1788,1028,503,205,143,NA "CM",2006,NA,NA,112,1401,2550,1820,1080,437,300,NA,NA,NA,151,1358,1823,960,470,266,142,NA "CM",2007,NA,NA,121,1392,2613,1874,1011,480,307,NA,NA,NA,152,1443,1963,985,483,248,148,NA "CM",2008,0,0,108,1613,2861,2016,1135,526,281,0,0,0,173,1506,2041,1027,568,234,148,0 "CN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"CN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CN",1995,NA,NA,1102,12791,18306,15487,13105,13489,10130,NA,NA,NA,1169,10890,13250,8376,5679,4579,2841,NA "CN",1996,NA,NA,1409,16490,24057,19695,17024,16758,13697,NA,NA,NA,1624,13773,17218,10214,7020,5346,3945,NA "CN",1997,NA,NA,1456,18547,28247,23006,20330,19667,17041,NA,NA,NA,1534,15258,19547,11758,8259,6422,4823,NA "CN",1998,NA,NA,1481,19699,30093,25088,23483,21651,20501,NA,NA,NA,1558,15726,20203,12672,9399,7122,5728,NA "CN",1999,NA,NA,1247,18961,29328,25095,24239,21564,21367,NA,NA,NA,1431,15178,18846,12370,9838,7131,5663,NA "CN",2000,NA,NA,1131,19111,29399,25206,25593,21429,21771,NA,NA,NA,1420,14536,18496,12377,9899,7102,6296,NA "CN",2001,NA,NA,1213,19121,28520,25544,25759,20789,22799,NA,NA,NA,1405,14500,17446,12041,9963,7175,6491,NA "CN",2002,NA,NA,925,17933,25242,22645,23884,19564,22562,NA,NA,NA,1152,13250,15188,10505,8796,6586,6740,NA "CN",2003,NA,NA,1133,25125,32760,31604,32585,27243,32027,NA,NA,NA,1407,18811,19248,14783,12101,8988,9465,NA "CN",2004,NA,NA,1375,35465,43594,45408,46256,41846,50797,NA,NA,NA,1659,25951,25150,20613,16995,14038,15739,NA "CN",2005,NA,NA,1416,43005,49558,55400,54872,53822,69779,NA,NA,NA,1864,31180,27759,24728,19889,18203,21244,NA "CN",2006,NA,NA,1023,44528,48232,56733,54301,53746,68557,NA,NA,NA,1408,30904,26526,24564,18775,17782,21212,NA "CN",2007,NA,NA,878,44011,46374,56224,54960,56288,70376,NA,NA,NA,1235,29960,24914,23542,18129,17647,21339,NA "CN",2008,NA,NA,751,45596,44651,56182,55740,57492,69678,NA,NA,NA,964,29223,23484,22370,17565,17814,21086,NA "CO",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"CO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",1999,NA,NA,270,1730,1473,1796,1500,350,1210,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CO",2000,NA,NA,246,763,1030,963,743,610,746,NA,NA,NA,194,587,758,523,381,304,510,NA "CO",2001,NA,NA,223,1037,703,722,869,646,653,NA,NA,NA,186,865,544,429,436,350,359,NA "CO",2002,NA,NA,209,614,696,688,593,472,662,NA,NA,NA,167,524,545,402,318,258,371,NA "CO",2003,NA,NA,237,684,816,844,853,642,761,NA,NA,NA,174,662,692,512,382,292,421,NA "CO",2004,NA,NA,208,732,824,743,725,564,737,NA,NA,NA,205,624,647,513,361,331,426,NA "CO",2005,NA,NA,178,623,685,666,687,510,695,NA,NA,NA,179,581,533,457,389,292,395,NA "CO",2006,NA,NA,219,709,713,737,785,573,766,NA,NA,NA,210,603,653,520,377,314,469,NA "CO",2007,NA,NA,144,618,704,694,712,574,786,NA,NA,NA,138,599,620,459,393,286,461,NA "CO",2008,NA,NA,136,666,736,666,749,610,797,0,NA,NA,133,580,608,441,384,284,406,0 "CR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"CR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CR",1995,NA,NA,1,17,38,24,19,23,22,NA,NA,NA,2,17,15,11,7,9,14,NA "CR",1996,NA,NA,0,11,11,19,15,19,15,NA,NA,NA,0,4,9,7,3,4,5,NA "CR",1997,NA,NA,37,30,82,69,52,35,45,NA,NA,NA,31,28,45,40,30,24,30,NA "CR",1998,NA,NA,30,53,78,67,53,43,36,NA,NA,NA,23,19,43,47,27,17,26,NA "CR",1999,NA,NA,4,28,63,89,70,51,73,NA,NA,NA,10,23,42,37,32,33,36,NA "CR",2000,NA,NA,14,31,53,62,39,28,49,NA,NA,NA,13,21,33,24,20,23,24,NA "CR",2001,NA,NA,2,26,53,72,50,29,36,NA,NA,NA,1,18,31,20,16,16,15,NA "CR",2002,NA,NA,3,26,45,44,43,19,38,NA,NA,NA,6,13,24,19,14,15,19,NA "CR",2003,NA,NA,3,33,47,32,39,28,33,NA,NA,NA,4,25,24,21,30,11,16,NA "CR",2004,NA,NA,1,49,62,45,36,29,43,NA,NA,NA,5,35,29,34,11,18,22,NA "CR",2005,NA,NA,1,43,38,53,34,20,34,NA,NA,NA,1,21,31,18,16,6,14,NA "CR",2006,0,1,1,27,36,29,34,25,24,NA,0,4,4,27,24,20,15,8,11,NA "CR",2007,0,2,4,44,57,28,32,17,31,NA,0,3,3,16,24,19,16,16,15,NA "CR",2008,NA,3,3,24,39,31,38,17,29,NA,NA,2,2,19,31,20,14,6,14,NA "CU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"CU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",1995,NA,NA,2,59,118,83,75,75,156,NA,NA,NA,1,17,52,29,39,48,80,NA "CU",1996,NA,NA,0,54,136,86,93,84,138,NA,NA,NA,1,29,44,20,45,42,63,NA "CU",1997,NA,NA,0,69,151,83,63,77,116,NA,NA,NA,2,16,49,33,32,28,46,NA "CU",1998,NA,NA,0,60,140,109,75,53,102,NA,NA,NA,1,17,48,23,30,31,55,NA "CU",1999,NA,NA,1,55,163,97,68,72,100,NA,NA,NA,2,15,37,27,20,28,35,NA "CU",2000,NA,NA,0,71,167,90,74,55,75,NA,NA,NA,2,9,22,26,22,23,39,NA "CU",2001,NA,NA,0,36,136,87,39,54,67,NA,NA,NA,1,24,17,22,17,20,39,NA "CU",2002,NA,NA,0,21,104,83,67,45,77,NA,NA,NA,3,15,28,22,21,20,34,NA "CU",2003,NA,NA,2,23,90,91,62,51,78,NA,NA,NA,0,11,14,20,23,13,29,NA "CU",2004,NA,NA,0,17,68,95,63,45,50,NA,NA,NA,0,16,20,15,16,20,29,NA "CU",2005,NA,NA,2,20,73,90,50,58,51,NA,NA,NA,2,14,17,26,13,22,29,NA "CU",2006,NA,NA,NA,22,73,93,50,47,50,NA,NA,NA,NA,8,18,22,12,14,23,NA "CU",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CU",2008,0,2,2,30,66,108,67,59,53,NA,1,2,3,11,14,22,18,11,34,NA "CV",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"CV",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",1997,NA,NA,0,11,16,19,4,10,10,NA,NA,NA,2,10,10,8,3,10,9,NA "CV",1998,NA,NA,2,9,14,14,6,6,9,NA,NA,NA,2,12,4,5,3,5,10,NA "CV",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CV",2001,NA,NA,0,5,15,6,5,5,1,NA,NA,NA,2,7,9,7,1,2,3,NA "CV",2002,NA,NA,3,9,29,20,14,1,2,NA,NA,NA,2,11,11,12,3,4,4,NA "CV",2003,NA,NA,3,12,32,32,9,7,8,NA,NA,NA,1,6,7,13,7,4,11,NA "CV",2004,NA,NA,1,8,33,17,20,2,7,NA,NA,NA,2,17,34,11,7,6,4,NA "CV",2005,NA,NA,0,22,23,26,9,2,8,NA,NA,NA,2,9,16,4,5,3,6,NA "CV",2006,NA,2,2,15,22,18,8,6,4,NA,NA,2,2,14,16,5,6,4,9,NA "CV",2007,NA,NA,0,24,30,26,18,4,6,NA,NA,NA,0,18,17,5,1,3,6,NA "CV",2008,0,0,0,23,33,29,27,12,6,0,NA,NA,6,18,21,11,3,5,3,0 "CY",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"CY",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1995,NA,NA,0,1,1,0,1,1,2,NA,NA,NA,0,1,1,1,2,0,1,NA "CY",1996,NA,NA,0,0,0,0,0,1,0,NA,NA,NA,0,2,0,0,0,0,0,NA "CY",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",1998,NA,NA,0,0,1,0,0,0,1,NA,NA,NA,0,1,3,0,0,0,0,NA "CY",1999,NA,NA,5,1,6,2,2,10,0,NA,NA,NA,4,1,2,3,1,2,0,NA "CY",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CY",2002,NA,NA,0,2,1,1,1,0,2,NA,NA,NA,0,1,0,0,0,0,0,NA "CY",2003,NA,NA,0,1,4,3,0,0,1,NA,NA,NA,0,0,2,2,1,0,0,NA "CY",2004,NA,NA,0,3,3,0,1,1,1,NA,NA,NA,0,1,0,0,0,0,0,NA "CY",2005,0,0,0,3,1,1,1,0,1,0,0,0,0,1,0,0,0,0,0,0 "CY",2006,0,0,0,0,1,1,0,1,0,0,0,0,0,2,3,0,0,0,0,0 "CY",2007,0,0,0,2,1,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0 "CY",2008,0,0,0,1,0,0,1,0,0,0,0,0,0,1,2,1,0,0,0,0 "CZ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"CZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "CZ",1995,NA,NA,2,10,22,83,88,53,90,NA,NA,NA,0,9,11,20,13,19,88,NA "CZ",1996,NA,NA,1,10,40,77,121,66,90,NA,NA,NA,1,10,17,11,21,20,89,NA "CZ",1997,NA,NA,0,5,25,71,94,64,83,NA,NA,NA,0,12,8,12,17,18,72,NA "CZ",1998,NA,NA,0,7,37,88,104,67,95,NA,NA,NA,1,6,17,12,18,11,82,NA "CZ",1999,NA,NA,2,13,27,62,98,45,75,NA,NA,NA,1,5,14,18,15,3,71,NA "CZ",2000,NA,NA,0,7,31,52,89,61,59,NA,NA,NA,0,15,13,9,10,7,57,NA "CZ",2001,NA,NA,0,18,39,47,85,43,50,NA,NA,NA,0,10,17,8,11,9,54,NA "CZ",2002,NA,NA,0,14,28,39,89,38,40,NA,NA,NA,0,6,10,8,8,6,43,NA "CZ",2003,NA,NA,0,11,28,42,67,48,50,NA,NA,NA,0,9,15,15,12,7,34,NA "CZ",2004,NA,NA,0,10,28,36,71,30,35,NA,NA,NA,0,11,17,9,13,13,29,NA "CZ",2005,0,0,0,8,24,57,55,45,46,0,0,0,0,3,14,16,7,5,28,0 "CZ",2006,0,0,0,6,19,39,56,38,25,0,0,0,0,4,12,12,10,6,30,0 "CZ",2007,0,0,0,14,26,35,63,39,29,0,0,0,0,6,8,5,9,5,28,0 "CZ",2008,0,0,0,7,29,39,44,36,32,0,0,0,0,4,14,8,5,10,23,0 "DE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"DE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",1995,NA,NA,14,179,453,539,460,442,625,NA,NA,NA,17,115,251,167,89,104,397,NA "DE",1996,NA,NA,20,181,377,520,413,405,607,NA,NA,NA,19,150,214,180,97,108,389,NA "DE",1997,NA,NA,11,166,375,459,384,424,509,NA,NA,NA,16,109,204,154,93,99,343,NA "DE",1998,NA,NA,9,179,333,448,358,349,538,NA,NA,NA,11,121,166,141,93,80,298,NA "DE",1999,NA,NA,13,145,308,419,362,335,449,NA,NA,NA,15,118,177,98,85,99,295,NA "DE",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DE",2001,NA,NA,3,3,89,136,106,94,119,NA,NA,NA,1,36,59,48,42,26,79,NA "DE",2002,NA,NA,3,34,75,102,88,81,101,NA,NA,NA,1,32,61,50,14,16,64,NA "DE",2003,NA,NA,2,68,107,177,163,103,155,NA,NA,NA,10,61,96,86,43,22,102,NA "DE",2004,NA,NA,5,63,130,182,161,110,198,NA,NA,NA,6,75,110,97,42,32,116,NA "DE",2005,2,4,6,59,113,171,167,92,167,0,3,1,4,51,104,73,43,37,103,0 "DE",2006,0,2,2,78,138,169,189,103,199,0,2,5,7,66,109,77,39,24,102,0 "DE",2007,0,2,2,116,248,314,344,184,362,0,0,4,4,120,176,152,116,46,178,0 "DE",2008,0,2,2,40,95,114,142,96,148,0,1,5,6,35,68,61,41,33,71,0 "DJ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"DJ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DJ",1996,NA,NA,30,421,429,139,77,52,27,NA,NA,NA,31,247,212,67,38,21,5,NA "DJ",1997,NA,NA,52,428,442,167,115,66,23,NA,NA,NA,51,202,225,75,38,17,4,NA "DJ",1998,NA,NA,23,348,396,191,81,57,23,NA,NA,NA,28,208,197,76,43,17,9,NA "DJ",1999,NA,NA,25,348,371,159,87,67,22,NA,NA,NA,20,158,168,84,38,20,3,NA "DJ",2000,NA,NA,17,302,347,139,67,60,42,NA,NA,NA,12,147,156,47,31,17,10,NA "DJ",2001,NA,NA,17,267,331,125,65,51,23,NA,NA,NA,17,156,134,59,44,15,8,NA "DJ",2002,NA,NA,20,256,320,124,58,55,25,NA,NA,NA,18,142,136,48,28,19,4,NA "DJ",2003,NA,NA,10,222,288,132,76,42,24,NA,NA,NA,19,127,123,55,38,28,8,NA "DJ",2004,NA,NA,19,217,225,142,68,38,28,NA,NA,NA,16,111,115,49,23,25,10,NA "DJ",2005,NA,NA,18,220,252,119,62,47,29,NA,NA,NA,23,123,117,66,23,13,8,NA "DJ",2006,NA,NA,14,225,246,165,63,33,20,NA,NA,NA,24,117,129,59,35,18,5,NA "DJ",2007,4,10,14,241,264,142,83,44,23,NA,2,6,8,129,131,62,35,14,18,NA "DJ",2008,NA,NA,17,232,275,180,93,56,46,NA,NA,NA,22,138,159,79,53,15,10,NA "DK",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"DK",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DK",1995,NA,NA,0,7,16,28,18,9,11,NA,NA,NA,2,7,13,8,4,3,2,NA "DK",1996,NA,NA,0,4,16,13,13,8,6,NA,NA,NA,0,5,9,8,4,3,8,NA "DK",1997,NA,NA,1,11,19,23,16,6,6,NA,NA,NA,1,6,8,2,5,2,8,NA "DK",1998,NA,NA,0,7,20,21,18,7,9,NA,NA,NA,1,6,16,8,7,6,6,NA "DK",1999,NA,NA,4,9,29,23,21,8,9,NA,NA,NA,1,11,18,11,7,8,11,NA "DK",2000,NA,NA,5,10,20,24,16,11,14,NA,NA,NA,5,16,15,14,6,7,8,NA "DK",2001,NA,NA,1,10,15,20,15,4,9,NA,NA,NA,5,5,12,13,7,5,3,NA "DK",2002,NA,NA,2,11,8,25,14,6,9,NA,NA,NA,1,14,17,11,10,2,5,NA "DK",2003,NA,NA,3,11,20,23,22,12,9,NA,NA,NA,0,6,13,12,6,2,4,NA "DK",2004,NA,NA,1,6,12,17,27,15,12,NA,NA,NA,2,10,16,10,9,7,2,NA "DK",2005,0,0,0,12,12,18,23,9,7,0,0,2,2,11,5,13,9,3,5,0 "DK",2006,0,0,0,8,13,15,27,10,8,0,0,1,1,6,12,9,5,5,4,0 "DK",2007,0,0,0,6,12,20,29,16,6,0,1,0,1,8,12,8,4,5,8,0 "DK",2008,0,0,0,8,15,9,24,10,8,0,0,2,2,5,7,5,8,4,1,0 "DM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"DM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",1996,NA,NA,0,0,1,2,1,1,0,NA,NA,NA,0,0,1,0,1,0,0,NA "DM",1997,NA,NA,0,0,0,0,1,1,1,NA,NA,NA,0,0,0,0,1,1,0,NA "DM",1998,NA,NA,0,0,0,0,1,0,0,NA,NA,NA,0,2,0,1,0,1,0,NA "DM",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",2002,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA "DM",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DM",2006,0,0,0,0,1,1,0,1,1,NA,0,0,0,1,0,1,1,1,0,NA "DM",2007,0,0,0,0,0,1,0,1,0,NA,0,0,0,0,0,0,0,1,0,NA "DM",2008,0,0,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0 "DO",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"DO",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",1996,NA,NA,17,231,262,128,79,45,42,NA,NA,NA,21,178,147,61,44,23,22,NA "DO",1997,NA,NA,76,450,471,246,145,111,81,NA,NA,NA,53,314,329,171,102,77,56,NA "DO",1998,NA,NA,62,340,416,184,130,114,50,NA,NA,NA,60,265,247,141,79,73,33,NA "DO",1999,NA,NA,90,507,485,356,238,166,183,NA,NA,NA,99,363,359,226,160,121,136,NA "DO",2000,NA,NA,73,410,481,344,173,125,113,NA,NA,NA,65,317,325,212,115,79,75,NA "DO",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DO",2002,NA,NA,39,295,417,270,145,86,71,NA,NA,NA,35,251,241,137,81,49,62,NA "DO",2003,NA,NA,52,364,518,331,194,116,112,NA,NA,NA,48,301,288,211,116,82,73,NA "DO",2004,NA,NA,45,391,502,363,180,122,104,NA,NA,NA,39,301,288,177,104,60,44,NA "DO",2005,NA,NA,43,399,483,386,228,123,105,NA,NA,NA,57,339,332,209,119,72,54,NA "DO",2006,NA,NA,25,342,480,340,207,111,92,NA,NA,NA,38,287,320,189,106,63,58,NA "DO",2007,NA,NA,23,290,403,362,209,108,85,NA,NA,NA,29,249,242,174,103,53,43,NA "DO",2008,2,14,16,322,398,337,198,122,105,0,4,30,34,288,272,163,96,55,52,0 "DZ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"DZ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1997,NA,NA,659,1422,1982,639,357,312,396,NA,NA,NA,92,1102,702,405,242,236,356,NA "DZ",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "DZ",1999,NA,NA,40,1193,1344,556,706,263,315,NA,NA,NA,92,884,621,281,221,243,329,NA "DZ",2000,NA,NA,59,927,1516,610,491,234,299,NA,NA,NA,36,1005,1293,746,314,208,312,NA "DZ",2001,NA,NA,41,1345,1614,708,401,283,390,NA,NA,NA,79,1057,782,352,287,280,334,NA "DZ",2002,NA,NA,39,1364,1580,630,406,273,280,NA,NA,NA,71,1840,730,334,224,217,258,NA "DZ",2003,NA,NA,40,1316,1633,706,429,231,328,NA,NA,NA,74,1017,702,326,242,241,356,NA "DZ",2004,NA,NA,63,1326,1694,758,434,271,373,NA,NA,NA,92,1011,798,320,253,227,359,NA "DZ",2005,NA,NA,53,1309,1841,919,473,314,426,NA,NA,NA,102,1044,820,389,270,229,465,NA "DZ",2006,NA,NA,41,1173,1573,692,409,251,360,NA,NA,NA,80,971,679,339,223,197,408,NA "DZ",2007,NA,NA,95,1388,1749,813,494,296,407,NA,NA,NA,109,1031,811,335,273,247,391,NA "DZ",2008,NA,NA,99,1505,1786,794,447,198,463,NA,NA,NA,150,1263,827,346,256,226,286,NA "EC",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"EC",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",1998,NA,NA,169,402,286,58,NA,NA,NA,NA,NA,NA,44,290,175,99,NA,NA,NA,NA "EC",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",2001,NA,NA,39,673,832,269,202,251,116,NA,NA,NA,37,591,584,267,180,208,190,NA "EC",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EC",2003,NA,NA,18,310,266,194,125,75,96,NA,NA,NA,24,217,140,94,56,44,40,NA "EC",2004,NA,NA,84,732,537,563,265,315,153,NA,NA,NA,108,522,342,268,170,161,120,NA "EC",2005,NA,NA,48,446,468,308,237,150,159,NA,NA,NA,48,329,305,199,139,85,127,NA "EC",2006,NA,NA,32,479,496,340,259,181,183,NA,NA,NA,46,321,315,183,143,92,112,NA "EC",2007,NA,NA,42,555,486,367,282,178,227,NA,NA,NA,57,365,335,198,133,100,123,NA "EC",2008,NA,NA,32,507,518,372,278,187,202,NA,NA,NA,56,334,331,185,126,107,145,NA "EE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"EE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EE",1996,NA,NA,1,7,34,53,39,28,19,NA,NA,NA,0,10,14,16,5,2,12,NA "EE",1997,NA,NA,0,4,23,59,53,29,17,NA,NA,NA,0,8,16,17,14,6,12,NA "EE",1998,NA,NA,0,15,49,60,64,34,22,NA,NA,NA,0,7,7,15,9,7,10,NA "EE",1999,NA,NA,0,14,35,72,55,19,17,NA,NA,NA,0,8,9,20,16,2,7,NA "EE",2000,NA,NA,0,6,31,53,56,35,15,NA,NA,NA,0,9,11,14,11,4,10,NA "EE",2001,NA,NA,0,10,25,43,37,24,14,NA,NA,NA,0,6,11,17,11,6,8,NA "EE",2002,NA,NA,0,9,20,47,45,19,7,NA,NA,NA,0,7,11,16,9,5,8,NA "EE",2003,NA,NA,0,7,28,38,35,24,18,NA,NA,NA,0,7,4,11,12,2,15,NA "EE",2004,NA,NA,0,6,24,42,54,14,11,NA,NA,NA,0,4,12,10,13,6,7,NA "EE",2005,0,0,0,9,25,19,40,12,7,0,0,0,0,6,11,8,11,6,8,0 "EE",2006,0,0,0,4,19,24,40,12,7,0,0,0,0,3,9,10,9,4,6,0 "EE",2007,0,0,0,6,26,32,37,21,12,0,0,0,0,2,5,5,8,7,6,0 "EE",2008,0,0,0,3,14,26,34,12,13,0,0,0,0,2,7,6,8,4,15,0 "EG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"EG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "EG",1995,NA,NA,223,542,665,460,408,463,160,NA,NA,NA,134,288,367,274,256,160,75,NA "EG",1996,NA,NA,58,714,1056,703,485,308,154,NA,NA,NA,64,52,420,259,229,89,44,NA "EG",1997,NA,NA,50,737,1033,767,465,291,142,NA,NA,NA,64,525,388,264,200,114,34,NA "EG",1998,NA,NA,45,761,943,761,475,286,174,NA,NA,NA,60,489,405,291,204,139,44,NA "EG",1999,NA,NA,31,708,889,691,458,288,170,NA,NA,NA,57,485,347,248,193,112,36,NA "EG",2000,NA,NA,21,641,827,667,476,307,158,NA,NA,NA,55,457,343,257,211,112,48,NA "EG",2001,NA,NA,34,586,879,614,453,268,159,NA,NA,NA,57,438,396,265,207,109,49,NA "EG",2002,NA,NA,39,662,774,682,576,303,171,NA,NA,NA,77,424,365,245,254,145,60,NA "EG",2003,NA,NA,42,586,814,675,631,404,195,NA,NA,NA,57,463,338,268,282,175,71,NA "EG",2004,NA,NA,14,563,763,588,502,502,204,NA,NA,NA,44,491,317,233,233,111,54,NA "EG",2005,NA,NA,25,524,606,421,414,243,123,NA,NA,NA,48,431,298,205,218,132,42,NA "EG",2006,19,35,54,542,728,563,587,340,136,NA,8,55,64,470,367,338,279,155,87,NA "EG",2007,NA,NA,35,588,853,629,643,359,214,NA,NA,NA,25,500,325,245,225,173,72,NA "EG",2008,0,13,13,581,640,807,791,431,242,8,0,7,7,382,412,308,195,169,113,3 "ER",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ER",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ER",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ER",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ER",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"ER",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ER",1997,NA,NA,0,2,12,21,12,6,4,NA,NA,NA,0,3,10,20,10,4,5,NA "ER",1998,NA,NA,4,36,30,19,15,8,10,NA,NA,NA,3,43,29,11,9,6,5,NA "ER",1999,NA,NA,3,55,75,49,51,30,17,NA,NA,NA,3,65,94,34,30,17,7,NA "ER",2000,NA,NA,9,70,75,57,32,25,20,NA,NA,NA,10,100,87,71,21,12,8,NA "ER",2001,NA,NA,5,79,95,77,40,42,21,NA,NA,NA,9,96,76,66,50,31,15,NA "ER",2002,NA,NA,16,85,88,53,41,24,23,NA,NA,NA,15,75,85,52,39,30,20,NA "ER",2003,NA,NA,17,90,85,55,46,44,36,NA,NA,NA,27,120,149,100,60,36,22,NA "ER",2004,NA,NA,14,67,61,45,45,39,29,NA,NA,NA,13,95,118,67,43,23,20,NA "ER",2005,NA,NA,9,68,73,50,45,51,39,NA,NA,NA,8,67,127,72,39,21,18,NA "ER",2006,NA,NA,6,50,55,44,52,42,36,NA,NA,NA,17,109,123,64,45,19,18,NA "ER",2007,4,17,21,56,85,73,62,53,44,NA,0,2,2,70,89,56,47,21,15,NA "ER",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1995,NA,NA,22,132,337,242,150,112,228,NA,NA,NA,23,90,129,64,39,34,98,NA 
"ES",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",1998,NA,NA,25,186,361,294,195,114,205,NA,NA,NA,3,149,167,61,27,25,104,NA "ES",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ES",2001,NA,NA,13,160,355,351,215,134,232,NA,NA,NA,15,140,237,116,37,21,83,NA "ES",2002,NA,NA,22,189,392,405,300,192,337,NA,NA,NA,17,194,265,131,56,29,117,NA "ES",2003,NA,NA,7,153,334,305,219,132,222,NA,NA,NA,6,138,218,113,51,29,87,NA "ES",2004,NA,NA,14,140,301,312,229,142,227,NA,NA,NA,9,158,202,125,48,22,82,NA "ES",2005,6,7,13,166,394,367,230,140,230,2,6,4,10,142,252,151,63,24,108,2 "ES",2006,12,6,18,142,332,311,232,105,175,2,8,9,17,122,264,137,48,19,77,1 "ES",2007,4,6,10,184,375,379,257,128,191,2,8,4,12,164,291,136,63,23,93,2 "ES",2008,9,9,18,179,355,349,268,157,200,0,3,10,13,168,294,173,68,16,73,1 "ET",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"ET",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ET",1995,NA,NA,247,1221,1017,541,276,142,51,NA,NA,NA,283,908,781,382,152,64,15,NA "ET",1996,NA,NA,302,1739,1609,854,427,201,71,NA,NA,NA,369,1564,1147,576,246,88,32,NA "ET",1997,NA,NA,579,2810,2520,1365,736,401,193,NA,NA,NA,687,2469,2173,1039,481,192,108,NA "ET",1998,NA,NA,715,2643,3187,1610,839,429,171,NA,NA,NA,832,3016,2434,1220,519,194,55,NA "ET",1999,NA,NA,692,3916,3673,1925,1045,471,230,NA,NA,NA,798,3310,2949,1539,713,225,69,NA "ET",2000,NA,NA,915,5095,5187,3082,1495,610,397,NA,NA,NA,1037,4699,4424,2105,976,366,122,NA "ET",2001,NA,NA,913,5730,5594,3233,1581,742,354,NA,NA,NA,1107,5109,4830,2372,1014,338,111,NA "ET",2002,NA,NA,1251,6764,5669,3128,1544,821,372,NA,NA,NA,1614,5607,5692,2685,935,323,136,NA "ET",2003,NA,NA,1110,6923,6648,3737,2022,976,483,NA,NA,NA,1387,5936,5908,2780,1239,412,137,NA "ET",2004,NA,NA,1160,7167,7002,4060,1988,911,456,NA,NA,NA,1367,6422,6091,2984,1284,414,124,NA "ET",2005,NA,NA,1109,6726,6181,3454,1985,1027,475,NA,NA,NA,1326,5885,5663,2730,1296,513,155,NA "ET",2006,NA,NA,978,6137,5950,3567,2016,1066,521,NA,NA,NA,1178,5238,5326,2704,1324,510,159,NA "ET",2007,NA,NA,1055,6522,6114,3545,2038,1051,559,NA,NA,NA,1229,5426,5507,2850,1429,502,213,NA "ET",2008,NA,NA,978,6512,6794,4067,2290,1176,685,NA,NA,NA,1167,5490,5893,3251,1553,616,322,NA "FI",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"FI",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FI",1995,NA,NA,1,1,10,25,28,24,61,NA,NA,NA,1,1,6,7,4,10,65,NA "FI",1996,NA,NA,0,2,5,24,26,23,77,NA,NA,NA,0,4,5,3,7,6,58,NA "FI",1997,NA,NA,0,1,5,22,24,26,53,NA,NA,NA,0,2,6,2,5,5,35,NA "FI",1998,NA,NA,0,4,4,9,15,21,63,NA,NA,NA,0,3,4,9,4,12,40,NA "FI",1999,NA,NA,0,NA,4,13,26,20,53,NA,NA,NA,0,2,6,NA,11,5,39,NA "FI",2000,NA,NA,0,3,8,22,19,28,53,NA,NA,NA,0,1,5,3,4,6,49,NA "FI",2001,NA,NA,0,1,9,13,17,13,43,NA,NA,NA,0,3,4,5,8,10,22,NA "FI",2002,NA,NA,0,0,5,8,17,20,36,NA,NA,NA,0,4,3,0,3,6,26,NA "FI",2003,NA,NA,0,2,3,8,19,17,29,NA,NA,NA,0,2,10,3,6,5,31,NA "FI",2004,NA,NA,0,1,5,7,17,13,33,NA,NA,NA,0,1,0,3,4,3,15,NA "FI",2005,0,1,1,5,4,3,14,11,25,0,0,0,0,3,4,1,0,6,20,0 "FI",2006,0,0,0,5,6,5,9,6,20,0,0,0,0,2,4,3,4,1,19,0 "FI",2007,0,0,0,4,5,5,10,7,24,0,0,0,0,6,4,2,5,0,13,0 "FI",2008,0,0,0,3,5,8,13,11,35,0,0,0,0,4,3,2,4,2,14,0 "FJ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"FJ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FJ",1994,NA,NA,2,8,6,2,3,11,3,NA,NA,NA,0,4,6,3,6,7,1,NA "FJ",1995,NA,NA,0,8,10,9,4,2,3,NA,NA,NA,1,10,9,2,3,4,3,NA "FJ",1996,NA,NA,1,8,8,9,9,3,2,NA,NA,NA,3,6,8,2,6,2,2,NA "FJ",1997,NA,NA,1,4,8,6,8,6,2,NA,NA,NA,0,6,9,1,5,7,3,NA "FJ",1998,NA,NA,0,7,11,7,8,4,2,NA,NA,NA,2,10,10,4,4,3,2,NA "FJ",1999,NA,NA,1,13,7,5,8,3,3,NA,NA,NA,0,5,7,5,2,5,1,NA "FJ",2000,NA,NA,0,8,6,13,5,4,2,NA,NA,NA,0,7,5,7,1,4,0,NA "FJ",2001,NA,NA,0,6,8,11,7,4,2,NA,NA,NA,0,7,5,7,1,2,2,NA "FJ",2002,NA,NA,1,13,11,7,4,7,2,NA,NA,NA,2,8,4,6,3,4,2,NA "FJ",2003,NA,NA,2,8,9,6,11,7,6,NA,NA,NA,1,4,7,4,4,8,1,NA "FJ",2004,NA,NA,0,8,6,8,6,7,2,NA,NA,NA,0,8,6,3,5,2,1,NA "FJ",2005,NA,NA,7,9,18,18,14,16,6,NA,NA,NA,7,7,9,6,4,6,5,NA "FJ",2006,0,0,0,8,11,4,7,5,4,NA,0,1,1,12,5,6,4,6,0,NA "FJ",2007,NA,NA,1,7,7,7,4,1,4,NA,NA,NA,7,11,4,6,5,1,2,NA "FJ",2008,NA,NA,NA,10,10,4,9,4,4,0,NA,NA,NA,13,6,5,6,3,4,0 "FM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"FM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",1995,NA,NA,0,1,0,3,1,0,0,NA,NA,NA,0,0,1,0,0,0,1,NA "FM",1996,NA,NA,0,1,0,0,1,2,0,NA,NA,NA,0,1,1,1,3,0,0,NA "FM",1997,NA,NA,0,0,0,0,2,0,0,NA,NA,NA,1,1,1,2,2,0,2,NA "FM",1998,NA,NA,4,5,3,4,1,0,5,NA,NA,NA,2,1,0,1,2,0,0,NA "FM",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",2000,NA,NA,0,2,0,1,0,0,1,NA,NA,NA,4,3,1,1,0,1,1,NA "FM",2001,NA,NA,0,2,0,0,2,1,0,NA,NA,NA,1,0,1,0,0,1,0,NA "FM",2002,NA,NA,2,0,1,1,1,1,0,NA,NA,NA,3,5,1,1,2,0,2,NA "FM",2003,NA,NA,0,3,2,2,0,2,1,NA,NA,NA,4,4,4,1,1,2,1,NA "FM",2004,NA,NA,0,4,0,2,0,2,1,NA,NA,NA,3,4,4,1,1,3,1,NA "FM",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FM",2006,7,7,14,21,3,6,8,6,1,NA,0,5,5,23,5,7,4,6,4,NA "FM",2007,NA,NA,1,8,5,4,0,1,0,NA,NA,NA,5,11,6,2,2,2,0,NA "FM",2008,1,0,1,9,3,1,3,1,0,0,1,0,1,10,2,5,1,0,0,0 "FR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"FR",1995,NA,NA,30,156,431,502,414,297,496,NA,NA,NA,36,138,226,176,90,92,365,NA "FR",1996,NA,NA,36,124,335,413,351,248,475,NA,NA,NA,22,124,195,131,79,82,376,NA "FR",1997,NA,NA,24,113,288,362,271,194,343,NA,NA,NA,17,117,166,115,80,57,274,NA "FR",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "FR",1999,NA,NA,13,147,267,310,276,157,318,NA,NA,NA,25,110,145,120,80,60,284,NA "FR",2000,NA,NA,10,136,248,247,211,125,244,NA,NA,NA,18,108,127,89,46,43,155,NA "FR",2001,NA,NA,10,124,230,260,205,119,211,NA,NA,NA,17,131,132,102,63,40,183,NA "FR",2002,NA,NA,24,138,265,223,219,119,180,NA,NA,NA,13,106,127,90,56,33,161,NA "FR",2003,NA,NA,18,129,249,223,190,127,210,NA,NA,NA,16,114,129,79,44,32,159,NA "FR",2004,NA,NA,13,109,222,220,200,138,216,NA,NA,NA,11,96,116,82,53,34,171,NA "FR",2005,7,5,12,127,212,222,196,134,205,0,5,11,16,104,134,82,56,38,180,0 "FR",2006,8,9,17,137,214,238,209,153,278,0,4,11,15,112,158,91,67,44,170,0 "FR",2007,8,9,17,120,225,196,219,156,273,0,6,14,20,127,167,91,56,61,188,0 "FR",2008,5,5,10,73,136,161,134,110,175,1,1,7,8,57,96,71,50,38,101,0 "GA",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"GA",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1995,NA,NA,3,45,74,80,54,30,15,NA,NA,NA,9,47,54,28,25,19,3,NA "GA",1996,NA,NA,0,28,44,40,22,11,2,NA,NA,NA,5,33,26,19,11,4,4,NA "GA",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",1998,NA,NA,14,93,159,129,76,43,32,NA,NA,NA,15,97,85,67,32,30,14,NA "GA",1999,NA,NA,14,98,158,129,76,43,32,NA,NA,NA,15,97,110,67,32,30,14,NA "GA",2001,NA,NA,21,137,205,147,73,60,46,NA,NA,NA,27,127,139,73,34,21,21,NA "GA",2002,NA,NA,10,137,173,148,63,27,40,NA,NA,NA,18,125,140,71,32,21,28,NA "GA",2003,NA,NA,14,165,225,149,103,48,22,NA,NA,NA,16,138,144,107,51,33,18,NA "GA",2004,NA,NA,17,197,289,143,83,47,50,NA,NA,NA,15,173,141,80,37,24,27,NA "GA",2005,NA,NA,13,123,199,140,70,38,25,NA,NA,NA,19,128,123,88,29,29,18,NA "GA",2006,NA,NA,20,157,207,148,89,40,23,NA,NA,NA,19,160,123,79,39,20,21,NA "GA",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GA",2008,1,13,14,209,297,196,104,45,39,NA,1,28,29,209,158,93,59,32,17,NA "GB",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"GB",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GB",1997,NA,NA,2,68,87,90,84,60,107,NA,NA,NA,8,67,64,43,34,24,51,NA "GB",1998,NA,NA,11,103,164,141,108,105,225,NA,NA,NA,9,105,103,71,44,39,114,NA "GB",1999,NA,NA,8,68,93,68,53,51,126,NA,NA,NA,6,55,80,60,29,30,69,NA "GB",2000,NA,NA,8,86,130,96,87,75,138,NA,NA,NA,9,95,114,60,31,31,67,NA "GB",2001,NA,NA,10,99,135,105,96,81,117,NA,NA,NA,15,74,104,57,54,38,105,NA "GB",2002,NA,NA,6,94,142,132,98,90,153,NA,NA,NA,6,82,131,66,44,33,93,NA "GB",2003,NA,NA,13,101,182,128,81,59,92,NA,NA,NA,14,108,148,88,47,17,55,NA "GB",2004,NA,NA,10,118,203,148,103,85,94,NA,NA,NA,13,126,176,85,47,33,65,NA "GB",2005,4,5,9,135,200,166,95,95,124,0,0,14,14,115,163,80,39,28,83,1 "GB",2006,3,6,9,173,244,213,148,88,191,0,2,20,22,168,192,112,60,42,97,0 "GB",2007,2,11,13,183,286,223,169,97,202,1,1,19,20,145,222,91,58,45,138,2 "GB",2008,1,4,5,125,188,155,111,61,99,0,4,12,16,134,181,90,27,28,62,0 "GD",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"GD",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1997,NA,NA,0,0,1,0,0,0,0,NA,NA,NA,0,1,0,0,0,0,0,NA "GD",1998,NA,NA,0,1,0,1,0,0,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",1999,NA,NA,0,0,1,0,0,1,0,NA,NA,NA,0,0,0,0,0,0,1,NA "GD",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",2003,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA "GD",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA "GD",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GD",2006,0,0,0,0,1,0,0,0,0,NA,0,0,0,0,0,0,0,0,0,NA "GD",2007,NA,NA,NA,NA,NA,NA,NA,1,1,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA "GD",2008,NA,NA,NA,1,NA,1,2,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"GE",1995,NA,NA,2,20,30,25,40,18,12,NA,NA,NA,2,8,17,17,18,7,5,NA "GE",1996,NA,NA,4,27,82,93,76,38,16,NA,NA,NA,1,13,28,36,23,27,11,NA "GE",1997,NA,NA,0,75,97,91,67,58,16,NA,NA,NA,0,38,46,40,36,19,12,NA "GE",1998,NA,NA,4,64,91,99,58,52,19,NA,NA,NA,4,41,52,25,14,17,7,NA "GE",1999,NA,NA,5,135,176,151,77,55,23,NA,NA,NA,3,27,40,26,10,10,8,NA "GE",2000,NA,NA,4,76,111,113,63,45,28,NA,NA,NA,1,49,37,33,17,10,5,NA "GE",2001,NA,NA,4,142,233,199,117,46,46,NA,NA,NA,2,63,63,37,22,18,22,NA "GE",2002,NA,NA,1,155,197,181,119,54,42,NA,NA,NA,5,54,68,39,31,20,18,NA "GE",2003,NA,NA,1,112,220,185,111,65,53,NA,NA,NA,1,65,59,56,19,23,17,NA "GE",2004,NA,NA,3,157,292,226,177,80,66,NA,NA,NA,3,87,81,52,32,26,29,NA "GE",2005,0,0,0,226,272,268,207,76,60,NA,0,4,4,109,105,58,46,17,47,NA "GE",2006,0,3,3,315,392,300,241,86,72,0,0,5,5,115,110,71,60,26,34,1 "GE",2007,6,1,7,277,388,308,230,96,75,1,2,4,6,153,140,67,54,17,46,0 "GE",2008,39,122,161,710,974,906,739,427,347,0,25,83,108,396,374,252,171,109,160,0 "GH",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"GH",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GH",1995,NA,NA,42,223,397,398,302,190,112,NA,NA,NA,40,199,272,205,122,88,48,NA "GH",1996,NA,NA,30,216,345,368,255,165,157,NA,NA,NA,32,177,260,200,110,64,43,NA "GH",1997,NA,NA,77,406,941,781,623,367,294,NA,NA,NA,90,363,651,418,276,188,121,NA "GH",1998,NA,NA,83,553,1009,913,775,509,487,NA,NA,NA,85,483,758,493,366,248,243,NA "GH",1999,NA,NA,64,586,1132,1008,767,389,389,NA,NA,NA,80,491,753,492,302,192,180,NA "GH",2000,NA,NA,73,550,1266,1115,811,495,426,NA,NA,NA,74,456,791,566,338,179,176,NA "GH",2001,NA,NA,84,587,1223,1144,857,471,460,NA,NA,NA,128,515,814,623,370,209,227,NA "GH",2002,NA,NA,80,535,1245,1282,883,507,429,NA,NA,NA,98,489,806,592,325,223,238,NA "GH",2003,NA,NA,79,579,1265,1234,924,509,441,NA,NA,NA,83,487,744,586,380,200,203,NA "GH",2004,NA,NA,54,532,1246,1250,854,472,413,NA,NA,NA,69,454,701,518,303,202,191,NA "GH",2005,NA,NA,49,592,1201,1311,944,462,414,NA,NA,NA,68,450,693,527,366,207,221,NA "GH",2006,NA,NA,33,557,1273,1388,956,529,443,NA,NA,NA,70,494,711,515,381,207,229,NA "GH",2007,NA,NA,66,596,1164,1239,861,477,506,NA,NA,NA,75,453,667,564,371,183,207,NA "GH",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GM",1995,NA,NA,3,68,181,88,72,29,24,NA,NA,NA,4,39,61,44,25,12,8,NA "GM",1996,NA,NA,29,42,148,100,66,46,48,NA,NA,NA,20,50,64,46,46,30,22,NA "GM",1997,NA,NA,2,83,219,126,61,63,37,NA,NA,NA,5,55,76,45,20,20,8,NA "GM",1998,NA,NA,6,99,193,158,79,61,35,NA,NA,NA,7,60,95,53,25,18,14,NA "GM",1999,NA,NA,6,99,180,124,86,65,39,NA,NA,NA,10,58,82,54,30,16,13,NA "GM",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GM",2002,NA,NA,2,135,240,160,100,60,37,NA,NA,NA,5,71,112,42,40,21,10,NA "GM",2003,NA,NA,3,162,236,149,83,52,31,NA,NA,NA,8,81,85,62,39,27,17,NA 
"GM",2004,NA,NA,5,145,260,151,103,46,23,NA,NA,NA,7,55,81,59,38,21,18,NA "GM",2005,NA,NA,13,133,292,206,62,53,44,NA,NA,NA,2,84,87,64,38,22,27,NA "GM",2006,13,0,13,126,284,170,112,58,56,NA,5,0,5,88,126,71,49,25,26,NA "GM",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GM",2008,1,6,7,151,307,167,125,87,34,0,1,8,9,94,133,90,43,45,8,0 "GN",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GN",1995,NA,NA,18,244,538,357,189,98,61,NA,NA,NA,28,202,255,153,64,37,19,NA "GN",1996,NA,NA,29,319,631,416,214,133,104,NA,NA,NA,30,223,338,213,111,52,31,NA "GN",1997,NA,NA,25,326,653,483,220,147,100,NA,NA,NA,38,246,383,189,91,48,32,NA "GN",1998,NA,NA,22,409,763,494,271,168,117,NA,NA,NA,37,303,365,202,115,71,25,NA "GN",1999,NA,NA,30,434,736,519,294,173,104,NA,NA,NA,44,345,395,259,110,78,41,NA "GN",2000,NA,NA,39,551,860,570,282,203,103,NA,NA,NA,66,314,446,245,114,82,45,NA "GN",2001,NA,NA,24,506,876,612,325,185,154,NA,NA,NA,59,419,433,249,127,77,46,NA "GN",2002,NA,NA,24,413,958,634,336,139,149,NA,NA,NA,42,399,439,259,109,77,50,NA 
"GN",2003,NA,NA,34,617,1052,671,368,172,134,NA,NA,NA,53,353,451,307,137,106,40,NA "GN",2004,NA,NA,38,728,1091,726,389,199,135,NA,NA,NA,62,470,521,334,159,100,63,NA "GN",2005,NA,NA,51,749,1165,778,463,195,130,NA,NA,NA,65,594,583,354,203,94,55,NA "GN",2006,NA,NA,31,834,1168,916,512,274,162,NA,NA,NA,85,586,581,396,187,118,53,NA "GN",2007,NA,NA,46,901,1315,936,503,240,204,NA,NA,NA,76,631,613,367,207,106,79,NA "GN",2008,NA,NA,56,970,1419,985,561,264,198,NA,NA,NA,64,610,686,377,190,93,88,NA "GQ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1995,NA,NA,8,15,45,37,15,11,7,NA,NA,NA,2,18,28,20,4,7,1,NA "GQ",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",1997,NA,NA,5,32,40,36,25,8,4,NA,NA,NA,3,23,20,14,10,3,3,NA "GQ",1998,NA,NA,6,30,46,39,29,16,11,NA,NA,NA,3,37,31,20,7,5,4,NA "GQ",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",2004,NA,NA,5,50,63,54,41,17,15,NA,NA,NA,9,45,48,30,15,10,4,NA "GQ",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GQ",2008,NA,NA,8,68,95,85,44,17,11,0,NA,NA,10,57,66,35,23,13,9,0 "GR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"GR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GR",1997,NA,NA,0,16,30,34,42,39,47,NA,NA,NA,0,14,19,8,3,9,26,NA "GR",1998,NA,NA,15,15,31,31,22,31,47,NA,NA,NA,11,20,12,13,8,5,26,NA "GR",1999,NA,NA,3,11,11,17,18,18,27,NA,NA,NA,1,5,8,8,2,3,10,NA "GR",2000,NA,NA,1,10,22,32,24,19,46,NA,NA,NA,0,2,9,10,5,6,25,NA "GR",2001,NA,NA,0,10,23,29,20,17,37,NA,NA,NA,0,7,11,7,4,7,27,NA "GR",2002,NA,NA,0,1,13,27,33,30,10,NA,NA,NA,0,0,3,17,11,5,2,NA "GR",2003,NA,NA,2,20,28,25,23,25,36,NA,NA,NA,0,7,9,7,2,5,18,NA "GR",2004,NA,NA,1,9,14,22,18,13,34,NA,NA,NA,0,3,7,10,3,3,14,NA "GR",2005,0,1,1,14,25,22,14,12,23,5,0,0,0,13,18,8,7,2,17,0 "GR",2006,0,0,0,11,32,22,24,22,27,3,0,0,0,13,12,8,5,6,24,1 "GR",2007,0,1,1,21,22,34,28,15,54,1,0,0,0,13,19,11,8,3,24,2 "GR",2008,0,0,0,5,14,12,4,6,9,0,0,0,0,5,9,2,4,3,7,0 "GT",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"GT",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",1995,NA,NA,51,235,280,236,165,142,139,NA,NA,NA,51,224,255,221,146,129,94,NA "GT",1996,NA,NA,75,230,230,218,152,132,142,NA,NA,NA,48,214,250,212,172,134,99,NA "GT",1997,NA,NA,45,246,217,229,161,155,150,NA,NA,NA,33,228,225,183,120,109,117,NA "GT",1998,NA,NA,60,206,248,234,163,148,152,NA,NA,NA,45,203,216,199,160,118,103,NA "GT",1999,NA,NA,34,216,248,235,171,141,158,NA,NA,NA,24,229,230,194,174,121,89,NA "GT",2000,NA,NA,36,220,236,216,177,112,140,NA,NA,NA,41,199,167,175,135,87,111,NA "GT",2001,NA,NA,27,171,201,169,137,98,97,NA,NA,NA,33,180,173,118,101,74,90,NA "GT",2002,NA,NA,27,217,219,171,158,117,146,NA,NA,NA,42,192,171,147,116,68,74,NA "GT",2003,NA,NA,29,175,200,169,156,125,128,NA,NA,NA,24,186,179,157,104,88,75,NA "GT",2004,NA,NA,43,282,291,209,210,144,129,NA,NA,NA,31,278,201,227,144,72,78,NA "GT",2005,NA,NA,39,251,258,185,187,127,115,NA,NA,NA,38,339,245,277,176,88,95,NA "GT",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GT",2007,18,115,74,169,207,226,203,159,155,NA,28,55,183,163,246,145,153,143,122,NA "GT",2008,0,19,19,220,257,193,163,124,167,0,0,22,22,199,189,162,154,100,101,0 
"GU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GU",2000,NA,NA,2,1,6,6,9,6,9,NA,NA,NA,0,3,1,2,5,2,2,NA "GU",2001,NA,NA,0,1,4,10,9,3,6,NA,NA,NA,0,2,3,3,4,2,1,NA "GU",2002,NA,NA,3,3,5,5,6,12,4,NA,NA,NA,5,1,6,3,3,2,7,NA "GU",2003,NA,NA,0,2,1,3,4,7,5,NA,NA,NA,1,3,1,4,2,1,5,NA "GU",2004,NA,NA,0,0,1,2,6,2,3,NA,NA,NA,0,0,1,2,3,1,1,NA "GU",2005,NA,NA,0,2,4,4,2,2,4,NA,NA,NA,0,3,1,1,2,0,2,NA "GU",2006,NA,NA,0,1,1,2,3,2,6,NA,NA,NA,0,0,0,1,1,2,2,NA "GU",2007,0,0,0,0,0,2,0,0,0,NA,0,0,0,0,0,0,1,1,1,NA "GU",2008,0,0,0,0,1,7,8,3,4,0,0,0,0,1,2,0,1,1,3,0 "GW",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"GW",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1996,NA,NA,9,110,159,113,99,60,36,NA,NA,NA,7,49,80,94,62,31,13,NA "GW",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",2000,NA,NA,2,52,92,80,64,39,19,NA,NA,NA,4,30,46,47,24,15,12,NA "GW",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",2002,NA,NA,7,101,146,128,70,52,34,NA,NA,NA,9,80,108,66,49,37,12,NA "GW",2003,NA,NA,9,101,153,118,108,63,27,NA,NA,NA,7,97,82,78,58,38,24,NA "GW",2004,NA,NA,16,86,175,147,130,84,51,NA,NA,NA,11,87,115,103,98,54,29,NA "GW",2005,NA,NA,14,116,167,153,130,72,42,NA,NA,NA,13,78,110,92,82,44,19,NA "GW",2006,NA,NA,8,86,178,143,90,74,24,NA,NA,NA,7,82,116,90,81,36,15,NA "GW",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GW",2008,NA,NA,8,119,194,191,109,79,30,NA,NA,NA,12,85,129,123,92,41,11,NA "GY",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"GY",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",1995,NA,NA,7,8,5,6,9,6,7,NA,NA,NA,3,5,7,6,5,2,4,NA "GY",1996,NA,NA,4,8,14,4,5,4,7,NA,NA,NA,4,8,4,4,5,0,0,NA "GY",1997,NA,NA,1,15,19,12,2,5,8,NA,NA,NA,3,9,8,8,4,7,4,NA "GY",1998,NA,NA,9,29,56,42,36,2,12,NA,NA,NA,13,32,38,26,4,8,11,NA "GY",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "GY",2000,NA,NA,4,20,19,14,7,6,9,NA,NA,NA,1,11,8,7,5,5,3,NA "GY",2001,NA,NA,1,15,47,44,12,2,1,NA,NA,NA,0,6,16,16,9,3,2,NA "GY",2002,NA,NA,20,49,90,94,51,19,23,NA,NA,NA,26,32,36,34,19,15,18,NA "GY",2003,NA,NA,10,56,111,114,58,27,13,NA,NA,NA,12,35,61,56,27,10,5,NA "GY",2004,NA,NA,9,45,113,97,87,NA,4,NA,NA,NA,15,35,38,29,23,NA,15,NA "GY",2005,NA,NA,12,48,130,116,81,41,20,NA,NA,NA,14,41,62,41,30,11,9,NA "GY",2006,NA,NA,6,37,61,59,40,15,5,NA,NA,NA,1,15,21,20,14,3,1,NA "GY",2007,NA,NA,2,15,43,44,41,12,8,NA,NA,NA,1,20,19,17,5,3,3,NA "GY",2008,2,5,7,29,51,71,47,22,11,0,0,4,4,20,23,13,12,3,7,0 "HK",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"HK",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1997,NA,NA,5,90,122,174,198,271,593,NA,NA,NA,12,85,114,83,49,64,176,NA "HK",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HK",1999,NA,NA,3,88,121,162,173,233,432,NA,NA,NA,8,85,109,72,50,43,188,NA "HK",2000,NA,NA,4,78,102,160,211,236,578,NA,NA,NA,5,65,115,86,44,45,211,NA "HK",2001,NA,NA,6,79,99,162,196,201,519,NA,NA,NA,13,88,119,83,58,34,200,NA "HK",2002,NA,NA,2,99,105,163,207,218,543,NA,NA,NA,8,97,115,90,57,35,153,NA "HK",2003,NA,NA,8,104,91,140,195,180,472,NA,NA,NA,10,88,136,102,65,43,160,NA "HK",2004,NA,NA,3,59,94,128,226,175,477,NA,NA,NA,6,97,112,87,56,34,140,NA "HK",2005,0,3,3,76,84,108,200,168,453,NA,0,3,3,67,81,92,57,34,135,NA "HK",2006,0,3,3,75,84,135,174,161,439,NA,1,8,9,59,97,73,54,42,132,NA "HK",2007,1,4,5,63,80,110,177,175,425,NA,0,1,1,59,94,74,64,37,137,NA "HK",2008,0,0,0,59,79,95,166,208,414,0,0,8,8,65,84,65,45,40,131,0 "HN",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"HN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HN",1995,NA,NA,42,280,540,204,130,236,58,NA,NA,NA,54,208,292,134,76,136,48,NA "HN",1996,NA,NA,51,247,389,142,108,190,21,NA,NA,NA,43,167,245,106,69,149,17,NA "HN",1997,NA,NA,26,214,321,111,78,140,28,NA,NA,NA,38,166,174,80,61,116,26,NA "HN",1998,NA,NA,147,277,256,211,205,181,50,NA,NA,NA,103,206,192,158,152,135,38,NA "HN",1999,NA,NA,150,288,268,219,220,190,52,NA,NA,NA,100,214,201,164,160,140,40,NA "HN",2000,NA,NA,30,123,371,246,277,214,43,NA,NA,NA,25,21,269,258,270,160,38,NA "HN",2001,NA,NA,12,47,509,344,337,257,27,NA,NA,NA,13,25,347,352,339,196,34,NA "HN",2002,NA,NA,76,29,519,353,338,257,24,NA,NA,NA,65,23,351,339,354,193,35,NA "HN",2003,NA,NA,52,20,344,235,227,161,17,NA,NA,NA,42,15,232,225,236,127,23,NA "HN",2004,NA,NA,54,20,379,259,247,189,18,NA,NA,NA,40,13,218,211,220,121,23,NA "HN",2005,NA,NA,13,238,280,215,152,134,152,NA,NA,NA,27,219,222,125,107,81,104,NA "HN",2006,5,16,21,213,297,213,139,96,147,NA,4,24,28,206,234,123,85,87,129,NA "HN",2007,5,16,21,204,293,194,158,123,180,NA,0,29,29,185,175,110,106,84,112,NA "HN",2008,1,10,11,254,263,202,140,103,174,0,0,26,26,152,170,114,90,82,116,0 "HR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"HR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1995,NA,NA,6,38,97,210,132,178,141,NA,NA,NA,10,50,57,57,38,60,130,NA "HR",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",1997,NA,NA,12,65,88,180,124,118,117,NA,NA,NA,13,43,43,54,28,52,136,NA "HR",1998,NA,NA,14,48,81,177,176,106,129,NA,NA,NA,19,44,64,54,38,48,131,NA "HR",1999,NA,NA,1,29,45,83,93,46,45,NA,NA,NA,2,14,18,15,15,16,53,NA "HR",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",2001,NA,NA,0,32,64,186,126,88,64,NA,NA,NA,2,32,36,54,34,28,92,NA "HR",2002,NA,NA,1,18,40,75,77,32,43,NA,NA,NA,0,18,18,20,19,16,54,NA "HR",2003,NA,NA,0,15,27,68,80,42,60,NA,NA,NA,1,14,19,18,10,15,69,NA "HR",2004,NA,NA,1,18,32,68,81,39,53,NA,NA,NA,3,18,17,11,12,7,56,NA "HR",2005,0,1,1,24,27,48,72,47,34,0,0,1,1,12,18,15,11,6,56,0 "HR",2006,0,0,0,20,23,58,69,30,48,0,1,1,2,16,26,16,22,7,59,0 "HR",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HR",2008,0,0,0,15,25,34,63,32,47,0,0,1,1,11,16,13,15,12,44,0 "HT",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"HT",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HT",1996,NA,NA,148,358,438,289,160,87,97,NA,NA,NA,224,417,492,303,176,62,100,NA "HT",1997,NA,NA,156,683,817,453,260,150,162,NA,NA,NA,149,760,878,515,250,119,97,NA "HT",1998,NA,NA,188,804,971,656,331,177,142,NA,NA,NA,208,827,958,620,300,141,119,NA "HT",1999,NA,NA,286,812,1059,672,348,186,145,NA,NA,NA,285,919,918,614,312,162,110,NA "HT",2000,NA,NA,67,836,898,613,350,147,118,NA,NA,NA,96,914,857,513,275,132,71,NA "HT",2001,NA,NA,72,752,785,587,319,169,112,NA,NA,NA,113,882,843,498,273,109,93,NA "HT",2002,NA,NA,79,903,904,572,377,184,148,NA,NA,NA,118,980,851,550,303,120,99,NA "HT",2003,NA,NA,89,1002,981,625,406,208,147,NA,NA,NA,122,1114,1064,606,378,165,108,NA "HT",2004,NA,NA,94,918,964,606,376,207,176,NA,NA,NA,137,1146,1045,688,386,176,125,NA "HT",2005,NA,NA,69,1045,1035,701,451,222,156,NA,NA,NA,116,1097,1099,633,414,170,132,NA "HT",2006,NA,NA,93,1110,1132,672,455,201,174,NA,NA,NA,137,1113,1039,638,387,184,126,NA "HT",2007,26,78,104,1166,1199,760,471,219,192,NA,27,120,147,1261,1107,632,344,182,131,NA "HT",2008,8,82,90,1137,1337,696,491,242,175,0,19,135,154,1272,1204,677,378,179,139,0 "HU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"HU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "HU",1996,NA,NA,3,28,139,151,209,140,105,NA,NA,NA,2,30,42,51,63,27,76,NA "HU",1997,NA,NA,0,8,51,163,164,90,69,NA,NA,NA,1,9,25,35,24,19,44,NA "HU",1998,NA,NA,0,14,64,149,163,62,59,NA,NA,NA,0,13,27,34,25,17,40,NA "HU",1999,NA,NA,2,16,48,155,183,74,47,NA,NA,NA,4,17,19,37,19,7,32,NA "HU",2000,NA,NA,0,8,24,85,104,58,27,NA,NA,NA,1,7,17,19,22,10,30,NA "HU",2001,NA,NA,1,11,42,97,133,73,42,NA,NA,NA,0,10,17,31,27,13,37,NA "HU",2002,NA,NA,1,10,41,102,145,61,39,NA,NA,NA,1,9,27,36,26,14,38,NA "HU",2003,NA,NA,0,6,30,89,140,70,38,NA,NA,NA,0,16,26,27,30,11,33,NA "HU",2004,NA,NA,2,7,38,99,145,64,63,NA,NA,NA,2,6,23,25,29,14,40,NA "HU",2005,0,0,0,6,24,67,117,67,39,0,0,1,1,5,13,11,22,15,33,0 "HU",2006,0,2,2,10,31,71,98,54,33,0,0,3,3,17,16,19,28,11,29,0 "HU",2007,0,0,0,7,31,48,103,50,35,0,1,2,3,12,22,18,17,6,29,0 "HU",2008,0,0,0,12,23,47,86,72,24,0,0,0,0,11,13,15,12,5,25,1 "ID",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"ID",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",1995,NA,NA,6,203,297,306,302,228,109,NA,NA,NA,16,160,244,282,192,90,33,NA "ID",1996,NA,NA,28,781,1349,1443,1305,1037,510,NA,NA,NA,54,860,1175,1091,915,586,247,NA "ID",1997,NA,NA,46,1320,2139,2221,2122,1461,753,NA,NA,NA,65,1305,1671,1751,1365,788,357,NA "ID",1998,NA,NA,78,2732,3873,4054,3486,2654,1517,NA,NA,NA,108,2674,3412,3130,2335,1610,617,NA "ID",1999,NA,NA,106,3741,5277,4999,4401,3267,1697,NA,NA,NA,140,3595,12859,3624,2812,1909,745,NA "ID",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ID",2001,NA,NA,298,5400,7279,6241,5538,4076,1914,NA,NA,NA,354,5213,6040,4849,3537,2381,845,NA "ID",2002,NA,NA,569,7826,10248,8760,7668,5332,2891,NA,NA,NA,650,7366,8794,6773,4943,3118,1292,NA "ID",2003,NA,NA,532,9570,12647,10925,9558,6720,3615,NA,NA,NA,608,8734,10127,7889,6085,3907,1649,NA "ID",2004,NA,NA,697,12546,17137,14881,14772,9669,5197,NA,NA,NA,803,11509,13597,10953,9586,5422,2212,NA "ID",2005,NA,NA,846,15215,20906,18401,17847,13509,6390,NA,NA,NA,946,13916,16393,13022,10927,7539,2783,NA 
"ID",2006,NA,NA,899,16285,22752,20332,20059,15869,7348,NA,NA,NA,985,14377,17628,14421,12376,8786,3203,NA "ID",2007,213,636,849,14835,21297,18606,18283,14176,6762,NA,148,772,920,13371,16055,13211,11391,7965,2896,NA "ID",2008,161,710,871,15339,22325,19224,18545,14907,6831,0,120,895,1015,13987,16292,13513,11899,8485,3143,0 "IE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IE",1998,NA,NA,1,11,8,21,8,8,19,NA,NA,NA,0,5,11,1,1,4,13,NA "IE",1999,NA,NA,0,7,15,10,12,7,19,NA,NA,NA,0,9,9,3,8,3,13,NA "IE",2000,NA,NA,0,10,7,7,6,4,12,NA,NA,NA,0,13,8,13,6,7,15,NA "IE",2001,NA,NA,0,6,12,14,8,7,7,NA,NA,NA,0,4,6,3,1,1,5,NA "IE",2002,NA,NA,0,7,18,13,14,12,6,NA,NA,NA,0,4,3,5,2,0,4,NA "IE",2003,NA,NA,0,10,11,13,14,7,11,NA,NA,NA,0,4,7,6,4,1,10,NA 
"IE",2004,NA,NA,1,4,17,10,12,7,10,NA,NA,NA,0,10,9,2,2,3,6,NA "IE",2005,0,1,1,6,10,21,10,7,6,0,0,0,0,9,10,3,3,0,8,0 "IE",2006,0,0,0,8,18,17,11,16,13,1,0,0,0,11,20,8,4,3,3,0 "IE",2007,0,0,0,26,49,32,28,26,26,0,0,0,0,14,28,22,14,2,4,0 "IE",2008,1,1,2,9,18,6,21,10,13,0,0,0,0,12,16,8,3,3,2,0 "IL",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IL",1996,NA,NA,1,5,18,21,15,11,33,NA,NA,NA,2,8,4,2,4,5,18,NA "IL",1997,NA,NA,5,9,27,20,17,19,43,NA,NA,NA,2,16,10,18,7,3,27,NA "IL",1998,NA,NA,1,20,29,35,19,16,30,NA,NA,NA,1,9,10,7,9,9,26,NA "IL",1999,NA,NA,2,13,22,22,16,10,27,NA,NA,NA,3,4,15,7,2,5,22,NA "IL",2000,NA,NA,0,16,28,17,24,10,31,NA,NA,NA,2,11,15,7,3,7,25,NA "IL",2001,NA,NA,1,7,26,17,17,10,42,NA,NA,NA,2,6,14,8,2,2,18,NA "IL",2002,NA,NA,2,7,18,13,12,9,23,NA,NA,NA,3,19,12,15,7,8,16,NA "IL",2003,NA,NA,2,9,12,22,10,6,25,NA,NA,NA,1,13,13,13,5,1,18,NA "IL",2004,NA,NA,2,2,7,13,10,3,16,NA,NA,NA,1,4,12,8,2,0,11,NA 
"IL",2005,0,0,0,5,10,12,12,5,14,0,1,0,1,3,9,8,6,1,9,0 "IL",2006,0,0,0,3,12,14,4,6,10,0,0,0,0,1,5,4,4,2,7,0 "IL",2007,0,1,1,9,20,23,13,10,17,0,0,1,1,4,17,5,3,6,14,0 "IL",2008,0,0,0,14,25,18,14,15,25,0,1,1,2,4,11,14,8,4,19,0 "IN",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IN",1995,NA,NA,16,334,391,287,216,123,68,NA,NA,NA,32,179,169,80,49,30,11,NA "IN",1996,NA,NA,47,966,1143,934,666,424,213,NA,NA,NA,79,618,571,281,167,103,42,NA "IN",1997,NA,NA,50,1257,1351,1056,753,499,245,NA,NA,NA,125,861,799,369,187,102,54,NA "IN",1998,NA,NA,84,1773,2013,1851,1389,885,419,NA,NA,NA,190,1375,1121,670,349,200,102,NA "IN",1999,NA,NA,327,7058,8856,7900,6172,3864,1982,NA,NA,NA,785,5497,4848,2773,1504,898,436,NA "IN",2000,NA,NA,1588,20963,31090,30829,24230,15308,8534,NA,NA,NA,2250,14495,17287,11768,7516,4594,2697,NA "IN",2001,NA,NA,1063,22483,30007,29649,23961,14879,7779,NA,NA,NA,2125,15973,16743,10103,5633,3353,1526,NA 
"IN",2002,NA,NA,2551,39923,54719,55829,44532,28199,14960,NA,NA,NA,4200,28573,31946,21378,13233,7636,3814,NA "IN",2003,NA,NA,2411,47251,61758,63587,52865,33739,18018,NA,NA,NA,4745,34511,36317,23320,14055,8322,3985,NA "IN",2004,NA,NA,3018,57208,72132,74450,62173,40769,22388,NA,NA,NA,5860,41017,42808,27000,16121,9705,5016,NA "IN",2005,NA,NA,3185,62620,74678,76870,64843,43038,24726,NA,NA,NA,6292,45136,45629,28577,17042,10513,5408,NA "IN",2006,NA,NA,3566,68346,79037,82939,71621,49320,28716,NA,NA,NA,6963,47702,47420,31128,18870,11752,6417,NA "IN",2007,NA,NA,4305,73947,83850,88045,76408,53414,31922,NA,NA,NA,7575,50289,49519,32407,20316,13195,7395,NA "IN",2008,NA,NA,4648,77121,83798,90498,78815,56928,36079,NA,NA,NA,8319,51485,49887,33664,21486,14407,8357,NA "IQ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IQ",1995,NA,NA,1125,862,1409,1085,863,900,271,NA,NA,NA,725,304,1208,915,800,886,200,NA "IQ",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"IQ",1997,NA,NA,416,791,708,541,832,664,208,NA,NA,NA,384,738,665,499,722,641,192,NA "IQ",1998,NA,NA,453,879,781,583,913,735,242,NA,NA,NA,426,806,740,542,806,712,232,NA "IQ",1999,NA,NA,519,1434,1246,1081,704,632,376,NA,NA,NA,509,1208,978,824,571,527,202,NA "IQ",2000,NA,NA,21,627,317,297,205,135,101,NA,NA,NA,37,338,241,136,134,103,87,NA "IQ",2001,NA,NA,10,722,737,275,260,200,142,NA,NA,NA,26,362,295,147,171,126,86,NA "IQ",2002,NA,NA,47,706,923,308,284,205,158,NA,NA,NA,45,338,288,172,176,129,116,NA "IQ",2003,NA,NA,30,659,876,355,293,168,143,NA,NA,NA,43,258,241,154,160,143,34,NA "IQ",2004,NA,NA,28,615,770,288,244,183,125,NA,NA,NA,57,334,243,139,162,113,80,NA "IQ",2005,NA,NA,13,424,644,261,245,189,148,NA,NA,NA,44,305,260,151,197,135,80,NA "IQ",2006,0,14,14,409,593,278,230,147,107,NA,0,38,38,338,264,133,154,111,70,NA "IQ",2007,0,20,20,319,531,276,223,188,126,NA,0,34,34,289,228,154,134,130,74,NA "IQ",2008,2,16,18,348,525,317,273,224,147,0,1,53,54,377,281,125,175,161,125,0 "IR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"IR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IR",1995,NA,NA,118,751,754,636,494,737,921,NA,NA,NA,234,1039,890,664,613,685,788,NA "IR",1996,NA,NA,63,390,449,431,274,450,577,NA,NA,NA,113,599,412,347,323,452,534,NA "IR",1997,NA,NA,54,391,470,420,304,395,608,NA,NA,NA,92,518,393,361,342,341,582,NA "IR",1998,NA,NA,35,426,492,400,245,363,579,NA,NA,NA,87,561,403,307,290,431,522,NA "IR",1999,NA,NA,27,370,460,383,260,335,591,NA,NA,NA,51,551,360,277,279,396,637,NA "IR",2000,NA,NA,29,438,467,387,295,344,642,NA,NA,NA,77,593,410,322,320,407,647,NA "IR",2001,NA,NA,37,469,529,371,309,299,649,NA,NA,NA,104,621,401,278,327,442,693,NA "IR",2002,NA,NA,29,457,502,375,322,302,668,NA,NA,NA,77,558,332,275,298,439,732,NA "IR",2003,NA,NA,32,413,528,396,282,294,673,NA,NA,NA,76,442,282,254,300,440,776,NA "IR",2004,NA,NA,16,360,542,357,305,298,640,NA,NA,NA,65,419,301,213,293,378,710,NA "IR",2005,NA,NA,16,352,531,338,281,260,630,NA,NA,NA,45,394,205,186,260,382,701,NA "IR",2006,4,8,12,357,495,365,318,249,686,NA,3,45,48,430,236,185,292,336,793,NA "IR",2007,3,7,10,311,511,330,285,261,680,NA,2,40,42,394,236,173,268,387,813,NA "IR",2008,0,11,11,292,466,330,322,267,706,0,2,40,42,386,254,137,263,367,879,0 "IS",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"IS",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IS",1995,NA,NA,0,0,0,0,0,0,1,NA,NA,NA,0,0,0,0,0,0,1,NA "IS",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,1,0,0,0,0,0,NA "IS",1997,NA,NA,0,0,0,0,0,0,1,NA,NA,NA,0,1,0,0,0,1,2,NA "IS",1998,NA,NA,0,0,1,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,1,NA "IS",1999,NA,NA,0,NA,NA,1,NA,NA,NA,NA,NA,NA,0,NA,1,NA,NA,NA,NA,NA "IS",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,NA,1,NA,NA,NA,NA,NA "IS",2001,NA,NA,0,1,NA,NA,NA,NA,1,NA,NA,NA,0,NA,NA,1,NA,NA,NA,NA "IS",2002,NA,NA,0,1,0,0,0,0,0,NA,NA,NA,0,0,1,0,0,0,0,NA "IS",2003,NA,NA,0,0,0,0,0,0,1,NA,NA,NA,0,0,0,0,0,0,0,NA "IS",2004,NA,NA,0,0,0,0,1,1,0,NA,NA,NA,0,0,0,0,0,0,0,NA "IS",2005,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0 "IS",2006,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,0,0,0,1,0 "IS",2007,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0 "IS",2008,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0 "IT",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"IT",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "IT",1995,NA,NA,9,59,202,157,94,124,289,NA,NA,NA,7,52,93,57,40,51,168,NA "IT",1996,NA,NA,12,72,196,168,125,155,319,NA,NA,NA,2,53,116,60,33,56,172,NA "IT",1997,NA,NA,14,93,228,244,168,187,381,NA,NA,NA,5,74,129,90,48,68,201,NA "IT",1998,NA,NA,15,128,327,248,189,226,429,NA,NA,NA,10,105,150,110,58,75,283,NA "IT",1999,NA,NA,7,78,155,137,114,104,247,NA,NA,NA,8,49,63,63,35,32,141,NA "IT",2000,NA,NA,12,63,96,75,58,54,112,NA,NA,NA,6,38,58,33,13,19,39,NA "IT",2001,NA,NA,4,43,130,98,63,50,99,NA,NA,NA,4,37,77,46,24,14,54,NA "IT",2002,NA,NA,6,51,139,127,74,68,134,NA,NA,NA,6,51,94,55,18,28,85,NA "IT",2003,NA,NA,19,79,219,168,80,61,146,NA,NA,NA,6,63,121,77,24,13,91,NA "IT",2004,NA,NA,34,52,130,115,64,43,123,NA,NA,NA,16,48,73,39,21,19,56,NA "IT",2005,7,1,8,93,191,137,101,61,115,24,0,3,3,80,145,56,25,19,70,9 "IT",2006,6,1,7,113,201,197,105,75,152,19,3,6,9,88,165,82,48,16,88,10 "IT",2007,1,2,3,75,170,113,87,48,106,11,0,7,7,74,94,58,31,19,76,6 "IT",2008,1,12,13,78,148,137,72,42,104,5,3,7,10,65,106,56,24,21,56,1 "JM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"JM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JM",1995,NA,NA,2,9,14,9,11,8,9,NA,NA,NA,2,7,6,5,5,2,2,NA "JM",1996,NA,NA,1,9,10,12,12,8,3,NA,NA,NA,1,5,3,5,2,3,1,NA "JM",1997,NA,NA,1,2,9,16,12,8,1,NA,NA,NA,0,5,6,4,3,1,3,NA "JM",1998,NA,NA,0,3,19,9,10,7,7,NA,NA,NA,1,8,3,8,1,4,2,NA "JM",1999,NA,NA,2,10,16,6,6,15,6,NA,NA,NA,2,5,9,3,4,5,3,NA "JM",2000,NA,NA,0,6,13,13,15,6,5,NA,NA,NA,1,8,8,7,2,5,1,NA "JM",2001,NA,NA,3,10,9,21,5,1,1,NA,NA,NA,2,2,7,6,3,2,3,NA "JM",2002,NA,NA,0,9,11,8,7,7,4,NA,NA,NA,1,3,3,3,1,3,0,NA "JM",2003,NA,NA,1,11,9,14,12,4,6,NA,NA,NA,2,7,8,2,3,0,2,NA "JM",2004,NA,NA,0,9,7,4,13,8,10,NA,NA,NA,0,4,6,4,1,2,1,NA "JM",2005,NA,NA,0,4,6,6,10,6,7,NA,NA,NA,0,1,5,4,0,1,3,NA "JM",2006,0,0,0,9,10,9,6,6,9,NA,0,0,0,2,5,3,1,0,1,NA "JM",2007,0,0,NA,12,10,7,17,7,3,NA,0,2,2,5,2,6,2,2,3,NA "JM",2008,2,1,3,2,10,10,11,11,5,2,0,1,1,8,3,3,1,1,6,1 "JO",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"JO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JO",1995,NA,NA,0,19,37,17,20,26,11,NA,NA,NA,1,15,4,10,14,12,7,NA "JO",1996,NA,NA,2,22,30,17,13,21,9,NA,NA,NA,1,8,11,8,16,8,4,NA "JO",1997,NA,NA,5,14,18,11,10,19,22,NA,NA,NA,3,8,6,4,4,5,7,NA "JO",1998,NA,NA,0,22,26,7,12,10,6,NA,NA,NA,0,7,6,3,3,7,1,NA "JO",1999,NA,NA,0,16,19,16,10,8,2,NA,NA,NA,0,8,7,2,2,7,5,NA "JO",2000,NA,NA,0,8,16,13,9,14,2,NA,NA,NA,0,8,9,1,2,2,5,NA "JO",2001,NA,NA,2,7,22,10,10,8,7,NA,NA,NA,0,8,6,1,0,9,4,NA "JO",2002,NA,NA,0,8,9,11,12,11,5,NA,NA,NA,0,9,4,3,2,12,5,NA "JO",2003,NA,NA,0,19,20,17,8,13,0,NA,NA,NA,1,6,7,2,3,12,0,NA "JO",2004,NA,NA,0,8,12,14,6,17,0,NA,NA,NA,0,10,4,3,5,12,0,NA "JO",2005,NA,NA,0,8,17,9,4,6,5,NA,NA,NA,1,6,6,6,5,8,5,NA "JO",2006,0,0,0,9,23,16,7,4,10,NA,0,0,0,8,11,3,5,2,6,NA "JO",2007,0,0,0,7,20,14,9,7,5,NA,0,0,0,9,12,6,1,12,7,NA "JO",2008,0,0,0,13,10,3,5,13,5,0,0,0,0,20,15,6,4,7,3,0 "JP",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "JP",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"JP",1995,NA,NA,15,342,627,995,1847,2059,4089,NA,NA,NA,14,258,476,298,476,637,2234,NA "JP",1996,NA,NA,16,309,621,843,1756,1878,3639,NA,NA,NA,7,224,409,262,364,565,1974,NA "JP",1997,NA,NA,8,304,625,798,1793,1908,4055,NA,NA,NA,11,248,455,237,405,547,2177,NA "JP",1998,NA,NA,2,306,597,724,1571,1660,3545,NA,NA,NA,6,243,418,233,329,417,1884,NA "JP",1999,NA,NA,6,290,623,706,1605,1768,4117,NA,NA,NA,7,236,459,253,292,419,2128,NA "JP",2000,NA,NA,2,246,572,676,1494,1509,3816,NA,NA,NA,5,222,464,213,292,384,1958,NA "JP",2001,NA,NA,3,220,576,632,1319,1513,3840,NA,NA,NA,5,175,437,228,250,330,1880,NA "JP",2002,NA,NA,2,191,549,579,1192,1334,3747,NA,NA,NA,3,192,395,259,248,308,1808,NA "JP",2003,NA,NA,1,210,521,550,1063,1388,3731,NA,NA,NA,2,203,395,246,254,313,1966,NA "JP",2004,NA,NA,2,193,462,599,934,1363,3759,NA,NA,NA,6,182,364,230,222,294,1861,NA "JP",2005,NA,NA,9,197,488,605,868,1418,3867,NA,NA,NA,5,187,428,249,224,309,2077,NA "JP",2006,NA,3,3,175,436,529,743,1388,3728,NA,3,2,5,179,361,280,213,256,1863,NA "JP",2007,1,0,1,142,372,512,668,1174,3678,NA,0,3,3,134,318,231,156,212,1832,NA "JP",2008,0,2,2,117,339,456,599,1063,3482,0,0,1,1,115,293,230,173,253,1872,0 "KE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"KE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KE",1995,NA,NA,154,2072,3073,1675,920,485,296,NA,NA,NA,187,1802,1759,741,411,242,117,NA "KE",1996,NA,NA,151,2492,3820,2097,993,498,311,NA,NA,NA,252,2290,2327,926,462,245,114,NA "KE",1997,NA,NA,53,2881,4374,2333,1100,482,284,NA,NA,NA,242,2573,2604,1086,499,242,103,NA "KE",1998,NA,NA,210,3372,5477,2983,1378,626,382,NA,NA,NA,318,3315,3469,1378,656,324,141,NA "KE",1999,NA,NA,237,3835,6078,3349,1545,645,405,NA,NA,NA,373,3850,3997,1596,760,348,179,NA "KE",2000,NA,NA,264,3739,6653,3548,1630,630,414,NA,NA,NA,416,3916,4363,1874,831,347,148,NA "KE",2001,NA,NA,299,4083,7070,3903,1771,723,443,NA,NA,NA,464,4116,4822,2063,935,394,221,NA "KE",2002,NA,NA,299,4445,7708,4306,2023,807,433,NA,NA,NA,392,4542,5465,2267,996,445,190,NA "KE",2003,NA,NA,341,4918,8515,4560,2167,928,567,NA,NA,NA,487,5003,6023,2618,1171,551,309,NA "KE",2004,NA,NA,391,5388,9016,5142,2404,973,576,NA,NA,NA,519,5458,6326,2850,1236,558,312,NA "KE",2005,NA,NA,359,4790,8832,5069,2521,1031,590,NA,NA,NA,577,5144,6521,2781,1266,593,315,NA "KE",2006,NA,NA,387,4708,8229,4975,2467,1037,645,NA,NA,NA,583,4953,6052,2792,1343,604,379,NA "KE",2007,23,451,474,4752,8132,4959,2361,1084,601,NA,24,575,599,4594,5979,2774,1180,542,329,NA "KE",2008,40,411,451,4709,8128,4924,2302,1025,583,0,40,546,586,4355,5475,2431,1065,481,296,0 "KG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"KG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KG",1995,NA,NA,3,109,171,165,65,38,30,NA,NA,NA,1,70,94,34,18,15,19,NA "KG",1996,NA,NA,4,148,210,156,86,38,40,NA,NA,NA,8,90,93,55,18,26,31,NA "KG",1997,NA,NA,1,212,381,349,143,90,38,NA,NA,NA,4,115,133,64,22,29,37,NA "KG",1998,NA,NA,4,105,176,141,75,43,21,NA,NA,NA,3,68,89,54,20,22,9,NA "KG",1999,NA,NA,5,216,388,244,142,73,49,NA,NA,NA,8,137,199,75,40,31,35,NA "KG",2000,NA,NA,4,128,227,205,115,52,46,NA,NA,NA,6,128,146,100,41,30,29,NA "KG",2001,NA,NA,0,176,287,217,159,54,44,NA,NA,NA,0,133,183,105,45,30,48,NA "KG",2002,NA,NA,0,202,268,233,137,61,45,NA,NA,NA,0,153,179,116,44,39,67,NA "KG",2003,NA,NA,0,189,298,241,145,63,70,NA,NA,NA,0,178,227,109,61,29,42,NA "KG",2004,NA,NA,3,221,277,265,164,58,69,NA,NA,NA,11,196,228,104,59,34,72,NA "KG",2005,0,1,1,247,303,269,194,66,84,0,0,15,15,215,236,141,70,33,98,0 "KG",2006,NA,3,3,245,298,245,179,75,75,NA,NA,13,13,228,203,107,75,32,65,NA "KG",2007,NA,3,3,243,274,186,186,62,63,NA,NA,11,11,216,213,114,67,47,61,NA "KG",2008,NA,1,1,261,275,190,155,70,53,NA,NA,13,13,209,217,104,61,41,62,NA "KH",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"KH",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1995,NA,NA,161,453,1244,1147,1253,1257,707,NA,NA,NA,123,388,1133,1435,1426,1180,578,NA "KH",1996,NA,NA,148,32,1272,1363,1348,1226,726,NA,NA,NA,124,27,1087,1430,1534,1201,547,NA "KH",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KH",1998,NA,NA,36,446,1330,1477,1521,1293,924,NA,NA,NA,23,367,1184,1531,1667,1359,691,NA "KH",1999,NA,NA,41,525,1389,1734,1645,1578,1089,NA,NA,NA,51,445,1229,1861,1857,1448,852,NA "KH",2000,NA,NA,26,519,1323,1618,1456,1373,1058,NA,NA,NA,38,457,1157,1649,1798,1459,892,NA "KH",2001,NA,NA,29,600,1302,1601,1406,1403,1037,NA,NA,NA,25,455,1033,1526,1687,1428,829,NA "KH",2002,NA,NA,54,791,1449,1956,1799,1624,1432,NA,NA,NA,54,600,1114,1737,1898,1650,1100,NA "KH",2003,NA,NA,37,805,1514,2183,1848,1729,1487,NA,NA,NA,46,691,1287,1975,2208,1857,1256,NA "KH",2004,NA,NA,36,850,1466,2261,1942,1759,1538,NA,NA,NA,28,658,1276,1882,2176,1836,1270,NA "KH",2005,NA,NA,49,894,1600,2349,2043,1964,1811,NA,NA,NA,45,790,1413,2089,2323,2058,1573,NA "KH",2006,NA,NA,50,791,1486,2205,1902,1689,1665,NA,NA,NA,44,749,1330,1839,2072,1915,1557,NA "KH",2007,NA,NA,50,883,1526,2190,2102,1761,1644,NA,NA,NA,64,749,1351,1698,2105,1839,1459,NA "KH",2008,NA,NA,49,920,1570,2040,2117,1746,1683,NA,NA,NA,72,808,1403,1809,2093,1943,1607,NA "KI",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"KI",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KI",1996,NA,NA,0,4,1,2,2,2,2,NA,NA,NA,2,3,3,4,2,1,1,NA "KI",1997,NA,NA,1,2,0,0,5,1,1,NA,NA,NA,2,0,0,2,0,2,1,NA "KI",1998,NA,NA,1,6,10,2,3,2,1,NA,NA,NA,2,7,5,3,5,1,1,NA "KI",1999,NA,NA,2,6,4,2,4,4,3,NA,NA,NA,1,9,9,6,2,3,4,NA "KI",2000,NA,NA,2,9,3,3,3,8,2,NA,NA,NA,2,5,6,3,4,1,3,NA "KI",2001,NA,NA,4,10,7,3,3,5,3,NA,NA,NA,4,7,7,3,3,4,1,NA "KI",2002,NA,NA,5,11,1,7,7,7,NA,NA,NA,NA,5,15,8,8,3,4,1,NA "KI",2003,NA,NA,5,13,5,9,6,6,0,NA,NA,NA,5,20,4,12,7,3,4,NA "KI",2004,NA,NA,8,17,10,12,10,9,3,NA,NA,NA,7,31,9,12,7,6,1,NA "KI",2005,NA,NA,3,15,15,12,17,4,1,NA,NA,NA,5,22,12,7,7,3,1,NA "KI",2006,NA,NA,3,18,18,16,18,3,7,NA,NA,NA,5,15,5,5,1,8,3,NA "KI",2007,0,2,2,15,7,10,6,10,3,NA,0,8,8,13,6,8,9,4,2,NA "KI",2008,0,2,2,30,9,15,10,2,5,0,0,4,4,33,9,12,9,3,4,0 "KM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"KM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",1995,NA,NA,0,18,13,9,7,8,4,NA,NA,NA,1,13,9,8,6,5,2,NA "KM",1996,NA,NA,1,19,16,12,4,8,8,NA,NA,NA,1,7,12,6,4,10,3,NA "KM",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",1998,NA,NA,0,15,10,13,11,6,0,NA,NA,NA,0,7,9,5,8,4,4,NA "KM",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",2000,NA,NA,0,18,7,14,9,3,4,NA,NA,NA,1,9,6,12,1,2,1,NA "KM",2001,NA,NA,0,15,11,10,11,3,5,NA,NA,NA,2,10,11,8,4,2,0,NA "KM",2002,NA,NA,0,10,9,8,4,3,3,NA,NA,NA,0,11,6,7,6,2,3,NA "KM",2003,NA,NA,1,7,12,5,1,3,3,NA,NA,NA,0,5,7,1,1,1,1,NA "KM",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",2005,NA,NA,0,12,9,6,4,2,4,NA,NA,NA,2,10,7,4,8,3,8,NA "KM",2006,1,0,0,12,9,7,4,4,1,NA,1,0,0,5,5,9,6,4,1,NA "KM",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KM",2008,0,2,2,11,11,4,9,5,5,0,0,2,2,6,11,1,6,2,2,0 "KN",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"KN",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1996,NA,NA,0,0,1,1,0,0,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1998,NA,NA,0,0,0,2,1,0,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,1,NA "KN",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA "KN",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",2006,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KN",2007,NA,NA,NA,1,1,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA "KN",2008,0,0,0,0,0,0,3,0,0,0,0,0,0,0,1,0,1,0,0,0 "KP",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KP",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KP",1997,NA,NA,5,375,430,640,620,430,240,NA,NA,NA,2,205,295,210,205,175,148,NA "KP",1998,NA,NA,0,21,36,34,36,31,25,NA,NA,NA,0,11,24,24,25,20,15,NA "KP",1999,NA,NA,14,294,438,401,294,151,30,NA,NA,NA,10,162,235,327,237,68,12,NA "KP",2000,NA,NA,293,928,1508,2927,2519,1167,651,NA,NA,NA,167,683,1121,2004,1524,591,357,NA "KP",2001,NA,NA,207,1081,1593,2276,2208,1149,606,NA,NA,NA,123,690,1132,1354,1120,553,336,NA "KP",2002,NA,NA,199,1444,2282,2584,2618,1235,745,NA,NA,NA,140,1049,1720,1642,1505,892,521,NA "KP",2003,NA,NA,86,1154,2279,2678,2469,1412,634,NA,NA,NA,93,823,1623,1607,1395,769,370,NA 
"KP",2004,NA,NA,175,1284,2559,2991,2858,1464,460,NA,NA,NA,118,887,1577,1640,1473,724,269,NA "KP",2005,NA,NA,167,1409,2422,2688,2040,1185,485,NA,NA,NA,166,1127,1756,1890,1381,764,336,NA "KP",2006,NA,NA,157,1498,2393,3219,2301,1479,591,NA,NA,NA,87,725,1373,2051,1373,791,397,NA "KP",2007,NA,NA,353,1947,2748,3717,2831,2093,674,NA,NA,NA,406,1233,1682,2672,1723,1056,440,NA "KP",2008,11,430,441,2341,3320,4263,3988,2704,948,0,7,472,479,1259,1792,2428,2282,1177,604,0 "KR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KR",1995,NA,NA,27,1131,1613,1425,1207,1307,1225,NA,NA,NA,46,908,863,431,296,408,867,NA "KR",1996,NA,NA,31,1150,1587,1457,1118,1216,1116,NA,NA,NA,32,950,827,460,297,340,839,NA "KR",1997,NA,NA,24,935,1276,1221,982,1069,1099,NA,NA,NA,31,790,685,445,234,359,807,NA "KR",1998,NA,NA,19,977,1334,1329,999,1074,1119,NA,NA,NA,37,765,708,455,238,393,912,NA "KR",1999,NA,NA,27,884,1205,1180,871,962,1136,NA,NA,NA,40,704,653,402,256,306,933,NA 
"KR",2000,NA,NA,19,821,1085,988,853,731,901,NA,NA,NA,25,546,544,393,220,295,795,NA "KR",2001,NA,NA,23,942,1415,1419,1293,1103,1361,NA,NA,NA,45,839,890,489,326,390,1270,NA "KR",2002,NA,NA,20,806,1333,1374,1265,1029,1390,NA,NA,NA,19,759,854,456,334,377,1329,NA "KR",2003,NA,NA,22,732,1208,1265,1207,992,1472,NA,NA,NA,32,681,793,501,365,381,1325,NA "KR",2004,NA,NA,18,709,1276,1364,1248,1017,1595,NA,NA,NA,26,659,847,496,340,360,1516,NA "KR",2005,NA,NA,22,687,1171,1326,1336,1005,1669,NA,NA,NA,27,590,842,491,370,373,1729,NA "KR",2006,0,19,19,652,1109,1223,1406,955,1698,NA,3,24,27,579,859,507,403,371,1705,NA "KR",2007,3,48,16,589,953,1144,1308,906,1684,NA,1,64,34,570,807,466,387,347,1716,NA "KR",2008,3,18,21,492,865,1093,1400,958,1848,0,1,31,32,483,722,483,402,360,1889,0 "KW",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KW",1995,NA,NA,0,15,51,32,17,9,0,NA,NA,NA,0,8,24,9,4,4,2,NA "KW",1996,NA,NA,0,11,45,16,30,4,1,NA,NA,NA,1,12,17,8,3,3,2,NA 
"KW",1997,NA,NA,1,23,38,37,22,6,7,NA,NA,NA,1,17,26,11,7,1,4,NA "KW",1998,NA,NA,0,14,42,42,20,11,5,NA,NA,NA,0,13,14,9,5,5,5,NA "KW",1999,NA,NA,0,18,49,26,11,10,4,NA,NA,NA,2,9,23,5,6,4,2,NA "KW",2000,NA,NA,0,10,44,32,21,11,5,NA,NA,NA,1,11,24,12,5,3,1,NA "KW",2001,NA,NA,0,13,37,29,19,1,6,NA,NA,NA,1,13,30,14,4,5,2,NA "KW",2002,NA,NA,0,14,47,32,26,9,3,NA,NA,NA,0,15,37,11,7,3,2,NA "KW",2003,NA,NA,1,14,39,33,26,11,5,NA,NA,NA,1,16,31,18,2,3,1,NA "KW",2004,NA,NA,0,20,63,38,22,9,7,NA,NA,NA,0,14,44,12,7,5,7,NA "KW",2005,NA,NA,0,12,45,29,26,8,3,NA,NA,NA,0,13,31,11,3,1,5,NA "KW",2006,NA,NA,1,19,72,40,37,14,3,NA,NA,NA,0,17,41,23,5,6,6,NA "KW",2007,0,1,1,16,69,25,29,8,5,NA,0,0,0,26,53,18,13,7,4,NA "KW",2008,0,0,0,18,90,56,34,11,9,0,0,2,2,33,47,27,7,5,6,0 "KY",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",1998,NA,NA,0,0,0,0,0,0,1,NA,NA,NA,0,0,1,0,0,0,0,NA 
"KY",1999,NA,NA,0,0,0,1,0,0,0,NA,NA,NA,0,1,0,0,0,0,0,NA "KY",2000,NA,NA,0,0,3,1,0,1,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",2001,NA,NA,0,0,1,0,0,0,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",2003,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "KY",2004,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,1,0,0,0,NA "KY",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KY",2006,0,0,0,0,0,0,0,0,0,NA,0,0,0,0,0,0,0,0,0,NA "KY",2007,0,0,0,0,0,0,0,1,0,NA,0,0,0,0,0,0,0,0,0,NA "KY",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",1997,NA,NA,22,68,827,725,475,485,211,NA,NA,NA,47,78,673,423,141,142,115,NA "KZ",1998,NA,NA,34,795,1009,817,628,515,78,NA,NA,NA,62,625,713,457,204,174,69,NA 
"KZ",1999,NA,NA,34,778,1217,1026,560,368,165,NA,NA,NA,60,822,872,452,226,171,136,NA "KZ",2000,NA,NA,36,1057,1409,1379,923,439,218,NA,NA,NA,84,999,1079,599,275,202,204,NA "KZ",2001,NA,NA,38,1038,1477,1485,1011,429,211,NA,NA,NA,88,1040,1062,570,263,194,173,NA "KZ",2002,NA,NA,33,1067,1565,1490,1042,435,212,NA,NA,NA,68,1035,1086,669,348,194,208,NA "KZ",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "KZ",2004,NA,NA,24,989,1291,1183,899,336,196,NA,NA,NA,62,844,912,517,307,178,189,NA "KZ",2005,0,31,31,917,1142,983,795,274,175,0,1,45,46,751,767,436,286,121,187,0 "KZ",2006,NA,NA,11,888,981,848,744,287,169,NA,NA,NA,30,741,636,370,234,116,150,NA "KZ",2007,3,11,14,881,976,859,714,279,150,0,0,38,38,782,605,367,249,124,157,0 "KZ",2008,0,14,14,897,968,811,752,306,160,0,1,43,44,710,659,320,230,137,185,0 "LA",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LA",1995,NA,NA,6,56,71,68,78,90,55,NA,NA,NA,3,49,49,69,54,52,26,NA "LA",1996,NA,NA,1,42,80,97,131,127,84,NA,NA,NA,4,31,62,80,64,59,24,NA 
"LA",1997,NA,NA,2,61,91,151,158,156,124,NA,NA,NA,2,60,83,102,102,88,54,NA "LA",1998,NA,NA,4,77,152,150,177,211,152,NA,NA,NA,6,59,121,122,108,90,65,NA "LA",1999,NA,NA,5,91,175,183,213,193,191,NA,NA,NA,9,60,115,142,141,98,90,NA "LA",2000,NA,NA,7,92,128,166,201,177,176,NA,NA,NA,10,59,95,131,122,91,71,NA "LA",2001,NA,NA,10,81,137,176,219,186,164,NA,NA,NA,6,51,99,121,138,104,71,NA "LA",2002,NA,NA,4,86,159,220,223,227,185,NA,NA,NA,2,72,141,151,152,117,90,NA "LA",2003,NA,NA,6,91,180,239,226,207,196,NA,NA,NA,7,77,107,162,156,114,98,NA "LA",2004,NA,NA,14,120,181,231,318,259,268,NA,NA,NA,12,72,137,157,172,164,121,NA "LA",2005,NA,NA,13,136,223,296,373,300,352,NA,NA,NA,7,101,186,205,244,192,178,NA "LA",2006,NA,NA,12,145,245,340,406,345,354,NA,NA,NA,13,109,196,221,228,222,205,NA "LA",2007,NA,NA,11,150,258,307,418,361,350,NA,NA,NA,7,126,175,215,293,206,207,NA "LA",2008,NA,NA,6,159,262,329,380,409,373,NA,NA,NA,10,101,165,209,264,220,192,NA "LB",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LB",1995,NA,NA,3,26,32,30,16,16,10,NA,NA,NA,1,16,18,13,8,5,3,NA "LB",1996,NA,NA,4,28,41,18,12,9,19,NA,NA,NA,4,24,13,11,5,6,4,NA "LB",1997,NA,NA,1,18,33,22,19,17,17,NA,NA,NA,3,30,20,6,7,8,3,NA "LB",1998,NA,NA,1,27,33,22,19,17,17,NA,NA,NA,3,23,20,6,7,8,3,NA "LB",1999,NA,NA,3,27,44,35,17,17,11,NA,NA,NA,1,33,26,17,6,9,3,NA 
"LB",2000,NA,NA,5,16,28,20,15,17,14,NA,NA,NA,4,31,26,9,7,4,6,NA "LB",2001,NA,NA,0,22,20,18,16,8,8,NA,NA,NA,3,25,28,7,6,4,5,NA "LB",2002,NA,NA,1,19,25,14,10,7,9,NA,NA,NA,2,17,21,8,9,3,3,NA "LB",2003,NA,NA,0,19,26,22,6,5,7,NA,NA,NA,3,14,12,9,5,2,4,NA "LB",2004,NA,NA,1,11,25,18,18,8,6,NA,NA,NA,0,18,21,10,5,1,4,NA "LB",2005,NA,NA,0,12,19,15,10,12,8,NA,NA,NA,1,25,14,8,3,3,1,NA "LB",2006,0,0,0,11,12,18,14,10,8,NA,0,1,1,16,12,5,2,2,1,NA "LB",2007,0,0,0,12,19,13,12,11,5,NA,0,1,1,17,30,13,5,3,2,NA "LB",2008,0,1,1,9,14,12,17,14,6,0,0,2,2,24,32,15,7,3,2,0 "LC",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LC",1997,NA,NA,0,0,1,2,0,1,1,NA,NA,NA,1,1,3,0,0,0,1,NA "LC",1998,NA,NA,0,2,1,1,0,0,1,NA,NA,NA,0,3,2,0,0,1,1,NA "LC",1999,NA,NA,NA,1,NA,NA,3,3,NA,NA,NA,NA,NA,1,1,NA,NA,NA,NA,NA "LC",2000,NA,NA,0,0,0,1,0,1,2,NA,NA,NA,0,1,0,1,0,1,0,NA "LC",2001,NA,NA,0,1,1,0,1,3,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"LC",2002,NA,NA,NA,NA,1,1,1,2,1,NA,NA,NA,NA,NA,NA,NA,1,1,NA,NA "LC",2003,NA,NA,0,0,0,1,2,2,2,NA,NA,NA,0,1,1,0,3,2,1,NA "LC",2004,NA,NA,0,0,0,1,2,2,2,NA,NA,NA,0,2,3,0,0,0,3,NA "LC",2005,NA,NA,0,0,0,0,2,1,2,NA,NA,NA,1,1,0,1,1,0,2,NA "LC",2006,NA,NA,NA,NA,NA,NA,3,5,5,NA,NA,NA,NA,NA,NA,NA,NA,1,1,NA "LC",2007,NA,NA,NA,NA,3,3,2,4,3,NA,NA,1,NA,NA,NA,NA,1,NA,1,NA "LC",2008,0,0,0,2,0,2,2,2,1,0,0,1,1,0,1,3,2,1,2,0 "LK",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LK",1995,NA,NA,10,163,361,519,521,365,261,NA,NA,NA,15,207,206,142,122,81,56,NA "LK",1996,NA,NA,10,163,327,491,523,355,253,NA,NA,NA,18,197,168,147,133,111,62,NA "LK",1997,NA,NA,11,215,390,596,623,396,271,NA,NA,NA,23,245,217,173,176,89,81,NA "LK",1998,NA,NA,7,237,430,628,663,445,304,NA,NA,NA,22,228,235,169,173,119,87,NA "LK",1999,NA,NA,8,255,406,621,646,440,325,NA,NA,NA,10,264,231,168,148,126,101,NA "LK",2000,NA,NA,25,266,459,695,793,484,360,NA,NA,NA,23,312,264,176,202,144,113,NA 
"LK",2001,NA,NA,6,284,446,713,779,528,336,NA,NA,NA,18,296,247,194,174,156,131,NA "LK",2002,NA,NA,11,287,411,682,788,551,366,NA,NA,NA,19,320,248,205,151,151,107,NA "LK",2003,NA,NA,12,311,467,694,791,495,389,NA,NA,NA,14,305,218,186,187,132,120,NA "LK",2004,NA,NA,6,358,472,664,800,521,371,NA,NA,NA,18,263,237,192,176,122,102,NA "LK",2005,NA,NA,9,341,520,724,918,657,424,NA,NA,NA,19,295,261,189,200,154,130,NA "LK",2006,NA,NA,8,342,496,600,816,563,402,NA,NA,NA,13,301,248,178,189,157,129,NA "LK",2007,NA,NA,10,288,477,664,802,649,412,NA,NA,NA,16,279,228,183,182,176,111,NA "LK",2008,NA,NA,11,283,488,717,810,649,415,NA,NA,NA,26,298,288,183,173,172,133,NA "LR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1996,NA,NA,38,69,105,84,42,33,9,NA,NA,NA,44,72,78,51,24,12,7,NA "LR",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",1998,NA,NA,18,150,229,158,72,34,11,NA,NA,NA,18,164,160,98,45,17,16,NA "LR",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",2000,NA,NA,12,133,196,127,52,17,26,NA,NA,NA,21,140,149,88,28,16,16,NA "LR",2001,NA,NA,16,111,174,132,63,17,11,NA,NA,NA,18,108,143,77,35,17,12,NA 
"LR",2002,NA,NA,20,252,315,295,143,60,44,NA,NA,NA,26,256,250,150,86,41,26,NA "LR",2003,NA,NA,5,180,215,204,99,49,23,NA,NA,NA,12,148,180,109,43,30,22,NA "LR",2004,NA,NA,32,333,427,285,198,71,51,NA,NA,NA,39,268,397,183,123,41,42,NA "LR",2005,NA,NA,26,240,352,333,155,74,65,NA,NA,NA,37,232,297,171,108,52,25,NA "LR",2006,NA,NA,59,324,442,371,250,125,97,NA,NA,NA,55,292,371,242,125,85,68,NA "LR",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LR",2008,3,104,107,129,412,532,308,169,98,0,1,55,56,115,237,367,298,170,44,0 "LS",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",1995,NA,NA,9,108,214,256,189,96,88,NA,NA,NA,14,106,125,71,49,17,19,NA "LS",1996,NA,NA,12,123,272,367,223,149,87,NA,NA,NA,7,164,189,94,44,29,28,NA "LS",1997,NA,NA,11,180,392,463,307,173,69,NA,NA,NA,29,216,272,152,71,40,23,NA "LS",1998,NA,NA,6,190,407,488,372,190,87,NA,NA,NA,10,200,283,125,65,30,23,NA "LS",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"LS",2000,NA,NA,8,165,458,517,395,198,76,NA,NA,NA,11,222,336,195,83,36,29,NA "LS",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LS",2002,NA,NA,10,218,547,535,347,211,80,NA,NA,NA,14,304,447,207,125,41,17,NA "LS",2003,NA,NA,10,219,614,592,466,219,83,NA,NA,NA,32,328,567,313,219,59,33,NA "LS",2004,NA,NA,29,286,728,696,448,206,78,NA,NA,NA,22,459,691,364,161,68,36,NA "LS",2005,NA,NA,32,395,695,397,148,82,37,NA,NA,NA,19,226,721,616,494,297,121,NA "LS",2006,NA,NA,33,228,628,550,440,218,49,NA,NA,NA,50,370,642,430,171,90,125,NA "LS",2007,1,5,6,32,135,73,87,52,28,NA,0,4,4,78,121,106,40,13,13,NA "LS",2008,0,21,21,223,615,542,380,242,138,0,1,23,24,343,700,358,146,76,54,0 "LT",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LT",1995,NA,NA,4,46,132,225,176,90,77,NA,NA,NA,5,6,53,45,32,16,42,NA "LT",1996,NA,NA,2,60,133,224,206,133,82,NA,NA,NA,6,37,62,73,40,29,44,NA "LT",1997,NA,NA,2,53,136,227,213,146,98,NA,NA,NA,5,40,77,73,38,25,67,NA 
"LT",1998,NA,NA,0,38,77,165,163,81,57,NA,NA,NA,0,27,25,65,22,21,46,NA "LT",1999,NA,NA,0,42,90,153,22,91,67,NA,NA,NA,0,32,48,55,25,20,40,NA "LT",2000,NA,NA,1,38,97,145,155,74,68,NA,NA,NA,0,20,37,39,32,22,48,NA "LT",2001,NA,NA,0,35,112,197,155,88,76,NA,NA,NA,1,33,59,57,35,28,59,NA "LT",2002,NA,NA,1,24,95,176,142,88,59,NA,NA,NA,0,30,59,45,32,18,52,NA "LT",2003,NA,NA,1,35,116,175,174,107,60,NA,NA,NA,0,35,49,37,38,20,50,NA "LT",2004,NA,NA,0,39,100,161,173,92,71,NA,NA,NA,1,21,48,47,47,24,32,NA "LT",2005,0,0,0,42,118,186,187,108,67,0,0,1,1,25,41,57,49,23,54,0 "LT",2006,0,0,0,38,120,207,211,107,74,0,0,0,0,25,48,56,52,38,53,0 "LT",2007,0,0,0,31,77,165,235,109,76,0,0,0,0,34,41,48,50,22,37,0 "LT",2008,0,0,0,39,110,162,182,104,71,0,0,1,1,20,51,46,36,18,44,0 "LU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",1996,NA,NA,0,5,3,4,2,3,2,NA,NA,NA,1,3,2,0,1,0,3,NA "LU",1997,NA,NA,1,2,2,7,3,2,3,NA,NA,NA,1,3,0,1,0,1,5,NA 
"LU",1998,NA,NA,0,3,6,7,4,2,0,NA,NA,NA,1,0,1,2,2,0,2,NA "LU",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LU",2001,NA,NA,0,0,1,0,0,0,2,NA,NA,NA,0,2,2,1,1,1,2,NA "LU",2002,NA,NA,0,0,1,3,3,2,1,NA,NA,NA,0,0,2,1,1,1,2,NA "LU",2003,NA,NA,0,2,10,7,1,2,2,NA,NA,NA,0,2,1,1,0,0,0,NA "LU",2004,NA,NA,0,1,0,4,3,1,1,NA,NA,NA,0,0,5,5,0,0,0,NA "LU",2005,0,0,0,0,2,2,1,1,2,0,0,0,0,0,2,1,1,1,0,0 "LU",2006,0,0,0,0,3,2,3,2,3,0,0,0,0,2,3,2,0,1,1,0 "LU",2007,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0 "LU",2008,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 "LV",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LV",1995,NA,NA,0,20,44,71,70,40,30,NA,NA,NA,0,22,49,55,47,27,29,NA "LV",1996,NA,NA,0,28,69,130,89,67,42,NA,NA,NA,0,32,39,31,22,10,16,NA "LV",1997,NA,NA,0,47,109,145,106,61,29,NA,NA,NA,1,27,22,37,20,16,14,NA "LV",1998,NA,NA,0,58,105,129,121,68,35,NA,NA,NA,1,24,45,26,23,15,18,NA 
"LV",1999,NA,NA,1,48,87,110,103,57,30,NA,NA,NA,2,28,24,40,29,11,18,NA "LV",2000,NA,NA,0,53,106,124,111,64,34,NA,NA,NA,2,25,41,27,28,7,15,NA "LV",2001,NA,NA,0,48,109,138,101,64,32,NA,NA,NA,2,24,33,41,31,18,20,NA "LV",2002,NA,NA,0,32,98,123,121,64,26,NA,NA,NA,0,37,42,37,23,11,22,NA "LV",2003,NA,NA,0,36,74,141,106,59,32,NA,NA,NA,0,31,42,42,35,17,26,NA "LV",2004,NA,NA,0,30,74,119,109,53,38,NA,NA,NA,2,29,32,36,29,12,19,NA "LV",2005,0,1,1,22,71,104,117,55,34,0,0,0,0,17,31,31,23,18,12,0 "LV",2006,0,2,2,27,78,82,105,51,26,0,0,0,0,17,27,33,28,9,13,0 "LV",2007,NA,NA,NA,33,65,93,102,49,19,NA,NA,1,1,18,27,32,18,12,9,NA "LV",2008,0,0,0,28,54,71,71,47,27,0,0,0,0,11,23,26,21,9,12,0 "LY",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1995,NA,NA,2,112,212,78,46,22,21,NA,NA,NA,5,34,31,19,20,13,11,NA "LY",1996,NA,NA,4,93,142,82,31,28,19,NA,NA,NA,4,30,35,17,10,9,11,NA "LY",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",1999,NA,NA,2,110,257,115,53,36,33,NA,NA,NA,6,43,59,25,15,14,27,NA 
"LY",2000,NA,NA,5,101,239,86,36,29,32,NA,NA,NA,6,43,35,24,24,16,22,NA "LY",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "LY",2003,NA,NA,0,108,266,142,32,25,19,NA,NA,NA,4,43,28,30,25,21,21,NA "LY",2004,NA,NA,5,113,310,173,53,24,20,NA,NA,NA,1,44,50,20,23,13,23,NA "LY",2005,NA,NA,2,114,293,168,52,19,35,NA,NA,NA,8,36,36,35,21,21,20,NA "LY",2006,0,0,1,98,247,150,49,23,23,NA,0,0,8,55,34,24,10,12,11,NA "LY",2007,NA,NA,2,61,143,78,26,12,10,NA,NA,NA,4,23,17,12,8,7,11,NA "LY",2008,1,1,2,116,298,162,85,24,19,0,0,6,6,56,35,22,20,9,17,0 "MA",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MA",1995,NA,NA,142,2508,2872,1737,819,573,553,NA,NA,NA,191,1708,1288,703,461,317,299,NA "MA",1996,NA,NA,118,2618,2844,1721,772,602,583,NA,NA,NA,217,1697,1300,677,437,400,292,NA "MA",1997,NA,NA,119,2328,2891,1659,761,591,557,NA,NA,NA,238,1799,1331,745,416,424,275,NA "MA",1998,NA,NA,116,2308,2573,1744,843,560,527,NA,NA,NA,182,1600,1150,679,412,402,330,NA 
"MA",1999,NA,NA,78,2296,2696,1641,815,559,562,NA,NA,NA,156,1654,1143,691,446,351,332,NA "MA",2000,NA,NA,99,2061,2423,1705,855,485,595,NA,NA,NA,170,1530,1121,672,398,406,352,NA "MA",2001,NA,NA,85,2200,2256,1731,929,561,606,NA,NA,NA,156,1477,1046,596,402,399,360,NA "MA",2002,NA,NA,79,2190,2341,1647,941,525,577,NA,NA,NA,144,1483,1088,713,443,357,386,NA "MA",2003,NA,NA,91,2225,2347,1667,1004,525,550,NA,NA,NA,168,1455,1029,633,431,366,351,NA "MA",2004,NA,NA,68,2081,2397,1676,1114,533,539,NA,NA,NA,149,1196,981,517,373,331,325,NA "MA",2005,NA,NA,79,2222,2515,1583,1057,580,591,NA,NA,NA,167,1330,943,546,403,343,398,NA "MA",2006,4,69,73,2104,2373,1498,1036,527,551,NA,6,149,155,1273,1025,597,426,335,307,NA "MA",2007,4,70,74,2098,2370,1545,1165,545,529,NA,6,117,123,1177,837,444,354,306,370,NA "MA",2008,5,46,51,1992,2372,1514,1179,633,589,0,4,120,124,1081,803,479,360,290,358,0 "MC",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MC",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",1999,NA,NA,0,0,0,0,1,0,0,NA,NA,NA,0,0,0,0,0,0,1,NA "MC",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MC",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MD",1995,NA,NA,0,55,115,166,95,65,15,NA,NA,NA,2,42,38,31,19,10,12,NA "MD",1996,NA,NA,0,26,33,55,34,9,5,NA,NA,NA,2,10,14,18,4,3,6,NA "MD",1997,NA,NA,0,51,65,86,47,35,13,NA,NA,NA,0,24,32,16,14,6,8,NA "MD",1998,NA,NA,2,72,67,116,56,36,16,NA,NA,NA,2,34,20,34,10,5,7,NA "MD",1999,NA,NA,1,89,123,144,84,29,14,NA,NA,NA,3,31,32,27,19,7,6,NA 
"MD",2000,NA,NA,2,52,31,36,13,13,6,NA,NA,NA,1,16,32,45,23,14,6,NA "MD",2001,NA,NA,1,152,197,230,158,62,32,NA,NA,NA,6,58,61,46,33,14,10,NA "MD",2002,NA,NA,5,159,220,237,181,49,33,NA,NA,NA,11,71,76,41,32,23,8,NA "MD",2003,NA,NA,1,152,201,252,206,62,25,NA,NA,NA,1,101,71,64,40,16,22,NA "MD",2004,NA,NA,8,210,277,284,267,89,42,NA,NA,NA,11,91,97,57,53,28,22,NA "MD",2005,0,2,2,211,337,345,313,106,31,0,0,3,3,97,92,57,61,23,18,0 "MD",2006,NA,NA,2,175,302,349,312,106,32,0,NA,NA,7,91,108,72,67,25,31,0 "MD",2007,0,0,0,181,281,343,314,107,35,0,0,2,2,97,85,57,58,25,25,0 "MD",2008,0,1,1,167,271,314,317,105,32,0,2,2,4,85,81,57,52,22,25,0 "ME",2005,0,0,0,3,5,7,15,4,8,0,0,0,0,0,7,3,4,0,8,0 "ME",2006,0,0,0,0,7,7,12,9,3,0,0,0,NA,3,4,4,4,3,2,0 "ME",2007,0,0,0,0,6,3,10,1,3,1,0,0,0,3,3,4,3,3,1,0 "ME",2008,0,0,0,2,7,10,5,5,1,0,0,0,0,4,5,5,10,4,7,0 "MG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1995,NA,NA,79,791,1289,1173,630,423,242,NA,NA,NA,100,799,1108,744,340,230,78,NA 
"MG",1996,NA,NA,68,888,1325,1271,673,484,285,NA,NA,NA,106,808,1031,744,393,197,79,NA "MG",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",1998,NA,NA,70,827,1545,1420,829,485,282,NA,NA,NA,108,852,1193,824,430,253,117,NA "MG",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MG",2001,NA,NA,103,1033,1588,1625,1094,613,404,NA,NA,NA,190,1010,1349,1094,546,289,154,NA "MG",2002,NA,NA,94,1023,1594,1563,1174,609,398,NA,NA,NA,163,983,1372,1000,598,234,135,NA "MG",2003,NA,NA,123,1249,1830,1839,1413,723,438,NA,NA,NA,216,1164,1578,1240,743,326,191,NA "MG",2004,NA,NA,118,1025,1593,1482,1026,495,300,NA,NA,NA,130,950,1130,841,513,220,95,NA "MG",2005,NA,NA,98,1159,1867,1732,1349,582,333,NA,NA,NA,150,1012,1451,1047,614,248,129,NA "MG",2006,NA,NA,117,1500,2391,2220,1714,766,458,NA,NA,NA,208,1458,1944,1444,874,353,166,NA "MG",2007,94,102,196,1473,2353,2097,1671,823,438,NA,16,207,223,1456,1810,1354,880,378,192,NA "MG",2008,21,121,142,1499,2294,2113,1669,836,465,0,19,232,251,1433,1846,1352,911,383,171,0 "MH",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MH",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MH",1996,NA,NA,7,8,3,3,5,3,0,NA,NA,NA,12,7,3,3,2,3,2,NA "MH",1998,NA,NA,0,2,0,1,1,1,0,NA,NA,NA,0,1,3,1,0,1,0,NA "MH",1999,NA,NA,5,10,3,4,1,6,0,NA,NA,NA,2,10,7,2,2,2,0,NA "MH",2000,NA,NA,3,5,4,1,3,5,3,NA,NA,NA,7,7,3,0,2,2,0,NA "MH",2001,NA,NA,3,8,4,2,4,2,0,NA,NA,NA,5,6,4,7,8,2,1,NA "MH",2002,NA,NA,0,1,2,1,3,2,2,NA,NA,NA,0,2,0,0,3,1,1,NA "MH",2003,NA,NA,6,4,2,7,7,2,2,NA,NA,NA,4,9,2,4,6,1,4,NA "MH",2004,NA,NA,2,5,4,3,3,2,NA,NA,NA,NA,1,7,5,3,3,0,1,NA "MH",2005,NA,NA,2,4,4,5,6,1,1,NA,NA,NA,1,9,2,4,3,4,2,NA "MH",2006,NA,NA,NA,4,3,4,6,3,2,NA,NA,NA,2,2,3,3,7,4,2,NA "MH",2007,0,0,0,1,1,2,5,1,0,NA,0,0,1,3,3,2,3,3,0,NA "MH",2008,0,1,1,1,1,2,3,0,0,0,0,2,2,3,2,4,5,1,0,0 "MK",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MK",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MK",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MK",1995,NA,NA,2,15,42,45,33,29,24,NA,NA,NA,2,32,30,20,11,17,17,NA "MK",1996,NA,NA,5,17,20,35,16,25,22,NA,NA,NA,3,15,25,17,9,19,12,NA "MK",1997,NA,NA,1,8,21,25,24,21,21,NA,NA,NA,1,14,19,13,6,7,11,NA "MK",1998,NA,NA,3,14,19,56,18,21,13,NA,NA,NA,5,16,18,8,8,6,4,NA "MK",1999,NA,NA,1,11,10,19,27,15,5,NA,NA,NA,1,7,9,5,1,10,1,NA "MK",2000,NA,NA,5,8,14,20,19,20,14,NA,NA,NA,1,15,14,17,5,5,10,NA "MK",2001,NA,NA,1,10,17,17,15,21,16,NA,NA,NA,1,17,18,14,7,3,7,NA "MK",2002,NA,NA,2,20,17,28,31,22,7,NA,NA,NA,3,18,24,12,4,6,6,NA "MK",2003,NA,NA,1,20,23,35,28,17,17,NA,NA,NA,0,16,16,9,9,1,8,NA "MK",2004,NA,NA,2,12,18,19,33,21,15,NA,NA,NA,0,15,20,19,6,3,17,NA "MK",2005,0,2,2,14,20,23,20,18,13,1,1,1,2,17,13,10,7,5,13,0 "MK",2006,0,0,0,15,15,25,37,18,7,0,0,3,3,16,9,9,6,7,11,0 "MK",2007,0,1,1,12,22,27,46,21,19,0,0,4,4,11,12,9,4,4,8,0 "MK",2008,0,1,1,18,21,13,25,15,15,0,0,2,2,24,15,14,8,7,10,0 "ML",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"ML",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",1995,NA,NA,27,72,357,294,181,138,102,NA,NA,NA,31,132,184,128,107,61,52,NA "ML",1996,NA,NA,19,182,408,364,226,157,136,NA,NA,NA,21,153,197,128,95,51,36,NA "ML",1997,NA,NA,16,226,559,493,357,255,164,NA,NA,NA,15,178,264,167,111,60,34,NA "ML",1998,NA,NA,13,193,501,428,308,205,130,NA,NA,NA,11,173,237,164,88,79,30,NA "ML",1999,NA,NA,19,235,475,429,315,216,129,NA,NA,NA,21,180,226,171,120,96,58,NA "ML",2000,NA,NA,23,206,430,396,297,235,144,NA,NA,NA,14,174,232,152,106,75,43,NA "ML",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ML",2002,NA,NA,20,209,547,447,430,151,72,NA,NA,NA,39,141,250,166,190,71,24,NA "ML",2003,NA,NA,32,348,619,438,330,201,115,NA,NA,NA,29,172,278,212,123,73,45,NA "ML",2004,NA,NA,28,302,584,473,316,219,147,NA,NA,NA,29,191,284,183,151,105,57,NA "ML",2005,NA,NA,26,350,628,539,365,263,193,NA,NA,NA,33,208,348,245,152,101,72,NA "ML",2006,NA,NA,28,361,679,550,436,272,216,NA,NA,NA,30,250,371,249,168,116,76,NA "ML",2007,NA,NA,29,369,696,570,422,291,213,NA,NA,NA,30,263,385,258,160,113,95,NA 
"ML",2008,2,20,22,453,809,640,503,314,250,0,3,34,37,332,516,320,245,172,121,0 "MM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MM",1995,NA,NA,42,713,1423,1401,977,677,298,NA,NA,NA,58,535,729,729,450,343,154,NA "MM",1996,NA,NA,58,767,1511,1535,1110,798,400,NA,NA,NA,55,577,938,817,558,408,184,NA "MM",1997,NA,NA,56,676,1452,1405,1061,753,441,NA,NA,NA,54,535,883,715,492,308,183,NA "MM",1998,NA,NA,64,798,1491,1584,1187,763,438,NA,NA,NA,73,650,997,856,577,382,229,NA "MM",1999,NA,NA,37,936,1800,1805,1366,833,540,NA,NA,NA,58,737,1076,919,647,420,284,NA "MM",2000,NA,NA,88,1459,2636,2781,2161,1235,836,NA,NA,NA,72,1040,1592,1397,987,592,378,NA "MM",2001,NA,NA,69,1800,3253,3353,2624,1443,931,NA,NA,NA,98,1306,1918,1568,1186,650,487,NA "MM",2002,NA,NA,64,2125,3986,4016,3022,1671,1067,NA,NA,NA,109,1563,2044,1758,1348,845,544,NA "MM",2003,NA,NA,107,2536,4408,4427,3269,1974,1296,NA,NA,NA,154,1781,2442,2003,1491,943,617,NA 
"MM",2004,NA,NA,96,2777,5025,4966,4081,2271,1567,NA,NA,NA,120,2020,2622,2228,1800,1122,713,NA "MM",2005,NA,NA,132,3401,5877,5888,4585,2557,1764,NA,NA,NA,147,2376,3047,2563,2101,1218,885,NA "MM",2006,NA,NA,113,3572,6328,6536,5143,2988,2033,NA,NA,NA,171,2453,3338,2820,2282,1448,1016,NA "MM",2007,NA,NA,127,3591,6569,6826,5507,3152,2155,NA,NA,NA,159,2719,3500,2998,2486,1601,1198,NA "MM",2008,NA,NA,118,3416,6311,6396,5327,3312,2235,0,NA,NA,180,2526,3474,2850,2357,1644,1102,0 "MN",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MN",1994,NA,NA,1,23,40,25,19,6,1,NA,NA,NA,10,27,24,13,8,2,1,NA "MN",1995,NA,NA,37,99,111,68,19,13,15,NA,NA,NA,30,70,78,33,15,9,25,NA "MN",1996,NA,NA,8,103,150,91,42,24,19,NA,NA,NA,17,98,114,45,27,19,12,NA "MN",1997,NA,NA,6,173,298,204,72,32,17,NA,NA,NA,12,109,134,71,21,13,9,NA "MN",1998,NA,NA,17,213,251,158,65,32,22,NA,NA,NA,32,162,221,115,32,21,15,NA "MN",1999,NA,NA,12,213,314,178,63,34,26,NA,NA,NA,25,205,252,113,43,18,17,NA "MN",2000,NA,NA,6,181,260,171,68,38,23,NA,NA,NA,32,200,213,113,41,26,17,NA 
"MN",2001,NA,NA,13,236,269,179,86,45,36,NA,NA,NA,25,253,260,125,48,28,29,NA "MN",2002,NA,NA,9,242,272,184,94,57,47,NA,NA,NA,16,263,253,133,55,22,23,NA "MN",2003,NA,NA,10,206,217,171,93,55,39,NA,NA,NA,19,254,233,148,45,32,19,NA "MN",2004,NA,NA,6,287,256,229,112,54,43,NA,NA,NA,18,283,249,162,62,24,23,NA "MN",2005,NA,NA,7,271,253,232,147,52,36,NA,NA,NA,15,320,270,145,63,32,25,NA "MN",2006,NA,NA,7,317,335,241,157,64,41,NA,NA,NA,16,372,265,180,81,24,29,NA "MN",2007,NA,NA,4,280,270,232,158,48,34,NA,NA,NA,23,273,250,139,80,36,29,NA "MN",2008,NA,NA,7,289,260,235,151,59,36,0,NA,NA,18,283,229,127,86,32,26,0 "MO",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MO",1995,NA,NA,0,7,19,20,13,12,16,NA,NA,NA,0,9,18,12,4,5,6,NA "MO",1996,NA,NA,1,16,29,34,20,16,26,NA,NA,NA,0,10,21,14,3,3,11,NA "MO",1997,NA,NA,1,15,38,47,37,34,55,NA,NA,NA,4,10,16,21,5,6,15,NA "MO",1998,NA,NA,0,11,26,42,23,28,56,NA,NA,NA,1,9,13,22,6,3,21,NA "MO",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MO",2000,NA,NA,0,10,8,25,22,9,17,NA,NA,NA,0,10,4,6,6,3,13,NA "MO",2001,NA,NA,0,9,17,26,25,11,23,NA,NA,NA,1,5,7,11,10,1,11,NA "MO",2002,NA,NA,1,13,8,21,20,17,21,NA,NA,NA,1,7,10,7,9,1,11,NA "MO",2003,NA,NA,0,9,9,16,27,9,27,NA,NA,NA,0,7,7,11,7,4,5,NA "MO",2004,NA,NA,0,8,7,18,31,12,14,NA,NA,NA,0,5,7,12,3,2,9,NA "MO",2005,NA,NA,3,6,9,21,23,17,22,NA,NA,NA,0,5,9,7,8,1,5,NA "MO",2006,NA,NA,0,15,6,17,32,19,19,NA,NA,NA,1,7,8,9,4,3,4,NA "MO",2007,0,0,0,14,12,14,30,16,13,NA,0,0,2,10,4,6,8,3,6,NA "MO",2008,0,1,1,18,12,10,29,19,13,0,0,2,2,7,6,5,6,6,5,0 "MP",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1995,NA,NA,1,1,3,5,10,3,3,NA,NA,NA,0,0,2,6,4,1,1,NA "MP",1996,NA,NA,0,2,8,5,3,1,1,NA,NA,NA,1,1,1,0,1,1,1,NA "MP",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",1998,NA,NA,0,0,6,3,5,2,2,NA,NA,NA,0,3,4,1,0,0,0,NA "MP",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MP",2000,NA,NA,1,4,8,9,9,3,2,NA,NA,NA,0,10,17,7,3,1,1,NA "MP",2001,NA,NA,0,1,3,0,4,2,0,NA,NA,NA,0,5,4,0,0,0,0,NA "MP",2002,NA,NA,1,2,3,7,10,5,2,NA,NA,NA,0,9,10,3,1,0,0,NA "MP",2003,NA,NA,0,2,2,2,1,0,2,NA,NA,NA,1,3,0,2,1,0,0,NA "MP",2004,NA,NA,0,0,2,2,4,1,0,NA,NA,NA,0,1,2,1,1,0,0,NA 
"MP",2005,NA,NA,0,0,1,3,4,1,2,NA,NA,NA,0,0,0,1,1,1,1,NA "MP",2006,NA,NA,0,0,2,3,1,0,0,NA,NA,NA,0,2,2,3,1,0,1,NA "MP",2007,0,0,0,0,0,3,4,0,2,NA,0,0,0,0,2,1,1,1,2,NA "MP",2008,0,0,0,1,0,1,5,0,3,0,0,0,0,0,0,2,0,1,0,0 "MR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1997,NA,NA,188,165,321,341,613,232,185,NA,NA,NA,125,131,319,230,484,384,70,NA "MR",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",1999,NA,NA,15,290,450,262,177,113,92,NA,NA,NA,7,157,97,110,76,43,20,NA "MR",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",2004,NA,NA,15,204,343,235,154,129,108,NA,NA,NA,14,102,114,114,58,44,28,NA "MR",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MR",2006,NA,NA,12,197,294,203,150,106,96,NA,NA,NA,16,109,114,86,49,29,25,NA 
"MR",2007,NA,NA,14,206,355,261,144,139,83,NA,NA,NA,21,103,152,92,64,38,42,NA "MR",2008,NA,NA,10,199,292,249,172,107,90,NA,NA,NA,16,127,111,97,44,51,40,NA "MS",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",2003,NA,NA,0,0,1,0,0,0,1,NA,NA,NA,0,0,0,0,0,0,0,NA "MS",2004,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "MS",2005,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MS",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA "MS",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MT",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",1995,NA,NA,0,0,0,1,0,0,0,NA,NA,NA,0,0,1,0,0,1,2,NA "MT",1996,NA,NA,0,0,0,0,1,2,1,NA,NA,NA,0,0,0,1,0,0,0,NA "MT",1997,NA,NA,0,1,0,0,0,0,1,NA,NA,NA,0,0,0,0,0,0,1,NA "MT",1998,NA,NA,0,1,0,0,1,0,3,NA,NA,NA,0,1,0,0,0,0,0,NA "MT",1999,NA,NA,0,0,1,0,0,5,3,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",2000,NA,NA,0,1,0,1,1,0,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MT",2001,NA,NA,0,NA,NA,NA,1,NA,1,NA,NA,NA,0,NA,NA,1,NA,NA,NA,NA "MT",2002,NA,NA,0,1,0,1,0,1,0,NA,NA,NA,0,0,1,0,0,0,1,NA "MT",2003,NA,NA,0,0,1,0,1,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "MT",2004,NA,NA,0,0,0,0,1,0,0,NA,NA,NA,0,0,0,0,1,0,0,NA "MT",2005,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0 "MT",2006,0,0,0,1,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0 "MT",2007,0,0,0,0,2,0,0,1,3,0,0,0,0,2,0,0,0,0,0,0 "MT",2008,0,0,0,3,6,2,0,0,0,0,0,0,0,1,1,0,0,0,2,0 "MU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1995,NA,NA,2,17,13,22,27,13,8,NA,NA,NA,2,4,12,10,8,4,4,NA "MU",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",1998,NA,NA,1,12,10,21,19,10,19,NA,NA,NA,0,9,5,4,3,3,6,NA "MU",1999,NA,NA,0,7,20,15,13,12,12,NA,NA,NA,0,13,7,7,8,2,3,NA "MU",2000,NA,NA,2,6,9,18,19,14,8,NA,NA,NA,1,5,8,8,6,7,4,NA "MU",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MU",2002,NA,NA,1,12,6,21,12,7,4,NA,NA,NA,1,3,8,7,1,2,1,NA "MU",2003,NA,NA,0,9,12,10,17,11,9,NA,NA,NA,1,6,8,4,4,3,5,NA "MU",2004,NA,NA,1,13,13,24,20,4,5,NA,NA,NA,0,6,8,10,4,4,5,NA "MU",2005,NA,NA,NA,10,15,21,20,10,6,NA,NA,NA,NA,4,5,5,11,2,1,NA "MU",2006,NA,NA,0,4,9,22,10,12,6,NA,NA,NA,1,3,7,3,4,1,3,NA "MU",2007,0,0,0,9,9,12,15,9,6,NA,0,0,0,4,7,3,5,4,3,NA "MU",2008,0,0,0,11,15,14,15,7,8,0,0,0,0,2,3,2,5,2,1,0 "MV",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MV",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MV",1995,NA,NA,1,28,11,10,8,10,6,NA,NA,NA,1,13,8,4,6,6,2,NA "MV",1996,NA,NA,0,24,9,3,5,14,8,NA,NA,NA,1,12,9,5,6,5,5,NA "MV",1997,NA,NA,1,13,6,2,13,9,8,NA,NA,NA,3,15,8,9,4,3,0,NA "MV",1998,NA,NA,1,19,18,8,4,6,2,NA,NA,NA,1,13,5,1,6,4,0,NA "MV",1999,NA,NA,0,14,8,9,7,7,8,NA,NA,NA,3,10,6,3,6,6,1,NA "MV",2000,NA,NA,0,9,10,2,5,5,3,NA,NA,NA,0,11,4,5,4,5,2,NA "MV",2001,NA,NA,1,12,5,3,5,7,1,NA,NA,NA,1,10,3,2,6,1,2,NA "MV",2002,NA,NA,0,11,9,0,1,5,8,NA,NA,NA,1,8,5,4,5,1,2,NA "MV",2003,NA,NA,1,14,7,4,9,9,4,NA,NA,NA,0,8,5,1,5,1,0,NA "MV",2004,NA,NA,0,13,11,3,8,5,6,NA,NA,NA,0,8,3,2,1,2,4,NA "MV",2005,NA,NA,0,9,8,5,6,6,5,NA,NA,NA,1,10,7,1,2,2,4,NA "MV",2006,0,0,0,8,9,3,4,3,6,NA,0,NA,0,6,3,4,3,2,2,NA "MV",2007,0,0,0,14,4,6,5,6,5,NA,0,1,1,5,2,5,5,0,1,NA "MV",2008,0,0,0,9,11,3,5,6,3,NA,0,0,0,7,1,3,0,3,2,NA "MW",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MW",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",1995,NA,NA,25,493,1195,833,519,215,89,NA,NA,NA,65,802,1028,573,294,108,45,NA "MW",1996,NA,NA,27,562,1388,937,529,224,110,NA,NA,NA,92,887,1187,715,347,133,37,NA "MW",1997,NA,NA,47,578,1418,995,521,254,101,NA,NA,NA,84,1009,1307,767,347,123,36,NA "MW",1998,NA,NA,46,677,1581,1158,643,281,151,NA,NA,NA,85,1131,1585,867,437,148,63,NA "MW",1999,NA,NA,43,588,1475,1083,588,239,126,NA,NA,NA,80,1052,1487,777,376,154,62,NA "MW",2000,NA,NA,50,653,1476,1113,585,245,114,NA,NA,NA,66,1038,1481,831,401,148,64,NA "MW",2001,NA,NA,37,704,1486,1025,591,230,129,NA,NA,NA,74,1070,1520,862,384,139,58,NA "MW",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MW",2003,NA,NA,43,596,1374,936,489,209,128,NA,NA,NA,76,963,1531,790,374,155,52,NA "MW",2004,NA,NA,47,647,1505,1081,508,264,124,NA,NA,NA,78,1009,1694,910,412,191,96,NA "MW",2005,NA,NA,58,622,1653,1031,549,279,157,NA,NA,NA,84,913,1598,859,386,180,74,NA "MW",2006,NA,NA,42,584,1647,1054,491,256,182,NA,NA,NA,80,848,1545,813,348,183,93,NA "MW",2007,0,61,61,614,1454,954,473,233,158,NA,0,109,109,768,1497,715,342,146,84,NA "MW",2008,1,55,56,570,1562,982,502,280,176,NA,1,111,166,707,1327,727,365,172,89,NA "MX",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MX",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1996,NA,NA,198,936,1021,940,721,708,469,NA,NA,NA,243,685,681,627,482,472,312,NA "MX",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX",1998,NA,NA,229,1031,1330,1200,1241,813,892,NA,NA,NA,268,856,829,874,742,583,585,NA "MX",1999,NA,NA,143,1013,1141,1093,1022,880,1128,NA,NA,NA,151,773,795,641,665,592,710,NA "MX",2000,NA,NA,214,1079,1387,1162,1235,972,1126,NA,NA,NA,176,663,828,698,832,595,709,NA "MX",2001,NA,NA,130,1448,1639,1683,1606,1229,1566,NA,NA,NA,146,1131,993,845,952,787,948,NA "MX",2002,NA,NA,154,1090,1292,1301,1146,986,1144,NA,NA,NA,149,769,754,716,700,621,733,NA "MX",2003,NA,NA,187,1207,1461,1417,1313,1005,1352,NA,NA,NA,184,850,826,734,813,743,841,NA "MX",2004,NA,NA,86,1053,1276,1181,1201,958,1209,NA,NA,NA,102,760,649,693,695,626,725,NA "MX",2005,NA,NA,100,1095,1376,1314,1238,1042,1288,NA,NA,NA,125,771,733,710,784,637,784,NA "MX",2006,68,61,129,986,1320,1333,1275,1012,1215,NA,44,109,153,696,774,662,794,722,803,NA 
"MX",2007,61,84,145,981,1286,1286,1266,942,1226,NA,41,99,140,645,742,694,748,642,788,NA "MX",2008,62,62,124,966,1292,1314,1267,1004,1213,0,30,96,126,752,826,710,774,699,836,0 "MY",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MY",1995,NA,NA,59,640,879,775,788,374,1072,NA,NA,NA,58,446,448,345,316,149,339,NA "MY",1996,NA,NA,45,720,1026,894,838,671,868,NA,NA,NA,77,457,463,371,327,242,270,NA "MY",1997,NA,NA,44,701,1036,961,816,380,1123,NA,NA,NA,51,535,485,383,338,141,343,NA "MY",1998,NA,NA,31,670,1090,1050,872,426,1282,NA,NA,NA,45,519,526,398,330,157,406,NA "MY",1999,NA,NA,27,692,1147,1152,977,902,880,NA,NA,NA,32,513,558,422,351,286,268,NA "MY",2000,NA,NA,32,694,1138,1177,908,814,891,NA,NA,NA,41,464,564,424,367,356,286,NA "MY",2001,NA,NA,48,713,1198,1221,1011,934,738,NA,NA,NA,36,510,506,445,374,353,222,NA "MY",2002,NA,NA,22,562,1106,1182,997,758,844,NA,NA,NA,30,421,524,415,485,319,293,NA "MY",2003,NA,NA,216,1211,2010,2073,1798,1438,1601,NA,NA,NA,196,969,1044,857,669,584,626,NA 
"MY",2004,NA,NA,191,1195,2105,2189,1890,1440,1535,NA,NA,NA,227,925,1014,852,694,605,532,NA "MY",2005,NA,NA,244,1179,2218,2277,1980,1427,1507,NA,NA,NA,208,1044,1061,947,816,586,572,NA "MY",2006,148,NA,15,507,855,734,678,443,496,NA,NA,NA,3,30,300,403,321,257,161,NA "MY",2007,79,137,216,1291,2224,2082,1839,1394,1395,NA,59,167,226,1098,1101,849,782,585,514,NA "MY",2008,61,160,221,1436,2445,2318,2169,1599,1543,2,66,174,240,1161,1283,906,878,648,657,0 "MZ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1995,NA,NA,187,1136,1475,1338,1022,664,320,NA,NA,NA,226,994,1314,1016,551,234,89,NA "MZ",1996,NA,NA,141,1163,1507,1367,980,639,275,NA,NA,NA,205,1060,1357,938,533,239,74,NA "MZ",1997,NA,NA,163,1194,1608,1439,1076,666,313,NA,NA,NA,187,1147,1381,1002,606,265,78,NA "MZ",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MZ",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MZ",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA NA,1995,NA,NA,0,68,235,113,55,21,6,NA,NA,NA,5,49,78,50,16,1,0,NA NA,1996,NA,NA,16,205,613,472,230,137,101,NA,NA,NA,17,249,330,245,87,72,51,NA NA,1997,NA,NA,18,232,791,479,296,161,93,NA,NA,NA,23,249,401,275,104,56,47,NA NA,1998,NA,NA,21,270,816,541,267,148,111,NA,NA,NA,34,300,536,310,117,62,65,NA NA,1999,NA,NA,20,247,908,613,260,135,110,NA,NA,NA,25,339,540,336,114,77,36,NA NA,2000,NA,NA,18,269,874,665,300,147,81,NA,NA,NA,16,352,654,348,161,76,52,NA NA,2001,NA,NA,20,322,993,732,318,150,116,NA,NA,NA,32,394,729,404,168,91,66,NA NA,2002,NA,NA,19,301,1033,750,326,146,96,NA,NA,NA,42,357,795,484,182,91,67,NA NA,2003,NA,NA,31,364,1109,838,419,196,108,NA,NA,NA,47,451,927,571,216,108,102,NA NA,2004,NA,NA,31,319,1092,866,371,159,131,NA,NA,NA,30,400,819,554,203,106,74,NA 
NA,2005,NA,NA,98,355,1027,874,365,146,120,NA,NA,NA,105,399,809,525,213,95,91,NA NA,2006,NA,NA,86,347,1052,799,386,174,146,NA,NA,NA,74,485,875,521,239,92,80,NA NA,2007,NA,NA,57,370,1018,786,346,149,120,NA,NA,NA,69,417,826,513,242,102,76,NA NA,2008,NA,NA,30,387,1033,757,346,149,132,0,NA,NA,73,466,702,437,226,110,80,0 "NC",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1995,NA,NA,3,2,3,4,2,2,3,NA,NA,NA,2,1,1,3,3,0,1,NA "NC",1996,NA,NA,1,3,1,3,5,8,3,NA,NA,NA,0,2,2,1,2,1,1,NA "NC",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NC",1999,NA,NA,0,0,6,1,2,1,7,NA,NA,NA,0,0,4,1,0,2,3,NA "NC",2000,NA,NA,1,1,3,4,2,3,4,NA,NA,NA,1,8,1,1,3,2,4,NA "NC",2001,NA,NA,0,1,8,1,5,6,6,NA,NA,NA,1,1,2,1,0,0,3,NA "NC",2002,NA,NA,0,2,2,1,1,1,3,NA,NA,NA,0,4,2,2,3,0,0,NA "NC",2003,NA,NA,0,1,1,1,1,1,3,NA,NA,NA,0,0,2,2,0,0,3,NA "NC",2004,NA,NA,0,2,1,3,2,1,2,NA,NA,NA,0,2,1,0,0,1,0,NA 
"NC",2005,NA,NA,0,2,1,0,0,3,0,NA,NA,NA,0,1,2,1,2,0,4,NA "NC",2006,NA,NA,0,0,3,1,1,NA,1,NA,NA,NA,0,1,0,0,0,0,2,NA "NC",2007,NA,NA,0,1,1,2,1,3,2,NA,NA,NA,0,0,0,1,0,0,1,NA "NC",2008,0,0,0,1,1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0 "NE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",1997,NA,NA,4,148,395,215,92,58,25,NA,NA,NA,7,70,112,67,58,14,8,NA "NE",1998,NA,NA,4,218,511,399,234,159,61,NA,NA,NA,14,92,160,126,86,46,15,NA "NE",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",2000,NA,NA,29,270,174,441,252,151,78,NA,NA,NA,31,123,206,168,151,63,9,NA "NE",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",2003,NA,NA,41,485,1051,779,512,299,169,NA,NA,NA,30,201,356,279,177,83,42,NA "NE",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NE",2005,NA,NA,35,557,1204,819,497,350,198,NA,NA,NA,34,214,388,330,223,131,70,NA 
"NE",2006,NA,NA,25,537,1265,909,487,359,217,NA,NA,NA,37,270,427,306,207,149,84,NA "NE",2007,NA,NA,40,571,1380,958,577,405,249,NA,NA,NA,57,287,412,323,248,157,109,NA "NE",2008,5,30,35,659,1453,852,562,429,333,NA,4,53,57,259,414,307,237,146,110,NA "NG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",1995,NA,NA,450,845,921,937,557,611,515,NA,NA,NA,404,842,795,770,724,654,451,NA "NG",1996,NA,NA,234,2097,2557,1791,853,486,309,NA,NA,NA,411,1954,2175,1253,871,458,215,NA "NG",1997,NA,NA,116,1518,2095,1177,734,436,338,NA,NA,NA,156,1556,1517,753,458,261,120,NA "NG",1998,NA,NA,125,1798,2543,1282,889,451,369,NA,NA,NA,169,1856,1808,881,560,298,132,NA "NG",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NG",2000,NA,NA,157,2173,3164,1836,1091,566,463,NA,NA,NA,239,2934,2434,1110,676,344,231,NA "NG",2001,NA,NA,164,2196,3281,2076,1283,654,488,NA,NA,NA,272,2619,2510,1201,715,387,251,NA "NG",2002,NA,NA,163,2274,3719,2283,1352,696,534,NA,NA,NA,242,2633,2884,1368,787,420,241,NA 
"NG",2003,NA,NA,267,3263,5388,3590,2106,1139,719,NA,NA,NA,356,3394,3956,1973,1159,536,327,NA "NG",2004,NA,NA,408,3679,6252,4262,2614,1310,1267,NA,NA,NA,469,3768,4463,2220,1495,981,567,NA "NG",2005,NA,NA,325,3824,6758,4544,2863,1464,950,NA,NA,NA,482,3996,4884,2448,1350,745,415,NA "NG",2006,NA,NA,247,4488,8145,5517,3330,1431,897,NA,NA,NA,385,4029,5430,2516,1894,1049,545,NA "NG",2007,NA,NA,503,4251,8541,5776,3767,1853,1341,NA,NA,NA,685,4522,5944,3088,1926,1194,625,NA "NG",2008,NA,NA,579,4518,8910,6210,3821,1987,1267,0,NA,NA,745,4431,6391,3351,2057,1099,660,0 "NI",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NI",1995,NA,NA,23,178,172,175,126,96,92,NA,NA,NA,24,176,215,98,83,64,46,NA "NI",1996,NA,NA,27,231,200,191,120,94,94,NA,NA,NA,33,200,199,137,77,63,56,NA "NI",1997,NA,NA,18,211,210,163,115,90,83,NA,NA,NA,37,212,223,117,77,61,53,NA "NI",1998,NA,NA,24,221,193,155,106,94,110,NA,NA,NA,34,202,215,114,64,61,55,NA "NI",1999,NA,NA,26,217,212,167,125,75,85,NA,NA,NA,27,194,168,108,73,42,45,NA 
"NI",2000,NA,NA,18,194,174,147,108,64,90,NA,NA,NA,34,188,173,98,76,46,61,NA "NI",2001,NA,NA,24,213,203,139,93,75,95,NA,NA,NA,32,188,173,92,67,52,64,NA "NI",2002,NA,NA,22,168,180,140,101,73,74,NA,NA,NA,26,149,135,91,72,45,44,NA "NI",2003,NA,NA,14,179,210,135,103,68,65,NA,NA,NA,42,174,150,91,71,54,48,NA "NI",2004,NA,NA,24,161,179,105,104,87,72,NA,NA,NA,23,159,154,90,75,44,50,NA "NI",2005,NA,NA,17,163,159,116,106,61,79,NA,NA,NA,23,135,122,103,61,54,47,NA "NI",2006,0,0,15,162,151,129,98,90,72,NA,0,0,25,168,144,90,65,38,38,NA "NI",2007,0,0,16,172,194,144,130,77,91,NA,0,0,27,158,168,100,76,45,55,NA "NI",2008,NA,NA,20,174,190,130,108,90,67,0,NA,NA,38,165,164,93,54,54,55,0 "NL",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NL",1995,NA,NA,22,79,119,75,28,9,10,NA,NA,NA,24,56,50,13,10,8,7,NA "NL",1996,NA,NA,8,48,65,46,26,21,34,NA,NA,NA,1,24,40,14,5,6,20,NA "NL",1997,NA,NA,3,33,65,47,32,12,31,NA,NA,NA,4,17,31,10,12,4,11,NA "NL",1998,NA,NA,2,31,40,41,21,11,26,NA,NA,NA,2,19,25,17,4,6,9,NA 
"NL",1999,NA,NA,5,44,67,32,24,12,19,NA,NA,NA,5,26,39,16,2,1,16,NA "NL",2000,NA,NA,0,34,63,41,25,10,21,NA,NA,NA,4,29,22,16,9,5,10,NA "NL",2001,NA,NA,1,51,51,33,29,12,24,NA,NA,NA,1,26,32,19,9,5,10,NA "NL",2002,NA,NA,1,40,54,39,33,7,20,NA,NA,NA,5,27,32,12,13,4,9,NA "NL",2003,NA,NA,2,35,50,38,17,15,15,NA,NA,NA,0,16,30,12,10,3,5,NA "NL",2004,NA,NA,6,36,54,37,26,22,31,NA,NA,NA,4,20,33,15,4,4,12,NA "NL",2005,0,0,0,23,42,23,26,14,19,0,0,3,3,14,19,11,9,1,4,0 "NL",2006,0,0,0,25,23,31,23,17,19,0,0,3,3,15,17,12,5,3,10,0 "NL",2007,1,0,1,10,22,28,21,15,15,0,0,1,1,12,22,17,6,5,12,0 "NL",2008,0,0,0,16,24,26,19,18,19,0,0,2,2,13,19,14,6,6,7,0 "NO",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NO",1995,NA,NA,0,4,8,6,3,5,12,NA,NA,NA,0,4,7,2,0,3,8,NA "NO",1996,NA,NA,3,8,7,14,6,2,24,NA,NA,NA,1,4,10,5,2,0,17,NA "NO",1997,NA,NA,3,6,10,7,6,2,27,NA,NA,NA,0,3,4,8,4,2,18,NA "NO",1998,NA,NA,0,1,4,3,1,2,17,NA,NA,NA,0,8,2,3,1,2,5,NA "NO",1999,NA,NA,0,2,5,3,2,1,1,NA,NA,NA,0,3,2,2,0,0,0,NA 
"NO",2000,NA,NA,0,1,9,3,6,2,4,NA,NA,NA,1,3,1,NA,NA,2,5,NA "NO",2001,NA,NA,0,6,8,8,4,1,8,NA,NA,NA,1,6,9,1,1,2,4,NA "NO",2002,NA,NA,0,4,4,4,2,0,4,NA,NA,NA,0,3,5,1,2,0,2,NA "NO",2003,NA,NA,0,3,3,4,4,2,2,NA,NA,NA,0,4,9,4,2,0,1,NA "NO",2004,NA,NA,1,5,6,6,1,1,2,NA,NA,NA,0,3,8,4,2,1,6,NA "NO",2005,0,0,0,9,4,6,4,4,3,0,0,0,0,4,7,2,1,0,3,0 "NO",2006,0,0,0,5,10,5,3,3,1,0,0,1,1,5,5,2,2,1,3,0 "NO",2007,NA,NA,NA,4,12,2,3,1,2,NA,NA,1,1,4,2,5,1,NA,1,NA "NO",2008,0,1,1,10,8,7,2,4,3,0,0,0,0,1,6,4,0,1,6,0 "NP",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1996,NA,NA,91,1451,1285,1221,1035,738,407,NA,NA,NA,155,853,734,534,288,190,110,NA "NP",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NP",1998,NA,NA,133,1621,1522,1500,1292,884,480,NA,NA,NA,173,1112,838,621,407,219,170,NA "NP",1999,NA,NA,150,1872,1800,1703,1545,1161,799,NA,NA,NA,185,1239,1133,754,553,316,200,NA "NP",2000,NA,NA,170,1904,1763,1713,1491,1294,772,NA,NA,NA,176,1267,1078,833,575,419,228,NA 
"NP",2001,NA,NA,155,1957,1709,1743,1491,1300,775,NA,NA,NA,171,1295,1060,838,573,375,222,NA "NP",2002,NA,NA,129,1980,1707,1686,1579,1465,758,NA,NA,NA,202,1203,1041,796,544,426,198,NA "NP",2003,NA,NA,122,2039,1658,1619,1769,1639,735,NA,NA,NA,189,1283,1107,873,609,486,220,NA "NP",2004,NA,NA,121,1991,1749,1652,1710,1739,763,NA,NA,NA,188,1282,1138,849,677,540,215,NA "NP",2005,NA,NA,148,1946,1685,1722,1806,1759,820,NA,NA,NA,195,1208,1111,797,658,532,230,NA "NP",2006,0,0,125,1914,1651,1640,1688,1695,808,NA,0,0,179,1164,1001,788,613,519,243,NA "NP",2007,0,0,150,2025,1591,1636,1720,1715,919,NA,0,0,175,1149,1027,793,619,578,258,NA "NP",2008,NA,NA,81,150,1409,1558,1706,1515,792,1416,NA,NA,107,832,820,704,630,523,226,631 "NR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",1999,NA,NA,0,0,0,0,1,1,0,NA,NA,NA,0,0,1,3,0,0,0,NA "NR",2000,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,1,NA,NA "NR",2001,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA "NR",2002,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA "NR",2003,NA,NA,0,0,0,0,1,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA 
"NR",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NR",2006,0,0,0,1,0,0,0,0,0,NA,0,0,0,0,0,1,0,0,0,NA "NR",2007,0,0,1,1,0,1,0,0,0,NA,0,0,0,0,0,0,0,0,0,NA "NR",2008,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0 "NU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",1999,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,1,NA,NA,NA,NA,NA,NA "NU",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NU",2004,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "NU",2005,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "NU",2006,0,0,0,0,0,0,0,0,0,NA,0,0,0,0,0,0,0,0,0,NA 
"NU",2007,0,0,0,0,0,0,0,0,0,NA,0,0,0,0,0,0,0,0,0,NA "NU",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "NZ",1995,NA,NA,0,4,3,3,5,7,7,NA,NA,NA,1,2,3,4,2,2,4,NA "NZ",1996,NA,NA,2,4,3,9,10,3,12,NA,NA,NA,2,6,9,3,6,3,13,NA "NZ",1997,NA,NA,0,3,6,3,4,4,7,NA,NA,NA,0,4,6,5,2,5,7,NA "NZ",1998,NA,NA,1,8,10,8,7,7,4,NA,NA,NA,0,11,6,8,2,4,5,NA "NZ",1999,NA,NA,1,10,8,4,3,8,15,NA,NA,NA,1,6,7,2,3,0,1,NA "NZ",2000,NA,NA,0,6,5,6,8,10,7,NA,NA,NA,1,6,6,5,0,4,10,NA "NZ",2001,NA,NA,1,7,2,7,4,2,12,NA,NA,NA,3,9,14,3,1,3,5,NA "NZ",2002,NA,NA,0,10,14,5,6,4,10,NA,NA,NA,1,15,8,4,3,5,3,NA "NZ",2003,NA,NA,5,9,10,6,6,8,9,NA,NA,NA,7,18,8,1,10,4,5,NA "NZ",2004,NA,NA,3,10,13,10,6,5,16,NA,NA,NA,0,10,15,4,4,1,13,NA "NZ",2005,NA,NA,4,6,10,6,6,5,10,NA,NA,NA,1,11,9,6,6,1,2,NA "NZ",2006,0,5,5,14,5,8,4,3,7,NA,0,1,1,12,12,12,3,6,4,NA "NZ",2007,0,0,0,11,1,7,4,4,8,NA,1,0,1,14,7,8,6,6,4,NA "NZ",2008,0,0,0,9,4,9,5,10,18,0,1,0,1,8,13,9,3,3,9,0 
"OM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "OM",1995,NA,NA,1,7,12,7,7,10,11,NA,NA,NA,2,18,13,5,5,6,3,NA "OM",1996,NA,NA,0,15,14,8,11,8,11,NA,NA,NA,3,18,4,3,5,1,7,NA "OM",1997,NA,NA,0,18,16,14,10,11,10,NA,NA,NA,2,14,7,4,5,4,5,NA "OM",1998,NA,NA,0,18,9,8,14,9,12,NA,NA,NA,3,14,6,6,5,3,2,NA "OM",1999,NA,NA,2,10,11,23,15,7,10,NA,NA,NA,3,16,4,6,1,4,8,NA "OM",2000,NA,NA,1,8,9,11,12,9,11,NA,NA,NA,2,17,5,7,5,11,6,NA "OM",2001,NA,NA,1,10,8,12,6,8,8,NA,NA,NA,4,17,8,5,9,5,8,NA "OM",2002,NA,NA,7,22,18,20,16,26,20,NA,NA,NA,16,41,15,12,13,7,7,NA "OM",2003,NA,NA,5,28,32,31,29,13,15,NA,NA,NA,10,26,18,12,13,11,7,NA "OM",2004,NA,NA,1,15,12,23,30,12,14,NA,NA,NA,0,0,9,1,0,0,0,NA "OM",2005,NA,NA,1,21,11,24,15,19,5,NA,NA,NA,2,13,5,3,4,5,3,NA "OM",2006,0,6,6,18,19,18,18,12,2,NA,0,2,2,21,22,7,13,12,14,NA "OM",2007,NA,NA,0,16,25,25,20,13,8,NA,NA,NA,3,22,13,11,10,7,14,NA "OM",2008,0,0,0,18,28,28,28,14,10,0,0,1,1,20,10,4,4,5,1,0 "PA",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"PA",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PA",1995,NA,NA,86,155,193,112,126,42,83,NA,NA,NA,72,120,111,75,57,16,40,NA "PA",1996,NA,NA,52,68,132,87,65,44,45,NA,NA,NA,58,62,76,59,36,35,26,NA "PA",1997,NA,NA,41,79,173,117,75,70,39,NA,NA,NA,23,45,86,46,23,26,19,NA "PA",1998,NA,NA,2,14,14,10,4,3,5,NA,NA,NA,1,9,13,7,3,1,2,NA "PA",1999,NA,NA,38,107,209,134,106,81,72,NA,NA,NA,53,83,100,62,52,43,37,NA "PA",2000,NA,NA,3,44,78,61,37,27,26,NA,NA,NA,6,43,34,35,19,12,16,NA "PA",2001,NA,NA,7,58,109,89,73,50,39,NA,NA,NA,9,45,70,46,27,12,23,NA "PA",2002,NA,NA,7,89,108,101,76,68,68,NA,NA,NA,7,50,54,59,29,18,34,NA "PA",2003,NA,NA,10,91,122,81,74,61,67,NA,NA,NA,14,51,77,50,30,24,28,NA "PA",2004,NA,NA,16,89,123,118,91,65,50,NA,NA,NA,9,98,66,59,33,34,33,NA "PA",2005,NA,NA,5,76,129,129,84,57,49,NA,NA,NA,11,73,81,62,33,30,41,NA "PA",2006,NA,NA,7,100,134,107,88,48,57,NA,NA,NA,14,64,83,52,45,26,33,NA "PA",2007,NA,NA,7,106,139,116,81,50,61,NA,NA,NA,7,56,74,59,33,21,23,NA "PA",2008,1,8,9,95,117,91,107,55,60,NA,0,9,9,56,73,55,46,29,27,NA 
"PE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PE",1995,NA,NA,147,1311,849,454,322,200,216,NA,NA,NA,149,1005,660,373,259,162,152,NA "PE",1996,NA,NA,151,1351,789,420,261,190,167,NA,NA,NA,169,896,561,290,171,132,144,NA "PE",1997,NA,NA,745,6913,3853,1971,1174,842,748,NA,NA,NA,864,4560,2784,1224,734,590,496,NA "PE",1998,NA,NA,704,6271,3987,2095,1337,831,889,NA,NA,NA,862,4560,2894,1431,686,537,623,NA "PE",1999,NA,NA,712,4861,3007,1586,852,624,714,NA,NA,NA,700,4783,2958,1560,838,613,703,NA "PE",2000,NA,NA,552,5290,2875,1546,1041,801,796,NA,NA,NA,633,3686,2472,1156,609,499,624,NA "PE",2001,NA,NA,11,5591,2887,1550,979,843,696,NA,NA,NA,11,4015,2382,1117,626,480,497,NA "PE",2002,NA,NA,65,983,622,298,194,164,138,NA,NA,NA,62,688,496,251,129,96,100,NA "PE",2003,NA,NA,101,758,506,355,206,139,165,NA,NA,NA,107,659,380,228,138,106,98,NA "PE",2004,NA,NA,385,3860,2085,1357,894,747,675,NA,NA,NA,410,3258,1935,1094,678,440,471,NA 
"PE",2005,NA,NA,371,3802,2670,1513,1075,641,708,NA,NA,NA,375,2674,2111,1046,699,333,472,NA "PE",2006,36,364,400,4071,2470,1494,1106,884,869,NA,14,421,435,2713,1852,1082,762,557,556,NA "PE",2007,10,385,395,3436,2239,1585,1152,654,702,NA,5,330,335,2684,1603,1127,813,402,669,NA "PE",2008,10,74,84,3406,2233,1564,1121,608,921,0,5,47,52,2644,1599,1112,791,373,899,0 "PF",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PF",1996,NA,NA,1,4,7,5,2,1,3,NA,NA,NA,0,5,4,2,0,2,1,NA "PF",1997,NA,NA,1,4,2,4,3,5,3,NA,NA,NA,1,5,3,2,3,2,3,NA "PF",1998,NA,NA,0,4,4,3,1,5,4,NA,NA,NA,3,1,2,5,1,0,1,NA "PF",1999,NA,NA,0,2,2,2,1,2,4,NA,NA,NA,4,2,2,4,2,3,3,NA "PF",2000,NA,NA,1,3,3,4,4,4,3,NA,NA,NA,1,4,1,0,1,0,0,NA "PF",2001,NA,NA,2,5,1,2,4,4,5,NA,NA,NA,3,7,1,1,3,4,3,NA "PF",2002,NA,NA,0,4,2,1,3,3,1,NA,NA,NA,0,4,2,1,2,2,2,NA "PF",2003,NA,NA,NA,2,2,1,2,4,3,NA,NA,NA,NA,3,1,1,1,0,1,NA "PF",2004,NA,NA,1,1,2,3,0,1,4,NA,NA,NA,NA,4,6,1,4,2,1,NA 
"PF",2005,NA,NA,0,2,2,2,0,4,2,NA,NA,NA,0,2,3,0,1,1,3,NA "PF",2006,NA,NA,1,1,1,3,3,1,1,NA,NA,NA,1,6,1,0,0,2,3,NA "PF",2007,NA,NA,NA,NA,2,2,2,NA,NA,NA,NA,1,1,1,1,5,0,3,2,NA "PF",2008,0,1,1,3,1,1,1,2,2,2,0,0,0,1,1,1,0,1,2,1 "PG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PG",1996,NA,NA,11,31,25,18,4,3,2,NA,NA,NA,11,41,30,11,10,6,2,NA "PG",1997,NA,NA,2,9,8,5,2,2,0,NA,NA,NA,1,11,5,3,1,0,0,NA "PG",1998,NA,NA,9,69,57,30,25,14,4,NA,NA,NA,11,94,51,27,21,3,1,NA "PG",1999,NA,NA,1,33,25,9,8,3,0,NA,NA,NA,0,32,20,13,6,0,1,NA "PG",2000,NA,NA,8,87,70,30,21,12,5,NA,NA,NA,6,77,45,21,15,5,1,NA "PG",2001,NA,NA,4,101,72,29,26,9,4,NA,NA,NA,7,91,64,32,17,5,1,NA "PG",2002,NA,NA,18,139,133,74,62,37,6,NA,NA,NA,22,160,149,60,47,18,1,NA "PG",2003,NA,NA,17,190,153,96,65,32,7,NA,NA,NA,28,193,171,59,29,20,7,NA "PG",2004,NA,NA,28,153,138,90,61,43,6,NA,NA,NA,30,164,161,66,38,18,3,NA 
"PG",2005,NA,NA,28,183,205,108,94,48,12,NA,NA,NA,38,200,204,124,65,35,2,NA "PG",2006,NA,NA,32,221,220,122,84,48,3,NA,NA,NA,41,226,215,142,75,24,3,NA "PG",2007,NA,NA,16,178,171,112,67,50,6,NA,NA,NA,32,148,153,84,36,15,3,NA "PG",2008,NA,NA,65,250,207,160,95,58,12,NA,NA,NA,71,261,230,113,75,48,10,NA "PH",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",1995,NA,NA,2,43,56,61,46,47,26,NA,NA,NA,1,20,32,26,20,19,11,NA "PH",1996,NA,NA,1,26,47,58,50,28,28,NA,NA,NA,1,11,20,19,15,5,9,NA "PH",1997,NA,NA,5,136,273,303,262,238,129,NA,NA,NA,6,80,111,131,110,98,70,NA "PH",1998,NA,NA,2,157,292,356,256,206,81,NA,NA,NA,4,76,109,119,106,69,56,NA "PH",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PH",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"PH",2003,NA,NA,356,6360,9302,11458,10713,6445,3648,NA,NA,NA,300,3218,4551,4761,4000,2858,2018,NA "PH",2004,NA,NA,312,6792,10328,12229,11413,7526,4289,NA,NA,NA,291,3507,5090,5008,4327,3210,2183,NA "PH",2005,NA,NA,482,7358,11275,13253,12531,7646,4279,NA,NA,NA,374,3710,5268,5565,4603,3274,2029,NA "PH",2006,NA,NA,419,7878,11697,13478,12733,8074,4640,NA,NA,NA,379,4337,5746,5630,5007,3485,2237,NA "PH",2007,NA,NA,466,8524,11781,13810,12846,8481,4862,NA,NA,NA,380,4389,5594,5291,4612,3313,2217,NA "PH",2008,NA,NA,369,8735,11741,13529,12808,8249,4348,0,NA,NA,341,4529,5452,5123,4527,3086,2188,0 "PK",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1995,NA,NA,29,274,230,178,140,124,95,NA,NA,NA,85,375,381,267,178,143,79,NA "PK",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PK",1998,NA,NA,59,633,449,328,335,194,137,NA,NA,NA,159,735,507,260,209,90,50,NA "PK",1999,NA,NA,49,229,178,65,211,162,113,NA,NA,NA,33,259,373,97,146,243,114,NA 
"PK",2000,NA,NA,55,498,387,256,232,153,130,NA,NA,NA,130,591,416,274,163,103,56,NA "PK",2001,NA,NA,139,1191,891,673,664,496,306,NA,NA,NA,241,1007,915,650,421,252,142,NA "PK",2002,NA,NA,225,1964,1734,1270,1113,864,554,NA,NA,NA,512,2401,1917,1283,809,539,303,NA "PK",2003,NA,NA,284,2605,2346,1851,1652,1288,870,NA,NA,NA,622,3007,2471,1669,1280,845,503,NA "PK",2004,NA,NA,363,3812,3309,2676,2329,2057,1581,NA,NA,NA,950,4281,3656,2452,1794,1350,837,NA "PK",2005,NA,NA,621,5278,4759,4263,3834,3332,2453,NA,NA,NA,1447,6463,5611,3987,2866,2060,1338,NA "PK",2006,NA,NA,820,7290,6896,5594,5427,4392,3439,NA,NA,NA,1941,8410,7030,5404,3913,2802,1950,NA "PK",2007,NA,NA,1017,9598,8790,7717,7237,6258,5156,NA,NA,NA,2443,11522,9162,7352,5496,4065,2934,NA "PK",2008,NA,NA,1213,10521,9889,8428,8284,6890,5959,0,NA,NA,2696,12838,10489,8146,6387,4750,3547,NA "PL",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PL",1995,NA,NA,3,122,295,795,565,369,377,NA,NA,NA,4,129,163,225,111,107,414,NA 
"PL",1996,NA,NA,10,248,545,1365,1128,687,724,NA,NA,NA,9,180,324,415,202,159,823,NA "PL",1997,NA,NA,3,104,278,781,594,374,359,NA,NA,NA,7,91,155,205,96,94,345,NA "PL",1998,NA,NA,4,99,266,752,647,311,367,NA,NA,NA,5,102,161,219,127,81,361,NA "PL",1999,NA,NA,0,84,219,681,654,305,306,NA,NA,NA,10,95,113,178,129,81,322,NA "PL",2000,NA,NA,1,99,303,812,782,361,434,NA,NA,NA,1,99,158,211,170,82,421,NA "PL",2001,NA,NA,5,78,242,603,662,275,322,NA,NA,NA,4,99,148,170,124,63,360,NA "PL",2002,NA,NA,4,100,206,515,687,264,309,NA,NA,NA,7,90,135,157,148,70,368,NA "PL",2003,NA,NA,2,93,234,436,653,305,349,NA,NA,NA,3,91,108,152,132,65,358,NA "PL",2004,NA,NA,1,85,225,425,664,243,292,NA,NA,NA,2,92,136,126,118,79,285,NA "PL",2005,2,1,3,109,199,389,639,292,310,0,0,3,3,95,142,112,151,63,316,0 "PL",2006,1,0,1,92,215,390,649,357,285,0,0,1,1,83,142,112,118,72,318,0 "PL",2007,0,2,2,85,213,395,677,344,285,0,0,4,4,65,149,120,132,79,277,0 "PL",2008,3,3,6,66,175,397,653,355,239,0,0,3,3,65,106,112,132,77,264,0 "PR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"PR",1995,NA,NA,4,3,12,20,15,9,19,NA,NA,NA,1,2,6,5,7,4,9,NA "PR",1996,NA,NA,2,1,20,18,15,10,16,NA,NA,NA,0,5,5,5,6,2,4,NA "PR",1997,NA,NA,1,4,13,18,19,13,18,NA,NA,NA,0,3,6,3,5,14,8,NA "PR",1998,NA,NA,1,9,11,16,12,14,12,NA,NA,NA,1,0,5,6,4,9,6,NA "PR",1999,NA,NA,0,5,9,22,9,11,20,NA,NA,NA,1,4,5,3,6,5,6,NA "PR",2000,NA,NA,0,1,4,19,9,10,14,NA,NA,NA,1,4,5,3,7,1,3,NA "PR",2001,NA,NA,0,5,4,11,12,6,11,NA,NA,NA,0,3,1,4,9,2,6,NA "PR",2002,NA,NA,2,4,7,12,10,9,7,NA,NA,NA,0,1,5,9,2,5,5,NA "PR",2003,NA,NA,0,3,5,8,10,12,9,NA,NA,NA,0,3,2,3,1,3,3,NA "PR",2004,NA,NA,0,2,7,8,7,12,7,NA,NA,NA,0,2,3,4,6,2,5,NA "PR",2005,NA,NA,0,4,4,7,9,7,7,NA,NA,NA,0,3,2,5,4,1,7,NA "PR",2006,NA,NA,1,4,7,6,13,9,7,NA,NA,NA,1,4,3,6,3,2,3,NA "PR",2007,0,0,0,6,2,9,8,10,6,NA,0,0,0,0,2,4,7,1,1,NA "PR",2008,0,0,0,2,4,3,13,11,6,0,0,0,0,1,4,3,2,3,0,0 "PS",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",1995,NA,NA,1,2,0,0,1,0,3,NA,NA,NA,0,1,0,0,1,0,0,NA "PS",1996,NA,NA,0,2,2,2,2,2,4,NA,NA,NA,1,2,1,2,0,3,1,NA "PS",1998,NA,NA,NA,1,1,NA,2,NA,1,NA,NA,NA,NA,NA,NA,1,NA,1,1,NA "PS",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"PS",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PS",2003,NA,NA,0,1,1,1,3,0,2,NA,NA,NA,0,1,0,0,3,0,3,NA "PS",2004,NA,NA,NA,1,1,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA "PS",2005,NA,NA,NA,1,NA,NA,1,3,NA,NA,NA,NA,NA,NA,1,NA,1,NA,NA,NA "PS",2006,0,0,0,1,3,4,1,1,2,NA,0,0,0,0,0,1,1,1,1,NA "PS",2007,0,0,1,1,3,2,0,3,1,NA,0,0,0,0,1,0,0,2,0,NA "PS",2008,0,0,0,1,1,3,2,2,2,0,0,0,0,2,0,0,1,2,0,0 "PT",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PT",1995,NA,NA,11,215,363,328,200,173,164,NA,NA,NA,7,139,172,87,33,42,85,NA "PT",1996,NA,NA,12,176,359,331,192,158,203,NA,NA,NA,6,114,177,76,31,37,66,NA "PT",1997,NA,NA,8,135,313,303,217,130,84,NA,NA,NA,4,105,141,77,27,23,61,NA "PT",1998,NA,NA,8,154,367,362,232,141,173,NA,NA,NA,5,132,160,123,44,33,82,NA "PT",1999,NA,NA,13,113,288,378,232,146,189,NA,NA,NA,9,98,134,86,30,28,57,NA "PT",2000,NA,NA,8,147,375,349,208,140,140,NA,NA,NA,5,114,154,87,41,25,64,NA 
"PT",2001,NA,NA,9,156,329,356,218,109,140,NA,NA,NA,13,110,160,83,36,30,63,NA "PT",2002,NA,NA,12,156,342,411,272,129,171,NA,NA,NA,5,99,141,87,33,29,73,NA "PT",2003,NA,NA,11,134,297,333,227,99,148,NA,NA,NA,7,99,163,82,39,27,47,NA "PT",2004,NA,NA,4,97,258,336,216,98,115,NA,NA,NA,3,89,122,65,22,16,50,NA "PT",2005,3,2,5,85,227,284,181,90,93,5,3,4,7,67,109,66,29,11,42,1 "PT",2006,4,3,7,80,211,259,190,94,108,2,1,3,4,56,107,85,33,22,41,1 "PT",2007,1,3,4,69,178,268,188,82,112,0,0,2,2,49,95,61,27,12,26,0 "PT",2008,1,1,2,51,155,212,179,80,84,0,0,3,3,54,86,55,38,15,39,0 "PW",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1995,NA,NA,0,2,3,0,2,1,0,NA,NA,NA,0,0,0,0,1,0,0,NA "PW",1996,NA,NA,0,1,0,0,0,2,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1997,NA,NA,0,0,1,2,0,2,0,NA,NA,NA,0,0,0,2,0,0,0,NA "PW",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",1999,NA,NA,0,2,2,5,1,2,1,NA,NA,NA,0,1,3,1,0,2,0,NA "PW",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",2002,NA,NA,1,0,1,1,2,2,1,NA,NA,NA,0,0,3,0,0,0,0,NA 
"PW",2003,NA,NA,0,0,1,1,1,1,0,NA,NA,NA,1,0,0,1,0,1,2,NA "PW",2004,NA,NA,NA,NA,NA,NA,1,2,NA,NA,NA,NA,NA,NA,NA,1,1,NA,NA,NA "PW",2005,NA,NA,NA,NA,2,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PW",2006,1,0,1,0,1,2,1,0,0,NA,0,0,0,0,1,0,0,0,0,NA "PW",2007,0,0,0,0,1,0,2,1,0,NA,0,0,0,0,0,0,0,1,0,NA "PW",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "PY",1995,NA,NA,18,64,71,96,74,57,61,NA,NA,NA,13,65,49,46,35,34,53,NA "PY",1996,NA,NA,17,84,100,79,91,63,49,NA,NA,NA,16,80,91,50,50,48,59,NA "PY",1997,NA,NA,25,100,82,75,76,58,74,NA,NA,NA,27,91,72,58,48,50,42,NA "PY",1998,NA,NA,14,100,101,96,82,66,85,NA,NA,NA,17,87,55,37,36,34,38,NA "PY",1999,NA,NA,19,113,157,111,114,69,67,NA,NA,NA,22,84,72,56,43,48,60,NA "PY",2000,NA,NA,16,112,103,105,86,80,71,NA,NA,NA,12,69,86,41,41,30,46,NA "PY",2001,NA,NA,18,114,106,85,89,74,73,NA,NA,NA,22,91,71,46,51,31,41,NA "PY",2002,NA,NA,20,119,127,112,105,78,78,NA,NA,NA,12,88,83,50,36,55,39,NA 
"PY",2003,NA,NA,11,163,174,109,123,81,91,NA,NA,NA,28,87,71,77,50,40,61,NA "PY",2004,NA,NA,18,160,132,120,107,103,121,NA,NA,NA,21,106,87,69,63,50,43,NA "PY",2005,NA,NA,23,168,185,136,117,87,99,NA,NA,NA,31,89,98,69,52,29,71,NA "PY",2006,6,14,20,188,221,143,150,124,116,NA,2,14,16,130,79,73,55,63,66,NA "PY",2007,3,11,14,171,221,152,135,94,100,NA,2,13,15,100,98,46,46,34,47,NA "PY",2008,3,8,11,238,227,138,138,91,90,4,1,9,10,92,87,60,61,42,56,0 "QA",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "QA",1995,NA,NA,0,8,12,11,13,4,4,NA,NA,NA,1,2,3,1,0,0,1,NA "QA",1996,NA,NA,0,2,7,16,10,3,1,NA,NA,NA,0,0,1,0,2,1,0,NA "QA",1997,NA,NA,0,8,11,7,3,4,0,NA,NA,NA,0,2,1,0,1,1,1,NA "QA",1998,NA,NA,0,10,17,8,10,4,2,NA,NA,NA,1,4,2,3,2,3,2,NA "QA",1999,NA,NA,0,5,15,12,12,3,2,NA,NA,NA,0,2,3,3,1,0,0,NA "QA",2000,NA,NA,0,7,19,9,7,2,1,NA,NA,NA,0,0,4,3,1,0,0,NA "QA",2001,NA,NA,1,2,0,3,4,0,3,NA,NA,NA,0,1,0,0,1,1,1,NA "QA",2002,NA,NA,NA,8,12,9,8,1,3,NA,NA,NA,NA,6,13,1,3,NA,NA,NA "QA",2003,NA,NA,1,10,27,17,16,5,5,NA,NA,NA,0,4,6,0,2,0,2,NA 
"QA",2004,NA,NA,0,9,13,13,8,10,1,NA,NA,NA,0,6,5,4,2,2,0,NA "QA",2005,NA,NA,NA,19,15,17,19,5,1,NA,NA,NA,NA,5,10,2,1,2,0,NA "QA",2006,NA,NA,0,22,21,17,22,6,1,NA,NA,NA,0,6,11,7,1,0,1,NA "QA",2007,0,0,0,26,38,19,10,4,0,NA,0,0,1,4,6,5,3,0,0,NA "QA",2008,0,1,1,47,67,26,18,10,2,0,0,0,0,4,14,6,2,0,2,0 "RO",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RO",1995,NA,NA,387,1662,2322,3608,2587,1751,784,NA,NA,NA,355,1352,1240,871,479,396,417,NA "RO",1996,NA,NA,35,851,1640,2606,1901,1236,500,NA,NA,NA,48,749,630,547,302,237,249,NA "RO",1997,NA,NA,31,1073,1618,2535,1990,1116,461,NA,NA,NA,53,735,745,545,318,200,245,NA "RO",1998,NA,NA,21,895,1624,2327,1762,1011,522,NA,NA,NA,43,725,692,448,300,219,232,NA "RO",1999,NA,NA,34,842,1524,2043,1653,918,472,NA,NA,NA,48,732,709,496,318,198,317,NA "RO",2000,NA,NA,46,832,1508,1799,1684,916,533,NA,NA,NA,53,701,766,484,341,207,321,NA "RO",2001,NA,NA,60,790,1670,1925,2000,975,685,NA,NA,NA,70,713,825,497,391,228,347,NA 
"RO",2002,NA,NA,102,742,1682,1854,1914,854,605,NA,NA,NA,74,669,839,435,370,202,351,NA "RO",2003,NA,NA,37,750,1565,1695,1953,836,594,NA,NA,NA,58,667,770,470,412,196,404,NA "RO",2004,NA,NA,31,718,1582,1798,1999,917,629,NA,NA,NA,59,682,797,546,458,230,432,NA "RO",2005,10,26,36,752,1511,1786,1999,952,638,4,8,47,55,758,780,493,374,219,442,2 "RO",2006,9,21,30,748,1306,1624,1738,847,580,0,12,25,37,669,763,448,334,224,465,1 "RO",2007,12,13,25,706,1149,1559,1704,889,611,NA,3,31,34,665,634,439,332,230,448,NA "RO",2008,8,14,22,671,1124,1656,1713,977,625,0,5,32,37,557,567,518,320,225,499,0 "RS",2005,0,3,3,62,96,118,156,112,132,0,0,6,6,69,76,55,49,22,149,0 "RS",2006,2,4,6,87,91,107,167,83,144,1,0,7,7,78,74,43,44,44,152,0 "RS",2007,0,0,0,42,59,102,163,94,106,0,0,2,2,38,52,43,43,26,135,0 "RS",2008,0,1,1,37,51,86,131,112,107,0,0,3,3,42,46,33,39,26,138,0 "RU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"RU",1996,NA,NA,0,12,46,69,55,36,17,NA,NA,NA,0,8,6,9,9,4,12,NA "RU",1997,NA,NA,0,38,100,150,114,77,39,NA,NA,NA,0,20,30,30,20,13,29,NA "RU",1998,NA,NA,0,45,89,161,131,81,34,NA,NA,NA,2,24,24,33,20,15,24,NA "RU",1999,NA,NA,17,1858,4138,5037,3992,1618,859,NA,NA,NA,33,761,1022,989,600,313,507,NA "RU",2000,NA,NA,1,295,526,596,402,151,54,NA,NA,NA,1,43,73,74,38,31,44,NA "RU",2001,NA,NA,26,2124,4317,5912,5435,2026,941,NA,NA,NA,37,1019,1315,1374,1040,442,598,NA "RU",2002,NA,NA,0,2081,4497,6003,5810,2074,1061,NA,NA,NA,0,1120,1496,1492,1100,452,632,NA "RU",2003,NA,NA,0,2128,4812,5979,5924,2014,1058,NA,NA,NA,0,1156,1753,1537,1281,462,698,NA "RU",2004,NA,NA,18,2355,5079,6165,6053,2167,1184,NA,NA,NA,45,1399,2051,1695,1415,528,736,NA "RU",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RU",2006,NA,NA,18,2445,5774,5923,6342,2440,1120,NA,NA,NA,40,1514,2207,1703,1492,560,757,NA "RU",2007,3,17,20,2492,6008,5874,6363,2491,1291,NA,5,35,40,1444,2418,1684,1454,653,871,NA "RU",2008,1,11,12,2495,6475,6005,6300,2687,1147,0,0,33,33,1467,2569,1707,1530,687,835,0 "RW",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1996,NA,NA,48,222,398,325,124,85,24,NA,NA,NA,45,229,278,161,47,23,5,NA 
"RW",1997,NA,NA,78,284,633,537,209,87,40,NA,NA,NA,78,274,343,175,67,37,10,NA "RW",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",1999,NA,NA,93,245,530,424,224,70,31,NA,NA,NA,59,189,262,166,49,31,5,NA "RW",2000,NA,NA,155,466,974,824,393,129,56,NA,NA,NA,105,396,473,309,109,52,14,NA "RW",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "RW",2002,NA,NA,13,96,167,184,79,38,13,NA,NA,NA,15,98,113,58,22,15,8,NA "RW",2003,NA,NA,32,364,517,424,270,83,48,NA,NA,NA,36,312,340,161,79,41,17,NA "RW",2004,NA,NA,52,561,722,595,353,171,64,NA,NA,NA,73,460,469,293,150,53,25,NA "RW",2005,NA,NA,45,494,713,592,408,142,71,NA,NA,NA,73,483,442,262,157,60,29,NA "RW",2006,NA,NA,25,598,769,591,407,182,100,NA,NA,NA,80,494,467,259,139,72,37,NA "RW",2007,7,44,51,523,805,556,352,168,91,NA,3,78,81,477,468,245,131,70,35,NA "RW",2008,4,29,33,528,811,573,373,191,125,NA,1,64,65,439,472,280,161,82,40,NA "SA",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"SA",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SA",1998,NA,NA,2,76,140,96,65,45,62,NA,NA,NA,16,82,86,32,27,28,40,NA "SA",1999,NA,NA,5,155,314,245,152,103,143,NA,NA,NA,39,182,201,94,74,73,80,NA "SA",2000,NA,NA,0,131,268,213,158,86,107,NA,NA,NA,28,172,182,79,51,50,70,NA "SA",2001,NA,NA,7,141,221,163,135,62,106,NA,NA,NA,28,161,163,88,44,39,44,NA "SA",2002,NA,NA,11,148,309,211,138,104,110,NA,NA,NA,28,186,194,72,60,51,52,NA "SA",2003,NA,NA,5,150,285,200,145,102,107,NA,NA,NA,18,210,181,75,58,51,59,NA "SA",2004,NA,NA,4,202,289,217,163,89,85,NA,NA,NA,24,204,171,80,53,47,64,NA "SA",2005,NA,NA,8,182,276,201,175,70,107,NA,NA,NA,31,205,184,98,73,51,61,NA "SA",2006,NA,NA,10,256,323,229,169,94,101,NA,NA,NA,39,226,211,107,56,37,56,NA "SA",2007,NA,NA,8,246,312,219,187,111,92,NA,NA,NA,30,298,197,110,71,39,64,NA "SA",2008,4,12,16,295,334,184,153,93,102,NA,2,31,33,274,271,137,85,48,83,NA "SB",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"SB",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",1995,NA,NA,2,14,6,5,7,9,3,NA,NA,NA,3,17,11,7,12,13,0,NA "SB",1996,NA,NA,4,9,9,3,7,4,6,NA,NA,NA,5,5,9,8,12,6,3,NA "SB",1997,NA,NA,2,20,8,6,9,4,5,NA,NA,NA,3,19,14,10,8,4,1,NA "SB",1998,NA,NA,3,15,9,14,18,7,12,NA,NA,NA,2,14,16,10,11,7,2,NA "SB",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",2000,NA,NA,3,13,4,8,8,10,6,NA,NA,NA,8,15,13,7,7,5,2,NA "SB",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SB",2002,NA,NA,3,16,12,9,9,7,4,NA,NA,NA,0,16,15,4,2,7,4,NA "SB",2003,NA,NA,4,14,9,12,14,8,0,NA,NA,NA,9,14,14,16,13,10,1,NA "SB",2004,NA,NA,6,11,12,8,11,9,5,NA,NA,NA,10,22,20,13,14,8,3,NA "SB",2005,NA,NA,4,14,18,9,15,12,11,NA,NA,NA,9,23,21,12,11,9,1,NA "SB",2006,NA,NA,1,13,11,4,4,14,8,NA,NA,NA,4,16,14,9,14,8,4,NA "SB",2007,0,5,5,15,16,12,9,8,6,NA,0,5,5,12,25,9,10,5,5,NA "SB",2008,0,3,3,17,12,11,10,11,7,0,0,4,4,13,23,11,13,3,2,0 "SC",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",1995,NA,NA,0,2,0,1,1,2,1,NA,NA,NA,0,0,1,0,0,0,1,NA "SC",1996,NA,NA,0,0,1,3,1,1,0,NA,NA,NA,0,1,0,0,0,1,1,NA 
"SC",1997,NA,NA,0,1,2,2,1,1,1,NA,NA,NA,0,1,0,0,0,1,1,NA "SC",1998,NA,NA,0,0,1,3,2,1,1,NA,NA,NA,0,0,0,1,0,1,0,NA "SC",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",2000,NA,NA,NA,NA,2,4,1,1,NA,NA,NA,NA,NA,NA,1,0,1,1,NA,NA "SC",2001,NA,NA,0,0,2,4,0,2,2,NA,NA,NA,0,0,2,0,1,0,1,NA "SC",2002,NA,NA,0,1,3,1,0,1,1,NA,NA,NA,0,0,0,0,0,0,2,NA "SC",2003,NA,NA,0,1,0,0,1,2,0,NA,NA,NA,0,0,0,0,0,0,0,NA "SC",2004,NA,NA,0,0,2,0,3,2,0,NA,NA,NA,0,0,1,2,2,1,0,NA "SC",2005,NA,NA,0,2,1,2,1,0,0,NA,NA,NA,0,0,1,1,0,0,0,NA "SC",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SC",2008,0,0,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0 "SD",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1995,NA,NA,250,604,796,634,486,362,337,NA,NA,NA,359,490,613,299,403,342,305,NA "SD",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SD",1998,NA,NA,805,1079,1533,1133,820,523,453,NA,NA,NA,680,875,1036,723,528,356,247,NA "SD",1999,NA,NA,842,1100,1456,1270,978,841,839,NA,NA,NA,903,1035,1111,1104,817,594,615,NA 
"SD",2000,NA,NA,785,1028,1511,1351,1119,638,677,NA,NA,NA,817,925,1134,905,771,327,323,NA "SD",2001,NA,NA,732,1018,1368,1085,777,462,301,NA,NA,NA,590,787,910,715,467,212,58,NA "SD",2002,NA,NA,559,1171,1494,1168,852,511,405,NA,NA,NA,498,865,1007,840,523,275,170,NA "SD",2003,NA,NA,489,1195,1644,1271,856,645,473,NA,NA,NA,443,881,1052,879,562,384,219,NA "SD",2004,NA,NA,537,1377,1791,1465,1035,697,467,NA,NA,NA,426,978,1187,897,601,400,237,NA "SD",2005,NA,NA,425,1358,1990,1541,1151,724,493,NA,NA,NA,381,1102,1203,978,729,411,244,NA "SD",2006,0,0,297,1351,1890,1504,1102,710,532,NA,0,0,312,965,1108,948,763,442,270,NA "SD",2007,NA,NA,288,1355,1903,1540,1102,729,556,NA,NA,NA,334,992,1318,990,729,467,324,NA "SD",2008,40,277,317,1241,1740,1301,903,610,534,0,41,280,321,850,962,841,601,344,235,0 "SE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SE",1995,NA,NA,1,5,12,8,5,4,27,NA,NA,NA,0,10,13,5,5,4,14,NA "SE",1996,NA,NA,1,11,8,3,5,5,17,NA,NA,NA,1,4,10,4,1,6,14,NA 
"SE",1997,NA,NA,0,6,9,13,5,0,16,NA,NA,NA,2,10,8,9,2,3,11,NA "SE",1998,NA,NA,2,6,9,3,8,3,15,NA,NA,NA,1,10,15,5,0,2,18,NA "SE",1999,NA,NA,0,13,18,12,5,2,22,NA,NA,NA,1,7,14,7,3,2,10,NA "SE",2000,NA,NA,0,9,10,12,11,4,25,NA,NA,NA,1,9,8,10,2,2,15,NA "SE",2001,NA,NA,1,10,15,5,3,1,23,NA,NA,NA,1,4,12,8,2,2,18,NA "SE",2002,NA,NA,0,6,15,10,8,7,8,NA,NA,NA,0,11,14,8,7,2,13,NA "SE",2003,NA,NA,0,8,14,12,4,5,20,NA,NA,NA,0,10,18,6,2,0,10,NA "SE",2004,NA,NA,1,10,19,8,8,12,13,NA,NA,NA,0,11,11,13,2,3,9,NA "SE",2005,0,0,0,7,21,16,10,5,16,0,0,1,1,10,15,12,5,3,13,0 "SE",2006,0,0,0,4,15,14,5,3,16,0,0,1,1,12,14,9,1,2,10,0 "SE",2007,0,0,0,7,20,10,5,3,9,0,0,1,1,5,11,8,4,1,12,0 "SE",2008,0,0,0,14,15,9,5,7,6,0,0,0,0,12,13,5,4,1,6,0 "SG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1995,NA,NA,0,9,40,60,62,70,94,NA,NA,NA,1,8,18,21,22,19,31,NA "SG",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SG",1997,NA,NA,0,8,27,49,60,88,101,NA,NA,NA,0,11,16,12,16,12,32,NA 
"SG",1998,NA,NA,1,9,36,70,63,81,104,NA,NA,NA,2,6,18,11,20,25,34,NA "SG",1999,NA,NA,0,18,23,41,72,55,124,NA,NA,NA,0,12,21,18,23,17,29,NA "SG",2000,NA,NA,1,8,9,34,51,26,64,NA,NA,NA,1,9,8,7,9,5,16,NA "SG",2001,NA,NA,1,6,19,39,70,66,76,NA,NA,NA,1,5,7,19,15,9,24,NA "SG",2002,NA,NA,0,14,28,73,88,65,130,NA,NA,NA,2,10,15,30,32,24,38,NA "SG",2003,NA,NA,1,17,28,68,96,80,133,NA,NA,NA,0,6,26,30,20,20,58,NA "SG",2004,NA,NA,1,12,32,56,83,75,119,NA,NA,NA,0,6,15,18,17,19,48,NA "SG",2005,NA,NA,0,8,25,61,94,96,118,NA,NA,NA,0,5,20,33,29,20,43,NA "SG",2006,1,1,2,7,31,67,107,75,106,NA,0,0,0,19,22,22,22,27,31,NA "SG",2007,0,0,0,15,18,63,98,80,105,NA,0,1,1,13,13,25,23,11,39,NA "SG",2008,0,0,0,10,21,46,106,94,127,0,0,0,0,9,16,20,26,17,33,0 "SI",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SI",1995,NA,NA,1,13,39,63,36,26,27,NA,NA,NA,0,7,24,11,9,5,42,NA "SI",1996,NA,NA,0,5,27,46,37,28,13,NA,NA,NA,0,8,15,9,7,7,19,NA "SI",1997,NA,NA,0,4,16,33,19,15,20,NA,NA,NA,0,8,15,8,1,4,13,NA 
"SI",1998,NA,NA,0,5,22,27,19,13,14,NA,NA,NA,0,8,12,10,7,2,18,NA "SI",1999,NA,NA,0,3,21,40,27,11,15,NA,NA,NA,0,0,5,6,5,6,20,NA "SI",2000,NA,NA,0,3,11,36,22,14,17,NA,NA,NA,0,3,9,3,4,3,20,NA "SI",2001,NA,NA,0,4,11,30,27,11,7,NA,NA,NA,0,5,11,11,3,5,14,NA "SI",2002,NA,NA,0,8,11,25,26,14,9,NA,NA,NA,0,3,7,6,1,3,17,NA "SI",2003,NA,NA,0,3,9,23,22,7,15,NA,NA,NA,0,5,5,4,3,4,16,NA "SI",2004,NA,NA,0,5,7,10,10,8,13,NA,NA,NA,0,4,6,4,3,2,17,NA "SI",2005,0,0,0,4,10,16,15,11,14,0,0,0,0,4,4,6,5,4,16,0 "SI",2006,0,0,0,3,5,9,12,7,6,0,0,0,0,5,7,4,2,4,19,0 "SI",2007,0,0,0,0,7,15,14,12,9,0,0,0,0,1,5,6,2,3,16,0 "SI",2008,0,0,0,3,12,9,17,12,3,0,0,0,0,2,7,2,5,2,7,0 "SK",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SK",1995,NA,NA,4,18,44,123,108,63,152,NA,NA,NA,5,16,17,22,24,33,159,NA "SK",1996,NA,NA,2,16,42,64,105,61,134,NA,NA,NA,4,23,28,24,17,17,203,NA "SK",1997,NA,NA,1,2,24,54,38,31,40,NA,NA,NA,0,10,11,10,3,9,50,NA "SK",1998,NA,NA,0,5,30,53,50,37,35,NA,NA,NA,0,5,3,16,6,5,58,NA 
"SK",1999,NA,NA,1,2,19,42,51,19,29,NA,NA,NA,0,8,10,7,7,8,43,NA "SK",2000,NA,NA,2,6,15,31,50,16,32,NA,NA,NA,0,5,9,7,5,4,54,NA "SK",2001,NA,NA,0,8,13,30,48,26,22,NA,NA,NA,1,4,9,12,8,4,41,NA "SK",2002,NA,NA,0,4,18,35,40,21,26,NA,NA,NA,0,6,9,7,3,5,26,NA "SK",2003,NA,NA,1,6,8,31,36,19,25,NA,NA,NA,1,8,9,10,3,4,38,NA "SK",2004,NA,NA,0,2,17,30,30,12,21,NA,NA,NA,1,1,2,3,6,4,26,NA "SK",2005,0,0,0,3,13,16,25,25,20,0,0,0,0,1,8,9,5,6,27,0 "SK",2006,1,3,4,8,11,18,27,29,17,0,0,0,0,6,6,7,4,3,20,0 "SK",2007,0,0,0,8,10,18,51,15,23,0,0,1,1,5,3,5,6,3,28,0 "SK",2008,0,0,0,2,7,16,46,10,11,0,1,0,1,2,4,4,5,1,17,0 "SL",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SL",1995,NA,NA,10,184,305,201,99,47,22,NA,NA,NA,18,165,193,110,65,24,11,NA "SL",1996,NA,NA,23,249,450,310,180,79,51,NA,NA,NA,35,201,278,218,89,49,22,NA "SL",1997,NA,NA,14,230,470,359,182,89,47,NA,NA,NA,21,207,328,228,67,39,15,NA "SL",1998,NA,NA,14,226,445,338,191,78,42,NA,NA,NA,36,235,294,217,86,43,17,NA "SL",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"SL",2000,NA,NA,18,287,486,361,190,113,47,NA,NA,NA,27,249,298,225,92,49,30,NA "SL",2001,NA,NA,19,268,546,406,230,123,51,NA,NA,NA,36,279,292,234,120,61,27,NA "SL",2002,NA,NA,23,317,561,427,246,102,58,NA,NA,NA,31,300,382,284,133,48,26,NA "SL",2003,NA,NA,19,351,564,481,264,149,77,NA,NA,NA,26,308,394,249,122,77,32,NA "SL",2004,NA,NA,19,417,659,581,364,153,130,NA,NA,NA,40,304,440,319,170,89,50,NA "SL",2005,NA,NA,45,490,792,651,397,226,124,NA,NA,NA,54,393,518,312,207,114,47,NA "SL",2006,NA,NA,43,485,851,709,446,216,166,NA,NA,NA,68,375,536,357,207,111,59,NA "SL",2007,1,44,45,538,1032,797,520,258,172,NA,0,74,74,398,568,468,255,143,79,NA "SL",2008,3,43,NA,625,1062,938,573,265,188,NA,1,53,NA,460,609,501,269,153,83,NA "SM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,0,1,0,0,0,0,NA "SM",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",2000,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",2003,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "SM",2004,NA,NA,0,0,0,0,0,0,0,NA,NA,NA,0,0,0,0,0,0,0,NA "SM",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SM",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"SN",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SN",1995,NA,NA,94,717,1219,813,408,300,213,NA,NA,NA,84,428,461,283,203,126,72,NA "SN",1996,NA,NA,74,773,1281,973,474,277,264,NA,NA,NA,89,450,549,341,209,121,74,NA "SN",1997,NA,NA,64,753,1151,876,467,267,215,NA,NA,NA,75,421,509,292,191,87,62,NA "SN",1998,NA,NA,90,781,1208,856,453,250,215,NA,NA,NA,84,412,447,307,178,98,75,NA "SN",1999,NA,NA,50,721,1070,749,424,233,185,NA,NA,NA,58,441,434,298,184,106,58,NA "SN",2000,NA,NA,60,772,1297,857,470,279,189,NA,NA,NA,77,521,540,376,217,107,61,NA "SN",2001,NA,NA,77,908,1331,890,498,258,226,NA,NA,NA,90,540,531,333,204,113,95,NA "SN",2002,NA,NA,58,815,1271,813,488,279,212,NA,NA,NA,61,545,523,317,210,118,86,NA "SN",2003,NA,NA,50,1005,1438,896,531,293,250,NA,NA,NA,77,629,600,398,212,122,86,NA "SN",2004,NA,NA,60,1085,1464,915,506,264,213,NA,NA,NA,73,620,485,324,211,139,78,NA "SN",2005,NA,NA,71,1050,1561,904,533,274,236,NA,NA,NA,83,709,568,351,185,116,81,NA "SN",2006,NA,NA,60,1124,1606,919,553,292,230,NA,NA,NA,74,676,572,360,204,124,88,NA "SN",2007,NA,NA,57,1053,1722,875,549,329,251,NA,NA,NA,73,761,603,378,241,121,95,NA 
"SN",2008,2,65,67,1274,1767,907,593,351,228,0,1,78,79,816,659,368,230,148,97,0 "SO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SO",1995,NA,NA,46,334,730,201,127,278,109,NA,NA,NA,38,158,139,97,40,25,16,NA "SO",1996,NA,NA,45,439,557,263,153,82,63,NA,NA,NA,49,221,224,108,50,38,26,NA "SO",1997,NA,NA,72,565,658,311,187,172,112,NA,NA,NA,63,296,313,173,82,56,37,NA "SO",1998,NA,NA,99,541,599,337,198,145,126,NA,NA,NA,77,270,321,176,78,74,80,NA "SO",1999,NA,NA,136,643,678,383,175,175,124,NA,NA,NA,131,302,302,190,100,74,74,NA "SO",2000,NA,NA,113,740,724,408,254,195,142,NA,NA,NA,85,354,319,219,110,72,41,NA "SO",2001,NA,NA,125,899,880,476,310,257,196,NA,NA,NA,91,439,413,259,129,97,69,NA "SO",2002,NA,NA,119,922,821,478,307,219,176,NA,NA,NA,112,468,447,302,172,111,75,NA "SO",2003,NA,NA,118,1054,850,513,319,250,214,NA,NA,NA,106,535,462,333,171,161,104,NA "SO",2004,NA,NA,175,1228,1059,610,419,326,278,NA,NA,NA,129,676,618,428,266,157,110,NA "SO",2005,NA,NA,125,1343,1114,725,458,330,319,NA,NA,NA,169,752,636,436,292,212,157,NA "SO",2006,NA,NA,166,1377,1121,647,436,309,336,NA,NA,NA,170,668,628,432,269,171,131,NA "SO",2007,NA,NA,125,1239,1008,578,407,296,289,NA,NA,NA,135,602,520,378,243,181,129,NA "SO",2008,NA,NA,116,1273,1067,635,422,314,298,0,NA,NA,138,604,587,439,285,191,151,0 "SR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"SR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1996,NA,NA,1,5,11,11,0,4,6,NA,NA,NA,2,3,6,5,0,3,3,NA "SR",1997,NA,NA,0,6,7,3,2,0,2,NA,NA,NA,0,4,1,0,1,0,1,NA "SR",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",2000,NA,NA,1,6,6,3,2,0,4,NA,NA,NA,2,3,6,3,0,1,1,NA "SR",2001,NA,NA,1,2,7,3,2,4,5,NA,NA,NA,0,2,5,3,2,0,2,NA "SR",2002,NA,NA,2,1,12,10,2,3,2,NA,NA,NA,0,3,2,4,2,0,1,NA "SR",2003,NA,NA,0,5,1,13,6,1,6,NA,NA,NA,1,4,2,2,2,1,1,NA "SR",2004,NA,NA,0,6,8,14,6,2,4,NA,NA,NA,0,3,2,1,1,1,1,NA "SR",2005,NA,NA,0,7,8,12,6,3,4,NA,NA,NA,0,3,2,1,2,1,2,NA "SR",2006,1,4,5,6,13,9,4,1,7,NA,0,2,2,1,4,1,8,0,2,NA "SR",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SR",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"ST",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ST",2000,NA,NA,1,5,11,4,7,3,10,NA,NA,NA,3,7,15,5,7,4,15,NA "ST",2001,NA,NA,0,7,14,6,6,5,12,NA,NA,NA,1,4,10,4,8,6,14,NA "ST",2002,NA,NA,1,7,6,2,2,2,2,NA,NA,NA,0,6,5,2,3,2,2,NA "ST",2003,NA,NA,1,2,4,5,3,0,1,NA,NA,NA,0,3,7,8,1,0,0,NA "ST",2004,NA,NA,3,5,7,6,5,2,3,NA,NA,NA,1,4,5,5,2,1,1,NA "ST",2005,NA,NA,2,5,7,6,4,5,2,NA,NA,NA,1,4,5,3,2,3,0,NA "ST",2006,0,0,0,5,8,4,2,1,2,NA,0,0,1,4,7,0,0,1,1,NA "ST",2007,0,0,0,4,12,8,4,4,0,NA,0,0,0,9,6,3,3,5,0,NA "ST",2008,NA,NA,1,5,13,5,6,1,0,0,NA,NA,2,5,6,6,2,0,0,0 "SV",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"SV",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SV",1996,NA,NA,102,76,76,77,57,54,92,NA,NA,NA,102,76,62,43,49,38,61,NA "SV",1997,NA,NA,13,86,110,117,71,68,75,NA,NA,NA,17,67,72,44,33,42,45,NA "SV",1998,NA,NA,21,95,131,99,87,65,84,NA,NA,NA,21,81,93,53,40,43,71,NA "SV",1999,NA,NA,18,102,128,104,88,88,104,NA,NA,NA,20,81,73,61,47,44,65,NA "SV",2000,NA,NA,13,99,124,114,92,62,107,NA,NA,NA,28,81,76,63,63,39,47,NA "SV",2001,NA,NA,20,101,144,100,78,62,101,NA,NA,NA,22,68,86,59,59,53,50,NA "SV",2002,NA,NA,8,85,127,101,91,59,93,NA,NA,NA,6,80,84,61,49,51,85,NA "SV",2003,NA,NA,7,75,105,103,81,59,89,NA,NA,NA,7,70,71,47,48,44,64,NA "SV",2004,NA,NA,5,92,121,90,84,77,91,NA,NA,NA,15,64,73,55,44,48,67,NA "SV",2005,NA,NA,5,97,140,128,104,74,117,NA,NA,NA,6,85,82,59,50,42,70,NA "SV",2006,NA,NA,6,93,124,101,76,54,103,NA,NA,NA,7,71,80,49,50,38,61,NA "SV",2007,0,8,8,79,179,110,73,62,95,NA,0,4,4,63,85,50,45,33,56,NA "SV",2008,0,8,8,107,168,112,67,79,89,0,0,8,8,73,71,55,42,40,66,0 "SY",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"SY",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",1995,NA,NA,13,332,255,111,70,59,50,NA,NA,NA,22,158,97,53,44,37,20,NA "SY",1996,NA,NA,11,390,290,110,90,69,60,NA,NA,NA,12,200,107,57,51,45,31,NA "SY",1997,NA,NA,6,337,295,118,74,52,52,NA,NA,NA,23,201,112,47,31,36,18,NA "SY",1998,NA,NA,5,335,293,111,93,48,50,NA,NA,NA,20,197,99,43,49,18,21,NA "SY",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SY",2000,NA,NA,8,359,289,125,86,76,55,NA,NA,NA,23,195,101,53,46,38,28,NA "SY",2001,NA,NA,8,317,248,134,108,64,47,NA,NA,NA,26,210,116,56,50,42,28,NA "SY",2002,NA,NA,12,359,278,121,80,62,61,NA,NA,NA,23,182,116,53,43,31,26,NA "SY",2003,NA,NA,10,343,279,127,98,75,64,NA,NA,NA,26,242,99,68,48,33,33,NA "SY",2004,NA,NA,13,318,308,115,113,77,50,NA,NA,NA,20,230,121,46,56,59,35,NA "SY",2005,NA,NA,9,266,237,111,112,62,63,NA,NA,NA,27,182,108,59,59,32,23,NA "SY",2006,0,8,8,225,267,137,110,71,44,NA,0,18,18,195,109,42,53,39,34,NA "SY",2007,0,7,7,198,222,123,74,49,59,NA,0,14,14,148,106,41,43,30,41,NA "SY",2008,2,16,18,170,212,128,82,61,52,NA,0,30,30,149,80,48,32,29,25,NA "SZ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"SZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",1995,NA,NA,4,59,117,130,98,40,16,NA,NA,NA,5,52,57,39,29,8,6,NA "SZ",1996,NA,NA,79,39,250,335,263,200,120,NA,NA,NA,64,78,352,204,114,58,38,NA "SZ",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "SZ",2000,NA,NA,11,130,352,249,138,37,17,NA,NA,NA,10,198,298,62,62,24,5,NA "SZ",2001,NA,NA,16,180,468,374,238,70,34,NA,NA,NA,22,362,474,196,74,18,16,NA "SZ",2002,NA,NA,1,94,244,182,117,33,10,NA,NA,NA,9,236,274,127,50,13,9,NA "SZ",2003,NA,NA,15,120,298,171,96,48,19,NA,NA,NA,14,242,325,145,60,20,8,NA "SZ",2004,NA,NA,6,152,316,245,140,53,21,NA,NA,NA,17,271,381,182,74,19,14,NA "SZ",2005,NA,NA,9,162,406,285,139,57,27,NA,NA,NA,14,318,453,207,73,21,8,NA "SZ",2006,NA,NA,32,187,452,268,164,91,45,NA,NA,NA,35,367,464,245,107,48,25,NA "SZ",2007,2,29,NA,223,479,344,182,57,27,NA,5,42,NA,411,576,232,98,39,18,NA "SZ",2008,1,28,29,231,552,357,193,80,35,0,2,37,39,427,663,309,114,57,19,0 "TC",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TC",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",2002,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA "TC",2003,NA,NA,0,0,2,0,0,0,0,NA,NA,NA,0,0,2,0,2,0,0,NA "TC",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",2006,0,0,0,1,1,0,0,0,0,NA,0,0,0,1,1,1,1,1,0,NA "TC",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TC",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TD",1999,NA,NA,20,172,414,957,477,42,4,NA,NA,NA,13,28,230,458,78,16,11,NA "TD",2002,NA,NA,24,90,1029,794,269,37,17,NA,NA,NA,18,28,495,500,187,18,11,NA "TD",2003,NA,NA,155,256,428,549,303,191,78,NA,NA,NA,112,206,363,497,259,151,51,NA "TD",2004,NA,NA,72,141,466,415,207,61,29,NA,NA,NA,41,89,317,262,129,25,16,NA "TD",2005,NA,NA,25,194,535,409,229,123,82,NA,NA,NA,28,148,298,211,148,59,27,NA "TD",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TD",2008,NA,NA,63,NA,NA,1543,584,NA,NA,NA,NA,NA,78,NA,NA,777,264,NA,NA,NA "TG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1995,NA,NA,7,95,151,123,82,64,49,NA,NA,NA,9,80,96,45,38,23,15,NA "TG",1996,NA,NA,11,95,153,134,89,37,50,NA,NA,NA,12,89,117,45,45,15,19,NA "TG",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",1998,NA,NA,13,85,177,136,86,48,36,NA,NA,NA,15,95,113,52,36,17,18,NA 
"TG",1999,NA,NA,11,92,169,124,80,42,37,NA,NA,NA,7,88,123,64,32,25,10,NA "TG",2000,NA,NA,4,101,168,144,109,48,39,NA,NA,NA,13,107,124,50,36,24,15,NA "TG",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TG",2002,NA,NA,10,140,239,166,104,55,40,NA,NA,NA,12,125,148,79,43,29,13,NA "TG",2003,NA,NA,10,126,229,192,120,66,57,NA,NA,NA,15,102,149,80,55,26,28,NA "TG",2004,NA,NA,9,145,286,233,143,69,66,NA,NA,NA,12,150,205,103,55,37,28,NA "TG",2005,NA,NA,11,177,320,283,125,79,69,NA,NA,NA,23,157,236,146,67,41,32,NA "TG",2006,NA,NA,15,174,358,344,183,94,79,NA,NA,NA,29,214,268,170,96,58,49,NA "TG",2007,NA,0,7,156,309,276,170,73,66,NA,NA,0,17,184,256,150,67,35,30,NA "TG",2008,NA,NA,15,194,379,338,214,113,78,0,NA,NA,29,202,302,170,94,47,59,0 "TH",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TH",1995,NA,NA,59,1191,2936,2948,2434,2607,2346,NA,NA,NA,52,741,888,782,936,1175,1178,NA "TH",1996,NA,NA,54,1088,2857,2496,1935,2004,1772,NA,NA,NA,50,598,844,683,718,918,980,NA 
"TH",1997,NA,NA,53,864,2336,2101,1488,1434,1326,NA,NA,NA,38,545,725,504,475,633,690,NA "TH",1998,NA,NA,11,427,1153,1098,892,945,985,NA,NA,NA,17,297,401,317,386,475,558,NA "TH",1999,NA,NA,20,791,2123,2015,1702,1705,1795,NA,NA,NA,30,511,771,676,750,879,1164,NA "TH",2000,NA,NA,27,859,2570,2380,2117,1908,2213,NA,NA,NA,32,624,1035,780,873,1016,1321,NA "TH",2001,NA,NA,37,1868,5192,4516,3269,2617,2912,NA,NA,NA,58,999,1550,1231,1251,1265,1777,NA "TH",2002,NA,NA,35,1352,3805,3699,3155,2556,3077,NA,NA,NA,61,897,1525,1212,1143,1307,1769,NA "TH",2003,NA,NA,41,1636,4615,4259,3497,2740,3241,NA,NA,NA,49,944,1678,1350,1279,1264,1866,NA "TH",2004,NA,NA,46,1421,4211,4542,3831,2787,3379,NA,NA,NA,50,951,1602,1335,1217,1203,1846,NA "TH",2005,NA,NA,44,1344,3814,4393,4003,2831,3407,NA,NA,NA,57,907,1662,1334,1367,1259,1938,NA "TH",2006,NA,NA,43,1276,3732,4664,4055,3084,3732,NA,NA,NA,65,884,1542,1379,1349,1287,1989,NA "TH",2007,NA,NA,48,1261,3398,4487,4168,3122,3748,NA,NA,NA,50,885,1481,1418,1302,1281,1938,NA "TH",2008,NA,NA,66,1222,3374,4425,4164,3167,3836,NA,NA,NA,77,865,1513,1345,1407,1276,2051,NA "TJ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TJ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",1996,NA,NA,4,22,25,23,12,12,4,NA,NA,NA,5,18,26,46,20,11,4,NA "TJ",1997,NA,NA,8,16,38,26,18,12,8,NA,NA,NA,7,17,33,24,17,12,9,NA "TJ",1998,NA,NA,9,67,90,48,18,22,10,NA,NA,NA,2,33,60,37,21,10,8,NA "TJ",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",2001,NA,NA,8,129,152,89,43,17,16,NA,NA,NA,0,61,83,62,25,11,8,NA "TJ",2002,NA,NA,7,134,133,66,45,28,19,NA,NA,NA,6,69,84,46,29,15,6,NA "TJ",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",2004,NA,NA,7,146,90,58,34,12,10,NA,NA,NA,11,77,59,41,23,17,16,NA "TJ",2005,0,8,8,308,279,164,104,54,48,0,0,26,26,225,185,151,89,43,53,0 "TJ",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TJ",2007,NA,NA,13,413,361,194,132,63,65,0,NA,NA,21,329,243,154,92,61,87,0 "TJ",2008,0,7,7,437,358,165,113,52,64,0,0,25,25,290,211,121,101,40,73,0 "TK",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TK",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TK",2007,0,0,0,0,0,0,0,0,0,NA,0,0,0,0,0,0,0,0,0,NA "TK",2008,NA,NA,NA,10,10,4,9,4,NA,0,NA,NA,NA,13,6,5,6,3,4,0 "TL",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TL",2002,NA,NA,13,119,145,119,107,58,35,NA,NA,NA,20,118,124,88,91,40,13,NA "TL",2003,NA,NA,5,130,135,107,98,66,41,NA,NA,NA,13,98,116,76,76,43,17,NA "TL",2004,NA,NA,5,133,134,95,99,65,48,NA,NA,NA,19,109,116,83,51,27,16,NA "TL",2005,NA,NA,8,136,149,116,119,52,47,NA,NA,NA,8,127,90,76,60,18,29,NA "TL",2006,NA,NA,1,128,115,103,75,48,49,NA,NA,NA,8,102,76,82,63,34,23,NA "TL",2007,NA,NA,4,128,129,89,77,69,65,NA,NA,NA,10,120,98,89,76,36,31,NA "TL",2008,NA,NA,6,115,88,93,73,52,54,0,NA,NA,4,87,72,85,67,35,36,0 "TM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TM",1995,NA,NA,1,11,188,0,79,30,0,NA,NA,NA,2,15,146,0,47,25,0,NA "TM",1996,NA,NA,0,15,114,101,44,37,23,NA,NA,NA,2,12,75,72,25,19,18,NA "TM",1997,NA,NA,2,14,208,77,90,69,10,NA,NA,NA,0,10,95,75,47,45,22,NA "TM",1998,NA,NA,0,100,210,131,64,48,12,NA,NA,NA,2,59,69,43,28,16,8,NA "TM",1999,NA,NA,5,129,225,174,77,43,17,NA,NA,NA,2,51,103,65,32,27,14,NA "TM",2000,NA,NA,16,103,185,144,127,31,21,NA,NA,NA,19,73,140,76,31,34,17,NA "TM",2001,NA,NA,1,169,295,196,93,46,21,NA,NA,NA,3,113,137,70,40,32,27,NA "TM",2002,NA,NA,2,164,249,224,112,38,21,NA,NA,NA,3,113,143,74,57,34,20,NA "TM",2003,NA,NA,3,148,265,212,112,37,14,NA,NA,NA,5,94,139,84,42,21,21,NA "TM",2004,NA,NA,0,129,250,174,123,37,12,NA,NA,NA,2,90,128,68,45,26,19,NA "TM",2005,0,2,2,148,181,146,97,51,13,0,1,2,3,100,101,72,46,27,8,0 "TM",2006,NA,NA,0,140,273,191,120,33,18,0,NA,NA,5,107,115,72,34,24,23,0 "TM",2007,0,2,2,176,272,224,137,56,23,0,0,6,6,129,132,81,69,36,35,0 "TM",2008,NA,NA,3,176,235,201,164,56,24,NA,NA,NA,4,126,146,81,56,29,30,NA "TN",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TN",1998,NA,NA,11,134,206,155,108,88,95,NA,NA,NA,12,80,65,43,39,43,26,NA "TN",1999,NA,NA,18,137,221,181,106,88,129,NA,NA,NA,15,80,76,62,40,29,53,NA "TN",2000,NA,NA,16,139,208,156,109,65,101,NA,NA,NA,7,68,59,43,21,21,58,NA "TN",2001,NA,NA,23,141,185,157,103,83,100,NA,NA,NA,9,62,42,47,30,42,53,NA "TN",2002,NA,NA,1,112,184,153,99,67,65,NA,NA,NA,6,55,50,36,28,34,37,NA "TN",2003,NA,NA,3,100,164,129,95,66,74,NA,NA,NA,7,57,56,36,34,24,33,NA "TN",2004,NA,NA,9,100,181,128,123,62,91,NA,NA,NA,7,44,55,39,47,19,39,NA "TN",2005,NA,NA,5,103,172,133,115,53,81,NA,NA,NA,7,66,61,39,36,16,28,NA "TN",2006,NA,NA,5,125,174,119,111,58,85,NA,NA,NA,3,53,52,33,33,33,38,NA "TN",2007,NA,NA,1,124,171,117,104,71,75,NA,NA,NA,11,69,54,42,28,29,45,NA "TN",2008,NA,NA,6,130,188,118,125,80,59,NA,NA,NA,7,68,57,35,24,18,52,NA "TO",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TO",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",1995,NA,NA,0,1,0,0,0,1,2,NA,NA,NA,0,0,1,1,0,2,1,NA "TO",1996,NA,NA,0,1,1,2,0,6,0,NA,NA,NA,0,1,1,1,0,1,2,NA "TO",1997,NA,NA,0,2,1,1,0,1,2,NA,NA,NA,0,4,0,0,1,0,0,NA "TO",1998,NA,NA,0,2,3,1,2,1,2,NA,NA,NA,1,1,0,0,0,1,2,NA "TO",1999,NA,NA,0,1,0,0,1,3,2,NA,NA,NA,0,1,0,0,0,2,0,NA "TO",2000,NA,NA,NA,2,1,1,NA,1,5,NA,NA,NA,NA,1,1,1,NA,1,1,NA "TO",2001,NA,NA,0,0,1,0,0,2,1,NA,NA,NA,0,0,2,1,1,0,0,NA "TO",2002,NA,NA,NA,1,NA,NA,4,NA,10,NA,NA,NA,NA,1,1,1,NA,1,4,NA "TO",2003,NA,NA,0,1,1,1,1,0,2,NA,NA,NA,0,1,0,1,1,2,0,NA "TO",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TO",2005,NA,NA,0,2,1,0,2,1,0,NA,NA,NA,0,2,1,0,0,2,0,NA "TO",2006,0,0,0,1,0,0,1,2,4,NA,0,0,0,1,1,2,0,0,2,NA "TO",2007,0,0,0,2,1,0,0,1,5,NA,0,0,0,3,1,1,0,0,0,NA "TO",2008,0,0,0,2,0,2,2,1,1,0,0,0,0,0,0,0,1,2,0,0 "TR",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TR",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TR",2004,NA,NA,0,50,38,50,41,28,19,NA,NA,NA,2,24,21,8,4,7,6,NA "TR",2005,1,32,33,1148,1295,1028,963,534,429,0,2,48,50,699,474,243,175,166,213,0 "TR",2006,2,38,40,1212,1391,1003,1045,575,473,0,4,52,56,769,507,235,155,149,256,0 "TR",2007,13,37,50,1091,1245,984,978,571,512,0,2,61,63,708,531,246,165,128,255,0 "TR",2008,7,41,48,940,1090,953,947,607,453,0,5,52,57,653,485,233,152,139,236,0 "TT",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TT",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1995,NA,NA,2,6,15,10,12,7,4,NA,NA,NA,0,6,4,2,5,3,0,NA "TT",1996,NA,NA,0,4,7,9,9,6,6,NA,NA,NA,0,5,5,5,1,1,1,NA "TT",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TT",1998,NA,NA,0,9,12,21,14,9,8,NA,NA,NA,1,5,3,5,2,5,4,NA "TT",1999,NA,NA,0,11,18,13,8,5,6,NA,NA,NA,0,4,6,7,3,4,1,NA "TT",2000,NA,NA,0,7,18,27,17,7,7,NA,NA,NA,0,5,7,9,5,2,4,NA "TT",2001,NA,NA,5,10,21,36,24,17,18,NA,NA,NA,5,10,11,15,9,9,8,NA "TT",2002,NA,NA,0,8,13,20,12,12,3,NA,NA,NA,0,4,11,3,2,0,7,NA "TT",2003,NA,NA,0,9,13,10,13,10,6,NA,NA,NA,1,2,2,0,8,1,2,NA "TT",2004,NA,NA,NA,3,10,24,7,10,7,NA,NA,NA,2,3,8,1,5,1,NA,NA "TT",2005,NA,NA,0,10,11,13,21,10,3,NA,NA,NA,0,4,9,3,5,4,3,NA "TT",2006,NA,2,2,7,27,23,20,16,12,NA,NA,1,1,3,10,5,4,8,23,NA "TT",2007,0,1,1,10,16,21,28,18,5,NA,0,0,0,5,7,7,4,3,5,NA "TT",2008,0,2,2,9,15,19,34,29,14,0,1,0,1,11,12,4,8,4,7,0 "TV",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TV",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1995,NA,NA,1,0,1,0,0,1,0,NA,NA,NA,0,1,1,0,0,1,0,NA "TV",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",1998,NA,NA,1,NA,3,2,NA,1,1,NA,NA,NA,6,NA,1,1,1,NA,1,NA "TV",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",2003,NA,NA,4,2,0,1,6,0,0,NA,NA,NA,0,3,0,1,0,0,0,NA "TV",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TV",2005,NA,NA,NA,NA,NA,NA,1,1,NA,NA,NA,NA,NA,1,NA,NA,NA,2,NA,NA "TV",2006,7,0,0,1,0,0,0,0,0,NA,7,0,0,1,0,1,1,0,0,NA "TV",2007,NA,NA,1,1,0,2,0,0,2,NA,NA,NA,2,0,0,0,1,3,0,NA "TV",2008,0,2,2,2,1,0,1,0,0,0,0,0,0,1,2,2,1,0,0,0 "TZ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"TZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "TZ",1995,NA,NA,183,2108,4091,2916,1754,1007,640,NA,NA,NA,201,1904,2532,1324,735,380,179,NA "TZ",1996,NA,NA,171,2176,4275,3107,1843,1109,656,NA,NA,NA,221,2087,2885,1461,806,472,203,NA "TZ",1997,NA,NA,188,2210,4538,3066,1090,1134,699,NA,NA,NA,251,2146,2876,1502,908,501,198,NA "TZ",1998,NA,NA,198,2528,4910,3400,1973,1112,767,NA,NA,NA,240,2234,3243,1686,835,466,241,NA "TZ",1999,NA,NA,170,2422,4887,3401,2068,1160,823,NA,NA,NA,230,2160,3469,1724,876,501,232,NA "TZ",2000,NA,NA,200,2357,4836,3430,2022,1202,834,NA,NA,NA,257,2106,3426,1738,868,494,269,NA "TZ",2001,NA,NA,212,2302,4912,3545,2031,1136,930,NA,NA,NA,312,2117,3609,1847,891,522,319,NA "TZ",2002,NA,NA,187,2309,4814,3525,2075,1211,944,NA,NA,NA,241,1927,3511,1706,907,475,304,NA "TZ",2003,NA,NA,181,2172,4964,3728,2166,1237,1025,NA,NA,NA,244,2063,3504,1833,929,509,344,NA "TZ",2004,NA,NA,208,2216,5203,3884,2254,1272,1129,NA,NA,NA,280,1996,3537,1960,1011,544,329,NA "TZ",2005,NA,NA,190,2062,4939,4025,2310,1279,1054,NA,NA,NA,271,1852,3521,1892,968,547,354,NA "TZ",2006,NA,NA,204,2060,4926,3832,2154,1348,1029,NA,NA,NA,293,1745,3326,1970,995,507,335,NA "TZ",2007,NA,NA,189,2021,4665,3855,2231,1317,1066,NA,NA,NA,238,1735,3388,1945,947,535,388,NA "TZ",2008,NA,NA,191,1963,4595,3874,2349,1230,1077,NA,NA,NA,233,1663,3152,1901,964,564,415,NA "UA",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"UA",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",1995,NA,NA,10,385,1076,2064,1515,1087,437,NA,NA,NA,21,314,380,327,182,185,280,NA "UA",1996,NA,NA,9,569,1199,2318,1704,1264,544,NA,NA,NA,13,379,410,397,196,192,370,NA "UA",1997,NA,NA,12,623,1310,2107,1718,1141,555,NA,NA,NA,22,383,474,359,256,126,377,NA "UA",1998,NA,NA,24,687,1500,2460,1873,1140,576,NA,NA,NA,36,468,556,431,248,194,393,NA "UA",1999,NA,NA,11,661,1463,2351,1825,1067,557,NA,NA,NA,25,485,577,478,297,222,393,NA "UA",2000,NA,NA,21,693,1552,2385,2007,1062,532,NA,NA,NA,41,487,590,447,298,218,405,NA "UA",2001,NA,NA,9,757,1721,2720,2393,1050,559,NA,NA,NA,18,544,649,525,354,235,418,NA "UA",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",2003,NA,NA,10,850,2033,2808,2634,983,617,NA,NA,NA,29,514,745,557,363,221,421,NA "UA",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UA",2006,NA,NA,8,926,2522,2979,2714,1087,568,0,NA,NA,16,600,909,704,446,246,481,0 "UA",2007,5,9,14,1556,4507,5206,5024,2130,1090,0,0,20,7,982,1661,1314,855,438,861,0 "UA",2008,1,8,9,901,2696,2859,2769,1140,574,NA,4,20,24,585,979,762,544,255,477,NA "UG",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"UG",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UG",1995,NA,NA,370,1193,2491,1797,1115,602,323,NA,NA,NA,402,1376,1845,1104,635,312,113,NA "UG",1996,NA,NA,372,1271,2706,2026,1252,646,353,NA,NA,NA,455,1482,2099,1246,728,379,160,NA "UG",1997,NA,NA,340,1485,3278,2919,1439,733,353,NA,NA,NA,375,1700,2489,1368,812,379,131,NA "UG",1998,NA,NA,334,1512,3672,2491,1429,676,428,NA,NA,NA,467,1682,2760,1441,744,395,191,NA "UG",1999,NA,NA,310,1510,3475,2526,1354,613,413,NA,NA,NA,434,1654,2591,1415,680,331,162,NA "UG",2000,NA,NA,283,1511,3497,2479,1279,607,395,NA,NA,NA,400,1649,2782,1510,671,316,163,NA "UG",2001,NA,NA,231,1461,3483,2540,1242,638,392,NA,NA,NA,334,1603,2656,1528,703,292,180,NA "UG",2002,NA,NA,259,1503,3783,2865,1399,723,465,NA,NA,NA,371,1689,3011,1708,765,374,184,NA "UG",2003,NA,NA,261,1643,4142,3011,1578,719,501,NA,NA,NA,377,1770,3176,1815,749,356,214,NA "UG",2004,NA,NA,284,1803,4222,3269,1599,810,525,NA,NA,NA,371,1803,3110,1780,812,358,193,NA "UG",2005,NA,NA,257,1598,4075,3209,1576,725,539,NA,NA,NA,371,1811,3099,1800,818,389,257,NA "UG",2006,NA,NA,255,1624,4084,3391,1591,718,511,NA,NA,NA,363,1792,2909,1736,812,332,238,NA 
"UG",2007,NA,NA,234,1741,4406,3551,1681,766,505,NA,NA,NA,343,1874,3008,1742,824,382,246,NA "UG",2008,0,269,269,1953,4697,3922,1981,875,565,0,0,382,382,2006,2985,1749,862,314,206,0 "US",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "US",1995,NA,NA,19,355,876,1417,1121,742,1099,NA,NA,NA,26,280,579,499,285,202,591,NA "US",1996,NA,NA,15,333,815,1219,1073,678,1007,NA,NA,NA,21,289,487,478,279,217,541,NA "US",1997,NA,NA,12,330,701,1127,979,679,944,NA,NA,NA,28,269,449,447,254,201,514,NA "US",1998,NA,NA,10,321,663,1009,1007,628,914,NA,NA,NA,15,269,425,424,267,179,492,NA "US",1999,NA,NA,18,331,616,1011,930,601,801,NA,NA,NA,16,232,391,394,245,244,444,NA "US",2000,NA,NA,6,365,602,906,904,577,738,NA,NA,NA,14,246,376,349,253,152,396,NA "US",2001,NA,NA,17,320,613,824,876,524,649,NA,NA,NA,21,239,410,346,247,176,389,NA "US",2002,NA,NA,14,343,562,813,795,490,592,NA,NA,NA,15,233,423,362,255,167,370,NA "US",2003,NA,NA,11,365,526,754,828,487,650,NA,NA,NA,12,277,353,310,269,169,354,NA 
"US",2004,NA,NA,12,362,547,728,829,504,582,NA,NA,NA,19,265,339,302,252,166,344,NA "US",2005,NA,NA,14,383,535,666,767,499,624,NA,NA,NA,11,241,348,276,242,161,322,NA "US",2006,4,8,12,388,568,659,759,531,596,NA,2,9,11,257,384,263,212,146,303,NA "US",2007,4,8,12,414,490,572,744,533,562,NA,2,10,12,257,338,260,225,135,308,NA "US",2008,4,7,11,375,513,495,725,526,561,0,4,18,22,220,329,269,224,172,300,0 "UY",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UY",1995,NA,NA,4,28,40,35,49,38,50,NA,NA,NA,2,21,26,18,12,9,17,NA "UY",1996,NA,NA,4,34,43,58,59,53,42,NA,NA,NA,4,24,35,17,21,10,22,NA "UY",1997,NA,NA,3,37,44,53,53,55,53,NA,NA,NA,5,26,28,29,12,8,17,NA "UY",1998,NA,NA,2,30,47,52,47,38,39,NA,NA,NA,2,30,29,15,14,6,23,NA "UY",1999,NA,NA,1,45,48,42,46,48,41,NA,NA,NA,4,20,25,33,14,11,14,NA "UY",2000,NA,NA,0,36,48,45,41,30,34,NA,NA,NA,2,28,22,21,13,12,16,NA "UY",2001,NA,NA,2,33,38,49,42,31,44,NA,NA,NA,4,25,31,7,15,3,16,NA "UY",2002,NA,NA,1,33,33,37,36,23,32,NA,NA,NA,1,25,25,20,10,11,21,NA 
"UY",2003,NA,NA,3,46,50,35,42,38,26,NA,NA,NA,1,28,24,13,13,6,14,NA "UY",2004,NA,NA,1,38,59,53,48,26,40,NA,NA,NA,2,34,25,12,17,11,7,NA "UY",2005,NA,NA,1,42,48,39,45,34,36,NA,NA,NA,1,33,30,17,9,8,12,NA "UY",2006,NA,NA,1,38,53,34,30,38,29,NA,NA,NA,4,21,19,11,6,11,10,NA "UY",2007,NA,NA,1,39,69,37,50,39,39,NA,NA,NA,1,23,26,22,14,7,13,NA "UY",2008,0,1,1,49,71,64,45,28,34,0,0,4,4,26,35,26,15,13,13,0 "UZ",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1996,NA,NA,2,96,1042,650,0,196,0,NA,NA,NA,5,87,799,324,0,149,0,NA "UZ",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "UZ",1998,NA,NA,0,6,5,0,1,1,0,NA,NA,NA,0,9,9,4,2,1,1,NA "UZ",1999,NA,NA,4,429,926,519,262,146,100,NA,NA,NA,11,346,647,339,186,136,124,NA "UZ",2000,NA,NA,6,351,749,510,346,213,107,NA,NA,NA,11,261,547,288,213,112,111,NA "UZ",2001,NA,NA,7,390,905,523,396,253,133,NA,NA,NA,21,337,631,338,267,216,181,NA "UZ",2002,NA,NA,10,330,481,318,178,87,111,NA,NA,NA,18,277,394,214,127,96,125,NA 
"UZ",2003,NA,NA,9,487,828,595,412,253,220,NA,NA,NA,29,360,588,353,210,172,174,NA "UZ",2004,NA,NA,23,512,835,607,502,275,252,NA,NA,NA,31,430,600,341,274,211,226,NA "UZ",2005,1,24,25,596,831,723,522,263,313,NA,1,39,40,538,597,375,288,217,367,NA "UZ",2006,8,11,19,568,807,717,565,268,329,NA,5,36,41,544,597,346,327,224,421,NA "UZ",2007,0,18,18,569,768,579,583,282,380,0,1,24,25,485,507,342,255,235,436,0 "UZ",2008,3,7,10,515,688,572,544,287,369,NA,2,21,23,427,479,309,273,227,394,NA "VC",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1998,NA,NA,0,0,2,0,1,0,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",2000,NA,NA,0,1,0,4,2,0,1,NA,NA,NA,1,0,0,0,0,0,0,NA "VC",2001,NA,NA,NA,NA,NA,1,2,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"VC",2002,NA,NA,NA,NA,NA,1,1,2,2,NA,NA,NA,NA,NA,1,NA,NA,NA,1,NA "VC",2003,NA,NA,NA,NA,NA,2,NA,1,NA,NA,NA,NA,NA,1,NA,1,NA,NA,1,NA "VC",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VC",2005,NA,NA,0,0,0,2,1,0,2,NA,NA,NA,0,0,1,0,1,0,0,NA "VC",2006,NA,NA,NA,NA,NA,NA,NA,4,2,NA,NA,NA,NA,NA,NA,NA,NA,NA,2,NA "VC",2007,0,0,0,0,1,3,0,1,0,NA,0,0,0,0,0,0,0,0,0,NA "VC",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",1999,NA,NA,32,378,452,420,368,283,346,NA,NA,NA,28,283,315,195,169,134,267,NA "VE",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VE",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"VE",2002,NA,NA,19,339,429,425,380,246,313,NA,NA,NA,42,274,280,218,158,123,198,NA "VE",2003,NA,NA,39,361,459,453,405,284,316,NA,NA,NA,46,340,355,240,204,140,240,NA "VE",2004,NA,NA,24,373,454,459,407,272,316,NA,NA,NA,36,311,324,239,184,135,242,NA "VE",2005,NA,NA,35,312,395,413,402,265,332,NA,NA,NA,37,351,299,267,183,146,216,NA "VE",2006,1,9,10,323,405,413,422,267,320,NA,2,40,42,322,297,188,173,140,225,NA "VE",2007,0,17,17,324,382,390,389,272,295,NA,0,40,40,276,271,199,160,147,230,NA "VE",2008,0,18,18,364,358,326,389,271,285,0,0,25,25,309,272,228,171,146,182,0 "VG",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VG",2000,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VG",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VG",2002,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1,NA "VG",2003,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VG",2004,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA,NA,NA,1,NA,NA,NA,NA,NA "VG",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VG",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VG",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VG",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"VI",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1995,NA,NA,0,0,0,1,1,0,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",1996,NA,NA,0,0,1,1,0,1,0,NA,NA,NA,0,0,1,0,0,0,1,NA "VI",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VI",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VN",1996,NA,NA,92,1994,5716,7137,5170,5839,6292,NA,NA,NA,91,1127,2606,3045,2504,3360,3938,NA "VN",1997,NA,NA,103,2162,6427,8363,5820,5892,6989,NA,NA,NA,73,1163,2809,3302,2590,3614,4340,NA "VN",1998,NA,NA,56,2441,6567,8765,6143,5925,7274,NA,NA,NA,60,1344,2749,3102,2576,3296,4575,NA 
"VN",1999,NA,NA,58,2254,6355,8392,6465,5530,7371,NA,NA,NA,68,1361,2511,3029,2549,3034,4828,NA "VN",2000,NA,NA,51,2367,6147,8209,6713,5150,7712,NA,NA,NA,64,1334,2320,2754,2594,2847,4907,NA "VN",2001,NA,NA,39,2756,6319,8457,7054,5205,7643,NA,NA,NA,48,1390,2357,2656,2574,2530,5174,NA "VN",2002,NA,NA,57,3250,6762,8855,8040,5162,8184,NA,NA,NA,68,1571,2357,2508,2619,2409,4969,NA "VN",2003,NA,NA,49,3475,7036,8486,7965,5066,7793,NA,NA,NA,66,1659,2262,2327,2574,2283,4896,NA "VN",2004,NA,NA,54,3486,7364,9110,8743,5257,8206,NA,NA,NA,66,1740,2398,2218,2551,2226,4970,NA "VN",2005,NA,NA,54,3408,7105,8738,8606,4958,7573,NA,NA,NA,47,1747,2293,2116,2298,2023,4604,NA "VN",2006,NA,NA,49,3761,7549,8931,8717,5037,7408,NA,NA,NA,62,1827,2381,2036,2283,1996,4400,NA "VN",2007,NA,NA,48,3587,7431,8391,8451,5046,7026,NA,NA,NA,59,1939,2354,1923,2170,1891,4144,NA "VN",2008,NA,NA,36,3401,7148,8230,8811,5158,6667,0,NA,NA,48,1993,2416,1820,2087,1858,3811,0 "VU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"VU",1995,NA,NA,0,6,2,5,3,4,0,NA,NA,NA,0,5,0,2,3,0,0,NA "VU",1996,NA,NA,2,4,2,6,4,4,1,NA,NA,NA,2,10,3,5,3,4,0,NA "VU",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "VU",1998,NA,NA,2,4,5,1,0,2,2,NA,NA,NA,2,5,9,4,1,0,1,NA "VU",1999,NA,NA,0,0,4,1,2,0,0,NA,NA,NA,0,2,10,4,1,0,0,NA "VU",2000,NA,NA,2,7,5,1,10,5,2,NA,NA,NA,5,3,15,7,3,3,1,NA "VU",2001,NA,NA,1,7,5,4,8,6,1,NA,NA,NA,1,10,4,3,2,1,4,NA "VU",2002,NA,NA,0,7,2,3,10,2,1,NA,NA,NA,0,3,1,5,0,3,1,NA "VU",2003,NA,NA,1,2,4,7,5,2,3,NA,NA,NA,0,4,4,3,2,1,2,NA "VU",2004,NA,NA,1,7,11,2,6,3,5,NA,NA,NA,3,5,8,2,2,2,2,NA "VU",2005,NA,NA,1,4,5,5,0,4,1,NA,NA,NA,0,5,1,2,4,1,2,NA "VU",2006,NA,0,1,5,3,1,4,4,0,NA,NA,0,2,7,9,2,4,0,0,NA "VU",2007,NA,NA,1,3,2,4,2,2,2,NA,NA,NA,1,6,8,1,6,1,2,NA "VU",2008,0,1,NA,4,4,3,5,4,3,0,0,4,NA,3,4,1,3,5,1,0 "WF",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",1996,NA,NA,0,1,1,0,0,0,0,NA,NA,NA,0,1,3,1,1,0,0,NA "WF",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,0,0,0,0,0,1,NA "WF",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"WF",2002,NA,NA,NA,1,NA,1,1,NA,1,NA,NA,NA,NA,NA,NA,NA,3,2,1,NA "WF",2003,NA,NA,0,0,2,2,2,0,0,NA,NA,NA,0,0,1,0,0,0,0,NA "WF",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",2005,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",2006,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WF",2007,0,0,0,0,0,0,1,0,0,NA,0,0,0,0,0,0,0,0,0,NA "WF",2008,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",1995,NA,NA,0,1,1,1,0,3,2,NA,NA,NA,1,2,2,0,0,1,1,NA "WS",1996,NA,NA,0,0,0,0,0,1,2,NA,NA,NA,0,0,3,2,2,0,0,NA "WS",1997,NA,NA,0,1,4,1,0,1,1,NA,NA,NA,0,2,1,1,0,2,0,NA "WS",1998,NA,NA,1,1,1,0,1,1,0,NA,NA,NA,0,1,1,0,0,0,0,NA "WS",1999,NA,NA,0,1,2,0,1,1,4,NA,NA,NA,0,3,2,1,0,0,2,NA "WS",2000,NA,NA,0,3,1,1,1,2,1,NA,NA,NA,0,2,1,1,0,0,0,NA "WS",2001,NA,NA,1,3,1,1,0,0,1,NA,NA,NA,0,1,1,2,1,1,1,NA "WS",2002,NA,NA,0,1,2,0,1,1,1,NA,NA,NA,1,4,5,0,2,0,0,NA 
"WS",2003,NA,NA,NA,2,NA,NA,NA,1,NA,NA,NA,NA,NA,2,2,2,NA,2,1,NA "WS",2004,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",2005,NA,NA,0,4,0,1,1,0,0,NA,NA,NA,0,2,0,2,0,1,0,NA "WS",2006,NA,NA,NA,3,2,1,1,1,2,NA,1,NA,NA,3,NA,NA,1,NA,NA,NA "WS",2007,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "WS",2008,0,0,0,1,0,0,0,1,0,NA,0,0,0,1,1,1,0,0,1,NA "YE",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YE",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YE",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YE",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YE",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YE",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YE",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YE",1995,NA,NA,57,400,605,256,201,148,45,NA,NA,NA,83,420,720,348,200,106,92,NA "YE",1996,NA,NA,15,91,92,71,45,15,12,NA,NA,NA,14,89,100,73,41,14,11,NA "YE",1997,NA,NA,87,307,1249,329,213,165,34,NA,NA,NA,196,872,449,474,259,71,13,NA "YE",1998,NA,NA,83,718,698,491,271,160,115,NA,NA,NA,115,689,632,400,294,158,72,NA "YE",1999,NA,NA,96,552,531,390,245,161,85,NA,NA,NA,111,557,532,426,244,120,80,NA "YE",2000,NA,NA,110,789,689,493,314,255,127,NA,NA,NA,161,799,627,517,345,247,92,NA "YE",2001,NA,NA,82,695,631,491,350,252,114,NA,NA,NA,154,647,562,452,293,192,53,NA "YE",2002,NA,NA,266,650,559,377,265,148,117,NA,NA,NA,163,500,443,334,244,122,71,NA "YE",2003,NA,NA,40,581,587,399,250,154,103,NA,NA,NA,74,470,426,317,204,114,74,NA "YE",2004,NA,NA,49,571,559,377,214,139,76,NA,NA,NA,72,442,376,269,160,86,44,NA "YE",2005,NA,NA,48,493,553,366,242,149,78,NA,NA,NA,44,426,410,265,181,85,39,NA "YE",2006,NA,NA,29,535,555,358,246,143,103,NA,NA,NA,55,435,358,244,166,73,42,NA "YE",2007,NA,NA,23,488,626,379,252,165,119,NA,NA,NA,50,430,374,272,189,113,57,NA "YE",2008,NA,NA,29,547,541,316,241,155,119,0,NA,NA,57,473,455,265,179,102,61,0 
"YU",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1995,NA,NA,10,108,204,317,296,350,386,NA,NA,NA,11,127,167,133,83,158,275,NA "YU",1996,NA,NA,45,207,310,461,396,389,474,NA,NA,NA,57,192,159,183,152,217,384,NA "YU",1997,NA,NA,45,136,310,450,415,410,463,NA,NA,NA,30,146,274,254,170,239,399,NA "YU",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "YU",2001,NA,NA,3,52,48,44,34,31,18,NA,NA,NA,5,49,46,23,25,23,20,NA "YU",2002,NA,NA,7,37,53,44,29,22,33,NA,NA,NA,9,46,48,19,17,19,19,NA "YU",2003,NA,NA,1,51,64,70,113,54,61,NA,NA,NA,1,44,58,38,28,20,54,NA "YU",2004,NA,NA,4,61,106,125,182,128,157,NA,NA,NA,3,66,89,75,48,41,145,NA "ZA",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"ZA",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZA",1999,NA,NA,52,624,1697,1834,966,434,221,NA,NA,NA,75,972,1384,779,314,159,110,NA "ZA",2000,NA,NA,116,723,1999,2135,1146,435,212,NA,NA,NA,122,1283,1716,933,423,167,80,NA "ZA",2001,NA,NA,163,1490,3844,3540,1838,690,255,NA,NA,NA,275,2237,3220,1748,781,295,168,NA "ZA",2002,NA,NA,3081,5147,13681,13215,7038,2342,942,NA,NA,NA,3261,7081,11312,6080,2611,1076,600,NA "ZA",2003,NA,NA,1769,10107,20392,17862,9540,3604,1495,NA,NA,NA,2341,12600,16867,9207,4080,1972,1172,NA "ZA",2004,NA,NA,2269,11030,22120,19675,10653,3908,1580,NA,NA,NA,2810,14166,18975,10839,4887,2182,1174,NA "ZA",2005,NA,NA,2035,10422,20576,19465,11143,4124,1705,NA,NA,NA,2561,13632,19343,11338,5416,2352,1348,NA "ZA",2006,655,1407,2062,10498,21273,19743,11752,4392,1862,NA,620,1959,2579,14073,20387,12656,5767,2550,1505,NA 
"ZA",2007,340,594,1909,10514,21948,20076,12164,4792,2021,NA,293,894,2511,14410,21049,13190,6245,2964,1811,NA "ZA",2008,344,1519,1863,10172,21706,20699,12724,5169,2246,0,298,2062,2360,14010,21579,13778,7146,3234,2117,0 "ZM",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1995,NA,NA,91,659,1668,1124,487,231,130,NA,NA,NA,129,1125,1779,717,257,117,63,NA "ZM",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",2000,NA,NA,349,2175,2610,3045,435,261,174,NA,NA,NA,150,932,1118,1305,186,112,75,NA "ZM",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZM",2002,NA,NA,1135,1013,3051,2000,788,162,405,NA,NA,NA,1099,1383,2730,1434,657,297,197,NA "ZM",2003,NA,NA,302,1733,4182,2390,995,386,308,NA,NA,NA,292,2061,3439,1626,680,297,243,NA "ZM",2004,NA,NA,209,1498,3963,2262,968,313,324,NA,NA,NA,247,1811,2961,1646,608,245,192,NA 
"ZM",2005,NA,NA,135,1240,3166,2160,917,358,321,NA,NA,NA,168,1507,2463,1433,569,235,185,NA "ZM",2006,NA,NA,150,945,3496,1645,684,323,186,NA,NA,NA,224,1500,2834,1257,452,207,122,NA "ZM",2007,NA,NA,152,1235,2971,1848,805,319,204,NA,NA,NA,195,1335,2193,1188,558,244,131,NA "ZM",2008,NA,NA,101,1120,3244,2094,737,299,229,0,NA,NA,165,1246,2062,1114,498,187,115,0 "ZW",1980,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1981,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1982,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1983,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1984,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1985,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1986,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1987,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1988,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1989,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1990,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1991,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1992,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1993,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1994,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1995,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1996,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1997,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1998,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",1999,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",2000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",2001,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "ZW",2002,NA,NA,191,600,2548,1662,744,315,159,NA,NA,NA,222,914,2185,1095,421,140,65,NA 
"ZW",2003,NA,NA,133,874,3048,2228,981,367,205,NA,NA,NA,180,1232,2856,1480,565,225,114,NA "ZW",2004,NA,NA,187,833,2908,2298,1056,366,198,NA,NA,NA,225,1140,2858,1565,622,214,111,NA "ZW",2005,NA,NA,210,837,2264,1855,762,295,656,NA,NA,NA,269,1136,2242,1255,578,193,603,NA "ZW",2006,NA,NA,215,736,2391,1939,896,348,199,NA,NA,NA,237,1020,2424,1355,632,230,96,NA "ZW",2007,6,132,138,500,3693,0,716,292,153,NA,7,178,185,739,3311,0,553,213,90,NA "ZW",2008,NA,NA,127,614,0,3316,704,263,185,0,NA,NA,145,840,0,2890,467,174,105,0 tidyr/vignettes/rectangle.Rmd0000644000176200001440000002245214357024447016044 0ustar liggesusers--- title: "Rectangling" output: rmarkdown::html_vignette description: | Rectangling is the art and craft of taking a deeply nested list (often sourced from wild caught JSON or XML) and taming it into a tidy data set of rows and columns. This vignette introduces you to the main rectangling tools provided by tidyr: `unnest_longer()`, `unnest_wider()`, and `hoist()`. vignette: > %\VignetteIndexEntry{Rectangling} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction Rectangling is the art and craft of taking a deeply nested list (often sourced from wild caught JSON or XML) and taming it into a tidy data set of rows and columns. There are three functions from tidyr that are particularly useful for rectangling: * `unnest_longer()` takes each element of a list-column and makes a new row. * `unnest_wider()` takes each element of a list-column and makes a new column. * `hoist()` is similar to `unnest_wider()` but only plucks out selected components, and can reach down multiple levels. (Alternative, for complex inputs where you need to rectangle a nested list according to a specification, see the [tibblify](https://github.com/mgirlich/tibblify) package.) 
A very large number of data rectangling problems can be solved by combining `jsonlite::read_json()` with these functions and a splash of dplyr (largely eliminating prior approaches that combined `mutate()` with multiple `purrr::map()`s). Note that jsonlite has another important function called `fromJSON()`. We don't recommend it here because it performs its own automatic simplification (`simplifyVector = TRUE`). This often works well, particularly in simple cases, but we think you're better off doing the rectangling yourself so you know exactly what's happening and can more easily handle the most complicated nested structures. To illustrate these techniques, we'll use the repurrrsive package, which provides a number deeply nested lists originally mostly captured from web APIs. ```{r setup, message = FALSE} library(tidyr) library(dplyr) library(repurrrsive) ``` ## GitHub users We'll start with `gh_users`, a list which contains information about six GitHub users. To begin, we put the `gh_users` list into a data frame: ```{r} users <- tibble(user = gh_users) ``` This seems a bit counter-intuitive: why is the first step in making a list simpler to make it more complicated? But a data frame has a big advantage: it bundles together multiple vectors so that everything is tracked together in a single object. Each `user` is a named list, where each element represents a column. ```{r} names(users$user[[1]]) ``` There are two ways to turn the list components into columns. `unnest_wider()` takes every component and makes a new column: ```{r} users %>% unnest_wider(user) ``` But in this case, there are many components and we don't need most of them so we can instead use `hoist()`. 
`hoist()` allows us to pull out selected components using the same syntax as `purrr::pluck()`: ```{r} users %>% hoist(user, followers = "followers", login = "login", url = "html_url" ) ``` `hoist()` removes the named components from the `user` list-column, so you can think of it as moving components out of the inner list into the top-level data frame. ## GitHub repos We start off `gh_repos` similarly, by putting it in a tibble: ```{r} repos <- tibble(repo = gh_repos) repos ``` This time the elements of `repos` are a list of repositories that belong to that user. These are observations, so should become new rows, so we use `unnest_longer()` rather than `unnest_wider()`: ```{r} repos <- repos %>% unnest_longer(repo) repos ``` Then we can use `unnest_wider()` or `hoist()`: ```{r} repos %>% hoist(repo, login = c("owner", "login"), name = "name", homepage = "homepage", watchers = "watchers_count" ) ``` Note the use of `c("owner", "login")`: this allows us to reach two levels deep inside of a list. An alternative approach would be to pull out just `owner` and then put each element of it in a column: ```{r} repos %>% hoist(repo, owner = "owner") %>% unnest_wider(owner) ``` ## Game of Thrones characters `got_chars` has a similar structure to `gh_users`: it's a list of named lists, where each element of the inner list describes some attribute of a GoT character. We start in the same way, first by creating a data frame and then by unnesting each component into a column: ```{r} chars <- tibble(char = got_chars) chars chars2 <- chars %>% unnest_wider(char) chars2 ``` This is more complex than `gh_users` because some component of `char` are themselves a list, giving us a collection of list-columns: ```{r} chars2 %>% select_if(is.list) ``` What you do next will depend on the purposes of the analysis. 
Maybe you want a row for every book and TV series that the character appears in: ```{r} chars2 %>% select(name, books, tvSeries) %>% pivot_longer(c(books, tvSeries), names_to = "media", values_to = "value") %>% unnest_longer(value) ``` Or maybe you want to build a table that lets you match title to name: ```{r} chars2 %>% select(name, title = titles) %>% unnest_longer(title) ``` (Note that the empty titles (`""`) are due to an infelicity in the input `got_chars`: ideally people without titles would have a title vector of length 0, not a title vector of length 1 containing an empty string.) ## Geocoding with google Next we'll tackle a more complex form of data that comes from Google's geocoding service, stored in the repurssive package ```{r} repurrrsive::gmaps_cities ``` `json` is a list-column of named lists, so it makes sense to start with `unnest_wider()`: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) ``` Notice that `results` is a list of lists. Most of the cities have 1 element (representing a unique match from the geocoding API), but Washington and Arlington have two. 
We can pull these out into separate rows with `unnest_longer()`: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) ``` Now these all have the same components, as revealed by `unnest_wider()`: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) %>% unnest_wider(results) ``` We can find the latitude and longitude by unnesting `geometry`: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) %>% unnest_wider(results) %>% unnest_wider(geometry) ``` And then location: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) %>% unnest_wider(results) %>% unnest_wider(geometry) %>% unnest_wider(location) ``` We could also just look at the first address for each city: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% hoist(results, first_result = 1) %>% unnest_wider(first_result) %>% unnest_wider(geometry) %>% unnest_wider(location) ``` Or use `hoist()` to dive deeply to get directly to `lat` and `lng`: ```{r} repurrrsive::gmaps_cities %>% hoist(json, lat = list("results", 1, "geometry", "location", "lat"), lng = list("results", 1, "geometry", "location", "lng") ) ``` ## Sharla Gelfand's discography We'll finish off with the most complex list, from [Sharla Gelfand's](https://sharla.party/post/discog-purrr/) discography. We'll start the usual way: putting the list into a single column data frame, and then widening so each component is a column. I also parse the `date_added` column into a real date-time[^readr]. [^readr]: I'd normally use `readr::parse_datetime()` or `lubridate::ymd_hms()`, but I can't here because it's a vignette and I don't want to add a dependency to tidyr just to simplify one example. 
```{r} discs <- tibble(disc = discog) %>% unnest_wider(disc) %>% mutate(date_added = as.POSIXct(strptime(date_added, "%Y-%m-%dT%H:%M:%S"))) discs ``` At this level, we see information about when each disc was added to Sharla's discography, not any information about the disc itself. To do that we need to widen the `basic_information` column: ```{r, error = TRUE} discs %>% unnest_wider(basic_information) ``` Unfortunately that fails because there's an `id` column inside `basic_information`. We can quickly see what's going on by setting `names_repair = "unique"`: ```{r} discs %>% unnest_wider(basic_information, names_repair = "unique") ``` The problem is that `basic_information` repeats the `id` column that's also stored at the top-level, so we can just drop that: ```{r} discs %>% select(!id) %>% unnest_wider(basic_information) ``` Alternatively, we could use `hoist()`: ```{r} discs %>% hoist(basic_information, title = "title", year = "year", label = list("labels", 1, "name"), artist = list("artists", 1, "name") ) ``` Here I quickly extract the name of the first label and artist by indexing deeply into the nested list. A more systematic approach would be to create separate tables for artist and label: ```{r} discs %>% hoist(basic_information, artist = "artists") %>% select(disc_id = id, artist) %>% unnest_longer(artist) %>% unnest_wider(artist) discs %>% hoist(basic_information, format = "formats") %>% select(disc_id = id, format) %>% unnest_longer(format) %>% unnest_wider(format) %>% unnest_longer(descriptions) ``` Then you could join these back on to the original dataset as needed. 
tidyr/vignettes/weather.csv0000644000176200001440000000552214013466035015577 0ustar liggesusers"id","year","month","element","d1","d2","d3","d4","d5","d6","d7","d8","d9","d10","d11","d12","d13","d14","d15","d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29","d30","d31" "MX17004",2010,1,"tmax",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,27.8,NA "MX17004",2010,1,"tmin",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,14.5,NA "MX17004",2010,2,"tmax",NA,27.3,24.1,NA,NA,NA,NA,NA,NA,NA,29.7,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,29.9,NA,NA,NA,NA,NA,NA,NA,NA "MX17004",2010,2,"tmin",NA,14.4,14.4,NA,NA,NA,NA,NA,NA,NA,13.4,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,10.7,NA,NA,NA,NA,NA,NA,NA,NA "MX17004",2010,3,"tmax",NA,NA,NA,NA,32.1,NA,NA,NA,NA,34.5,NA,NA,NA,NA,NA,31.1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX17004",2010,3,"tmin",NA,NA,NA,NA,14.2,NA,NA,NA,NA,16.8,NA,NA,NA,NA,NA,17.6,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX17004",2010,4,"tmax",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,36.3,NA,NA,NA,NA "MX17004",2010,4,"tmin",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,16.7,NA,NA,NA,NA "MX17004",2010,5,"tmax",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,33.2,NA,NA,NA,NA "MX17004",2010,5,"tmin",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,18.2,NA,NA,NA,NA "MX17004",2010,6,"tmax",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,28,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,30.1,NA,NA "MX17004",2010,6,"tmin",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,17.5,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,18,NA,NA "MX17004",2010,7,"tmax",NA,NA,28.6,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,29.9,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX17004",2010,7,"tmin",NA,NA,17.5,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,16.5,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA 
"MX17004",2010,8,"tmax",NA,NA,NA,NA,29.6,NA,NA,29,NA,NA,NA,NA,29.8,NA,NA,NA,NA,NA,NA,NA,NA,NA,26.4,NA,29.7,NA,NA,NA,28,NA,25.4 "MX17004",2010,8,"tmin",NA,NA,NA,NA,15.8,NA,NA,17.3,NA,NA,NA,NA,16.5,NA,NA,NA,NA,NA,NA,NA,NA,NA,15,NA,15.6,NA,NA,NA,15.3,NA,15.4 "MX17004",2010,10,"tmax",NA,NA,NA,NA,27,NA,28.1,NA,NA,NA,NA,NA,NA,29.5,28.7,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,31.2,NA,NA,NA "MX17004",2010,10,"tmin",NA,NA,NA,NA,14,NA,12.9,NA,NA,NA,NA,NA,NA,13,10.5,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,15,NA,NA,NA "MX17004",2010,11,"tmax",NA,31.3,NA,27.2,26.3,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,28.1,27.7,NA,NA,NA,NA "MX17004",2010,11,"tmin",NA,16.3,NA,12,7.9,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,12.1,14.2,NA,NA,NA,NA "MX17004",2010,12,"tmax",29.9,NA,NA,NA,NA,27.8,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA "MX17004",2010,12,"tmin",13.8,NA,NA,NA,NA,10.5,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA tidyr/vignettes/in-packages.Rmd0000644000176200001440000003261614363604046016261 0ustar liggesusers--- title: "In packages" output: rmarkdown::html_vignette description: | Things to bear in mind when using tidyr in a package. vignette: > %\VignetteIndexEntry{In packages} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction This vignette serves two distinct, but related, purposes: * It documents general best practices for using tidyr in a package, inspired by [using ggplot2 in packages][ggplot2-packages]. * It describes migration patterns for the transition from tidyr v0.8.3 to v1.0.0. This release includes breaking changes to `nest()` and `unnest()` in order to increase consistency within tidyr and with the rest of the tidyverse. Before we go on, we'll attach the packages we use, expose the version of tidyr, and make a small dataset to use in examples. 
```{r setup} library(tidyr) library(dplyr, warn.conflicts = FALSE) library(purrr) packageVersion("tidyr") mini_iris <- as_tibble(iris)[c(1, 2, 51, 52, 101, 102), ] mini_iris ``` ## Using tidyr in packages Here we assume that you're already familiar with using tidyr in functions, as described in `vignette("programming.Rmd")`. There are two important considerations when using tidyr in a package: * How to avoid `R CMD CHECK` notes when using fixed variable names. * How to alert yourself to upcoming changes in the development version of tidyr. ### Fixed column names If you know the column names, this code works in the same way regardless of whether its inside or outside of a package: ```{r} mini_iris %>% nest( petal = c(Petal.Length, Petal.Width), sepal = c(Sepal.Length, Sepal.Width) ) ``` But `R CMD check` will warn about undefined global variables (`Petal.Length`, `Petal.Width`, `Sepal.Length`, and `Sepal.Width`), because it doesn't know that `nest()` is looking for the variables inside of `mini_iris` (i.e. `Petal.Length` and friends are data-variables, not env-variables). The easiest way to silence this note is to use `all_of()`. `all_of()` is a tidyselect helper (like `starts_with()`, `ends_with()`, etc.) that takes column names stored as strings: ```{r} mini_iris %>% nest( petal = all_of(c("Petal.Length", "Petal.Width")), sepal = all_of(c("Sepal.Length", "Sepal.Width")) ) ``` Alternatively, you may want to use `any_of()` if it is OK that some of the specified variables cannot be found in the input data. The [tidyselect](https://tidyselect.r-lib.org) package offers an entire family of select helpers. You are probably already familiar with them from using `dplyr::select()`. ### Continuous integration Hopefully you've already adopted continuous integration for your package, in which `R CMD check` (which includes your own tests) is run on a regular basis, e.g. every time you push changes to your package's source on GitHub or similar. 
The tidyverse team currently relies most heavily on GitHub Actions, so that will be our example. `usethis::use_github_action()` can help you get started. We recommend adding a workflow that targets the devel version of tidyr. When should you do this? * Always? If your package is tightly coupled to tidyr, consider leaving this in place all the time, so you know if changes in tidyr affect your package. * Right before a tidyr release? For everyone else, you could add (or re-activate an existing) tidyr-devel workflow during the period preceding a major tidyr release that has the potential for breaking changes, especially if you've been contacted during our reverse dependency checks. Example of a GitHub Actions workflow that tests your package against the development version of tidyr: ``` yaml on: push: branches: - main pull_request: branches: - main name: R-CMD-check-tidyr-devel jobs: R-CMD-check: runs-on: macOS-latest steps: - uses: actions/checkout@v2 - uses: r-lib/actions/setup-r@v1 - name: Install dependencies run: | install.packages(c("remotes", "rcmdcheck")) remotes::install_deps(dependencies = TRUE) remotes::install_github("tidyverse/tidyr") shell: Rscript {0} - name: Check run: rcmdcheck::rcmdcheck(args = "--no-manual", error_on = "error") shell: Rscript {0} ``` GitHub Actions are an evolving landscape, so you can always mine the workflows for tidyr itself ([tidyverse/tidyr/.github/workflows](https://github.com/tidyverse/tidyr/tree/main/.github/workflows)) or the main [r-lib/actions](https://github.com/r-lib/actions) repo for ideas. ## tidyr v0.8.3 -> v1.0.0 v1.0.0 makes considerable changes to the interface of `nest()` and `unnest()` in order to bring them in line with newer tidyverse conventions. I have tried to make the functions as backward compatible as possible and to give informative warning messages, but I could not cover 100% of use cases, so you may need to change your package code. This guide will help you do so with a minimum of pain. 
Ideally, you'll tweak your package so that it works with both tidyr 0.8.3 and tidyr 1.0.0. This makes life considerably easier because it means there's no need to coordinate CRAN submissions - you can submit your package that works with both tidyr versions, before I submit tidyr to CRAN. This section describes our recommend practices for doing so, drawing from the general principles described in . If you use continuous integration already, we **strongly** recommend adding a build that tests with the development version of tidyr; see above for details. This section briefly describes how to run different code for different versions of tidyr, then goes through the major changes that might require workarounds: * `nest()` and `unnest()` get new interfaces. * `nest()` preserves groups. * `nest_()` and `unnest_()` are defunct. If you're struggling with a problem that's not described here, please reach out via [github](https://github.com/tidyverse/tidyr/issues/new) or [email](mailto:hadley@posit.co) so we can help out. ### Conditional code Sometimes you'll be able to write code that works with v0.8.3 _and_ v1.0.0. But this often requires code that's not particularly natural for either version and you'd be better off to (temporarily) have separate code paths, each containing non-contrived code. You get to re-use your existing code in the "old" branch, which will eventually be phased out, and write clean, forward-looking code in the "new" branch. The basic approach looks like this. First you define a function that returns `TRUE` for new versions of tidyr: ```{r} tidyr_new_interface <- function() { packageVersion("tidyr") > "0.8.99" } ``` We highly recommend keeping this as a function because it provides an obvious place to jot any transition notes for your package, and it makes it easier to remove transitional code later on. Another benefit is that the tidyr version is determined at *run time*, not at *build time*, and will therefore detect your user's current tidyr version. 
Then in your functions, you use an `if` statement to call different code for different versions:

```{r, eval = FALSE}
my_function_inside_a_package <- function(...) {
  # my code here
  if (tidyr_new_interface()) {
    # Freshly written code for v1.0.0
    out <- tidyr::nest(df, data = any_of(c("x", "y", "z")))
  } else {
    # Existing code for v0.8.3
    out <- tidyr::nest(df, x, y, z)
  }
  # more code here
}
```

If your new code uses a function that only exists in tidyr 1.0.0, you will get a `NOTE` from `R CMD check`: this is one of the few notes that you can explain in your CRAN submission comments. Just mention that it's for forward compatibility with tidyr 1.0.0, and CRAN will let your package through.

### New syntax for `nest()`

What changed:

* The to-be-nested columns are no longer accepted as "loose parts".
* The new list-column's name is no longer provided via the `.key` argument.
* Now we use a construct like this: `new_col = <columns to nest>`.

Why it changed:

* The use of `...` for metadata is a problematic pattern we're moving away from.
* The `new_col = <columns to nest>` construct lets us create multiple nested list-columns at once ("multi-nest").

```{r}
mini_iris %>%
  nest(petal = matches("Petal"), sepal = matches("Sepal"))
```

Before and after examples:

```{r eval = FALSE}
# v0.8.3
mini_iris %>%
  nest(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width, .key = "my_data")

# v1.0.0
mini_iris %>%
  nest(my_data = c(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width))

# v1.0.0 avoiding R CMD check NOTE
mini_iris %>%
  nest(my_data = any_of(c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")))

# or equivalently:
mini_iris %>%
  nest(my_data = !any_of("Species"))
```

If you need a quick and dirty fix without having to think, just call `nest_legacy()` instead of `nest()`.
It's the same as `nest()` in v0.8.3:

```{r, eval = FALSE}
if (tidyr_new_interface()) {
  out <- tidyr::nest_legacy(df, x, y, z)
} else {
  out <- tidyr::nest(df, x, y, z)
}
```

### New syntax for `unnest()`

What changed:

* The to-be-unnested columns must now be specified explicitly, instead of defaulting to all list-columns. This also deprecates `.drop` and `.preserve`.

* `.sep` has been deprecated and replaced with `names_sep`.

* `unnest()` uses the [emerging tidyverse standard][name-repair] to disambiguate duplicated names. Use `names_repair = tidyr_legacy` to request the previous approach.

* `.id` has been deprecated because it can be easily replaced by creating the column of names prior to `unnest()`, e.g. with an upstream call to `mutate()`.

```{r, eval = FALSE}
# v0.8.3
df %>% unnest(x, .id = "id")

# v1.0.0
df %>% mutate(id = names(x)) %>% unnest(x)
```

Why it changed:

* The use of `...` for metadata is a problematic pattern we're moving away from.

* The changes to details arguments relate to features rolling out across multiple packages in the tidyverse. For example, `ptype` exposes prototype support from the new [vctrs package](https://vctrs.r-lib.org). `names_repair` specifies what to do about duplicated or non-syntactic names, consistent with tibble and readxl.

Before and after:

```{r, eval = FALSE}
nested <- mini_iris %>%
  nest(my_data = c(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width))

# v0.8.3 automatically unnests list-cols
nested %>% unnest()

# v1.0.0 must be told which columns to unnest
nested %>% unnest(any_of("my_data"))
```

If you need a quick and dirty fix without having to think, just call `unnest_legacy()` instead of `unnest()`. It's the same as `unnest()` in v0.8.3:

```{r, eval = FALSE}
if (tidyr_new_interface()) {
  out <- tidyr::unnest_legacy(df)
} else {
  out <- tidyr::unnest(df)
}
```

### `nest()` preserves groups

What changed:

* `nest()` now preserves the groups present in the input.
Why it changed: * To reflect the growing support for grouped data frames, especially in recent releases of dplyr. See, for example, `dplyr::group_modify()`, `group_map()`, and friends. If the fact that `nest()` now preserves groups is problematic downstream, you have a few choices: * Apply `ungroup()` to the result. This level of pragmatism suggests, however, you should at least consider the next two options. * You should never have grouped in the first place. Eliminate the `group_by()` call and specify which columns should be nested versus not nested directly in `nest()`. * Adjust the downstream code to accommodate grouping. Imagine we used `group_by()` then `nest()` on `mini_iris`, then we computed on the list-column *outside the data frame*. ```{r} (df <- mini_iris %>% group_by(Species) %>% nest()) (external_variable <- map_int(df$data, nrow)) ``` And now we try to add that back to the data *post hoc*: ```{r error = TRUE} df %>% mutate(n_rows = external_variable) ``` This fails because `df` is grouped and `mutate()` is group-aware, so it's hard to add a completely external variable. Other than pragmatically `ungroup()`ing, what can we do? One option is to work inside the data frame, i.e. bring the `map()` inside the `mutate()`, and design the problem away: ```{r} df %>% mutate(n_rows = map_int(data, nrow)) ``` If, somehow, the grouping seems appropriate AND working inside the data frame is not an option, `tibble::add_column()` is group-unaware. It lets you add external data to a grouped data frame. ```{r} df %>% tibble::add_column(n_rows = external_variable) ``` ### `nest_()` and `unnest_()` are defunct What changed: * `nest_()` and `unnest_()` no longer work Why it changed: * We are transitioning the whole tidyverse to the powerful tidy eval framework. Therefore, we are gradually removing all previous solutions: - Specialized standard evaluation versions of functions, e.g., `foo_()` as a complement to `foo()`. - The older lazyeval framework. 
Before and after: ```{r eval = FALSE} # v0.8.3 mini_iris %>% nest_( key_col = "my_data", nest_cols = c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width") ) nested %>% unnest_(~ my_data) # v1.0.0 mini_iris %>% nest(my_data = any_of(c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width"))) nested %>% unnest(any_of("my_data")) ``` [ggplot2-packages]: https://ggplot2.tidyverse.org/dev/articles/ggplot2-in-packages.html [name-repair]: https://www.tidyverse.org/blog/2019/01/tibble-2.0.1/#name-repair tidyr/R/0000755000176200001440000000000014553565525011625 5ustar liggesuserstidyr/R/unite.R0000644000176200001440000000471614360013543013064 0ustar liggesusers#' Unite multiple columns into one by pasting strings together #' #' Convenience function to paste together multiple columns into one. #' #' @param data A data frame. #' @param col The name of the new column, as a string or symbol. #' #' This argument is passed by expression and supports #' [quasiquotation][rlang::quasiquotation] (you can unquote strings #' and symbols). The name is captured from the expression with #' [rlang::ensym()] (note that this kind of interface where #' symbols do not represent actual objects is now discouraged in the #' tidyverse; we support it here for backward compatibility). #' @param ... <[`tidy-select`][tidyr_tidy_select]> Columns to unite #' @param sep Separator to use between values. #' @param na.rm If `TRUE`, missing values will be removed prior to uniting #' each value. #' @param remove If `TRUE`, remove input columns from output data frame. #' @seealso [separate()], the complement. 
#' @export #' @examples #' df <- expand_grid(x = c("a", NA), y = c("b", NA)) #' df #' #' df %>% unite("z", x:y, remove = FALSE) #' # To remove missing values: #' df %>% unite("z", x:y, na.rm = TRUE, remove = FALSE) #' #' # Separate is almost the complement of unite #' df %>% #' unite("xy", x:y) %>% #' separate(xy, c("x", "y")) #' # (but note `x` and `y` contain now "NA" not NA) unite <- function(data, col, ..., sep = "_", remove = TRUE, na.rm = FALSE) { check_dots_unnamed() UseMethod("unite") } #' @export unite.data.frame <- function(data, col, ..., sep = "_", remove = TRUE, na.rm = FALSE) { check_required(col) check_string(sep) check_bool(remove) check_bool(na.rm) if (dots_n(...) == 0) { from_vars <- set_names(seq_along(data), names(data)) } else { from_vars <- tidyselect::eval_select(expr(c(...)), data, allow_rename = FALSE) } out <- data if (remove) { out <- out[setdiff(names(out), names(from_vars))] } if (identical(na.rm, TRUE)) { cols <- unname(map(data[from_vars], as.character)) rows <- transpose(cols) united <- map_chr(rows, function(x) paste0(x[!is.na(x)], collapse = sep)) } else { cols <- unname(as.list(data[from_vars])) united <- exec(paste, !!!cols, sep = sep) } var <- as_string(ensym(col)) var <- enc2utf8(var) united <- list(united) names(united) <- var first_pos <- which(names(data) %in% names(from_vars))[1] after <- first_pos - 1L out <- df_append(out, united, after = after) reconstruct_tibble(data, out, if (remove) names(from_vars)) } tidyr/R/chop.R0000644000176200001440000002452614520546613012701 0ustar liggesusers#' Chop and unchop #' #' @description #' Chopping and unchopping preserve the width of a data frame, changing its #' length. `chop()` makes `df` shorter by converting rows within each group #' into list-columns. `unchop()` makes `df` longer by expanding list-columns #' so that each element of the list-column gets its own row in the output. 
#' `chop()` and `unchop()` are building blocks for more complicated functions
#' (like [unnest()], [unnest_longer()], and [unnest_wider()]) and are generally
#' more suitable for programming than interactive data analysis.
#'
#' @details
#' Generally, unchopping is more useful than chopping because it simplifies
#' a complex data structure, and [nest()]ing is usually more appropriate
#' than `chop()`ing since it better preserves the connections between
#' observations.
#'
#' `chop()` creates list-columns of class [vctrs::list_of()] to ensure
#' consistent behaviour when the chopped data frame is emptied. For
#' instance this helps getting back the original column types after
#' the roundtrip chop and unchop. Because `<list_of>` keeps track of
#' the type of its elements, `unchop()` is able to reconstitute the
#' correct vector type even for empty list-columns.
#'
#' @inheritParams rlang::args_dots_empty
#' @inheritParams rlang::args_error_context
#'
#' @param data A data frame.
#' @param cols <[`tidy-select`][tidyr_tidy_select]> Columns to chop or unchop.
#'
#' For `unchop()`, each column should be a list-column containing generalised
#' vectors (e.g. any mix of `NULL`s, atomic vectors, S3 vectors, lists,
#' or data frames).
#' @param keep_empty By default, you get one row of output for each element
#' of the list that you are unchopping/unnesting. This means that if there's a
#' size-0 element (like `NULL` or an empty data frame or vector), then that
#' entire row will be dropped from the output. If you want to preserve all
#' rows, use `keep_empty = TRUE` to replace size-0 elements with a single row
#' of missing values.
#' @param ptype Optionally, a named list of column name-prototype pairs to
#' coerce `cols` to, overriding the default that will be guessed from
#' combining the individual values. Alternatively, a single empty ptype
#' can be supplied, which will be applied to all `cols`.
#' @export #' @examples #' # Chop ---------------------------------------------------------------------- #' df <- tibble(x = c(1, 1, 1, 2, 2, 3), y = 1:6, z = 6:1) #' # Note that we get one row of output for each unique combination of #' # non-chopped variables #' df %>% chop(c(y, z)) #' # cf nest #' df %>% nest(data = c(y, z)) #' #' # Unchop -------------------------------------------------------------------- #' df <- tibble(x = 1:4, y = list(integer(), 1L, 1:2, 1:3)) #' df %>% unchop(y) #' df %>% unchop(y, keep_empty = TRUE) #' #' # unchop will error if the types are not compatible: #' df <- tibble(x = 1:2, y = list("1", 1:3)) #' try(df %>% unchop(y)) #' #' # Unchopping a list-col of data frames must generate a df-col because #' # unchop leaves the column names unchanged #' df <- tibble(x = 1:3, y = list(NULL, tibble(x = 1), tibble(y = 1:2))) #' df %>% unchop(y) #' df %>% unchop(y, keep_empty = TRUE) chop <- function(data, cols, ..., error_call = current_env()) { check_dots_empty0(...) check_data_frame(data, call = error_call) check_required(cols, call = error_call) cols <- tidyselect::eval_select( expr = enquo(cols), data = data, allow_rename = FALSE, error_call = error_call ) cols <- tidyr_new_list(data[cols]) keys <- data[setdiff(names(data), names(cols))] info <- vec_group_loc(keys) keys <- info$key indices <- info$loc size <- vec_size(keys) cols <- map(cols, col_chop, indices = indices) cols <- new_data_frame(cols, n = size) out <- vec_cbind(keys, cols, .error_call = error_call) reconstruct_tibble(data, out) } col_chop <- function(x, indices) { ptype <- vec_ptype(x) out <- vec_chop(x, indices) out <- new_list_of(out, ptype) out } #' @export #' @rdname chop unchop <- function(data, cols, ..., keep_empty = FALSE, ptype = NULL, error_call = current_env()) { check_dots_empty0(...) 
check_data_frame(data, call = error_call) check_required(cols, call = error_call) check_bool(keep_empty, call = error_call) sel <- tidyselect::eval_select( expr = enquo(cols), data = data, allow_rename = FALSE, error_call = error_call ) size <- vec_size(data) names <- names(data) # Start from first principles to avoid issues in any subclass methods out <- new_data_frame(data, n = size) cols <- out[sel] # Remove unchopped columns to avoid slicing them needlessly later out[sel] <- NULL result <- df_unchop( x = cols, ptype = ptype, keep_empty = keep_empty, error_call = error_call ) cols <- result$val loc <- result$loc out <- vec_slice(out, loc) # Add unchopped columns back on then preserve original ordering out <- tidyr_col_modify(out, cols) out <- out[names] reconstruct_tibble(data, out) } # Helpers ----------------------------------------------------------------- # `df_unchop()` takes a data frame and unchops every column. This preserves the # width, but changes the size. # # - If `keep_empty = TRUE`, empty elements (`NULL` and empty typed elements) # are retained as their size 1 missing equivalents. # - If `keep_empty = FALSE`, rows of entirely empty elements are dropped. # - In the `keep_empty = FALSE` case, when determining the common size of the # row, `NULL`s are not included in the computation, but empty typed elements # are (i.e. you can't recycle integer() and 1:2). # # `df_unchop()` returns a data frame of two columns: # - `loc` locations that map each row to their original row in `x`. Generally # used to slice the data frame `x` was subset from to align it with `val`. # - `val` the unchopped data frame. 
df_unchop <- function(x, ..., ptype = NULL, keep_empty = FALSE, error_call = caller_env()) { check_dots_empty() ptype <- check_list_of_ptypes(ptype, names = names(x), call = error_call) size <- vec_size(x) # Avoid any data frame subclass method dispatch x <- new_data_frame(x, n = size) width <- length(x) x_names <- names(x) seq_len_width <- seq_len(width) seq_len_size <- seq_len(size) if (width == 0L) { # Algorithm requires >=1 columns out <- list(loc = seq_len_size, val = x) out <- new_data_frame(out, n = size) return(out) } x_ptypes <- map2(x, x_names, function(col, name) { ptype[[name]] %||% list_of_ptype(col) }) x_is_list <- map_lgl(x, vec_is_list) x_sizes <- vector("list", length = width) x_nulls <- vector("list", length = width) for (i in seq_len_width) { col <- x[[i]] col_name <- x_names[[i]] col_ptype <- x_ptypes[[i]] col_is_list <- x_is_list[[i]] if (!col_is_list) { # Optimize rare non list-cols x_sizes[[i]] <- vec_rep(1L, size) x_nulls[[i]] <- vec_rep(FALSE, size) next } col <- tidyr_new_list(col) col_sizes <- list_sizes(col) col_nulls <- vec_detect_missing(col) # Always replace `NULL` elements with size 1 missing equivalent for recycling. # These will be reset to `NULL` in `unchop_finalize()` if the # entire row was missing and `keep_empty = FALSE`. 
info <- list_replace_null(col, col_sizes, ptype = col_ptype) col <- info$x col_sizes <- info$sizes if (keep_empty) { info <- list_replace_empty_typed(col, col_sizes, ptype = col_ptype) col <- info$x col_sizes <- info$sizes } x[[i]] <- col x_sizes[[i]] <- col_sizes x_nulls[[i]] <- col_nulls } sizes <- reduce(x_sizes, unchop_sizes2, error_call = error_call) info <- unchop_finalize(x, sizes, x_nulls, keep_empty) x <- info$x sizes <- info$sizes out_loc <- vec_rep_each(seq_len_size, sizes) out_size <- sum(sizes) out_cols <- vector("list", length = width) for (i in seq_len_width) { col <- x[[i]] col_name <- x_names[[i]] col_ptype <- x_ptypes[[i]] col_is_list <- x_is_list[[i]] col_sizes <- x_sizes[[i]] if (!col_is_list) { if (!is_null(col_ptype)) { col <- vec_cast(col, col_ptype, x_arg = col_name, call = error_call) } out_cols[[i]] <- vec_slice(col, out_loc) next } # Drop outer names because inner elements have varying size col <- unname(col) row_recycle <- col_sizes != sizes col[row_recycle] <- map2(col[row_recycle], sizes[row_recycle], vec_recycle, call = error_call) col <- list_unchop(col, ptype = col_ptype) if (is_null(col)) { # This can happen when both of these are true: # - `col` was an empty list(), or a list of all `NULL`s. # - No ptype was specified for `col`, either by the user or by a list-of. if (out_size != 0L) { cli::cli_abort( "`NULL` column generated, but output size is not `0`.", .internal = TRUE ) } col <- unspecified(0L) } out_cols[[i]] <- col } names(out_cols) <- x_names out_val <- new_data_frame(out_cols, n = out_size) out <- list(loc = out_loc, val = out_val) out <- new_data_frame(out, n = out_size) out } unchop_sizes2 <- function(x, y, error_call) { # Standard tidyverse recycling rules, just vectorized. # Recycle `x` values with `y` x_one <- x == 1L if (any(x_one)) { x[x_one] <- y[x_one] } # Recycle `y` values with `x`. # Only necessary to be able to check for incompatibilities. 
y_one <- y == 1L if (any(y_one)) { y[y_one] <- x[y_one] } # Check for incompatibilities incompatible <- x != y if (any(incompatible)) { row <- which(incompatible)[[1]] x <- x[[row]] y <- y[[row]] cli::cli_abort( "In row {row}, can't recycle input of size {x} to size {y}.", call = error_call ) } x } unchop_finalize <- function(x, sizes, x_nulls, keep_empty) { if (keep_empty) { return(list(x = x, sizes = sizes)) } # If !keep_empty, `NULL` elements were temporarily given size 1L and # converted to a size 1 missing equivalent for recycling. However, if the # entire row was made up of `NULL`s, then we need to adjust the size back to # 0L and convert back to `NULL`s since that row should be dropped. null_row <- reduce(x_nulls, `&`) if (any(null_row)) { sizes[null_row] <- 0L x <- vec_assign(x, null_row, vec_init(x, n = 1L)) } list(x = x, sizes = sizes) } tidyr/R/unnest.R0000644000176200001440000001461214520540454013254 0ustar liggesusers#' Unnest a list-column of data frames into rows and columns #' #' Unnest expands a list-column containing data frames into rows and columns. #' #' @inheritSection nest New syntax #' @inheritParams unchop #' @inheritParams unpack #' @param cols <[`tidy-select`][tidyr_tidy_select]> List-columns to unnest. #' #' When selecting multiple columns, values from the same row will be recycled #' to their common size. #' @param ... `r lifecycle::badge("deprecated")`: #' previously you could write `df %>% unnest(x, y, z)`. #' Convert to `df %>% unnest(c(x, y, z))`. If you previously created a new #' variable in `unnest()` you'll now need to do it explicitly with `mutate()`. #' Convert `df %>% unnest(y = fun(x, y, z))` #' to `df %>% mutate(y = fun(x, y, z)) %>% unnest(y)`. #' @param names_sep If `NULL`, the default, the outer names will come from the #' inner names. If a string, the outer names will be formed by pasting #' together the outer and the inner column names, separated by `names_sep`. 
#' @param .drop,.preserve #' `r lifecycle::badge("deprecated")`: #' all list-columns are now preserved; If there are any that you #' don't want in the output use `select()` to remove them prior to #' unnesting. #' @param .id #' `r lifecycle::badge("deprecated")`: #' convert `df %>% unnest(x, .id = "id")` to `df %>% mutate(id = #' names(x)) %>% unnest(x))`. #' @param .sep #' `r lifecycle::badge("deprecated")`: #' use `names_sep` instead. #' @export #' @family rectangling #' @examples #' # unnest() is designed to work with lists of data frames #' df <- tibble( #' x = 1:3, #' y = list( #' NULL, #' tibble(a = 1, b = 2), #' tibble(a = 1:3, b = 3:1, c = 4) #' ) #' ) #' # unnest() recycles input rows for each row of the list-column #' # and adds a column for each column #' df %>% unnest(y) #' #' # input rows with 0 rows in the list-column will usually disappear, #' # but you can keep them (generating NAs) with keep_empty = TRUE: #' df %>% unnest(y, keep_empty = TRUE) #' #' # Multiple columns ---------------------------------------------------------- #' # You can unnest multiple columns simultaneously #' df <- tibble( #' x = 1:2, #' y = list( #' tibble(a = 1, b = 2), #' tibble(a = 3:4, b = 5:6) #' ), #' z = list( #' tibble(c = 1, d = 2), #' tibble(c = 3:4, d = 5:6) #' ) #' ) #' df %>% unnest(c(y, z)) #' #' # Compare with unnesting one column at a time, which generates #' # the Cartesian product #' df %>% #' unnest(y) %>% #' unnest(z) unnest <- function(data, cols, ..., keep_empty = FALSE, ptype = NULL, names_sep = NULL, names_repair = "check_unique", .drop = deprecated(), .id = deprecated(), .sep = deprecated(), .preserve = deprecated()) { deprecated <- FALSE if (!missing(.preserve)) { lifecycle::deprecate_warn( "1.0.0", "unnest(.preserve = )", details = "All list-columns are now preserved", always = TRUE ) deprecated <- TRUE .preserve <- tidyselect::vars_select(tbl_vars(data), !!enquo(.preserve)) } else { .preserve <- NULL } if (missing(cols) && missing(...)) { list_cols 
<- names(data)[map_lgl(data, is_list)] cols <- expr(c(!!!syms(setdiff(list_cols, .preserve)))) cli::cli_warn(c( "`cols` is now required when using `unnest()`.", i = "Please use `cols = {expr_text(cols)}`." )) deprecated <- TRUE } if (missing(...)) { cols <- enquo(cols) } else { dots <- enquos(cols, ..., .named = TRUE, .ignore_empty = "all") data <- dplyr::mutate(data, !!!dots) cols <- expr(c(!!!syms(names(dots)))) unnest_call <- expr(unnest(!!cols)) cli::cli_warn(c( "`unnest()` has a new interface. See `?unnest` for details.", i = "Try `df %>% {expr_text(unnest_call)}`, with `mutate()` if needed." )) deprecated <- TRUE } if (!is_missing(.drop)) { lifecycle::deprecate_warn( "1.0.0", "unnest(.drop = )", details = "All list-columns are now preserved.", always = TRUE ) deprecated <- TRUE } if (!is_missing(.id)) { lifecycle::deprecate_warn( "1.0.0", "unnest(.id = )", details = "Manually create column of names instead.", always = TRUE ) deprecated <- TRUE first_col <- tidyselect::vars_select(tbl_vars(data), !!cols)[[1]] data[[.id]] <- names(data[[first_col]]) } if (!is_missing(.sep)) { lifecycle::deprecate_warn("1.0.0", "unnest(.sep = )", details = glue("Use `names_sep = '{.sep}'` instead.") ) deprecated <- TRUE names_sep <- .sep } if (deprecated) { return(unnest( data, cols = !!cols, names_sep = names_sep, keep_empty = keep_empty, ptype = ptype, names_repair = tidyr_legacy )) } UseMethod("unnest") } #' @export unnest.data.frame <- function(data, cols, ..., keep_empty = FALSE, ptype = NULL, names_sep = NULL, names_repair = "check_unique", .drop = "DEPRECATED", .id = "DEPRECATED", .sep = "DEPRECATED", .preserve = "DEPRECATED") { error_call <- current_env() cols <- tidyselect::eval_select( expr = enquo(cols), data = data, allow_rename = FALSE ) cols <- unname(cols) data <- unchop( data = data, cols = all_of(cols), keep_empty = keep_empty, ptype = ptype, error_call = error_call ) unpack( data = data, cols = all_of(cols), names_sep = names_sep, names_repair = names_repair, 
error_call = error_call ) } #' @export unnest.rowwise_df <- function(data, cols, ..., keep_empty = FALSE, ptype = NULL, names_sep = NULL, names_repair = "check_unique") { out <- unnest.data.frame(as_tibble(data), {{ cols }}, keep_empty = keep_empty, ptype = ptype, names_sep = names_sep, names_repair = names_repair ) out <- dplyr::grouped_df(out, dplyr::group_vars(data)) out } tidyr/R/unnest-longer.R0000644000176200001440000002103314360013543014527 0ustar liggesusers#' Unnest a list-column into rows #' #' @description #' `unnest_longer()` turns each element of a list-column into a row. It #' is most naturally suited to list-columns where the elements are unnamed #' and the length of each element varies from row to row. #' #' `unnest_longer()` generally preserves the number of columns of `x` while #' modifying the number of rows. #' #' Learn more in `vignette("rectangle")`. #' #' @inheritParams hoist #' @inheritParams unnest #' @param col <[`tidy-select`][tidyr_tidy_select]> List-column(s) to unnest. #' #' When selecting multiple columns, values from the same row will be recycled #' to their common size. #' @param values_to A string giving the column name (or names) to store the #' unnested values in. If multiple columns are specified in `col`, this can #' also be a glue string containing `"{col}"` to provide a template for the #' column names. The default, `NULL`, gives the output columns the same names #' as the input columns. #' @param indices_to A string giving the column name (or names) to store the #' inner names or positions (if not named) of the values. If multiple columns #' are specified in `col`, this can also be a glue string containing `"{col}"` #' to provide a template for the column names. The default, `NULL`, gives the #' output columns the same names as `values_to`, but suffixed with `"_id"`. #' @param indices_include A single logical value specifying whether or not to #' add an index column. 
If any value has inner names, the index column will be #' a character vector of those names, otherwise it will be an integer vector #' of positions. If `NULL`, defaults to `TRUE` if any value has inner names #' or if `indices_to` is provided. #' #' If `indices_to` is provided, then `indices_include` can't be `FALSE`. #' @family rectangling #' @export #' @examples #' # `unnest_longer()` is useful when each component of the list should #' # form a row #' df <- tibble( #' x = 1:4, #' y = list(NULL, 1:3, 4:5, integer()) #' ) #' df %>% unnest_longer(y) #' #' # Note that empty values like `NULL` and `integer()` are dropped by #' # default. If you'd like to keep them, set `keep_empty = TRUE`. #' df %>% unnest_longer(y, keep_empty = TRUE) #' #' # If the inner vectors are named, the names are copied to an `_id` column #' df <- tibble( #' x = 1:2, #' y = list(c(a = 1, b = 2), c(a = 10, b = 11, c = 12)) #' ) #' df %>% unnest_longer(y) #' #' # Multiple columns ---------------------------------------------------------- #' # If columns are aligned, you can unnest simultaneously #' df <- tibble( #' x = 1:2, #' y = list(1:2, 3:4), #' z = list(5:6, 7:8) #' ) #' df %>% #' unnest_longer(c(y, z)) #' #' # This is important because sequential unnesting would generate the #' # Cartesian product of the rows #' df %>% #' unnest_longer(y) %>% #' unnest_longer(z) unnest_longer <- function(data, col, values_to = NULL, indices_to = NULL, indices_include = NULL, keep_empty = FALSE, names_repair = "check_unique", simplify = TRUE, ptype = NULL, transform = NULL) { check_data_frame(data) check_required(col) check_name(values_to, allow_null = TRUE) check_name(indices_to, allow_null = TRUE) check_bool(indices_include, allow_null = TRUE) check_bool(keep_empty) error_call <- current_env() cols <- tidyselect::eval_select(enquo(col), data, allow_rename = FALSE) col_names <- names(cols) n_col_names <- length(col_names) values_to <- values_to %||% "{col}" if (is.null(indices_to)) { indices_to <- 
vec_paste0(values_to, "_id") } else { if (is_false(indices_include)) { cli::cli_abort( "Can't use {.code indices_include = FALSE} when {.arg indices_to} is supplied." ) } indices_include <- TRUE } values_to <- glue_col_names(values_to, col_names) values_to <- vec_recycle(values_to, size = n_col_names) indices_to <- glue_col_names(indices_to, col_names) indices_to <- vec_recycle(indices_to, size = n_col_names) for (i in seq_along(cols)) { col <- cols[[i]] col_name <- col_names[[i]] col_values_to <- values_to[[i]] col_indices_to <- indices_to[[i]] data[[col]] <- col_to_long( col = data[[col]], name = col_name, values_to = col_values_to, indices_to = col_indices_to, indices_include = indices_include, keep_empty = keep_empty ) } data <- unchop(data, all_of(col_names), error_call = error_call) for (i in seq_along(cols)) { col <- cols[[i]] data[[col]] <- df_simplify( x = data[[col]], ptype = ptype, transform = transform, simplify = simplify ) } unpack( data = data, cols = all_of(col_names), names_repair = names_repair, error_call = error_call ) } # Converts a column of any type to a `list_of` col_to_long <- function(col, name, values_to, indices_to, indices_include, keep_empty, error_call = caller_env()) { if (vec_is_list(col)) { ptype <- list_of_ptype(col) } else { ptype <- vec_ptype(col) col <- vec_chop(col) } # Avoid expensive dispatch from `[[.list_of`, and allow for `[[<-`. # We've already captured the `ptype`. 
col <- tidyr_new_list(col) if (!list_all_vectors2(col)) { cli::cli_abort( "List-column {.var {name}} must contain only vectors or `NULL`.", call = error_call ) } sizes <- list_sizes(col) # Collect index info before replacing `NULL`s so `keep_empty` works correctly info <- collect_indices_info(col, sizes, indices_include, keep_empty) indices_include <- info$indices_include indices <- info$indices index_ptype <- info$index_ptype size_null <- as.integer(keep_empty) info <- list_replace_null(col, sizes, ptype = ptype, size = size_null) col <- info$x sizes <- info$sizes if (keep_empty) { info <- list_replace_empty_typed(col, sizes, ptype = ptype, size = 1L) col <- info$x sizes <- info$sizes } if (is.null(ptype)) { # Initialize `ptype` to generate a `ptype` version of the output data frame. # Important in the size 0 input case. ptype <- unspecified() } if (indices_include) { names <- c(values_to, indices_to) ptype <- new_long_indexed_frame(ptype, index_ptype, 0L, names) col <- pmap( list(col, indices, sizes), function(elt, index, size) new_long_indexed_frame(elt, index, size, names) ) } else { name <- values_to ptype <- new_long_frame(ptype, 0L, name) col <- map2( col, sizes, function(elt, size) new_long_frame(elt, size, name) ) } ptype <- vec_ptype_common(ptype, !!!col) col <- vec_cast_common(!!!col, .to = ptype) col <- new_list_of(col, ptype = ptype) col } new_long_frame <- function(x, size, name) { out <- list(x) names(out) <- name new_data_frame(out, n = size) } new_long_indexed_frame <- function(x, index, size, names) { out <- list(x, index) names(out) <- names new_data_frame(out, n = size) } collect_indices_info <- function(col, sizes, indices_include, keep_empty) { out <- list( indices_include = FALSE, indices = NULL, index_ptype = NULL ) if (is_false(indices_include)) { return(out) } indices <- map(col, vec_names) unnamed <- vec_detect_missing(indices) all_unnamed <- all(unnamed) if (is.null(indices_include) && all_unnamed) { # Same as `indices_include = FALSE` 
return(out) } if (all_unnamed) { # Indices are requested, but none of the elements are named. # Generate integer column of sequential indices. indices <- map(sizes, seq_len) index_ptype <- integer() } else { # Indices are requested, and some elements are named. # For any unnamed elements, generate `NA` indices. indices[unnamed] <- map(sizes[unnamed], vec_rep, x = "") index_ptype <- character() } if (keep_empty) { empty <- sizes == 0L if (any(empty)) { # `NULL` or typed empty elements get an `NA` index of the right type index_empty <- vec_init(index_ptype) indices[empty] <- list(index_empty) } } out$indices_include <- TRUE out$indices <- indices out$index_ptype <- index_ptype out } glue_col_names <- function(string, col_names) { data <- list(col = col_names) out <- glue::glue_data(data, string, .envir = NULL) out <- as.character(out) out } tidyr/R/pack.R0000644000176200001440000002112214363516001012644 0ustar liggesusers#' Pack and unpack #' #' @description #' Packing and unpacking preserve the length of a data frame, changing its #' width. `pack()` makes `df` narrow by collapsing a set of columns into a #' single df-column. `unpack()` makes `data` wider by expanding df-columns #' back out into individual columns. #' #' @details #' Generally, unpacking is more useful than packing because it simplifies #' a complex data structure. Currently, few functions work with df-cols, #' and they are mostly a curiosity, but seem worth exploring further because #' they mimic the nested column headers that are so popular in Excel. #' #' @inheritParams rlang::args_error_context #' #' @param data,.data A data frame. #' @param cols <[`tidy-select`][tidyr_tidy_select]> Columns to unpack. #' @param names_sep,.names_sep If `NULL`, the default, the names will be left #' as is. In `pack()`, inner names will come from the former outer names; #' in `unpack()`, the new outer names will come from the inner names. #' #' If a string, the inner and outer names will be used together. 
In #' `unpack()`, the names of the new outer columns will be formed by pasting #' together the outer and the inner column names, separated by `names_sep`. In #' `pack()`, the new inner names will have the outer names + `names_sep` #' automatically stripped. This makes `names_sep` roughly symmetric between #' packing and unpacking. #' @param ... For `pack()`, <[`tidy-select`][tidyr_tidy_select]> columns to #' pack, specified using name-variable pairs of the form #' `new_col = c(col1, col2, col3)`. The right hand side can be any valid tidy #' select expression. #' #' For `unpack()`, these dots are for future extensions and must be empty. #' @export #' @examples #' # Packing ------------------------------------------------------------------- #' # It's not currently clear why you would ever want to pack columns #' # since few functions work with this sort of data. #' df <- tibble(x1 = 1:3, x2 = 4:6, x3 = 7:9, y = 1:3) #' df #' df %>% pack(x = starts_with("x")) #' df %>% pack(x = c(x1, x2, x3), y = y) #' #' # .names_sep allows you to strip off common prefixes; this #' # acts as a natural inverse to name_sep in unpack() #' iris %>% #' as_tibble() %>% #' pack( #' Sepal = starts_with("Sepal"), #' Petal = starts_with("Petal"), #' .names_sep = "." #' ) #' #' # Unpacking ----------------------------------------------------------------- #' df <- tibble( #' x = 1:3, #' y = tibble(a = 1:3, b = 3:1), #' z = tibble(X = c("a", "b", "c"), Y = runif(3), Z = c(TRUE, FALSE, NA)) #' ) #' df #' df %>% unpack(y) #' df %>% unpack(c(y, z)) #' df %>% unpack(c(y, z), names_sep = "_") pack <- function(.data, ..., .names_sep = NULL, .error_call = current_env()) { check_data_frame(.data, call = .error_call) cols <- enquos(...) 
if (any(names2(cols) == "")) { cli::cli_abort("All elements of `...` must be named.", call = .error_call) } check_string(.names_sep, allow_null = TRUE, call = .error_call) cols <- with_indexed_errors( map(cols, function(col) { tidyselect::eval_select( expr = col, data = .data, allow_rename = FALSE, error_call = NULL ) }), message = function(cnd) { cli::format_inline("In expression named {.arg {cnd$name}}:") }, .error_call = .error_call ) unpacked <- setdiff(names(.data), unlist(map(cols, names))) unpacked <- .data[unpacked] packed <- map(cols, ~ .data[.x]) if (!is.null(.names_sep)) { packed <- imap(packed, strip_names, names_sep = .names_sep) } packed <- new_data_frame(packed, n = vec_size(.data)) out <- vec_cbind(unpacked, packed, .error_call = .error_call) reconstruct_tibble(.data, out) } #' @export #' @rdname pack #' @param names_repair Used to check that output data frame has valid #' names. Must be one of the following options: #' #' * `"minimal`": no name repair or checks, beyond basic existence, #' * `"unique`": make sure names are unique and not empty, #' * `"check_unique`": (the default), no name repair, but check they are unique, #' * `"universal`": make the names unique and syntactic #' * a function: apply custom name repair. #' * [tidyr_legacy]: use the name repair from tidyr 0.8. #' * a formula: a purrr-style anonymous function (see [rlang::as_function()]) #' #' See [vctrs::vec_as_names()] for more details on these terms and the #' strategies used to enforce them. unpack <- function(data, cols, ..., names_sep = NULL, names_repair = "check_unique", error_call = current_env()) { check_dots_empty0(...) 
check_data_frame(data, call = error_call) check_required(cols, call = error_call) check_string(names_sep, allow_null = TRUE, call = error_call) # Start from first principles to avoid issues in any subclass methods out <- tidyr_new_list(data) cols <- tidyselect::eval_select( expr = enquo(cols), data = data, allow_rename = FALSE, error_call = error_call ) cols <- out[cols] cols <- cols[map_lgl(cols, is.data.frame)] cols_names <- names(cols) if (is.null(names_sep) && is_string(names_repair, "check_unique")) { # Only perform checks if user hasn't supplied `names_sep` or `names_repair`. # We let `vec_as_names()` catch any remaining problems. check_inner_inner_duplicate(cols, error_call = error_call) check_outer_inner_duplicate(cols, names(data), error_call = error_call) } if (!is.null(names_sep)) { out[cols_names] <- map2( cols, cols_names, rename_with_names_sep, names_sep = names_sep ) } # Signal to tell `df_list()` to unpack names <- names(out) names[names %in% cols_names] <- "" names(out) <- names size <- vec_size(data) out <- df_list(!!!out, .size = size, .name_repair = "minimal") out <- tibble::new_tibble(out, nrow = size) names(out) <- vec_as_names( names = names(out), repair = names_repair, repair_arg = "names_repair", call = error_call ) reconstruct_tibble(data, out) } check_inner_inner_duplicate <- function(x, error_call = caller_env()) { n <- length(x) if (n == 0L || n == 1L) { # Nothing to duplicate across return(invisible()) } outer <- names(x) x <- unname(x) inner <- map(x, names) inner <- map(inner, unique) outer <- vec_rep_each(outer, times = list_sizes(inner)) inner <- list_unchop(inner, ptype = character()) problems <- vec_duplicate_detect(inner) if (!any(problems)) { return(invisible()) } outer <- vec_slice(outer, problems) inner <- vec_slice(inner, problems) split <- vec_split(outer, inner) inners <- split$key outers <- split$val bullets <- map2_chr(inners, outers, function(inner, outer) { cli::format_inline("{.code {inner}}, within {.code {outer}}.") 
}) bullets <- set_names(bullets, "i") bullets <- cli::format_bullets_raw(bullets) bullets <- set_names(bullets, " ") message <- c( "Can't duplicate names within the affected columns.", x = "These names are duplicated:", bullets, i = "Use `names_sep` to disambiguate using the column name.", i = "Or use `names_repair` to specify a repair strategy." ) cli::cli_abort(message, call = error_call) } check_outer_inner_duplicate <- function(x, outer, error_call = caller_env()) { # Names of unpacked columns will disappear so aren't considered outer <- setdiff(outer, names(x)) inner <- map(x, names) inner <- map(inner, unique) problems <- map(inner, function(x) intersect(x, outer)) problems <- vec_slice(problems, list_sizes(problems) != 0L) if (length(problems) == 0L) { return(invisible()) } bullets <- map2_chr(problems, names(problems), function(inner, outer) { cli::format_inline("{.code {inner}}, from {.code {outer}}.") }) bullets <- set_names(bullets, "i") bullets <- cli::format_bullets_raw(bullets) bullets <- set_names(bullets, " ") message <- c( "Can't duplicate names between the affected columns and the original data.", x = "These names are duplicated:", bullets, i = "Use `names_sep` to disambiguate using the column name.", i = "Or use `names_repair` to specify a repair strategy." ) cli::cli_abort(message, call = error_call) } rename_with_names_sep <- function(x, outer, names_sep) { inner <- names(x) names <- apply_names_sep(outer, inner, names_sep) set_names(x, names) } strip_names <- function(df, base, names_sep) { base <- vec_paste0(base, names_sep) names <- names(df) has_prefix <- regexpr(base, names, fixed = TRUE) == 1L names[has_prefix] <- substr(names[has_prefix], nchar(base) + 1, nchar(names[has_prefix])) set_names(df, names) } tidyr/R/unnest-wider.R0000644000176200001440000001722414363516001014362 0ustar liggesusers#' Unnest a list-column into columns #' #' @description #' `unnest_wider()` turns each element of a list-column into a column. 
It #' is most naturally suited to list-columns where every element is named, #' and the names are consistent from row-to-row. #' `unnest_wider()` preserves the rows of `x` while modifying the columns. #' #' Learn more in `vignette("rectangle")`. #' #' @inheritParams unnest_longer #' @param names_sep If `NULL`, the default, the names will be left #' as is. If a string, the outer and inner names will be pasted together using #' `names_sep` as a separator. #' #' If any values being unnested are unnamed, then `names_sep` must be #' supplied, otherwise an error is thrown. When `names_sep` is supplied, #' names are automatically generated for unnamed values as an increasing #' sequence of integers. #' @param strict A single logical specifying whether or not to apply strict #' vctrs typing rules. If `FALSE`, typed empty values (like `list()` or #' `integer()`) nested within list-columns will be treated like `NULL` and #' will not contribute to the type of the unnested column. This is useful #' when working with JSON, where empty values tend to lose their type #' information and show up as `list()`. 
#' @family rectangling #' @export #' @examples #' df <- tibble( #' character = c("Toothless", "Dory"), #' metadata = list( #' list( #' species = "dragon", #' color = "black", #' films = c( #' "How to Train Your Dragon", #' "How to Train Your Dragon 2", #' "How to Train Your Dragon: The Hidden World" #' ) #' ), #' list( #' species = "blue tang", #' color = "blue", #' films = c("Finding Nemo", "Finding Dory") #' ) #' ) #' ) #' df #' #' # Turn all components of metadata into columns #' df %>% unnest_wider(metadata) #' #' # Choose not to simplify list-cols of length-1 elements #' df %>% unnest_wider(metadata, simplify = FALSE) #' df %>% unnest_wider(metadata, simplify = list(color = FALSE)) #' #' # You can also widen unnamed list-cols: #' df <- tibble( #' x = 1:3, #' y = list(NULL, 1:3, 4:5) #' ) #' # but you must supply `names_sep` to do so, which generates automatic names: #' df %>% unnest_wider(y, names_sep = "_") #' #' # 0-length elements --------------------------------------------------------- #' # The defaults of `unnest_wider()` treat empty types (like `list()`) as `NULL`. 
#' json <- list( #' list(x = 1:2, y = 1:2), #' list(x = list(), y = 3:4), #' list(x = 3L, y = list()) #' ) #' #' df <- tibble(json = json) #' df %>% #' unnest_wider(json) #' #' # To instead enforce strict vctrs typing rules, use `strict` #' df %>% #' unnest_wider(json, strict = TRUE) unnest_wider <- function(data, col, names_sep = NULL, simplify = TRUE, strict = FALSE, names_repair = "check_unique", ptype = NULL, transform = NULL) { check_data_frame(data) check_required(col) check_string(names_sep, allow_null = TRUE) check_bool(strict) error_call <- current_env() cols <- tidyselect::eval_select(enquo(col), data, allow_rename = FALSE) col_names <- names(cols) for (i in seq_along(cols)) { col <- cols[[i]] col_name <- col_names[[i]] data[[col]] <- col_to_wide( col = data[[col]], name = col_name, strict = strict, names_sep = names_sep ) } data <- unchop(data, all_of(col_names), error_call = error_call) for (i in seq_along(cols)) { col <- cols[[i]] data[[col]] <- df_simplify( x = data[[col]], ptype = ptype, transform = transform, simplify = simplify ) } unpack( data = data, cols = all_of(col_names), names_repair = names_repair, error_call = error_call ) } # Converts a column of any type to a `list_of` col_to_wide <- function(col, name, strict, names_sep, error_call = caller_env()) { if (!vec_is_list(col)) { ptype <- vec_ptype(col) col <- vec_chop(col) col <- new_list_of(col, ptype = ptype) } # If we don't have a list_of, then a `NULL` `col_ptype` will get converted to # a 1 row, 0 col tibble for `elt_ptype` col_ptype <- list_of_ptype(col) elt_ptype <- elt_to_wide(col_ptype, name = name, strict = strict, names_sep = names_sep, error_call = error_call) elt_ptype <- vec_ptype(elt_ptype) # Avoid expensive dispatch from `[[.list_of` out <- tidyr_new_list(col) out <- with_indexed_errors( map( out, function(x) elt_to_wide( x = x, name = name, strict = strict, names_sep = names_sep, error_call = NULL ) ), message = function(cnd) { c( i = cli::format_inline("In column: {.code 
{name}}."), i = cli::format_inline("In row: {cnd$location}.") ) }, .error_call = error_call ) # In the sole case of a list_of, we can be sure that the # elements of `out` will all be of the same type. Otherwise, # - If `col` isn't a list-of, we don't know the element type. # - If `col` is a list-of but not one with a data frame ptype, then we # don't actually know if all elements have the same ptype, because the # number of resulting columns per element depends on that element's size. has_identical_elements <- is_list_of(col) && is.data.frame(col_ptype) if (has_identical_elements) { ptype <- elt_ptype } else { ptype <- vec_ptype_common(elt_ptype, !!!out) out <- vec_cast_common(!!!out, .to = ptype) } out <- new_list_of(out, ptype = ptype) out } # Convert a list element to a wide tibble with: # - 1 row # - N cols, where `N = vec_size(x)` # - Column names come from `vec_names(x)` # - Data frames are treated specially. They are treated like heterogeneous lists # where we know the type of each list element. # - When !strict, lists are treated specially as well. Any typed empty elements # are replaced with `NULL` so their type doesn't interfere with the final # common type. This is extremely useful for JSON data, # where round tripping a typed empty cell results in an empty `list()` that # won't be able to combine with other typed non-empty cells. However, it # can create inconsistencies with vctrs typing rules. 
elt_to_wide <- function(x, name, strict, names_sep, error_call = caller_env()) { if (is.null(x)) { x <- list() } if (!vec_is(x)) { cli::cli_abort( "List-column must only contain vectors.", call = error_call ) } if (is.data.frame(x)) { # Extremely special case for data.frames, # which we want to treat like lists where we know the type of each element x <- tidyr_new_list(x) x <- map(x, list_of) names <- names2(x) x <- set_names(x, NULL) } else { if (!strict && vec_is_list(x)) { empty <- list_sizes(x) == 0L if (any(empty)) { # Can't use vec_assign(), see https://github.com/r-lib/vctrs/issues/1424 x[empty] <- list(NULL) } } names <- vec_names2(x) x <- vec_set_names(x, NULL) x <- vec_chop(x) } empty <- names == "" any_empty <- any(empty) if (is.null(names_sep)) { if (any_empty) { stop_use_names_sep(error_call = error_call) } } else { if (any_empty) { names[empty] <- as.character(which(empty)) } names <- apply_names_sep(name, names, names_sep) } x <- set_names(x, names) x <- new_data_frame(x, n = 1L) x } stop_use_names_sep <- function(error_call = caller_env()) { message <- c( "Can't unnest elements with missing names.", i = "Supply {.arg names_sep} to generate automatic names." ) cli::cli_abort(message, call = error_call) } tidyr/R/utils.R0000644000176200001440000002246214553565525013116 0ustar liggesusers#' Pipe operator #' #' See \code{\link[magrittr]{%>%}} for more details. 
#' #' @name %>% #' @rdname pipe #' @keywords internal #' @export #' @importFrom magrittr %>% #' @usage lhs \%>\% rhs NULL # https://github.com/r-lib/vctrs/issues/211 reconstruct_tibble <- function(input, output, ungrouped_vars = character()) { if (inherits(input, "grouped_df")) { old_groups <- dplyr::group_vars(input) new_groups <- intersect(setdiff(old_groups, ungrouped_vars), names(output)) dplyr::grouped_df(output, new_groups, drop = dplyr::group_by_drop_default(input)) } else if (inherits(input, "tbl_df")) { # Assume name repair carried out elsewhere as_tibble(output, .name_repair = "minimal") } else { output } } seq_nrow <- function(x) seq_len(nrow(x)) seq_ncol <- function(x) seq_len(ncol(x)) last <- function(x) x[[length(x)]] #' Legacy name repair #' #' Ensures all column names are unique using the approach found in #' tidyr 0.8.3 and earlier. Only use this function if you want to preserve #' the naming strategy, otherwise you're better off adopting the new #' tidyverse standard with `name_repair = "universal"` #' #' @param nms Character vector of names #' @param prefix prefix Prefix to use for unnamed column #' @param sep Separator to use between name and unique suffix #' @keywords internal #' @export #' @examples #' df <- tibble(x = 1:2, y = list(tibble(x = 3:5), tibble(x = 4:7))) #' #' # Doesn't work because it would produce a data frame with two #' # columns called x #' \dontrun{ #' unnest(df, y) #' } #' #' # The new tidyverse standard: #' unnest(df, y, names_repair = "universal") #' #' # The old tidyr approach #' unnest(df, y, names_repair = tidyr_legacy) tidyr_legacy <- function(nms, prefix = "V", sep = "") { if (length(nms) == 0) { return(character()) } blank <- nms == "" nms[!blank] <- make.unique(nms[!blank], sep = sep) new_nms <- setdiff(paste(prefix, seq_along(nms), sep = sep), nms) nms[blank] <- new_nms[seq_len(sum(blank))] nms } tidyr_col_modify <- function(data, cols) { # Implement from first principles to avoid edge cases in # data frame 
methods for `[<-` and `[[<-`. # Assume each element of `cols` has the correct size. check_data_frame(data, .internal = TRUE) if (!is_list(cols)) { cli::cli_abort("`cols` must be a list.", .internal = TRUE) } size <- vec_size(data) data <- tidyr_new_list(data) names <- names(cols) for (i in seq_along(cols)) { name <- names[[i]] data[[name]] <- cols[[i]] } # Assume that we can return a bare data frame that will up restored to # a tibble / grouped df as needed elsewhere data <- new_data_frame(data, n = size) data } tidyr_new_list <- function(x) { if (!is_list(x)) { cli::cli_abort("`x` must be a list.", .internal = TRUE) } names <- names(x) if (is.null(names)) { attributes(x) <- NULL } else { attributes(x) <- list(names = names) } x } #' Replace `NULL` list elements #' #' @param x A list, but not a list-of. #' #' @param sizes An integer vector of sizes of the `x` elements. #' #' @param ptype One of: #' #' - `NULL` to fill `NULL` elements with `unspecified()`. #' #' - A `ptype` value to fill `NULL` elements with. #' #' @param size The size of the replacement value to fill with. Commonly either #' `0L` or `1L`. #' #' @returns #' A named list holding `x` with `NULL`s replaced, and `sizes` updated to #' have the correct size where `NULL` elements were replaced. #' #' @noRd list_replace_null <- function(x, sizes, ..., ptype = NULL, size = 1L) { check_dots_empty0(...) if (!vec_is_list(x)) { cli::cli_abort("`x` must be a list.", .internal = TRUE) } if (is_list_of(x)) { cli::cli_abort("`x` can't be a list-of. 
Unclass first and provide `ptype`.", .internal = TRUE) } if (vec_any_missing(x)) { null <- vec_detect_missing(x) null <- which(null) if (is_null(ptype)) { replacement <- list(unspecified(size)) } else { replacement <- list(vec_init(ptype, size)) } x <- vec_assign(x, null, replacement) if (size != 0L) { sizes <- vec_assign(sizes, null, size) } } list(x = x, sizes = sizes) } #' Replace empty typed list elements #' #' @details #' Importantly, `x` can't contain any `NULL` values. These must have already #' been processed by `list_replace_null()`. This is not checked for. #' #' @param x A list, but not a list-of. #' #' @param sizes An integer vector of sizes of the `x` elements. #' #' @param ptype One of: #' #' - `NULL` to initialize empty typed elements with `size`d equivalents of #' themselves. #' #' - A `ptype` value to replace all empty typed elements with. Useful if you #' know `x` is a homogeneous list. #' #' @param size The size of the replacement value to fill with. Can't be `0L`, #' as that makes no sense. #' #' @returns #' A named list holding `x` with empty typed elements replaced, and `sizes` #' updated to have the correct size where empty typed elements were replaced. #' #' @noRd list_replace_empty_typed <- function(x, sizes, ..., ptype = NULL, size = 1L) { check_dots_empty0(...) if (!vec_is_list(x)) { cli::cli_abort("`x` must be a list.", .internal = TRUE) } if (is_list_of(x)) { cli::cli_abort("`x` can't be a list-of. 
Unclass first.", .internal = TRUE) } if (size == 0L) { cli::cli_abort("`size` should never be 0.", .internal = TRUE) } empty <- sizes == 0L if (any(empty)) { empty <- which(empty) if (is_null(ptype)) { replacement <- map(vec_slice(x, empty), function(elt) vec_init(elt, size)) } else { replacement <- list(vec_init(ptype, size)) } x <- vec_assign(x, empty, replacement) sizes <- vec_assign(sizes, empty, size) } list(x = x, sizes = sizes) } # TODO: Remove after https://github.com/r-lib/vctrs/issues/1762 is implemented list_all_vectors2 <- function(x) { if (vec_any_missing(x)) { missing <- vec_detect_missing(x) x <- vec_slice(x, !missing) } list_all_vectors(x) } list_of_ptype <- function(x) { ptype <- attr(x, "ptype", exact = TRUE) # ptypes should always be unnamed, but this isn't guaranteed right now. # See https://github.com/r-lib/vctrs/pull/1020#discussion_r411327472 ptype <- vec_set_names(ptype, NULL) ptype } apply_names_sep <- function(outer, inner, names_sep) { # Need to avoid `paste0()` recycling issue. Not using `vec_paste0()` # because that is too slow to be applied to each element (#1427). # `outer` and `names_sep` are required to be length 1, # so we only need to check `inner`. if (length(inner) == 0L) { character() } else { paste0(outer, names_sep, inner) } } vec_paste0 <- function(...) { # Use tidyverse recycling rules to avoid size zero recycling bugs args <- vec_recycle_common(...) 
exec(paste0, !!!args) } check_data_frame <- function(x, ..., arg = caller_arg(x), call = caller_env()) { if (!is.data.frame(x)) { cli::cli_abort("{.arg {arg}} must be a data frame, not {.obj_type_friendly {x}}.", ..., call = call) } } check_unique_names <- function(x, arg = caller_arg(x), call = caller_env()) { if (length(x) > 0L && !is_named(x)) { cli::cli_abort("All elements of {.arg {arg}} must be named.", call = call) } if (vec_duplicate_any(names(x))) { cli::cli_abort("The names of {.arg {arg}} must be unique.", call = call) } } check_list_of_ptypes <- function(x, names, arg = caller_arg(x), call = caller_env()) { if (is.null(x)) { set_names(list(), character()) } else if (vec_is(x) && vec_is_empty(x)) { rep_named(names, list(x)) } else if (vec_is_list(x)) { check_unique_names(x, arg = arg, call = call) # Silently drop user supplied names not found in the data x[intersect(names(x), names)] } else { cli::cli_abort( "{.arg {arg}} must be `NULL`, an empty ptype, or a named list of ptypes.", call = call ) } } check_list_of_functions <- function(x, names, arg = caller_arg(x), call = caller_env()) { if (is.null(x)) { x <- set_names(list(), character()) } else if (is.function(x) || is_formula(x)) { x <- rep_named(names, list(x)) } else if (!vec_is_list(x)) { cli::cli_abort( "{.arg {arg}} must be `NULL`, a function, or a named list of functions.", call = call ) } check_unique_names(x, arg = arg, call = call) x_names <- names(x) for (i in seq_along(x)) { x[[i]] <- as_function(x[[i]], arg = glue("{arg}${x_names[[i]]}"), call = call) } # Silently drop user supplied names not found in the data x <- x[intersect(x_names, names)] x } check_list_of_bool <- function(x, names, arg = caller_arg(x), call = caller_env()) { if (is_bool(x)) { rep_named(names, x) } else if (vec_is_list(x)) { check_unique_names(x, arg = arg, call = call) x[intersect(names(x), names)] } else { cli::cli_abort( "{.arg {arg}} must be a list or a single `TRUE` or `FALSE`.", call = call ) } } 
with_indexed_errors <- function(expr, message, ..., .error_call = caller_env(), .frame = caller_env()) { try_fetch( expr, purrr_error_indexed = function(cnd) { message <- message(cnd) abort(message, ..., call = .error_call, parent = cnd$parent, .frame = .frame) } ) } int_max <- function(x, default = -Inf) { if (length(x) == 0) { default } else { max(x) } } tidyr/R/zzz.R0000644000176200001440000000007214323620576012575 0ustar liggesusers.onLoad <- function(libname, pkgname) { run_on_load() } tidyr/R/gather.R0000644000176200001440000001420514325573777013231 0ustar liggesusers#' Gather columns into key-value pairs #' #' @description #' `r lifecycle::badge("superseded")` #' #' Development on `gather()` is complete, and for new code we recommend #' switching to `pivot_longer()`, which is easier to use, more featureful, and #' still under active development. #' `df %>% gather("key", "value", x, y, z)` is equivalent to #' `df %>% pivot_longer(c(x, y, z), names_to = "key", values_to = "value")` #' #' See more details in `vignette("pivot")`. #' #' @section Rules for selection: #' #' Arguments for selecting columns are passed to [tidyselect::vars_select()] #' and are treated specially. Unlike other verbs, selecting functions make a #' strict distinction between data expressions and context expressions. #' #' * A data expression is either a bare name like `x` or an expression #' like `x:y` or `c(x, y)`. In a data expression, you can only refer #' to columns from the data frame. #' #' * Everything else is a context expression in which you can only #' refer to objects that you have defined with `<-`. #' #' For instance, `col1:col3` is a data expression that refers to data #' columns, while `seq(start, end)` is a context expression that #' refers to objects from the contexts. #' #' If you need to refer to contextual objects from a data expression, you can #' use `all_of()` or `any_of()`. 
These functions are used to select #' data-variables whose names are stored in a env-variable. For instance, #' `all_of(a)` selects the variables listed in the character vector `a`. #' For more details, see the [tidyselect::select_helpers()] documentation. #' #' @inheritParams expand #' @param key,value Names of new key and value columns, as strings or #' symbols. #' #' This argument is passed by expression and supports #' [quasiquotation][rlang::quasiquotation] (you can unquote strings #' and symbols). The name is captured from the expression with #' [rlang::ensym()] (note that this kind of interface where #' symbols do not represent actual objects is now discouraged in the #' tidyverse; we support it here for backward compatibility). #' @param ... A selection of columns. If empty, all variables are #' selected. You can supply bare variable names, select all #' variables between x and z with `x:z`, exclude y with `-y`. For #' more options, see the [dplyr::select()] documentation. See also #' the section on selection rules below. #' @param na.rm If `TRUE`, will remove rows from output where the #' value column is `NA`. #' @param convert If `TRUE` will automatically run #' [type.convert()] on the key column. This is useful if the column #' types are actually numeric, integer, or logical. #' @param factor_key If `FALSE`, the default, the key values will be #' stored as a character vector. If `TRUE`, will be stored as a factor, #' which preserves the original ordering of the columns. 
#' @inheritParams gather_ #' @export #' @examples #' # From https://stackoverflow.com/questions/1181060 #' stocks <- tibble( #' time = as.Date("2009-01-01") + 0:9, #' X = rnorm(10, 0, 1), #' Y = rnorm(10, 0, 2), #' Z = rnorm(10, 0, 4) #' ) #' #' gather(stocks, "stock", "price", -time) #' stocks %>% gather("stock", "price", -time) #' #' # get first observation for each Species in iris data -- base R #' mini_iris <- iris[c(1, 51, 101), ] #' # gather Sepal.Length, Sepal.Width, Petal.Length, Petal.Width #' gather(mini_iris, key = "flower_att", value = "measurement", #' Sepal.Length, Sepal.Width, Petal.Length, Petal.Width) #' # same result but less verbose #' gather(mini_iris, key = "flower_att", value = "measurement", -Species) gather <- function(data, key = "key", value = "value", ..., na.rm = FALSE, convert = FALSE, factor_key = FALSE) { check_dots_unnamed() UseMethod("gather") } #' @export gather.data.frame <- function(data, key = "key", value = "value", ..., na.rm = FALSE, convert = FALSE, factor_key = FALSE) { key_var <- as_string(ensym(key)) value_var <- as_string(ensym(value)) quos <- quos(...) if (is_empty(quos)) { gather_vars <- setdiff(names(data), c(key_var, value_var)) } else { gather_vars <- unname(tidyselect::vars_select(tbl_vars(data), !!!quos)) } if (is_empty(gather_vars)) { return(data) } gather_idx <- match(gather_vars, names(data)) id_idx <- setdiff(seq_along(data), gather_idx) dup_indx <- match(c(key_var, value_var), names(data)) id_idx <- setdiff(id_idx, dup_indx) ## Get the attributes if common, NULL if not. 
args <- normalize_melt_arguments(data, gather_idx) valueAsFactor <- "factor" %in% class(args$attr_template) out <- melt_dataframe( data, id_idx - 1L, gather_idx - 1L, as.character(key_var), as.character(value_var), args$attr_template, args$factorsAsStrings, as.logical(valueAsFactor), as.logical(factor_key) ) if (na.rm && anyNA(out)) { missing <- is.na(out[[value_var]]) out <- out[!missing, ] } if (convert) { out[[key_var]] <- type.convert(as.character(out[[key_var]]), as.is = TRUE) } reconstruct_tibble(data, out, gather_vars) } # Functions from reshape2 ------------------------------------------------- ## Get the attributes if common, NULL if not. normalize_melt_arguments <- function(data, measure.ind) { measure.attributes <- map(measure.ind, function(i) { attributes(data[[i]]) }) ## Determine if all measure.attributes are equal measure.attrs.equal <- all_identical(measure.attributes) if (measure.attrs.equal) { attr_template <- data[[measure.ind[1]]] } else { warn(glue( "attributes are not identical across measure variables; they will be dropped" )) attr_template <- NULL } ## If we are going to be coercing any factors to strings, we don't want to ## copy the attributes any.factors <- any(map_lgl(measure.ind, function(i) is.factor(data[[i]]))) if (any.factors) { attr_template <- NULL } list( attr_template = attr_template, factorsAsStrings = TRUE ) } all_identical <- function(xs) { if (length(xs) <= 1) { return(TRUE) } for (i in seq(2, length(xs))) { if (!identical(xs[[1]], xs[[i]])) { return(FALSE) } } TRUE } tidyr/R/separate-rows.R0000644000176200001440000000337114360013543014530 0ustar liggesusers#' Separate a collapsed column into multiple rows #' #' @description #' `r lifecycle::badge("superseded")` #' #' `separate_rows()` has been superseded in favour of [separate_longer_delim()] #' because it has a more consistent API with other separate functions. #' Superseded functions will not go away, but will only receive critical bug #' fixes. 
#' #' If a variable contains observations with multiple delimited values, #' `separate_rows()` separates the values and places each one in its own row. #' #' @inheritParams drop_na #' @inheritParams gather #' @param sep Separator delimiting collapsed values. #' @param ... <[`tidy-select`][tidyr_tidy_select]> Columns to separate across #' multiple rows #' @export #' @examples #' df <- tibble( #' x = 1:3, #' y = c("a", "d,e,f", "g,h"), #' z = c("1", "2,3,4", "5,6") #' ) #' separate_rows(df, y, z, convert = TRUE) #' #' # Now recommended #' df %>% #' separate_longer_delim(c(y, z), delim = ",") separate_rows <- function(data, ..., sep = "[^[:alnum:].]+", convert = FALSE) { check_dots_unnamed() UseMethod("separate_rows") } #' @export separate_rows.data.frame <- function(data, ..., sep = "[^[:alnum:].]+", convert = FALSE) { check_string(sep) check_bool(convert) vars <- tidyselect::eval_select(expr(c(...)), data, allow_rename = FALSE) vars <- names(vars) out <- purrr::modify_at(data, vars, str_split_n, pattern = sep) out <- unchop(as_tibble(out), any_of(vars), error_call = current_env()) if (convert) { out[vars] <- map(out[vars], type.convert, as.is = TRUE) } reconstruct_tibble(data, out, vars) } tidyr/R/tidyr.R0000644000176200001440000000265314360013543013071 0ustar liggesusers#' @keywords internal #' @import rlang #' @import vctrs #' @importFrom glue glue #' @importFrom purrr accumulate discard every keep map map2 map2_chr #' map2_dbl map2_df map2_int map2_lgl map_at map_chr map_dbl map_df #' map_if map_int map_lgl pmap pmap_chr pmap_dbl pmap_df pmap_int #' pmap_lgl imap reduce some transpose #' @importFrom dplyr tbl_vars #' @importFrom utils type.convert #' @importFrom lifecycle deprecated #' @useDynLib tidyr, .registration = TRUE "_PACKAGE" on_load(local_use_cli()) the <- new_environment() globalVariables(c(".", "name", "value")) #' @importFrom tibble tribble #' @export tibble::tribble #' @importFrom tibble tibble #' @export tibble::tibble #' @importFrom tibble 
as_tibble #' @export tibble::as_tibble #' @aliases select_helpers #' @importFrom tidyselect all_of #' @export tidyselect::all_of #' @importFrom tidyselect any_of #' @export tidyselect::any_of #' @importFrom tidyselect contains #' @export tidyselect::contains #' @importFrom tidyselect ends_with #' @export tidyselect::ends_with #' @importFrom tidyselect everything #' @export tidyselect::everything #' @importFrom tidyselect last_col #' @export tidyselect::last_col #' @importFrom tidyselect matches #' @export tidyselect::matches #' @importFrom tidyselect num_range #' @export tidyselect::num_range #' @importFrom tidyselect one_of #' @export tidyselect::one_of #' @importFrom tidyselect starts_with #' @export tidyselect::starts_with tidyr/R/drop-na.R0000644000176200001440000000220614325573777013315 0ustar liggesusers#' Drop rows containing missing values #' #' `drop_na()` drops rows where any column specified by `...` contains a #' missing value. #' #' @details #' Another way to interpret `drop_na()` is that it only keeps the "complete" #' rows (where no rows contain missing values). Internally, this completeness is #' computed through [vctrs::vec_detect_complete()]. #' #' @param data A data frame. #' @param ... <[`tidy-select`][tidyr_tidy_select]> Columns to inspect for #' missing values. If empty, all columns are used. #' @examples #' df <- tibble(x = c(1, 2, NA), y = c("a", NA, "b")) #' df %>% drop_na() #' df %>% drop_na(x) #' #' vars <- "y" #' df %>% drop_na(x, any_of(vars)) #' @export drop_na <- function(data, ...) { check_dots_unnamed() UseMethod("drop_na") } #' @export drop_na.data.frame <- function(data, ...) { dots <- enquos(...) 
if (is_empty(dots)) { # Use all columns if no `...` are supplied cols <- data } else { vars <- tidyselect::eval_select(expr(c(!!!dots)), data, allow_rename = FALSE) cols <- data[vars] } loc <- vec_detect_complete(cols) out <- vec_slice(data, loc) reconstruct_tibble(data, out) } tidyr/R/doc-params.R0000644000176200001440000000727214325573777014013 0ustar liggesusers#' Argument type: data-masking #' #' @description #' This page describes the `` argument modifier which #' indicates that the argument uses **data masking**, a sub-type of #' tidy evaluation. If you've never heard of tidy evaluation before, #' start with the practical introduction in #' then #' then read more about the underlying theory in #' . #' #' # Key techniques #' #' * To allow the user to supply the column name in a function argument, #' embrace the argument, e.g. `filter(df, {{ var }})`. #' #' ```R #' dist_summary <- function(df, var) { #' df %>% #' summarise(n = n(), min = min({{ var }}), max = max({{ var }})) #' } #' mtcars %>% dist_summary(mpg) #' mtcars %>% group_by(cyl) %>% dist_summary(mpg) #' ``` #' #' * To work with a column name recorded as a string, use the `.data` #' pronoun, e.g. `summarise(df, mean = mean(.data[[var]]))`. #' #' ```R #' for (var in names(mtcars)) { #' mtcars %>% count(.data[[var]]) %>% print() #' } #' #' lapply(names(mtcars), function(var) mtcars %>% count(.data[[var]])) #' ``` #' #' * To suppress `R CMD check` `NOTE`s about unknown variables #' use `.data$var` instead of `var`: #' #' ```R #' # has NOTE #' df %>% mutate(z = x + y) #' #' # no NOTE #' df %>% mutate(z = .data$x + .data$y) #' ``` #' #' You'll also need to import `.data` from rlang with (e.g.) #' `@importFrom rlang .data`. #' #' # Dot-dot-dot (...) #' #' `...` automatically provides indirection, so you can use it as is #' (i.e. without embracing) inside a function: #' #' ``` #' grouped_mean <- function(df, var, ...) { #' df %>% #' group_by(...) 
%>% #' summarise(mean = mean({{ var }})) #' } #' ``` #' #' You can also use `:=` instead of `=` to enable a glue-like syntax for #' creating variables from user supplied data: #' #' ``` #' var_name <- "l100km" #' mtcars %>% mutate("{var_name}" := 235 / mpg) #' #' summarise_mean <- function(df, var) { #' df %>% #' summarise("mean_of_{{var}}" := mean({{ var }})) #' } #' mtcars %>% group_by(cyl) %>% summarise_mean(mpg) #' ``` #' #' Learn more in . #' #' @keywords internal #' @name tidyr_data_masking NULL #' Argument type: tidy-select #' #' @description #' This page describes the `` argument modifier which #' indicates that the argument uses **tidy selection**, a sub-type of #' tidy evaluation. If you've never heard of tidy evaluation before, #' start with the practical introduction in #' then #' then read more about the underlying theory in #' . #' #' # Overview of selection features #' #' ```{r, child = "man/rmd/overview.Rmd"} #' ``` #' #' # Key techniques #' #' * If you want the user to supply a tidyselect specification in a #' function argument, you need to tunnel the selection through the function #' argument. This is done by embracing the function argument `{{ }}`, #' e.g `unnest(df, {{ vars }})`. #' #' * If you have a character vector of column names, use `all_of()` #' or `any_of()`, depending on whether or not you want unknown variable #' names to cause an error, e.g `unnest(df, all_of(vars))`, #' `unnest(df, !any_of(vars))`. #' #' * To suppress `R CMD check` `NOTE`s about unknown variables use `"var"` #' instead of `var`: #' #' ```R #' # has NOTE #' df %>% select(x, y, z) #' #' # no NOTE #' df %>% select("x", "y", "z") #' ``` #' #' @keywords internal #' @name tidyr_tidy_select NULL tidyr/R/append.R0000644000176200001440000000305314360013543013200 0ustar liggesusers#' Append new columns (`y`) to an existing data frame (`x`) #' #' @details #' If columns are duplicated between `x` and `y`, then `y` columns are #' silently preferred. #' #' @param x A data frame. 
#' @param y A named list of columns to append. Each column must be the same size #' as `x`. #' @param after One of: #' - `NULL` to place `y` at the end. #' - A single column name from `x` to place `y` after. #' - A single integer position (including `0L`) to place `y` after. #' @param remove Whether or not to remove the column corresponding to `after` #' from `x`. #' #' @returns #' A bare data frame containing the columns from `x` and any appended columns #' from `y`. The type of `x` is not maintained. It is up to the caller to #' restore the type of `x` with `reconstruct_tibble()`. #' #' @noRd df_append <- function(x, y, after = NULL, remove = FALSE) { size <- vec_size(x) row_names <- .row_names_info(x, type = 0L) x <- tidyr_new_list(x) y <- tidyr_new_list(y) x_names <- names(x) y_names <- names(y) n <- length(x) if (is.null(after)) { after <- n } else if (is.character(after)) { after <- match(after, x_names) } check_number_whole(after, min = 0L, max = n, .internal = TRUE) if (remove) { lhs <- seq2(1L, after - 1L) } else { lhs <- seq2(1L, after) } rhs <- seq2(after + 1L, n) # Prefer `y` if names are duplicated lhs <- setdiff(x_names[lhs], y_names) rhs <- setdiff(x_names[rhs], y_names) out <- vec_c(x[lhs], y, x[rhs]) out <- new_data_frame(out, n = size, row.names = row_names) out } tidyr/R/spread.R0000644000176200001440000001310014360013543013201 0ustar liggesusers#' Spread a key-value pair across multiple columns #' #' @description #' `r lifecycle::badge("superseded")` #' #' Development on `spread()` is complete, and for new code we recommend #' switching to `pivot_wider()`, which is easier to use, more featureful, and #' still under active development. #' `df %>% spread(key, value)` is equivalent to #' `df %>% pivot_wider(names_from = key, values_from = value)` #' #' See more details in `vignette("pivot")`. #' #' @param data A data frame. #' @param key,value <[`tidy-select`][tidyr_tidy_select]> Columns to use #' for `key` and `value`. 
#' @param fill If set, missing values will be replaced with this value. Note #' that there are two types of missingness in the input: explicit missing #' values (i.e. `NA`), and implicit missings, rows that simply aren't #' present. Both types of missing value will be replaced by `fill`. #' @param convert If `TRUE`, [type.convert()] with \code{asis = #' TRUE} will be run on each of the new columns. This is useful if the value #' column was a mix of variables that was coerced to a string. If the class of #' the value column was factor or date, note that will not be true of the new #' columns that are produced, which are coerced to character before type #' conversion. #' @param drop If `FALSE`, will keep factor levels that don't appear in the #' data, filling in missing combinations with `fill`. #' @param sep If `NULL`, the column names will be taken from the values of #' `key` variable. If non-`NULL`, the column names will be given #' by `""`. #' @export #' @examples #' stocks <- tibble( #' time = as.Date("2009-01-01") + 0:9, #' X = rnorm(10, 0, 1), #' Y = rnorm(10, 0, 2), #' Z = rnorm(10, 0, 4) #' ) #' stocksm <- stocks %>% gather(stock, price, -time) #' stocksm %>% spread(stock, price) #' stocksm %>% spread(time, price) #' #' # Spread and gather are complements #' df <- tibble(x = c("a", "b"), y = c(3, 4), z = c(5, 6)) #' df %>% #' spread(x, y) %>% #' gather("x", "y", a:b, na.rm = TRUE) #' #' # Use 'convert = TRUE' to produce variables of mixed type #' df <- tibble( #' row = rep(c(1, 51), each = 3), #' var = rep(c("Sepal.Length", "Species", "Species_num"), 2), #' value = c(5.1, "setosa", 1, 7.0, "versicolor", 2) #' ) #' df %>% spread(var, value) %>% str() #' df %>% spread(var, value, convert = TRUE) %>% str() spread <- function(data, key, value, fill = NA, convert = FALSE, drop = TRUE, sep = NULL) { UseMethod("spread") } #' @export spread.data.frame <- function(data, key, value, fill = NA, convert = FALSE, drop = TRUE, sep = NULL) { key_var <- 
tidyselect::vars_pull(names(data), !!enquo(key)) value_var <- tidyselect::vars_pull(names(data), !!enquo(value)) col <- data[key_var] col_id <- id(col, drop = drop) col_labels <- split_labels(col, col_id, drop = drop) rows <- data[setdiff(names(data), c(key_var, value_var))] if (ncol(rows) == 0 && nrow(rows) > 0) { # Special case when there's only one row row_id <- structure(1L, n = 1L) row_labels <- as.data.frame(matrix(nrow = 1, ncol = 0)) } else { row_id <- id(rows, drop = drop) row_labels <- split_labels(rows, row_id, drop = drop) rownames(row_labels) <- NULL } overall <- id(list(col_id, row_id), drop = FALSE) n <- attr(overall, "n") # Check that each output value occurs in unique location if (anyDuplicated(overall)) { groups <- split(seq_along(overall), overall) groups <- groups[map_int(groups, length) > 1] shared <- sum(map_int(groups, length)) str <- map_chr(groups, function(x) paste0(x, collapse = ", ")) cli::cli_abort(c( "Each row of output must be identified by a unique combination of keys.", i = "Keys are shared for {shared} rows", set_names(str, "*") )) } # Add in missing values, if necessary if (length(overall) < n) { overall <- match(seq_len(n), overall, nomatch = NA) } else { overall <- order(overall) } value <- data[[value_var]] ordered <- value[overall] if (!is.na(fill)) { ordered[is.na(ordered)] <- fill } if (convert && !is_character(ordered)) { ordered <- as.character(ordered) } dim(ordered) <- c(attr(row_id, "n"), attr(col_id, "n")) colnames(ordered) <- enc2utf8(col_names(col_labels, sep = sep)) ordered <- as_tibble_matrix(ordered) if (convert) { ordered[] <- map(ordered, type.convert, as.is = TRUE) } out <- df_append(row_labels, ordered) reconstruct_tibble(data, out, c(key_var, value_var)) } col_names <- function(x, sep = NULL) { names <- as.character(x[[1]]) if (is_null(sep)) { if (length(names) == 0) { # ifelse will return logical() character() } else { ifelse(are_na(names), "", names) } } else { paste(names(x)[[1]], names, sep = sep) } } 
as_tibble_matrix <- function(x) { # getS3method() only available in R >= 3.3 get("as_tibble.matrix", asNamespace("tibble"), mode = "function")(x) } split_labels <- function(df, id, drop = TRUE) { if (length(df) == 0) { return(df) } if (drop) { representative <- match(sort(unique(id)), id) out <- df[representative, , drop = FALSE] rownames(out) <- NULL out } else { unique_values <- map(df, ulevels) rev(expand.grid(rev(unique_values), stringsAsFactors = FALSE)) } } ulevels <- function(x) { if (is.factor(x)) { orig_levs <- levels(x) x <- addNA(x, ifany = TRUE) levs <- levels(x) factor(levs, levels = orig_levs, ordered = is.ordered(x), exclude = NULL) } else if (is.list(x)) { unique(x) } else { sort(unique(x), na.last = TRUE) } } tidyr/R/data.R0000644000176200001440000002010214520546617012647 0ustar liggesusers#' World Health Organization TB data #' #' @description #' A subset of data from the World Health Organization Global Tuberculosis #' Report, and accompanying global populations. `who` uses the original #' codes from the World Health Organization. The column names for columns #' 5 through 60 are made by combining `new_` with: #' #' * the method of diagnosis (`rel` = relapse, `sn` = negative pulmonary #' smear, `sp` = positive pulmonary smear, `ep` = extrapulmonary), #' * gender (`f` = female, `m` = male), and #' * age group (`014` = 0-14 yrs of age, `1524` = 15-24, `2534` = 25-34, #' `3544` = 35-44 years of age, `4554` = 45-54, `5564` = 55-64, #' `65` = 65 years or older). #' #' `who2` is a lightly modified version that makes teaching the basics #' easier by tweaking the variables to be slightly more consistent and #' dropping `iso2` and `iso3`. `newrel` is replaced by `new_rel`, and a #' `_` is added after the gender. 
#' #' @format ## `who` #' A data frame with 7,240 rows and 60 columns: #' \describe{ #' \item{country}{Country name} #' \item{iso2, iso3}{2 & 3 letter ISO country codes} #' \item{year}{Year} #' \item{new_sp_m014 - new_rel_f65}{Counts of new TB cases recorded by group. #' Column names encode three variables that describe the group.} #' } #' @source "who" #' @rdname who #' @format ## `who2` #' A data frame with 7,240 rows and 58 columns. "who2" #' @rdname who #' @format ## `population` #' A data frame with 4,060 rows and three columns: #' \describe{ #' \item{country}{Country name} #' \item{year}{Year} #' \item{population}{Population} #' } "population" #' Example tabular representations #' #' Data sets that demonstrate multiple ways to layout the same tabular data. #' #' `table1`, `table2`, `table3`, `table4a`, `table4b`, #' and `table5` all display the number of TB cases documented by the World #' Health Organization in Afghanistan, Brazil, and China between 1999 and 2000. #' The data contains values associated with four variables (country, year, #' cases, and population), but each table organizes the values in a different #' layout. #' #' The data is a subset of the data contained in the World Health #' Organization Global Tuberculosis Report #' #' @source #' @format NULL "table1" #' @rdname table1 #' @format NULL "table2" #' @rdname table1 #' @format NULL "table3" #' @rdname table1 #' @format NULL "table4a" #' @rdname table1 #' @format NULL "table4b" #' @rdname table1 #' @format NULL "table5" #' Some data about the Smith family #' #' A small demo dataset describing John and Mary Smith. #' #' @format A data frame with 2 rows and 5 columns. "smiths" #' US rent and income data #' #' Captured from the 2017 American Community Survey using the tidycensus #' package. 
#' #' @format A dataset with variables: #' \describe{ #' \item{GEOID}{FIP state identifier} #' \item{NAME}{Name of state} #' \item{variable}{Variable name: income = median yearly income, #' rent = median monthly rent} #' \item{estimate}{Estimated value} #' \item{moe}{90% margin of error} #' } "us_rent_income" #' Fish encounters #' #' Information about fish swimming down a river: each station represents an #' autonomous monitor that records if a tagged fish was seen at that location. #' Fish travel in one direction (migrating downstream). Information about #' misses is just as important as hits, but is not directly recorded in this #' form of the data. #' #' @source #' Dataset provided by Myfanwy Johnston; more details at #' #' #' @format A dataset with variables: #' \describe{ #' \item{fish}{Fish identifier} #' \item{station}{Measurement station} #' \item{seen}{Was the fish seen? (1 if yes, and true for all rows)} #' } "fish_encounters" #' Population data from the World Bank #' #' Data about population from the World Bank. 
#' #' @source #' Dataset from the World Bank data bank: #' #' @format A dataset with variables: #' \describe{ #' \item{country}{Three letter country code} #' \item{indicator}{Indicator name: `SP.POP.GROW` = population growth, #' `SP.POP.TOTL` = total population, `SP.URB.GROW` = urban population #' growth, `SP.URB.TOTL` = total urban population} #' \item{2000-2018}{Value for each year} #' } "world_bank_pop" #' Pew religion and income survey #' #' @format A dataset with variables: #' \describe{ #' \item{religion}{Name of religion} #' \item{`<$10k`-`Don\'t know/refused`}{Number of respondees with #' income range in column name} #' } #' @source #' Downloaded from #' (downloaded November 2009) "relig_income" #' Completed construction in the US in 2018 #' #' @format A dataset with variables: #' \describe{ #' \item{Year,Month}{Record date} #' \item{`1 unit`, `2 to 4 units`, `5 units or mote`}{Number of completed #' units of each size} #' \item{Northeast,Midwest,South,West}{Number of completed units in each region} #' } #' @source #' Completions of "New Residential Construction" found in Table 5 at #' #' (downloaded March 2019) "construction" #' Song rankings for Billboard top 100 in the year 2000 #' #' @format A dataset with variables: #' \describe{ #' \item{artist}{Artist name} #' \item{track}{Song name} #' \item{date.enter}{Date the song entered the top 100} #' \item{wk1 -- wk76}{Rank of the song in each week after it entered} #' } #' @source #' The "Whitburn" project, , #' (downloaded April 2008) "billboard" #' Household data #' #' This dataset is based on an example in #' `vignette("datatable-reshape", package = "data.table")` #' #' @format A data frame with 5 rows and 5 columns: #' \describe{ #' \item{family}{Family identifier} #' \item{dob_child1}{Date of birth of first child} #' \item{dob_child2}{Date of birth of second child} #' \item{name_child1}{Name of first child}? 
#' \item{name_child2}{Name of second child} #' } "household" #' Data from the Centers for Medicare & Medicaid Services #' #' @description #' Two datasets from public data provided the Centers for Medicare & Medicaid #' Services, . #' #' * `cms_patient_experience` contains some lightly cleaned data from #' "Hospice - Provider Data", which provides a list of hospice agencies #' along with some data on quality of patient care, #' . #' #' * `cms_patient_care` "Doctors and Clinicians Quality Payment Program PY 2020 #' Virtual Group Public Reporting", #' #' #' @examples #' cms_patient_experience %>% #' dplyr::distinct(measure_cd, measure_title) #' #' cms_patient_experience %>% #' pivot_wider( #' id_cols = starts_with("org"), #' names_from = measure_cd, #' values_from = prf_rate #' ) #' #' cms_patient_care %>% #' pivot_wider( #' names_from = type, #' values_from = score #' ) #' #' cms_patient_care %>% #' pivot_wider( #' names_from = measure_abbr, #' values_from = score #' ) #' #' cms_patient_care %>% #' pivot_wider( #' names_from = c(measure_abbr, type), #' values_from = score #' ) #' @format `cms_patient_experience` is a data frame with 500 observations and #' five variables: #' \describe{ #' \item{org_pac_id,org_nm}{Organisation ID and name} #' \item{measure_cd,measure_title}{Measure code and title} #' \item{prf_rate}{Measure performance rate} #' } "cms_patient_experience" #' @format `cms_patient_care` is a data frame with 252 observations and #' five variables: #' \describe{ #' \item{ccn,facility_name}{Facility ID and name} #' \item{measure_abbr}{Abbreviated measurement title, suitable for use as variable name} #' \item{score}{Measure score} #' \item{type}{Whether score refers to the rating out of 100 ("observed"), or #' the maximum possible value of the raw score ("denominator")} #' } #' @rdname cms_patient_experience "cms_patient_care" tidyr/R/unnest-auto.R0000644000176200001440000000420714325573777014242 0ustar liggesusers#' Automatically call `unnest_wider()` or 
`unnest_longer()` #' #' @description #' `unnest_auto()` picks between `unnest_wider()` or `unnest_longer()` #' by inspecting the inner names of the list-col: #' #' * If all elements are unnamed, it uses #' `unnest_longer(indices_include = FALSE)`. #' * If all elements are named, and there's at least one name in #' common across all components, it uses `unnest_wider()`. #' * Otherwise, it falls back to `unnest_longer(indices_include = TRUE)`. #' #' It's handy for very rapid interactive exploration but I don't recommend #' using it in scripts, because it will succeed even if the underlying data #' radically changes. #' #' @inheritParams unnest_longer #' @export #' @param col <[`tidy-select`][tidyr_tidy_select]> List-column to unnest. #' @keywords internal unnest_auto <- function(data, col) { check_required(col) col <- tidyselect::vars_pull(tbl_vars(data), {{ col }}) x <- data[[col]] dir <- guess_dir(x, col) switch(dir, longer = unnest_longer(data, {{ col }}, indices_include = FALSE), longer_idx = unnest_longer(data, {{ col }}, indices_include = TRUE), wider = unnest_wider(data, {{ col }}, names_repair = "unique") ) } guess_dir <- function(x, col) { names <- map(x, names) is_null <- unique(map_lgl(names, is.null)) if (identical(is_null, TRUE)) { # all unnamed code <- glue::glue("unnest_longer({col}, indices_include = FALSE)") reason <- "no element has names" out <- "longer" } else if (identical(is_null, FALSE)) { # all named common <- reduce(names, intersect) n_common <- length(common) if (n_common == 0) { code <- glue::glue("unnest_longer({col}, indices_include = TRUE)") reason <- "elements are named, but have no names in common" out <- "longer_idx" } else { code <- glue::glue("unnest_wider({col})") reason <- glue::glue("elements have {n_common} names in common") out <- "wider" } } else { code <- glue::glue("unnest_longer({col}, indices_include = FALSE)") reason <- "mix of named and unnamed elements" out <- "longer" } message(glue::glue("Using `{code}`; {reason}")) 
out } tidyr/R/compat-obj-type.R0000644000176200001440000001764214323620576014765 0ustar liggesusers# nocov start --- r-lib/rlang compat-obj-type # # Changelog # ========= # # 2022-10-04: # - `obj_type_friendly(value = TRUE)` now shows numeric scalars # literally. # - `stop_friendly_type()` now takes `show_value`, passed to # `obj_type_friendly()` as the `value` argument. # # 2022-10-03: # - Added `allow_na` and `allow_null` arguments. # - `NULL` is now backticked. # - Better friendly type for infinities and `NaN`. # # 2022-09-16: # - Unprefixed usage of rlang functions with `rlang::` to # avoid onLoad issues when called from rlang (#1482). # # 2022-08-11: # - Prefixed usage of rlang functions with `rlang::`. # # 2022-06-22: # - `friendly_type_of()` is now `obj_type_friendly()`. # - Added `obj_type_oo()`. # # 2021-12-20: # - Added support for scalar values and empty vectors. # - Added `stop_input_type()` # # 2021-06-30: # - Added support for missing arguments. # # 2021-04-19: # - Added support for matrices and arrays (#141). # - Added documentation. # - Added changelog. #' Return English-friendly type #' @param x Any R object. #' @param value Whether to describe the value of `x`. Special values #' like `NA` or `""` are always described. #' @param length Whether to mention the length of vectors and lists. #' @return A string describing the type. Starts with an indefinite #' article, e.g. "an integer vector". 
#' @noRd obj_type_friendly <- function(x, value = TRUE) { if (is_missing(x)) { return("absent") } if (is.object(x)) { if (inherits(x, "quosure")) { type <- "quosure" } else { type <- paste(class(x), collapse = "/") } return(sprintf("a <%s> object", type)) } if (!is_vector(x)) { return(.rlang_as_friendly_type(typeof(x))) } n_dim <- length(dim(x)) if (!n_dim) { if (!is_list(x) && length(x) == 1) { if (is_na(x)) { return(switch( typeof(x), logical = "`NA`", integer = "an integer `NA`", double = if (is.nan(x)) { "`NaN`" } else { "a numeric `NA`" }, complex = "a complex `NA`", character = "a character `NA`", .rlang_stop_unexpected_typeof(x) )) } show_infinites <- function(x) { if (x > 0) { "`Inf`" } else { "`-Inf`" } } str_encode <- function(x, width = 30, ...) { if (nchar(x) > width) { x <- substr(x, 1, width - 3) x <- paste0(x, "...") } encodeString(x, ...) } if (value) { if (is.numeric(x) && is.infinite(x)) { return(show_infinites(x)) } if (is.numeric(x) || is.complex(x)) { number <- as.character(round(x, 2)) what <- if (is.complex(x)) "the complex number" else "the number" return(paste(what, number)) } return(switch( typeof(x), logical = if (x) "`TRUE`" else "`FALSE`", character = { what <- if (nzchar(x)) "the string" else "the empty string" paste(what, str_encode(x, quote = "\"")) }, raw = paste("the raw value", as.character(x)), .rlang_stop_unexpected_typeof(x) )) } return(switch( typeof(x), logical = "a logical value", integer = "an integer", double = if (is.infinite(x)) show_infinites(x) else "a number", complex = "a complex number", character = if (nzchar(x)) "a string" else "\"\"", raw = "a raw value", .rlang_stop_unexpected_typeof(x) )) } if (length(x) == 0) { return(switch( typeof(x), logical = "an empty logical vector", integer = "an empty integer vector", double = "an empty numeric vector", complex = "an empty complex vector", character = "an empty character vector", raw = "an empty raw vector", list = "an empty list", .rlang_stop_unexpected_typeof(x) )) } 
} vec_type_friendly(x) } vec_type_friendly <- function(x, length = FALSE) { if (!is_vector(x)) { abort("`x` must be a vector.") } type <- typeof(x) n_dim <- length(dim(x)) add_length <- function(type) { if (length && !n_dim) { paste0(type, sprintf(" of length %s", length(x))) } else { type } } if (type == "list") { if (n_dim < 2) { return(add_length("a list")) } else if (is.data.frame(x)) { return("a data frame") } else if (n_dim == 2) { return("a list matrix") } else { return("a list array") } } type <- switch( type, logical = "a logical %s", integer = "an integer %s", numeric = , double = "a double %s", complex = "a complex %s", character = "a character %s", raw = "a raw %s", type = paste0("a ", type, " %s") ) if (n_dim < 2) { kind <- "vector" } else if (n_dim == 2) { kind <- "matrix" } else { kind <- "array" } out <- sprintf(type, kind) if (n_dim >= 2) { out } else { add_length(out) } } .rlang_as_friendly_type <- function(type) { switch( type, list = "a list", NULL = "`NULL`", environment = "an environment", externalptr = "a pointer", weakref = "a weak reference", S4 = "an S4 object", name = , symbol = "a symbol", language = "a call", pairlist = "a pairlist node", expression = "an expression vector", char = "an internal string", promise = "an internal promise", ... = "an internal dots object", any = "an internal `any` object", bytecode = "an internal bytecode object", primitive = , builtin = , special = "a primitive function", closure = "a function", type ) } .rlang_stop_unexpected_typeof <- function(x, call = caller_env()) { abort( sprintf("Unexpected type <%s>.", typeof(x)), call = call ) } #' Return OO type #' @param x Any R object. #' @return One of `"bare"` (for non-OO objects), `"S3"`, `"S4"`, #' `"R6"`, or `"R7"`. 
#' @noRd obj_type_oo <- function(x) { if (!is.object(x)) { return("bare") } class <- inherits(x, c("R6", "R7_object"), which = TRUE) if (class[[1]]) { "R6" } else if (class[[2]]) { "R7" } else if (isS4(x)) { "S4" } else { "S3" } } #' @param x The object type which does not conform to `what`. Its #' `obj_type_friendly()` is taken and mentioned in the error message. #' @param what The friendly expected type as a string. Can be a #' character vector of expected types, in which case the error #' message mentions all of them in an "or" enumeration. #' @param show_value Passed to `value` argument of `obj_type_friendly()`. #' @param ... Arguments passed to [abort()]. #' @inheritParams args_error_context #' @noRd stop_input_type <- function(x, what, ..., allow_na = FALSE, allow_null = FALSE, show_value = TRUE, arg = caller_arg(x), call = caller_env()) { # From compat-cli.R cli <- env_get_list( nms = c("format_arg", "format_code"), last = topenv(), default = function(x) sprintf("`%s`", x), inherit = TRUE ) if (allow_na) { what <- c(what, cli$format_code("NA")) } if (allow_null) { what <- c(what, cli$format_code("NULL")) } if (length(what)) { what <- oxford_comma(what) } message <- sprintf( "%s must be %s, not %s.", cli$format_arg(arg), what, obj_type_friendly(x, value = show_value) ) abort(message, ..., call = call, arg = arg) } oxford_comma <- function(chr, sep = ", ", final = "or") { n <- length(chr) if (n < 2) { return(chr) } head <- chr[seq_len(n - 1)] last <- chr[n] head <- paste(head, collapse = sep) # Write a or b. But a, b, or c. if (n > 2) { paste0(head, sep, final, " ", last) } else { paste0(head, " ", final, " ", last) } } # nocov end tidyr/R/pivot-wide.R0000644000176200001440000006040014553563421014030 0ustar liggesusers#' Pivot data from long to wide # #' @description #' `pivot_wider()` "widens" data, increasing the number of columns and #' decreasing the number of rows. The inverse transformation is #' [pivot_longer()]. #' #' Learn more in `vignette("pivot")`. 
#' #' @details #' `pivot_wider()` is an updated approach to [spread()], designed to be both #' simpler to use and to handle more use cases. We recommend you use #' `pivot_wider()` for new code; `spread()` isn't going away but is no longer #' under active development. #' #' @seealso [pivot_wider_spec()] to pivot "by hand" with a data frame that #' defines a pivoting specification. #' @inheritParams pivot_longer #' @param id_cols <[`tidy-select`][tidyr_tidy_select]> A set of columns that #' uniquely identify each observation. Typically used when you have #' redundant variables, i.e. variables whose values are perfectly correlated #' with existing variables. #' #' Defaults to all columns in `data` except for the columns specified through #' `names_from` and `values_from`. If a tidyselect expression is supplied, it #' will be evaluated on `data` after removing the columns specified through #' `names_from` and `values_from`. #' @param id_expand Should the values in the `id_cols` columns be expanded by #' [expand()] before pivoting? This results in more rows, the output will #' contain a complete expansion of all possible values in `id_cols`. Implicit #' factor levels that aren't represented in the data will become explicit. #' Additionally, the row values corresponding to the expanded `id_cols` will #' be sorted. #' @param names_from,values_from <[`tidy-select`][tidyr_tidy_select]> A pair of #' arguments describing which column (or columns) to get the name of the #' output column (`names_from`), and which column (or columns) to get the #' cell values from (`values_from`). #' #' If `values_from` contains multiple values, the value will be added to the #' front of the output column. #' @param names_sep If `names_from` or `values_from` contains multiple #' variables, this will be used to join their values together into a single #' string to use as a column name. #' @param names_prefix String added to the start of every variable name. 
This is
#' particularly useful if `names_from` is a numeric vector and you want to
#' create syntactic variable names.
#' @param names_glue Instead of `names_sep` and `names_prefix`, you can supply
#' a glue specification that uses the `names_from` columns (and special
#' `.value`) to create custom column names.
#' @param names_sort Should the column names be sorted? If `FALSE`, the default,
#' column names are ordered by first appearance.
#' @param names_vary When `names_from` identifies a column (or columns) with
#' multiple unique values, and multiple `values_from` columns are provided,
#' in what order should the resulting column names be combined?
#'
#' - `"fastest"` varies `names_from` values fastest, resulting in a column
#' naming scheme of the form: `value1_name1, value1_name2, value2_name1,
#' value2_name2`. This is the default.
#'
#' - `"slowest"` varies `names_from` values slowest, resulting in a column
#' naming scheme of the form: `value1_name1, value2_name1, value1_name2,
#' value2_name2`.
#' @param names_expand Should the values in the `names_from` columns be expanded
#' by [expand()] before pivoting? This results in more columns, the output
#' will contain column names corresponding to a complete expansion of all
#' possible values in `names_from`. Implicit factor levels that aren't
#' represented in the data will become explicit. Additionally, the column
#' names will be sorted, identical to what `names_sort` would produce.
#' @param values_fill Optionally, a (scalar) value that specifies what each
#' `value` should be filled in with when missing.
#'
#' This can be a named list if you want to apply different fill values to
#' different value columns.
#' @param values_fn Optionally, a function applied to the value in each cell
#' in the output. You will typically use this when the combination of
#' `id_cols` and `names_from` columns does not uniquely identify an
#' observation.
#'
#' This can be a named list if you want to apply different aggregations
#' to different `values_from` columns.
#' @param unused_fn Optionally, a function applied to summarize the values from
#' the unused columns (i.e. columns not identified by `id_cols`,
#' `names_from`, or `values_from`).
#'
#' The default drops all unused columns from the result.
#'
#' This can be a named list if you want to apply different aggregations
#' to different unused columns.
#'
#' `id_cols` must be supplied for `unused_fn` to be useful, since otherwise
#' all unspecified columns will be considered `id_cols`.
#'
#' This is similar to grouping by the `id_cols` then summarizing the
#' unused columns using `unused_fn`.
#' @param ... Additional arguments passed on to methods.
#' @export
#' @examples
#' # See vignette("pivot") for examples and explanation
#'
#' fish_encounters
#' fish_encounters %>%
#'   pivot_wider(names_from = station, values_from = seen)
#' # Fill in missing values
#' fish_encounters %>%
#'   pivot_wider(names_from = station, values_from = seen, values_fill = 0)
#'
#' # Generate column names from multiple variables
#' us_rent_income
#' us_rent_income %>%
#'   pivot_wider(
#'     names_from = variable,
#'     values_from = c(estimate, moe)
#'   )
#'
#' # You can control whether `names_from` values vary fastest or slowest
#' # relative to the `values_from` column names using `names_vary`.
#' us_rent_income %>%
#'   pivot_wider(
#'     names_from = variable,
#'     values_from = c(estimate, moe),
#'     names_vary = "slowest"
#'   )
#'
#' # When there are multiple `names_from` or `values_from`, you can use
#' # use `names_sep` or `names_glue` to control the output variable names
#' us_rent_income %>%
#'   pivot_wider(
#'     names_from = variable,
#'     names_sep = ".",
#'     values_from = c(estimate, moe)
#'   )
#' us_rent_income %>%
#'   pivot_wider(
#'     names_from = variable,
#'     names_glue = "{variable}_{.value}",
#'     values_from = c(estimate, moe)
#'   )
#'
#' # Can perform aggregation with `values_fn`
#' warpbreaks <- as_tibble(warpbreaks[c("wool", "tension", "breaks")])
#' warpbreaks
#' warpbreaks %>%
#'   pivot_wider(
#'     names_from = wool,
#'     values_from = breaks,
#'     values_fn = mean
#'   )
#'
#' # Can pass an anonymous function to `values_fn` when you
#' # need to supply additional arguments
#' warpbreaks$breaks[1] <- NA
#' warpbreaks %>%
#'   pivot_wider(
#'     names_from = wool,
#'     values_from = breaks,
#'     values_fn = ~ mean(.x, na.rm = TRUE)
#'   )
pivot_wider <- function(data,
                        ...,
                        id_cols = NULL,
                        id_expand = FALSE,
                        names_from = name,
                        names_prefix = "",
                        names_sep = "_",
                        names_glue = NULL,
                        names_sort = FALSE,
                        names_vary = "fastest",
                        names_expand = FALSE,
                        names_repair = "check_unique",
                        values_from = value,
                        values_fill = NULL,
                        values_fn = NULL,
                        unused_fn = NULL) {
  # TODO: Use `check_dots_used()` after removing the `id_cols` compat behavior
  UseMethod("pivot_wider")
}

#' @export
pivot_wider.data.frame <- function(data,
                                   ...,
                                   id_cols = NULL,
                                   id_expand = FALSE,
                                   names_from = name,
                                   names_prefix = "",
                                   names_sep = "_",
                                   names_glue = NULL,
                                   names_sort = FALSE,
                                   names_vary = "fastest",
                                   names_expand = FALSE,
                                   names_repair = "check_unique",
                                   values_from = value,
                                   values_fill = NULL,
                                   values_fn = NULL,
                                   unused_fn = NULL) {
  # Defuse the tidyselect expressions so they can be evaluated lazily,
  # in the context of `data`, by the helpers below.
  names_from <- enquo(names_from)
  values_from <- enquo(values_from)

  # Build the wide-format specification: one row per output column,
  # with `.name`/`.value` plus the original `names_from` columns.
  spec <- build_wider_spec(
    data = data,
    names_from = !!names_from,
    values_from = !!values_from,
    names_prefix = names_prefix,
    names_sep = names_sep,
    names_glue = names_glue,
    names_sort = names_sort,
    names_vary = names_vary,
    names_expand = names_expand,
    error_call = current_env()
  )

  # Deprecation shim: historically `id_cols` could be supplied positionally
  # through `...`. `compat_id_cols()` inspects the unexpanded call to detect
  # that pattern and warns before adopting the positional argument.
  id_cols <- compat_id_cols(
    id_cols = {{ id_cols }},
    ...,
    fn_call = match.call(expand.dots = FALSE)
  )

  # Resolve the (possibly defaulted) id-column selection into an explicit
  # `c(...)` expression that `pivot_wider_spec()` can re-evaluate.
  id_cols <- build_wider_id_cols_expr(
    data = data,
    id_cols = !!id_cols,
    names_from = !!names_from,
    values_from = !!values_from
  )

  # Delegate the actual pivoting to the spec-based low-level interface.
  pivot_wider_spec(
    data = data,
    spec = spec,
    id_cols = !!id_cols,
    id_expand = id_expand,
    names_repair = names_repair,
    values_fill = values_fill,
    values_fn = values_fn,
    unused_fn = unused_fn,
    error_call = current_env()
  )
}

#' Pivot data from long to wide using a spec
#'
#' This is a low level interface to pivoting, inspired by the cdata package,
#' that allows you to describe pivoting with a data frame.
#'
#' @keywords internal
#' @export
#' @inheritParams rlang::args_dots_empty
#' @inheritParams rlang::args_error_context
#' @inheritParams pivot_wider
#' @param spec A specification data frame. This is useful for more complex
#' pivots because it gives you greater control on how metadata stored in the
#' columns become column names in the result.
#'
#' Must be a data frame containing character `.name` and `.value` columns.
#' Additional columns in `spec` should be named to match columns in the
#' long format of the dataset and contain values corresponding to columns
#' pivoted from the wide format.
#' The special `.seq` variable is used to disambiguate rows internally;
#' it is automatically removed after pivoting.
#' @param id_cols <[`tidy-select`][tidyr_tidy_select]> A set of columns that
#' uniquely identifies each observation. Defaults to all columns in `data`
#' except for the columns specified in `spec$.value` and the columns of the
#' `spec` that aren't named `.name` or `.value`. Typically used when you have
#' redundant variables, i.e. variables whose values are perfectly correlated
#' with existing variables.
#'
#' @examples
#' # See vignette("pivot") for examples and explanation
#'
#' us_rent_income
#' spec1 <- us_rent_income %>%
#'   build_wider_spec(names_from = variable, values_from = c(estimate, moe))
#' spec1
#'
#' us_rent_income %>%
#'   pivot_wider_spec(spec1)
#'
#' # Is equivalent to
#' us_rent_income %>%
#'   pivot_wider(names_from = variable, values_from = c(estimate, moe))
#'
#' # `pivot_wider_spec()` provides more control over column names and output format
#' # instead of creating columns with estimate_ and moe_ prefixes,
#' # keep original variable name for estimates and attach _moe as suffix
#' spec2 <- tibble(
#'   .name = c("income", "rent", "income_moe", "rent_moe"),
#'   .value = c("estimate", "estimate", "moe", "moe"),
#'   variable = c("income", "rent", "income", "rent")
#' )
#'
#' us_rent_income %>%
#'   pivot_wider_spec(spec2)
pivot_wider_spec <- function(data,
                             spec,
                             ...,
                             names_repair = "check_unique",
                             id_cols = NULL,
                             id_expand = FALSE,
                             values_fill = NULL,
                             values_fn = NULL,
                             unused_fn = NULL,
                             error_call = current_env()) {
  check_dots_empty0(...)

  spec <- check_pivot_spec(spec, call = error_call)
  check_bool(id_expand, call = error_call)

  # By construction the first two spec columns are `.name`/`.value`; any
  # remaining columns are the original `names_from` columns.
  names_from_cols <- names(spec)[-(1:2)]
  values_from_cols <- vec_unique(spec$.value)

  id_cols <- select_wider_id_cols(
    data = data,
    id_cols = {{ id_cols }},
    names_from_cols = names_from_cols,
    values_from_cols = values_from_cols,
    error_call = error_call
  )

  values_fn <- check_list_of_functions(values_fn, values_from_cols, call = error_call)

  # "Unused" columns are those claimed by none of id/names/values; they are
  # only retained when the caller supplies an `unused_fn` for them.
  unused_cols <- setdiff(names(data), c(id_cols, names_from_cols, values_from_cols))
  unused_fn <- check_list_of_functions(unused_fn, unused_cols, call = error_call)
  unused_cols <- names(unused_fn)

  # Normalize `values_fill` into a list named by value column.
  if (is.null(values_fill)) {
    values_fill <- list()
  } else if (is_scalar(values_fill)) {
    values_fill <- rep_named(values_from_cols, list(values_fill))
  } else if (!vec_is_list(values_fill)) {
    cli::cli_abort(
      "{.arg values_fill} must be {.code NULL}, a scalar, or a named list, not {.obj_type_friendly {values_fill}}.",
      call = error_call
    )
  }
  values_fill <- values_fill[intersect(names(values_fill), values_from_cols)]

  # Keep the original input so class/grouping can be restored at the end.
  input <- data

  # Early conversion to tibble because data.table returns zero rows if
  # zero cols are selected. Also want to avoid the grouped-df behavior
  # of `complete()`.
  data <- as_tibble(data)

  data <- data[vec_unique(c(id_cols, names_from_cols, values_from_cols, unused_cols))]

  if (id_expand) {
    data <- complete(data, !!!syms(id_cols), fill = values_fill, explicit = FALSE)
  }

  # Figure out rows in output
  rows <- data[id_cols]
  row_id <- vec_group_id(rows)
  nrow <- attr(row_id, "n")
  rows <- vec_slice(rows, vec_unique_loc(row_id))

  n_unused_fn <- length(unused_fn)
  unused <- vector("list", length = n_unused_fn)
  names(unused) <- unused_cols

  if (n_unused_fn > 0L) {
    # This can be expensive, only compute if we are using `unused_fn`
    unused_locs <- vec_group_loc(row_id)$loc
  }

  # Summarize each unused column down to one value per output row.
  for (i in seq_len(n_unused_fn)) {
    unused_col <- unused_cols[[i]]
    unused_fn_i <- unused_fn[[i]]
    unused_value <- data[[unused_col]]

    unused[[i]] <- value_summarize(
      value = unused_value,
      value_locs = unused_locs,
      value_name = unused_col,
      fn = unused_fn_i,
      fn_name = "unused_fn",
      error_call = error_call
    )
  }

  unused <- tibble::new_tibble(unused, nrow = nrow)

  duplicate_names <- character(0L)
  value_specs <- unname(split(spec, spec$.value))
  value_out <- vec_init(list(), length(value_specs))

  # One pass per distinct `.value` column: scatter the long vector into a
  # (nrow x ncol) column-major layout, then chop it into output columns.
  for (i in seq_along(value_out)) {
    value_spec <- value_specs[[i]]
    value_name <- value_spec$.value[[1]]
    value <- data[[value_name]]

    # Match each input row against the spec's key columns to get its
    # destination column index.
    cols <- data[names(value_spec)[-(1:2)]]
    col_id <- vec_match(as_tibble(cols), value_spec[-(1:2)])
    value_id <- data.frame(row = row_id, col = col_id)

    value_fn <- values_fn[[value_name]]

    if (is.null(value_fn) && vec_duplicate_any(value_id)) {
      # There are unhandled duplicates. Handle them with `list()` and warn.
      value_fn <- list
      duplicate_names <- c(duplicate_names, value_name)
    }

    if (!is.null(value_fn)) {
      result <- vec_group_loc(value_id)
      value_id <- result$key
      value_locs <- result$loc

      value <- value_summarize(
        value = value,
        value_locs = value_locs,
        value_name = value_name,
        fn = value_fn,
        fn_name = "values_fn",
        error_call = error_call
      )
    }

    ncol <- nrow(value_spec)

    fill <- values_fill[[value_name]]
    if (is.null(fill)) {
      out <- vec_init(value, nrow * ncol)
    } else {
      stopifnot(vec_size(fill) == 1)
      fill <- vec_cast(fill, value, call = error_call)
      out <- vec_rep_each(fill, nrow * ncol)
    }
    # Column-major linear index: row + nrow * (col - 1).
    out <- vec_assign(out, value_id$row + nrow * (value_id$col - 1L), value)

    value_out[[i]] <- chop_rectangular_df(out, value_spec$.name)
  }

  if (length(duplicate_names) > 0L) {
    duplicate_names <- glue::backtick(duplicate_names)
    duplicate_names <- glue::glue_collapse(duplicate_names, sep = ", ", last = " and ")

    group_cols <- c(id_cols, names_from_cols)
    group_cols <- backtick_if_not_syntactic(group_cols)
    group_cols <- glue::glue_collapse(group_cols, sep = ", ")

    cli::cli_warn(c(
      "Values from {duplicate_names} are not uniquely identified; output will contain list-cols.",
      "*" = "Use `values_fn = list` to suppress this warning.",
      "*" = "Use `values_fn = {{summary_fun}}` to summarise duplicates.",
      "*" = "Use the following dplyr code to identify duplicates.",
      " " = " {{data}} |>",
      " " = " dplyr::summarise(n = dplyr::n(), .by = c({group_cols})) |>",
      " " = " dplyr::filter(n > 1L)"
    ))
  }

  # `check_pivot_spec()` ensures `.name` is unique. Name repair shouldn't be needed.
  values <- vec_cbind(!!!value_out, .name_repair = "minimal")

  # Recreate desired column order of the new spec columns (#569)
  values <- values[spec$.name]

  out <- wrap_error_names(vec_cbind(
    rows,
    values,
    unused,
    .name_repair = names_repair,
    .error_call = error_call
  ))

  reconstruct_tibble(input, out)
}

#' @export
#' @rdname pivot_wider_spec
#' @inheritParams pivot_wider
build_wider_spec <- function(data,
                             ...,
                             names_from = name,
                             values_from = value,
                             names_prefix = "",
                             names_sep = "_",
                             names_glue = NULL,
                             names_sort = FALSE,
                             names_vary = "fastest",
                             names_expand = FALSE,
                             error_call = current_env()) {
  check_dots_empty0(...)

  names_from <- tidyselect::eval_select(
    enquo(names_from),
    data,
    allow_rename = FALSE,
    allow_empty = FALSE,
    error_call = error_call
  )
  values_from <- tidyselect::eval_select(
    enquo(values_from),
    data,
    allow_rename = FALSE,
    allow_empty = FALSE,
    error_call = error_call
  )

  check_string(names_prefix, call = error_call)
  check_string(names_sep, call = error_call)
  check_string(names_glue, allow_null = TRUE, call = error_call)
  check_bool(names_sort, call = error_call)
  check_bool(names_expand, call = error_call)

  names_vary <- arg_match0(
    arg = names_vary,
    values = c("fastest", "slowest"),
    arg_nm = "names_vary",
    error_call = error_call
  )

  data <- as_tibble(data)
  data <- data[names_from]

  if (names_expand) {
    # `expand()` always does sort + unique
    row_ids <- expand(data, !!!syms(names(data)))
  } else {
    row_ids <- vec_unique(data)
    if (names_sort) {
      row_ids <- vec_sort(row_ids)
    }
  }

  # One candidate output name per unique `names_from` combination.
  row_names <- exec(paste, !!!row_ids, sep = names_sep)

  out <- tibble(
    .name = vec_paste0(names_prefix, row_names)
  )

  if (length(values_from) == 1) {
    out$.value <- names(values_from)
  } else {
    # Multiple value columns: cross them with the name combinations, with
    # `names_vary` controlling which dimension cycles fastest.
    if (names_vary == "fastest") {
      out <- vec_rep(out, vec_size(values_from))
      out$.value <- vec_rep_each(names(values_from), vec_size(row_ids))
      row_ids <- vec_rep(row_ids, vec_size(values_from))
    } else {
      out <- vec_rep_each(out, vec_size(values_from))
      out$.value <- vec_rep(names(values_from), vec_size(row_ids))
      row_ids <- vec_rep_each(row_ids, vec_size(values_from))
    }

    out$.name <- vec_paste0(out$.value, names_sep, out$.name)
  }

  out <- vec_cbind(out, as_tibble(row_ids), .name_repair = "minimal")
  if (!is.null(names_glue)) {
    # `names_glue` overrides the prefix/sep based naming entirely.
    out$.name <- as.character(glue::glue_data(out, names_glue))
  }

  out
}

# Resolve the `id_cols` selection against `data` and return it as an
# explicit `c(...)` expression for later re-evaluation.
build_wider_id_cols_expr <- function(data,
                                     id_cols = NULL,
                                     names_from = name,
                                     values_from = value,
                                     error_call = caller_env()) {
  names_from <- tidyselect::eval_select(
    enquo(names_from),
    data,
    allow_rename = FALSE,
    error_call = error_call
  )
  values_from <- tidyselect::eval_select(
    enquo(values_from),
    data,
    allow_rename = FALSE,
    error_call = error_call
  )

  out <- select_wider_id_cols(
    data = data,
    id_cols = {{ id_cols }},
    names_from_cols = names(names_from),
    values_from_cols = names(values_from),
    error_call = error_call
  )

  expr(c(!!!out))
}

# Evaluate the `id_cols` tidyselect, defaulting to all columns not already
# used by `names_from`/`values_from`, with a friendlier OOB error.
select_wider_id_cols <- function(data,
                                 id_cols = NULL,
                                 names_from_cols = character(),
                                 values_from_cols = character(),
                                 error_call = caller_env()) {
  id_cols <- enquo(id_cols)

  # Remove known non-id-cols so they are never selected
  data <- data[setdiff(names(data), c(names_from_cols, values_from_cols))]

  if (quo_is_null(id_cols)) {
    # Default selects everything in `data` after non-id-cols have been removed
    return(names(data))
  }

  try_fetch(
    id_cols <- tidyselect::eval_select(
      enquo(id_cols),
      data,
      allow_rename = FALSE,
      error_call = error_call
    ),
    vctrs_error_subscript_oob = function(cnd) {
      rethrow_id_cols_oob(cnd, names_from_cols, values_from_cols, error_call)
    }
  )

  names(id_cols)
}

# If an out-of-bounds `id_cols` selection names a column that was consumed
# by `names_from`/`values_from`, raise a targeted error; otherwise let the
# original tidyselect condition propagate.
rethrow_id_cols_oob <- function(cnd, names_from_cols, values_from_cols, call) {
  i <- cnd[["i"]]

  check_string(i, .internal = TRUE)

  if (i %in% names_from_cols) {
    stop_id_cols_oob(i, "names_from", call = call)
  } else if (i %in% values_from_cols) {
    stop_id_cols_oob(i, "values_from", call = call)
  } else {
    # Zap this special handler, throw the normal condition
    zap()
  }
}

stop_id_cols_oob <- function(i, arg, call) {
  cli::cli_abort(
    c(
      "`id_cols` can't select a column already selected by `{arg}`.",
      i = "Column `{i}` has already been selected."
    ),
    parent = NA,
    call = call
  )
}

# Deprecation shim: adopt a single unnamed `...` element as `id_cols` when
# the user didn't name the argument (historical positional interface).
compat_id_cols <- function(id_cols,
                           ...,
                           fn_call,
                           error_call = caller_env(),
                           user_env = caller_env(2)) {
  dots <- enquos(...)

  # If `id_cols` is specified by name by the user, it will show up in the call.
  # Otherwise, default args don't show up in the call so it won't be there.
  user_specified_id_cols <- "id_cols" %in% names(fn_call)

  # For compatibility (#1353), assign the first value of `...` to `id_cols` if:
  # - The user didn't specify `id_cols`.
  # - There is exactly 1 unnamed element in `...`.
  use_compat_id_cols <-
    !user_specified_id_cols &&
    length(dots) == 1L &&
    !is_named(dots)

  if (use_compat_id_cols) {
    id_cols <- dots[[1L]]
    warn_deprecated_unnamed_id_cols(id_cols, user_env = user_env)
  } else {
    id_cols <- enquo(id_cols)
    check_dots_empty0(..., call = error_call)
  }

  id_cols
}

warn_deprecated_unnamed_id_cols <- function(id_cols, user_env = caller_env(2)) {
  id_cols <- as_label(id_cols)

  lifecycle::deprecate_warn(
    when = "1.3.0",
    what = I(cli::format_inline(
      "Specifying the {.arg id_cols} argument by position"
    )),
    details = cli::format_inline(
      "Please explicitly name {.arg id_cols}, like {.code id_cols = {id_cols}}."
    ),
    always = TRUE,
    user_env = user_env
  )
}

# Helpers -----------------------------------------------------------------

# Apply `fn` within each group of `value_locs`, enforcing that every group
# collapses to exactly one summary value.
value_summarize <- function(value,
                            value_locs,
                            value_name,
                            fn,
                            fn_name,
                            error_call = caller_env()) {
  value <- vec_chop(value, value_locs)

  if (identical(fn, list)) {
    # The no-op case, for performance
    return(value)
  }

  value <- map(value, fn)

  sizes <- list_sizes(value)
  invalid_sizes <- sizes != 1L

  if (any(invalid_sizes)) {
    size <- sizes[invalid_sizes][[1]]
    cli::cli_abort(
      c(
        "Applying {.arg {fn_name}} to {.var {value_name}} must result in a single summary value per key.",
        i = "Applying {.arg {fn_name}} resulted in a vector of length {size}."
      ),
      call = error_call
    )
  }

  value <- list_unchop(value)

  value
}

# Wrap a "rectangular" vector into a data frame
chop_rectangular_df <- function(x, names) {
  n_col <- vec_size(names)
  n_row <- vec_size(x) / n_col

  # Consecutive, equally sized slices of `x` become the output columns
  # (column-major layout).
  indices <- vector("list", n_col)
  start <- 1L
  stop <- n_row

  for (i in seq_len(n_col)) {
    indices[[i]] <- seq2(start, stop)
    start <- start + n_row
    stop <- stop + n_row
  }

  out <- vec_chop(x, indices)
  names(out) <- names
  tibble::new_tibble(out, nrow = n_row)
}

# A size-1 value counts as scalar; an unnamed size-1 list counts too, while
# a named size-1 list is treated as a per-column mapping instead.
is_scalar <- function(x) {
  if (is.null(x)) {
    return(FALSE)
  }

  if (vec_is_list(x)) {
    (vec_size(x) == 1) && !have_name(x)
  } else {
    vec_size(x) == 1
  }
}

# Backtick any name that isn't a syntactic R name (or is NA).
backtick_if_not_syntactic <- function(x) {
  ok <- make.names(x) == x
  ok[is.na(x)] <- FALSE

  x[!ok] <- glue::backtick(x[!ok])

  x
}
tidyr/R/dep-lazyeval.R0000644000176200001440000001625014553565525014341 0ustar liggesusers# nocov start
#' Deprecated SE versions of main verbs
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' tidyr used to offer twin versions of each verb suffixed with an
#' underscore. These versions had standard evaluation (SE) semantics:
#' rather than taking arguments by code, like NSE verbs, they took
#' arguments by value. Their purpose was to make it possible to
#' program with tidyr. However, tidyr now uses tidy evaluation
#' semantics. NSE verbs still capture their arguments, but you can now
#' unquote parts of these arguments. This offers full programmability
#' with NSE verbs. Thus, the underscored versions are now superfluous.
#'
#' Unquoting triggers immediate evaluation of its operand and inlines
#' the result within the captured expression. This result can be a
#' value or an expression to be evaluated later with the rest of the
#' argument. See `vignette("programming", "dplyr")` for more information.
#'
#' @param data A data frame
#' @param vars,cols,col Name of columns.
#' @name deprecated-se
#' @keywords internal
NULL

#' @rdname deprecated-se
#' @inheritParams complete
#' @export
complete_ <- function(data, cols, fill = list(), ...) {
  lifecycle::deprecate_warn("1.0.0", "complete_()", "complete()", always = TRUE)
  UseMethod("complete_")
}
#' @export
complete_.data.frame <- function(data, cols, fill = list(), ...) {
  # Convert lazyeval-style inputs to quosures, then splice into the NSE verb.
  cols <- compat_lazy_dots(cols, caller_env())
  complete(data, !!!cols, fill = fill)
}

#' @rdname deprecated-se
#' @export
drop_na_ <- function(data, vars) {
  lifecycle::deprecate_warn("1.0.0", "drop_na_()", "drop_na()", always = TRUE)
  UseMethod("drop_na_")
}
#' @export
drop_na_.data.frame <- function(data, vars) {
  drop_na(data, !!!vars)
}

#' @rdname deprecated-se
#' @export
expand_ <- function(data, dots, ...) {
  lifecycle::deprecate_warn("1.0.0", "expand_()", "expand()", always = TRUE)
  UseMethod("expand_")
}
#' @export
expand_.data.frame <- function(data, dots, ...) {
  dots <- compat_lazy_dots(dots, caller_env())
  expand(data, !!!dots)
}

#' @rdname deprecated-se
#' @param x For `nesting_` and `crossing_` a list of variables.
#' @export
crossing_ <- function(x) {
  lifecycle::deprecate_warn("1.0.0", "crossing_()", "crossing()", always = TRUE)
  x <- compat_lazy_dots(x, caller_env())
  crossing(!!!x)
}

#' @rdname deprecated-se
#' @export
nesting_ <- function(x) {
  lifecycle::deprecate_warn("1.2.0", "nesting_()", "nesting()", always = TRUE)
  x <- compat_lazy_dots(x, caller_env())
  nesting(!!!x)
}

#' @rdname deprecated-se
#' @inheritParams extract
#' @export
extract_ <- function(data, col, into, regex = "([[:alnum:]]+)",
                     remove = TRUE, convert = FALSE, ...) {
  lifecycle::deprecate_warn("1.0.0", "extract_()", "extract()", always = TRUE)
  UseMethod("extract_")
}
#' @export
extract_.data.frame <- function(data, col, into, regex = "([[:alnum:]]+)",
                                remove = TRUE, convert = FALSE, ...) {
  col <- compat_lazy(col, caller_env())
  extract(data,
    col = !!col, into = into, regex = regex,
    remove = remove, convert = convert, ...
  )
}

#' @rdname deprecated-se
#' @inheritParams fill
#' @param fill_cols Character vector of column names.
#' @export
fill_ <- function(data, fill_cols, .direction = c("down", "up")) {
  lifecycle::deprecate_warn("1.2.0", "fill_()", "fill()", always = TRUE)
  UseMethod("fill_")
}
#' @export
fill_.data.frame <- function(data, fill_cols,
                             .direction = c("down", "up", "downup", "updown")) {
  vars <- syms(fill_cols)
  fill(data, !!!vars, .direction = .direction)
}

#' @rdname deprecated-se
#' @inheritParams gather
#' @param key_col,value_col Strings giving names of key and value columns to
#' create.
#' @param gather_cols Character vector giving column names to be gathered into
#' pair of key-value columns.
#' @keywords internal
#' @export
gather_ <- function(data, key_col, value_col, gather_cols,
                    na.rm = FALSE, convert = FALSE, factor_key = FALSE) {
  lifecycle::deprecate_warn("1.2.0", "gather_()", "gather()", always = TRUE)
  UseMethod("gather_")
}
#' @export
gather_.data.frame <- function(data, key_col, value_col, gather_cols,
                               na.rm = FALSE, convert = FALSE, factor_key = FALSE) {
  key_col <- sym(key_col)
  value_col <- sym(value_col)
  gather_cols <- syms(gather_cols)
  gather(data,
    key = !!key_col, value = !!value_col, !!!gather_cols,
    na.rm = na.rm, convert = convert, factor_key = factor_key
  )
}

#' @rdname deprecated-se
#' @inheritParams nest
#' @keywords internal
#' @export
nest_ <- function(...) {
  # FIX: `lifecycle::deprecate_stop()` has no `always` argument (that only
  # exists on `deprecate_warn()`/`deprecate_soft()`). Passing `always = TRUE`
  # raised an "unused argument" error instead of the intended defunct
  # message. Sibling `unnest_()` below already omits it.
  lifecycle::deprecate_stop("1.0.0", "nest_()", "nest()")
}

#' @rdname deprecated-se
#' @inheritParams separate_rows
#' @export
separate_rows_ <- function(data, cols, sep = "[^[:alnum:].]+", convert = FALSE) {
  # FIX: pass `always = TRUE` for consistency with every other deprecated SE
  # verb, so the warning is emitted on every call rather than once per
  # session.
  lifecycle::deprecate_warn("1.2.0", "separate_rows_()", "separate_rows()", always = TRUE)
  UseMethod("separate_rows_")
}
#' @export
separate_rows_.data.frame <- function(data, cols, sep = "[^[:alnum:].]+", convert = FALSE) {
  cols <- syms(cols)
  separate_rows(data, !!!cols, sep = sep, convert = convert)
}

#' @rdname deprecated-se
#' @inheritParams separate
#' @export
separate_ <- function(data, col, into, sep = "[^[:alnum:]]+",
                      remove = TRUE, convert = FALSE,
                      extra = "warn", fill = "warn", ...) {
  lifecycle::deprecate_warn("1.2.0", "separate_()", "separate()", always = TRUE)
  UseMethod("separate_")
}
#' @export
separate_.data.frame <- function(data, col, into, sep = "[^[:alnum:]]+",
                                 remove = TRUE, convert = FALSE,
                                 extra = "warn", fill = "warn", ...) {
  col <- sym(col)
  separate(data,
    col = !!col, into = into, sep = sep, remove = remove,
    convert = convert, extra = extra, fill = fill, ...
  )
}

#' @rdname deprecated-se
#' @inheritParams spread
#' @param key_col,value_col Strings giving names of key and value cols.
#' @export
spread_ <- function(data, key_col, value_col, fill = NA,
                    convert = FALSE, drop = TRUE, sep = NULL) {
  lifecycle::deprecate_warn("1.2.0", "spread_()", "spread()", always = TRUE)
  UseMethod("spread_")
}
#' @export
spread_.data.frame <- function(data, key_col, value_col, fill = NA,
                               convert = FALSE, drop = TRUE, sep = NULL) {
  key_col <- sym(key_col)
  value_col <- sym(value_col)
  spread(data,
    key = !!key_col, value = !!value_col, fill = fill,
    convert = convert, drop = drop, sep = sep
  )
}

#' @rdname deprecated-se
#' @inheritParams unite
#' @param from Names of existing columns as character vector
#' @export
unite_ <- function(data, col, from, sep = "_", remove = TRUE) {
  lifecycle::deprecate_warn("1.2.0", "unite_()", "unite()", always = TRUE)
  UseMethod("unite_")
}
#' @export
unite_.data.frame <- function(data, col, from, sep = "_", remove = TRUE) {
  col <- quo_get_expr(compat_lazy(col, caller_env()))
  from <- syms(from)
  unite(data, !!col, !!!from, sep = sep, remove = remove)
}

#' @rdname deprecated-se
#' @inheritParams unnest
#' @export
unnest_ <- function(...) {
  lifecycle::deprecate_stop("1.0.0", "unnest_()", "unnest()")
}

# nocov end
tidyr/R/nest-legacy.R0000644000176200001440000001647314357015307014164 0ustar liggesusers#' Legacy versions of `nest()` and `unnest()`
#'
#' @description
#' `r lifecycle::badge("superseded")`
#'
#' tidyr 1.0.0 introduced a new syntax for [nest()] and [unnest()]. The majority
#' of existing usage should be automatically translated to the new syntax with a
#' warning. However, if you need to quickly roll back to the previous behaviour,
#' these functions provide the previous interface. To make old code work as is,
#' add the following code to the top of your script:
#'
#' ```
#' library(tidyr)
#' nest <- nest_legacy
#' unnest <- unnest_legacy
#' ```
#'
#' @param data A data frame.
#' @param .key The name of the new column, as a string or symbol.
This argument
#' is passed by expression and supports
#' [quasiquotation][rlang::quasiquotation] (you can unquote strings and
#' symbols). The name is captured from the expression with [rlang::ensym()]
#' (note that this kind of interface where symbols do not represent actual
#' objects is now discouraged in the tidyverse; we support it here for
#' backward compatibility).
#' @param ... Specification of columns to unnest. Use bare variable names or
#' functions of variables. If omitted, defaults to all list-cols.
#' @param .drop Should additional list columns be dropped? By default,
#' `unnest()` will drop them if unnesting the specified columns requires the
#' rows to be duplicated.
#' @param .preserve Optionally, list-columns to preserve in the output. These
#' will be duplicated in the same way as atomic vectors. This has
#' [dplyr::select()] semantics so you can preserve multiple variables with
#' `.preserve = c(x, y)` or `.preserve = starts_with("list")`.
#' @param .id Data frame identifier - if supplied, will create a new column with
#' name `.id`, giving a unique identifier. This is most useful if the list
#' column is named.
#' @param .sep If non-`NULL`, the names of unnested data frame columns will
#' combine the name of the original list-col with the names from the nested
#' data frame, separated by `.sep`.
#' @export
#' @examples
#' # Nest and unnest are inverses
#' df <- tibble(x = c(1, 1, 2), y = 3:1)
#' df %>% nest_legacy(y)
#' df %>% nest_legacy(y) %>% unnest_legacy()
#'
#' # nesting -------------------------------------------------------------------
#' as_tibble(iris) %>% nest_legacy(!Species)
#' as_tibble(chickwts) %>% nest_legacy(weight)
#'
#' # unnesting -----------------------------------------------------------------
#' df <- tibble(
#'   x = 1:2,
#'   y = list(
#'     tibble(z = 1),
#'     tibble(z = 3:4)
#'   )
#' )
#' df %>% unnest_legacy(y)
#'
#' # You can also unnest multiple columns simultaneously
#' df <- tibble(
#'   a = list(c("a", "b"), "c"),
#'   b = list(1:2, 3),
#'   c = c(11, 22)
#' )
#' df %>% unnest_legacy(a, b)
#' # If you omit the column names, it'll unnest all list-cols
#' df %>% unnest_legacy()
nest_legacy <- function(data, ..., .key = "data") {
  UseMethod("nest_legacy")
}

#' @importFrom utils packageVersion
#' @export
nest_legacy.tbl_df <- function(data, ..., .key = "data") {
  # Capture the key column name as a string (symbol-or-string interface).
  key_var <- as_string(ensym(.key))
  nest_vars <- unname(tidyselect::vars_select(names(data), ...))
  # An empty selection means "nest every column".
  if (is_empty(nest_vars)) {
    nest_vars <- names(data)
  }
  # Grouping variables (explicit groups, or the complement of the nested
  # selection) remain in the outer data frame.
  if (dplyr::is_grouped_df(data)) {
    group_vars <- dplyr::group_vars(data)
  } else {
    group_vars <- setdiff(names(data), nest_vars)
  }
  nest_vars <- setdiff(nest_vars, group_vars)
  data <- dplyr::ungroup(data)
  # Degenerate case: no grouping columns, so the whole frame becomes a
  # single list-cell.
  if (is_empty(group_vars)) {
    return(tibble(!!key_var := list(data)))
  }
  out <- dplyr::select(data, !!!syms(group_vars))
  grouped_data <- dplyr::group_by(data, !!!syms(group_vars), .drop = TRUE)
  idx <- dplyr::group_indices(grouped_data)
  # Keep one representative row per group, in order of first appearance.
  representatives <- which(!duplicated(idx))
  out <- dplyr::slice(out, representatives)
  # `split()` returns groups in sorted-index order; `[unique(idx)]` reorders
  # them to first-appearance order so they line up with `representatives`.
  out[[key_var]] <- unname(split(data[nest_vars], idx))[unique(idx)]
  out
}

#' @export
nest_legacy.data.frame <- function(data, ..., .key = "data") {
  .key <- enquo(.key)
  # Don't transform subclasses
  if (identical(class(data), "data.frame")) {
    data <- tibble::as_tibble(data)
  }
  nest_legacy.tbl_df(data, ..., .key = !!.key)
}

#' @export
#' @rdname nest_legacy
unnest_legacy <- function(data, ..., .drop = NA, .id = NULL, .sep = NULL, .preserve = NULL) {
  UseMethod("unnest_legacy")
}

#' @export
unnest_legacy.data.frame <- function(data, ..., .drop = NA, .id = NULL,
                                     .sep = NULL, .preserve = NULL) {
  preserve <- tidyselect::vars_select(names(data), !!enquo(.preserve))

  quos <- quos(...)
  # Default: unnest every list-column that isn't explicitly preserved.
  if (is_empty(quos)) {
    list_cols <- names(data)[map_lgl(data, is_list)]
    list_cols <- setdiff(list_cols, preserve)
    quos <- syms(list_cols)
  }

  if (length(quos) == 0) {
    return(data)
  }

  nested <- dplyr::transmute(dplyr::ungroup(data), !!!quos)
  # Per-column element sizes; every unnested column must expand each row by
  # the same amount.
  n <- map(nested, function(x) unname(map_int(x, NROW)))
  if (length(unique(n)) != 1) {
    cli::cli_abort("All nested columns must have the same number of elements.")
  }

  # Classify each list-column as all-vectors, all-data-frames, or mixed.
  types <- map_chr(nested, list_col_type)
  nest_types <- split.default(nested, types)
  if (length(nest_types$mixed) > 0) {
    probs <- names(nest_types$mixed)
    cli::cli_abort(c(
      "Each column must either be a list of vectors or a list of data frames.",
      i = "Problems in: {.var {probs}}"
    ))
  }

  unnested_atomic <- imap(nest_types$atomic %||% list(), enframe, .id = .id)
  if (length(unnested_atomic) > 0) {
    unnested_atomic <- dplyr::bind_cols(unnested_atomic)
  }

  unnested_dataframe <- map(nest_types$dataframe %||% list(), function(.) {
    if (length(.) == 0L) {
      # Empty list-col: fall back to its ptype (or a bare data.frame).
      attr(., "ptype") %||% data.frame()
    } else {
      dplyr::bind_rows(., .id = .id)
    }
  })
  if (!is_null(.sep)) {
    # Prefix nested-frame column names with the list-col name.
    unnested_dataframe <- imap(
      unnested_dataframe,
      function(df, name) {
        set_names(df, paste(name, names(df), sep = .sep))
      }
    )
  }
  if (length(unnested_dataframe) > 0) {
    unnested_dataframe <- dplyr::bind_cols(unnested_dataframe)
  }

  # Keep list columns by default, only if the rows aren't expanded
  if (identical(.drop, NA)) {
    n_in <- nrow(data)
    if (length(unnested_atomic)) {
      n_out <- nrow(unnested_atomic)
    } else {
      n_out <- nrow(unnested_dataframe)
    }
    .drop <- n_out != n_in
  }
  if (.drop) {
    is_atomic <- map_lgl(data, is_atomic)
    group_vars <- names(data)[is_atomic]
  } else {
    group_vars <- names(data)
  }
  group_vars <- setdiff(group_vars, names(nested))

  # Add list columns to be preserved
  group_vars <- union(group_vars, preserve)

  # Replicate the carried-along columns to match the expanded row count.
  rest <- data[rep(seq_nrow(data), n[[1]]), group_vars, drop = FALSE]
  out <- dplyr::bind_cols(rest, unnested_atomic, unnested_dataframe)
  reconstruct_tibble(data, out)
}

# Classify a list-column: "dataframe" (all elements are data frames, or the
# ptype is one), "atomic" (all elements are vectors / bare lists), or "mixed".
list_col_type <- function(x) {
  is_data_frame <- is.data.frame(attr(x, "ptype", exact = TRUE)) ||
    (is.list(x) && all(map_lgl(x, is.data.frame)))
  is_atomic <- all(map_lgl(x, function(x) is_atomic(x) || (is_list(x) && !is.object(x))))

  if (is_data_frame) {
    "dataframe"
  } else if (is_atomic) {
    "atomic"
  } else {
    "mixed"
  }
}

# Flatten a list of vectors into a one-column tibble named `col_name`,
# optionally adding an `.id` column derived from the list's names/positions.
enframe <- function(x, col_name, .id = NULL) {
  if (!is_list(x)) {
    x <- list(x)
  }
  col <- unname(x)
  col <- list_unchop(col)
  out <- set_names(list(col), col_name)
  out <- as_tibble(out)
  if (!is_null(.id)) {
    out[[.id]] <- id_col(x)
  }
  out
}

# Repeat each element's id (its name, or its position when unnamed) once per
# element of that entry, to label the flattened rows.
id_col <- function(x) {
  stopifnot(is_list(x))
  ids <- if (is_null(names(x))) seq_along(x) else names(x)
  lengths <- map_int(x, length)
  ids[rep(seq_along(ids), lengths)]
}
tidyr/R/compat-lazyeval.R0000644000176200001440000000425014315413441015042 0ustar liggesusers# nocov start - compat-lazyeval (last updated: rlang 0.3.0)
# This file serves as a reference for compatibility functions for lazyeval.
# Please find the most recent version in rlang's repository. warn_underscored <- function() { return(NULL) warn(paste( "The underscored versions are deprecated in favour of", "tidy evaluation idioms. Please see the documentation", "for `quo()` in rlang" )) } warn_text_se <- function() { return(NULL) warn("Text parsing is deprecated, please supply an expression or formula") } compat_lazy <- function(lazy, env = caller_env(), warn = TRUE) { if (warn) warn_underscored() if (missing(lazy)) { return(quo()) } if (is_quosure(lazy)) { return(lazy) } if (is_formula(lazy)) { return(as_quosure(lazy, env)) } out <- switch(typeof(lazy), symbol = , language = new_quosure(lazy, env), character = { if (warn) warn_text_se() parse_quo(lazy[[1]], env) }, logical = , integer = , double = { if (length(lazy) > 1) { warn("Truncating vector to length 1") lazy <- lazy[[1]] } new_quosure(lazy, env) }, list = if (inherits(lazy, "lazy")) { lazy <- new_quosure(lazy$expr, lazy$env) } ) if (is_null(out)) { abort(sprintf("Can't convert a %s to a quosure", typeof(lazy))) } else { out } } compat_lazy_dots <- function(dots, env, ..., .named = FALSE) { if (missing(dots)) { dots <- list() } if (inherits(dots, c("lazy", "formula"))) { dots <- list(dots) } else { dots <- unclass(dots) } dots <- c(dots, list(...)) warn <- TRUE for (i in seq_along(dots)) { dots[[i]] <- compat_lazy(dots[[i]], env, warn) warn <- FALSE } named <- have_name(dots) if (.named && any(!named)) { nms <- vapply(dots[!named], function(x) expr_text(get_expr(x)), character(1)) names(dots)[!named] <- nms } names(dots) <- names2(dots) dots } compat_as_lazy <- function(quo) { structure(class = "lazy", list( expr = get_expr(quo), env = get_env(quo) )) } compat_as_lazy_dots <- function(...) 
{ structure(class = "lazy_dots", lapply(quos(...), compat_as_lazy)) } # nocov end tidyr/R/unnest-helper.R0000644000176200001440000000640114360013543014522 0ustar liggesusers # Helpers ----------------------------------------------------------------- df_simplify <- function(x, ..., ptype = NULL, transform = NULL, simplify = TRUE, error_call = caller_env()) { check_dots_empty() ptype <- check_list_of_ptypes(ptype, names(x), call = error_call) transform <- check_list_of_functions(transform, names(x), call = error_call) simplify <- check_list_of_bool(simplify, names(x), call = error_call) x_n <- length(x) x_size <- vec_size(x) x_names <- names(x) out <- vector("list", length = x_n) names(out) <- x_names for (i in seq_len(x_n)) { col <- x[[i]] col_name <- x_names[[i]] col_ptype <- ptype[[col_name]] col_transform <- transform[[col_name]] col_simplify <- simplify[[col_name]] %||% TRUE out[[i]] <- col_simplify( x = col, ptype = col_ptype, transform = col_transform, simplify = col_simplify, error_call = error_call ) } new_data_frame(out, n = x_size) } col_simplify <- function(x, ..., ptype = NULL, transform = NULL, simplify = TRUE, error_call = caller_env()) { check_dots_empty() if (!is.null(transform)) { transform <- as_function(transform) } if (!vec_is_list(x)) { if (!is.null(transform)) { x <- transform(x) } if (!is.null(ptype)) { x <- vec_cast(x, ptype, call = error_call) } return(x) } if (!is.null(transform)) { x <- tidyr_new_list(x) x <- map(x, transform) # Can't convert result to list_of, as we can't be certain of element types } if (!is.null(ptype)) { x <- tidyr_new_list(x) x <- vec_cast_common(!!!x, .to = ptype, .call = error_call) x <- new_list_of(x, ptype = ptype) } if (!simplify) { return(x) } # Don't simplify lists of lists, because that typically indicates that # there might be multiple values. 
if (is_list_of(x)) { has_list_of_list <- vec_is_list(list_of_ptype(x)) } else { has_list_of_list <- any(map_lgl(x, vec_is_list)) } if (has_list_of_list) { return(x) } # Don't try and simplify non-vectors. list-of types always contain vectors. if (is_list_of(x)) { has_non_vector <- FALSE } else { has_non_vector <- !list_all_vectors2(x) } if (has_non_vector) { return(x) } out <- tidyr_new_list(x) ptype <- list_of_ptype(x) sizes <- list_sizes(out) # Ensure empty elements are filled in with their correct size 1 equivalent info <- list_replace_null(out, sizes, ptype = ptype) out <- info$x sizes <- info$sizes info <- list_replace_empty_typed(out, sizes, ptype = ptype) out <- info$x sizes <- info$sizes # Don't try to simplify if there are any size >1 left at this point has_non_scalar <- any(sizes != 1L) if (has_non_scalar) { return(x) } # Assume that if combining fails, then we want to return the object # after the `ptype` and `transform` have been applied, but before the # empty element filling and list attribute stripping was applied tryCatch( list_unchop(out, ptype = ptype), vctrs_error_incompatible_type = function(e) x ) } tidyr/R/complete.R0000644000176200001440000001021314357015307013543 0ustar liggesusers#' Complete a data frame with missing combinations of data #' #' Turns implicit missing values into explicit missing values. This is a wrapper #' around [expand()], [dplyr::full_join()] and [replace_na()] that's useful for #' completing missing combinations of data. #' #' @section Grouped data frames: #' With grouped data frames created by [dplyr::group_by()], `complete()` #' operates _within_ each group. Because of this, you cannot complete a grouping #' column. #' #' @inheritParams expand #' @param fill A named list that for each variable supplies a single value to #' use instead of `NA` for missing combinations. #' @param explicit Should both implicit (newly created) and explicit #' (pre-existing) missing values be filled by `fill`? 
By default, this is #' `TRUE`, but if set to `FALSE` this will limit the fill to only implicit #' missing values. #' @export #' @examples #' df <- tibble( #' group = c(1:2, 1, 2), #' item_id = c(1:2, 2, 3), #' item_name = c("a", "a", "b", "b"), #' value1 = c(1, NA, 3, 4), #' value2 = 4:7 #' ) #' df #' #' # Combinations -------------------------------------------------------------- #' # Generate all possible combinations of `group`, `item_id`, and `item_name` #' # (whether or not they appear in the data) #' df %>% complete(group, item_id, item_name) #' #' # Cross all possible `group` values with the unique pairs of #' # `(item_id, item_name)` that already exist in the data #' df %>% complete(group, nesting(item_id, item_name)) #' #' # Within each `group`, generate all possible combinations of #' # `item_id` and `item_name` that occur in that group #' df %>% #' dplyr::group_by(group) %>% #' complete(item_id, item_name) #' #' # Supplying values for new rows --------------------------------------------- #' # Use `fill` to replace NAs with some value. By default, affects both new #' # (implicit) and pre-existing (explicit) missing values. #' df %>% #' complete( #' group, #' nesting(item_id, item_name), #' fill = list(value1 = 0, value2 = 99) #' ) #' #' # Limit the fill to only the newly created (i.e. previously implicit) #' # missing values with `explicit = FALSE` #' df %>% #' complete( #' group, #' nesting(item_id, item_name), #' fill = list(value1 = 0, value2 = 99), #' explicit = FALSE #' ) complete <- function(data, ..., fill = list(), explicit = TRUE) { UseMethod("complete") } on_load({ the$has_dplyr_1_1 <- packageVersion("dplyr") >= "1.0.99" }) #' @export complete.data.frame <- function(data, ..., fill = list(), explicit = TRUE) { check_bool(explicit) out <- expand(data, ...) 
names <- names(out) if (length(names) > 0L) { if (the$has_dplyr_1_1) { out <- dplyr::full_join(out, data, by = names, multiple = "all") } else { out <- dplyr::full_join(out, data, by = names) } } else { # Avoid joining the 1x0 result from `expand()` with `data`. # That causes issues when `data` has zero rows. out <- data } if (explicit) { out <- replace_na(out, replace = fill) } else { new <- !vec_in(out[names], data[names]) slice <- vec_slice(out, new) slice <- replace_na(slice, replace = fill) out <- vec_assign(out, new, slice) } reconstruct_tibble(data, out) } #' @export complete.grouped_df <- function(data, ..., fill = list(), explicit = TRUE) { if (the$has_dplyr_1_1) { reframe <- utils::getFromNamespace("reframe", ns = "dplyr") pick <- utils::getFromNamespace("pick", ns = "dplyr") out <- reframe( data, complete( data = pick(everything()), ..., fill = fill, explicit = explicit ) ) drop <- dplyr::group_by_drop_default(data) dplyr::group_by(out, !!!dplyr::groups(data), .drop = drop) } else { dplyr::summarise( data, complete( data = dplyr::cur_data(), ..., fill = fill, explicit = explicit ), .groups = "keep" ) } } tidyr/R/cpp11.R0000644000176200001440000000070414323611636012663 0ustar liggesusers# Generated by cpp11: do not edit by hand melt_dataframe <- function(data, id_ind, measure_ind, variable_name, value_name, attrTemplate, factorsAsStrings, valueAsFactor, variableAsFactor) { .Call(`_tidyr_melt_dataframe`, data, id_ind, measure_ind, variable_name, value_name, attrTemplate, factorsAsStrings, valueAsFactor, variableAsFactor) } simplifyPieces <- function(pieces, p, fillLeft) { .Call(`_tidyr_simplifyPieces`, pieces, p, fillLeft) } tidyr/R/separate-wider.R0000644000176200001440000005033014520546617014660 0ustar liggesusers#' Split a string into columns #' #' @description #' `r lifecycle::badge("experimental")` #' #' Each of these functions takes a string column and splits it into multiple #' new columns: #' #' * `separate_wider_delim()` splits by delimiter. 
#' * `separate_wider_position()` splits at fixed widths. #' * `separate_wider_regex()` splits with regular expression matches. #' #' These functions are equivalent to [separate()] and [extract()], but use #' [stringr](https://stringr.tidyverse.org/) as the underlying string #' manipulation engine, and their interfaces reflect what we've learned from #' [unnest_wider()] and [unnest_longer()]. #' #' @inheritParams unnest_longer #' @param cols <[`tidy-select`][tidyr_tidy_select]> Columns to separate. #' @param names_sep If supplied, output names will be composed #' of the input column name followed by the separator followed by the #' new column name. Required when `cols` selects multiple columns. #' #' For `separate_wider_delim()` you can specify instead of `names`, in which #' case the names will be generated from the source column name, `names_sep`, #' and a numeric suffix. #' @param names For `separate_wider_delim()`, a character vector of output #' column names. Use `NA` if there are components that you don't want #' to appear in the output; the number of non-`NA` elements determines the #' number of new columns in the result. #' @param delim For `separate_wider_delim()`, a string giving the delimiter #' between values. By default, it is interpreted as a fixed string; use #' [stringr::regex()] and friends to split in other ways. #' @inheritParams rlang::args_dots_empty #' @param too_few What should happen if a value separates into too few #' pieces? #' #' * `"error"`, the default, will throw an error. #' * `"debug"` adds additional columns to the output to help you #' locate and resolve the underlying problem. This option is intended to #' help you debug the issue and address and should not generally remain in #' your final code. #' * `"align_start"` aligns starts of short matches, adding `NA` on the end #' to pad to the correct length. 
#' * `"align_end"` (`separate_wider_delim()` only) aligns the ends of short #' matches, adding `NA` at the start to pad to the correct length. #' @param too_many What should happen if a value separates into too many #' pieces? #' #' * `"error"`, the default, will throw an error. #' * `"debug"` will add additional columns to the output to help you #' locate and resolve the underlying problem. #' * `"drop"` will silently drop any extra pieces. #' * `"merge"` (`separate_wider_delim()` only) will merge together any #' additional pieces. #' @param cols_remove Should the input `cols` be removed from the output? #' Always `FALSE` if `too_few` or `too_many` are set to `"debug"`. #' @returns A data frame based on `data`. It has the same rows, but different #' columns: #' #' * The primary purpose of the functions are to create new columns from #' components of the string. #' For `separate_wider_delim()` the names of new columns come from `names`. #' For `separate_wider_position()` the names come from the names of `widths`. #' For `separate_wider_regex()` the names come from the names of #' `patterns`. #' #' * If `too_few` or `too_many` is `"debug"`, the output will contain additional #' columns useful for debugging: #' #' * `{col}_ok`: a logical vector which tells you if the input was ok or #' not. Use to quickly find the problematic rows. #' * `{col}_remainder`: any text remaining after separation. #' * `{col}_pieces`, `{col}_width`, `{col}_matches`: number of pieces, #' number of characters, and number of matches for `separate_wider_delim()`, #' `separate_wider_position()` and `separate_regexp_wider()` respectively. #' #' * If `cols_remove = TRUE` (the default), the input `cols` will be removed #' from the output. #' #' @export #' @examples #' df <- tibble(id = 1:3, x = c("m-123", "f-455", "f-123")) #' # There are three basic ways to split up a string into pieces: #' # 1. with a delimiter #' df %>% separate_wider_delim(x, delim = "-", names = c("gender", "unit")) #' # 2. 
by length #' df %>% separate_wider_position(x, c(gender = 1, 1, unit = 3)) #' # 3. defining each component with a regular expression #' df %>% separate_wider_regex(x, c(gender = ".", ".", unit = "\\d+")) #' #' # Sometimes you split on the "last" delimiter #' df <- tibble(var = c("race_1", "race_2", "age_bucket_1", "age_bucket_2")) #' # _delim won't help because it always splits on the first delimiter #' try(df %>% separate_wider_delim(var, "_", names = c("var1", "var2"))) #' df %>% separate_wider_delim(var, "_", names = c("var1", "var2"), too_many = "merge") #' # Instead, you can use _regex #' df %>% separate_wider_regex(var, c(var1 = ".*", "_", var2 = ".*")) #' # this works because * is greedy; you can mimic the _delim behaviour with .*? #' df %>% separate_wider_regex(var, c(var1 = ".*?", "_", var2 = ".*")) #' #' # If the number of components varies, it's most natural to split into rows #' df <- tibble(id = 1:4, x = c("x", "x y", "x y z", NA)) #' df %>% separate_longer_delim(x, delim = " ") #' # But separate_wider_delim() provides some tools to deal with the problem #' # The default behaviour tells you that there's a problem #' try(df %>% separate_wider_delim(x, delim = " ", names = c("a", "b"))) #' # You can get additional insight by using the debug options #' df %>% #' separate_wider_delim( #' x, #' delim = " ", #' names = c("a", "b"), #' too_few = "debug", #' too_many = "debug" #' ) #' #' # But you can suppress the warnings #' df %>% #' separate_wider_delim( #' x, #' delim = " ", #' names = c("a", "b"), #' too_few = "align_start", #' too_many = "merge" #' ) #' #' # Or choose to automatically name the columns, producing as many as needed #' df %>% separate_wider_delim(x, delim = " ", names_sep = "", too_few = "align_start") separate_wider_delim <- function( data, cols, delim, ..., names = NULL, names_sep = NULL, names_repair = "check_unique", too_few = c("error", "debug", "align_start", "align_end"), too_many = c("error", "debug", "drop", "merge"), cols_remove = 
TRUE ) { check_installed("stringr") check_data_frame(data) check_required(cols) check_dots_empty() check_string(delim, allow_empty = FALSE) if (is.null(names) && is.null(names_sep)) { cli::cli_abort("Must specify at least one of {.arg names} or {.arg names_sep}.") } check_character(names, allow_null = TRUE) if (is_named(names)) { cli::cli_abort("{.arg names} must be an unnamed character vector.") } too_few <- arg_match(too_few) too_many <- arg_match(too_many) check_bool(cols_remove) error_call %<~% current_env() map_unpack( data, {{ cols }}, function(x, col) str_separate_wider_delim(x, col, names = names, delim = delim, names_sep = names_sep, too_few = too_few, too_many = too_many, cols_remove = cols_remove, error_call = error_call ), names_sep = names_sep, names_repair = names_repair ) } str_separate_wider_delim <- function( x, col, names, delim, names_sep = NULL, too_few = "error", too_many = "error", cols_remove = TRUE, error_call = caller_env() ) { if (is_bare_string(delim)) { delim <- stringr::fixed(delim) } if (too_many == "merge") { if (is.null(names)) { cli::cli_abort( 'Must provide {.arg names} when {.code too_many = "merge"}.', call = error_call ) } n <- length(names) } else { n <- Inf } pieces <- stringr::str_split(x, delim, n = n) lengths <- lengths(pieces) n_pieces <- ifelse(is.na(x), NA, lengths) names <- names %||% as.character(seq_len(int_max(lengths, 0))) p <- length(names) check_df_alignment(col, p, "pieces", n_pieces, too_few = too_few, too_many = too_many, advice_short = c( i = 'Use `too_few = "debug"` to diagnose the problem.', i = 'Use `too_few = "align_start"/"align_end"` to silence this message.' ), advice_long = c( i = 'Use `too_many = "debug"` to diagnose the problem.', i = 'Use `too_many = "drop"/"merge"` to silence this message.' 
), call = error_call ) out <- df_align( x = pieces, names = names, align_direction = if (too_few == "align_end") "end" else "start" ) if (!cols_remove || too_few == "debug" || too_many == "debug") { out[[col]] <- x } if (too_few == "debug" || too_many == "debug") { separate_warn_debug(col, names_sep, c("ok", "pieces", "remainder")) sep_loc <- stringr::str_locate_all(x, delim) sep_last <- map(sep_loc, function(x) if (nrow(x) < p) NA else x[p, "start"]) remainder <- stringr::str_sub(x, sep_last) remainder[is.na(remainder) & !is.na(x)] <- "" problem <- !is.na(x) & ( (too_few == "debug" & n_pieces < p) | (too_many == "debug" & n_pieces > p) ) out[[debug_name(col, names_sep, "ok")]] <- !problem out[[debug_name(col, names_sep, "pieces")]] <- n_pieces out[[debug_name(col, names_sep, "remainder")]] <- remainder } out } #' @rdname separate_wider_delim #' @param widths A named numeric vector where the names become column names, #' and the values specify the column width. Unnamed components will match, #' but not be included in the output. 
#' @export separate_wider_position <- function( data, cols, widths, ..., names_sep = NULL, names_repair = "check_unique", too_few = c("error", "debug", "align_start"), too_many = c("error", "debug", "drop"), cols_remove = TRUE ) { check_installed("stringr") check_data_frame(data) check_required(cols) check_required(widths) if (!is_integerish(widths) || !any(have_name(widths))) { cli::cli_abort("{.arg widths} must be a (partially) named integer vector.") } if (any(widths <= 0)) { cli::cli_abort("All values of {.arg widths} must be positive.") } check_dots_empty() too_few <- arg_match(too_few) too_many <- arg_match(too_many) check_bool(cols_remove) error_call %<~% current_env() map_unpack( data, {{ cols }}, function(x, col) str_separate_wider_position(x, col, widths = widths, names_sep = names_sep, too_few = too_few, too_many = too_many, cols_remove = cols_remove, error_call = error_call ), names_sep = names_sep, names_repair = names_repair ) } str_separate_wider_position <- function(x, col, widths, names_sep = NULL, too_few = "error", too_many = "error", cols_remove = TRUE, error_call = caller_env() ) { breaks <- cumsum(c(1L, unname(widths)))[-(length(widths) + 1L)] expected_width <- sum(widths) width <- stringr::str_length(x) check_df_alignment(col, expected_width, "characters", width, too_few = too_few, too_many = too_many, advice_short = c( i = 'Use `too_few = "debug"` to diagnose the problem.', i = 'Use `too_few = "align_start"` to silence this message.' ), advice_long = c( i = 'Use `too_many = "debug"` to diagnose the problem.', i = 'Use `too_many = "drop"` to silence this message.' 
), call = error_call ) skip <- names(widths) == "" from <- cbind(start = breaks[!skip], length = widths[!skip]) names <- names(widths)[!skip] pieces <- stringr::str_sub_all(x, from) pieces <- map(pieces, function(x) x[x != ""]) out <- df_align( x = pieces, names = names, align_direction = if (too_few == "end") "end" else "start" ) if (!cols_remove || too_few == "debug" || too_many == "debug") { out[[col]] <- x } if (too_few == "debug" || too_many == "debug") { separate_warn_debug(col, names_sep, c("ok", "width", "remainder")) problem <- !is.na(x) & ( (too_few == "debug" & width < expected_width) | (too_many == "debug" & width > expected_width) ) out[[debug_name(col, names_sep, "width")]] <- width out[[debug_name(col, names_sep, "remainder")]] <- stringr::str_sub(x, expected_width + 1, width) out[[debug_name(col, names_sep, "ok")]] <- !problem } out } #' @rdname separate_wider_delim #' @param patterns A named character vector where the names become column names #' and the values are regular expressions that match the contents of the #' vector. Unnamed components will match, but not be included in the output. 
#' @export separate_wider_regex <- function( data, cols, patterns, ..., names_sep = NULL, names_repair = "check_unique", too_few = c("error", "debug", "align_start"), cols_remove = TRUE ) { check_installed("stringr") check_data_frame(data) check_required(cols) check_character(patterns) if (length(patterns) > 0 && all(!have_name(patterns))) { cli::cli_abort("{.arg patterns} must be a named character vector.") } check_dots_empty() check_string(names_sep, allow_null = TRUE) too_few <- arg_match(too_few) check_bool(cols_remove) error_call %<~% current_env() map_unpack( data, {{ cols }}, function(x, col) str_separate_wider_regex(x, col, patterns = patterns, names_sep = names_sep, too_few = too_few, cols_remove = cols_remove, error_call = error_call ), names_sep = names_sep, names_repair = names_repair ) } str_separate_wider_regex <- function(x, col, patterns, names_sep = NULL, too_few = "error", cols_remove = TRUE, error_call = caller_env()) { has_name <- names2(patterns) != "" groups <- stringr::str_c("(", ifelse(has_name, "", "?:"), patterns, ")") full_match <- stringr::str_c("^", stringr::str_flatten(groups, collapse = ""), "$") match <- stringr::str_match(x, full_match) if (ncol(match) != sum(has_name) + 1L) { cli::cli_abort(c( "Invalid number of groups.", i = 'Did you use "()" instead of "(?:)" inside {.arg patterns}?' ), call = error_call) } matches <- match[, -1, drop = FALSE] out <- as_tibble(matches, .name_repair = "none") colnames(out) <- names2(patterns)[has_name] if (!cols_remove || too_few == "debug") { out[[col]] <- x } match_count <- ifelse(is.na(x), NA, length(patterns)) remainder <- ifelse(is.na(x), NA, "") problems <- !is.na(x) & is.na(match[, 1]) no_match <- which(problems) if (length(no_match) > 0) { if (too_few == "error") { cli::cli_abort(c( "Expected each value of {.var {col}} to match the pattern, the whole pattern, and nothing but the pattern.", "!" 
= "{length(no_match)} value{?s} {?has/have} problem{?s}.", i = 'Use {.code too_few = "debug"} to diagnose the problem.', i = 'Use {.code too_few = "align_start"} to silence this message.' ), call = error_call) } # Progressively relax the matches for (i in rev(seq_along(groups))) { partial <- paste0("^", paste(groups[1:i], collapse = ""), "(.*)$") match <- stringr::str_match(x[no_match], partial) has_match <- !is.na(match[, 1]) match_idx <- no_match[has_match] if (length(match_idx) == 0) { next } matches <- as_tibble(match[has_match, -1, drop = FALSE], .name_repair = "none") cols <- names2(patterns)[has_name][1:(ncol(matches) - 1)] out[match_idx, cols] <- matches[1:(ncol(matches) - 1)] remainder[match_idx] <- matches[[ncol(matches)]] match_count[match_idx] <- i no_match <- no_match[!has_match] if (length(no_match) == 0) { break } } if (length(no_match) > 0) { match_count[no_match] <- 0L remainder[no_match] <- x[no_match] } } if (too_few == "debug") { separate_warn_debug(col, names_sep, c("ok", "matches", "remainder")) out[debug_name(col, names_sep, "ok")] <- !problems out[debug_name(col, names_sep, "matches")] <- match_count out[debug_name(col, names_sep, "remainder")] <- remainder } out } # helpers ----------------------------------------------------------------- map_unpack <- function(data, cols, fun, names_sep, names_repair, error_call = caller_env()) { cols <- tidyselect::eval_select( enquo(cols), data = data, allow_rename = FALSE, allow_empty = FALSE, error_call = error_call ) col_names <- names(cols) for (col in col_names) { data[[col]] <- fun(data[[col]], col) } unpack( data = data, cols = all_of(col_names), names_sep = names_sep, names_repair = names_repair, error_call = error_call ) } # cf. 
df_simplify df_align <- function( x, names, align_direction = c("start", "end") ) { vec_check_list(x) if (length(x) == 0) { return(new_data_frame(rep_named(names, list(character())), size = 0)) } align_direction <- arg_match(align_direction) n <- length(x) p <- length(names) out <- df_align_transpose(x, p, align_direction = align_direction) out <- out[!is.na(names)] names(out) <- names[!is.na(names)] new_data_frame(out, size = n) } df_align_transpose <- function(x, p, align_direction = "start") { sizes <- list_sizes(x) # Find the start location of each piece starts <- cumsum(c(1L, sizes[-length(sizes)])) # Combine all pieces sequentially, zapping names for performance. x <- list_unchop(x, ptype = character(), name_spec = zap()) indices <- vector("list", p) if (align_direction == "start") { # General idea is to use the `starts` to slice with, replacing them with # `NA` when we are too short, and then advancing the `starts` by 1 at each # iteration. for (i in seq_len(p)) { small <- sizes < i index <- starts index[small] <- NA_integer_ indices[[i]] <- index starts <- starts + 1L } } else { # `"end"` is a little tricky because we have to hold the start # location constant if we couldn't use it because the piece was too small. # Pieces that are too large are automatically ignored. gap <- sizes - p for (i in seq_len(p)) { small <- gap < 0 not_small <- !small index <- starts index[small] <- NA_integer_ indices[[i]] <- index starts[not_small] <- starts[not_small] + 1L gap <- gap + 1L } } vec_chop(x, indices) } check_df_alignment <- function( col, p, type, sizes, too_few, too_many, advice_short = NULL, advice_long = NULL, call = caller_env()) { n_short <- sum(sizes < p, na.rm = TRUE) n_long <- sum(sizes > p, na.rm = TRUE) error_short <- too_few == "error" && n_short > 0 error_long <- too_many == "error" && n_long > 0 if (!error_short && !error_long) { return() } cli::cli_abort(c( "Expected {p} {type} in each element of {.var {col}}.", "!" 
= if (error_short) "{n_short} value{?s} {?was/were} too short.", if (error_short) advice_short, "!" = if (error_long) "{n_long} value{?s} {?was/were} too long.", if (error_long) advice_long ), call = call) } separate_warn_debug <- function(col, names_sep, vars) { vars <- debug_name(col, names_sep, vars) cli::cli_warn("Debug mode activated: adding variables {.var {vars}}.") } debug_name <- function(col, names_sep, var) { paste0(col, names_sep %||% "_", var) } tidyr/R/seq.R0000644000176200001440000000265714323620576012543 0ustar liggesusers#' Create the full sequence of values in a vector #' #' This is useful if you want to fill in missing values that should have #' been observed but weren't. For example, `full_seq(c(1, 2, 4, 6), 1)` #' will return `1:6`. #' #' @param x A numeric vector. #' @param period Gap between each observation. The existing data will be #' checked to ensure that it is actually of this periodicity. #' @param tol Numerical tolerance for checking periodicity. #' @export #' @examples #' full_seq(c(1, 2, 4, 5, 10), 1) full_seq <- function(x, period, tol = 1e-6) { UseMethod("full_seq") } #' @export full_seq.numeric <- function(x, period, tol = 1e-6) { check_number_decimal(period) check_number_decimal(tol, min = 0) rng <- range(x, na.rm = TRUE) if (any(((x - rng[1]) %% period > tol) & (period - (x - rng[1]) %% period > tol))) { cli::cli_abort("{.arg x} is not a regular sequence.") } # in cases where the last element is within tolerance, pad it so that # the output length is correct if (period - ((rng[2] - rng[1]) %% period) <= tol) { rng[2] <- rng[2] + tol } seq(rng[1], rng[2], by = period) } #' @export full_seq.Date <- function(x, period, tol = 1e-6) { restore(x, full_seq(as.numeric(x), period, tol)) } #' @export full_seq.POSIXct <- function(x, period, tol = 1e-6) { restore(x, full_seq(as.numeric(x), period, tol)) } restore <- function(old, new) { mostattributes(new) <- attributes(old) new } 
tidyr/R/separate.R0000644000176200001440000001556314363516001013546 0ustar liggesusers#' Separate a character column into multiple columns with a regular #' expression or numeric locations #' #' @description #' `r lifecycle::badge("superseded")` #' #' `separate()` has been superseded in favour of [separate_wider_position()] #' and [separate_wider_delim()] because the two functions make the two uses #' more obvious, the API is more polished, and the handling of problems is #' better. Superseded functions will not go away, but will only receive #' critical bug fixes. #' #' Given either a regular expression or a vector of character positions, #' `separate()` turns a single character column into multiple columns. #' #' @inheritParams extract #' @param sep Separator between columns. #' #' If character, `sep` is interpreted as a regular expression. The default #' value is a regular expression that matches any sequence of #' non-alphanumeric values. #' #' If numeric, `sep` is interpreted as character positions to split at. Positive #' values start at 1 at the far-left of the string; negative value start at -1 at #' the far-right of the string. The length of `sep` should be one less than #' `into`. #' @param extra If `sep` is a character vector, this controls what #' happens when there are too many pieces. There are three valid options: #' #' * `"warn"` (the default): emit a warning and drop extra values. #' * `"drop"`: drop any extra values without a warning. #' * `"merge"`: only splits at most `length(into)` times #' @param fill If `sep` is a character vector, this controls what #' happens when there are not enough pieces. There are three valid options: #' #' * `"warn"` (the default): emit a warning and fill from the right #' * `"right"`: fill with missing values on the right #' * `"left"`: fill with missing values on the left #' @seealso [unite()], the complement, [extract()] which uses regular #' expression capturing groups. 
#' @export
#' @examples
#' # If you want to split by any non-alphanumeric value (the default):
#' df <- tibble(x = c(NA, "x.y", "x.z", "y.z"))
#' df %>% separate(x, c("A", "B"))
#'
#' # If you just want the second variable:
#' df %>% separate(x, c(NA, "B"))
#'
#' # We now recommend separate_wider_delim() instead:
#' df %>% separate_wider_delim(x, ".", names = c("A", "B"))
#' df %>% separate_wider_delim(x, ".", names = c(NA, "B"))
#'
#' # Controlling uneven splits -------------------------------------------------
#' # If every row doesn't split into the same number of pieces, use
#' # the extra and fill arguments to control what happens:
#' df <- tibble(x = c("x", "x y", "x y z", NA))
#' df %>% separate(x, c("a", "b"))
#' # The same behaviour as previous, but drops the c without warnings:
#' df %>% separate(x, c("a", "b"), extra = "drop", fill = "right")
#' # Opposite of previous, keeping the c and filling left:
#' df %>% separate(x, c("a", "b"), extra = "merge", fill = "left")
#' # Or you can keep all three:
#' df %>% separate(x, c("a", "b", "c"))
#'
#' # To only split a specified number of times use extra = "merge":
#' df <- tibble(x = c("x: 123", "y: error: 7"))
#' df %>% separate(x, c("key", "value"), ": ", extra = "merge")
#'
#' # Controlling column types --------------------------------------------------
#' # convert = TRUE detects column classes:
#' df <- tibble(x = c("x:1", "x:2", "y:4", "z", NA))
#' df %>% separate(x, c("key", "value"), ":") %>% str()
#' df %>% separate(x, c("key", "value"), ":", convert = TRUE) %>% str()
separate <- function(data, col, into, sep = "[^[:alnum:]]+", remove = TRUE,
                     convert = FALSE, extra = "warn", fill = "warn", ...) {
  check_dots_used()
  UseMethod("separate")
}

#' @export
separate.data.frame <- function(data, col, into, sep = "[^[:alnum:]]+",
                                remove = TRUE, convert = FALSE,
                                extra = "warn", fill = "warn", ...) {
  check_required(col)
  check_bool(remove)

  # Resolve the tidyselect column and coerce to character before splitting.
  var <- tidyselect::vars_pull(names(data), !!enquo(col))
  value <- as.character(data[[var]])

  new_cols <- str_separate(value,
    into = into,
    sep = sep,
    convert = convert,
    extra = extra,
    fill = fill
  )

  out <- df_append(data, new_cols, var, remove = remove)
  reconstruct_tibble(data, out, if (remove) var else NULL)
}

# Split `x` into a tibble of new columns, dispatching on the type of `sep`:
# numeric positions -> strsep(), a single regex string -> str_split_fixed().
str_separate <- function(x, into, sep, convert = FALSE, extra = "warn",
                         fill = "warn", error_call = caller_env()) {
  check_character(into, call = error_call)
  check_bool(convert, call = error_call)

  if (is.numeric(sep)) {
    out <- strsep(x, sep)
  } else if (is_string(sep)) {
    check_not_stringr_pattern(sep, call = error_call)
    out <- str_split_fixed(x, sep, length(into), extra = extra, fill = fill)
  } else {
    # Trailing period added for consistency with the package's other
    # {.obj_type_friendly} error messages (e.g. in `replace_na()`).
    cli::cli_abort(
      "{.arg sep} must be a string or numeric vector, not {.obj_type_friendly {sep}}.",
      call = error_call
    )
  }

  names(out) <- as_utf8_character(into)
  # An `NA` name drops that piece from the output.
  out <- out[!is.na(names(out))]
  if (convert) {
    out[] <- map(out, type.convert, as.is = TRUE)
  }
  as_tibble(out)
}

# Split each string at fixed character positions. Negative positions count
# back from the end of each string (clamped at 0 for short strings).
strsep <- function(x, sep) {
  nchar <- nchar(x)

  pos <- map(sep, function(i) {
    if (i >= 0) {
      i
    } else {
      pmax(0, nchar + i)
    }
  })
  pos <- c(list(0), pos, list(nchar))

  # seq_len() instead of 1:(n) -- safe by construction and idiomatic.
  map(seq_len(length(pos) - 1), function(i) {
    substr(x, pos[[i]] + 1, pos[[i + 1]])
  })
}

# Split each string on a regex into exactly `n` pieces, applying the `extra`
# and `fill` policies (warning where requested).
str_split_fixed <- function(value, sep, n, extra = "warn", fill = "warn") {
  if (extra == "error") {
    warn(glue(
      "`extra = \"error\"` is deprecated. \\
      Please use `extra = \"warn\"` instead"
    ))
    extra <- "warn"
  }

  extra <- arg_match(extra, c("warn", "merge", "drop"))
  fill <- arg_match(fill, c("warn", "left", "right"))

  # "merge" stops splitting after n pieces; otherwise split on every match.
  n_max <- if (extra == "merge") n else -1L
  pieces <- str_split_n(value, sep, n_max = n_max)
  simp <- simplifyPieces(pieces, n, fill == "left")

  n_big <- length(simp$too_big)
  if (extra == "warn" && n_big > 0) {
    idx <- list_indices(simp$too_big)
    warn(glue("Expected {n} pieces. Additional pieces discarded in {n_big} rows [{idx}]."))
  }

  n_sml <- length(simp$too_sml)
  if (fill == "warn" && n_sml > 0) {
    idx <- list_indices(simp$too_sml)
    warn(glue("Expected {n} pieces. Missing pieces filled with `NA` in {n_sml} rows [{idx}]."))
  }

  simp$strings
}

# Split each element of `x` on `pattern`, keeping at most `n_max` pieces
# (n_max < 0 means unlimited).
str_split_n <- function(x, pattern, n_max = -1) {
  if (is.factor(x)) {
    x <- as.character(x)
  }
  m <- gregexpr(pattern, x, perl = TRUE)
  if (n_max > 0) {
    # Keeping the first `n_max - 1` match locations yields at most
    # `n_max` pieces per string.
    m <- map(m, function(x) slice_match(x, seq_along(x) < n_max))
  }
  regmatches(x, m, invert = TRUE)
}

# Subset a gregexpr() match object, carrying its attributes along.
slice_match <- function(x, i) {
  structure(
    x[i],
    match.length = attr(x, "match.length")[i],
    index.type = attr(x, "index.type"),
    useBytes = attr(x, "useBytes")
  )
}

# Render up to `max` indices as "1, 2, 3", eliding the rest with "...".
list_indices <- function(x, max = 20) {
  if (length(x) > max) {
    x <- c(x[seq_len(max)], "...")
  }
  paste(x, collapse = ", ")
}

# stringr modifiers (regex(), fixed(), ...) silently behave as plain strings
# here, so reject them explicitly.
check_not_stringr_pattern <- function(x, arg = caller_arg(x), call = caller_env()) {
  if (inherits_any(x, c("pattern", "stringr_pattern"))) {
    cli::cli_abort("{.arg {arg}} can't use modifiers from stringr.", call = call)
  }
}
# tidyr/R/compat-types-check.R0000644000176200001440000002435114323620576015446 0ustar liggesusers
# nocov start --- r-lib/rlang compat-types-check
#
# Dependencies
# ============
#
# - compat-obj-type.R
#
# Changelog
# =========
#
# 2022-10-07:
# - `check_number_whole()` and `_decimal()` no longer treat
#   non-numeric types such as factors or dates as numbers. Numeric
#   types are detected with `is.numeric()`.
#
# 2022-10-04:
# - Added `check_name()` that forbids the empty string.
#   `check_string()` allows the empty string by default.
#
# 2022-09-28:
# - Removed `what` arguments.
# - Added `allow_na` and `allow_null` arguments.
# - Added `allow_decimal` and `allow_infinite` arguments.
# - Improved errors with absent arguments.
#
#
# 2022-09-16:
# - Unprefixed usage of rlang functions with `rlang::` to
#   avoid onLoad issues when called from rlang (#1482).
#
# 2022-08-11:
# - Added changelog.
# Scalars ----------------------------------------------------------------- check_bool <- function(x, ..., allow_na = FALSE, allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_bool(x)) { return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } if (allow_na && identical(x, NA)) { return(invisible(NULL)) } } stop_input_type( x, c("`TRUE`", "`FALSE`"), ..., allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) } check_string <- function(x, ..., allow_empty = TRUE, allow_na = FALSE, allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { is_string <- .rlang_check_is_string( x, allow_empty = allow_empty, allow_na = allow_na, allow_null = allow_null ) if (is_string) { return(invisible(NULL)) } } stop_input_type( x, "a single string", ..., allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) } .rlang_check_is_string <- function(x, allow_empty, allow_na, allow_null) { if (is_string(x)) { if (allow_empty || !is_string(x, "")) { return(TRUE) } } if (allow_null && is_null(x)) { return(TRUE) } if (allow_na && (identical(x, NA) || identical(x, na_chr))) { return(TRUE) } FALSE } check_name <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { is_string <- .rlang_check_is_string( x, allow_empty = FALSE, allow_na = FALSE, allow_null = allow_null ) if (is_string) { return(invisible(NULL)) } } stop_input_type( x, "a valid name", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } check_number_decimal <- function(x, ..., min = -Inf, max = Inf, allow_infinite = TRUE, allow_na = FALSE, allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { .rlang_types_check_number( x, ..., min = min, max = max, allow_decimal = TRUE, allow_infinite = allow_infinite, allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) } check_number_whole <- function(x, ..., min = -Inf, max = 
Inf, allow_na = FALSE, allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { .rlang_types_check_number( x, ..., min = min, max = max, allow_decimal = FALSE, allow_infinite = FALSE, allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) } .rlang_types_check_number <- function(x, ..., min = -Inf, max = Inf, allow_decimal = FALSE, allow_infinite = FALSE, allow_na = FALSE, allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (allow_decimal) { what <- "a number" } else { what <- "a whole number" } .stop <- function(x, what, ...) stop_input_type( x, what, ..., allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) if (!missing(x)) { is_number <- is_number( x, allow_decimal = allow_decimal, allow_infinite = allow_infinite ) if (is_number) { if (min > -Inf && max < Inf) { what <- sprintf("a number between %s and %s", min, max) } else { what <- NULL } if (x < min) { what <- what %||% sprintf("a number larger than %s", min) .stop(x, what, ...) } if (x > max) { what <- what %||% sprintf("a number smaller than %s", max) .stop(x, what, ...) } return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } if (allow_na && (identical(x, NA) || identical(x, na_dbl) || identical(x, na_int))) { return(invisible(NULL)) } } .stop(x, what, ...) 
} is_number <- function(x, allow_decimal = FALSE, allow_infinite = FALSE) { if (!typeof(x) %in% c("integer", "double")) { return(FALSE) } if (!is.numeric(x)) { return(FALSE) } if (length(x) != 1) { return(FALSE) } if (is.na(x)) { return(FALSE) } if (!allow_decimal && !is_integerish(x)) { return(FALSE) } if (!allow_infinite && is.infinite(x)) { return(FALSE) } TRUE } check_symbol <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_symbol(x)) { return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a symbol", ..., allow_null = allow_null, arg = arg, call = call ) } check_arg <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_symbol(x)) { return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "an argument name", ..., allow_null = allow_null, arg = arg, call = call ) } check_call <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_call(x)) { return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a defused call", ..., allow_null = allow_null, arg = arg, call = call ) } check_environment <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_environment(x)) { return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "an environment", ..., allow_null = allow_null, arg = arg, call = call ) } check_function <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_function(x)) { return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a function", ..., allow_null = allow_null, arg = arg, call = call ) } check_closure <- function(x, ..., allow_null = 
FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_closure(x)) { return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "an R function", ..., allow_null = allow_null, arg = arg, call = call ) } check_formula <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_formula(x)) { return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a formula", ..., allow_null = allow_null, arg = arg, call = call ) } # Vectors ----------------------------------------------------------------- check_character <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_character(x)) { return(invisible(NULL)) } if (allow_null && is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a character vector", ..., allow_null = allow_null, arg = arg, call = call ) } # nocov end tidyr/R/extract.R0000644000176200001440000000746114363516001013412 0ustar liggesusers#' Extract a character column into multiple columns using regular #' expression groups #' #' @description #' `r lifecycle::badge("superseded")` #' #' `extract()` has been superseded in favour of [separate_wider_regex()] #' because it has a more polished API and better handling of problems. #' Superseded functions will not go away, but will only receive critical bug #' fixes. #' #' Given a regular expression with capturing groups, `extract()` turns #' each group into a new column. If the groups don't match, or the input #' is NA, the output will be NA. #' #' @inheritParams expand #' @param col <[`tidy-select`][tidyr_tidy_select]> Column to expand. #' @param into Names of new variables to create as character vector. #' Use `NA` to omit the variable in the output. #' @param regex A string representing a regular expression used to extract the #' desired values. 
#' There should be one group (defined by `()`) for each
#' element of `into`.
#' @param remove If `TRUE`, remove input column from output data frame.
#' @param convert If `TRUE`, will run [type.convert()] with
#' `as.is = TRUE` on new columns. This is useful if the component
#' columns are integer, numeric or logical.
#'
#' NB: this will cause string `"NA"`s to be converted to `NA`s.
#' @param ... Additional arguments passed on to methods.
#' @seealso [separate()] to split up by a separator.
#' @export
#' @examples
#' df <- tibble(x = c(NA, "a-b", "a-d", "b-c", "d-e"))
#' df %>% extract(x, "A")
#' df %>% extract(x, c("A", "B"), "([[:alnum:]]+)-([[:alnum:]]+)")
#'
#' # Now recommended
#' df %>%
#'   separate_wider_regex(
#'     x,
#'     patterns = c(A = "[[:alnum:]]+", "-", B = "[[:alnum:]]+")
#'   )
#'
#' # If no match, NA:
#' df %>% extract(x, c("A", "B"), "([a-d]+)-([a-d]+)")
extract <- function(data, col, into, regex = "([[:alnum:]]+)", remove = TRUE,
                    convert = FALSE, ...) {
  check_dots_used()
  UseMethod("extract")
}

#' @export
extract.data.frame <- function(data, col, into, regex = "([[:alnum:]]+)",
                               remove = TRUE, convert = FALSE, ...) {
  check_required(col)
  var <- tidyselect::vars_pull(names(data), !!enquo(col))
  value <- as.character(data[[var]])

  new_cols <- str_extract(value, into = into, regex = regex, convert = convert)
  out <- df_append(data, new_cols, var, remove = remove)
  reconstruct_tibble(data, out, if (remove) var else chr())
}

# Turn the regex capture groups matched against `x` into a tibble of new
# columns named by `into`.
str_extract <- function(x, into, regex, convert = FALSE, error_call = caller_env()) {
  check_string(regex, call = error_call)
  check_not_stringr_pattern(regex, call = error_call)
  check_character(into, call = error_call)
  check_bool(convert, call = error_call)

  out <- str_match_first(x, regex)
  if (length(out) != length(into)) {
    cli::cli_abort(
      "{.arg regex} should define {length(into)} groups; {length(out)} found.",
      call = error_call
    )
  }

  # Handle duplicated names: the pieces sharing a name are pasted together.
  if (anyDuplicated(into)) {
    pieces <- split(out, into)
    into <- names(pieces)
    out <- map(pieces, pmap_chr, vec_paste0)
  }

  into <- as_utf8_character(into)

  # An `NA` name omits that group from the output.
  non_na_into <- !is.na(into)
  out <- out[non_na_into]
  names(out) <- into[non_na_into]

  out <- as_tibble(out)
  if (convert) {
    out[] <- map(out, type.convert, as.is = TRUE)
  }
  out
}

# Helpers -----------------------------------------------------------------

# Locate the first regex match in each string and return one character
# vector per capture group (non-matches become NA).
str_match_first <- function(string, regex) {
  loc <- regexpr(regex, string, perl = TRUE)
  loc <- group_loc(loc)

  out <- map(
    seq_len(loc$matches),
    function(i) substr(string, loc$start[, i], loc$end[, i])
  )
  # Column 1 is the whole match; keep only the capture groups.
  out[-1]
}

# Convert a regexpr() result into start/end matrices: column 1 is the full
# match, remaining columns are the capture groups. Failed matches (-1) are
# turned into NA so substr() propagates them.
group_loc <- function(x) {
  start <- cbind(as.vector(x), attr(x, "capture.start"))
  end <- start + cbind(attr(x, "match.length"), attr(x, "capture.length")) - 1L

  no_match <- start == -1L
  start[no_match] <- NA
  end[no_match] <- NA

  list(matches = ncol(start), start = start, end = end)
}
# tidyr/R/replace_na.R0000644000176200001440000000523714332223160014025 0ustar liggesusers
#' Replace NAs with specified values
#'
#' @param data A data frame or vector.
#' @param replace If `data` is a data frame, `replace` takes a named list of
#' values, with one value for each column that has missing values to be
#' replaced. Each value in `replace` will be cast to the type of the column
#' in `data` that it is being used as a replacement in.
#'
#' If `data` is a vector, `replace` takes a single value. This single value
#' replaces all of the missing values in the vector. `replace` will be cast
#' to the type of `data`.
#' @param ... Additional arguments for methods. Currently unused.
#' @return
#' `replace_na()` returns an object with the same type as `data`.
#' @seealso [dplyr::na_if()] to replace specified values with `NA`s;
#' [dplyr::coalesce()] to replace `NA`s with values from other vectors.
#' @export
#' @examples
#' # Replace NAs in a data frame
#' df <- tibble(x = c(1, 2, NA), y = c("a", NA, "b"))
#' df %>% replace_na(list(x = 0, y = "unknown"))
#'
#' # Replace NAs in a vector
#' df %>% dplyr::mutate(x = replace_na(x, 0))
#' # OR
#' df$x %>% replace_na(0)
#' df$y %>% replace_na("unknown")
#'
#' # Replace NULLs in a list: NULLs are the list-col equivalent of NAs
#' df_list <- tibble(z = list(1:5, NULL, 10:20))
#' df_list %>% replace_na(list(z = list(5)))
replace_na <- function(data, replace, ...) {
  check_dots_used()
  UseMethod("replace_na")
}

#' @export
replace_na.default <- function(data, replace = NA, ...) {
  check_replacement(replace, "data")
  if (vec_any_missing(data)) {
    missing <- vec_detect_missing(data)
    data <- vec_assign(data, missing, replace, x_arg = "data", value_arg = "replace")
  }
  data
}

#' @export
replace_na.data.frame <- function(data, replace = list(), ...) {
  if (!vec_is_list(replace)) {
    cli::cli_abort("{.arg replace} must be a list, not {.obj_type_friendly {replace}}.")
  }

  # Only columns present in both `data` and `replace` are touched.
  names <- intersect(names(replace), names(data))

  # Precompute the `data$col` / `replace$col` labels used in cast errors.
  col_args <- as.character(glue("data${names}"))
  value_args <- as.character(glue("replace${names}"))

  for (i in seq_along(names)) {
    name <- names[[i]]
    col <- data[[name]]
    value <- replace[[name]]
    col_arg <- col_args[[i]]
    value_arg <- value_args[[i]]

    check_replacement(value, col_arg)

    if (vec_any_missing(col)) {
      missing <- vec_detect_missing(col)
      data[[name]] <- vec_assign(
        x = col,
        i = missing,
        value = value,
        x_arg = col_arg,
        value_arg = value_arg
      )
    }
  }

  data
}

# Replacements must be length 1 so they can fill any number of holes.
check_replacement <- function(x, var, call = caller_env()) {
  n <- vec_size(x)
  if (n != 1) {
    cli::cli_abort(
      "Replacement for `{var}` must be length 1, not length {n}.",
      call = call
    )
  }
}
# tidyr/R/fill.R0000644000176200001440000000772314357015307012675 0ustar liggesusers
#' Fill in missing values with previous or next value
#'
#' Fills missing values in selected columns using the next or previous entry.
#' This is useful in the common output format where values are not repeated,
#' and are only recorded when they change.
#'
#' Missing values are replaced in atomic vectors; `NULL`s are replaced in lists.
#'
#' @section Grouped data frames:
#' With grouped data frames created by [dplyr::group_by()], `fill()` will be
#' applied _within_ each group, meaning that it won't fill across group
#' boundaries.
#'
#' @param data A data frame.
#' @param ... <[`tidy-select`][tidyr_tidy_select]> Columns to fill.
#' @param .direction Direction in which to fill missing values. Currently
#' either "down" (the default), "up", "downup" (i.e. first down and then up)
#' or "updown" (first up and then down).
#' @export
#' @examples
#' # direction = "down" --------------------------------------------------------
#' # Value (year) is recorded only when it changes
#' sales <- tibble::tribble(
#'   ~quarter, ~year, ~sales,
#'   "Q1", 2000, 66013,
#'   "Q2", NA, 69182,
#'   "Q3", NA, 53175,
#'   "Q4", NA, 21001,
#'   "Q1", 2001, 46036,
#'   "Q2", NA, 58842,
#'   "Q3", NA, 44568,
#'   "Q4", NA, 50197,
#'   "Q1", 2002, 39113,
#'   "Q2", NA, 41668,
#'   "Q3", NA, 30144,
#'   "Q4", NA, 52897,
#'   "Q1", 2004, 32129,
#'   "Q2", NA, 67686,
#'   "Q3", NA, 31768,
#'   "Q4", NA, 49094
#' )
#' # `fill()` defaults to replacing missing data from top to bottom
#' sales %>% fill(year)
#'
#' # direction = "up" ----------------------------------------------------------
#' # Value (pet_type) is missing above
#' tidy_pets <- tibble::tribble(
#'   ~rank, ~pet_type, ~breed,
#'   1L, NA, "Boston Terrier",
#'   2L, NA, "Retrievers (Labrador)",
#'   3L, NA, "Retrievers (Golden)",
#'   4L, NA, "French Bulldogs",
#'   5L, NA, "Bulldogs",
#'   6L, "Dog", "Beagles",
#'   1L, NA, "Persian",
#'   2L, NA, "Maine Coon",
#'   3L, NA, "Ragdoll",
#'   4L, NA, "Exotic",
#'   5L, NA, "Siamese",
#'   6L, "Cat", "American Short"
#' )
#'
#' # For values that are missing above you can use `.direction = "up"`
#' tidy_pets %>%
#'   fill(pet_type, .direction = "up")
#'
#' # direction = "downup" ------------------------------------------------------
#' # Value (n_squirrels) is missing above and below within a group
#' squirrels <- tibble::tribble(
#'   ~group, ~name, ~role, ~n_squirrels,
#'   1, "Sam", "Observer", NA,
#'   1, "Mara", "Scorekeeper", 8,
#'   1, "Jesse", "Observer", NA,
#'   1, "Tom", "Observer", NA,
#'   2, "Mike", "Observer", NA,
#'   2, "Rachael", "Observer", NA,
#'   2, "Sydekea", "Scorekeeper", 14,
#'   2, "Gabriela", "Observer", NA,
#'   3, "Derrick", "Observer", NA,
#'   3, "Kara", "Scorekeeper", 9,
#'   3, "Emily", "Observer", NA,
#'   3, "Danielle", "Observer", NA
#' )
#'
#' # The values are inconsistently missing by position within the group
#' # Use .direction = "downup" to fill missing values in both directions
#' squirrels %>%
#'   dplyr::group_by(group) %>%
#'   fill(n_squirrels, .direction = "downup") %>%
#'   dplyr::ungroup()
#'
#' # Using `.direction = "updown"` accomplishes the same goal in this example
fill <- function(data, ..., .direction = c("down", "up", "downup", "updown")) {
  check_dots_unnamed()
  UseMethod("fill")
}

#' @export
fill.data.frame <- function(data, ..., .direction = c("down", "up", "downup", "updown")) {
  # Resolve the tidyselect column selection; renaming is not meaningful here.
  vars <- tidyselect::eval_select(expr(c(...)), data, allow_rename = FALSE)

  .direction <- arg_match0(
    arg = .direction,
    values = c("down", "up", "downup", "updown")
  )

  # vctrs does the actual carrying forward/backward of values per column.
  fill_one <- function(col) {
    vec_fill_missing(col, direction = .direction)
  }
  dplyr::mutate_at(data, .vars = dplyr::vars(any_of(vars)), .funs = fill_one)
}
# tidyr/R/pivot-long.R0000644000176200001440000004233614363516001014036 0ustar liggesusers
#' Pivot data from wide to long
#'
#' @description
#' `pivot_longer()` "lengthens" data, increasing the number of rows and
#' decreasing the number of columns. The inverse transformation is
#' [pivot_wider()]
#'
#' Learn more in `vignette("pivot")`.
#'
#' @details
#' `pivot_longer()` is an updated approach to [gather()], designed to be both
#' simpler to use and to handle more use cases. We recommend you use
#' `pivot_longer()` for new code; `gather()` isn't going away but is no longer
#' under active development.
#'
#' @param data A data frame to pivot.
#' @param cols <[`tidy-select`][tidyr_tidy_select]> Columns to pivot into
#' longer format.
#' @param cols_vary When pivoting `cols` into longer format, how should the
#' output rows be arranged relative to their original row number?
#'
#' * `"fastest"`, the default, keeps individual rows from `cols` close
#' together in the output. This often produces intuitively ordered output
#' when you have at least one key column from `data` that is not involved in
#' the pivoting process.
#'
#' * `"slowest"` keeps individual columns from `cols` close together in the
#' output.
This often produces intuitively ordered output when you utilize #' all of the columns from `data` in the pivoting process. #' @param names_to A character vector specifying the new column or columns to #' create from the information stored in the column names of `data` specified #' by `cols`. #' #' * If length 0, or if `NULL` is supplied, no columns will be created. #' #' * If length 1, a single column will be created which will contain the #' column names specified by `cols`. #' #' * If length >1, multiple columns will be created. In this case, one of #' `names_sep` or `names_pattern` must be supplied to specify how the #' column names should be split. There are also two additional character #' values you can take advantage of: #' #' * `NA` will discard the corresponding component of the column name. #' #' * `".value"` indicates that the corresponding component of the column #' name defines the name of the output column containing the cell values, #' overriding `values_to` entirely. #' @param names_prefix A regular expression used to remove matching text #' from the start of each variable name. #' @param names_sep,names_pattern If `names_to` contains multiple values, #' these arguments control how the column name is broken up. #' #' `names_sep` takes the same specification as [separate()], and can either #' be a numeric vector (specifying positions to break on), or a single string #' (specifying a regular expression to split on). #' #' `names_pattern` takes the same specification as [extract()], a regular #' expression containing matching groups (`()`). #' #' If these arguments do not give you enough control, use #' `pivot_longer_spec()` to create a spec object and process manually as #' needed. #' @param names_repair What happens if the output has invalid column names? #' The default, `"check_unique"` is to error if the columns are duplicated. #' Use `"minimal"` to allow duplicates in the output, or `"unique"` to #' de-duplicated by adding numeric suffixes. 
See [vctrs::vec_as_names()] #' for more options. #' @param values_to A string specifying the name of the column to create #' from the data stored in cell values. If `names_to` is a character #' containing the special `.value` sentinel, this value will be ignored, #' and the name of the value column will be derived from part of the #' existing column names. #' @param values_drop_na If `TRUE`, will drop rows that contain only `NA`s #' in the `value_to` column. This effectively converts explicit missing values #' to implicit missing values, and should generally be used only when missing #' values in `data` were created by its structure. #' @param names_transform,values_transform Optionally, a list of column #' name-function pairs. Alternatively, a single function can be supplied, #' which will be applied to all columns. Use these arguments if you need to #' change the types of specific columns. For example, `names_transform = #' list(week = as.integer)` would convert a character variable called `week` #' to an integer. #' #' If not specified, the type of the columns generated from `names_to` will #' be character, and the type of the variables generated from `values_to` #' will be the common type of the input columns used to generate them. #' @param names_ptypes,values_ptypes Optionally, a list of column name-prototype #' pairs. Alternatively, a single empty prototype can be supplied, which will #' be applied to all columns. A prototype (or ptype for short) is a #' zero-length vector (like `integer()` or `numeric()`) that defines the type, #' class, and attributes of a vector. Use these arguments if you want to #' confirm that the created columns are the types that you expect. Note that #' if you want to change (instead of confirm) the types of specific columns, #' you should use `names_transform` or `values_transform` instead. #' @param ... Additional arguments passed on to methods. 
#' @export #' @examples #' # See vignette("pivot") for examples and explanation #' #' # Simplest case where column names are character data #' relig_income #' relig_income %>% #' pivot_longer(!religion, names_to = "income", values_to = "count") #' #' # Slightly more complex case where columns have common prefix, #' # and missing missings are structural so should be dropped. #' billboard #' billboard %>% #' pivot_longer( #' cols = starts_with("wk"), #' names_to = "week", #' names_prefix = "wk", #' values_to = "rank", #' values_drop_na = TRUE #' ) #' #' # Multiple variables stored in column names #' who %>% pivot_longer( #' cols = new_sp_m014:newrel_f65, #' names_to = c("diagnosis", "gender", "age"), #' names_pattern = "new_?(.*)_(.)(.*)", #' values_to = "count" #' ) #' #' # Multiple observations per row. Since all columns are used in the pivoting #' # process, we'll use `cols_vary` to keep values from the original columns #' # close together in the output. #' anscombe #' anscombe %>% #' pivot_longer( #' everything(), #' cols_vary = "slowest", #' names_to = c(".value", "set"), #' names_pattern = "(.)(.)" #' ) pivot_longer <- function(data, cols, ..., cols_vary = "fastest", names_to = "name", names_prefix = NULL, names_sep = NULL, names_pattern = NULL, names_ptypes = NULL, names_transform = NULL, names_repair = "check_unique", values_to = "value", values_drop_na = FALSE, values_ptypes = NULL, values_transform = NULL) { check_dots_used() UseMethod("pivot_longer") } #' @export pivot_longer.data.frame <- function(data, cols, ..., cols_vary = "fastest", names_to = "name", names_prefix = NULL, names_sep = NULL, names_pattern = NULL, names_ptypes = NULL, names_transform = NULL, names_repair = "check_unique", values_to = "value", values_drop_na = FALSE, values_ptypes = NULL, values_transform = NULL) { spec <- build_longer_spec( data = data, cols = {{ cols }}, names_to = names_to, values_to = values_to, names_prefix = names_prefix, names_sep = names_sep, names_pattern = 
names_pattern, names_ptypes = names_ptypes, names_transform = names_transform, error_call = current_env() ) pivot_longer_spec( data = data, spec = spec, cols_vary = cols_vary, names_repair = names_repair, values_drop_na = values_drop_na, values_ptypes = values_ptypes, values_transform = values_transform, error_call = current_env() ) } #' Pivot data from wide to long using a spec #' #' This is a low level interface to pivoting, inspired by the cdata package, #' that allows you to describe pivoting with a data frame. #' #' @keywords internal #' @export #' @inheritParams rlang::args_dots_empty #' @inheritParams rlang::args_error_context #' @inheritParams pivot_longer #' @param spec A specification data frame. This is useful for more complex #' pivots because it gives you greater control on how metadata stored in the #' column names turns into columns in the result. #' #' Must be a data frame containing character `.name` and `.value` columns. #' Additional columns in `spec` should be named to match columns in the #' long format of the dataset and contain values corresponding to columns #' pivoted from the wide format. #' The special `.seq` variable is used to disambiguate rows internally; #' it is automatically removed after pivoting. #' #' @examples #' # See vignette("pivot") for examples and explanation #' #' # Use `build_longer_spec()` to build `spec` using similar syntax to `pivot_longer()` #' # and run `pivot_longer_spec()` based on `spec`. #' spec <- relig_income %>% build_longer_spec( #' cols = !religion, #' names_to = "income", #' values_to = "count" #' ) #' spec #' #' pivot_longer_spec(relig_income, spec) #' #' # Is equivalent to: #' relig_income %>% pivot_longer( #' cols = !religion, #' names_to = "income", #' values_to = "count" #' ) pivot_longer_spec <- function(data, spec, ..., cols_vary = "fastest", names_repair = "check_unique", values_drop_na = FALSE, values_ptypes = NULL, values_transform = NULL, error_call = current_env()) { check_dots_empty0(...) 
  spec <- check_pivot_spec(spec, call = error_call)
  spec <- deduplicate_spec(spec, data)

  cols_vary <- arg_match0(
    arg = cols_vary,
    values = c("fastest", "slowest"),
    error_call = error_call
  )

  # Quick hack to ensure that split() preserves order
  v_fct <- factor(spec$.value, levels = unique(spec$.value))
  values <- split(spec$.name, v_fct)
  value_names <- names(values)
  value_keys <- split(spec[-(1:2)], v_fct)
  keys <- vec_unique(spec[-(1:2)])

  values_ptypes <- check_list_of_ptypes(values_ptypes, value_names, call = error_call)
  values_transform <- check_list_of_functions(values_transform, value_names, call = error_call)

  # Build one long output column per distinct `.value`
  vals <- set_names(vec_init(list(), length(values)), value_names)
  for (value in value_names) {
    cols <- values[[value]]
    col_id <- vec_match(value_keys[[value]], keys)

    n_val_cols <- nrow(keys)
    # Keys with no matching wide column are filled with a column of `NA`
    val_cols <- vec_init(list(), n_val_cols)
    val_cols[col_id] <- unname(as.list(data[cols]))
    val_cols[-col_id] <- list(rep(NA, nrow(data)))

    if (has_name(values_transform, value)) {
      val_cols <- map(val_cols, values_transform[[value]])
    }

    # Name inputs that came from `data`, just for good error messages when
    # taking the common type and casting
    names <- vec_rep("", times = n_val_cols)
    names[col_id] <- cols
    names(val_cols) <- names

    # Only the columns that actually came from `data` participate in
    # determining the common type
    val_type <- vec_ptype_common(
      !!!val_cols[col_id],
      .ptype = values_ptypes[[value]],
      .call = error_call
    )
    val_cols <- vec_cast_common(
      !!!val_cols,
      .to = val_type,
      .call = error_call
    )
    val_cols <- unname(val_cols)

    if (cols_vary == "slowest") {
      vals[[value]] <- list_unchop(val_cols, ptype = val_type)
    } else if (cols_vary == "fastest") {
      vals[[value]] <- vec_interleave(!!!val_cols, .ptype = val_type)
    } else {
      cli::cli_abort("Unknown {.arg cols_vary} value.", .internal = TRUE)
    }
  }
  vals <- as_tibble(vals)

  # Join together data, keys, and vals to produce final tibble
  data_cols <- drop_cols(as_tibble(data, .name_repair = "minimal"), spec$.name)
  times_keys <- vec_size(data_cols)
  times_data_cols <- vec_size(keys)

  if (cols_vary == "slowest") {
    data_cols <- vec_rep(data_cols, times_data_cols)
    keys <- vec_rep_each(keys, times_keys)
  } else if (cols_vary == "fastest") {
    data_cols <- vec_rep_each(data_cols, times_data_cols)
    keys <- vec_rep(keys, times_keys)
  } else {
    cli::cli_abort("Unknown {.arg cols_vary} value.", .internal = TRUE)
  }

  out <- wrap_error_names(vec_cbind(
    data_cols,
    keys,
    vals,
    .name_repair = names_repair,
    .error_call = error_call
  ))

  if (values_drop_na && vec_any_missing(vals)) {
    out <- vec_slice(out, !vec_detect_missing(vals))
  }

  # `.seq` only exists to disambiguate duplicate spec rows; drop it
  out$.seq <- NULL

  reconstruct_tibble(data, out)
}

#' @rdname pivot_longer_spec
#' @export
build_longer_spec <- function(data,
                              cols,
                              ...,
                              names_to = "name",
                              values_to = "value",
                              names_prefix = NULL,
                              names_sep = NULL,
                              names_pattern = NULL,
                              names_ptypes = NULL,
                              names_transform = NULL,
                              error_call = current_env()) {
  check_dots_empty0(...)
  check_data_frame(data, call = error_call)
  check_required(cols, call = error_call)
  check_character(names_to, allow_null = TRUE, call = error_call)

  # `data[unique(names(data))]` tolerates duplicated column names in `data`
  cols <- tidyselect::eval_select(
    expr = enquo(cols),
    data = data[unique(names(data))],
    allow_rename = FALSE,
    error_call = error_call
  )
  cols <- names(cols)

  if (length(cols) == 0) {
    cli::cli_abort("{.arg cols} must select at least one column.", call = error_call)
  }

  if (is.null(names_prefix)) {
    names <- cols
  } else {
    names <- gsub(vec_paste0("^", names_prefix), "", cols)
  }

  if (is.null(names_to)) {
    names_to <- character(0L)
  }

  n_names_to <- length(names_to)
  has_names_sep <- !is.null(names_sep)
  has_names_pattern <- !is.null(names_pattern)

  if (n_names_to == 0L) {
    names <- tibble::new_tibble(x = list(), nrow = length(names))
  } else if (n_names_to == 1L) {
    if (has_names_sep) {
      cli::cli_abort(
        "{.arg names_sep} can't be used with a length 1 {.arg names_to}.",
        call = error_call
      )
    }
    if (has_names_pattern) {
      names <- str_extract(names, names_to, regex = names_pattern, error_call = error_call)[[1]]
    }

    names <- tibble(!!names_to := names)
  } else {
    # Multiple names require exactly one of `names_sep` / `names_pattern`
    if (!xor(has_names_sep, has_names_pattern)) {
      cli::cli_abort(paste0(
        "If you supply multiple names in {.arg names_to} you must also supply one",
        " of {.arg names_sep} or {.arg names_pattern}."
      ), call = error_call)
    }

    if (has_names_sep) {
      names <- str_separate(names, names_to, sep = names_sep, error_call = error_call)
    } else {
      names <- str_extract(names, names_to, regex = names_pattern, error_call = error_call)
    }
  }

  if (".value" %in% names_to) {
    # `.value` in `names_to` means value column names come from the data,
    # so `values_to` is ignored
    values_to <- NULL
  } else {
    vec_assert(values_to, ptype = character(), size = 1, call = error_call)
  }

  names_ptypes <- check_list_of_ptypes(names_ptypes, names(names), call = error_call)
  names_transform <- check_list_of_functions(names_transform, names(names), call = error_call)

  # Optionally, transform cols
  for (col in names(names_transform)) {
    f <- names_transform[[col]]
    names[[col]] <- f(names[[col]])
  }

  # Optionally, cast variables generated from columns
  for (col in names(names_ptypes)) {
    ptype <- names_ptypes[[col]]
    names[[col]] <- vec_cast(names[[col]], ptype, x_arg = col, call = error_call)
  }

  out <- tibble(.name = cols)
  out[[".value"]] <- values_to
  out <- vec_cbind(out, names)
  out
}

# Drop columns from `df`, identified either by name (character) or by
# position (integer).
drop_cols <- function(df, cols) {
  if (is.character(cols)) {
    df[setdiff(names(df), cols)]
  } else if (is.integer(cols)) {
    df[-cols]
  } else {
    cli::cli_abort("Invalid input", .internal = TRUE)
  }
}

# Ensure that there's a one-to-one match from spec to data by adding
# a special .seq variable which is automatically removed after pivoting.
deduplicate_spec <- function(spec, df) {
  # Ensure each .name has a unique output identifier
  key <- spec[setdiff(names(spec), ".name")]
  if (vec_duplicate_any(key)) {
    pos <- vec_group_loc(key)$loc
    seq <- vector("integer", length = nrow(spec))
    for (i in seq_along(pos)) {
      # Number duplicates within each group 1, 2, 3, ...
      seq[pos[[i]]] <- seq_along(pos[[i]])
    }
    spec$.seq <- seq
  }

  # Match spec to data, handling duplicated column names
  col_id <- vec_match(names(df), spec$.name)
  has_match <- !is.na(col_id)
  if (!vec_duplicate_any(col_id[has_match])) {
    # No duplicated data columns: spec is already one-to-one
    return(spec)
  }

  spec <- vec_slice(spec, col_id[has_match])
  # Need to use numeric indices because names only match first
  spec$.name <- seq_along(df)[has_match]

  pieces <- vec_split(seq_len(nrow(spec)), col_id[has_match])
  copy <- integer(nrow(spec))
  for (i in seq_along(pieces$val)) {
    idx <- pieces$val[[i]]
    copy[idx] <- seq_along(idx)
  }

  spec$.seq <- copy
  spec
}
tidyr/R/id.R0000644000176200001440000000271514315413441012332 0ustar liggesusers# Compute an integer id for each combination of the variables in
# `.variables`, with an "n" attribute recording the number of possible
# combinations. Used internally (legacy grouping helper).
id <- function(.variables, drop = FALSE) {
  if (length(.variables) == 0) {
    n <- nrow(.variables) %||% 0L
    return(structure(seq_len(n), n = n))
  }

  # Special case for single variable
  if (length(.variables) == 1) {
    return(id_var(.variables[[1]], drop = drop))
  }

  # Calculate individual ids
  ids <- rev(map(.variables, id_var, drop = drop))
  p <- length(ids)

  # Calculate dimensions
  ndistinct <- map_dbl(ids, attr, "n")
  n <- prod(ndistinct)
  if (n > 2^31) {
    # Too big for integers, have to use strings, which will be much slower :(
    char_id <- do.call("paste", c(ids, sep = "\r"))
    res <- match(char_id, unique(char_id))
  } else {
    # Mixed-radix encoding of the individual ids into a single integer
    combs <- c(1, cumprod(ndistinct[-p]))
    mat <- do.call("cbind", ids)
    res <- c((mat - 1L) %*% combs + 1L)
  }
  attr(res, "n") <- n

  if (drop) {
    id_var(res, drop = TRUE)
  } else {
    structure(as.integer(res), n = attr(res, "n"))
  }
}

# Integer id for a single variable; `drop = TRUE` drops unused factor levels.
id_var <- function(x, drop = FALSE) {
  if (!is_null(attr(x, "n", exact = TRUE)) && !drop) {
    # Already an id vector: reuse as-is
    return(x)
  }

  if (is.factor(x) && !drop) {
    x_na <- addNA(x, ifany = TRUE)
    id <- as.integer(x_na)
    n <- length(levels(x_na))
  } else if (length(x) == 0) {
    id <- integer()
    n <- 0L
  } else if (is_list(x)) {
    # Sorting lists isn't supported
    levels <- unique(x)
    id <- match(x, levels)
    n <- max(id)
  } else {
    levels <- sort(unique(x), na.last = TRUE)
    id <- match(x, levels)
    n <- max(id)
  }
  structure(id, n = n)
}
tidyr/R/uncount.R0000644000176200001440000000311314323620576013432 0ustar liggesusers#' "Uncount" a data frame
#'
#' Performs the opposite operation to [dplyr::count()], duplicating rows
#' according to a weighting variable (or expression).
#'
#' @param data A data frame, tibble, or grouped tibble.
#' @param weights A vector of weights. Evaluated in the context of `data`;
#'   supports quasiquotation.
#' @param ... Additional arguments passed on to methods.
#' @param .id Supply a string to create a new variable which gives a unique
#'   identifier for each created row.
#' @param .remove If `TRUE`, and `weights` is the name of a column in `data`,
#'   then this column is removed.
#' @export
#' @examples
#' df <- tibble(x = c("a", "b"), n = c(1, 2))
#' uncount(df, n)
#' uncount(df, n, .id = "id")
#'
#' # You can also use constants
#' uncount(df, 2)
#'
#' # Or expressions
#' uncount(df, 2 / n)
uncount <- function(data, weights, ..., .remove = TRUE, .id = NULL) {
  check_dots_used()
  UseMethod("uncount")
}

#' @export
uncount.data.frame <- function(data, weights, ..., .remove = TRUE, .id = NULL) {
  check_bool(.remove)
  check_name(.id, allow_null = TRUE)

  # Evaluate `weights` in the context of `data` via a temporary column
  weights_quo <- enquo(weights)
  w <- dplyr::pull(dplyr::mutate(data, `_weight` = !!weights_quo))

  out <- vec_rep_each(data, w, error_call = current_env(), times_arg = "weights")

  # NOTE it was decided to also remove grouping variables as there is no clear
  # best answer. See https://github.com/tidyverse/tidyr/pull/1070
  if (.remove && quo_is_symbol(weights_quo)) {
    out[[as_string(get_expr(weights_quo))]] <- NULL
  }

  if (!is.null(.id)) {
    # sequence(w) numbers the copies of each original row 1..w[i]
    out[[.id]] <- sequence(w)
  }

  reconstruct_tibble(data, out)
}
tidyr/R/hoist.R0000644000176200001440000001421614360013543013062 0ustar liggesusers#' Hoist values out of list-columns
#'
#' @description
#' `hoist()` allows you to selectively pull components of a list-column
#' into their own top-level columns, using the same syntax as [purrr::pluck()].
#'
#' Learn more in `vignette("rectangle")`.
#'
#' @param .data A data frame.
#' @param .col <[`tidy-select`][tidyr_tidy_select]> List-column to extract
#'   components from.
#' @param ... <[`dynamic-dots`][rlang::dyn-dots]> Components of `.col` to turn
#'   into columns in the form `col_name = "pluck_specification"`. You can pluck
#'   by name with a character vector, by position with an integer vector, or
#'   with a combination of the two with a list. See [purrr::pluck()] for
#'   details.
#'
#'   The column names must be unique in a call to `hoist()`, although existing
#'   columns with the same name will be overwritten. When plucking with a
#'   single string you can choose to omit the name, i.e. `hoist(df, col, "x")`
#'   is short-hand for `hoist(df, col, x = "x")`.
#' @param .simplify If `TRUE`, will attempt to simplify lists of
#'   length-1 vectors to an atomic vector. Can also be a named list containing
#'   `TRUE` or `FALSE` declaring whether or not to attempt to simplify a
#'   particular column. If a named list is provided, the default for any
#'   unspecified columns is `TRUE`.
#' @param .ptype Optionally, a named list of prototypes declaring the
#'   desired output type of each component. Alternatively, a single empty
#'   prototype can be supplied, which will be applied to all components. Use
#'   this argument if you want to check that each element has the type you
#'   expect when simplifying.
#'
#'   If a `ptype` has been specified, but `simplify = FALSE` or simplification
#'   isn't possible, then a [list-of][vctrs::list_of()] column will be returned
#'   and each element will have type `ptype`.
#' @param .transform Optionally, a named list of transformation
#'   functions applied to each component. Alternatively, a single function can
#'   be supplied, which will be applied to all components. Use this argument if
#'   you want to transform or parse individual elements as they are extracted.
#'
#'   When both `ptype` and `transform` are supplied, the `transform` is applied
#'   before the `ptype`.
#' @param .remove If `TRUE`, the default, will remove extracted components
#'   from `.col`. This ensures that each value lives only in one place. If all
#'   components are removed from `.col`, then `.col` will be removed from the
#'   result entirely.
#' @examples
#' df <- tibble(
#'   character = c("Toothless", "Dory"),
#'   metadata = list(
#'     list(
#'       species = "dragon",
#'       color = "black",
#'       films = c(
#'         "How to Train Your Dragon",
#'         "How to Train Your Dragon 2",
#'         "How to Train Your Dragon: The Hidden World"
#'       )
#'     ),
#'     list(
#'       species = "blue tang",
#'       color = "blue",
#'       films = c("Finding Nemo", "Finding Dory")
#'     )
#'   )
#' )
#' df
#'
#' # Extract only specified components
#' df %>% hoist(metadata,
#'   "species",
#'   first_film = list("films", 1L),
#'   third_film = list("films", 3L)
#' )
#' @export hoist
#' @family rectangling
hoist <- function(.data,
                  .col,
                  ...,
                  .remove = TRUE,
                  .simplify = TRUE,
                  .ptype = NULL,
                  .transform = NULL) {
  check_data_frame(.data)
  check_required(.col)
  pluckers <- check_pluckers(...)
  check_bool(.remove)

  .col <- tidyselect::vars_pull(names(.data), {{ .col }})

  x <- .data[[.col]]
  vec_check_list(x, arg = ".data[[.col]]")

  # These are also checked in df_simplify(), but we check here to generate
  # errors with argument names
  check_list_of_ptypes(.ptype, names(x))
  check_list_of_bool(.simplify, names(x))
  check_list_of_functions(.transform, names(x))

  # In R <4.1, `::` is quite slow and this is a tight loop, so eliminating
  # the lookup has a large performance impact:
  # https://github.com/tidyverse/tidyr/issues/1001
  pluck <- purrr::pluck
  cols <- map(pluckers, function(idx) {
    map(x, ~ pluck(.x, !!!idx))
  })
  cols <- new_data_frame(cols, n = vec_size(.data))

  cols <- df_simplify(
    cols,
    ptype = .ptype,
    transform = .transform,
    simplify = .simplify
  )

  # Place new columns before old column
  out <- df_append(.data, cols, after = match(.col, names(.data)) - 1L)

  if (.remove) {
    x <- map(x, function(x) {
      # rev() is sneaky hack assuming that most people will remove in
      # numeric order, so this should avoid most order problems. A full
      # resolution will be considerably more work.
      for (plucker in rev(pluckers)) {
        x <- strike(x, plucker)
      }
      x
    })
    if (every(x, is_empty)) {
      # Every element fully emptied: drop the list-column entirely
      x <- NULL
    }
    out[[.col]] <- x
  }

  reconstruct_tibble(.data, out)
}

# Validate and normalize the `...` pluckers supplied to `hoist()`:
# auto-name single-string pluckers, require unique names, and convert
# every plucker to a list for splicing into `pluck()`.
check_pluckers <- function(..., .call = caller_env()) {
  pluckers <- list2(...)

  is_string <- map_lgl(pluckers, ~ is.character(.x) && length(.x) == 1)
  auto_name <- names2(pluckers) == "" & is_string

  if (any(auto_name)) {
    names(pluckers)[auto_name] <- unlist(pluckers[auto_name])
  }

  check_unique_names(pluckers, arg = "...", call = .call)

  # Standardize all pluckers to lists for splicing into `pluck()`
  # and for easier handling in `strike()`
  is_not_list <- !map_lgl(pluckers, vec_is_list)
  pluckers[is_not_list] <- map(pluckers[is_not_list], vec_chop)

  pluckers
}

# Remove the element addressed by `indices` (a pluck-style path) from the
# nested list `x`, recursing one level per index.
strike <- function(x, indices) {
  if (!vec_is_list(indices)) {
    cli::cli_abort("{.arg indices} must be a list.", .internal = TRUE)
  }

  n_indices <- vec_size(indices)

  if (n_indices == 0L) {
    # Edge case corresponding to an empty plucker
    return(x)
  }

  index <- indices[[1L]]
  indices <- indices[-1L]

  size <- vec_size(x)

  is_valid_index <- (is.numeric(index) && (index <= size)) || (is.character(index) && has_name(x, index))
  if (!is_valid_index) {
    # Nothing to do if the `pluck()` missed entirely
    return(x)
  }

  index <- vec_as_location(index, n = size, names = names(x))

  if (n_indices == 1L) {
    # At base index, remove it
    x <- x[-index]
  } else {
    # Not at base index yet, continue recursion
    x[[index]] <- strike(x[[index]], indices)
  }

  x
}
tidyr/R/dep-extract.R0000644000176200001440000000060514013466035014154 0ustar liggesusers# nocov start
#' Extract numeric component of variable.
#'
#' DEPRECATED: please use `readr::parse_number()` instead.
#'
#' @param x A character vector (or a factor).
#' @keywords internal
#' @export
extract_numeric <- function(x) {
  message("extract_numeric() is deprecated: please use readr::parse_number() instead")
  as.numeric(gsub("[^0-9.-]+", "", as.character(x)))
}
# nocov end
tidyr/R/separate-longer.R0000644000176200001440000000621214363516001015021 0ustar liggesusers#' Split a string into rows
#'
#' @description
#' `r lifecycle::badge("experimental")`
#'
#' Each of these functions takes a string and splits it into multiple rows:
#'
#' * `separate_longer_delim()` splits by a delimiter.
#' * `separate_longer_position()` splits by a fixed width.
#'
#' @export
#' @param delim For `separate_longer_delim()`, a string giving the delimiter
#'   between values. By default, it is interpreted as a fixed string; use
#'   [stringr::regex()] and friends to split in other ways.
#' @inheritParams separate_wider_delim
#' @return A data frame based on `data`. It has the same columns, but different
#'   rows.
#' @examples
#' df <- tibble(id = 1:4, x = c("x", "x y", "x y z", NA))
#' df %>% separate_longer_delim(x, delim = " ")
#'
#' # You can separate multiple columns at once if they have the same structure
#' df <- tibble(id = 1:3, x = c("x", "x y", "x y z"), y = c("a", "a b", "a b c"))
#' df %>% separate_longer_delim(c(x, y), delim = " ")
#'
#' # Or instead split by a fixed length
#' df <- tibble(id = 1:3, x = c("ab", "def", ""))
#' df %>% separate_longer_position(x, 1)
#' df %>% separate_longer_position(x, 2)
#' df %>% separate_longer_position(x, 2, keep_empty = TRUE)
separate_longer_delim <- function(data, cols, delim, ...) {
  check_installed("stringr")
  check_data_frame(data)
  check_required(cols)
  check_string(delim)
  check_dots_empty()

  if (is_bare_string(delim)) {
    # Bare strings are treated as fixed delimiters, not regexes
    delim <- stringr::fixed(delim)
  }

  map_unchop(data, {{ cols }}, stringr::str_split, pattern = delim)
}

#' @param width For `separate_longer_position()`, an integer giving the
#'   number of characters to split by.
#' @param keep_empty By default, you'll get `ceiling(nchar(x) / width)` rows for
#'   each observation. If `nchar(x)` is zero, this means the entire input
#'   row will be dropped from the output. If you want to preserve all rows,
#'   use `keep_empty = TRUE` to replace size-0 elements with a missing value.
#' @rdname separate_longer_delim
#' @export
separate_longer_position <- function(data, cols, width, ..., keep_empty = FALSE) {
  check_installed("stringr")
  check_data_frame(data)
  check_required(cols)
  check_number_whole(width, min = 1L)
  check_dots_empty()

  map_unchop(
    data,
    {{ cols }},
    str_split_length,
    width = width,
    .keep_empty = keep_empty
  )
}

# Split each string in `x` into pieces of at most `width` characters,
# returning a list of character vectors with empty pieces removed.
str_split_length <- function(x, width = 1) {
  if (length(x) == 0L) {
    return(list())
  }

  max_length <- max(stringr::str_length(x))
  idx <- seq(1, max_length, by = width)

  pieces <- stringr::str_sub_all(x, cbind(idx, length = width))
  pieces <- map(pieces, function(x) x[x != ""])

  pieces
}

# helpers -----------------------------------------------------------------

# Apply `fun` to each selected column (producing list-columns) and then
# unchop so each list element becomes its own row.
map_unchop <- function(data,
                       cols,
                       fun,
                       ...,
                       .keep_empty = FALSE,
                       .error_call = caller_env()) {
  cols <- tidyselect::eval_select(
    enquo(cols),
    data = data,
    allow_rename = FALSE,
    allow_empty = FALSE,
    error_call = .error_call
  )
  col_names <- names(cols)

  for (col in col_names) {
    data[[col]] <- fun(data[[col]], ...)
  }

  unchop(
    data = data,
    cols = all_of(col_names),
    keep_empty = .keep_empty,
    ptype = character(),
    error_call = .error_call
  )
}
tidyr/R/pivot.R0000644000176200001440000000354214323620576013106 0ustar liggesusers#' Check assumptions about a pivot `spec`
#'
#' @description
#' `check_pivot_spec()` is a developer facing helper function for validating
#' the pivot spec used in [pivot_longer_spec()] or [pivot_wider_spec()]. It is
#' only useful if you are extending [pivot_longer()] or [pivot_wider()] with
#' new S3 methods.
#'
#' `check_pivot_spec()` makes the following assertions:
#' - `spec` must be a data frame.
#' - `spec` must have a character column named `.name`.
#' - `spec` must have a character column named `.value`.
#' - The `.name` column must be unique.
#' - The `.name` and `.value` columns must be the first two columns in the data
#'   frame, and will be reordered if that is not true.
#'
#' @inheritParams pivot_wider_spec
#'
#' @keywords internal
#' @export
#' @examples
#' # A valid spec
#' spec <- tibble(.name = "a", .value = "b", foo = 1)
#' check_pivot_spec(spec)
#'
#' spec <- tibble(.name = "a")
#' try(check_pivot_spec(spec))
#'
#' # `.name` and `.value` are forced to be the first two columns
#' spec <- tibble(foo = 1, .value = "b", .name = "a")
#' check_pivot_spec(spec)
check_pivot_spec <- function(spec, call = caller_env()) {
  check_data_frame(spec, call = call)

  if (!has_name(spec, ".name") || !has_name(spec, ".value")) {
    cli::cli_abort(
      "{.arg spec} must have {.var .name} and {.var .value} columns.",
      call = call
    )
  }

  check_character(spec$.name, call = call)
  if (vec_duplicate_any(spec$.name)) {
    cli::cli_abort("{.var spec$.name} must be unique.", call = call)
  }

  check_character(spec$.value, call = call)

  # Ensure `.name` and `.value` come first, in that order
  vars <- union(c(".name", ".value"), names(spec))
  spec <- spec[vars]

  spec
}

# Re-signal vctrs name-repair errors so they reference the user-facing
# `names_repair` argument instead of vctrs internals.
wrap_error_names <- function(code) {
  withCallingHandlers(
    code,
    vctrs_error_names = function(cnd) {
      cnd$arg <- "names_repair"
      cnd_signal(cnd)
    }
  )
}
tidyr/R/nest.R0000644000176200001440000002343414363516001012707 0ustar liggesusers#' Nest rows into a list-column of data frames
#'
#' @description
#' Nesting creates a list-column of data frames; unnesting flattens it back out
#' into regular columns. Nesting is implicitly a summarising operation: you
#' get one row for each group defined by the non-nested columns. This is useful
#' in conjunction with other summaries that work with whole datasets, most
#' notably models.
#'
#' Learn more in `vignette("nest")`.
#'
#' @details
#' If neither `...` nor `.by` are supplied, `nest()` will nest all variables,
#' and will use the column name supplied through `.key`.
#'
#' @section New syntax:
#' tidyr 1.0.0 introduced a new syntax for `nest()` and `unnest()` that's
#' designed to be more similar to other functions. Converting to the new syntax
#' should be straightforward (guided by the message you'll receive) but if
#' you just need to run an old analysis, you can easily revert to the previous
#' behaviour using [nest_legacy()] and [unnest_legacy()] as follows:
#'
#' ```
#' library(tidyr)
#' nest <- nest_legacy
#' unnest <- unnest_legacy
#' ```
#'
#' @section Grouped data frames:
#' `df %>% nest(data = c(x, y))` specifies the columns to be nested; i.e. the
#' columns that will appear in the inner data frame. `df %>% nest(.by = c(x,
#' y))` specifies the columns to nest _by_; i.e. the columns that will remain in
#' the outer data frame. An alternative way to achieve the latter is to `nest()`
#' a grouped data frame created by [dplyr::group_by()]. The grouping variables
#' remain in the outer data frame and the others are nested. The result
#' preserves the grouping of the input.
#'
#' Variables supplied to `nest()` will override grouping variables so that
#' `df %>% group_by(x, y) %>% nest(data = !z)` will be equivalent to
#' `df %>% nest(data = !z)`.
#'
#' You can't supply `.by` with a grouped data frame, as the groups already
#' represent what you are nesting by.
#'
#' @param .data A data frame.
#' @param ... <[`tidy-select`][tidyr_tidy_select]> Columns to nest; these will
#'   appear in the inner data frames.
#'
#'   Specified using name-variable pairs of the form
#'   `new_col = c(col1, col2, col3)`. The right hand side can be any valid
#'   tidyselect expression.
#'
#'   If not supplied, then `...` is derived as all columns _not_ selected by
#'   `.by`, and will use the column name from `.key`.
#'
#'   `r lifecycle::badge("deprecated")`:
#'   previously you could write `df %>% nest(x, y, z)`.
#'   Convert to `df %>% nest(data = c(x, y, z))`.
#' @param .by <[`tidy-select`][tidyr_tidy_select]> Columns to nest _by_; these
#'   will remain in the outer data frame.
#'
#'   `.by` can be used in place of or in conjunction with columns supplied
#'   through `...`.
#'
#'   If not supplied, then `.by` is derived as all columns _not_ selected by
#'   `...`.
#' @param .key The name of the resulting nested column. Only applicable when
#'   `...` isn't specified, i.e. in the case of `df %>% nest(.by = x)`.
#'
#'   If `NULL`, then `"data"` will be used by default.
#' @param .names_sep If `NULL`, the default, the inner names will come from
#'   the former outer names. If a string, the new inner names will use the
#'   outer names with `names_sep` automatically stripped. This makes
#'   `names_sep` roughly symmetric between nesting and unnesting.
#' @export
#' @examples
#' df <- tibble(x = c(1, 1, 1, 2, 2, 3), y = 1:6, z = 6:1)
#'
#' # Specify variables to nest using name-variable pairs.
#' # Note that we get one row of output for each unique combination of
#' # non-nested variables.
#' df %>% nest(data = c(y, z))
#'
#' # Specify variables to nest by (rather than variables to nest) using `.by`
#' df %>% nest(.by = x)
#'
#' # In this case, since `...` isn't used you can specify the resulting column
#' # name with `.key`
#' df %>% nest(.by = x, .key = "cols")
#'
#' # Use tidyselect syntax and helpers, just like in `dplyr::select()`
#' df %>% nest(data = any_of(c("y", "z")))
#'
#' # `...` and `.by` can be used together to drop columns you no longer need,
#' # or to include the columns you are nesting by in the inner data frame too.
#' # This drops `z`:
#' df %>% nest(data = y, .by = x)
#' # This includes `x` in the inner data frame:
#' df %>% nest(data = everything(), .by = x)
#'
#' # Multiple nesting structures can be specified at once
#' iris %>%
#'   nest(petal = starts_with("Petal"), sepal = starts_with("Sepal"))
#' iris %>%
#'   nest(width = contains("Width"), length = contains("Length"))
#'
#' # Nesting a grouped data frame nests all variables apart from the group vars
#' fish_encounters %>%
#'   dplyr::group_by(fish) %>%
#'   nest()
#'
#' # That is similar to `nest(.by = )`, except here the result isn't grouped
#' fish_encounters %>%
#'   nest(.by = fish)
#'
#' # Nesting is often useful for creating per group models
#' mtcars %>%
#'   nest(.by = cyl) %>%
#'   dplyr::mutate(models = lapply(data, function(df) lm(mpg ~ wt, data = df)))
nest <- function(.data, ..., .by = NULL, .key = NULL, .names_sep = NULL) {
  cols <- enquos(...)

  empty <- names2(cols) == ""
  if (any(empty)) {
    # Deprecated path: unnamed selections get collapsed into a single
    # named selection and the user is warned before recursing
    cols_good <- cols[!empty]
    cols_bad <- cols[empty]

    .key <- check_key(.key)

    if (length(cols_bad) == 1L) {
      cols_bad <- cols_bad[[1]]
      cols_fixed_expr <- expr(!!cols_bad)
    } else {
      cols_fixed_expr <- expr(c(!!!cols_bad))
    }

    cols_fixed_label <- as_label(cols_fixed_expr)
    cols_fixed <- quos(!!.key := !!cols_fixed_expr)

    cols <- c(cols_good, cols_fixed)

    lifecycle::deprecate_warn(
      when = "1.0.0",
      what = I("Supplying `...` without names"),
      details = c(
        i = "Please specify a name for each selection.",
        i = cli::format_inline("Did you want `{(.key)} = {cols_fixed_label}`?")
      ),
      always = TRUE
    )

    return(nest(.data, !!!cols, .by = {{ .by }}))
  }

  UseMethod("nest")
}

#' @export
nest.data.frame <- function(.data, ..., .by = NULL, .key = NULL, .names_sep = NULL) {
  # The data frame print handles nested data frames poorly, so we want to
  # convert data frames (but not subclasses) to tibbles
  if (identical(class(.data), "data.frame")) {
    .data <- as_tibble(.data)
  }

  nest.tbl_df(
    .data,
    ...,
    .by = {{ .by }},
    .key = .key,
    .names_sep = .names_sep
  )
}

#' @export
nest.tbl_df <- function(.data, ..., .by = NULL, .key = NULL, .names_sep = NULL) { error_call <- current_env() info <- nest_info(.data, ..., .by = {{ .by }}, .key = .key) cols <- info$cols inner <- info$inner outer <- info$outer inner <- .data[inner] inner <- pack(inner, !!!cols, .names_sep = .names_sep, .error_call = error_call) out <- .data[outer] out <- vec_cbind(out, inner, .name_repair = "check_unique", .error_call = error_call) out <- reconstruct_tibble(.data, out) out <- chop(out, cols = all_of(names(cols)), error_call = error_call) # `nest()` currently doesn't return list-of columns for (name in names(cols)) { out[[name]] <- tidyr_new_list(out[[name]]) } out } #' @export nest.grouped_df <- function(.data, ..., .by = NULL, .key = NULL, .names_sep = NULL) { by <- enquo(.by) if (!quo_is_null(by)) { cli::cli_abort("Can't supply {.arg .by} when {.arg .data} is a grouped data frame.") } if (missing(...)) { .key <- check_key(.key) cols <- setdiff(names(.data), dplyr::group_vars(.data)) nest.tbl_df(.data, !!.key := all_of(cols), .names_sep = .names_sep) } else { nest.tbl_df(.data, ..., .key = .key, .names_sep = .names_sep) } } nest_info <- function(.data, ..., .by = NULL, .key = NULL, .error_call = caller_env()) { by <- enquo(.by) cols <- enquos(...) 
  n_cols <- length(cols)

  key <- check_key(.key, error_call = .error_call)

  if (n_cols != 0L && !is_default_key(.key)) {
    # `.key` only applies when `...` is empty; warn that it is ignored
    warn_unused_key(error_call = .error_call)
  }

  cols <- with_indexed_errors(
    map(cols, function(col) {
      names(tidyselect::eval_select(
        expr = col,
        data = .data,
        allow_rename = FALSE,
        error_call = NULL
      ))
    }),
    message = function(cnd) {
      cli::format_inline("In expression named {.arg {cnd$name}}:")
    },
    .error_call = .error_call
  )

  names <- names(.data)

  outer <- names(tidyselect::eval_select(
    expr = by,
    data = .data,
    allow_rename = FALSE,
    error_call = .error_call
  ))

  inner <- list_unchop(cols, ptype = character(), name_spec = zap())
  inner <- vec_unique(inner)

  if (n_cols == 0L) {
    # Derive `inner` names from `.by`
    inner <- setdiff(names, outer)
    cols <- list2(!!key := inner)
  }

  if (quo_is_null(by)) {
    # Derive `outer` names from `...`
    outer <- setdiff(names, inner)
  }

  # Regenerate quosures for `pack()`
  cols <- map(cols, function(col) {
    quo(all_of(!!col))
  })
  cols <- new_quosures(cols)

  list(
    cols = cols,
    inner = inner,
    outer = outer
  )
}

# Warn that `.key` is ignored when explicit selections are given in `...`.
warn_unused_key <- function(error_call = caller_env()) {
  message <- c(
    "Can't supply both {.arg .key} and {.arg ...}.",
    i = "{.arg .key} will be ignored."
  )
  cli::cli_warn(message, call = error_call)
}

# Resolve `.key` to a column name, defaulting to "data" when unset.
check_key <- function(key, error_call = caller_env()) {
  if (is_default_key(key)) {
    "data"
  } else {
    check_string(key, allow_empty = FALSE, arg = ".key", call = error_call)
    key
  }
}

is_default_key <- function(key) {
  if (identical(maybe_missing(key), deprecated())) {
    # Temporary support for S3 method authors that set `.key = deprecated()`.
    # Remove this entire helper once all methods have been updated.
    key <- NULL
  }
  is.null(key)
}
tidyr/R/expand.R0000644000176200001440000002406514363517015013224 0ustar liggesusers#' Expand data frame to include all possible combinations of values
#'
#' @description
#' `expand()` generates all combination of variables found in a dataset.
#' It is paired with `nesting()` and `crossing()` helpers. `crossing()`
#' is a wrapper around [expand_grid()] that de-duplicates and sorts its inputs;
#' `nesting()` is a helper that only finds combinations already present in the
#' data.
#'
#' `expand()` is often useful in conjunction with joins:
#'
#'  * use it with `right_join()` to convert implicit missing values to
#'    explicit missing values (e.g., fill in gaps in your data frame).
#'  * use it with `anti_join()` to figure out which combinations are missing
#'    (e.g., identify gaps in your data frame).
#'
#' @section Grouped data frames:
#' With grouped data frames created by [dplyr::group_by()], `expand()` operates
#' _within_ each group. Because of this, you cannot expand on a grouping column.
#'
#' @inheritParams expand_grid
#' @param data A data frame.
#' @param ... <[`data-masking`][tidyr_data_masking]> Specification of columns
#'   to expand or complete. Columns can be atomic vectors or lists.
#'
#'   * To find all unique combinations of `x`, `y` and `z`, including those not
#'     present in the data, supply each variable as a separate argument:
#'     `expand(df, x, y, z)` or `complete(df, x, y, z)`.
#'   * To find only the combinations that occur in the
#'     data, use `nesting`: `expand(df, nesting(x, y, z))`.
#'   * You can combine the two forms. For example,
#'     `expand(df, nesting(school_id, student_id), date)` would produce
#'     a row for each present school-student combination for all possible
#'     dates.
#'
#'   When used with factors, [expand()] and [complete()] use the full set of
#'   levels, not just those that appear in the data. If you want to use only the
#'   values seen in the data, use `forcats::fct_drop()`.
#'
#'   When used with continuous variables, you may need to fill in values
#'   that do not appear in the data: to do so use expressions like
#'   `year = 2010:2020` or `year = full_seq(year,1)`.
#' @seealso [complete()] to expand list objects. [expand_grid()]
#'   to input vectors rather than a data frame.
#' @export
#' @examples
#' # Finding combinations ------------------------------------------------------
#' fruits <- tibble(
#'   type = c("apple", "orange", "apple", "orange", "orange", "orange"),
#'   year = c(2010, 2010, 2012, 2010, 2011, 2012),
#'   size = factor(
#'     c("XS", "S", "M", "S", "S", "M"),
#'     levels = c("XS", "S", "M", "L")
#'   ),
#'   weights = rnorm(6, as.numeric(size) + 2)
#' )
#'
#' # All combinations, including factor levels that are not used
#' fruits %>% expand(type)
#' fruits %>% expand(size)
#' fruits %>% expand(type, size)
#' fruits %>% expand(type, size, year)
#'
#' # Only combinations that already appear in the data
#' fruits %>% expand(nesting(type))
#' fruits %>% expand(nesting(size))
#' fruits %>% expand(nesting(type, size))
#' fruits %>% expand(nesting(type, size, year))
#'
#' # Other uses ----------------------------------------------------------------
#' # Use with `full_seq()` to fill in values of continuous variables
#' fruits %>% expand(type, size, full_seq(year, 1))
#' fruits %>% expand(type, size, 2010:2013)
#'
#' # Use `anti_join()` to determine which observations are missing
#' all <- fruits %>% expand(type, size, year)
#' all
#' all %>% dplyr::anti_join(fruits)
#'
#' # Use with `right_join()` to fill in missing rows (like `complete()`)
#' fruits %>% dplyr::right_join(all)
#'
#' # Use with `group_by()` to expand within each group
#' fruits %>%
#'   dplyr::group_by(type) %>%
#'   expand(year, size)
expand <- function(data, ..., .name_repair = "check_unique") {
  UseMethod("expand")
}

#' @export
expand.data.frame <- function(data, ..., .name_repair = "check_unique") {
  out <- grid_dots(..., `_data` = data)
  out <- map(out, sorted_unique)
  # Flattens unnamed data frames returned from `grid_dots()`
  out <- expand_grid(!!!out, .name_repair = .name_repair)
  reconstruct_tibble(data, out)
}

#' @export
expand.grouped_df <- function(data, ..., .name_repair = "check_unique") {
  if (the$has_dplyr_1_1) {
    # dplyr >= 1.1: use reframe() + pick(); accessed via getFromNamespace()
    # so the package still installs against older dplyr
    reframe <- utils::getFromNamespace("reframe", ns = "dplyr")
    pick <- utils::getFromNamespace("pick", ns = "dplyr")

    out <- reframe(
      data,
      expand(
        data = pick(everything()),
        ...,
        .name_repair = .name_repair
      )
    )

    # reframe() returns an ungrouped result; restore the original grouping
    drop <- dplyr::group_by_drop_default(data)
    dplyr::group_by(out, !!!dplyr::groups(data), .drop = drop)
  } else {
    dplyr::summarise(
      data,
      expand(
        data = dplyr::cur_data(),
        ...,
        .name_repair = .name_repair
      ),
      .groups = "keep"
    )
  }
}

# Nesting & crossing ------------------------------------------------------

#' @rdname expand
#' @export
crossing <- function(..., .name_repair = "check_unique") {
  out <- grid_dots(...)
  out <- map(out, sorted_unique)
  # Flattens unnamed data frames returned from `grid_dots()`
  expand_grid(!!!out, .name_repair = .name_repair)
}

#' @rdname expand
#' @export
nesting <- function(..., .name_repair = "check_unique") {
  out <- grid_dots(...)

  if (length(out) == 0L) {
    # This matches `crossing()`, `expand_grid()`, and `expand()`, which return
    # a 1 row / 0 col tibble. Computations involving the number of combinations
    # of an empty set should return 1.
    size <- 1L
  } else {
    size <- NULL
  }

  # Flattens unnamed data frames
  out <- data_frame(!!!out, .size = size, .name_repair = .name_repair)
  out <- tibble::new_tibble(out, nrow = vec_size(out))
  out <- sorted_unique(out)

  out
}

# expand_grid -------------------------------------------------------------

#' Create a tibble from all combinations of inputs
#'
#' @description
#' `expand_grid()` is heavily motivated by [expand.grid()].
#' Compared to `expand.grid()`, it:
#'
#' * Produces sorted output (by varying the first column the slowest, rather
#'   than the fastest).
#' * Returns a tibble, not a data frame.
#' * Never converts strings to factors.
#' * Does not add any additional attributes.
#' * Can expand any generalised vector, including data frames.
#'
#' @param ... Name-value pairs. The name will become the column name in the
#'   output.
#' @inheritParams tibble::as_tibble
#' @return A tibble with one column for each input in `...`. The output
#'   will have one row for each combination of the inputs, i.e. the size
#'   be equal to the product of the sizes of the inputs. This implies
#'   that if any input has length 0, the output will have zero rows.
#' @export
#' @examples
#' expand_grid(x = 1:3, y = 1:2)
#' expand_grid(l1 = letters, l2 = LETTERS)
#'
#' # Can also expand data frames
#' expand_grid(df = tibble(x = 1:2, y = c(2, 1)), z = 1:3)
#' # And matrices
#' expand_grid(x1 = matrix(1:4, nrow = 2), x2 = matrix(5:8, nrow = 2))
expand_grid <- function(..., .name_repair = "check_unique") {
  out <- grid_dots(...)

  names <- names2(out)
  unnamed <- which(names == "")
  any_unnamed <- any(unnamed)

  if (any_unnamed) {
    # `vec_expand_grid()` requires all inputs to be named.
    # Most are auto named by `grid_dots()`, but unnamed data frames are not.
    # So we temporarily name unnamed data frames that eventually get spliced.
    names[unnamed] <- vec_paste0("...", unnamed)
    names(out) <- names
  }

  out <- vec_expand_grid(
    !!!out,
    .name_repair = "minimal",
    .error_call = current_env()
  )

  if (any_unnamed) {
    # Restore empty names so the data frames get spliced below
    names[unnamed] <- ""
    names(out) <- names
  }

  size <- vec_size(out)

  # Flattens unnamed data frames after grid expansion
  out <- tidyr_new_list(out)
  out <- df_list(!!!out, .name_repair = .name_repair, .error_call = current_env())
  out <- tibble::new_tibble(out, nrow = size)

  out
}

# Helpers -----------------------------------------------------------------

# Sorted unique values of `x`, dispatching on type: factors keep their
# level order, bare lists are only de-duplicated (no sort), everything
# else is sorted after de-duplication.
sorted_unique <- function(x) {
  if (is.factor(x)) {
    fct_unique(x)
  } else if (is_bare_list(x)) {
    vec_unique(x)
  } else {
    vec_sort(vec_unique(x))
  }
}

# forcats::fct_unique
fct_unique <- function(x) {
  levels <- levels(x)
  out <- levels

  if (!anyNA(levels) && anyNA(x)) {
    out <- c(out, NA_character_)
  }

  factor(out, levels = levels, exclude = NULL, ordered = is.ordered(x))
}

# Evaluate `...` iteratively in a shared data mask (optionally pre-seeded
# with the columns of `_data`) so later expressions can refer to earlier
# results; returns the evaluated, auto-named list of inputs.
grid_dots <- function(..., `_data` = NULL, .error_call = caller_env()) {
  dots <- enquos(...)
  n_dots <- length(dots)
  names <- names(dots)

  # Dots supplied without `name =` get automatic names derived from their
  # expressions.
  needs_auto_name <- names == ""

  # Silently uniquely repair "auto-names" to avoid collisions
  # from truncated long names. Probably not a perfect system, but solves
  # most of the reported issues.
  auto_names <- names(exprs_auto_name(
    exprs = dots[needs_auto_name],
    repair_auto = "unique",
    repair_quiet = TRUE
  ))

  names[needs_auto_name] <- auto_names

  # Set up a mask for repeated `eval_tidy()` calls that support iterative
  # expressions
  env <- new_environment()
  mask <- new_data_mask(env)
  mask$.data <- as_data_pronoun(env)

  if (!is.null(`_data`)) {
    # Pre-load the data mask with `_data`
    cols <- tidyr_new_list(`_data`)
    col_names <- names(cols)

    for (i in seq_along(cols)) {
      col <- cols[[i]]
      col_name <- col_names[[i]]
      env[[col_name]] <- col
    }
  }

  # Results, plus a parallel flag marking dots that evaluated to `NULL`
  # (those are dropped at the end of the function).
  out <- vector("list", length = n_dots)
  null <- vector("logical", length = n_dots)

  for (i in seq_len(n_dots)) {
    dot <- dots[[i]]
    # Evaluate against the mask so this dot can see all previous results
    dot <- eval_tidy(dot, data = mask)

    if (is.null(dot)) {
      null[[i]] <- TRUE
      next
    }

    # Each dot must be a vector (data frames included); report failures
    # with a positional `..N` argument label at the caller's call site.
    arg <- paste0("..", i)
    vec_assert(dot, arg = arg, call = .error_call)

    out[[i]] <- dot

    is_unnamed_data_frame <- is.data.frame(dot) && needs_auto_name[[i]]

    if (is_unnamed_data_frame) {
      # Signal that unnamed data frame should be spliced by setting its name
      # to `""`. Then add its individual columns into the mask.
      names[[i]] <- ""

      dot_names <- names(dot)

      # NOTE(review): this inner loop shadows the outer `i`. Safe today
      # because `i` is not read again before the outer `for` reassigns it on
      # the next iteration, but a distinct index name would be more robust.
      for (i in seq_along(dot)) {
        dot_col <- dot[[i]]
        dot_name <- dot_names[[i]]
        env[[dot_name]] <- dot_col
      }
    } else {
      # Install `dot` in the mask for iterative evaluations
      name <- names[[i]]
      env[[name]] <- dot
    }
  }

  # Drop the dots that evaluated to `NULL`, keeping `names` aligned
  if (any(null)) {
    out <- out[!null]
    names <- names[!null]
  }

  names(out) <- names

  out
}
tidyr/NEWS.md0000644000176200001440000014243614553746165012525 0ustar liggesusers# tidyr 1.3.1

* `pivot_wider` now uses `.by` and `|>` syntax for the dplyr helper message to
  identify duplicates (@boshek, #1516)

# tidyr 1.3.0

## New features

* New family of consistent string separating functions:
  `separate_wider_delim()`, `separate_wider_position()`,
  `separate_wider_regex()`, `separate_longer_delim()`, and
  `separate_longer_position()`. These functions are thorough refreshes of
  `separate()` and `extract()`, featuring improved performance, greater
  consistency, a polished API, and a new approach for handling problems. They
  use stringr and supersede `extract()`, `separate()`, and `separate_rows()`
  (#1304).

  The named character vector interface used in `separate_wider_regex()` is
  very similar to the [nc](https://github.com/tdhock/nc) package by Toby Dylan
  Hocking.

* `nest()` gains a `.by` argument which allows you to specify the columns to
  nest by (rather than the columns to nest, i.e. through `...`). Additionally,
  the `.key` argument is no longer deprecated, and is used whenever `...`
  isn't specified (#1458).

* `unnest_longer()` gains a `keep_empty` argument like `unnest()` (#1339).

* `pivot_longer()` gains a `cols_vary` argument for controlling the ordering
  of the output rows relative to their original row number (#1312).

* New datasets `who2`, `household`, `cms_patient_experience`, and
  `cms_patient_care` to demonstrate various tidying challenges (#1333).
## Breaking changes * The `...` argument of both `pivot_longer()` and `pivot_wider()` has been moved to the front of the function signature, after the required arguments but before the optional ones. Additionally, `pivot_longer_spec()`, `pivot_wider_spec()`, `build_longer_spec()`, and `build_wider_spec()` have all gained `...` arguments in a similar location. This change allows us to more easily add new features to the pivoting functions without breaking existing CRAN packages and user scripts. `pivot_wider()` provides temporary backwards compatible support for the case of a single unnamed argument that previously was being positionally matched to `id_cols`. This one special case still works, but will throw a warning encouraging you to explicitly name the `id_cols` argument. To read more about this pattern, see (#1350). ## Lifecycle changes * All functions deprecated in tidyr 1.0 and 1.2 (the old lazyeval functions ending in `_` and various arguments to `unnest()`) now warn on every use. They will be made defunct in 2024 (#1406). ## Rectangling * `unnest_longer()` now consistently drops rows with either `NULL` or empty vectors (like `integer()`) by default. Set the new `keep_empty` argument to `TRUE` to retain them. Previously, `keep_empty = TRUE` was implicitly being used for `NULL`, while `keep_empty = FALSE` was being used for empty vectors, which was inconsistent with all other tidyr verbs with this argument (#1363). * `unnest_longer()` now uses `""` in the index column for fully unnamed vectors. It also now consistently uses `NA` in the index column for empty vectors that are "kept" by `keep_empty = TRUE` (#1442). * `unnest_wider()` now errors if any values being unnested are unnamed and `names_sep` is not provided (#1367). * `unnest_wider()` now generates automatic names for _partially_ unnamed vectors. Previously it only generated them for fully unnamed vectors, resulting in a strange mix of automatic names and name-repaired names (#1367). 
## Bug fixes and minor improvements ### General * Most tidyr functions now consistently disallow renaming during tidy-selection. Renaming was never meaningful in these functions, and previously either had no effect or caused problems (#1449, #1104). * tidyr errors (including input validation) have been thoroughly reviewed and should generally be more likely to point you in the right direction (#1313, #1400). * `uncount()` is now generic so implementations can be provided for objects other than data frames (@mgirlich, #1358). * `uncount()` gains a `...` argument. It comes between the required and the optional arguments (@mgirlich, #1358). * `nest()`, `complete()`, `expand()`, and `fill()` now document their support for grouped data frames created by `dplyr::group_by()` (#952). * All built in datasets are now standard tibbles (#1459). * R >=3.4.0 is now required, in line with the tidyverse standard of supporting the previous 5 minor releases of R. * rlang >=1.0.4 and vctrs >=0.5.2 are now required (#1344, #1470). * Removed dependency on ellipsis in favor of equivalent functions in rlang (#1314). ### Nesting, packing, and chopping * `unnest()`, `unchop()`, `unnest_longer()`, and `unnest_wider()` better handle lists with additional classes (#1327). * `pack()`, `unpack()`, `chop()`, and `unchop()` all gain an `error_call` argument, which in turn improves some of the error calls shown in `nest()` and various `unnest()` adjacent functions (#1446). * `chop()`, `unpack()`, and `unchop()` all gain `...`, which must be empty (#1447). * `unpack()` does a better job of reporting column name duplication issues and gives better advice about how to resolve them using `names_sep`. This also improves errors from functions that use `unpack()`, like `unnest()` and `unnest_wider()` (#1425, #1367). ### Pivoting * `pivot_longer()` no longer supports interpreting `values_ptypes = list()` and `names_ptypes = list()` as `NULL`. 
An empty `list()` is now interpreted as a `` prototype to apply to all columns, which is consistent with how any other 0-length value is interpreted (#1296). * `pivot_longer(values_drop_na = TRUE)` is faster when there aren't any missing values to drop (#1392, @mgirlich). * `pivot_longer()` is now more memory efficient due to the usage of `vctrs::vec_interleave()` (#1310, @mgirlich). * `pivot_longer()` now throws a slightly better error message when `values_ptypes` or `names_ptypes` is provided and the coercion can't be made (#1364). * `pivot_wider()` now throws a better error message when a column selected by `names_from` or `values_from` is also selected by `id_cols` (#1318). * `pivot_wider()` is now faster when `names_sep` is provided (@mgirlich, #1426). * `pivot_longer_spec()`, `pivot_wider_spec()`, `build_longer_spec()`, and `build_wider_spec()` all gain an `error_call` argument, resulting in better error reporting in `pivot_longer()` and `pivot_wider()` (#1408). ### Missing values * `fill()` now works correctly when there is a column named `.direction` in `data` (#1319, @tjmahr). * `replace_na()` is faster when there aren't any missing values to replace (#1392, @mgirlich). * The documentation of the `replace` argument of `replace_na()` now mentions that `replace` is always cast to the type of `data` (#1317). # tidyr 1.2.1 * Hot patch release to resolve R CMD check failures. # tidyr 1.2.0 ## Breaking changes * `complete()` and `expand()` no longer allow you to complete or expand on a grouping column. This was never well-defined since completion/expansion on a grouped data frame happens "within" each group and otherwise has the potential to produce erroneous results (#1299). * `replace_na()` no longer allows the type of `data` to change when the replacement is applied. `replace` will now always be cast to the type of `data` before the replacement is made. For example, this means that using a replacement value of `1.5` on an integer column is no longer allowed. 
Similarly, replacing missing values in a list-column must now be done with `list("foo")` rather than just `"foo"`. ## Pivoting * `pivot_wider()` gains new `names_expand` and `id_expand` arguments for turning implicit missing factor levels and variable combinations into explicit ones. This is similar to the `drop` argument from `spread()` (#770). * `pivot_wider()` gains a new `names_vary` argument for controlling the ordering when combining `names_from` values with `values_from` column names (#839). * `pivot_wider()` gains a new `unused_fn` argument for controlling how to summarize unused columns that aren't involved in the pivoting process (#990, thanks to @mgirlich for an initial implementation). * `pivot_longer()`'s `names_transform` and `values_transform` arguments now accept a single function which will be applied to all of the columns (#1284, thanks to @smingerson for an initial implementation). * `pivot_longer()`'s `names_ptypes` and `values_ptypes` arguments now accept a single empty ptype which will be applied to all of the columns (#1284). ## Nesting * `unnest()` and `unchop()`'s `ptype` argument now accepts a single empty ptype which will be applied to all `cols` (#1284). * `unpack()` now silently skips over any non-data frame columns specified by `cols`. This matches the existing behavior of `unchop()` and `unnest()` (#1153). ## Rectangling * `unnest_wider()` and `unnest_longer()` can now unnest multiple columns at once (#740). * `unnest_longer()`'s `indices_to` and `values_to` arguments now accept a glue specification, which is useful when unnesting multiple columns. * For `hoist()`, `unnest_longer()`, and `unnest_wider()`, if a `ptype` is supplied, but that column can't be simplified, the result will be a list-of column where each element has type `ptype` (#998). * `unnest_wider()` gains a new `strict` argument which controls whether or not strict vctrs typing rules should be applied. 
It defaults to `FALSE` for backwards compatibility, and because it is often more useful to be lax when unnesting JSON, which doesn't always map one-to-one with R's types (#1125). * `hoist()`, `unnest_longer()`, and `unnest_wider()`'s `simplify` argument now accepts a named list of `TRUE` or `FALSE` to control simplification on a per column basis (#995). * `hoist()`, `unnest_longer()`, and `unnest_wider()`'s `transform` argument now accepts a single function which will be applied to all components (#1284). * `hoist()`, `unnest_longer()`, and `unnest_wider()`'s `ptype` argument now accepts a single empty ptype which will be applied to all components (#1284). ## Grids * `complete()` gains a new `explicit` argument for limiting `fill` to only implicit missing values. This is useful if you don't want to fill in pre-existing missing values (#1270). * `complete()` gains a grouped data frame method. This generates a more correct completed data frame when groups are involved (#396, #966). ## Missing values * `drop_na()`, `replace_na()`, and `fill()` have been updated to utilize vctrs. This means that you can use these functions on a wider variety of column types, including lubridate's Period types (#1094), data frame columns, and the [rcrd](https://vctrs.r-lib.org/reference/new_rcrd.html) type from vctrs. * `replace_na()` no longer replaces empty atomic elements in list-columns (like `integer(0)`). The only value that is replaced in a list-column is `NULL` (#1168). * `drop_na()` no longer drops empty atomic elements from list-columns (like `integer(0)`). The only value that is dropped in a list-column is `NULL` (#1228). ## Bug fixes and minor improvements ### General * @mgirlich is now a tidyr author in recognition of his significant and sustained contributions. * All lazyeval variants of tidyr verbs have been soft-deprecated. Expect them to move to the defunct stage in the next minor release of tidyr (#1294). 
* `any_of()` and `all_of()` from tidyselect are now re-exported (#1217). * dplyr >= 1.0.0 is now required. ### Pivoting * `pivot_wider()` now gives better advice about how to identify duplicates when values are not uniquely identified (#1113). * `pivot_wider()` now throws a more informative error when `values_fn` doesn't result in a single summary value (#1238). * `pivot_wider()` and `pivot_longer()` now generate more informative errors related to name repair (#987). * `pivot_wider()` now works correctly when `values_fill` is a data frame. * `pivot_wider()` no longer accidentally retains `values_from` when pivoting a zero row data frame (#1249). * `pivot_wider()` now correctly handles the case where an id column name collides with a value from `names_from` (#1107). * `pivot_wider()` and `pivot_longer()` now both check that the spec columns `.name` and `.value` are character vectors. Additionally, the `.name` column must be unique (#1107). * `pivot_wider()`'s `names_from` and `values_from` arguments are now required if their default values of `name` and `value` don't correspond to columns in `data`. Additionally, they must identify at least 1 column in `data` (#1240). * `pivot_wider()`'s `values_fn` argument now correctly allows anonymous functions (#1114). * `pivot_wider_spec()` now works correctly with a 0-row data frame and a `spec` that doesn't identify any rows (#1250, #1252). * `pivot_longer()`'s `names_ptypes` argument is now applied after `names_transform` for consistency with the rectangling functions (i.e. `hoist()`) (#1233). * `check_pivot_spec()` is a new developer facing function for validating a pivot `spec` argument. This is only useful if you are extending `pivot_longer()` or `pivot_wider()` with new S3 methods (#1087). ### Nesting * The `nest()` generic now avoids computing on `.data`, making it more compatible with lazy tibbles (#1134). * The `.names_sep` argument of the data.frame method for `nest()` is now actually used (#1174). 
* `unnest()`'s `ptype` argument now works as expected (#1158). * `unpack()` no longer drops empty columns specified through `cols` (#1191). * `unpack()` now works correctly with data frame columns containing 1 row but 0 columns (#1189). * `chop()` now works correctly with data frames with 0 rows (#1206). * `chop()`'s `cols` argument is no longer optional. This matches the behavior of `cols` seen elsewhere in tidyr (#1205). * `unchop()` now respects `ptype` when unnesting a non-list column (#1211). ### Rectangling * `hoist()` no longer accidentally removes elements that have duplicated names (#1259). ### Grids * The grouped data frame methods for `complete()` and `expand()` now move the group columns to the front of the result (in addition to the columns you completed on or expanded, which were already moved to the front). This should make more intuitive sense, as you are completing or expanding "within" each group, so the group columns should be the first thing you see (#1289). * `complete()` now applies `fill` even when no columns to complete are specified (#1272). * `expand()`, `crossing()`, and `nesting()` now correctly retain `NA` values of factors (#1275). * `expand_grid()`, `expand()`, `nesting()`, and `crossing()` now silently apply name repair to automatically named inputs. This avoids a number of issues resulting from duplicate truncated names (#1116, #1221, #1092, #1037, #992). * `expand_grid()`, `expand()`, `nesting()`, and `crossing()` now allow columns from unnamed data frames to be used in expressions after that data frame was specified, like `expand_grid(tibble(x = 1), y = x)`. This is more consistent with how `tibble()` behaves. * `expand_grid()`, `expand()`, `nesting()`, and `crossing()` now work correctly with data frames containing 0 columns but >0 rows (#1189). 
* `expand_grid()`, `expand()`, `nesting()`, and `crossing()` now return a
  1 row data frame when no inputs are supplied, which is more consistent with
  `prod() == 1L` and the idea that computations involving the number of
  combinations computed from an empty set should return 1 (#1258).

### Missing values

* `drop_na()` no longer drops missing values from all columns when a
  tidyselect expression that results in 0 columns being selected is used
  (#1227).

* `fill()` now treats `NaN` like any other missing value (#982).

# tidyr 1.1.4

* `expand_grid()` is now about twice as fast and `pivot_wider()` is a bit
  faster (@mgirlich, #1130).

* `unchop()` is now much faster, which propagates through to various
  functions, such as `unnest()`, `unnest_longer()`, `unnest_wider()`, and
  `separate_rows()` (@mgirlich, @DavisVaughan, #1127).

* `unnest()` is now much faster (@mgirlich, @DavisVaughan, #1127).

* `unnest()` no longer allows unnesting a list-col containing a mix of vector
  and data frame elements. Previously, this only worked by accident, and is
  considered an off-label usage of `unnest()` that has now become an error.

# tidyr 1.1.3

* tidyr verbs no longer have "default" methods for lazyeval fallbacks. This
  means that you'll get clearer error messages (#1036).

* `uncount()` errors for non-integer weights and gives a clearer error message
  for negative weights (@mgirlich, #1069).

* You can once again unnest dates (#1021, #1089).

* `pivot_wider()` works with data.table and empty key variables (@mgirlich,
  #1066).

* `separate_rows()` works for factor columns (@mgirlich, #1058).

# tidyr 1.1.2

* `separate_rows()` returns to 1.1.0 behaviour for empty strings (@rjpatm,
  #1014).

# tidyr 1.1.1

* New tidyr logo!

* stringi dependency has been removed; this was a substantial dependency that
  made tidyr hard to compile in resource constrained environments (@rjpat,
  #936).

* Replace Rcpp with cpp11. See
  <https://cpp11.r-lib.org/articles/motivations.html> for reasons why.
# tidyr 1.1.0

## General features

* `pivot_longer()`, `hoist()`, `unnest_wider()`, and `unnest_longer()` gain
  new `transform` arguments; these allow you to transform values "in flight".
  They are partly needed because vctrs coercion rules have become stricter,
  but they give you greater flexibility than was available previously (#921).

* Arguments that use tidy selection syntax are now clearly documented and
  have been updated to use tidyselect 1.1.0 (#872).

## Pivoting improvements

* Both `pivot_wider()` and `pivot_longer()` are considerably more performant,
  thanks largely to improvements in the underlying vctrs code (#790,
  @DavisVaughan).

* `pivot_longer()` now supports `names_to = character()` which prevents the
  name column from being created (#961).

  ```{r}
  df <- tibble(id = 1:3, x_1 = 1:3, x_2 = 4:6)
  df %>% pivot_longer(-id, names_to = character())
  ```

* `pivot_longer()` no longer creates a `.copy` variable in the presence of
  duplicate column names. This makes it more consistent with the handling of
  non-unique specs.

* `pivot_longer()` automatically disambiguates non-unique outputs, which can
  occur when the input variables include some additional component that you
  don't care about and want to discard (#792, #793).

  ```{r}
  df <- tibble(id = 1:3, x_1 = 1:3, x_2 = 4:6)
  df %>% pivot_longer(-id, names_pattern = "(.)_.")
  df %>% pivot_longer(-id, names_sep = "_", names_to = c("name", NA))
  df %>% pivot_longer(-id, names_sep = "_", names_to = c(".value", NA))
  ```

* `pivot_wider()` gains a `names_sort` argument which allows you to sort
  column names in order. The default, `FALSE`, orders columns by their first
  appearance (#839). In a future version, I'll consider changing the default
  to `TRUE`.

* `pivot_wider()` gains a `names_glue` argument that allows you to construct
  output column names with a glue specification.
* `pivot_wider()` arguments `values_fn` and `values_fill` can now be single
  values; you now only need to use a named list if you want to use different
  values for different value columns (#739, #746). They also get improved
  errors if they're not of the expected type.

## Rectangling

* `hoist()` now automatically names pluckers that are a single string (#837).
  It errors if you use duplicated column names (@mgirlich, #834), and now uses
  `rlang::list2()` behind the scenes (which means that you can now use `!!!`
  and `:=`) (#801).

* `unnest_longer()`, `unnest_wider()`, and `hoist()` do a better job
  simplifying list-cols. They no longer add unneeded `unspecified()` when the
  result is still a list (#806), and work when the list contains non-vectors
  (#810, #848).

* `unnest_wider(names_sep = "")` now provides default names for unnamed
  inputs, suppressing the many previous name repair messages (#742).

## Nesting

* `pack()` and `nest()` gain a `.names_sep` argument that allows you to strip
  outer names from inner names, in a symmetrical way to how the same argument
  to `unpack()` and `unnest()` combines inner and outer names (#795, #797).

* `unnest_wider()` and `unnest_longer()` can now unnest `list_of` columns.
  This is important for unnesting columns created from `nest()` and with
  `pivot_wider()`, which will create `list_of` columns if the id columns are
  non-unique (#741).

## Bug fixes and minor improvements

* `chop()` now creates list-columns of class `vctrs::list_of()`. This helps
  keep track of the type in case the chopped data frame is empty, allowing
  `unchop()` to reconstitute a data frame with the correct number and types
  of column even when there are no observations.

* `drop_na()` now preserves attributes of unclassed vectors (#905).

* `expand()`, `expand_grid()`, `crossing()`, and `nesting()` once again
  evaluate their inputs iteratively, so you can refer to freshly created
  columns, e.g. `crossing(x = seq(-2, 2), y = x)` (#820).
* `expand()`, `expand_grid()`, `crossing()`, and `nesting()` gain a `.name_repair` giving you control over their name repair strategy (@jeffreypullin, #798). * `extract()` lets you use `NA` in `into`, as documented (#793). * `extract()`, `separate()`, `hoist()`, `unnest_longer()`, and `unnest_wider()` give a better error message if `col` is missing (#805). * `pack()`'s first argument is now `.data` instead of `data` (#759). * `pivot_longer()` now errors if `values_to` is not a length-1 character vector (#949). * `pivot_longer()` and `pivot_wider()` are now generic so implementations can be provided for objects other than data frames (#800). * `pivot_wider()` can now pivot data frame columns (#926) * `unite(na.rm = TRUE)` now works for all types of variable, not just character vectors (#765). * `unnest_wider()` gives a better error message if you attempt to unnest multiple columns (#740). * `unnest_auto()` works when the input data contains a column called `col` (#959). # tidyr 1.0.2 * Minor fixes for dev versions of rlang, tidyselect, and tibble. # tidyr 1.0.1 * Did not exist since I accidentally released v1.0.2 # tidyr 1.0.0 ## Breaking changes See `vignette("in-packages")` for a detailed transition guide. * `nest()` and `unnest()` have new syntax. The majority of existing usage should be automatically translated to the new syntax with a warning. If that doesn't work, put this in your script to use the old versions until you can take a closer look and update your code: ```r library(tidyr) nest <- nest_legacy unnest <- unnest_legacy ``` * `nest()` now preserves grouping, which has implications for downstream calls to group-aware functions, such as `dplyr::mutate()` and `filter()`. * The first argument of `nest()` has changed from `data` to `.data`. * `unnest()` uses the [emerging tidyverse standard](https://www.tidyverse.org/blog/2019/01/tibble-2.0.1/#name-repair) to disambiguate unique names. Use `names_repair = tidyr_legacy` to request the previous approach. 
* `unnest_()`/`nest_()` and the lazyeval methods for `unnest()`/`nest()` are
  now defunct. They have been deprecated for some time, and, since the
  interface has changed, package authors will need to update to avoid
  deprecation warnings. I think one clean break should be less work for
  everyone.

  All other lazyeval functions have been formally deprecated, and will be made
  defunct in the next major release. (See [lifecycle
  vignette](https://lifecycle.r-lib.org/articles/stages.html) for details on
  deprecation stages).

* `crossing()` and `nesting()` now return 0-row outputs if any input is a
  length-0 vector. If you want to preserve the previous behaviour which
  silently dropped these inputs, you should convert empty vectors to `NULL`.
  (More discussion on this general pattern at
  https://github.com/tidyverse/principles/issues/24)

## Pivoting

New `pivot_longer()` and `pivot_wider()` provide modern alternatives to
`spread()` and `gather()`. They have been carefully redesigned to be easier to
learn and remember, and include many new features. Learn more in
`vignette("pivot")`.

These functions resolve multiple existing issues with `spread()`/`gather()`.
Both functions now handle multiple value columns (#149/#150), support more
vector types (#333), use tidyverse conventions for duplicated column names
(#496, #478), and are symmetric (#453). `pivot_longer()` gracefully handles
duplicated column names (#472), and can directly split column names into
multiple variables. `pivot_wider()` can now aggregate (#474), select keys
(#572), and has control over generated column names (#208).

To demonstrate how these functions work in practice, tidyr has gained several
new datasets: `relig_income`, `construction`, `billboard`, `us_rent_income`,
`fish_encounters` and `world_bank_pop`.

Finally, tidyr demos have been removed. They are dated, and have been
superseded by `vignette("pivot")`.
## Rectangling tidyr contains four new functions to support **rectangling**, turning a deeply nested list into a tidy tibble: `unnest_longer()`, `unnest_wider()`, `unnest_auto()`, and `hoist()`. They are documented in a new vignette: `vignette("rectangle")`. `unnest_longer()` and `unnest_wider()` make it easier to unnest list-columns of vectors into either rows or columns (#418). `unnest_auto()` automatically picks between `_longer()` and `_wider()` using heuristics based on the presence of common names. New `hoist()` provides a convenient way of plucking components of a list-column out into their own top-level columns (#341). This is particularly useful when you are working with deeply nested JSON, because it provides a convenient shortcut for the `mutate()` + `map()` pattern: ```{r} df %>% hoist(metadata, name = "name") # shortcut for df %>% mutate(name = map_chr(metadata, "name")) ``` ## Nesting `nest()` and `unnest()` have been updated with new interfaces that are more closely aligned to evolving tidyverse conventions. They use the theory developed in [vctrs](https://vctrs.r-lib.org) to more consistently handle mixtures of input types, and their arguments have been overhauled based on the last few years of experience. They are supported by a new `vignette("nest")`, which outlines some of the main ideas of nested data (it's still very rough, but will get better over time). The biggest change is to their operation with multiple columns: `df %>% unnest(x, y, z)` becomes `df %>% unnest(c(x, y, z))` and `df %>% nest(x, y, z)` becomes `df %>% nest(data = c(x, y, z))`. I have done my best to ensure that common uses of `nest()` and `unnest()` will continue to work, generating an informative warning telling you precisely how you need to update your code. Please [file an issue](https://github.com/tidyverse/tidyr/issues/new) if I've missed an important use case. 
`unnest()` has been overhauled:

* New `keep_empty` parameter ensures that every row in the input gets at
  least one row in the output, inserting missing values as needed (#358).

* Provides `names_sep` argument to control how inner and outer column names
  are combined.

* Uses standard tidyverse name-repair rules, so by default you will get an
  error if the output would contain multiple columns with the same name. You
  can override by using `name_repair` (#514).

* Now supports `NULL` entries (#436).

## Packing and chopping

Under the hood, `nest()` and `unnest()` are implemented with `chop()`,
`pack()`, `unchop()`, and `unpack()`:

* `pack()` and `unpack()` allow you to pack and unpack columns into data
  frame columns (#523).

* `chop()` and `unchop()` chop up rows into sets of list-columns.

Packing and chopping are interesting primarily because they are the atomic
operations underlying nesting (and similarly, unchop and unpacking underlie
unnesting), and I don't expect them to be used directly very often.

## New features

* New `expand_grid()`, a tidy version of `expand.grid()`, is lower-level than
  the existing `expand()` and `crossing()` functions, as it takes individual
  vectors, and does not sort or uniquify them.

* `crossing()`, `nesting()`, and `expand()` have been rewritten to use the
  vctrs package. This should not affect much existing code, but considerably
  simplifies the implementation and ensures that these functions work
  consistently across all generalised vectors (#557). As part of this
  alignment, these functions now only drop `NULL` inputs, not any 0-length
  vector.

## Bug fixes and minor improvements

* `full_seq()` now also works when gaps between observations are shorter than
  the given `period`, but are within the tolerance given by `tol`. Previously,
  gaps between consecutive observations had to be in the range [`period`,
  `period + tol`]; gaps can now be in the range [`period - tol`,
  `period + tol`] (@ha0ye, #657).
* tidyr now re-exports `tibble()`, `as_tibble()`, and `tribble()`, as well as the tidyselect helpers (`starts_with()`, `ends_with()`, ...). This makes generating documentation, reprexes, and tests easier, and makes tidyr easier to use without also attaching dplyr. * All functions that take `...` have been instrumented with functions from the [ellipsis](https://github.com/r-lib/ellipsis/) package to warn if you've supplied arguments that are ignored (typically because you've misspelled an argument name) (#573). * `complete()` now uses `full_join()` so that all levels are preserved even when not all levels are specified (@Ryo-N7, #493). * `crossing()` now takes the unique values of data frame inputs, not just vector inputs (#490). * `gather()` throws an error if a column is a data frame (#553). * `extract()` (and hence `pivot_longer()`) can extract multiple input values into a single output column (#619). * `fill()` is now implemented using `dplyr::mutate_at()`. This radically simplifies the implementation and considerably improves performance when working with grouped data (#520). * `fill()` now accepts `downup` and `updown` as fill directions (@coolbutuseless, #505). * `unite()` gains `na.rm` argument, making it easier to remove missing values prior to uniting values together (#203) # tidyr 0.8.3 * `crossing()` preserves factor levels (#410), now works with list-columns (#446, @SamanthaToet). (These also help `expand()` which is built on top of `crossing()`) * `nest()` is compatible with dplyr 0.8.0. * `spread()` works when the id variable has names (#525). * `unnest()` preserves column being unnested when input is zero-length (#483), using `list_of()` attribute to correctly restore columns, where possible. * `unnest()` will run with named and unnamed list-columns of same length (@hlendway, #460). # tidyr 0.8.2 * `separate()` now accepts `NA` as a column name in the `into` argument to denote columns which are omitted from the result. (@markdly, #397). 
* Minor updates to ensure compatibility with dependencies. # tidyr 0.8.1 * `unnest()` weakens test of "atomicity" to restore previous behaviour when unnesting factors and dates (#407). # tidyr 0.8.0 ## Breaking changes * There are no deliberate breaking changes in this release. However, a number of packages are failing with errors related to numbers of elements in columns, and row names. It is possible that these are accidental API changes or new bugs. If you see such an error in your package, I would sincerely appreciate a minimal reprex. * `separate()` now correctly uses -1 to refer to the far right position, instead of -2. If you depended on this behaviour, you'll need to switch on `packageVersion("tidyr") > "0.7.2"` ## New features * Increased test coverage from 84% to 99%. * `uncount()` performs the inverse operation of `dplyr::count()` (#279) ## Bug fixes and minor improvements * `complete(data)` now returns `data` rather than throwing an error (#390). `complete()` with zero-length completions returns original input (#331). * `crossing()` preserves `NA`s (#364). * `expand()` with empty input gives empty data frame instead of `NULL` (#331). * `expand()`, `crossing()`, and `complete()` now complete empty factors instead of dropping them (#270, #285) * `extract()` has a better error message if `regex` does not contain the expected number of groups (#313). * `drop_na()` no longer drops columns (@jennybryan, #245), and works with list-cols (#280). Equivalent of `NA` in a list column is any empty (length 0) data structure. * `nest()` is now faster, especially when a long data frame is collapsed into a nested data frame with few rows. * `nest()` on a zero-row data frame works as expected (#320). * `replace_na()` no longer complains if you try and replace missing values in variables not present in the data (#356). * `replace_na()` now also works with vectors (#342, @flying-sheep), and can replace `NULL` in list-columns. 
It throws a better error message if you attempt to replace with something other than length 1. * `separate()` no longer checks that `...` is empty, allowing methods to make use of it. This check was added in tidyr 0.4.0 (2016-02-02) to deprecate previous behaviour where `...` was passed to `strsplit()`. * `separate()` and `extract()` now insert columns in correct position when `drop = TRUE` (#394). * `separate()` now correctly counts from the RHS when using negative integer `sep` values (@markdly, #315). * `separate()` gets improved warning message when pieces aren't as expected (#375). * `separate_rows()` supports list columns (#321), and works with empty tibbles. * `spread()` now consistently returns 0 row outputs for 0 row inputs (#269). * `spread()` now works when `key` column includes `NA` and `drop` is `FALSE` (#254). * `spread()` no longer returns tibbles with row names (#322). * `spread()`, `separate()`, `extract()` (#255), and `gather()` (#347) now replace existing variables rather than creating an invalid data frame with duplicated variable names (matching the semantics of mutate). * `unite()` now works (as documented) if you don't supply any variables (#355). * `unnest()` gains `preserve` argument which allows you to preserve list columns without unnesting them (#328). * `unnest()` can unnest list-columns containing lists of lists (#278). * `unnest(df)` now works if `df` contains no list-cols (#344) # tidyr 0.7.2 * The SE variants `gather_()`, `spread_()` and `nest_()` now treat non-syntactic names in the same way as pre tidy eval versions of tidyr (#361). * Fix tidyr bug revealed by R-devel. # tidyr 0.7.1 This is a hotfix release to account for some tidyselect changes in the unit tests. Note that the upcoming version of tidyselect backtracks on some of the changes announced for 0.7.0. The special evaluation semantics for selection have been changed back to the old behaviour because the new rules were causing too much trouble and confusion. 
From now on data expressions (symbols and calls to `:` and `c()`) can refer to both registered variables and to objects from the context. However the semantics for context expressions (any calls other than to `:` and `c()`) remain the same. Those expressions are evaluated in the context only and cannot refer to registered variables. If you're writing functions and refer to contextual objects, it is still a good idea to avoid data expressions by following the advice of the 0.7.0 release notes. # tidyr 0.7.0 This release includes important changes to tidyr internals. Tidyr now supports the new tidy evaluation framework for quoting (NSE) functions. It also uses the new tidyselect package as selecting backend. ## Breaking changes - If you see error messages about objects or functions not found, it is likely because the selecting functions are now stricter in their arguments An example of selecting function is `gather()` and its `...` argument. This change makes the code more robust by disallowing ambiguous scoping. Consider the following code: ``` x <- 3 df <- tibble(w = 1, x = 2, y = 3) gather(df, "variable", "value", 1:x) ``` Does it select the first three columns (using the `x` defined in the global environment), or does it select the first two columns (using the column named `x`)? To solve this ambiguity, we now make a strict distinction between data and context expressions. A data expression is either a bare name or an expression like `x:y` or `c(x, y)`. In a data expression, you can only refer to columns from the data frame. Everything else is a context expression in which you can only refer to objects that you have defined with `<-`. In practice this means that you can no longer refer to contextual objects like this: ``` mtcars %>% gather(var, value, 1:ncol(mtcars)) x <- 3 mtcars %>% gather(var, value, 1:x) mtcars %>% gather(var, value, -(1:x)) ``` You now have to be explicit about where to find objects. 
To do so, you can use the quasiquotation operator `!!` which will evaluate its argument early and inline the result: ```{r} mtcars %>% gather(var, value, !! 1:ncol(mtcars)) mtcars %>% gather(var, value, !! 1:x) mtcars %>% gather(var, value, !! -(1:x)) ``` An alternative is to turn your data expression into a context expression by using `seq()` or `seq_len()` instead of `:`. See the section on tidyselect for more information about these semantics. - Following the switch to tidy evaluation, you might see warnings about the "variable context not set". This is most likely caused by supplying helpers like `everything()` to underscored versions of tidyr verbs. Helpers should always be evaluated lazily. To fix this, just quote the helper with a formula: `drop_na(df, ~everything())`. - The selecting functions are now stricter when you supply integer positions. If you see an error along the lines of ``` `-0.949999999999999`, `-0.940000000000001`, ... must resolve to integer column positions, not a double vector ``` please round the positions before supplying them to tidyr. Double vectors are fine as long as they are rounded. ## Switch to tidy evaluation tidyr is now a tidy evaluation grammar. See the [programming vignette](https://dplyr.tidyverse.org/articles/programming.html) in dplyr for practical information about tidy evaluation. The tidyr port is a bit special. While the philosophy of tidy evaluation is that R code should refer to real objects (from the data frame or from the context), we had to make some exceptions to this rule for tidyr. The reason is that several functions accept bare symbols to specify the names of _new_ columns to create (`gather()` being a prime example). This is not tidy because the symbols do not represent any actual object. Our workaround is to capture these arguments using `rlang::quo_name()` (so they still support quasiquotation and you can unquote symbols or strings). 
This type of NSE is now discouraged in the tidyverse: symbols in R code should represent real objects. Following the switch to tidy eval the underscored variants are softly deprecated. However they will remain around for some time and without warning for backward compatibility. ## Switch to the tidyselect backend The selecting backend of dplyr has been extracted in a standalone package tidyselect which tidyr now uses for selecting variables. It is used for selecting multiple variables (in `drop_na()`) as well as single variables (the `col` argument of `extract()` and `separate()`, and the `key` and `value` arguments of `spread()`). This implies the following changes: * The arguments for selecting a single variable now support all features from `dplyr::pull()`. You can supply a name or a position, including negative positions. * Multiple variables are now selected a bit differently. We now make a strict distinction between data and context expressions. A data expression is either a bare name or an expression like `x:y` or `c(x, y)`. In a data expression, you can only refer to columns from the data frame. Everything else is a context expression in which you can only refer to objects that you have defined with `<-`. You can still refer to contextual objects in a data expression by being explicit. One way of being explicit is to unquote a variable from the environment with the tidy eval operator `!!`: ```r x <- 2 drop_na(df, 2) # Works fine drop_na(df, x) # Object 'x' not found drop_na(df, !! x) # Works as if you had supplied 2 ``` On the other hand, select helpers like `starts_with()` are context expressions. It is therefore easy to refer to objects and they will never be ambiguous with data columns: ```{r} x <- "d" drop_na(df, starts_with(x)) ``` While these special rules are in contrast to most dplyr and tidyr verbs (where both the data and the context are in scope) they make sense for selecting functions and should provide more robust and helpful semantics. 
# tidyr 0.6.3 * Patch tests to be compatible with dev tibble # tidyr 0.6.2 * Register C functions * Added package docs * Patch tests to be compatible with dev dplyr. # tidyr 0.6.1 * Patch test to be compatible with dev tibble * Changed deprecation message of `extract_numeric()` to point to `readr::parse_number()` rather than `readr::parse_numeric()` # tidyr 0.6.0 ## API changes * `drop_na()` removes observations which have `NA` in the given variables. If no variables are given, all variables are considered (#194, @janschulz). * `extract_numeric()` has been deprecated (#213). * Renamed `table4` and `table5` to `table4a` and `table4b` to make their connection more clear. The `key` and `value` variables in `table2` have been renamed to `type` and `count`. ## Bug fixes and minor improvements * `expand()`, `crossing()`, and `nesting()` now silently drop zero-length inputs. * `crossing_()` and `nesting_()` are versions of `crossing()` and `nesting()` that take a list as input. * `full_seq()` works correctly for dates and date/times. # tidyr 0.5.1 * Restored compatibility with R < 3.3.0 by avoiding `getS3method(envir = )` (#205, @krlmlr). # tidyr 0.5.0 ## New functions * `separate_rows()` separates observations with multiple delimited values into separate rows (#69, @aaronwolen). ## Bug fixes and minor improvements * `complete()` preserves grouping created by dplyr (#168). * `expand()` (and hence `complete()`) preserves the ordered attribute of factors (#165). * `full_seq()` preserve attributes for dates and date/times (#156), and sequences no longer need to start at 0. * `gather()` can now gather together list columns (#175), and `gather_.data.frame(na.rm = TRUE)` now only removes missing values if they're actually present (#173). * `nest()` returns correct output if every variable is nested (#186). * `separate()` fills from right-to-left (not left-to-right!) when fill = "left" (#170, @dgrtwo). 
* `separate()` and `unite()` now automatically drop removed variables from grouping (#159, #177). * `spread()` gains a `sep` argument. If not-null, this will name columns as "keyvalue". Additionally, if sep is `NULL` missing values will be converted to `` (#68). * `spread()` works in the presence of list-columns (#199) * `unnest()` works with non-syntactic names (#190). * `unnest()` gains a `sep` argument. If non-null, this will rename the columns of nested data frames to include both the original column name, and the nested column name, separated by `.sep` (#184). * `unnest()` gains `.id` argument that works the same way as `bind_rows()`. This is useful if you have a named list of data frames or vectors (#125). * Moved in useful sample datasets from the DSR package. * Made compatible with both dplyr 0.4 and 0.5. * tidyr functions that create new columns are more aggressive about re-encoding the column names as UTF-8. # tidyr 0.4.1 * Fixed bug in `nest()` where nested data was ending up in the wrong row (#158). # tidyr 0.4.0 ## Nested data frames `nest()` and `unnest()` have been overhauled to support a useful way of structuring data frames: the __nested__ data frame. In a grouped data frame, you have one row per observation, and additional metadata define the groups. In a nested data frame, you have one __row__ per group, and the individual observations are stored in a column that is a list of data frames. This is a useful structure when you have lists of other objects (like models) with one element per group. * `nest()` now produces a single list of data frames called "data" rather than a list column for each variable. Nesting variables are not included in nested data frames. It also works with grouped data frames made by `dplyr::group_by()`. You can override the default column name with `.key`. * `unnest()` gains a `.drop` argument which controls what happens to other list columns. 
By default, they're kept if the output doesn't require row duplication; otherwise they're dropped. * `unnest()` now has `mutate()` semantics for `...` - this allows you to unnest transformed columns more easily. (Previously it used select semantics). ## Expanding * `expand()` once again allows you to evaluate arbitrary expressions like `full_seq(year)`. If you were previously using `c()` to created nested combinations, you'll now need to use `nesting()` (#85, #121). * `nesting()` and `crossing()` allow you to create nested and crossed data frames from individual vectors. `crossing()` is similar to `base::expand.grid()` * `full_seq(x, period)` creates the full sequence of values from `min(x)` to `max(x)` every `period` values. ## Minor bug fixes and improvements * `fill()` fills in `NULL`s in list-columns. * `fill()` gains a direction argument so that it can fill either upwards or downwards (#114). * `gather()` now stores the key column as character, by default. To revert to the previous behaviour of using a factor (which allows you to preserve the ordering of the columns), use `key_factor = TRUE` (#96). * All tidyr verbs do the right thing for grouped data frames created by `group_by()` (#122, #129, #81). * `seq_range()` has been removed. It was never used or announced. * `spread()` once again creates columns of mixed type when `convert = TRUE` (#118, @jennybc). `spread()` with `drop = FALSE` handles zero-length factors (#56). `spread()`ing a data frame with only key and value columns creates a one row output (#41). * `unite()` now removes old columns before adding new (#89, @krlmlr). * `separate()` now warns if defunct ... argument is used (#151, @krlmlr). # tidyr 0.3.1 * Fixed bug where attributes of non-gather columns were lost (#104) # tidyr 0.3.0 ## New features * New `complete()` provides a wrapper around `expand()`, `left_join()` and `replace_na()` for a common task: completing a data frame with missing combinations of variables. 
* `fill()` fills in missing values in a column with the last non-missing value (#4). * New `replace_na()` makes it easy to replace missing values with something meaningful for your data. * `nest()` is the complement of `unnest()` (#3). * `unnest()` can now work with multiple list-columns at the same time. If you don't supply any column names, it will unlist all list-columns (#44). `unnest()` can also handle columns that are lists of data frames (#58). ## Bug fixes and minor improvements * tidyr no longer depends on reshape2. This should fix issues if you also try to load reshape (#88). * `%>%` is re-exported from magrittr. * `expand()` now supports nesting and crossing (see examples for details). This comes at the expense of creating new variables inline (#46). * `expand_` does SE evaluation correctly so you can pass it a character vector of column names (or list of formulas etc) (#70). * `extract()` is 10x faster because it now uses stringi instead of base R regular expressions. It also returns NA instead of throwing an error if the regular expression doesn't match (#72). * `extract()` and `separate()` preserve character vectors when `convert` is TRUE (#99). * The internals of `spread()` have been rewritten, and now preserve all attributes of the input `value` column. This means that you can now spread date (#62) and factor (#35) inputs. * `spread()` gives a more informative error message if `key` or `value` don't exist in the input data (#36). * `separate()` only displays the first 20 failures (#50). It has finer control over what happens if there are too few matches: you can fill with missing values on either the "left" or the "right" (#49). `separate()` no longer throws an error if the number of pieces aren't as expected - instead it drops extra values and fills on the right and gives a warning. * If the input is NA `separate()` and `extract()` both silently return NA outputs, rather than throwing an error. 
(#77) * Experimental `unnest()` method for lists has been removed. # tidyr 0.2.0 ## New functions * Experimental `expand()` function (#21). * Experiment `unnest()` function for converting named lists into data frames. (#3, #22) ## Bug fixes and minor improvements * `extract_numeric()` preserves negative signs (#20). * `gather()` has better defaults if `key` and `value` are not supplied. If `...` is omitted, `gather()` selects all columns (#28). Performance is now comparable to `reshape2::melt()` (#18). * `separate()` gains `extra` argument which lets you control what happens to extra pieces. The default is to throw an "error", but you can also "merge" or "drop". * `spread()` gains `drop` argument, which allows you to preserve missing factor levels (#25). It converts factor value variables to character vectors, instead of embedding a matrix inside the data frame (#35). tidyr/MD50000644000176200001440000002770414554221641011733 0ustar liggesusers3ad2dc67e354ecffe2827e5705353c7f *DESCRIPTION 8ab1fac1816676ee4ca51609142cacda *LICENSE d139351f87716aba649f7fa3ddde41c7 *NAMESPACE c60e1eb4edbda781b1a72dcaa23ba34e *NEWS.md bbcdcba5ff3cd41f8af54f8363eb08b5 *R/append.R b6f8cfb26b2a569aceeca86c70adb25f *R/chop.R 8e1d537f1356b68f8864130d1ebb72ab *R/compat-lazyeval.R 07c06e6be0443b7d5b9094f11daa406f *R/compat-obj-type.R e39d35d817dec590fcb8b9a2715ba226 *R/compat-types-check.R bc0bc6ec3f2eae8eb0c2628c8931b137 *R/complete.R 657c773ef60163a1db7321543f8b2b1b *R/cpp11.R 1d0c7b7c80e7dd7d48de7b770a769c56 *R/data.R c89e18ec23c33ed2e334f4b07202d20b *R/dep-extract.R bd2b950ed80ae83b47efc4ebf6a5a9d5 *R/dep-lazyeval.R 5cc5f64e959c3e6cb10a79bdcb8759cc *R/doc-params.R 4e96b593f39ed5a1b61255fe16b5ae09 *R/drop-na.R 66bb6136f676acf7a01c4a353f6cfad4 *R/expand.R 75d9e883bf64e4ab9a2d5c323839aafa *R/extract.R 10e0fc10fa3dec17938f8ca68e31769e *R/fill.R 1b85b6f3a4d71e738b0990e4459885a2 *R/gather.R 0266ecc93c40f362ec6413661964f687 *R/hoist.R 150ab4e171183c10c873eb958ea1ef63 *R/id.R 
2657fc5fc00618430a06d3d93a8d6f5c *R/nest-legacy.R 87833969584b822c2d46a1aec5c67285 *R/nest.R bb3e30e845ee113f6e6ae00ea29562fd *R/pack.R 9f79aac07ad5ad8736e20fed629bbd9e *R/pivot-long.R 8dbf78cf960e03888dbcd84bda3e228c *R/pivot-wide.R fe2a8943bce71dc634db74d8421bb3ed *R/pivot.R b79c2e86c789dd992e46e264b4283564 *R/replace_na.R 14d00a807f9a0a894adb9ef5d9c3e150 *R/separate-longer.R 1532b72bc211e7714b5554b9dfc5d699 *R/separate-rows.R efb3a91b2d5238d6a0756d469d8bb2c0 *R/separate-wider.R 5ed50a52656089aa3541285350d0dc5c *R/separate.R ca381e66ba3f729543e980503e11da9b *R/seq.R f72ef95433945175b43705a3844a6a7f *R/spread.R 2a8877a9da353e47e8d98aa25f208861 *R/tidyr.R 90315d66d28c63f4a23b6bf730172292 *R/uncount.R 87182963348ec47129a932e68ff42064 *R/unite.R 3643861c9bbc296139ab33a4c614abb6 *R/unnest-auto.R 2a39d36175e25501c21b331c4ca8f914 *R/unnest-helper.R 64226e9cbb2f9f9016a443caee23c2cd *R/unnest-longer.R a58a132d6d6192ea310e3037ea3aa26f *R/unnest-wider.R 96dd4a1f8841070eb50ab41b56a1adfc *R/unnest.R ff5a46dc75c86794a4fbae26d8707b48 *R/utils.R 25982996252cf9606bb5184a539bf630 *R/zzz.R 2b2bc635e70438c8a58e40fabf479ea0 *README.md 421342b1b315edf7f21bc71c245425e1 *build/vignette.rds d48e018d03266aff2a6712acae6e3bab *data/billboard.rda 59c4ece444461b8f6cbeff4b91185f89 *data/cms_patient_care.rda c97197f57341f7f8ba334bf0ae644880 *data/cms_patient_experience.rda 13ee0eb272050358d8d0b8863c6ff5e9 *data/construction.rda 29effe2a0f7104454542f762ba194e51 *data/fish_encounters.rda d6533eb20cc039d3eff1d31ce1f980d7 *data/household.rda 462db407803698b4924c9a7e9d14a112 *data/population.rda ec487685eedc5ab42a2512491e2684c2 *data/relig_income.rda 09351c2b78ab1238f00e52e01320cf51 *data/smiths.rda 051effc1a51def015df5fd2ad690dfae *data/table1.rda 1e6a73e3033352b35060b3b784b65061 *data/table2.rda 65d7bea1c93aa039c648a1d35fdcc49e *data/table3.rda 159d9fa723bf91b0b30d73fca1acd78b *data/table4a.rda b42f324c3fb4796777cd31573f83f1b3 *data/table4b.rda a31a10b62ee207df303a126c05fe98fd *data/table5.rda 
5188aafac9e311e64eae6d2de842f0ca *data/us_rent_income.rda 1ee217c660cc234d2f37de4fcaf34eb7 *data/who.rda a548e94a8049726224f57e648d0124b1 *data/who2.rda 721ee6f256a1148522eadc9c825e8425 *data/world_bank_pop.rda 5d138afc7419fa2a4e103dfa767e6cc0 *inst/doc/in-packages.R a39be9e4fd95a116283859dc70afdd78 *inst/doc/in-packages.Rmd 327860f3031a15709a7f5cc0d759e679 *inst/doc/in-packages.html 125d4207bf04a87ad341aa2e9eb72067 *inst/doc/nest.R 6161ad5864fafdeea47e6b09f6e50532 *inst/doc/nest.Rmd 38866efed06b0222fd7e7ff41c3baf80 *inst/doc/nest.html aea3c6b28cd002fa9e8c4cc7ebae6eda *inst/doc/pivot.R 195e7517db19652a85623eeb74660056 *inst/doc/pivot.Rmd 47375fc56601cf804e6c6249d1364a09 *inst/doc/pivot.html 52c7eb8b5ac20457e89a439084b41617 *inst/doc/programming.R 9f5e406315affe10edf627ad79373797 *inst/doc/programming.Rmd dacf5554ca60737ec85cbb28f39266ca *inst/doc/programming.html 8a3d737f9d24a84e847b6e5b5925d2ea *inst/doc/rectangle.R fd8bd5e51ac26a4cf8e212528f32a701 *inst/doc/rectangle.Rmd 11c9499beac0bbc3bcf92e87fd0e69fa *inst/doc/rectangle.html 49b3192964c4b205db42a9dd10fb5fd8 *inst/doc/tidy-data.R 1d25d674f25be4e450d8dfc9c3065835 *inst/doc/tidy-data.Rmd 0ba4597eded9d83167cba84925222fd7 *inst/doc/tidy-data.html ca14baa05fc50140b428f09cb1848004 *man/billboard.Rd 031b8549d351f6d52cb049b5427795a3 *man/check_pivot_spec.Rd b1e5df0644e8072c4730f993d26043d0 *man/chop.Rd 60eb4476603f95a3cbbb1b655c16a868 *man/cms_patient_experience.Rd 61b60486e59f8d9f02b89eb9e3f99a8a *man/complete.Rd 3a534d46134cd72889e500e70deeac53 *man/construction.Rd 9e456d4934a1906f0d4cfe39198b397b *man/deprecated-se.Rd 05ce01009eac73ae6211d75c106ea1e2 *man/drop_na.Rd 0263c88ebc1c2a56f301ef11653285d6 *man/expand.Rd 22282dd4881335d82654a5eaf61d7ecf *man/expand_grid.Rd afcfe5c8b58ab61a173a916d05a86fc2 *man/extract.Rd 602508d0d844a2293cb4f980619d9da9 *man/extract_numeric.Rd cb1e46f469cfbbbde29c8b5113e1d789 *man/figures/lifecycle-archived.svg c0d2e5a54f1fa4ff02bf9533079dd1f7 *man/figures/lifecycle-defunct.svg 
a1b8c987c676c16af790f563f96cbb1f *man/figures/lifecycle-deprecated.svg c3978703d8f40f2679795335715e98f4 *man/figures/lifecycle-experimental.svg 952b59dc07b171b97d5d982924244f61 *man/figures/lifecycle-maturing.svg 27b879bf3677ea76e3991d56ab324081 *man/figures/lifecycle-questioning.svg 6902bbfaf963fbc4ed98b86bda80caa2 *man/figures/lifecycle-soft-deprecated.svg 53b3f893324260b737b3c46ed2a0e643 *man/figures/lifecycle-stable.svg 1c1fe7a759b86dc6dbcbe7797ab8246c *man/figures/lifecycle-superseded.svg 81baba887ee2226360f3bac4f99c6679 *man/figures/logo.png 968c87d5b2447981d3f03e6c582132c0 *man/fill.Rd 4a30112130793bf7d723ea481e223120 *man/fish_encounters.Rd 2ce354fda112b944beb91a8c27d989db *man/full_seq.Rd 79f8039d65ea18932f5bc2350e6226a4 *man/gather.Rd 4c0554d4d80bad56522c8f566ae76576 *man/hoist.Rd d8a85c7893c746deeca02c8e84420bf7 *man/household.Rd cf11849330a89199b7a0067fb34e9a9f *man/nest.Rd aef95c6be7d4d65d8e41485fbd62c67c *man/nest_legacy.Rd 6f7187f590fa8bad07da98989a6f8d27 *man/pack.Rd 0f020b37daf27c2fd4c78c574285ef1b *man/pipe.Rd d1d1ba0956f4b92541ca13c3db96f010 *man/pivot_longer.Rd 76c4fd19bc98de183a4f7e1c2bedb0ea *man/pivot_longer_spec.Rd ce80836ea9f4852ad36868b6f0c2c5fc *man/pivot_wider.Rd c03ad8c4835d209c8922c9d329f2c22a *man/pivot_wider_spec.Rd 8fd438a3972769287c2e2222e7c03ad0 *man/reexports.Rd 999faf582af874e95712af131f43b049 *man/relig_income.Rd 5efe4043e8b4e78a0703b421b18207ec *man/replace_na.Rd 571d10493d135be51cfe1d7fd132c9a9 *man/rmd/overview.Rmd 025d5d6728dbf6099953dcf1e43a9f48 *man/separate.Rd afd5da4c3774ff01b9c74515ae846c9d *man/separate_longer_delim.Rd 1fd5d81dea84425828e6ba2a959f86fc *man/separate_rows.Rd 23e520cd86b4387f0b740176eca2ade2 *man/separate_wider_delim.Rd da512183f68d91224f98842f41a5ed3a *man/smiths.Rd 707b31d2730c19447d947810ef86bd71 *man/spread.Rd 42f2ef012ee612fcb32dc9d37595595b *man/table1.Rd 3ee8658ea79120abbdf10c15b911ea01 *man/tidyr-package.Rd b17eebd8679bcc19242aaffb22aa7b93 *man/tidyr_data_masking.Rd 
c82530f3976b961bda1f9d77fb2f634c *man/tidyr_legacy.Rd 0eb623c67057994643377d04db53358a *man/tidyr_tidy_select.Rd d18313e8cea80d4f7663f301155c3f1a *man/uncount.Rd 748870f5a0755de7a1c6aa4158c9d444 *man/unite.Rd 2d3d45eca11615a846dd2fbfafb64472 *man/unnest.Rd 1e99f8c051f600730d54310acfb95e91 *man/unnest_auto.Rd 9217a554e3682101e33086f318edff32 *man/unnest_longer.Rd 1857822727f852fc2bdb0530f395b46b *man/unnest_wider.Rd 20be585e8f3ec47d3611f153786f7969 *man/us_rent_income.Rd 5ea3a51c9a3a171b145fec3a16861a36 *man/who.Rd 279fe923273623e597c445e52137cfdc *man/world_bank_pop.Rd 1cb63e96e3be857861463c03677644df *src/cpp11.cpp 57689504a86332e5b40475105e93e62d *src/melt.cpp 404d58a78f1973dc8139f21d486bc9d5 *src/simplifyPieces.cpp 14fd04cc33329083bbe4c25bdd2f0531 *tests/testthat.R 21e1ecadf5a8a5ec7b9225cfa47cf9c5 *tests/testthat/_snaps/append.md 16071617dbac9be8a6d68214449fb8a8 *tests/testthat/_snaps/chop.md d6fe7bc524da1566e77179e40a54be60 *tests/testthat/_snaps/complete.md de9c8f15bab5402d07f7ac79c086e338 *tests/testthat/_snaps/drop-na.md 4cee33fd94c4c640db54d7b61fb08e7b *tests/testthat/_snaps/expand.md dfd64c7aa677ab330f4a9f62be2712fd *tests/testthat/_snaps/extract.md 0f9b01f2bd9b25c52279ec8715e11eae *tests/testthat/_snaps/fill.md c47f0d6faac8346adbd9065b16169b25 *tests/testthat/_snaps/gather.md 5a39c3fdb1e214106e0fd15a4887d3fa *tests/testthat/_snaps/hoist.md 2b0fb94578f239c38ad61bf07e6c9299 *tests/testthat/_snaps/nest-legacy.md 8f865c3818456d1606e04129bac8ef80 *tests/testthat/_snaps/nest.md bb24fc065c193e6ba68d29b48da55149 *tests/testthat/_snaps/pack.md 2753a1f23c80535da292fc26f86e3b7d *tests/testthat/_snaps/pivot-long.md 8e9bc658dc5e9a59853113e507a253c4 *tests/testthat/_snaps/pivot-wide.md 9c148bfc0f5992b4b3b9703d9b444efe *tests/testthat/_snaps/pivot.md 1b819b140b04241c4208ba31b6b55134 *tests/testthat/_snaps/replace_na.md b5523e6b679750f8f5a45442b2667b7d *tests/testthat/_snaps/separate-longer.md 60ec7499b36019168c1156f262daa1f2 *tests/testthat/_snaps/separate-rows.md 
84fb935b5dc94a421a18cb7b9b521eeb *tests/testthat/_snaps/separate-wider.md 0a5e651e2b63d272f70c3fd7d32274f9 *tests/testthat/_snaps/separate.md 773e3c55d7276a145bda5a787d8b78b3 *tests/testthat/_snaps/seq.md bca24be555073c9c08f3c8e8a8ff14b8 *tests/testthat/_snaps/spread.md 17199c2c2b0fe4434b89dd47e79450fc *tests/testthat/_snaps/uncount.md a617bc3c401ed7562b6ee1e602573633 *tests/testthat/_snaps/unite.md 3c449a3de719985f4e4993218ebc19f4 *tests/testthat/_snaps/unnest-helper.md bf825b73e59726db666890454b5aef62 *tests/testthat/_snaps/unnest-longer.md c6537b995d0bed5f5ce806bde0654354 *tests/testthat/_snaps/unnest-wider.md 21670b12c9a5bec1835d08aa2a3fa7ca *tests/testthat/_snaps/unnest.md 94a4c9f9570bd577ed08f4ccf781fcaa *tests/testthat/test-append.R 21ac814b379c10aad40e905c6439d5c6 *tests/testthat/test-chop.R 475638b40090c5cfda004a89ac475d82 *tests/testthat/test-complete.R 8103cb0d902a6b5a8e554bd4cd615203 *tests/testthat/test-drop-na.R 63ff87953e6d1783c303fc661c8a5ce4 *tests/testthat/test-expand.R 9f385a80fe67a3ca92dd30f49c949f3c *tests/testthat/test-extract.R f5c82fbead2a57c031717cd1a03d4474 *tests/testthat/test-fill.R 2445c5e01d44b7a9c274fd0994764e7f *tests/testthat/test-gather.R ca8ba5d005dafff6b4a77686a98db9f6 *tests/testthat/test-hoist.R 5808d16318a3425db634f7332e5e9008 *tests/testthat/test-id.R 4117781ee180bbf159f3648fef77d04b *tests/testthat/test-nest-legacy.R c504dc458e62ff854f5efa22535298d3 *tests/testthat/test-nest.R 683821043a66e8c3ca94990f74836909 *tests/testthat/test-pack.R 6333764e1c0a416a4c6aa00f3e2819f5 *tests/testthat/test-pivot-long.R 82a4dacc0ba53e7b9badeee44718e642 *tests/testthat/test-pivot-wide.R 975ebbc2f05b0d630315a8314f963d60 *tests/testthat/test-pivot.R 7116edf6181b28cf7ebbe247bcf1f6fd *tests/testthat/test-replace_na.R bdbb17175d59a97b470826157a2b851b *tests/testthat/test-separate-longer.R bd1f2b6006b72d3e56512b149fc1e3eb *tests/testthat/test-separate-rows.R afd4fec8d4a53568400d1dd2ba179def *tests/testthat/test-separate-wider.R 
3a5b420c45b04d1093c2bc20673521d6 *tests/testthat/test-separate.R 2809261765f99c39ae868bc7deae2e22 *tests/testthat/test-seq.R 990599c14183ac4809db4e598e9da93b *tests/testthat/test-spread.R 13949782218f25512f0123d113c8560f *tests/testthat/test-uncount.R 570705678b16296d21bf32508f18447b *tests/testthat/test-unite.R 7985a54249f3764495fe5e65615ab66f *tests/testthat/test-unnest-auto.R 96548a18ef7a12e68851a83fe9fbbb25 *tests/testthat/test-unnest-helper.R b0c15426aea1071466bae65eac6fd945 *tests/testthat/test-unnest-longer.R b4df0a77e657b67d5813852068572d7a *tests/testthat/test-unnest-wider.R f931b58f27041b6bf49454132e9eba58 *tests/testthat/test-unnest.R fed49c03d4b835180154232cfc5c8ce2 *tests/testthat/test-utils.R 1efc5e08dc706a91fa53a3ff93e98948 *vignettes/classroom.csv e6922f00d32500d44918a80b8a42511f *vignettes/classroom2.csv a39be9e4fd95a116283859dc70afdd78 *vignettes/in-packages.Rmd 6161ad5864fafdeea47e6b09f6e50532 *vignettes/nest.Rmd 195e7517db19652a85623eeb74660056 *vignettes/pivot.Rmd 9f5e406315affe10edf627ad79373797 *vignettes/programming.Rmd fd8bd5e51ac26a4cf8e212528f32a701 *vignettes/rectangle.Rmd 6144ebd1068581258c02ed88fff198c3 *vignettes/tb.csv 1d25d674f25be4e450d8dfc9c3065835 *vignettes/tidy-data.Rmd f85f432d796495a2df1fedfcbd15ad7d *vignettes/weather.csv tidyr/inst/0000755000176200001440000000000014553746313012375 5ustar liggesuserstidyr/inst/doc/0000755000176200001440000000000014553746313013142 5ustar liggesuserstidyr/inst/doc/nest.html0000644000176200001440000005145214553746310015005 0ustar liggesusers Nested data

Nested data

library(tidyr)
library(dplyr)
library(purrr)

Basics

A nested data frame is a data frame where one (or more) columns is a list of data frames. You can create simple nested data frames by hand:

df1 <- tibble(
  g = c(1, 2, 3),
  data = list(
    tibble(x = 1, y = 2),
    tibble(x = 4:5, y = 6:7),
    tibble(x = 10)
  )
)

df1
#> # A tibble: 3 × 2
#>       g data            
#>   <dbl> <list>          
#> 1     1 <tibble [1 × 2]>
#> 2     2 <tibble [2 × 2]>
#> 3     3 <tibble [1 × 1]>

(It is possible to create list-columns in regular data frames, not just in tibbles, but it’s considerably more work because the default behaviour of data.frame() is to treat lists as lists of columns.)

But more commonly you’ll create them with tidyr::nest():

df2 <- tribble(
  ~g, ~x, ~y,
   1,  1,  2,
   2,  4,  6,
   2,  5,  7,
   3, 10,  NA
)
df2 %>% nest(data = c(x, y))
#> # A tibble: 3 × 2
#>       g data            
#>   <dbl> <list>          
#> 1     1 <tibble [1 × 2]>
#> 2     2 <tibble [2 × 2]>
#> 3     3 <tibble [1 × 2]>

nest() specifies which variables should be nested inside; an alternative is to use dplyr::group_by() to describe which variables should be kept outside.

df2 %>% group_by(g) %>% nest()
#> # A tibble: 3 × 2
#> # Groups:   g [3]
#>       g data            
#>   <dbl> <list>          
#> 1     1 <tibble [1 × 2]>
#> 2     2 <tibble [2 × 2]>
#> 3     3 <tibble [1 × 2]>

I think nesting is easiest to understand in connection to grouped data: each row in the output corresponds to one group in the input. We’ll see shortly this is particularly convenient when you have other per-group objects.

The opposite of nest() is unnest(). You give it the name of a list-column containing data frames, and it row-binds the data frames together, repeating the outer columns the right number of times to line up.

df1 %>% unnest(data)
#> # A tibble: 4 × 3
#>       g     x     y
#>   <dbl> <dbl> <dbl>
#> 1     1     1     2
#> 2     2     4     6
#> 3     2     5     7
#> 4     3    10    NA

Nested data and models

Nested data is a great fit for problems where you have one of something for each group. A common place this arises is when you’re fitting multiple models.

mtcars_nested <- mtcars %>% 
  group_by(cyl) %>% 
  nest()

mtcars_nested
#> # A tibble: 3 × 2
#> # Groups:   cyl [3]
#>     cyl data              
#>   <dbl> <list>            
#> 1     6 <tibble [7 × 10]> 
#> 2     4 <tibble [11 × 10]>
#> 3     8 <tibble [14 × 10]>

Once you have a list of data frames, it’s very natural to produce a list of models:

mtcars_nested <- mtcars_nested %>% 
  mutate(model = map(data, function(df) lm(mpg ~ wt, data = df)))
mtcars_nested
#> # A tibble: 3 × 3
#> # Groups:   cyl [3]
#>     cyl data               model 
#>   <dbl> <list>             <list>
#> 1     6 <tibble [7 × 10]>  <lm>  
#> 2     4 <tibble [11 × 10]> <lm>  
#> 3     8 <tibble [14 × 10]> <lm>

And then you could even produce a list of predictions:

mtcars_nested <- mtcars_nested %>% 
  mutate(model = map(model, predict))
mtcars_nested  
#> # A tibble: 3 × 3
#> # Groups:   cyl [3]
#>     cyl data               model     
#>   <dbl> <list>             <list>    
#> 1     6 <tibble [7 × 10]>  <dbl [7]> 
#> 2     4 <tibble [11 × 10]> <dbl [11]>
#> 3     8 <tibble [14 × 10]> <dbl [14]>

This workflow works particularly well in conjunction with broom, which makes it easy to turn models into tidy data frames which can then be unnest()ed to get back to flat data frames. You can see a bigger example in the broom and dplyr vignette.

tidyr/inst/doc/tidy-data.Rmd0000644000176200001440000005352314553563421015473 0ustar liggesusers--- title: "Tidy data" output: rmarkdown::html_vignette description: | A tidy dataset has variables in columns, observations in rows, and one value in each cell. This vignette introduces the theory of "tidy data" and shows you how it saves you time during data analysis. vignette: > %\VignetteIndexEntry{Tidy data} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, echo = FALSE} knitr::opts_chunk$set(collapse = TRUE, comment = "#>") set.seed(1014) options(dplyr.print_max = 10) ``` (This is an informal and code heavy version of the full [tidy data paper](https://vita.had.co.nz/papers/tidy-data.html). Please refer to that for more details.) ## Data tidying It is often said that 80% of data analysis is spent on the cleaning and preparing data. And it's not just a first step, but it must be repeated many times over the course of analysis as new problems come to light or new data is collected. To get a handle on the problem, this paper focuses on a small, but important, aspect of data cleaning that I call data **tidying**: structuring datasets to facilitate analysis. The principles of tidy data provide a standard way to organise data values within a dataset. A standard makes initial data cleaning easier because you don't need to start from scratch and reinvent the wheel every time. The tidy data standard has been designed to facilitate initial exploration and analysis of the data, and to simplify the development of data analysis tools that work well together. Current tools often require translation. You have to spend time munging the output from one tool so you can input it into another. Tidy datasets and tidy tools work hand in hand to make data analysis easier, allowing you to focus on the interesting domain problem, not on the uninteresting logistics of data. 
## Defining tidy data {#defining} > Happy families are all alike; every unhappy family is unhappy in its own way > --- Leo Tolstoy Like families, tidy datasets are all alike but every messy dataset is messy in its own way. Tidy datasets provide a standardized way to link the structure of a dataset (its physical layout) with its semantics (its meaning). In this section, I'll provide some standard vocabulary for describing the structure and semantics of a dataset, and then use those definitions to define tidy data. ### Data structure Most statistical datasets are data frames made up of **rows** and **columns**. The columns are almost always labeled and the rows are sometimes labeled. The following code provides some data about an imaginary classroom in a format commonly seen in the wild. The table has three columns and four rows, and both rows and columns are labeled. ```{r} library(tibble) classroom <- tribble( ~name, ~quiz1, ~quiz2, ~test1, "Billy", NA, "D", "C", "Suzy", "F", NA, NA, "Lionel", "B", "C", "B", "Jenny", "A", "A", "B" ) classroom ``` There are many ways to structure the same underlying data. The following table shows the same data as above, but the rows and columns have been transposed. ```{r} tribble( ~assessment, ~Billy, ~Suzy, ~Lionel, ~Jenny, "quiz1", NA, "F", "B", "A", "quiz2", "D", NA, "C", "A", "test1", "C", NA, "B", "B" ) ``` The data is the same, but the layout is different. Our vocabulary of rows and columns is simply not rich enough to describe why the two tables represent the same data. In addition to appearance, we need a way to describe the underlying semantics, or meaning, of the values displayed in the table. ### Data semantics A dataset is a collection of **values**, usually either numbers (if quantitative) or strings (if qualitative). Values are organised in two ways. Every value belongs to a **variable** and an **observation**. 
A variable contains all values that measure the same underlying attribute (like height, temperature, duration) across units. An observation contains all values measured on the same unit (like a person, or a day, or a race) across attributes. A tidy version of the classroom data looks like this: (you'll learn how the functions work a little later) ```{r setup, message = FALSE} library(tidyr) library(dplyr) ``` ```{r} classroom2 <- classroom %>% pivot_longer(quiz1:test1, names_to = "assessment", values_to = "grade") %>% arrange(name, assessment) classroom2 ``` This makes the values, variables, and observations more clear. The dataset contains 36 values representing three variables and 12 observations. The variables are: 1. `name`, with four possible values (Billy, Suzy, Lionel, and Jenny). 2. `assessment`, with three possible values (quiz1, quiz2, and test1). 3. `grade`, with five or six values depending on how you think of the missing value (`r sort(unique(classroom2$grade), na.last = TRUE)`). The tidy data frame explicitly tells us the definition of an observation. In this classroom, every combination of `name` and `assessment` is a single measured observation. The dataset also informs us of missing values, which can and do have meaning. Billy was absent for the first quiz, but tried to salvage his grade. Suzy failed the first quiz, so she decided to drop the class. To calculate Billy's final grade, we might replace this missing value with an F (or he might get a second chance to take the quiz). However, if we want to know the class average for Test 1, dropping Suzy's structural missing value would be more appropriate than imputing a new value. For a given dataset, it's usually easy to figure out what are observations and what are variables, but it is surprisingly difficult to precisely define variables and observations in general. For example, if the columns in the classroom data were `height` and `weight` we would have been happy to call them variables. 
If the columns were `height` and `width`, it would be less clear cut, as we might think of height and width as values of a `dimension` variable. If the columns were `home phone` and `work phone`, we could treat these as two variables, but in a fraud detection environment we might want variables `phone number` and `number type` because the use of one phone number for multiple people might suggest fraud. A general rule of thumb is that it is easier to describe functional relationships between variables (e.g., `z` is a linear combination of `x` and `y`, `density` is the ratio of `weight` to `volume`) than between rows, and it is easier to make comparisons between groups of observations (e.g., average of group a vs. average of group b) than between groups of columns. In a given analysis, there may be multiple levels of observation. For example, in a trial of new allergy medication we might have three observational types: demographic data collected from each person (`age`, `sex`, `race`), medical data collected from each person on each day (`number of sneezes`, `redness of eyes`), and meteorological data collected on each day (`temperature`, `pollen count`). Variables may change over the course of analysis. Often the variables in the raw data are very fine grained, and may add extra modelling complexity for little explanatory gain. For example, many surveys ask variations on the same question to better get at an underlying trait. In early stages of analysis, variables correspond to questions. In later stages, you change focus to traits, computed by averaging together multiple questions. This considerably simplifies analysis because you don't need a hierarchical model, and you can often pretend that the data is continuous, not discrete. ### Tidy data Tidy data is a standard way of mapping the meaning of a dataset to its structure. A dataset is messy or tidy depending on how rows, columns and tables are matched up with observations, variables and types. 
In **tidy data**: 1. Each variable is a column; each column is a variable. 2. Each observation is a row; each row is an observation. 3. Each value is a cell; each cell is a single value. This is Codd's 3rd normal form, but with the constraints framed in statistical language, and the focus put on a single dataset rather than the many connected datasets common in relational databases. **Messy data** is any other arrangement of the data. Tidy data makes it easy for an analyst or a computer to extract needed variables because it provides a standard way of structuring a dataset. Compare the different versions of the classroom data: in the messy version you need to use different strategies to extract different variables. This slows analysis and invites errors. If you consider how many data analysis operations involve all of the values in a variable (every aggregation function), you can see how important it is to extract these values in a simple, standard way. Tidy data is particularly well suited for vectorised programming languages like R, because the layout ensures that values of different variables from the same observation are always paired. While the order of variables and observations does not affect analysis, a good ordering makes it easier to scan the raw values. One way of organising variables is by their role in the analysis: are values fixed by the design of the data collection, or are they measured during the course of the experiment? Fixed variables describe the experimental design and are known in advance. Computer scientists often call fixed variables dimensions, and statisticians usually denote them with subscripts on random variables. Measured variables are what we actually measure in the study. Fixed variables should come first, followed by measured variables, each ordered so that related variables are contiguous. Rows can then be ordered by the first variable, breaking ties with the second and subsequent (fixed) variables. 
This is the convention adopted by all tabular displays in this paper. ## Tidying messy datasets {#tidying} Real datasets can, and often do, violate the three precepts of tidy data in almost every way imaginable. While occasionally you do get a dataset that you can start analysing immediately, this is the exception, not the rule. This section describes the five most common problems with messy datasets, along with their remedies: - Column headers are values, not variable names. - Multiple variables are stored in one column. - Variables are stored in both rows and columns. - Multiple types of observational units are stored in the same table. - A single observational unit is stored in multiple tables. Surprisingly, most messy datasets, including types of messiness not explicitly described above, can be tidied with a small set of tools: pivoting (longer and wider) and separating. The following sections illustrate each problem with a real dataset that I have encountered, and show how to tidy them. ### Column headers are values, not variable names A common type of messy dataset is tabular data designed for presentation, where variables form both the rows and columns, and column headers are values, not variable names. While I would call this arrangement messy, in some cases it can be extremely useful. It provides efficient storage for completely crossed designs, and it can lead to extremely efficient computation if desired operations can be expressed as matrix operations. The following code shows a subset of a typical dataset of this form. This dataset explores the relationship between income and religion in the US. It comes from a report produced by the Pew Research Center, an American think-tank that collects data on attitudes to topics ranging from religion to the internet, and produces many reports that contain datasets in this format. ```{r} relig_income ``` This dataset has three variables, `religion`, `income` and `frequency`. 
To tidy it, we need to **pivot** the non-variable columns into a two-column key-value pair. This action is often described as making a wide dataset longer (or taller). When pivoting variables, we need to provide the name of the new key-value columns to create. After defining the columns to pivot (every column except for religion), you will need the name of the key column, which is the name of the variable defined by the values of the column headings. In this case, it's `income`. The second argument is the name of the value column, `frequency`. ```{r} relig_income %>% pivot_longer(-religion, names_to = "income", values_to = "frequency") ``` This form is tidy because each column represents a variable and each row represents an observation, in this case a demographic unit corresponding to a combination of `religion` and `income`. This format is also used to record regularly spaced observations over time. For example, the Billboard dataset shown below records the date a song first entered the billboard top 100. It has variables for `artist`, `track`, `date.entered`, `rank` and `week`. The rank in each week after it enters the top 100 is recorded in 76 columns, `wk1` to `wk76`. This form of storage is not tidy, but it is useful for data entry. It reduces duplication since otherwise each song in each week would need its own row, and song metadata like title and artist would need to be repeated. This will be discussed in more depth in [multiple types](#multiple-types). ```{r} billboard ``` To tidy this dataset, we first use `pivot_longer()` to make the dataset longer. We transform the columns from `wk1` to `wk76`, making a new column for their names, `week`, and a new value for their values, `rank`: ```{r} billboard2 <- billboard %>% pivot_longer( wk1:wk76, names_to = "week", values_to = "rank", values_drop_na = TRUE ) billboard2 ``` Here we use `values_drop_na = TRUE` to drop any missing values from the rank column. 
In this data, missing values represent weeks that the song wasn't in the charts, so can be safely dropped. In this case it's also nice to do a little cleaning, converting the week variable to a number, and figuring out the date corresponding to each week on the charts: ```{r} billboard3 <- billboard2 %>% mutate( week = as.integer(gsub("wk", "", week)), date = as.Date(date.entered) + 7 * (week - 1), date.entered = NULL ) billboard3 ``` Finally, it's always a good idea to sort the data. We could do it by artist, track and week: ```{r} billboard3 %>% arrange(artist, track, week) ``` Or by date and rank: ```{r} billboard3 %>% arrange(date, rank) ``` ### Multiple variables stored in one column After pivoting columns, the key column is sometimes a combination of multiple underlying variable names. This happens in the `tb` (tuberculosis) dataset, shown below. This dataset comes from the World Health Organisation, and records the counts of confirmed tuberculosis cases by `country`, `year`, and demographic group. The demographic groups are broken down by `sex` (m, f) and `age` (0-14, 15-25, 25-34, 35-44, 45-54, 55-64, unknown). ```{r} tb <- as_tibble(read.csv("tb.csv", stringsAsFactors = FALSE)) tb ``` First we use `pivot_longer()` to gather up the non-variable columns: ```{r} tb2 <- tb %>% pivot_longer( !c(iso2, year), names_to = "demo", values_to = "n", values_drop_na = TRUE ) tb2 ``` Column headers in this format are often separated by a non-alphanumeric character (e.g. `.`, `-`, `_`, `:`), or have a fixed width format, like in this dataset. `separate()` makes it easy to split a compound variable into individual variables. You can either pass it a regular expression to split on (the default is to split on non-alphanumeric characters), or a vector of character positions. In this case we want to split after the first character: ```{r} tb3 <- tb2 %>% separate(demo, c("sex", "age"), 1) tb3 ``` Storing the values in this form resolves a problem in the original data. 
We want to compare rates, not counts, which means we need to know the population. In the original format, there is no easy way to add a population variable. It has to be stored in a separate table, which makes it hard to correctly match populations to counts. In tidy form, adding variables for population and rate is easy because they're just additional columns. In this case, we could also do the transformation in a single step by supplying multiple column names to `names_to` and also supplying a grouped regular expression to `names_pattern`: ```{r} tb %>% pivot_longer( !c(iso2, year), names_to = c("sex", "age"), names_pattern = "(.)(.+)", values_to = "n", values_drop_na = TRUE ) ``` ### Variables are stored in both rows and columns The most complicated form of messy data occurs when variables are stored in both rows and columns. The code below loads daily weather data from the Global Historical Climatology Network for one weather station (MX17004) in Mexico for five months in 2010. ```{r} weather <- as_tibble(read.csv("weather.csv", stringsAsFactors = FALSE)) weather ``` It has variables in individual columns (`id`, `year`, `month`), spread across columns (`day`, d1-d31) and across rows (`tmin`, `tmax`) (minimum and maximum temperature). Months with fewer than 31 days have structural missing values for the last day(s) of the month. To tidy this dataset we first use pivot_longer to gather the day columns: ```{r} weather2 <- weather %>% pivot_longer( d1:d31, names_to = "day", values_to = "value", values_drop_na = TRUE ) weather2 ``` For presentation, I've dropped the missing values, making them implicit rather than explicit. This is ok because we know how many days are in each month and can easily reconstruct the explicit missing values. 
We'll also do a little cleaning: ```{r} weather3 <- weather2 %>% mutate(day = as.integer(gsub("d", "", day))) %>% select(id, year, month, day, element, value) weather3 ``` This dataset is mostly tidy, but the `element` column is not a variable; it stores the names of variables. (Not shown in this example are the other meteorological variables `prcp` (precipitation) and `snow` (snowfall)). Fixing this requires widening the data: `pivot_wider()` is the inverse of `pivot_longer()`, pivoting `element` and `value` back out across multiple columns: ```{r} weather3 %>% pivot_wider(names_from = element, values_from = value) ``` This form is tidy: there's one variable in each column, and each row represents one day. ### Multiple types in one table {#multiple-types} Datasets often involve values collected at multiple levels, on different types of observational units. During tidying, each type of observational unit should be stored in its own table. This is closely related to the idea of database normalisation, where each fact is expressed in only one place. It's important because otherwise inconsistencies can arise. The billboard dataset actually contains observations on two types of observational units: the song and its rank in each week. This manifests itself through the duplication of facts about the song: `artist` is repeated many times. This dataset needs to be broken down into two pieces: a song dataset which stores `artist` and `song name`, and a ranking dataset which gives the `rank` of the `song` in each `week`. 
We first extract a `song` dataset: ```{r} song <- billboard3 %>% distinct(artist, track) %>% mutate(song_id = row_number()) song ``` Then use that to make a `rank` dataset by replacing repeated song facts with a pointer to song details (a unique song id): ```{r} rank <- billboard3 %>% left_join(song, c("artist", "track")) %>% select(song_id, date, week, rank) rank ``` You could also imagine a `week` dataset which would record background information about the week, maybe the total number of songs sold or similar "demographic" information. Normalisation is useful for tidying and eliminating inconsistencies. However, there are few data analysis tools that work directly with relational data, so analysis usually also requires denormalisation or merging the datasets back into one table. ### One type in multiple tables It's also common to find data values about a single type of observational unit spread out over multiple tables or files. These tables and files are often split up by another variable, so that each represents a single year, person, or location. As long as the format for individual records is consistent, this is an easy problem to fix: 1. Read the files into a list of tables. 2. For each table, add a new column that records the original file name (the file name is often the value of an important variable). 3. Combine all tables into a single table. Purrr makes this straightforward in R. The following code generates a vector of file names in a directory (`data/`) which match a regular expression (ends in `.csv`). Next we name each element of the vector with the name of the file. We do this because doing so will preserve the names in the following step, ensuring that each row in the final data frame is labeled with its source. Finally, `map_dfr()` loops over each path, reading in the csv file and combining the results into a single data frame. 
```{r, eval = FALSE} library(purrr) paths <- dir("data", pattern = "\\.csv$", full.names = TRUE) names(paths) <- basename(paths) map_dfr(paths, read.csv, stringsAsFactors = FALSE, .id = "filename") ``` Once you have a single table, you can perform additional tidying as needed. An example of this type of cleaning can be found at <https://github.com/hadley/babynames>, which takes 129 yearly baby name tables provided by the US Social Security Administration and combines them into a single file. A more complicated situation occurs when the dataset structure changes over time. For example, the datasets may contain different variables, the same variables with different names, different file formats, or different conventions for missing values. This may require you to tidy each file individually (or, if you're lucky, in small groups) and then combine them once tidied. An example of this type of tidying is illustrated in <https://github.com/hadley/fueleconomy>, which shows the tidying of EPA fuel economy data for over 50,000 cars from 1978 to 2008. The raw data is available online, but each year is stored in a separate file and there are four major formats with many minor variations, making tidying this dataset a considerable challenge. tidyr/inst/doc/nest.Rmd0000644000176200001440000000550214165475471014564 0ustar liggesusers--- title: "Nested data" output: rmarkdown::html_vignette description: | A nested data frame contains a list-column of data frames. It's an alternative way of representing grouped data, that works particularly well when you're modelling. vignette: > %\VignetteIndexEntry{Nested data} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, message = FALSE} library(tidyr) library(dplyr) library(purrr) ``` ## Basics A nested data frame is a data frame where one (or more) columns is a list of data frames. 
You can create simple nested data frames by hand: ```{r} df1 <- tibble( g = c(1, 2, 3), data = list( tibble(x = 1, y = 2), tibble(x = 4:5, y = 6:7), tibble(x = 10) ) ) df1 ``` (It is possible to create list-columns in regular data frames, not just in tibbles, but it's considerably more work because the default behaviour of `data.frame()` is to treat lists as lists of columns.) But more commonly you'll create them with `tidyr::nest()`: ```{r} df2 <- tribble( ~g, ~x, ~y, 1, 1, 2, 2, 4, 6, 2, 5, 7, 3, 10, NA ) df2 %>% nest(data = c(x, y)) ``` `nest()` specifies which variables should be nested inside; an alternative is to use `dplyr::group_by()` to describe which variables should be kept outside. ```{r} df2 %>% group_by(g) %>% nest() ``` I think nesting is easiest to understand in connection to grouped data: each row in the output corresponds to one _group_ in the input. We'll see shortly this is particularly convenient when you have other per-group objects. The opposite of `nest()` is `unnest()`. You give it the name of a list-column containing data frames, and it row-binds the data frames together, repeating the outer columns the right number of times to line up. ```{r} df1 %>% unnest(data) ``` ## Nested data and models Nested data is a great fit for problems where you have one of _something_ for each group. A common place this arises is when you're fitting multiple models. 
```{r} mtcars_nested <- mtcars %>% group_by(cyl) %>% nest() mtcars_nested ``` Once you have a list of data frames, it's very natural to produce a list of models: ```{r} mtcars_nested <- mtcars_nested %>% mutate(model = map(data, function(df) lm(mpg ~ wt, data = df))) mtcars_nested ``` And then you could even produce a list of predictions: ```{r} mtcars_nested <- mtcars_nested %>% mutate(model = map(model, predict)) mtcars_nested ``` This workflow works particularly well in conjunction with [broom](https://broom.tidymodels.org/), which makes it easy to turn models into tidy data frames which can then be `unnest()`ed to get back to flat data frames. You can see a bigger example in the [broom and dplyr vignette](https://broom.tidymodels.org/articles/broom_and_dplyr.html). tidyr/inst/doc/programming.R0000644000176200001440000000242114553746311015604 0ustar liggesusers## ----setup, echo = FALSE, message = FALSE------------------------------------- knitr::opts_chunk$set(collapse = TRUE, comment = "#>") options(tibble.print_min = 6L, tibble.print_max = 6L) set.seed(1014) # Manually "import"; only needed for old dplyr which uses old tidyselect # which doesn't attach automatically in tidy-select contexts all_of <- tidyselect::all_of ## ----------------------------------------------------------------------------- library(tidyr) iris %>% nest(data = !Species) ## ----------------------------------------------------------------------------- packageVersion("tidyr") mini_iris <- as_tibble(iris)[c(1, 2, 51, 52, 101, 102), ] mini_iris ## ----------------------------------------------------------------------------- nest_egg <- function(df, cols) { nest(df, egg = {{ cols }}) } nest_egg(mini_iris, !Species) ## ----------------------------------------------------------------------------- nest_egg <- function(df, cols) { nest(df, egg = all_of(cols)) } vars <- c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width") nest_egg(mini_iris, vars) ## 
----------------------------------------------------------------------------- sel_vars <- function(df, cols) { tidyselect::eval_select(rlang::enquo(cols), df) } sel_vars(mini_iris, !Species) tidyr/inst/doc/tidy-data.R0000644000176200001440000001032114553746313015142 0ustar liggesusers## ----echo = FALSE------------------------------------------------------------- knitr::opts_chunk$set(collapse = TRUE, comment = "#>") set.seed(1014) options(dplyr.print_max = 10) ## ----------------------------------------------------------------------------- library(tibble) classroom <- tribble( ~name, ~quiz1, ~quiz2, ~test1, "Billy", NA, "D", "C", "Suzy", "F", NA, NA, "Lionel", "B", "C", "B", "Jenny", "A", "A", "B" ) classroom ## ----------------------------------------------------------------------------- tribble( ~assessment, ~Billy, ~Suzy, ~Lionel, ~Jenny, "quiz1", NA, "F", "B", "A", "quiz2", "D", NA, "C", "A", "test1", "C", NA, "B", "B" ) ## ----setup, message = FALSE--------------------------------------------------- library(tidyr) library(dplyr) ## ----------------------------------------------------------------------------- classroom2 <- classroom %>% pivot_longer(quiz1:test1, names_to = "assessment", values_to = "grade") %>% arrange(name, assessment) classroom2 ## ----------------------------------------------------------------------------- relig_income ## ----------------------------------------------------------------------------- relig_income %>% pivot_longer(-religion, names_to = "income", values_to = "frequency") ## ----------------------------------------------------------------------------- billboard ## ----------------------------------------------------------------------------- billboard2 <- billboard %>% pivot_longer( wk1:wk76, names_to = "week", values_to = "rank", values_drop_na = TRUE ) billboard2 ## ----------------------------------------------------------------------------- billboard3 <- billboard2 %>% mutate( week = as.integer(gsub("wk", "", week)), date = 
as.Date(date.entered) + 7 * (week - 1), date.entered = NULL ) billboard3 ## ----------------------------------------------------------------------------- billboard3 %>% arrange(artist, track, week) ## ----------------------------------------------------------------------------- billboard3 %>% arrange(date, rank) ## ----------------------------------------------------------------------------- tb <- as_tibble(read.csv("tb.csv", stringsAsFactors = FALSE)) tb ## ----------------------------------------------------------------------------- tb2 <- tb %>% pivot_longer( !c(iso2, year), names_to = "demo", values_to = "n", values_drop_na = TRUE ) tb2 ## ----------------------------------------------------------------------------- tb3 <- tb2 %>% separate(demo, c("sex", "age"), 1) tb3 ## ----------------------------------------------------------------------------- tb %>% pivot_longer( !c(iso2, year), names_to = c("sex", "age"), names_pattern = "(.)(.+)", values_to = "n", values_drop_na = TRUE ) ## ----------------------------------------------------------------------------- weather <- as_tibble(read.csv("weather.csv", stringsAsFactors = FALSE)) weather ## ----------------------------------------------------------------------------- weather2 <- weather %>% pivot_longer( d1:d31, names_to = "day", values_to = "value", values_drop_na = TRUE ) weather2 ## ----------------------------------------------------------------------------- weather3 <- weather2 %>% mutate(day = as.integer(gsub("d", "", day))) %>% select(id, year, month, day, element, value) weather3 ## ----------------------------------------------------------------------------- weather3 %>% pivot_wider(names_from = element, values_from = value) ## ----------------------------------------------------------------------------- song <- billboard3 %>% distinct(artist, track) %>% mutate(song_id = row_number()) song ## ----------------------------------------------------------------------------- rank <- billboard3 %>% 
left_join(song, c("artist", "track")) %>% select(song_id, date, week, rank) rank ## ----eval = FALSE------------------------------------------------------------- # library(purrr) # paths <- dir("data", pattern = "\\.csv$", full.names = TRUE) # names(paths) <- basename(paths) # map_dfr(paths, read.csv, stringsAsFactors = FALSE, .id = "filename") tidyr/inst/doc/rectangle.html0000644000176200001440000022633514553746312016006 0ustar liggesusers Rectangling

Rectangling

Introduction

Rectangling is the art and craft of taking a deeply nested list (often sourced from wild caught JSON or XML) and taming it into a tidy data set of rows and columns. There are three functions from tidyr that are particularly useful for rectangling:

  • unnest_longer() takes each element of a list-column and makes a new row.
  • unnest_wider() takes each element of a list-column and makes a new column.
  • hoist() is similar to unnest_wider() but only plucks out selected components, and can reach down multiple levels.

(Alternatively, for complex inputs where you need to rectangle a nested list according to a specification, see the tibblify package.)

A very large number of data rectangling problems can be solved by combining jsonlite::read_json() with these functions and a splash of dplyr (largely eliminating prior approaches that combined mutate() with multiple purrr::map()s). Note that jsonlite has another important function called fromJSON(). We don’t recommend it here because it performs its own automatic simplification (simplifyVector = TRUE). This often works well, particularly in simple cases, but we think you’re better off doing the rectangling yourself so you know exactly what’s happening and can more easily handle the most complicated nested structures.

To illustrate these techniques, we’ll use the repurrrsive package, which provides a number of deeply nested lists originally mostly captured from web APIs.

library(tidyr)
library(dplyr)
library(repurrrsive)

GitHub users

We’ll start with gh_users, a list which contains information about six GitHub users. To begin, we put the gh_users list into a data frame:

users <- tibble(user = gh_users)

This seems a bit counter-intuitive: why is the first step in making a list simpler to make it more complicated? But a data frame has a big advantage: it bundles together multiple vectors so that everything is tracked together in a single object.

Each user is a named list, where each element represents a column.

names(users$user[[1]])
#>  [1] "login"               "id"                  "avatar_url"         
#>  [4] "gravatar_id"         "url"                 "html_url"           
#>  [7] "followers_url"       "following_url"       "gists_url"          
#> [10] "starred_url"         "subscriptions_url"   "organizations_url"  
#> [13] "repos_url"           "events_url"          "received_events_url"
#> [16] "type"                "site_admin"          "name"               
#> [19] "company"             "blog"                "location"           
#> [22] "email"               "hireable"            "bio"                
#> [25] "public_repos"        "public_gists"        "followers"          
#> [28] "following"           "created_at"          "updated_at"

There are two ways to turn the list components into columns. unnest_wider() takes every component and makes a new column:

users %>% unnest_wider(user)
#> # A tibble: 6 × 30
#>   login     id avatar_url gravatar_id url   html_url followers_url following_url
#>   <chr>  <int> <chr>      <chr>       <chr> <chr>    <chr>         <chr>        
#> 1 gabo… 6.60e5 https://a… ""          http… https:/… https://api.… https://api.…
#> 2 jenn… 5.99e5 https://a… ""          http… https:/… https://api.… https://api.…
#> 3 jtle… 1.57e6 https://a… ""          http… https:/… https://api.… https://api.…
#> 4 juli… 1.25e7 https://a… ""          http… https:/… https://api.… https://api.…
#> 5 leep… 3.51e6 https://a… ""          http… https:/… https://api.… https://api.…
#> 6 masa… 8.36e6 https://a… ""          http… https:/… https://api.… https://api.…
#> # ℹ 22 more variables: gists_url <chr>, starred_url <chr>,
#> #   subscriptions_url <chr>, organizations_url <chr>, repos_url <chr>,
#> #   events_url <chr>, received_events_url <chr>, type <chr>, site_admin <lgl>,
#> #   name <chr>, company <chr>, blog <chr>, location <chr>, email <chr>,
#> #   hireable <lgl>, bio <chr>, public_repos <int>, public_gists <int>,
#> #   followers <int>, following <int>, created_at <chr>, updated_at <chr>

But in this case, there are many components and we don’t need most of them so we can instead use hoist(). hoist() allows us to pull out selected components using the same syntax as purrr::pluck():

users %>% hoist(user, 
  followers = "followers", 
  login = "login", 
  url = "html_url"
)
#> # A tibble: 6 × 4
#>   followers login       url                            user             
#>       <int> <chr>       <chr>                          <list>           
#> 1       303 gaborcsardi https://github.com/gaborcsardi <named list [27]>
#> 2       780 jennybc     https://github.com/jennybc     <named list [27]>
#> 3      3958 jtleek      https://github.com/jtleek      <named list [27]>
#> 4       115 juliasilge  https://github.com/juliasilge  <named list [27]>
#> 5       213 leeper      https://github.com/leeper      <named list [27]>
#> 6        34 masalmon    https://github.com/masalmon    <named list [27]>

hoist() removes the named components from the user list-column, so you can think of it as moving components out of the inner list into the top-level data frame.

GitHub repos

We start off gh_repos similarly, by putting it in a tibble:

repos <- tibble(repo = gh_repos)
repos
#> # A tibble: 6 × 1
#>   repo       
#>   <list>     
#> 1 <list [30]>
#> 2 <list [30]>
#> 3 <list [30]>
#> 4 <list [26]>
#> 5 <list [30]>
#> 6 <list [30]>

This time the elements of repos are a list of repositories that belong to that user. These are observations, so should become new rows, so we use unnest_longer() rather than unnest_wider():

repos <- repos %>% unnest_longer(repo)
repos
#> # A tibble: 176 × 1
#>   repo             
#>   <list>           
#> 1 <named list [68]>
#> 2 <named list [68]>
#> 3 <named list [68]>
#> 4 <named list [68]>
#> 5 <named list [68]>
#> 6 <named list [68]>
#> # ℹ 170 more rows

Then we can use unnest_wider() or hoist():

repos %>% hoist(repo, 
  login = c("owner", "login"), 
  name = "name",
  homepage = "homepage",
  watchers = "watchers_count"
)
#> # A tibble: 176 × 5
#>   login       name        homepage watchers repo             
#>   <chr>       <chr>       <chr>       <int> <list>           
#> 1 gaborcsardi after       <NA>            5 <named list [65]>
#> 2 gaborcsardi argufy      <NA>           19 <named list [65]>
#> 3 gaborcsardi ask         <NA>            5 <named list [65]>
#> 4 gaborcsardi baseimports <NA>            0 <named list [65]>
#> 5 gaborcsardi citest      <NA>            0 <named list [65]>
#> 6 gaborcsardi clisymbols  ""             18 <named list [65]>
#> # ℹ 170 more rows

Note the use of c("owner", "login"): this allows us to reach two levels deep inside of a list. An alternative approach would be to pull out just owner and then put each element of it in a column:

repos %>% 
  hoist(repo, owner = "owner") %>% 
  unnest_wider(owner)
#> # A tibble: 176 × 18
#>   login     id avatar_url gravatar_id url   html_url followers_url following_url
#>   <chr>  <int> <chr>      <chr>       <chr> <chr>    <chr>         <chr>        
#> 1 gabo… 660288 https://a… ""          http… https:/… https://api.… https://api.…
#> 2 gabo… 660288 https://a… ""          http… https:/… https://api.… https://api.…
#> 3 gabo… 660288 https://a… ""          http… https:/… https://api.… https://api.…
#> 4 gabo… 660288 https://a… ""          http… https:/… https://api.… https://api.…
#> 5 gabo… 660288 https://a… ""          http… https:/… https://api.… https://api.…
#> 6 gabo… 660288 https://a… ""          http… https:/… https://api.… https://api.…
#> # ℹ 170 more rows
#> # ℹ 10 more variables: gists_url <chr>, starred_url <chr>,
#> #   subscriptions_url <chr>, organizations_url <chr>, repos_url <chr>,
#> #   events_url <chr>, received_events_url <chr>, type <chr>, site_admin <lgl>,
#> #   repo <list>

Game of Thrones characters

got_chars has a similar structure to gh_users: it’s a list of named lists, where each element of the inner list describes some attribute of a GoT character. We start in the same way, first by creating a data frame and then by unnesting each component into a column:

chars <- tibble(char = got_chars)
chars
#> # A tibble: 30 × 1
#>   char             
#>   <list>           
#> 1 <named list [18]>
#> 2 <named list [18]>
#> 3 <named list [18]>
#> 4 <named list [18]>
#> 5 <named list [18]>
#> 6 <named list [18]>
#> # ℹ 24 more rows

chars2 <- chars %>% unnest_wider(char)
chars2
#> # A tibble: 30 × 18
#>   url            id name  gender culture born  died  alive titles aliases father
#>   <chr>       <int> <chr> <chr>  <chr>   <chr> <chr> <lgl> <list> <list>  <chr> 
#> 1 https://ww…  1022 Theo… Male   "Ironb… "In … ""    TRUE  <chr>  <chr>   ""    
#> 2 https://ww…  1052 Tyri… Male   ""      "In … ""    TRUE  <chr>  <chr>   ""    
#> 3 https://ww…  1074 Vict… Male   "Ironb… "In … ""    TRUE  <chr>  <chr>   ""    
#> 4 https://ww…  1109 Will  Male   ""      ""    "In … FALSE <chr>  <chr>   ""    
#> 5 https://ww…  1166 Areo… Male   "Norvo… "In … ""    TRUE  <chr>  <chr>   ""    
#> 6 https://ww…  1267 Chett Male   ""      "At … "In … FALSE <chr>  <chr>   ""    
#> # ℹ 24 more rows
#> # ℹ 7 more variables: mother <chr>, spouse <chr>, allegiances <list>,
#> #   books <list>, povBooks <list>, tvSeries <list>, playedBy <list>

This is more complex than gh_users because some components of char are themselves lists, giving us a collection of list-columns:

chars2 %>% select_if(is.list)
#> # A tibble: 30 × 7
#>   titles    aliases    allegiances books     povBooks  tvSeries  playedBy 
#>   <list>    <list>     <list>      <list>    <list>    <list>    <list>   
#> 1 <chr [2]> <chr [4]>  <chr [1]>   <chr [3]> <chr [2]> <chr [6]> <chr [1]>
#> 2 <chr [2]> <chr [11]> <chr [1]>   <chr [2]> <chr [4]> <chr [6]> <chr [1]>
#> 3 <chr [2]> <chr [1]>  <chr [1]>   <chr [3]> <chr [2]> <chr [1]> <chr [1]>
#> 4 <chr [1]> <chr [1]>  <NULL>      <chr [1]> <chr [1]> <chr [1]> <chr [1]>
#> 5 <chr [1]> <chr [1]>  <chr [1]>   <chr [3]> <chr [2]> <chr [2]> <chr [1]>
#> 6 <chr [1]> <chr [1]>  <NULL>      <chr [2]> <chr [1]> <chr [1]> <chr [1]>
#> # ℹ 24 more rows

What you do next will depend on the purposes of the analysis. Maybe you want a row for every book and TV series that the character appears in:

chars2 %>% 
  select(name, books, tvSeries) %>% 
  pivot_longer(c(books, tvSeries), names_to = "media", values_to = "value") %>% 
  unnest_longer(value)
#> # A tibble: 179 × 3
#>   name          media    value            
#>   <chr>         <chr>    <chr>            
#> 1 Theon Greyjoy books    A Game of Thrones
#> 2 Theon Greyjoy books    A Storm of Swords
#> 3 Theon Greyjoy books    A Feast for Crows
#> 4 Theon Greyjoy tvSeries Season 1         
#> 5 Theon Greyjoy tvSeries Season 2         
#> 6 Theon Greyjoy tvSeries Season 3         
#> # ℹ 173 more rows

Or maybe you want to build a table that lets you match title to name:

chars2 %>% 
  select(name, title = titles) %>% 
  unnest_longer(title)
#> # A tibble: 59 × 2
#>   name              title                                               
#>   <chr>             <chr>                                               
#> 1 Theon Greyjoy     Prince of Winterfell                                
#> 2 Theon Greyjoy     Lord of the Iron Islands (by law of the green lands)
#> 3 Tyrion Lannister  Acting Hand of the King (former)                    
#> 4 Tyrion Lannister  Master of Coin (former)                             
#> 5 Victarion Greyjoy Lord Captain of the Iron Fleet                      
#> 6 Victarion Greyjoy Master of the Iron Victory                          
#> # ℹ 53 more rows

(Note that the empty titles ("") are due to an infelicity in the input got_chars: ideally people without titles would have a title vector of length 0, not a title vector of length 1 containing an empty string.)

Geocoding with google

Next we’ll tackle a more complex form of data that comes from Google’s geocoding service, stored in the repurrrsive package:

repurrrsive::gmaps_cities
#> # A tibble: 5 × 2
#>   city       json            
#>   <chr>      <list>          
#> 1 Houston    <named list [2]>
#> 2 Washington <named list [2]>
#> 3 New York   <named list [2]>
#> 4 Chicago    <named list [2]>
#> 5 Arlington  <named list [2]>

json is a list-column of named lists, so it makes sense to start with unnest_wider():

repurrrsive::gmaps_cities %>%
  unnest_wider(json)
#> # A tibble: 5 × 3
#>   city       results    status
#>   <chr>      <list>     <chr> 
#> 1 Houston    <list [1]> OK    
#> 2 Washington <list [2]> OK    
#> 3 New York   <list [1]> OK    
#> 4 Chicago    <list [1]> OK    
#> 5 Arlington  <list [2]> OK

Notice that results is a list of lists. Most of the cities have 1 element (representing a unique match from the geocoding API), but Washington and Arlington have two. We can pull these out into separate rows with unnest_longer():

repurrrsive::gmaps_cities %>%
  unnest_wider(json) %>% 
  unnest_longer(results)
#> # A tibble: 7 × 3
#>   city       results          status
#>   <chr>      <list>           <chr> 
#> 1 Houston    <named list [5]> OK    
#> 2 Washington <named list [5]> OK    
#> 3 Washington <named list [5]> OK    
#> 4 New York   <named list [5]> OK    
#> 5 Chicago    <named list [5]> OK    
#> 6 Arlington  <named list [5]> OK    
#> # ℹ 1 more row

Now these all have the same components, as revealed by unnest_wider():

repurrrsive::gmaps_cities %>%
  unnest_wider(json) %>% 
  unnest_longer(results) %>% 
  unnest_wider(results)
#> # A tibble: 7 × 7
#>   city  address_components formatted_address geometry     place_id types  status
#>   <chr> <list>             <chr>             <list>       <chr>    <list> <chr> 
#> 1 Hous… <list [4]>         Houston, TX, USA  <named list> ChIJAYW… <list> OK    
#> 2 Wash… <list [2]>         Washington, USA   <named list> ChIJ-bD… <list> OK    
#> 3 Wash… <list [4]>         Washington, DC, … <named list> ChIJW-T… <list> OK    
#> 4 New … <list [3]>         New York, NY, USA <named list> ChIJOwg… <list> OK    
#> 5 Chic… <list [4]>         Chicago, IL, USA  <named list> ChIJ7cv… <list> OK    
#> 6 Arli… <list [4]>         Arlington, TX, U… <named list> ChIJ05g… <list> OK    
#> # ℹ 1 more row

We can find the latitude and longitude by unnesting geometry:

repurrrsive::gmaps_cities %>%
  unnest_wider(json) %>% 
  unnest_longer(results) %>% 
  unnest_wider(results) %>% 
  unnest_wider(geometry)
#> # A tibble: 7 × 10
#>   city       address_components formatted_address   bounds       location    
#>   <chr>      <list>             <chr>               <list>       <list>      
#> 1 Houston    <list [4]>         Houston, TX, USA    <named list> <named list>
#> 2 Washington <list [2]>         Washington, USA     <named list> <named list>
#> 3 Washington <list [4]>         Washington, DC, USA <named list> <named list>
#> 4 New York   <list [3]>         New York, NY, USA   <named list> <named list>
#> 5 Chicago    <list [4]>         Chicago, IL, USA    <named list> <named list>
#> 6 Arlington  <list [4]>         Arlington, TX, USA  <named list> <named list>
#> # ℹ 1 more row
#> # ℹ 5 more variables: location_type <chr>, viewport <list>, place_id <chr>,
#> #   types <list>, status <chr>

And then location:

repurrrsive::gmaps_cities %>%
  unnest_wider(json) %>%
  unnest_longer(results) %>%
  unnest_wider(results) %>%
  unnest_wider(geometry) %>%
  unnest_wider(location)
#> # A tibble: 7 × 11
#>   city       address_components formatted_address   bounds         lat    lng
#>   <chr>      <list>             <chr>               <list>       <dbl>  <dbl>
#> 1 Houston    <list [4]>         Houston, TX, USA    <named list>  29.8  -95.4
#> 2 Washington <list [2]>         Washington, USA     <named list>  47.8 -121. 
#> 3 Washington <list [4]>         Washington, DC, USA <named list>  38.9  -77.0
#> 4 New York   <list [3]>         New York, NY, USA   <named list>  40.7  -74.0
#> 5 Chicago    <list [4]>         Chicago, IL, USA    <named list>  41.9  -87.6
#> 6 Arlington  <list [4]>         Arlington, TX, USA  <named list>  32.7  -97.1
#> # ℹ 1 more row
#> # ℹ 5 more variables: location_type <chr>, viewport <list>, place_id <chr>,
#> #   types <list>, status <chr>

We could also just look at the first address for each city:

repurrrsive::gmaps_cities %>%
  unnest_wider(json) %>%
  hoist(results, first_result = 1) %>%
  unnest_wider(first_result) %>%
  unnest_wider(geometry) %>%
  unnest_wider(location)
#> # A tibble: 5 × 12
#>   city       address_components formatted_address  bounds             lat    lng
#>   <chr>      <list>             <chr>              <list>           <dbl>  <dbl>
#> 1 Houston    <list [4]>         Houston, TX, USA   <named list [2]>  29.8  -95.4
#> 2 Washington <list [2]>         Washington, USA    <named list [2]>  47.8 -121. 
#> 3 New York   <list [3]>         New York, NY, USA  <named list [2]>  40.7  -74.0
#> 4 Chicago    <list [4]>         Chicago, IL, USA   <named list [2]>  41.9  -87.6
#> 5 Arlington  <list [4]>         Arlington, TX, USA <named list [2]>  32.7  -97.1
#> # ℹ 6 more variables: location_type <chr>, viewport <list>, place_id <chr>,
#> #   types <list>, results <list>, status <chr>

Or use hoist() to dive deeply to get directly to lat and lng:

repurrrsive::gmaps_cities %>%
  hoist(json,
    lat = list("results", 1, "geometry", "location", "lat"),
    lng = list("results", 1, "geometry", "location", "lng")
  )
#> # A tibble: 5 × 4
#>   city         lat    lng json            
#>   <chr>      <dbl>  <dbl> <list>          
#> 1 Houston     29.8  -95.4 <named list [2]>
#> 2 Washington  47.8 -121.  <named list [2]>
#> 3 New York    40.7  -74.0 <named list [2]>
#> 4 Chicago     41.9  -87.6 <named list [2]>
#> 5 Arlington   32.7  -97.1 <named list [2]>

Sharla Gelfand’s discography

We’ll finish off with the most complex list, from Sharla Gelfand’s discography. We’ll start the usual way: putting the list into a single column data frame, and then widening so each component is a column. I also parse the date_added column into a real date-time1.

discs <- tibble(disc = discog) %>% 
  unnest_wider(disc) %>% 
  mutate(date_added = as.POSIXct(strptime(date_added, "%Y-%m-%dT%H:%M:%S"))) 
discs
#> # A tibble: 155 × 5
#>   instance_id date_added          basic_information       id rating
#>         <int> <dttm>              <list>               <int>  <int>
#> 1   354823933 2019-02-16 17:48:59 <named list [11]>  7496378      0
#> 2   354092601 2019-02-13 14:13:11 <named list [11]>  4490852      0
#> 3   354091476 2019-02-13 14:07:23 <named list [11]>  9827276      0
#> 4   351244906 2019-02-02 11:39:58 <named list [11]>  9769203      0
#> 5   351244801 2019-02-02 11:39:37 <named list [11]>  7237138      0
#> 6   351052065 2019-02-01 20:40:53 <named list [11]> 13117042      0
#> # ℹ 149 more rows

At this level, we see information about when each disc was added to Sharla’s discography, not any information about the disc itself. To do that we need to widen the basic_information column:

discs %>% unnest_wider(basic_information)
#> Error in `unnest_wider()`:
#> ! Can't duplicate names between the affected columns and the original
#>   data.
#> ✖ These names are duplicated:
#>   ℹ `id`, from `basic_information`.
#> ℹ Use `names_sep` to disambiguate using the column name.
#> ℹ Or use `names_repair` to specify a repair strategy.

Unfortunately that fails because there’s an id column inside basic_information. We can quickly see what’s going on by setting names_repair = "unique":

discs %>% unnest_wider(basic_information, names_repair = "unique")
#> New names:
#> • `id` -> `id...7`
#> • `id` -> `id...14`
#> # A tibble: 155 × 15
#>   instance_id date_added          labels  year master_url   artists id...7 thumb
#>         <int> <dttm>              <list> <int> <chr>        <list>   <int> <chr>
#> 1   354823933 2019-02-16 17:48:59 <list>  2015 <NA>         <list>  7.50e6 http…
#> 2   354092601 2019-02-13 14:13:11 <list>  2013 https://api… <list>  4.49e6 http…
#> 3   354091476 2019-02-13 14:07:23 <list>  2017 https://api… <list>  9.83e6 http…
#> 4   351244906 2019-02-02 11:39:58 <list>  2017 https://api… <list>  9.77e6 http…
#> 5   351244801 2019-02-02 11:39:37 <list>  2015 https://api… <list>  7.24e6 http…
#> 6   351052065 2019-02-01 20:40:53 <list>  2019 https://api… <list>  1.31e7 http…
#> # ℹ 149 more rows
#> # ℹ 7 more variables: title <chr>, formats <list>, cover_image <chr>,
#> #   resource_url <chr>, master_id <int>, id...14 <int>, rating <int>

The problem is that basic_information repeats the id column that’s also stored at the top-level, so we can just drop that:

discs %>% 
  select(!id) %>% 
  unnest_wider(basic_information)
#> # A tibble: 155 × 14
#>   instance_id date_added          labels  year master_url   artists     id thumb
#>         <int> <dttm>              <list> <int> <chr>        <list>   <int> <chr>
#> 1   354823933 2019-02-16 17:48:59 <list>  2015 <NA>         <list>  7.50e6 http…
#> 2   354092601 2019-02-13 14:13:11 <list>  2013 https://api… <list>  4.49e6 http…
#> 3   354091476 2019-02-13 14:07:23 <list>  2017 https://api… <list>  9.83e6 http…
#> 4   351244906 2019-02-02 11:39:58 <list>  2017 https://api… <list>  9.77e6 http…
#> 5   351244801 2019-02-02 11:39:37 <list>  2015 https://api… <list>  7.24e6 http…
#> 6   351052065 2019-02-01 20:40:53 <list>  2019 https://api… <list>  1.31e7 http…
#> # ℹ 149 more rows
#> # ℹ 6 more variables: title <chr>, formats <list>, cover_image <chr>,
#> #   resource_url <chr>, master_id <int>, rating <int>

Alternatively, we could use hoist():

discs %>% 
  hoist(basic_information,
    title = "title",
    year = "year",
    label = list("labels", 1, "name"),
    artist = list("artists", 1, "name")
  )
#> # A tibble: 155 × 9
#>   instance_id date_added          title      year label artist basic_information
#>         <int> <dttm>              <chr>     <int> <chr> <chr>  <list>           
#> 1   354823933 2019-02-16 17:48:59 Demo       2015 Tobi… Mollot <named list [9]> 
#> 2   354092601 2019-02-13 14:13:11 Observan…  2013 La V… Una B… <named list [9]> 
#> 3   354091476 2019-02-13 14:07:23 I          2017 La V… S.H.I… <named list [9]> 
#> 4   351244906 2019-02-02 11:39:58 Oído Abs…  2017 La V… Rata … <named list [9]> 
#> 5   351244801 2019-02-02 11:39:37 A Cat's …  2015 Kato… Ivy (… <named list [9]> 
#> 6   351052065 2019-02-01 20:40:53 Tashme     2019 High… Tashme <named list [9]> 
#> # ℹ 149 more rows
#> # ℹ 2 more variables: id <int>, rating <int>

Here I quickly extract the name of the first label and artist by indexing deeply into the nested list.

A more systematic approach would be to create separate tables for artist and label:

discs %>% 
  hoist(basic_information, artist = "artists") %>% 
  select(disc_id = id, artist) %>% 
  unnest_longer(artist) %>% 
  unnest_wider(artist)
#> # A tibble: 167 × 8
#>    disc_id join  name                     anv   tracks role  resource_url     id
#>      <int> <chr> <chr>                    <chr> <chr>  <chr> <chr>         <int>
#> 1  7496378 ""    Mollot                   ""    ""     ""    https://api… 4.62e6
#> 2  4490852 ""    Una Bèstia Incontrolable ""    ""     ""    https://api… 3.19e6
#> 3  9827276 ""    S.H.I.T. (3)             ""    ""     ""    https://api… 2.77e6
#> 4  9769203 ""    Rata Negra               ""    ""     ""    https://api… 4.28e6
#> 5  7237138 ""    Ivy (18)                 ""    ""     ""    https://api… 3.60e6
#> 6 13117042 ""    Tashme                   ""    ""     ""    https://api… 5.21e6
#> # ℹ 161 more rows

discs %>% 
  hoist(basic_information, format = "formats") %>% 
  select(disc_id = id, format) %>% 
  unnest_longer(format) %>% 
  unnest_wider(format) %>% 
  unnest_longer(descriptions)
#> # A tibble: 258 × 5
#>   disc_id descriptions text  name     qty  
#>     <int> <chr>        <chr> <chr>    <chr>
#> 1 7496378 "Numbered"   Black Cassette 1    
#> 2 4490852 "LP"         <NA>  Vinyl    1    
#> 3 9827276 "7\""        <NA>  Vinyl    1    
#> 4 9827276 "45 RPM"     <NA>  Vinyl    1    
#> 5 9827276 "EP"         <NA>  Vinyl    1    
#> 6 9769203 "LP"         <NA>  Vinyl    1    
#> # ℹ 252 more rows

Then you could join these back on to the original dataset as needed.


  1. I’d normally use readr::parse_datetime() or lubridate::ymd_hms(), but I can’t here because it’s a vignette and I don’t want to add a dependency to tidyr just to simplify one example.↩︎

tidyr/inst/doc/pivot.html0000644000176200001440000050101614553746311015172 0ustar liggesusers Pivoting

Pivoting

Introduction

This vignette describes the use of the new pivot_longer() and pivot_wider() functions. Their goal is to improve the usability of gather() and spread(), and incorporate state-of-the-art features found in other packages.

For some time, it’s been obvious that there is something fundamentally wrong with the design of spread() and gather(). Many people don’t find the names intuitive and find it hard to remember which direction corresponds to spreading and which to gathering. It also seems surprisingly hard to remember the arguments to these functions, meaning that many people (including me!) have to consult the documentation every time.

There are two important new features inspired by other R packages that have been advancing reshaping in R:

  • pivot_longer() can work with multiple value variables that may have different types, inspired by the enhanced melt() and dcast() functions provided by the data.table package by Matt Dowle and Arun Srinivasan.

  • pivot_longer() and pivot_wider() can take a data frame that specifies precisely how metadata stored in column names becomes data variables (and vice versa), inspired by the cdata package by John Mount and Nina Zumel.

In this vignette, you’ll learn the key ideas behind pivot_longer() and pivot_wider() as you see them used to solve a variety of data reshaping challenges ranging from simple to complex.

To begin we’ll load some needed packages. In real analysis code, I’d imagine you’d do library(tidyverse), but I can’t do that here since this vignette is embedded in a package.

library(tidyr)
library(dplyr)
library(readr)

Longer

pivot_longer() makes datasets longer by increasing the number of rows and decreasing the number of columns. I don’t believe it makes sense to describe a dataset as being in “long form”. Length is a relative term, and you can only say (e.g.) that dataset A is longer than dataset B.

pivot_longer() is commonly needed to tidy wild-caught datasets as they often optimise for ease of data entry or ease of comparison rather than ease of analysis. The following sections show how to use pivot_longer() for a wide range of realistic datasets.

String data in column names

The relig_income dataset stores counts based on a survey which (among other things) asked people about their religion and annual income:

relig_income
#> # A tibble: 18 × 11
#>    religion `<$10k` `$10-20k` `$20-30k` `$30-40k` `$40-50k` `$50-75k` `$75-100k`
#>    <chr>      <dbl>     <dbl>     <dbl>     <dbl>     <dbl>     <dbl>      <dbl>
#>  1 Agnostic      27        34        60        81        76       137        122
#>  2 Atheist       12        27        37        52        35        70         73
#>  3 Buddhist      27        21        30        34        33        58         62
#>  4 Catholic     418       617       732       670       638      1116        949
#>  5 Don’t k…      15        14        15        11        10        35         21
#>  6 Evangel…     575       869      1064       982       881      1486        949
#>  7 Hindu          1         9         7         9        11        34         47
#>  8 Histori…     228       244       236       238       197       223        131
#>  9 Jehovah…      20        27        24        24        21        30         15
#> 10 Jewish        19        19        25        25        30        95         69
#> # ℹ 8 more rows
#> # ℹ 3 more variables: `$100-150k` <dbl>, `>150k` <dbl>,
#> #   `Don't know/refused` <dbl>

This dataset contains three variables:

  • religion, stored in the rows,
  • income spread across the column names, and
  • count stored in the cell values.

To tidy it we use pivot_longer():

relig_income %>% 
  pivot_longer(
    cols = !religion, 
    names_to = "income", 
    values_to = "count"
  )
#> # A tibble: 180 × 3
#>    religion income             count
#>    <chr>    <chr>              <dbl>
#>  1 Agnostic <$10k                 27
#>  2 Agnostic $10-20k               34
#>  3 Agnostic $20-30k               60
#>  4 Agnostic $30-40k               81
#>  5 Agnostic $40-50k               76
#>  6 Agnostic $50-75k              137
#>  7 Agnostic $75-100k             122
#>  8 Agnostic $100-150k            109
#>  9 Agnostic >150k                 84
#> 10 Agnostic Don't know/refused    96
#> # ℹ 170 more rows
  • The first argument is the dataset to reshape, relig_income.

  • cols describes which columns need to be reshaped. In this case, it’s every column apart from religion.

  • names_to gives the name of the variable that will be created from the data stored in the column names, i.e. income.

  • values_to gives the name of the variable that will be created from the data stored in the cell value, i.e. count.

Neither the names_to nor the values_to column exists in relig_income, so we provide them as strings surrounded by quotes.

Numeric data in column names

The billboard dataset records the billboard rank of songs in the year 2000. It has a form similar to the relig_income data, but the data encoded in the column names is really a number, not a string.

billboard
#> # A tibble: 317 × 79
#>    artist     track date.entered   wk1   wk2   wk3   wk4   wk5   wk6   wk7   wk8
#>    <chr>      <chr> <date>       <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#>  1 2 Pac      Baby… 2000-02-26      87    82    72    77    87    94    99    NA
#>  2 2Ge+her    The … 2000-09-02      91    87    92    NA    NA    NA    NA    NA
#>  3 3 Doors D… Kryp… 2000-04-08      81    70    68    67    66    57    54    53
#>  4 3 Doors D… Loser 2000-10-21      76    76    72    69    67    65    55    59
#>  5 504 Boyz   Wobb… 2000-04-15      57    34    25    17    17    31    36    49
#>  6 98^0       Give… 2000-08-19      51    39    34    26    26    19     2     2
#>  7 A*Teens    Danc… 2000-07-08      97    97    96    95   100    NA    NA    NA
#>  8 Aaliyah    I Do… 2000-01-29      84    62    51    41    38    35    35    38
#>  9 Aaliyah    Try … 2000-03-18      59    53    38    28    21    18    16    14
#> 10 Adams, Yo… Open… 2000-08-26      76    76    74    69    68    67    61    58
#> # ℹ 307 more rows
#> # ℹ 68 more variables: wk9 <dbl>, wk10 <dbl>, wk11 <dbl>, wk12 <dbl>,
#> #   wk13 <dbl>, wk14 <dbl>, wk15 <dbl>, wk16 <dbl>, wk17 <dbl>, wk18 <dbl>,
#> #   wk19 <dbl>, wk20 <dbl>, wk21 <dbl>, wk22 <dbl>, wk23 <dbl>, wk24 <dbl>,
#> #   wk25 <dbl>, wk26 <dbl>, wk27 <dbl>, wk28 <dbl>, wk29 <dbl>, wk30 <dbl>,
#> #   wk31 <dbl>, wk32 <dbl>, wk33 <dbl>, wk34 <dbl>, wk35 <dbl>, wk36 <dbl>,
#> #   wk37 <dbl>, wk38 <dbl>, wk39 <dbl>, wk40 <dbl>, wk41 <dbl>, wk42 <dbl>, …

We can start with the same basic specification as for the relig_income dataset. Here we want the names to become a variable called week, and the values to become a variable called rank. I also use values_drop_na to drop rows that correspond to missing values. Not every song stays in the charts for all 76 weeks, so the structure of the input data forces the creation of unnecessary explicit NAs.

billboard %>% 
  pivot_longer(
    cols = starts_with("wk"), 
    names_to = "week", 
    values_to = "rank",
    values_drop_na = TRUE
  )
#> # A tibble: 5,307 × 5
#>    artist  track                   date.entered week   rank
#>    <chr>   <chr>                   <date>       <chr> <dbl>
#>  1 2 Pac   Baby Don't Cry (Keep... 2000-02-26   wk1      87
#>  2 2 Pac   Baby Don't Cry (Keep... 2000-02-26   wk2      82
#>  3 2 Pac   Baby Don't Cry (Keep... 2000-02-26   wk3      72
#>  4 2 Pac   Baby Don't Cry (Keep... 2000-02-26   wk4      77
#>  5 2 Pac   Baby Don't Cry (Keep... 2000-02-26   wk5      87
#>  6 2 Pac   Baby Don't Cry (Keep... 2000-02-26   wk6      94
#>  7 2 Pac   Baby Don't Cry (Keep... 2000-02-26   wk7      99
#>  8 2Ge+her The Hardest Part Of ... 2000-09-02   wk1      91
#>  9 2Ge+her The Hardest Part Of ... 2000-09-02   wk2      87
#> 10 2Ge+her The Hardest Part Of ... 2000-09-02   wk3      92
#> # ℹ 5,297 more rows

It would be nice to easily determine how long each song stayed in the charts, but to do that, we’ll need to convert the week variable to an integer. We can do that by using two additional arguments: names_prefix strips off the wk prefix, and names_transform converts week into an integer:

billboard %>% 
  pivot_longer(
    cols = starts_with("wk"), 
    names_to = "week", 
    names_prefix = "wk",
    names_transform = as.integer,
    values_to = "rank",
    values_drop_na = TRUE,
  )

Alternatively, you could do this with a single argument by using readr::parse_number() which automatically strips non-numeric components:

billboard %>% 
  pivot_longer(
    cols = starts_with("wk"), 
    names_to = "week", 
    names_transform = readr::parse_number,
    values_to = "rank",
    values_drop_na = TRUE,
  )

Many variables in column names

A more challenging situation occurs when you have multiple variables crammed into the column names. For example, take the who dataset:

who
#> # A tibble: 7,240 × 60
#>    country  iso2  iso3   year new_sp_m014 new_sp_m1524 new_sp_m2534 new_sp_m3544
#>    <chr>    <chr> <chr> <dbl>       <dbl>        <dbl>        <dbl>        <dbl>
#>  1 Afghani… AF    AFG    1980          NA           NA           NA           NA
#>  2 Afghani… AF    AFG    1981          NA           NA           NA           NA
#>  3 Afghani… AF    AFG    1982          NA           NA           NA           NA
#>  4 Afghani… AF    AFG    1983          NA           NA           NA           NA
#>  5 Afghani… AF    AFG    1984          NA           NA           NA           NA
#>  6 Afghani… AF    AFG    1985          NA           NA           NA           NA
#>  7 Afghani… AF    AFG    1986          NA           NA           NA           NA
#>  8 Afghani… AF    AFG    1987          NA           NA           NA           NA
#>  9 Afghani… AF    AFG    1988          NA           NA           NA           NA
#> 10 Afghani… AF    AFG    1989          NA           NA           NA           NA
#> # ℹ 7,230 more rows
#> # ℹ 52 more variables: new_sp_m4554 <dbl>, new_sp_m5564 <dbl>,
#> #   new_sp_m65 <dbl>, new_sp_f014 <dbl>, new_sp_f1524 <dbl>,
#> #   new_sp_f2534 <dbl>, new_sp_f3544 <dbl>, new_sp_f4554 <dbl>,
#> #   new_sp_f5564 <dbl>, new_sp_f65 <dbl>, new_sn_m014 <dbl>,
#> #   new_sn_m1524 <dbl>, new_sn_m2534 <dbl>, new_sn_m3544 <dbl>,
#> #   new_sn_m4554 <dbl>, new_sn_m5564 <dbl>, new_sn_m65 <dbl>, …

country, iso2, iso3, and year are already variables, so they can be left as is. But the columns from new_sp_m014 to newrel_f65 encode four variables in their names:

  • The new_/new prefix indicates these are counts of new cases. This dataset only contains new cases, so we’ll ignore it here because it’s constant.

  • sp/rel/ep describe how the case was diagnosed.

  • m/f gives the gender.

  • 014/1524/2534/3544/4554/5564/65 supplies the age range.

We can break these variables up by specifying multiple column names in names_to, and then either providing names_sep or names_pattern. Here names_pattern is the most natural fit. It has a similar interface to extract: you give it a regular expression containing groups (defined by ()) and it puts each group in a column.

who %>% 
  pivot_longer(
    cols = new_sp_m014:newrel_f65,
    names_to = c("diagnosis", "gender", "age"), 
    names_pattern = "new_?(.*)_(.)(.*)",
    values_to = "count"
  )
#> # A tibble: 405,440 × 8
#>    country     iso2  iso3   year diagnosis gender age   count
#>    <chr>       <chr> <chr> <dbl> <chr>     <chr>  <chr> <dbl>
#>  1 Afghanistan AF    AFG    1980 sp        m      014      NA
#>  2 Afghanistan AF    AFG    1980 sp        m      1524     NA
#>  3 Afghanistan AF    AFG    1980 sp        m      2534     NA
#>  4 Afghanistan AF    AFG    1980 sp        m      3544     NA
#>  5 Afghanistan AF    AFG    1980 sp        m      4554     NA
#>  6 Afghanistan AF    AFG    1980 sp        m      5564     NA
#>  7 Afghanistan AF    AFG    1980 sp        m      65       NA
#>  8 Afghanistan AF    AFG    1980 sp        f      014      NA
#>  9 Afghanistan AF    AFG    1980 sp        f      1524     NA
#> 10 Afghanistan AF    AFG    1980 sp        f      2534     NA
#> # ℹ 405,430 more rows

We could go one step further and use readr functions to convert the gender and age to factors. I think this is good practice when you have categorical variables with a known set of values.

who %>% 
  pivot_longer(
    cols = new_sp_m014:newrel_f65,
    names_to = c("diagnosis", "gender", "age"), 
    names_pattern = "new_?(.*)_(.)(.*)",
    names_transform = list(
      gender = ~ readr::parse_factor(.x, levels = c("f", "m")),
      age = ~ readr::parse_factor(
        .x,
        levels = c("014", "1524", "2534", "3544", "4554", "5564", "65"), 
        ordered = TRUE
      )
    ),
    values_to = "count",
)

Doing it this way is a little more efficient than doing a mutate after the fact: pivot_longer() only has to transform one occurrence of each name, whereas a mutate() would need to transform many repetitions.

Multiple observations per row

So far, we have been working with data frames that have one observation per row, but many important pivoting problems involve multiple observations per row. You can usually recognise this case because the name of the column that you want to appear in the output is part of the column name in the input. In this section, you’ll learn how to pivot this sort of data.

The following example is adapted from the data.table vignette, as inspiration for tidyr’s solution to this problem.

household
#> # A tibble: 5 × 5
#>   family dob_child1 dob_child2 name_child1 name_child2
#>    <int> <date>     <date>     <chr>       <chr>      
#> 1      1 1998-11-26 2000-01-29 Susan       Jose       
#> 2      2 1996-06-22 NA         Mark        <NA>       
#> 3      3 2002-07-11 2004-04-05 Sam         Seth       
#> 4      4 2004-10-10 2009-08-27 Craig       Khai       
#> 5      5 2000-12-05 2005-02-28 Parker      Gracie

Note that we have two pieces of information (or values) for each child: their name and their dob (date of birth). These need to go into separate columns in the result. Again we supply multiple variables to names_to, using names_sep to split up each variable name. Note the special name .value: this tells pivot_longer() that that part of the column name specifies the “value” being measured (which will become a variable in the output).

household %>% 
  pivot_longer(
    cols = !family, 
    names_to = c(".value", "child"), 
    names_sep = "_", 
    values_drop_na = TRUE
  )
#> # A tibble: 9 × 4
#>   family child  dob        name  
#>    <int> <chr>  <date>     <chr> 
#> 1      1 child1 1998-11-26 Susan 
#> 2      1 child2 2000-01-29 Jose  
#> 3      2 child1 1996-06-22 Mark  
#> 4      3 child1 2002-07-11 Sam   
#> 5      3 child2 2004-04-05 Seth  
#> 6      4 child1 2004-10-10 Craig 
#> 7      4 child2 2009-08-27 Khai  
#> 8      5 child1 2000-12-05 Parker
#> 9      5 child2 2005-02-28 Gracie

Note the use of values_drop_na = TRUE: the input shape forces the creation of explicit missing variables for observations that don’t exist.

A similar problem also exists in the anscombe dataset built in to base R:

anscombe
#>    x1 x2 x3 x4    y1   y2    y3    y4
#> 1  10 10 10  8  8.04 9.14  7.46  6.58
#> 2   8  8  8  8  6.95 8.14  6.77  5.76
#> 3  13 13 13  8  7.58 8.74 12.74  7.71
#> 4   9  9  9  8  8.81 8.77  7.11  8.84
#> 5  11 11 11  8  8.33 9.26  7.81  8.47
#> 6  14 14 14  8  9.96 8.10  8.84  7.04
#> 7   6  6  6  8  7.24 6.13  6.08  5.25
#> 8   4  4  4 19  4.26 3.10  5.39 12.50
#> 9  12 12 12  8 10.84 9.13  8.15  5.56
#> 10  7  7  7  8  4.82 7.26  6.42  7.91
#> 11  5  5  5  8  5.68 4.74  5.73  6.89

This dataset contains four pairs of variables (x1 and y1, x2 and y2, etc) that underlie Anscombe’s quartet, a collection of four datasets that have the same summary statistics (mean, sd, correlation etc), but have quite different data. We want to produce a dataset with columns set, x and y.

anscombe %>% 
  pivot_longer(
    cols = everything(), 
    cols_vary = "slowest",
    names_to = c(".value", "set"), 
    names_pattern = "(.)(.)"
  )
#> # A tibble: 44 × 3
#>    set       x     y
#>    <chr> <dbl> <dbl>
#>  1 1        10  8.04
#>  2 1         8  6.95
#>  3 1        13  7.58
#>  4 1         9  8.81
#>  5 1        11  8.33
#>  6 1        14  9.96
#>  7 1         6  7.24
#>  8 1         4  4.26
#>  9 1        12 10.8 
#> 10 1         7  4.82
#> # ℹ 34 more rows

Setting cols_vary to "slowest" groups the values from columns x1 and y1 together in the rows of the output before moving on to x2 and y2. This argument often produces more intuitively ordered output when you are pivoting every column in your dataset.

A similar situation can arise with panel data. For example, take this example dataset provided by Thomas Leeper. We can tidy it using the same approach as for anscombe:

pnl <- tibble(
  x = 1:4,
  a = c(1, 1,0, 0),
  b = c(0, 1, 1, 1),
  y1 = rnorm(4),
  y2 = rnorm(4),
  z1 = rep(3, 4),
  z2 = rep(-2, 4),
)

pnl %>% 
  pivot_longer(
    cols = !c(x, a, b), 
    names_to = c(".value", "time"), 
    names_pattern = "(.)(.)"
  )
#> # A tibble: 8 × 6
#>       x     a     b time       y     z
#>   <int> <dbl> <dbl> <chr>  <dbl> <dbl>
#> 1     1     1     0 1     -0.516     3
#> 2     1     1     0 2      2.48     -2
#> 3     2     1     1 1      0.240     3
#> 4     2     1     1 2      0.233    -2
#> 5     3     0     1 1     -1.33      3
#> 6     3     0     1 2     -0.986    -2
#> 7     4     0     1 1      0.401     3
#> 8     4     0     1 2     -0.965    -2

Wider

pivot_wider() is the opposite of pivot_longer(): it makes a dataset wider by increasing the number of columns and decreasing the number of rows. It’s relatively rare to need pivot_wider() to make tidy data, but it’s often useful for creating summary tables for presentation, or data in a format needed by other tools.

Capture-recapture data

The fish_encounters dataset, contributed by Myfanwy Johnston, describes when fish swimming down a river are detected by automatic monitoring stations:

fish_encounters
#> # A tibble: 114 × 3
#>    fish  station  seen
#>    <fct> <fct>   <int>
#>  1 4842  Release     1
#>  2 4842  I80_1       1
#>  3 4842  Lisbon      1
#>  4 4842  Rstr        1
#>  5 4842  Base_TD     1
#>  6 4842  BCE         1
#>  7 4842  BCW         1
#>  8 4842  BCE2        1
#>  9 4842  BCW2        1
#> 10 4842  MAE         1
#> # ℹ 104 more rows

Many tools used to analyse this data need it in a form where each station is a column:

fish_encounters %>% 
  pivot_wider(
    names_from = station, 
    values_from = seen
  )
#> # A tibble: 19 × 12
#>    fish  Release I80_1 Lisbon  Rstr Base_TD   BCE   BCW  BCE2  BCW2   MAE   MAW
#>    <fct>   <int> <int>  <int> <int>   <int> <int> <int> <int> <int> <int> <int>
#>  1 4842        1     1      1     1       1     1     1     1     1     1     1
#>  2 4843        1     1      1     1       1     1     1     1     1     1     1
#>  3 4844        1     1      1     1       1     1     1     1     1     1     1
#>  4 4845        1     1      1     1       1    NA    NA    NA    NA    NA    NA
#>  5 4847        1     1      1    NA      NA    NA    NA    NA    NA    NA    NA
#>  6 4848        1     1      1     1      NA    NA    NA    NA    NA    NA    NA
#>  7 4849        1     1     NA    NA      NA    NA    NA    NA    NA    NA    NA
#>  8 4850        1     1     NA     1       1     1     1    NA    NA    NA    NA
#>  9 4851        1     1     NA    NA      NA    NA    NA    NA    NA    NA    NA
#> 10 4854        1     1     NA    NA      NA    NA    NA    NA    NA    NA    NA
#> # ℹ 9 more rows

This dataset only records when a fish was detected by the station - it doesn’t record when it wasn’t detected (this is common with this type of data). That means the output data is filled with NAs. However, in this case we know that the absence of a record means that the fish was not seen, so we can ask pivot_wider() to fill these missing values in with zeros:

fish_encounters %>% 
  pivot_wider(
    names_from = station, 
    values_from = seen,
    values_fill = 0
  )
#> # A tibble: 19 × 12
#>    fish  Release I80_1 Lisbon  Rstr Base_TD   BCE   BCW  BCE2  BCW2   MAE   MAW
#>    <fct>   <int> <int>  <int> <int>   <int> <int> <int> <int> <int> <int> <int>
#>  1 4842        1     1      1     1       1     1     1     1     1     1     1
#>  2 4843        1     1      1     1       1     1     1     1     1     1     1
#>  3 4844        1     1      1     1       1     1     1     1     1     1     1
#>  4 4845        1     1      1     1       1     0     0     0     0     0     0
#>  5 4847        1     1      1     0       0     0     0     0     0     0     0
#>  6 4848        1     1      1     1       0     0     0     0     0     0     0
#>  7 4849        1     1      0     0       0     0     0     0     0     0     0
#>  8 4850        1     1      0     1       1     1     1     0     0     0     0
#>  9 4851        1     1      0     0       0     0     0     0     0     0     0
#> 10 4854        1     1      0     0       0     0     0     0     0     0     0
#> # ℹ 9 more rows

Aggregation

You can also use pivot_wider() to perform simple aggregation. For example, take the warpbreaks dataset built in to base R (converted to a tibble for the better print method):

warpbreaks <- warpbreaks %>% 
  as_tibble() %>% 
  select(wool, tension, breaks)
warpbreaks
#> # A tibble: 54 × 3
#>    wool  tension breaks
#>    <fct> <fct>    <dbl>
#>  1 A     L           26
#>  2 A     L           30
#>  3 A     L           54
#>  4 A     L           25
#>  5 A     L           70
#>  6 A     L           52
#>  7 A     L           51
#>  8 A     L           26
#>  9 A     L           67
#> 10 A     M           18
#> # ℹ 44 more rows

This is a designed experiment with nine replicates for every combination of wool (A and B) and tension (L, M, H):

warpbreaks %>% 
  count(wool, tension)
#> # A tibble: 6 × 3
#>   wool  tension     n
#>   <fct> <fct>   <int>
#> 1 A     L           9
#> 2 A     M           9
#> 3 A     H           9
#> 4 B     L           9
#> 5 B     M           9
#> 6 B     H           9

What happens if we attempt to pivot the levels of wool into the columns?

warpbreaks %>% 
  pivot_wider(
    names_from = wool, 
    values_from = breaks
  )
#> Warning: Values from `breaks` are not uniquely identified; output will contain
#> list-cols.
#> • Use `values_fn = list` to suppress this warning.
#> • Use `values_fn = {summary_fun}` to summarise duplicates.
#> • Use the following dplyr code to identify duplicates.
#>   {data} |>
#>   dplyr::summarise(n = dplyr::n(), .by = c(tension, wool)) |>
#>   dplyr::filter(n > 1L)
#> # A tibble: 3 × 3
#>   tension A         B        
#>   <fct>   <list>    <list>   
#> 1 L       <dbl [9]> <dbl [9]>
#> 2 M       <dbl [9]> <dbl [9]>
#> 3 H       <dbl [9]> <dbl [9]>

We get a warning that each cell in the output corresponds to multiple cells in the input. The default behaviour produces list-columns, which contain all the individual values. A more useful output would be summary statistics, e.g. mean breaks for each combination of wool and tension:

warpbreaks %>% 
  pivot_wider(
    names_from = wool, 
    values_from = breaks,
    values_fn = mean
  )
#> # A tibble: 3 × 3
#>   tension     A     B
#>   <fct>   <dbl> <dbl>
#> 1 L        44.6  28.2
#> 2 M        24    28.8
#> 3 H        24.6  18.8

For more complex summary operations, I recommend summarising before reshaping, but for simple cases it’s often convenient to summarise within pivot_wider().

Generate column name from multiple variables

Imagine, as in https://stackoverflow.com/questions/24929954, that we have information containing the combination of product, country, and year. In tidy form it might look like this:

production <- 
  expand_grid(
    product = c("A", "B"), 
    country = c("AI", "EI"), 
    year = 2000:2014
  ) %>%
  filter((product == "A" & country == "AI") | product == "B") %>% 
  mutate(production = rnorm(nrow(.)))
production
#> # A tibble: 45 × 4
#>    product country  year production
#>    <chr>   <chr>   <int>      <dbl>
#>  1 A       AI       2000     0.722 
#>  2 A       AI       2001     2.79  
#>  3 A       AI       2002     0.0848
#>  4 A       AI       2003     0.351 
#>  5 A       AI       2004     1.12  
#>  6 A       AI       2005    -2.26  
#>  7 A       AI       2006     0.566 
#>  8 A       AI       2007    -0.451 
#>  9 A       AI       2008    -0.0190
#> 10 A       AI       2009    -1.69  
#> # ℹ 35 more rows

We want to widen the data so we have one column for each combination of product and country. The key is to specify multiple variables for names_from:

production %>% 
  pivot_wider(
    names_from = c(product, country), 
    values_from = production
  )
#> # A tibble: 15 × 4
#>     year    A_AI    B_AI    B_EI
#>    <int>   <dbl>   <dbl>   <dbl>
#>  1  2000  0.722   0.410  -0.270 
#>  2  2001  2.79   -0.402   1.17  
#>  3  2002  0.0848  0.789  -0.399 
#>  4  2003  0.351   0.164  -0.0338
#>  5  2004  1.12    0.344  -1.01  
#>  6  2005 -2.26   -1.70    0.692 
#>  7  2006  0.566  -0.661  -1.05  
#>  8  2007 -0.451   1.38    0.221 
#>  9  2008 -0.0190  0.456  -0.608 
#> 10  2009 -1.69    0.0122  0.771 
#> # ℹ 5 more rows

When either names_from or values_from select multiple variables, you can control how the column names in the output are constructed with names_sep and names_prefix, or the more general names_glue:

production %>% 
  pivot_wider(
    names_from = c(product, country), 
    values_from = production,
    names_sep = ".",
    names_prefix = "prod."
  )
#> # A tibble: 15 × 4
#>     year prod.A.AI prod.B.AI prod.B.EI
#>    <int>     <dbl>     <dbl>     <dbl>
#>  1  2000    0.722     0.410    -0.270 
#>  2  2001    2.79     -0.402     1.17  
#>  3  2002    0.0848    0.789    -0.399 
#>  4  2003    0.351     0.164    -0.0338
#>  5  2004    1.12      0.344    -1.01  
#>  6  2005   -2.26     -1.70      0.692 
#>  7  2006    0.566    -0.661    -1.05  
#>  8  2007   -0.451     1.38      0.221 
#>  9  2008   -0.0190    0.456    -0.608 
#> 10  2009   -1.69      0.0122    0.771 
#> # ℹ 5 more rows

production %>% 
  pivot_wider(
    names_from = c(product, country), 
    values_from = production,
    names_glue = "prod_{product}_{country}"
  )
#> # A tibble: 15 × 4
#>     year prod_A_AI prod_B_AI prod_B_EI
#>    <int>     <dbl>     <dbl>     <dbl>
#>  1  2000    0.722     0.410    -0.270 
#>  2  2001    2.79     -0.402     1.17  
#>  3  2002    0.0848    0.789    -0.399 
#>  4  2003    0.351     0.164    -0.0338
#>  5  2004    1.12      0.344    -1.01  
#>  6  2005   -2.26     -1.70      0.692 
#>  7  2006    0.566    -0.661    -1.05  
#>  8  2007   -0.451     1.38      0.221 
#>  9  2008   -0.0190    0.456    -0.608 
#> 10  2009   -1.69      0.0122    0.771 
#> # ℹ 5 more rows

Tidy census

The us_rent_income dataset contains information about median income and rent for each state in the US for 2017 (from the American Community Survey, retrieved with the tidycensus package).

us_rent_income
#> # A tibble: 104 × 5
#>    GEOID NAME       variable estimate   moe
#>    <chr> <chr>      <chr>       <dbl> <dbl>
#>  1 01    Alabama    income      24476   136
#>  2 01    Alabama    rent          747     3
#>  3 02    Alaska     income      32940   508
#>  4 02    Alaska     rent         1200    13
#>  5 04    Arizona    income      27517   148
#>  6 04    Arizona    rent          972     4
#>  7 05    Arkansas   income      23789   165
#>  8 05    Arkansas   rent          709     5
#>  9 06    California income      29454   109
#> 10 06    California rent         1358     3
#> # ℹ 94 more rows

Here both estimate and moe are values columns, so we can supply them to values_from:

us_rent_income %>% 
  pivot_wider(
    names_from = variable, 
    values_from = c(estimate, moe)
  )
#> # A tibble: 52 × 6
#>    GEOID NAME                 estimate_income estimate_rent moe_income moe_rent
#>    <chr> <chr>                          <dbl>         <dbl>      <dbl>    <dbl>
#>  1 01    Alabama                        24476           747        136        3
#>  2 02    Alaska                         32940          1200        508       13
#>  3 04    Arizona                        27517           972        148        4
#>  4 05    Arkansas                       23789           709        165        5
#>  5 06    California                     29454          1358        109        3
#>  6 08    Colorado                       32401          1125        109        5
#>  7 09    Connecticut                    35326          1123        195        5
#>  8 10    Delaware                       31560          1076        247       10
#>  9 11    District of Columbia           43198          1424        681       17
#> 10 12    Florida                        25952          1077         70        3
#> # ℹ 42 more rows

Note that the name of the variable is automatically appended to the output columns.

Implicit missing values

Occasionally, you’ll come across data where your names variable is encoded as a factor, but not all of the data will be represented.

weekdays <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")

daily <- tibble(
  day = factor(c("Tue", "Thu", "Fri", "Mon"), levels = weekdays),
  value = c(2, 3, 1, 5)
)

daily
#> # A tibble: 4 × 2
#>   day   value
#>   <fct> <dbl>
#> 1 Tue       2
#> 2 Thu       3
#> 3 Fri       1
#> 4 Mon       5

pivot_wider() defaults to generating columns from the values that are actually represented in the data, but you might want to include a column for each possible level in case the data changes in the future.

daily %>%
  pivot_wider(
    names_from = day, 
    values_from = value
  )
#> # A tibble: 1 × 4
#>     Tue   Thu   Fri   Mon
#>   <dbl> <dbl> <dbl> <dbl>
#> 1     2     3     1     5

The names_expand argument will turn implicit factor levels into explicit ones, forcing them to be represented in the result. It also sorts the column names using the level order, which produces more intuitive results in this case.

daily %>% 
  pivot_wider(
    names_from = day, 
    values_from = value, 
    names_expand = TRUE
  )
#> # A tibble: 1 × 7
#>     Mon   Tue   Wed   Thu   Fri   Sat   Sun
#>   <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#> 1     5     2    NA     3     1    NA    NA

If multiple names_from columns are provided, names_expand will generate a Cartesian product of all possible combinations of the names_from values. Notice that the following data has omitted some rows where the percentage value would be 0. names_expand allows us to make those explicit during the pivot.

percentages <- tibble(
  year = c(2018, 2019, 2020, 2020),
  type = factor(c("A", "B", "A", "B"), levels = c("A", "B")),
  percentage = c(100, 100, 40, 60)
)

percentages
#> # A tibble: 4 × 3
#>    year type  percentage
#>   <dbl> <fct>      <dbl>
#> 1  2018 A            100
#> 2  2019 B            100
#> 3  2020 A             40
#> 4  2020 B             60

percentages %>% 
  pivot_wider(
    names_from = c(year, type),
    values_from = percentage,
    names_expand = TRUE,
    values_fill = 0
  )
#> # A tibble: 1 × 6
#>   `2018_A` `2018_B` `2019_A` `2019_B` `2020_A` `2020_B`
#>      <dbl>    <dbl>    <dbl>    <dbl>    <dbl>    <dbl>
#> 1      100        0        0      100       40       60

A related problem can occur when there are implicit missing factor levels or combinations in the id_cols. In this case, there are missing rows (rather than columns) that you’d like to explicitly represent. For this example, we’ll modify our daily data with a type column, and pivot on that instead, keeping day as an id column.

daily <- mutate(daily, type = factor(c("A", "B", "B", "A")))
daily
#> # A tibble: 4 × 3
#>   day   value type 
#>   <fct> <dbl> <fct>
#> 1 Tue       2 A    
#> 2 Thu       3 B    
#> 3 Fri       1 B    
#> 4 Mon       5 A

All of our type levels are represented in the columns, but we are missing some rows related to the unrepresented day factor levels.

daily %>%
  pivot_wider(
    names_from = type, 
    values_from = value,
    values_fill = 0
  )
#> # A tibble: 4 × 3
#>   day       A     B
#>   <fct> <dbl> <dbl>
#> 1 Tue       2     0
#> 2 Thu       0     3
#> 3 Fri       0     1
#> 4 Mon       5     0

We can use id_expand in the same way that we used names_expand, which will expand out (and sort) the implicit missing rows in the id_cols.

daily %>% 
  pivot_wider(
    names_from = type, 
    values_from = value,
    values_fill = 0,
    id_expand = TRUE
  )
#> # A tibble: 7 × 3
#>   day       A     B
#>   <fct> <dbl> <dbl>
#> 1 Mon       5     0
#> 2 Tue       2     0
#> 3 Wed       0     0
#> 4 Thu       0     3
#> 5 Fri       0     1
#> 6 Sat       0     0
#> 7 Sun       0     0

Unused columns

Imagine you’ve found yourself in a situation where you have columns in your data that are completely unrelated to the pivoting process, but you’d still like to retain their information somehow. For example, in updates we’d like to pivot on the system column to create one row summaries of each county’s system updates.

updates <- tibble(
  county = c("Wake", "Wake", "Wake", "Guilford", "Guilford"),
  date = c(as.Date("2020-01-01") + 0:2, as.Date("2020-01-03") + 0:1),
  system = c("A", "B", "C", "A", "C"),
  value = c(3.2, 4, 5.5, 2, 1.2)
)

updates
#> # A tibble: 5 × 4
#>   county   date       system value
#>   <chr>    <date>     <chr>  <dbl>
#> 1 Wake     2020-01-01 A        3.2
#> 2 Wake     2020-01-02 B        4  
#> 3 Wake     2020-01-03 C        5.5
#> 4 Guilford 2020-01-03 A        2  
#> 5 Guilford 2020-01-04 C        1.2

We could do that with a typical pivot_wider() call, but we completely lose all information about the date column.

updates %>% 
  pivot_wider(
    id_cols = county, 
    names_from = system, 
    values_from = value
  )
#> # A tibble: 2 × 4
#>   county       A     B     C
#>   <chr>    <dbl> <dbl> <dbl>
#> 1 Wake       3.2     4   5.5
#> 2 Guilford   2      NA   1.2

For this example, we’d like to retain the most recent update date across all systems in a particular county. To accomplish that we can use the unused_fn argument, which allows us to summarize values from the columns not utilized in the pivoting process.

updates %>% 
  pivot_wider(
    id_cols = county, 
    names_from = system, 
    values_from = value,
    unused_fn = list(date = max)
  )
#> # A tibble: 2 × 5
#>   county       A     B     C date      
#>   <chr>    <dbl> <dbl> <dbl> <date>    
#> 1 Wake       3.2     4   5.5 2020-01-03
#> 2 Guilford   2      NA   1.2 2020-01-04

You can also retain the data but delay the aggregation entirely by using list() as the summary function.

updates %>% 
  pivot_wider(
    id_cols = county, 
    names_from = system, 
    values_from = value,
    unused_fn = list(date = list)
  )
#> # A tibble: 2 × 5
#>   county       A     B     C date      
#>   <chr>    <dbl> <dbl> <dbl> <list>    
#> 1 Wake       3.2     4   5.5 <date [3]>
#> 2 Guilford   2      NA   1.2 <date [2]>

Contact list

A final challenge is inspired by Jiena Gu. Imagine you have a contact list that you’ve copied and pasted from a website:

contacts <- tribble(
  ~field, ~value,
  "name", "Jiena McLellan",
  "company", "Toyota", 
  "name", "John Smith", 
  "company", "google", 
  "email", "john@google.com",
  "name", "Huxley Ratcliffe"
)

This is challenging because there’s no variable that identifies which observations belong together. We can fix this by noting that every contact starts with a name, so we can create a unique id by counting every time we see “name” as the field:

contacts <- contacts %>% 
  mutate(
    person_id = cumsum(field == "name")
  )
contacts
#> # A tibble: 6 × 3
#>   field   value            person_id
#>   <chr>   <chr>                <int>
#> 1 name    Jiena McLellan           1
#> 2 company Toyota                   1
#> 3 name    John Smith               2
#> 4 company google                   2
#> 5 email   john@google.com          2
#> 6 name    Huxley Ratcliffe         3

Now that we have a unique identifier for each person, we can pivot field and value into the columns:

contacts %>% 
  pivot_wider(
    names_from = field, 
    values_from = value
  )
#> # A tibble: 3 × 4
#>   person_id name             company email          
#>       <int> <chr>            <chr>   <chr>          
#> 1         1 Jiena McLellan   Toyota  <NA>           
#> 2         2 John Smith       google  john@google.com
#> 3         3 Huxley Ratcliffe <NA>    <NA>

Longer, then wider

Some problems can’t be solved by pivoting in a single direction. The examples in this section show how you might combine pivot_longer() and pivot_wider() to solve more complex problems.

World bank

world_bank_pop contains data from the World Bank about population per country from 2000 to 2018.

world_bank_pop
#> # A tibble: 1,064 × 20
#>    country indicator      `2000`  `2001`  `2002`  `2003`  `2004`  `2005`  `2006`
#>    <chr>   <chr>           <dbl>   <dbl>   <dbl>   <dbl>   <dbl>   <dbl>   <dbl>
#>  1 ABW     SP.URB.TOTL    4.16e4 4.20e+4 4.22e+4 4.23e+4 4.23e+4 4.24e+4 4.26e+4
#>  2 ABW     SP.URB.GROW    1.66e0 9.56e-1 4.01e-1 1.97e-1 9.46e-2 1.94e-1 3.67e-1
#>  3 ABW     SP.POP.TOTL    8.91e4 9.07e+4 9.18e+4 9.27e+4 9.35e+4 9.45e+4 9.56e+4
#>  4 ABW     SP.POP.GROW    2.54e0 1.77e+0 1.19e+0 9.97e-1 9.01e-1 1.00e+0 1.18e+0
#>  5 AFE     SP.URB.TOTL    1.16e8 1.20e+8 1.24e+8 1.29e+8 1.34e+8 1.39e+8 1.44e+8
#>  6 AFE     SP.URB.GROW    3.60e0 3.66e+0 3.72e+0 3.71e+0 3.74e+0 3.81e+0 3.81e+0
#>  7 AFE     SP.POP.TOTL    4.02e8 4.12e+8 4.23e+8 4.34e+8 4.45e+8 4.57e+8 4.70e+8
#>  8 AFE     SP.POP.GROW    2.58e0 2.59e+0 2.61e+0 2.62e+0 2.64e+0 2.67e+0 2.70e+0
#>  9 AFG     SP.URB.TOTL    4.31e6 4.36e+6 4.67e+6 5.06e+6 5.30e+6 5.54e+6 5.83e+6
#> 10 AFG     SP.URB.GROW    1.86e0 1.15e+0 6.86e+0 7.95e+0 4.59e+0 4.47e+0 5.03e+0
#> # ℹ 1,054 more rows
#> # ℹ 11 more variables: `2007` <dbl>, `2008` <dbl>, `2009` <dbl>, `2010` <dbl>,
#> #   `2011` <dbl>, `2012` <dbl>, `2013` <dbl>, `2014` <dbl>, `2015` <dbl>,
#> #   `2016` <dbl>, `2017` <dbl>

My goal is to produce a tidy dataset where each variable is in a column. It’s not obvious exactly what steps are needed yet, but I’ll start with the most obvious problem: year is spread across multiple columns.

pop2 <- world_bank_pop %>% 
  pivot_longer(
    cols = `2000`:`2017`, 
    names_to = "year", 
    values_to = "value"
  )
pop2
#> # A tibble: 19,152 × 4
#>    country indicator   year  value
#>    <chr>   <chr>       <chr> <dbl>
#>  1 ABW     SP.URB.TOTL 2000  41625
#>  2 ABW     SP.URB.TOTL 2001  42025
#>  3 ABW     SP.URB.TOTL 2002  42194
#>  4 ABW     SP.URB.TOTL 2003  42277
#>  5 ABW     SP.URB.TOTL 2004  42317
#>  6 ABW     SP.URB.TOTL 2005  42399
#>  7 ABW     SP.URB.TOTL 2006  42555
#>  8 ABW     SP.URB.TOTL 2007  42729
#>  9 ABW     SP.URB.TOTL 2008  42906
#> 10 ABW     SP.URB.TOTL 2009  43079
#> # ℹ 19,142 more rows

Next we need to consider the indicator variable:

pop2 %>% 
  count(indicator)
#> # A tibble: 4 × 2
#>   indicator       n
#>   <chr>       <int>
#> 1 SP.POP.GROW  4788
#> 2 SP.POP.TOTL  4788
#> 3 SP.URB.GROW  4788
#> 4 SP.URB.TOTL  4788

Here SP.POP.GROW is population growth, SP.POP.TOTL is total population, and SP.URB.* are the same but only for urban areas. Let’s split this up into two variables: area (total or urban) and the actual variable (population or growth):

pop3 <- pop2 %>% 
  separate(indicator, c(NA, "area", "variable"))
pop3
#> # A tibble: 19,152 × 5
#>    country area  variable year  value
#>    <chr>   <chr> <chr>    <chr> <dbl>
#>  1 ABW     URB   TOTL     2000  41625
#>  2 ABW     URB   TOTL     2001  42025
#>  3 ABW     URB   TOTL     2002  42194
#>  4 ABW     URB   TOTL     2003  42277
#>  5 ABW     URB   TOTL     2004  42317
#>  6 ABW     URB   TOTL     2005  42399
#>  7 ABW     URB   TOTL     2006  42555
#>  8 ABW     URB   TOTL     2007  42729
#>  9 ABW     URB   TOTL     2008  42906
#> 10 ABW     URB   TOTL     2009  43079
#> # ℹ 19,142 more rows

Now we can complete the tidying by pivoting variable and value to make TOTL and GROW columns:

pop3 %>% 
  pivot_wider(
    names_from = variable, 
    values_from = value
  )
#> # A tibble: 9,576 × 5
#>    country area  year   TOTL   GROW
#>    <chr>   <chr> <chr> <dbl>  <dbl>
#>  1 ABW     URB   2000  41625 1.66  
#>  2 ABW     URB   2001  42025 0.956 
#>  3 ABW     URB   2002  42194 0.401 
#>  4 ABW     URB   2003  42277 0.197 
#>  5 ABW     URB   2004  42317 0.0946
#>  6 ABW     URB   2005  42399 0.194 
#>  7 ABW     URB   2006  42555 0.367 
#>  8 ABW     URB   2007  42729 0.408 
#>  9 ABW     URB   2008  42906 0.413 
#> 10 ABW     URB   2009  43079 0.402 
#> # ℹ 9,566 more rows

Multi-choice

Based on a suggestion by Maxime Wack (https://github.com/tidyverse/tidyr/issues/384), the final example shows how to deal with a common way of recording multiple choice data. Often you will get such data as follows:

multi <- tribble(
  ~id, ~choice1, ~choice2, ~choice3,
  1, "A", "B", "C",
  2, "C", "B",  NA,
  3, "D",  NA,  NA,
  4, "B", "D",  NA
)

But the actual order isn’t important, and you’d prefer to have the individual questions in the columns. You can achieve the desired transformation in two steps. First, you make the data longer, eliminating the explicit NAs, and adding a column to indicate that this choice was chosen:

multi2 <- multi %>% 
  pivot_longer(
    cols = !id, 
    values_drop_na = TRUE
  ) %>% 
  mutate(checked = TRUE)
multi2
#> # A tibble: 8 × 4
#>      id name    value checked
#>   <dbl> <chr>   <chr> <lgl>  
#> 1     1 choice1 A     TRUE   
#> 2     1 choice2 B     TRUE   
#> 3     1 choice3 C     TRUE   
#> 4     2 choice1 C     TRUE   
#> 5     2 choice2 B     TRUE   
#> 6     3 choice1 D     TRUE   
#> 7     4 choice1 B     TRUE   
#> 8     4 choice2 D     TRUE

Then you make the data wider, filling in the missing observations with FALSE:

multi2 %>% 
  pivot_wider(
    id_cols = id,
    names_from = value, 
    values_from = checked, 
    values_fill = FALSE
  )
#> # A tibble: 4 × 5
#>      id A     B     C     D    
#>   <dbl> <lgl> <lgl> <lgl> <lgl>
#> 1     1 TRUE  TRUE  TRUE  FALSE
#> 2     2 FALSE TRUE  TRUE  FALSE
#> 3     3 FALSE FALSE FALSE TRUE 
#> 4     4 FALSE TRUE  FALSE TRUE

Manual specs

The arguments to pivot_longer() and pivot_wider() allow you to pivot a wide range of datasets. But the creativity that people apply to their data structures is seemingly endless, so it’s quite possible that you will encounter a dataset that you can’t immediately see how to reshape with pivot_longer() and pivot_wider(). To gain more control over pivoting, you can instead create a “spec” data frame that describes exactly how data stored in the column names becomes variables (and vice versa). This section introduces you to the spec data structure, and shows you how to use it when pivot_longer() and pivot_wider() are insufficient.

Longer

To see how this works, lets return to the simplest case of pivoting applied to the relig_income dataset. Now pivoting happens in two steps: we first create a spec object (using build_longer_spec()) then use that to describe the pivoting operation:

spec <- relig_income %>% 
  build_longer_spec(
    cols = !religion, 
    names_to = "income",
    values_to = "count"
  )
pivot_longer_spec(relig_income, spec)
#> # A tibble: 180 × 3
#>    religion income             count
#>    <chr>    <chr>              <dbl>
#>  1 Agnostic <$10k                 27
#>  2 Agnostic $10-20k               34
#>  3 Agnostic $20-30k               60
#>  4 Agnostic $30-40k               81
#>  5 Agnostic $40-50k               76
#>  6 Agnostic $50-75k              137
#>  7 Agnostic $75-100k             122
#>  8 Agnostic $100-150k            109
#>  9 Agnostic >150k                 84
#> 10 Agnostic Don't know/refused    96
#> # ℹ 170 more rows

(This gives the same result as before, just with more code. There’s no need to use it here, it is presented as a simple example for using spec.)

What does spec look like? It’s a data frame with one row for each column in the wide format version of the data that is not present in the long format, and two special columns that start with .:

  • .name gives the name of the column.
  • .value gives the name of the column that the values in the cells will go into.

There is also one column in spec for each column present in the long format of the data that is not present in the wide format of the data. This corresponds to the names_to argument in pivot_longer() and build_longer_spec() and the names_from argument in pivot_wider() and build_wider_spec(). In this example, the income column is a character vector of the names of columns being pivoted.

spec
#> # A tibble: 10 × 3
#>    .name              .value income            
#>    <chr>              <chr>  <chr>             
#>  1 <$10k              count  <$10k             
#>  2 $10-20k            count  $10-20k           
#>  3 $20-30k            count  $20-30k           
#>  4 $30-40k            count  $30-40k           
#>  5 $40-50k            count  $40-50k           
#>  6 $50-75k            count  $50-75k           
#>  7 $75-100k           count  $75-100k          
#>  8 $100-150k          count  $100-150k         
#>  9 >150k              count  >150k             
#> 10 Don't know/refused count  Don't know/refused

Wider

Below we widen us_rent_income with pivot_wider(). The result is ok, but I think it could be improved:

us_rent_income %>% 
  pivot_wider(
    names_from = variable, 
    values_from = c(estimate, moe)
  )
#> # A tibble: 52 × 6
#>    GEOID NAME                 estimate_income estimate_rent moe_income moe_rent
#>    <chr> <chr>                          <dbl>         <dbl>      <dbl>    <dbl>
#>  1 01    Alabama                        24476           747        136        3
#>  2 02    Alaska                         32940          1200        508       13
#>  3 04    Arizona                        27517           972        148        4
#>  4 05    Arkansas                       23789           709        165        5
#>  5 06    California                     29454          1358        109        3
#>  6 08    Colorado                       32401          1125        109        5
#>  7 09    Connecticut                    35326          1123        195        5
#>  8 10    Delaware                       31560          1076        247       10
#>  9 11    District of Columbia           43198          1424        681       17
#> 10 12    Florida                        25952          1077         70        3
#> # ℹ 42 more rows

I think it would be better to have columns income, rent, income_moe, and rent_moe, which we can achieve with a manual spec. The current spec looks like this:

spec1 <- us_rent_income %>% 
  build_wider_spec(
    names_from = variable, 
    values_from = c(estimate, moe)
  )
spec1
#> # A tibble: 4 × 3
#>   .name           .value   variable
#>   <chr>           <chr>    <chr>   
#> 1 estimate_income estimate income  
#> 2 estimate_rent   estimate rent    
#> 3 moe_income      moe      income  
#> 4 moe_rent        moe      rent

For this case, we mutate spec to carefully construct the column names:

spec2 <- spec1 %>%
  mutate(
    .name = paste0(variable, ifelse(.value == "moe", "_moe", ""))
  )
spec2
#> # A tibble: 4 × 3
#>   .name      .value   variable
#>   <chr>      <chr>    <chr>   
#> 1 income     estimate income  
#> 2 rent       estimate rent    
#> 3 income_moe moe      income  
#> 4 rent_moe   moe      rent

Supplying this spec to pivot_wider() gives us the result we’re looking for:

us_rent_income %>% 
  pivot_wider_spec(spec2)
#> # A tibble: 52 × 6
#>    GEOID NAME                 income  rent income_moe rent_moe
#>    <chr> <chr>                 <dbl> <dbl>      <dbl>    <dbl>
#>  1 01    Alabama               24476   747        136        3
#>  2 02    Alaska                32940  1200        508       13
#>  3 04    Arizona               27517   972        148        4
#>  4 05    Arkansas              23789   709        165        5
#>  5 06    California            29454  1358        109        3
#>  6 08    Colorado              32401  1125        109        5
#>  7 09    Connecticut           35326  1123        195        5
#>  8 10    Delaware              31560  1076        247       10
#>  9 11    District of Columbia  43198  1424        681       17
#> 10 12    Florida               25952  1077         70        3
#> # ℹ 42 more rows

By hand

Sometimes it’s not possible (or not convenient) to compute the spec, and instead it’s more convenient to construct the spec “by hand”. For example, take this construction data, which is lightly modified from Table 5 “completions” found at https://www.census.gov/construction/nrc/index.html:

construction
#> # A tibble: 9 × 9
#>    Year Month  `1 unit` `2 to 4 units` `5 units or more` Northeast Midwest South
#>   <dbl> <chr>     <dbl> <lgl>                      <dbl>     <dbl>   <dbl> <dbl>
#> 1  2018 Janua…      859 NA                           348       114     169   596
#> 2  2018 Febru…      882 NA                           400       138     160   655
#> 3  2018 March       862 NA                           356       150     154   595
#> 4  2018 April       797 NA                           447       144     196   613
#> 5  2018 May         875 NA                           364        90     169   673
#> 6  2018 June        867 NA                           342        76     170   610
#> 7  2018 July        829 NA                           360       108     183   594
#> 8  2018 August      939 NA                           286        90     205   649
#> 9  2018 Septe…      835 NA                           304       117     175   560
#> # ℹ 1 more variable: West <dbl>

This sort of data is not uncommon from government agencies: the column names actually belong to different variables, and here we have summaries for number of units (1, 2-4, 5+) and regions of the country (Northeast, Midwest, South, West). We can most easily describe that with a tibble:

spec <- tribble(
  ~.name,            ~.value, ~units,  ~region,     
  "1 unit",          "n",     "1",     NA,          
  "2 to 4 units",    "n",     "2-4",   NA,          
  "5 units or more", "n",     "5+",    NA,          
  "Northeast",       "n",     NA,      "Northeast", 
  "Midwest",         "n",     NA,      "Midwest",   
  "South",           "n",     NA,      "South",     
  "West",            "n",     NA,      "West",      
)

Which yields the following longer form:

construction %>% pivot_longer_spec(spec)
#> # A tibble: 63 × 5
#>     Year Month    units region        n
#>    <dbl> <chr>    <chr> <chr>     <dbl>
#>  1  2018 January  1     <NA>        859
#>  2  2018 January  2-4   <NA>         NA
#>  3  2018 January  5+    <NA>        348
#>  4  2018 January  <NA>  Northeast   114
#>  5  2018 January  <NA>  Midwest     169
#>  6  2018 January  <NA>  South       596
#>  7  2018 January  <NA>  West        339
#>  8  2018 February 1     <NA>        882
#>  9  2018 February 2-4   <NA>         NA
#> 10  2018 February 5+    <NA>        400
#> # ℹ 53 more rows

Note that there is no overlap between the units and region variables; here the data would really be most naturally described in two independent tables.

Theory

One neat property of the spec is that you need the same spec for pivot_longer() and pivot_wider(). This makes it very clear that the two operations are symmetric:

construction %>% 
  pivot_longer_spec(spec) %>% 
  pivot_wider_spec(spec)
#> # A tibble: 9 × 9
#>    Year Month  `1 unit` `2 to 4 units` `5 units or more` Northeast Midwest South
#>   <dbl> <chr>     <dbl>          <dbl>             <dbl>     <dbl>   <dbl> <dbl>
#> 1  2018 Janua…      859             NA               348       114     169   596
#> 2  2018 Febru…      882             NA               400       138     160   655
#> 3  2018 March       862             NA               356       150     154   595
#> 4  2018 April       797             NA               447       144     196   613
#> 5  2018 May         875             NA               364        90     169   673
#> 6  2018 June        867             NA               342        76     170   610
#> 7  2018 July        829             NA               360       108     183   594
#> 8  2018 August      939             NA               286        90     205   649
#> 9  2018 Septe…      835             NA               304       117     175   560
#> # ℹ 1 more variable: West <dbl>

The pivoting spec allows us to be more precise about exactly how pivot_longer(df, spec = spec) changes the shape of df: it will have nrow(df) * nrow(spec) rows, and ncol(df) - nrow(spec) + ncol(spec) - 2 columns.

tidyr/inst/doc/pivot.Rmd0000644000176200001440000006441314553565751014764 0ustar liggesusers--- title: "Pivoting" output: rmarkdown::html_vignette description: Learn how use the new `pivot_longer()` and `pivot_wider()` functions which change the representation of a dataset without changing the data it contains. vignette: > %\VignetteIndexEntry{Pivoting} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(tibble.print_max = 10) ``` ## Introduction This vignette describes the use of the new `pivot_longer()` and `pivot_wider()` functions. Their goal is to improve the usability of `gather()` and `spread()`, and incorporate state-of-the-art features found in other packages. For some time, it's been obvious that there is something fundamentally wrong with the design of `spread()` and `gather()`. Many people don't find the names intuitive and find it hard to remember which direction corresponds to spreading and which to gathering. It also seems surprisingly hard to remember the arguments to these functions, meaning that many people (including me!) have to consult the documentation every time. There are two important new features inspired by other R packages that have been advancing reshaping in R: * `pivot_longer()` can work with multiple value variables that may have different types, inspired by the enhanced `melt()` and `dcast()` functions provided by the [data.table][data.table] package by Matt Dowle and Arun Srinivasan. * `pivot_longer()` and `pivot_wider()` can take a data frame that specifies precisely how metadata stored in column names becomes data variables (and vice versa), inspired by the [cdata][cdata] package by John Mount and Nina Zumel. In this vignette, you'll learn the key ideas behind `pivot_longer()` and `pivot_wider()` as you see them used to solve a variety of data reshaping challenges ranging from simple to complex. 
To begin we'll load some needed packages. In real analysis code, I'd imagine you'd do with the `library(tidyverse)`, but I can't do that here since this vignette is embedded in a package. ```{r setup, message = FALSE} library(tidyr) library(dplyr) library(readr) ``` ## Longer `pivot_longer()` makes datasets __longer__ by increasing the number of rows and decreasing the number of columns. I don't believe it makes sense to describe a dataset as being in "long form". Length is a relative term, and you can only say (e.g.) that dataset A is longer than dataset B. `pivot_longer()` is commonly needed to tidy wild-caught datasets as they often optimise for ease of data entry or ease of comparison rather than ease of analysis. The following sections show how to use `pivot_longer()` for a wide range of realistic datasets. ### String data in column names {#pew} The `relig_income` dataset stores counts based on a survey which (among other things) asked people about their religion and annual income: ```{r} relig_income ``` This dataset contains three variables: * `religion`, stored in the rows, * `income` spread across the column names, and * `count` stored in the cell values. To tidy it we use `pivot_longer()`: ```{r} relig_income %>% pivot_longer( cols = !religion, names_to = "income", values_to = "count" ) ``` * The first argument is the dataset to reshape, `relig_income`. * `cols` describes which columns need to be reshaped. In this case, it's every column apart from `religion`. * `names_to` gives the name of the variable that will be created from the data stored in the column names, i.e. `income`. * `values_to` gives the name of the variable that will be created from the data stored in the cell value, i.e. `count`. Neither the `names_to` nor the `values_to` column exists in `relig_income`, so we provide them as strings surrounded by quotes. ### Numeric data in column names {#billboard} The `billboard` dataset records the billboard rank of songs in the year 2000. 
It has a form similar to the `relig_income` data, but the data encoded in the column names is really a number, not a string. ```{r} billboard ``` We can start with the same basic specification as for the `relig_income` dataset. Here we want the names to become a variable called `week`, and the values to become a variable called `rank`. I also use `values_drop_na` to drop rows that correspond to missing values. Not every song stays in the charts for all 76 weeks, so the structure of the input data force the creation of unnecessary explicit `NA`s. ```{r} billboard %>% pivot_longer( cols = starts_with("wk"), names_to = "week", values_to = "rank", values_drop_na = TRUE ) ``` It would be nice to easily determine how long each song stayed in the charts, but to do that, we'll need to convert the `week` variable to an integer. We can do that by using two additional arguments: `names_prefix` strips off the `wk` prefix, and `names_transform` converts `week` into an integer: ```{r, eval = FALSE} billboard %>% pivot_longer( cols = starts_with("wk"), names_to = "week", names_prefix = "wk", names_transform = as.integer, values_to = "rank", values_drop_na = TRUE, ) ``` Alternatively, you could do this with a single argument by using `readr::parse_number()` which automatically strips non-numeric components: ```{r, eval = FALSE} billboard %>% pivot_longer( cols = starts_with("wk"), names_to = "week", names_transform = readr::parse_number, values_to = "rank", values_drop_na = TRUE, ) ``` ### Many variables in column names A more challenging situation occurs when you have multiple variables crammed into the column names. For example, take the `who` dataset: ```{r} who ``` `country`, `iso2`, `iso3`, and `year` are already variables, so they can be left as is. But the columns from `new_sp_m014` to `newrel_f65` encode four variables in their names: * The `new_`/`new` prefix indicates these are counts of new cases. 
This dataset only contains new cases, so we'll ignore it here because it's constant. * `sp`/`rel`/`ep` describe how the case was diagnosed. * `m`/`f` gives the gender. * `014`/`1524`/`2535`/`3544`/`4554`/`65` supplies the age range. We can break these variables up by specifying multiple column names in `names_to`, and then either providing `names_sep` or `names_pattern`. Here `names_pattern` is the most natural fit. It has a similar interface to `extract`: you give it a regular expression containing groups (defined by `()`) and it puts each group in a column. ```{r} who %>% pivot_longer( cols = new_sp_m014:newrel_f65, names_to = c("diagnosis", "gender", "age"), names_pattern = "new_?(.*)_(.)(.*)", values_to = "count" ) ``` We could go one step further use readr functions to convert the gender and age to factors. I think this is good practice when you have categorical variables with a known set of values. ```{r, eval = FALSE} who %>% pivot_longer( cols = new_sp_m014:newrel_f65, names_to = c("diagnosis", "gender", "age"), names_pattern = "new_?(.*)_(.)(.*)", names_transform = list( gender = ~ readr::parse_factor(.x, levels = c("f", "m")), age = ~ readr::parse_factor( .x, levels = c("014", "1524", "2534", "3544", "4554", "5564", "65"), ordered = TRUE ) ), values_to = "count", ) ``` Doing it this way is a little more efficient than doing a mutate after the fact, `pivot_longer()` only has to transform one occurence of each name where a `mutate()` would need to transform many repetitions. ### Multiple observations per row So far, we have been working with data frames that have one observation per row, but many important pivoting problems involve multiple observations per row. You can usually recognise this case because name of the column that you want to appear in the output is part of the column name in the input. In this section, you'll learn how to pivot this sort of data. 
The following example is adapted from the [data.table vignette](https://CRAN.R-project.org/package=data.table/vignettes/datatable-reshape.html), as inspiration for tidyr's solution to this problem. ```{r} household ``` Note that we have two pieces of information (or values) for each child: their `name` and their `dob` (date of birth). These need to go into separate columns in the result. Again we supply multiple variables to `names_to`, using `names_sep` to split up each variable name. Note the special name `.value`: this tells `pivot_longer()` that that part of the column name specifies the "value" being measured (which will become a variable in the output). ```{r} household %>% pivot_longer( cols = !family, names_to = c(".value", "child"), names_sep = "_", values_drop_na = TRUE ) ``` Note the use of `values_drop_na = TRUE`: the input shape forces the creation of explicit missing variables for observations that don't exist. A similar problem problem also exists in the `anscombe` dataset built in to base R: ```{r} anscombe ``` This dataset contains four pairs of variables (`x1` and `y1`, `x2` and `y2`, etc) that underlie Anscombe's quartet, a collection of four datasets that have the same summary statistics (mean, sd, correlation etc), but have quite different data. We want to produce a dataset with columns `set`, `x` and `y`. ```{r} anscombe %>% pivot_longer( cols = everything(), cols_vary = "slowest", names_to = c(".value", "set"), names_pattern = "(.)(.)" ) ``` Setting `cols_vary` to `"slowest"` groups the values from columns `x1` and `y1` together in the rows of the output before moving on to `x2` and `y2`. This argument often produces more intuitively ordered output when you are pivoting every column in your dataset. A similar situation can arise with panel data. For example, take this example dataset provided by [Thomas Leeper](https://github.com/gesistsa/rio/issues/193). 
We can tidy it using the same approach as for `anscombe`: ```{r} pnl <- tibble( x = 1:4, a = c(1, 1,0, 0), b = c(0, 1, 1, 1), y1 = rnorm(4), y2 = rnorm(4), z1 = rep(3, 4), z2 = rep(-2, 4), ) pnl %>% pivot_longer( cols = !c(x, a, b), names_to = c(".value", "time"), names_pattern = "(.)(.)" ) ``` ## Wider `pivot_wider()` is the opposite of `pivot_longer()`: it makes a dataset __wider__ by increasing the number of columns and decreasing the number of rows. It's relatively rare to need `pivot_wider()` to make tidy data, but it's often useful for creating summary tables for presentation, or data in a format needed by other tools. ### Capture-recapture data The `fish_encounters` dataset, contributed by [Myfanwy Johnston](https://fishsciences.github.io/post/visualizing-fish-encounter-histories/), describes when fish swimming down a river are detected by automatic monitoring stations: ```{r} fish_encounters ``` Many tools used to analyse this data need it in a form where each station is a column: ```{r} fish_encounters %>% pivot_wider( names_from = station, values_from = seen ) ``` This dataset only records when a fish was detected by the station - it doesn't record when it wasn't detected (this is common with this type of data). That means the output data is filled with `NA`s. However, in this case we know that the absence of a record means that the fish was not `seen`, so we can ask `pivot_wider()` to fill these missing values in with zeros: ```{r} fish_encounters %>% pivot_wider( names_from = station, values_from = seen, values_fill = 0 ) ``` ### Aggregation You can also use `pivot_wider()` to perform simple aggregation. 
For example, take the `warpbreaks` dataset built in to base R (converted to a tibble for the better print method): ```{r} warpbreaks <- warpbreaks %>% as_tibble() %>% select(wool, tension, breaks) warpbreaks ``` This is a designed experiment with nine replicates for every combination of `wool` (`A` and `B`) and `tension` (`L`, `M`, `H`): ```{r} warpbreaks %>% count(wool, tension) ``` What happens if we attempt to pivot the levels of `wool` into the columns? ```{r} warpbreaks %>% pivot_wider( names_from = wool, values_from = breaks ) ``` We get a warning that each cell in the output corresponds to multiple cells in the input. The default behaviour produces list-columns, which contain all the individual values. A more useful output would be summary statistics, e.g. `mean` breaks for each combination of wool and tension: ```{r} warpbreaks %>% pivot_wider( names_from = wool, values_from = breaks, values_fn = mean ) ``` For more complex summary operations, I recommend summarising before reshaping, but for simple cases it's often convenient to summarise within `pivot_wider()`. ### Generate column name from multiple variables Imagine, as in , that we have information containing the combination of product, country, and year. In tidy form it might look like this: ```{r} production <- expand_grid( product = c("A", "B"), country = c("AI", "EI"), year = 2000:2014 ) %>% filter((product == "A" & country == "AI") | product == "B") %>% mutate(production = rnorm(nrow(.))) production ``` We want to widen the data so we have one column for each combination of `product` and `country`. 
The key is to specify multiple variables for `names_from`: ```{r} production %>% pivot_wider( names_from = c(product, country), values_from = production ) ``` When either `names_from` or `values_from` select multiple variables, you can control how the column names in the output constructed with `names_sep` and `names_prefix`, or the workhorse `names_glue`: ```{r} production %>% pivot_wider( names_from = c(product, country), values_from = production, names_sep = ".", names_prefix = "prod." ) production %>% pivot_wider( names_from = c(product, country), values_from = production, names_glue = "prod_{product}_{country}" ) ``` ### Tidy census The `us_rent_income` dataset contains information about median income and rent for each state in the US for 2017 (from the American Community Survey, retrieved with the [tidycensus][tidycensus] package). ```{r} us_rent_income ``` Here both `estimate` and `moe` are values columns, so we can supply them to `values_from`: ```{r} us_rent_income %>% pivot_wider( names_from = variable, values_from = c(estimate, moe) ) ``` Note that the name of the variable is automatically appended to the output columns. ### Implicit missing values Occasionally, you'll come across data where your names variable is encoded as a factor, but not all of the data will be represented. ```{r} weekdays <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") daily <- tibble( day = factor(c("Tue", "Thu", "Fri", "Mon"), levels = weekdays), value = c(2, 3, 1, 5) ) daily ``` `pivot_wider()` defaults to generating columns from the values that are actually represented in the data, but you might want to include a column for each possible level in case the data changes in the future. ```{r} daily %>% pivot_wider( names_from = day, values_from = value ) ``` The `names_expand` argument will turn implicit factor levels into explicit ones, forcing them to be represented in the result. 
It also sorts the column names using the level order, which produces more intuitive results in this case. ```{r} daily %>% pivot_wider( names_from = day, values_from = value, names_expand = TRUE ) ``` If multiple `names_from` columns are provided, `names_expand` will generate a Cartesian product of all possible combinations of the `names_from` values. Notice that the following data has omitted some rows where the percentage value would be `0`. `names_expand` allows us to make those explicit during the pivot. ```{r} percentages <- tibble( year = c(2018, 2019, 2020, 2020), type = factor(c("A", "B", "A", "B"), levels = c("A", "B")), percentage = c(100, 100, 40, 60) ) percentages percentages %>% pivot_wider( names_from = c(year, type), values_from = percentage, names_expand = TRUE, values_fill = 0 ) ``` A related problem can occur when there are implicit missing factor levels or combinations in the `id_cols`. In this case, there are missing rows (rather than columns) that you'd like to explicitly represent. For this example, we'll modify our `daily` data with a `type` column, and pivot on that instead, keeping `day` as an id column. ```{r} daily <- mutate(daily, type = factor(c("A", "B", "B", "A"))) daily ``` All of our `type` levels are represented in the columns, but we are missing some rows related to the unrepresented `day` factor levels. ```{r} daily %>% pivot_wider( names_from = type, values_from = value, values_fill = 0 ) ``` We can use `id_expand` in the same way that we used `names_expand`, which will expand out (and sort) the implicit missing rows in the `id_cols`. ```{r} daily %>% pivot_wider( names_from = type, values_from = value, values_fill = 0, id_expand = TRUE ) ``` ### Unused columns Imagine you've found yourself in a situation where you have columns in your data that are completely unrelated to the pivoting process, but you'd still like to retain their information somehow. 
For example, in `updates` we'd like to pivot on the `system` column to create one row summaries of each county's system updates. ```{r} updates <- tibble( county = c("Wake", "Wake", "Wake", "Guilford", "Guilford"), date = c(as.Date("2020-01-01") + 0:2, as.Date("2020-01-03") + 0:1), system = c("A", "B", "C", "A", "C"), value = c(3.2, 4, 5.5, 2, 1.2) ) updates ``` We could do that with a typical `pivot_wider()` call, but we completely lose all information about the `date` column. ```{r} updates %>% pivot_wider( id_cols = county, names_from = system, values_from = value ) ``` For this example, we'd like to retain the most recent update date across all systems in a particular county. To accomplish that we can use the `unused_fn` argument, which allows us to summarize values from the columns not utilized in the pivoting process. ```{r} updates %>% pivot_wider( id_cols = county, names_from = system, values_from = value, unused_fn = list(date = max) ) ``` You can also retain the data but delay the aggregation entirely by using `list()` as the summary function. ```{r} updates %>% pivot_wider( id_cols = county, names_from = system, values_from = value, unused_fn = list(date = list) ) ``` ### Contact list A final challenge is inspired by [Jiena Gu](https://github.com/jienagu/tidyverse_examples/blob/master/example_long_wide.R). Imagine you have a contact list that you've copied and pasted from a website: ```{r} contacts <- tribble( ~field, ~value, "name", "Jiena McLellan", "company", "Toyota", "name", "John Smith", "company", "google", "email", "john@google.com", "name", "Huxley Ratcliffe" ) ``` This is challenging because there's no variable that identifies which observations belong together. 
We can fix this by noting that every contact starts with a name, so we can create a unique id by counting every time we see "name" as the `field`: ```{r} contacts <- contacts %>% mutate( person_id = cumsum(field == "name") ) contacts ``` Now that we have a unique identifier for each person, we can pivot `field` and `value` into the columns: ```{r} contacts %>% pivot_wider( names_from = field, values_from = value ) ``` ## Longer, then wider Some problems can't be solved by pivoting in a single direction. The examples in this section show how you might combine `pivot_longer()` and `pivot_wider()` to solve more complex problems. ### World bank `world_bank_pop` contains data from the World Bank about population per country from 2000 to 2018. ```{r} world_bank_pop ``` My goal is to produce a tidy dataset where each variable is in a column. It's not obvious exactly what steps are needed yet, but I'll start with the most obvious problem: year is spread across multiple columns. ```{r} pop2 <- world_bank_pop %>% pivot_longer( cols = `2000`:`2017`, names_to = "year", values_to = "value" ) pop2 ``` Next we need to consider the `indicator` variable: ```{r} pop2 %>% count(indicator) ``` Here `SP.POP.GROW` is population growth, `SP.POP.TOTL` is total population, and `SP.URB.*` are the same but only for urban areas. Let's split this up into two variables: `area` (total or urban) and the actual variable (population or growth): ```{r} pop3 <- pop2 %>% separate(indicator, c(NA, "area", "variable")) pop3 ``` Now we can complete the tidying by pivoting `variable` and `value` to make `TOTL` and `GROW` columns: ```{r} pop3 %>% pivot_wider( names_from = variable, values_from = value ) ``` ### Multi-choice Based on a suggestion by [Maxime Wack](https://github.com/MaximeWack), the final example shows how to deal with a common way of recording multiple choice data. 
Often you will get such data as follows: ```{r} multi <- tribble( ~id, ~choice1, ~choice2, ~choice3, 1, "A", "B", "C", 2, "C", "B", NA, 3, "D", NA, NA, 4, "B", "D", NA ) ``` But the actual order isn't important, and you'd prefer to have the individual questions in the columns. You can achieve the desired transformation in two steps. First, you make the data longer, eliminating the explicit `NA`s, and adding a column to indicate that this choice was chosen: ```{r} multi2 <- multi %>% pivot_longer( cols = !id, values_drop_na = TRUE ) %>% mutate(checked = TRUE) multi2 ``` Then you make the data wider, filling in the missing observations with `FALSE`: ```{r} multi2 %>% pivot_wider( id_cols = id, names_from = value, values_from = checked, values_fill = FALSE ) ``` ## Manual specs The arguments to `pivot_longer()` and `pivot_wider()` allow you to pivot a wide range of datasets. But the creativity that people apply to their data structures is seemingly endless, so it's quite possible that you will encounter a dataset that you can't immediately see how to reshape with `pivot_longer()` and `pivot_wider()`. To gain more control over pivoting, you can instead create a "spec" data frame that describes exactly how data stored in the column names becomes variables (and vice versa). This section introduces you to the spec data structure, and shows you how to use it when `pivot_longer()` and `pivot_wider()` are insufficient. ### Longer To see how this works, let's return to the simplest case of pivoting applied to the `relig_income` dataset. Now pivoting happens in two steps: we first create a spec object (using `build_longer_spec()`) then use that to describe the pivoting operation: ```{r} spec <- relig_income %>% build_longer_spec( cols = !religion, names_to = "income", values_to = "count" ) pivot_longer_spec(relig_income, spec) ``` (This gives the same result as before, just with more code. There's no need to use it here, it is presented as a simple example for using `spec`.) 
What does `spec` look like? It's a data frame with one row for each column in the wide format version of the data that is not present in the long format, and two special columns that start with `.`: * `.name` gives the name of the column. * `.value` gives the name of the column that the values in the cells will go into. There is also one column in `spec` for each column present in the long format of the data that is not present in the wide format of the data. This corresponds to the `names_to` argument in `pivot_longer()` and `build_longer_spec()` and the `names_from` argument in `pivot_wider()` and `build_wider_spec()`. In this example, the income column is a character vector of the names of columns being pivoted. ```{r} spec ``` ### Wider Below we widen `us_rent_income` with `pivot_wider()`. The result is ok, but I think it could be improved: ```{r} us_rent_income %>% pivot_wider( names_from = variable, values_from = c(estimate, moe) ) ``` I think it would be better to have columns `income`, `rent`, `income_moe`, and `rent_moe`, which we can achieve with a manual spec. The current spec looks like this: ```{r} spec1 <- us_rent_income %>% build_wider_spec( names_from = variable, values_from = c(estimate, moe) ) spec1 ``` For this case, we mutate `spec` to carefully construct the column names: ```{r} spec2 <- spec1 %>% mutate( .name = paste0(variable, ifelse(.value == "moe", "_moe", "")) ) spec2 ``` Supplying this spec to `pivot_wider()` gives us the result we're looking for: ```{r} us_rent_income %>% pivot_wider_spec(spec2) ``` ### By hand Sometimes it's not possible (or not convenient) to compute the spec, and instead it's more convenient to construct the spec "by hand". 
For example, take this `construction` data, which is lightly modified from Table 5 "completions" found at : ```{r} construction ``` This sort of data is not uncommon from government agencies: the column names actually belong to different variables, and here we have summaries for number of units (1, 2-4, 5+) and regions of the country (NE, NW, midwest, S, W). We can most easily describe that with a tibble: ```{r} spec <- tribble( ~.name, ~.value, ~units, ~region, "1 unit", "n", "1", NA, "2 to 4 units", "n", "2-4", NA, "5 units or more", "n", "5+", NA, "Northeast", "n", NA, "Northeast", "Midwest", "n", NA, "Midwest", "South", "n", NA, "South", "West", "n", NA, "West", ) ``` Which yields the following longer form: ```{r} construction %>% pivot_longer_spec(spec) ``` Note that there is no overlap between the `units` and `region` variables; here the data would really be most naturally described in two independent tables. ### Theory One neat property of the `spec` is that you need the same spec for `pivot_longer()` and `pivot_wider()`. This makes it very clear that the two operations are symmetric: ```{r} construction %>% pivot_longer_spec(spec) %>% pivot_wider_spec(spec) ``` The pivoting spec allows us to be more precise about exactly how `pivot_longer(df, spec = spec)` changes the shape of `df`: it will have `nrow(df) * nrow(spec)` rows, and `ncol(df) - nrow(spec) + ncol(spec) - 2` columns. [cdata]: https://winvector.github.io/cdata/ [data.table]: https://github.com/Rdatatable/data.table/wiki [tidycensus]: https://walker-data.com/tidycensus/ tidyr/inst/doc/programming.html0000644000176200001440000005020014553746312016346 0ustar liggesusers Programming with tidyr

Programming with tidyr

Introduction

Most tidyr verbs use tidy evaluation to make interactive data exploration fast and fluid. Tidy evaluation is a special type of non-standard evaluation used throughout the tidyverse. Here’s some typical tidyr code:

library(tidyr)

iris %>%
  nest(data = !Species)
#> # A tibble: 3 × 2
#>   Species    data             
#>   <fct>      <list>           
#> 1 setosa     <tibble [50 × 4]>
#> 2 versicolor <tibble [50 × 4]>
#> 3 virginica  <tibble [50 × 4]>

Tidy evaluation is why we can use !Species to say “all the columns except Species”, without having to quote the column name ("Species") or refer to the enclosing data frame (iris$Species).

Two basic forms of tidy evaluation are used in tidyr:

  • Tidy selection: drop_na(), fill(), pivot_longer()/pivot_wider(), nest()/unnest(), separate()/extract(), and unite() let you select variables based on position, name, or type (e.g. 1:3, starts_with("x"), or is.numeric). Literally, you can use all the same techniques as with dplyr::select().

  • Data masking: expand(), crossing() and nesting() let you refer to data variables as if they were variables in the environment (i.e. you write my_variable not df$my_variable).

We focus on tidy selection here, since it’s the most common. You can learn more about data masking in the equivalent vignette in dplyr: https://dplyr.tidyverse.org/dev/articles/programming.html. For other considerations when writing tidyr code in packages, please see vignette("in-packages").

We’ve pointed out that tidyr’s tidy evaluation interface is optimized for interactive exploration. The flip side is that this adds some challenges to indirect use, i.e. when you’re working inside a for loop or a function. This vignette shows you how to overcome those challenges. We’ll first go over the basics of tidy selection and data masking, talk about how to use them indirectly, and then show you a number of recipes to solve common problems.

Before we go on, we reveal the version of tidyr we’re using and make a small dataset to use in examples.

packageVersion("tidyr")
#> [1] '1.3.1'

mini_iris <- as_tibble(iris)[c(1, 2, 51, 52, 101, 102), ]
mini_iris
#> # A tibble: 6 × 5
#>   Sepal.Length Sepal.Width Petal.Length Petal.Width Species   
#>          <dbl>       <dbl>        <dbl>       <dbl> <fct>     
#> 1          5.1         3.5          1.4         0.2 setosa    
#> 2          4.9         3            1.4         0.2 setosa    
#> 3          7           3.2          4.7         1.4 versicolor
#> 4          6.4         3.2          4.5         1.5 versicolor
#> 5          6.3         3.3          6           2.5 virginica 
#> 6          5.8         2.7          5.1         1.9 virginica

Tidy selection

Underneath all functions that use tidy selection is the tidyselect package. It provides a miniature domain specific language that makes it easy to select columns by name, position, or type. For example:

  • select(df, 1) selects the first column; select(df, last_col()) selects the last column.

  • select(df, c(a, b, c)) selects columns a, b, and c.

  • select(df, starts_with("a")) selects all columns whose name starts with “a”; select(df, ends_with("z")) selects all columns whose name ends with “z”.

  • select(df, where(is.numeric)) selects all numeric columns.

You can see more details in ?tidyr_tidy_select.

Indirection

Tidy selection makes a common task easier at the cost of making a less common task harder. When you want to use tidy select indirectly with the column specification stored in an intermediate variable, you’ll need to learn some new tools. There are three main cases where this comes up:

  • When you have the tidy-select specification in a function argument, you must embrace the argument by surrounding it in doubled braces.

    nest_egg <- function(df, cols) {
      nest(df, egg = {{ cols }})
    }
    
    nest_egg(mini_iris, !Species)
    #> # A tibble: 3 × 2
    #>   Species    egg             
    #>   <fct>      <list>          
    #> 1 setosa     <tibble [2 × 4]>
    #> 2 versicolor <tibble [2 × 4]>
    #> 3 virginica  <tibble [2 × 4]>
  • When you have a character vector of variable names, you must use all_of() or any_of() depending on whether you want the function to error if a variable is not found. These functions allow you to write for loops or a function that takes variable names as a character vector.

    nest_egg <- function(df, cols) {
      nest(df, egg = all_of(cols))
    }
    
    vars <- c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")
    nest_egg(mini_iris, vars)
    #> # A tibble: 3 × 2
    #>   Species    egg             
    #>   <fct>      <list>          
    #> 1 setosa     <tibble [2 × 4]>
    #> 2 versicolor <tibble [2 × 4]>
    #> 3 virginica  <tibble [2 × 4]>
  • In more complicated cases, you might want to use tidyselect directly:

    sel_vars <- function(df, cols) {
      tidyselect::eval_select(rlang::enquo(cols), df)
    }
    sel_vars(mini_iris, !Species)
    #> Sepal.Length  Sepal.Width Petal.Length  Petal.Width 
    #>            1            2            3            4

    Learn more in vignette("tidyselect").

Note that many tidyr functions use ... so you can easily select many variables, e.g. fill(df, x, y, z). I now believe that the disadvantages of this approach outweigh the benefits, and that this interface would have been better as fill(df, c(x, y, z)). For new functions that select columns, please just use a single argument and not ....

tidyr/inst/doc/in-packages.html0000644000176200001440000013373114553746310016217 0ustar liggesusers In packages

In packages

Introduction

This vignette serves two distinct, but related, purposes:

  • It documents general best practices for using tidyr in a package, inspired by using ggplot2 in packages.

  • It describes migration patterns for the transition from tidyr v0.8.3 to v1.0.0. This release includes breaking changes to nest() and unnest() in order to increase consistency within tidyr and with the rest of the tidyverse.

Before we go on, we’ll attach the packages we use, expose the version of tidyr, and make a small dataset to use in examples.

library(tidyr)
library(dplyr, warn.conflicts = FALSE)
library(purrr)

packageVersion("tidyr")
#> [1] '1.3.1'

mini_iris <- as_tibble(iris)[c(1, 2, 51, 52, 101, 102), ]
mini_iris
#> # A tibble: 6 × 5
#>   Sepal.Length Sepal.Width Petal.Length Petal.Width Species   
#>          <dbl>       <dbl>        <dbl>       <dbl> <fct>     
#> 1          5.1         3.5          1.4         0.2 setosa    
#> 2          4.9         3            1.4         0.2 setosa    
#> 3          7           3.2          4.7         1.4 versicolor
#> 4          6.4         3.2          4.5         1.5 versicolor
#> 5          6.3         3.3          6           2.5 virginica 
#> 6          5.8         2.7          5.1         1.9 virginica

Using tidyr in packages

Here we assume that you’re already familiar with using tidyr in functions, as described in vignette("programming.Rmd"). There are two important considerations when using tidyr in a package:

  • How to avoid R CMD CHECK notes when using fixed variable names.
  • How to alert yourself to upcoming changes in the development version of tidyr.

Fixed column names

If you know the column names, this code works in the same way regardless of whether its inside or outside of a package:

mini_iris %>% nest(
  petal = c(Petal.Length, Petal.Width), 
  sepal = c(Sepal.Length, Sepal.Width)
)
#> # A tibble: 3 × 3
#>   Species    petal            sepal           
#>   <fct>      <list>           <list>          
#> 1 setosa     <tibble [2 × 2]> <tibble [2 × 2]>
#> 2 versicolor <tibble [2 × 2]> <tibble [2 × 2]>
#> 3 virginica  <tibble [2 × 2]> <tibble [2 × 2]>

But R CMD check will warn about undefined global variables (Petal.Length, Petal.Width, Sepal.Length, and Sepal.Width), because it doesn’t know that nest() is looking for the variables inside of mini_iris (i.e. Petal.Length and friends are data-variables, not env-variables).

The easiest way to silence this note is to use all_of(). all_of() is a tidyselect helper (like starts_with(), ends_with(), etc.) that takes column names stored as strings:

mini_iris %>% nest(
  petal = all_of(c("Petal.Length", "Petal.Width")), 
  sepal = all_of(c("Sepal.Length", "Sepal.Width"))
)
#> # A tibble: 3 × 3
#>   Species    petal            sepal           
#>   <fct>      <list>           <list>          
#> 1 setosa     <tibble [2 × 2]> <tibble [2 × 2]>
#> 2 versicolor <tibble [2 × 2]> <tibble [2 × 2]>
#> 3 virginica  <tibble [2 × 2]> <tibble [2 × 2]>

Alternatively, you may want to use any_of() if it is OK that some of the specified variables cannot be found in the input data.

The tidyselect package offers an entire family of select helpers. You are probably already familiar with them from using dplyr::select().

Continuous integration

Hopefully you’ve already adopted continuous integration for your package, in which R CMD check (which includes your own tests) is run on a regular basis, e.g. every time you push changes to your package’s source on GitHub or similar. The tidyverse team currently relies most heavily on GitHub Actions, so that will be our example. usethis::use_github_action() can help you get started.

We recommend adding a workflow that targets the devel version of tidyr. When should you do this?

  • Always? If your package is tightly coupled to tidyr, consider leaving this in place all the time, so you know if changes in tidyr affect your package.

  • Right before a tidyr release? For everyone else, you could add (or re-activate an existing) tidyr-devel workflow during the period preceding a major tidyr release that has the potential for breaking changes, especially if you’ve been contacted during our reverse dependency checks.

Example of a GitHub Actions workflow that tests your package against the development version of tidyr:

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

name: R-CMD-check-tidyr-devel

jobs:
  R-CMD-check:
    runs-on: macOS-latest
    steps:
      - uses: actions/checkout@v2
      - uses: r-lib/actions/setup-r@v1
      - name: Install dependencies
        run: |
          install.packages(c("remotes", "rcmdcheck"))
          remotes::install_deps(dependencies = TRUE)
          remotes::install_github("tidyverse/tidyr")
        shell: Rscript {0}
      - name: Check
        run: rcmdcheck::rcmdcheck(args = "--no-manual", error_on = "error")
        shell: Rscript {0}

GitHub Actions are an evolving landscape, so you can always mine the workflows for tidyr itself (tidyverse/tidyr/.github/workflows) or the main r-lib/actions repo for ideas.

tidyr v0.8.3 -> v1.0.0

v1.0.0 makes considerable changes to the interface of nest() and unnest() in order to bring them in line with newer tidyverse conventions. I have tried to make the functions as backward compatible as possible and to give informative warning messages, but I could not cover 100% of use cases, so you may need to change your package code. This guide will help you do so with a minimum of pain.

Ideally, you’ll tweak your package so that it works with both tidyr 0.8.3 and tidyr 1.0.0. This makes life considerably easier because it means there’s no need to coordinate CRAN submissions - you can submit your package that works with both tidyr versions, before I submit tidyr to CRAN. This section describes our recommend practices for doing so, drawing from the general principles described in https://design.tidyverse.org/changes-multivers.html.

If you use continuous integration already, we strongly recommend adding a build that tests with the development version of tidyr; see above for details.

This section briefly describes how to run different code for different versions of tidyr, then goes through the major changes that might require workarounds:

  • nest() and unnest() get new interfaces.
  • nest() preserves groups.
  • nest_() and unnest_() are defunct.

If you’re struggling with a problem that’s not described here, please reach out via github or email so we can help out.

Conditional code

Sometimes you’ll be able to write code that works with v0.8.3 and v1.0.0. But this often requires code that’s not particularly natural for either version and you’d be better off to (temporarily) have separate code paths, each containing non-contrived code. You get to re-use your existing code in the “old” branch, which will eventually be phased out, and write clean, forward-looking code in the “new” branch.

The basic approach looks like this. First you define a function that returns TRUE for new versions of tidyr:

tidyr_new_interface <- function() {
  packageVersion("tidyr") > "0.8.99"
}

We highly recommend keeping this as a function because it provides an obvious place to jot any transition notes for your package, and it makes it easier to remove transitional code later on. Another benefit is that the tidyr version is determined at run time, not at build time, and will therefore detect your user’s current tidyr version.

Then in your functions, you use an if statement to call different code for different versions:

my_function_inside_a_package <- function(...) {
  # my code here

  if (tidyr_new_interface()) {
    # Freshly written code for v1.0.0
    out <- tidyr::nest(df, data = any_of(c("x", "y", "z")))
  } else {
    # Existing code for v0.8.3
    out <- tidyr::nest(df, x, y, z)
  }

  # more code here
}

If your new code uses a function that only exists in tidyr 1.0.0, you will get a NOTE from R CMD check: this is one of the few notes that you can explain in your CRAN submission comments. Just mention that it’s for forward compatibility with tidyr 1.0.0, and CRAN will let your package through.

New syntax for nest()

What changed:

  • The to-be-nested columns are no longer accepted as “loose parts”.
  • The new list-column’s name is no longer provided via the .key argument.
  • Now we use a construct like this: new_col = <something about existing cols>.

Why it changed:

  • The use of ... for metadata is a problematic pattern we’re moving away from. https://design.tidyverse.org/dots-data.html

  • The new_col = <something about existing cols> construct lets us create multiple nested list-columns at once (“multi-nest”).

    mini_iris %>% 
      nest(petal = matches("Petal"), sepal = matches("Sepal")) 
    #> # A tibble: 3 × 3
    #>   Species    petal            sepal           
    #>   <fct>      <list>           <list>          
    #> 1 setosa     <tibble [2 × 2]> <tibble [2 × 2]>
    #> 2 versicolor <tibble [2 × 2]> <tibble [2 × 2]>
    #> 3 virginica  <tibble [2 × 2]> <tibble [2 × 2]>

Before and after examples:

# v0.8.3
mini_iris %>% 
  nest(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width, .key = "my_data")

# v1.0.0
mini_iris %>% 
  nest(my_data = c(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width))

# v1.0.0 avoiding R CMD check NOTE
mini_iris %>% 
  nest(my_data = any_of(c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")))

# or equivalently:
mini_iris %>% 
  nest(my_data = !any_of("Species"))

If you need a quick and dirty fix without having to think, just call nest_legacy() instead of nest(). It’s the same as nest() in v0.8.3:

if (tidyr_new_interface()) {
  out <- tidyr::nest_legacy(df, x, y, z)
} else {
  out <- tidyr::nest(df, x, y, z)
}

New syntax for unnest()

What changed:

  • The to-be-unnested columns must now be specified explicitly, instead of defaulting to all list-columns. This also deprecates .drop and .preserve.

  • .sep has been deprecated and replaced with names_sep.

  • unnest() uses the emerging tidyverse standard to disambiguate duplicated names. Use names_repair = tidyr_legacy to request the previous approach.

  • .id has been deprecated because it can be easily replaced by creating the column of names prior to unnest(), e.g. with an upstream call to mutate().

    # v0.8.3
    df %>% unnest(x, .id = "id")
    
    # v1.0.0
    df %>% mutate(id = names(x)) %>% unnest(x)

Why it changed:

  • The use of ... for metadata is a problematic pattern we’re moving away from. https://design.tidyverse.org/dots-data.html

  • The changes to details arguments relate to features rolling out across multiple packages in the tidyverse. For example, ptype exposes prototype support from the new vctrs package. names_repair specifies what to do about duplicated or non-syntactic names, consistent with tibble and readxl.

Before and after:

nested <- mini_iris %>% 
  nest(my_data = c(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width))

# v0.8.3 automatically unnests list-cols
nested %>% unnest()

# v1.0.0 must be told which columns to unnest
nested %>% unnest(any_of("my_data"))

If you need a quick and dirty fix without having to think, just call unnest_legacy() instead of unnest(). It’s the same as unnest() in v0.8.3:

if (tidyr_new_interface()) {
  out <- tidyr::unnest_legacy(df)
} else {
  out <- tidyr::unnest(df)
}

nest() preserves groups

What changed:

  • nest() now preserves the groups present in the input.

Why it changed:

  • To reflect the growing support for grouped data frames, especially in recent releases of dplyr. See, for example, dplyr::group_modify(), group_map(), and friends.

If the fact that nest() now preserves groups is problematic downstream, you have a few choices:

  • Apply ungroup() to the result. This level of pragmatism suggests, however, you should at least consider the next two options.

  • You should never have grouped in the first place. Eliminate the group_by() call and specify which columns should be nested versus not nested directly in nest().

  • Adjust the downstream code to accommodate grouping.

Imagine we used group_by() then nest() on mini_iris, then we computed on the list-column outside the data frame.

(df <- mini_iris %>% 
   group_by(Species) %>% 
   nest())
#> # A tibble: 3 × 2
#> # Groups:   Species [3]
#>   Species    data            
#>   <fct>      <list>          
#> 1 setosa     <tibble [2 × 4]>
#> 2 versicolor <tibble [2 × 4]>
#> 3 virginica  <tibble [2 × 4]>
(external_variable <- map_int(df$data, nrow))
#> [1] 2 2 2

And now we try to add that back to the data post hoc:

df %>% 
  mutate(n_rows = external_variable)
#> Error in `mutate()`:
#> ℹ In argument: `n_rows = external_variable`.
#> ℹ In group 1: `Species = setosa`.
#> Caused by error:
#> ! `n_rows` must be size 1, not 3.

This fails because df is grouped and mutate() is group-aware, so it’s hard to add a completely external variable. Other than pragmatically ungroup()ing, what can we do? One option is to work inside the data frame, i.e. bring the map() inside the mutate(), and design the problem away:

df %>% 
  mutate(n_rows = map_int(data, nrow))
#> # A tibble: 3 × 3
#> # Groups:   Species [3]
#>   Species    data             n_rows
#>   <fct>      <list>            <int>
#> 1 setosa     <tibble [2 × 4]>      2
#> 2 versicolor <tibble [2 × 4]>      2
#> 3 virginica  <tibble [2 × 4]>      2

If, somehow, the grouping seems appropriate AND working inside the data frame is not an option, tibble::add_column() is group-unaware. It lets you add external data to a grouped data frame.

df %>% 
  tibble::add_column(n_rows = external_variable)
#> # A tibble: 3 × 3
#> # Groups:   Species [3]
#>   Species    data             n_rows
#>   <fct>      <list>            <int>
#> 1 setosa     <tibble [2 × 4]>      2
#> 2 versicolor <tibble [2 × 4]>      2
#> 3 virginica  <tibble [2 × 4]>      2

nest_() and unnest_() are defunct

What changed:

  • nest_() and unnest_() no longer work

Why it changed:

  • We are transitioning the whole tidyverse to the powerful tidy eval framework. Therefore, we are gradually removing all previous solutions:
    • Specialized standard evaluation versions of functions, e.g., foo_() as a complement to foo().
    • The older lazyeval framework.

Before and after:

# v0.8.3
mini_iris %>% 
  nest_(
    key_col = "my_data",
    nest_cols = c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")
  )

nested %>% unnest_(~ my_data)

# v1.0.0
mini_iris %>% 
  nest(my_data = any_of(c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")))

nested %>% unnest(any_of("my_data"))
tidyr/inst/doc/in-packages.R0000644000176200001440000000767514553746307015471 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(tidyr) library(dplyr, warn.conflicts = FALSE) library(purrr) packageVersion("tidyr") mini_iris <- as_tibble(iris)[c(1, 2, 51, 52, 101, 102), ] mini_iris ## ----------------------------------------------------------------------------- mini_iris %>% nest( petal = c(Petal.Length, Petal.Width), sepal = c(Sepal.Length, Sepal.Width) ) ## ----------------------------------------------------------------------------- mini_iris %>% nest( petal = all_of(c("Petal.Length", "Petal.Width")), sepal = all_of(c("Sepal.Length", "Sepal.Width")) ) ## ----------------------------------------------------------------------------- tidyr_new_interface <- function() { packageVersion("tidyr") > "0.8.99" } ## ----eval = FALSE------------------------------------------------------------- # my_function_inside_a_package <- function(...) 
# # my code here # # if (tidyr_new_interface()) { # # Freshly written code for v1.0.0 # out <- tidyr::nest(df, data = any_of(c("x", "y", "z"))) # } else { # # Existing code for v0.8.3 # out <- tidyr::nest(df, x, y, z) # } # # # more code here # } ## ----------------------------------------------------------------------------- mini_iris %>% nest(petal = matches("Petal"), sepal = matches("Sepal")) ## ----eval = FALSE------------------------------------------------------------- # # v0.8.3 # mini_iris %>% # nest(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width, .key = "my_data") # # # v1.0.0 # mini_iris %>% # nest(my_data = c(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width)) # # # v1.0.0 avoiding R CMD check NOTE # mini_iris %>% # nest(my_data = any_of(c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width"))) # # # or equivalently: # mini_iris %>% # nest(my_data = !any_of("Species")) ## ----eval = FALSE------------------------------------------------------------- # if (tidyr_new_interface()) { # out <- tidyr::nest_legacy(df, x, y, z) # } else { # out <- tidyr::nest(df, x, y, z) # } ## ----eval = FALSE------------------------------------------------------------- # # v0.8.3 # df %>% unnest(x, .id = "id") # # # v1.0.0 # df %>% mutate(id = names(x)) %>% unnest(x)) ## ----eval = FALSE------------------------------------------------------------- # nested <- mini_iris %>% # nest(my_data = c(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width)) # # # v0.8.3 automatically unnests list-cols # nested %>% unnest() # # # v1.0.0 must be told which columns to unnest # nested %>% unnest(any_of("my_data")) ## ----eval = FALSE------------------------------------------------------------- # if (tidyr_new_interface()) { # out <- tidyr::unnest_legacy(df) # } else { # out <- tidyr::unnest(df) # } ## ----------------------------------------------------------------------------- (df <- mini_iris %>% group_by(Species) %>% nest()) (external_variable <- map_int(df$data, nrow)) ## 
----error = TRUE------------------------------------------------------------- df %>% mutate(n_rows = external_variable) ## ----------------------------------------------------------------------------- df %>% mutate(n_rows = map_int(data, nrow)) ## ----------------------------------------------------------------------------- df %>% tibble::add_column(n_rows = external_variable) ## ----eval = FALSE------------------------------------------------------------- # # v0.8.3 # mini_iris %>% # nest_( # key_col = "my_data", # nest_cols = c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width") # ) # # nested %>% unnest_(~ my_data) # # # v1.0.0 # mini_iris %>% # nest(my_data = any_of(c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width"))) # # nested %>% unnest(any_of("my_data")) tidyr/inst/doc/tidy-data.html0000644000176200001440000022177514553746313015726 0ustar liggesusers Tidy data

Tidy data

(This is an informal and code heavy version of the full tidy data paper. Please refer to that for more details.)

Data tidying

It is often said that 80% of data analysis is spent on the cleaning and preparing data. And it’s not just a first step, but it must be repeated many times over the course of analysis as new problems come to light or new data is collected. To get a handle on the problem, this paper focuses on a small, but important, aspect of data cleaning that I call data tidying: structuring datasets to facilitate analysis.

The principles of tidy data provide a standard way to organise data values within a dataset. A standard makes initial data cleaning easier because you don’t need to start from scratch and reinvent the wheel every time. The tidy data standard has been designed to facilitate initial exploration and analysis of the data, and to simplify the development of data analysis tools that work well together. Current tools often require translation. You have to spend time munging the output from one tool so you can input it into another. Tidy datasets and tidy tools work hand in hand to make data analysis easier, allowing you to focus on the interesting domain problem, not on the uninteresting logistics of data.

Defining tidy data

Happy families are all alike; every unhappy family is unhappy in its own way — Leo Tolstoy

Like families, tidy datasets are all alike but every messy dataset is messy in its own way. Tidy datasets provide a standardized way to link the structure of a dataset (its physical layout) with its semantics (its meaning). In this section, I’ll provide some standard vocabulary for describing the structure and semantics of a dataset, and then use those definitions to define tidy data.

Data structure

Most statistical datasets are data frames made up of rows and columns. The columns are almost always labeled and the rows are sometimes labeled. The following code provides some data about an imaginary classroom in a format commonly seen in the wild. The table has three columns and four rows, and both rows and columns are labeled.

library(tibble)
classroom <- tribble(
  ~name,    ~quiz1, ~quiz2, ~test1,
  "Billy",  NA,     "D",    "C",
  "Suzy",   "F",    NA,     NA,
  "Lionel", "B",    "C",    "B",
  "Jenny",  "A",    "A",    "B"
  )
classroom
#> # A tibble: 4 × 4
#>   name   quiz1 quiz2 test1
#>   <chr>  <chr> <chr> <chr>
#> 1 Billy  <NA>  D     C    
#> 2 Suzy   F     <NA>  <NA> 
#> 3 Lionel B     C     B    
#> 4 Jenny  A     A     B

There are many ways to structure the same underlying data. The following table shows the same data as above, but the rows and columns have been transposed.

tribble(
  ~assessment, ~Billy, ~Suzy, ~Lionel, ~Jenny,
  "quiz1",     NA,     "F",   "B",     "A",
  "quiz2",     "D",    NA,    "C",     "A",
  "test1",     "C",    NA,    "B",     "B"
  )
#> # A tibble: 3 × 5
#>   assessment Billy Suzy  Lionel Jenny
#>   <chr>      <chr> <chr> <chr>  <chr>
#> 1 quiz1      <NA>  F     B      A    
#> 2 quiz2      D     <NA>  C      A    
#> 3 test1      C     <NA>  B      B

The data is the same, but the layout is different. Our vocabulary of rows and columns is simply not rich enough to describe why the two tables represent the same data. In addition to appearance, we need a way to describe the underlying semantics, or meaning, of the values displayed in the table.

Data semantics

A dataset is a collection of values, usually either numbers (if quantitative) or strings (if qualitative). Values are organised in two ways. Every value belongs to a variable and an observation. A variable contains all values that measure the same underlying attribute (like height, temperature, duration) across units. An observation contains all values measured on the same unit (like a person, or a day, or a race) across attributes.

A tidy version of the classroom data looks like this: (you’ll learn how the functions work a little later)

library(tidyr)
library(dplyr)
classroom2 <- classroom %>% 
  pivot_longer(quiz1:test1, names_to = "assessment", values_to = "grade") %>% 
  arrange(name, assessment)
classroom2
#> # A tibble: 12 × 3
#>   name  assessment grade
#>   <chr> <chr>      <chr>
#> 1 Billy quiz1      <NA> 
#> 2 Billy quiz2      D    
#> 3 Billy test1      C    
#> 4 Jenny quiz1      A    
#> 5 Jenny quiz2      A    
#> 6 Jenny test1      B    
#> # ℹ 6 more rows

This makes the values, variables, and observations more clear. The dataset contains 36 values representing three variables and 12 observations. The variables are:

  1. name, with four possible values (Billy, Suzy, Lionel, and Jenny).

  2. assessment, with three possible values (quiz1, quiz2, and test1).

  3. grade, with five or six values depending on how you think of the missing value (A, B, C, D, F, NA).

The tidy data frame explicitly tells us the definition of an observation. In this classroom, every combination of name and assessment is a single measured observation. The dataset also informs us of missing values, which can and do have meaning. Billy was absent for the first quiz, but tried to salvage his grade. Suzy failed the first quiz, so she decided to drop the class. To calculate Billy’s final grade, we might replace this missing value with an F (or he might get a second chance to take the quiz). However, if we want to know the class average for Test 1, dropping Suzy’s structural missing value would be more appropriate than imputing a new value.

For a given dataset, it’s usually easy to figure out what are observations and what are variables, but it is surprisingly difficult to precisely define variables and observations in general. For example, if the columns in the classroom data were height and weight we would have been happy to call them variables. If the columns were height and width, it would be less clear cut, as we might think of height and width as values of a dimension variable. If the columns were home phone and work phone, we could treat these as two variables, but in a fraud detection environment we might want variables phone number and number type because the use of one phone number for multiple people might suggest fraud. A general rule of thumb is that it is easier to describe functional relationships between variables (e.g., z is a linear combination of x and y, density is the ratio of weight to volume) than between rows, and it is easier to make comparisons between groups of observations (e.g., average of group a vs. average of group b) than between groups of columns.

In a given analysis, there may be multiple levels of observation. For example, in a trial of new allergy medication we might have three observational types: demographic data collected from each person (age, sex, race), medical data collected from each person on each day (number of sneezes, redness of eyes), and meteorological data collected on each day (temperature, pollen count).

Variables may change over the course of analysis. Often the variables in the raw data are very fine grained, and may add extra modelling complexity for little explanatory gain. For example, many surveys ask variations on the same question to better get at an underlying trait. In early stages of analysis, variables correspond to questions. In later stages, you change focus to traits, computed by averaging together multiple questions. This considerably simplifies analysis because you don’t need a hierarchical model, and you can often pretend that the data is continuous, not discrete.

Tidy data

Tidy data is a standard way of mapping the meaning of a dataset to its structure. A dataset is messy or tidy depending on how rows, columns and tables are matched up with observations, variables and types. In tidy data:

  1. Each variable is a column; each column is a variable.

  2. Each observation is a row; each row is an observation.

  3. Each value is a cell; each cell is a single value.

This is Codd’s 3rd normal form, but with the constraints framed in statistical language, and the focus put on a single dataset rather than the many connected datasets common in relational databases. Messy data is any other arrangement of the data.

Tidy data makes it easy for an analyst or a computer to extract needed variables because it provides a standard way of structuring a dataset. Compare the different versions of the classroom data: in the messy version you need to use different strategies to extract different variables. This slows analysis and invites errors. If you consider how many data analysis operations involve all of the values in a variable (every aggregation function), you can see how important it is to extract these values in a simple, standard way. Tidy data is particularly well suited for vectorised programming languages like R, because the layout ensures that values of different variables from the same observation are always paired.

While the order of variables and observations does not affect analysis, a good ordering makes it easier to scan the raw values. One way of organising variables is by their role in the analysis: are values fixed by the design of the data collection, or are they measured during the course of the experiment? Fixed variables describe the experimental design and are known in advance. Computer scientists often call fixed variables dimensions, and statisticians usually denote them with subscripts on random variables. Measured variables are what we actually measure in the study. Fixed variables should come first, followed by measured variables, each ordered so that related variables are contiguous. Rows can then be ordered by the first variable, breaking ties with the second and subsequent (fixed) variables. This is the convention adopted by all tabular displays in this paper.

Tidying messy datasets

Real datasets can, and often do, violate the three precepts of tidy data in almost every way imaginable. While occasionally you do get a dataset that you can start analysing immediately, this is the exception, not the rule. This section describes the five most common problems with messy datasets, along with their remedies:

  • Column headers are values, not variable names.

  • Multiple variables are stored in one column.

  • Variables are stored in both rows and columns.

  • Multiple types of observational units are stored in the same table.

  • A single observational unit is stored in multiple tables.

Surprisingly, most messy datasets, including types of messiness not explicitly described above, can be tidied with a small set of tools: pivoting (longer and wider) and separating. The following sections illustrate each problem with a real dataset that I have encountered, and show how to tidy them.

Column headers are values, not variable names

A common type of messy dataset is tabular data designed for presentation, where variables form both the rows and columns, and column headers are values, not variable names. While I would call this arrangement messy, in some cases it can be extremely useful. It provides efficient storage for completely crossed designs, and it can lead to extremely efficient computation if desired operations can be expressed as matrix operations.

The following code shows a subset of a typical dataset of this form. This dataset explores the relationship between income and religion in the US. It comes from a report produced by the Pew Research Center, an American think-tank that collects data on attitudes to topics ranging from religion to the internet, and produces many reports that contain datasets in this format.

relig_income
#> # A tibble: 18 × 11
#>   religion  `<$10k` `$10-20k` `$20-30k` `$30-40k` `$40-50k` `$50-75k` `$75-100k`
#>   <chr>       <dbl>     <dbl>     <dbl>     <dbl>     <dbl>     <dbl>      <dbl>
#> 1 Agnostic       27        34        60        81        76       137        122
#> 2 Atheist        12        27        37        52        35        70         73
#> 3 Buddhist       27        21        30        34        33        58         62
#> 4 Catholic      418       617       732       670       638      1116        949
#> 5 Don’t kn…      15        14        15        11        10        35         21
#> 6 Evangeli…     575       869      1064       982       881      1486        949
#> # ℹ 12 more rows
#> # ℹ 3 more variables: `$100-150k` <dbl>, `>150k` <dbl>,
#> #   `Don't know/refused` <dbl>

This dataset has three variables, religion, income and frequency. To tidy it, we need to pivot the non-variable columns into a two-column key-value pair. This action is often described as making a wide dataset longer (or taller).

When pivoting variables, we need to provide the name of the new key-value columns to create. After defining the columns to pivot (every column except for religion), you will need the name of the key column, which is the name of the variable defined by the values of the column headings. In this case, it’s income. The second argument is the name of the value column, frequency.

relig_income %>% 
  pivot_longer(-religion, names_to = "income", values_to = "frequency")
#> # A tibble: 180 × 3
#>   religion income  frequency
#>   <chr>    <chr>       <dbl>
#> 1 Agnostic <$10k          27
#> 2 Agnostic $10-20k        34
#> 3 Agnostic $20-30k        60
#> 4 Agnostic $30-40k        81
#> 5 Agnostic $40-50k        76
#> 6 Agnostic $50-75k       137
#> # ℹ 174 more rows

This form is tidy because each column represents a variable and each row represents an observation, in this case a demographic unit corresponding to a combination of religion and income.

This format is also used to record regularly spaced observations over time. For example, the Billboard dataset shown below records the date a song first entered the billboard top 100. It has variables for artist, track, date.entered, rank and week. The rank in each week after it enters the top 100 is recorded in 76 columns, wk1 to wk76. This form of storage is not tidy, but it is useful for data entry. It reduces duplication since otherwise each song in each week would need its own row, and song metadata like title and artist would need to be repeated. This will be discussed in more depth in multiple types.

billboard
#> # A tibble: 317 × 79
#>   artist      track date.entered   wk1   wk2   wk3   wk4   wk5   wk6   wk7   wk8
#>   <chr>       <chr> <date>       <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#> 1 2 Pac       Baby… 2000-02-26      87    82    72    77    87    94    99    NA
#> 2 2Ge+her     The … 2000-09-02      91    87    92    NA    NA    NA    NA    NA
#> 3 3 Doors Do… Kryp… 2000-04-08      81    70    68    67    66    57    54    53
#> 4 3 Doors Do… Loser 2000-10-21      76    76    72    69    67    65    55    59
#> 5 504 Boyz    Wobb… 2000-04-15      57    34    25    17    17    31    36    49
#> 6 98^0        Give… 2000-08-19      51    39    34    26    26    19     2     2
#> # ℹ 311 more rows
#> # ℹ 68 more variables: wk9 <dbl>, wk10 <dbl>, wk11 <dbl>, wk12 <dbl>,
#> #   wk13 <dbl>, wk14 <dbl>, wk15 <dbl>, wk16 <dbl>, wk17 <dbl>, wk18 <dbl>,
#> #   wk19 <dbl>, wk20 <dbl>, wk21 <dbl>, wk22 <dbl>, wk23 <dbl>, wk24 <dbl>,
#> #   wk25 <dbl>, wk26 <dbl>, wk27 <dbl>, wk28 <dbl>, wk29 <dbl>, wk30 <dbl>,
#> #   wk31 <dbl>, wk32 <dbl>, wk33 <dbl>, wk34 <dbl>, wk35 <dbl>, wk36 <dbl>,
#> #   wk37 <dbl>, wk38 <dbl>, wk39 <dbl>, wk40 <dbl>, wk41 <dbl>, wk42 <dbl>, …

To tidy this dataset, we first use pivot_longer() to make the dataset longer. We transform the columns from wk1 to wk76, making a new column for their names, week, and a new value for their values, rank:

billboard2 <- billboard %>% 
  pivot_longer(
    wk1:wk76, 
    names_to = "week", 
    values_to = "rank", 
    values_drop_na = TRUE
  )
billboard2
#> # A tibble: 5,307 × 5
#>   artist track                   date.entered week   rank
#>   <chr>  <chr>                   <date>       <chr> <dbl>
#> 1 2 Pac  Baby Don't Cry (Keep... 2000-02-26   wk1      87
#> 2 2 Pac  Baby Don't Cry (Keep... 2000-02-26   wk2      82
#> 3 2 Pac  Baby Don't Cry (Keep... 2000-02-26   wk3      72
#> 4 2 Pac  Baby Don't Cry (Keep... 2000-02-26   wk4      77
#> 5 2 Pac  Baby Don't Cry (Keep... 2000-02-26   wk5      87
#> 6 2 Pac  Baby Don't Cry (Keep... 2000-02-26   wk6      94
#> # ℹ 5,301 more rows

Here we use values_drop_na = TRUE to drop any missing values from the rank column. In this data, missing values represent weeks that the song wasn’t in the charts, so can be safely dropped.

In this case it’s also nice to do a little cleaning, converting the week variable to a number, and figuring out the date corresponding to each week on the charts:

billboard3 <- billboard2 %>%
  mutate(
    week = as.integer(gsub("wk", "", week)),
    date = as.Date(date.entered) + 7 * (week - 1),
    date.entered = NULL
  )
billboard3
#> # A tibble: 5,307 × 5
#>   artist track                    week  rank date      
#>   <chr>  <chr>                   <int> <dbl> <date>    
#> 1 2 Pac  Baby Don't Cry (Keep...     1    87 2000-02-26
#> 2 2 Pac  Baby Don't Cry (Keep...     2    82 2000-03-04
#> 3 2 Pac  Baby Don't Cry (Keep...     3    72 2000-03-11
#> 4 2 Pac  Baby Don't Cry (Keep...     4    77 2000-03-18
#> 5 2 Pac  Baby Don't Cry (Keep...     5    87 2000-03-25
#> 6 2 Pac  Baby Don't Cry (Keep...     6    94 2000-04-01
#> # ℹ 5,301 more rows

Finally, it’s always a good idea to sort the data. We could do it by artist, track and week:

billboard3 %>% arrange(artist, track, week)
#> # A tibble: 5,307 × 5
#>   artist track                    week  rank date      
#>   <chr>  <chr>                   <int> <dbl> <date>    
#> 1 2 Pac  Baby Don't Cry (Keep...     1    87 2000-02-26
#> 2 2 Pac  Baby Don't Cry (Keep...     2    82 2000-03-04
#> 3 2 Pac  Baby Don't Cry (Keep...     3    72 2000-03-11
#> 4 2 Pac  Baby Don't Cry (Keep...     4    77 2000-03-18
#> 5 2 Pac  Baby Don't Cry (Keep...     5    87 2000-03-25
#> 6 2 Pac  Baby Don't Cry (Keep...     6    94 2000-04-01
#> # ℹ 5,301 more rows

Or by date and rank:

billboard3 %>% arrange(date, rank)
#> # A tibble: 5,307 × 5
#>   artist   track   week  rank date      
#>   <chr>    <chr>  <int> <dbl> <date>    
#> 1 Lonestar Amazed     1    81 1999-06-05
#> 2 Lonestar Amazed     2    54 1999-06-12
#> 3 Lonestar Amazed     3    44 1999-06-19
#> 4 Lonestar Amazed     4    39 1999-06-26
#> 5 Lonestar Amazed     5    38 1999-07-03
#> 6 Lonestar Amazed     6    33 1999-07-10
#> # ℹ 5,301 more rows

Multiple variables stored in one column

After pivoting columns, the key column is sometimes a combination of multiple underlying variable names. This happens in the tb (tuberculosis) dataset, shown below. This dataset comes from the World Health Organisation, and records the counts of confirmed tuberculosis cases by country, year, and demographic group. The demographic groups are broken down by sex (m, f) and age (0-14, 15-24, 25-34, 35-44, 45-54, 55-64, unknown).

tb <- as_tibble(read.csv("tb.csv", stringsAsFactors = FALSE))
tb
#> # A tibble: 5,769 × 22
#>   iso2   year   m04  m514  m014 m1524 m2534 m3544 m4554 m5564   m65    mu   f04
#>   <chr> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int>
#> 1 AD     1989    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA
#> 2 AD     1990    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA
#> 3 AD     1991    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA
#> 4 AD     1992    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA
#> 5 AD     1993    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA
#> 6 AD     1994    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA    NA
#> # ℹ 5,763 more rows
#> # ℹ 9 more variables: f514 <int>, f014 <int>, f1524 <int>, f2534 <int>,
#> #   f3544 <int>, f4554 <int>, f5564 <int>, f65 <int>, fu <int>

First we use pivot_longer() to gather up the non-variable columns:

tb2 <- tb %>% 
  pivot_longer(
    !c(iso2, year), 
    names_to = "demo", 
    values_to = "n", 
    values_drop_na = TRUE
  )
tb2
#> # A tibble: 35,750 × 4
#>   iso2   year demo      n
#>   <chr> <int> <chr> <int>
#> 1 AD     1996 m014      0
#> 2 AD     1996 m1524     0
#> 3 AD     1996 m2534     0
#> 4 AD     1996 m3544     4
#> 5 AD     1996 m4554     1
#> 6 AD     1996 m5564     0
#> # ℹ 35,744 more rows

Column headers in this format are often separated by a non-alphanumeric character (e.g. ., -, _, :), or have a fixed width format, like in this dataset. separate() makes it easy to split a compound variable into individual variables. You can either pass it a regular expression to split on (the default is to split on non-alphanumeric characters), or a vector of character positions. In this case we want to split after the first character:

tb3 <- tb2 %>% 
  separate(demo, c("sex", "age"), 1)
tb3
#> # A tibble: 35,750 × 5
#>   iso2   year sex   age       n
#>   <chr> <int> <chr> <chr> <int>
#> 1 AD     1996 m     014       0
#> 2 AD     1996 m     1524      0
#> 3 AD     1996 m     2534      0
#> 4 AD     1996 m     3544      4
#> 5 AD     1996 m     4554      1
#> 6 AD     1996 m     5564      0
#> # ℹ 35,744 more rows

Storing the values in this form resolves a problem in the original data. We want to compare rates, not counts, which means we need to know the population. In the original format, there is no easy way to add a population variable. It has to be stored in a separate table, which makes it hard to correctly match populations to counts. In tidy form, adding variables for population and rate is easy because they’re just additional columns.

In this case, we could also do the transformation in a single step by supplying multiple column names to names_to and also supplying a grouped regular expression to names_pattern:

tb %>% pivot_longer(
  !c(iso2, year), 
  names_to = c("sex", "age"), 
  names_pattern = "(.)(.+)",
  values_to = "n", 
  values_drop_na = TRUE
)
#> # A tibble: 35,750 × 5
#>   iso2   year sex   age       n
#>   <chr> <int> <chr> <chr> <int>
#> 1 AD     1996 m     014       0
#> 2 AD     1996 m     1524      0
#> 3 AD     1996 m     2534      0
#> 4 AD     1996 m     3544      4
#> 5 AD     1996 m     4554      1
#> 6 AD     1996 m     5564      0
#> # ℹ 35,744 more rows

Variables are stored in both rows and columns

The most complicated form of messy data occurs when variables are stored in both rows and columns. The code below loads daily weather data from the Global Historical Climatology Network for one weather station (MX17004) in Mexico for five months in 2010.

weather <- as_tibble(read.csv("weather.csv", stringsAsFactors = FALSE))
weather
#> # A tibble: 22 × 35
#>   id       year month element    d1    d2    d3    d4    d5    d6    d7    d8
#>   <chr>   <int> <int> <chr>   <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#> 1 MX17004  2010     1 tmax       NA  NA    NA      NA  NA      NA    NA    NA
#> 2 MX17004  2010     1 tmin       NA  NA    NA      NA  NA      NA    NA    NA
#> 3 MX17004  2010     2 tmax       NA  27.3  24.1    NA  NA      NA    NA    NA
#> 4 MX17004  2010     2 tmin       NA  14.4  14.4    NA  NA      NA    NA    NA
#> 5 MX17004  2010     3 tmax       NA  NA    NA      NA  32.1    NA    NA    NA
#> 6 MX17004  2010     3 tmin       NA  NA    NA      NA  14.2    NA    NA    NA
#> # ℹ 16 more rows
#> # ℹ 23 more variables: d9 <lgl>, d10 <dbl>, d11 <dbl>, d12 <lgl>, d13 <dbl>,
#> #   d14 <dbl>, d15 <dbl>, d16 <dbl>, d17 <dbl>, d18 <lgl>, d19 <lgl>,
#> #   d20 <lgl>, d21 <lgl>, d22 <lgl>, d23 <dbl>, d24 <lgl>, d25 <dbl>,
#> #   d26 <dbl>, d27 <dbl>, d28 <dbl>, d29 <dbl>, d30 <dbl>, d31 <dbl>

It has variables in individual columns (id, year, month), spread across columns (day, d1-d31) and across rows (tmin, tmax) (minimum and maximum temperature). Months with fewer than 31 days have structural missing values for the last day(s) of the month.

To tidy this dataset we first use pivot_longer to gather the day columns:

weather2 <- weather %>% 
  pivot_longer(
    d1:d31, 
    names_to = "day", 
    values_to = "value", 
    values_drop_na = TRUE
  ) 
weather2
#> # A tibble: 66 × 6
#>   id       year month element day   value
#>   <chr>   <int> <int> <chr>   <chr> <dbl>
#> 1 MX17004  2010     1 tmax    d30    27.8
#> 2 MX17004  2010     1 tmin    d30    14.5
#> 3 MX17004  2010     2 tmax    d2     27.3
#> 4 MX17004  2010     2 tmax    d3     24.1
#> 5 MX17004  2010     2 tmax    d11    29.7
#> 6 MX17004  2010     2 tmax    d23    29.9
#> # ℹ 60 more rows

For presentation, I’ve dropped the missing values, making them implicit rather than explicit. This is ok because we know how many days are in each month and can easily reconstruct the explicit missing values.

We’ll also do a little cleaning:

weather3 <- weather2 %>% 
  mutate(day = as.integer(gsub("d", "", day))) %>%
  select(id, year, month, day, element, value)
weather3
#> # A tibble: 66 × 6
#>   id       year month   day element value
#>   <chr>   <int> <int> <int> <chr>   <dbl>
#> 1 MX17004  2010     1    30 tmax     27.8
#> 2 MX17004  2010     1    30 tmin     14.5
#> 3 MX17004  2010     2     2 tmax     27.3
#> 4 MX17004  2010     2     3 tmax     24.1
#> 5 MX17004  2010     2    11 tmax     29.7
#> 6 MX17004  2010     2    23 tmax     29.9
#> # ℹ 60 more rows

This dataset is mostly tidy, but the element column is not a variable; it stores the names of variables. (Not shown in this example are the other meteorological variables prcp (precipitation) and snow (snowfall)). Fixing this requires widening the data: pivot_wider() is the inverse of pivot_longer(), pivoting element and value back out across multiple columns:

weather3 %>% 
  pivot_wider(names_from = element, values_from = value)
#> # A tibble: 33 × 6
#>   id       year month   day  tmax  tmin
#>   <chr>   <int> <int> <int> <dbl> <dbl>
#> 1 MX17004  2010     1    30  27.8  14.5
#> 2 MX17004  2010     2     2  27.3  14.4
#> 3 MX17004  2010     2     3  24.1  14.4
#> 4 MX17004  2010     2    11  29.7  13.4
#> 5 MX17004  2010     2    23  29.9  10.7
#> 6 MX17004  2010     3     5  32.1  14.2
#> # ℹ 27 more rows

This form is tidy: there’s one variable in each column, and each row represents one day.

Multiple types in one table

Datasets often involve values collected at multiple levels, on different types of observational units. During tidying, each type of observational unit should be stored in its own table. This is closely related to the idea of database normalisation, where each fact is expressed in only one place. It’s important because otherwise inconsistencies can arise.

The billboard dataset actually contains observations on two types of observational units: the song and its rank in each week. This manifests itself through the duplication of facts about the song: artist is repeated many times.

This dataset needs to be broken down into two pieces: a song dataset which stores artist and song name, and a ranking dataset which gives the rank of the song in each week. We first extract a song dataset:

song <- billboard3 %>% 
  distinct(artist, track) %>%
  mutate(song_id = row_number())
song
#> # A tibble: 317 × 3
#>   artist       track                   song_id
#>   <chr>        <chr>                     <int>
#> 1 2 Pac        Baby Don't Cry (Keep...       1
#> 2 2Ge+her      The Hardest Part Of ...       2
#> 3 3 Doors Down Kryptonite                    3
#> 4 3 Doors Down Loser                         4
#> 5 504 Boyz     Wobble Wobble                 5
#> 6 98^0         Give Me Just One Nig...       6
#> # ℹ 311 more rows

Then use that to make a rank dataset by replacing repeated song facts with a pointer to song details (a unique song id):

rank <- billboard3 %>%
  left_join(song, c("artist", "track")) %>%
  select(song_id, date, week, rank)
rank
#> # A tibble: 5,307 × 4
#>   song_id date        week  rank
#>     <int> <date>     <int> <dbl>
#> 1       1 2000-02-26     1    87
#> 2       1 2000-03-04     2    82
#> 3       1 2000-03-11     3    72
#> 4       1 2000-03-18     4    77
#> 5       1 2000-03-25     5    87
#> 6       1 2000-04-01     6    94
#> # ℹ 5,301 more rows

You could also imagine a week dataset which would record background information about the week, maybe the total number of songs sold or similar “demographic” information.

Normalisation is useful for tidying and eliminating inconsistencies. However, there are few data analysis tools that work directly with relational data, so analysis usually also requires denormalisation or merging the datasets back into one table.

One type in multiple tables

It’s also common to find data values about a single type of observational unit spread out over multiple tables or files. These tables and files are often split up by another variable, so that each represents a single year, person, or location. As long as the format for individual records is consistent, this is an easy problem to fix:

  1. Read the files into a list of tables.

  2. For each table, add a new column that records the original file name (the file name is often the value of an important variable).

  3. Combine all tables into a single table.

Purrr makes this straightforward in R. The following code generates a vector of file names in a directory (data/) which match a regular expression (ends in .csv). Next we name each element of the vector with the name of the file. We do this because map_dfr() will preserve the names in the following step, ensuring that each row in the final data frame is labeled with its source. Finally, map_dfr() loops over each path, reading in the csv file and combining the results into a single data frame.

library(purrr)
paths <- dir("data", pattern = "\\.csv$", full.names = TRUE)
names(paths) <- basename(paths)
map_dfr(paths, read.csv, stringsAsFactors = FALSE, .id = "filename")

Once you have a single table, you can perform additional tidying as needed. An example of this type of cleaning can be found at https://github.com/hadley/data-baby-names which takes 129 yearly baby name tables provided by the US Social Security Administration and combines them into a single file.

A more complicated situation occurs when the dataset structure changes over time. For example, the datasets may contain different variables, the same variables with different names, different file formats, or different conventions for missing values. This may require you to tidy each file individually (or, if you’re lucky, in small groups) and then combine them once tidied. An example of this type of tidying is illustrated in https://github.com/hadley/data-fuel-economy, which shows the tidying of epa fuel economy data for over 50,000 cars from 1978 to 2008. The raw data is available online, but each year is stored in a separate file and there are four major formats with many minor variations, making tidying this dataset a considerable challenge.

tidyr/inst/doc/programming.Rmd0000644000176200001440000001173614553565525016144 0ustar liggesusers--- title: "Programming with tidyr" output: rmarkdown::html_vignette description: | Notes on programming with tidy evaluation as it relates to tidyr. vignette: > %\VignetteIndexEntry{Programming with tidyr} %\VignetteEngine{knitr::rmarkdown} %\usepackage[utf8]{inputenc} --- ```{r setup, echo = FALSE, message = FALSE} knitr::opts_chunk$set(collapse = TRUE, comment = "#>") options(tibble.print_min = 6L, tibble.print_max = 6L) set.seed(1014) # Manually "import"; only needed for old dplyr which uses old tidyselect # which doesn't attach automatically in tidy-select contexts all_of <- tidyselect::all_of ``` ## Introduction Most tidyr verbs use **tidy evaluation** to make interactive data exploration fast and fluid. Tidy evaluation is a special type of non-standard evaluation used throughout the tidyverse. Here's some typical tidyr code: ```{r} library(tidyr) iris %>% nest(data = !Species) ``` Tidy evaluation is why we can use `!Species` to say "all the columns except `Species`", without having to quote the column name (`"Species"`) or refer to the enclosing data frame (`iris$Species`). Two basic forms of tidy evaluation are used in tidyr: * **Tidy selection**: `drop_na()`, `fill()`, `pivot_longer()`/`pivot_wider()`, `nest()`/`unnest()`, `separate()`/`extract()`, and `unite()` let you select variables based on position, name, or type (e.g. `1:3`, `starts_with("x")`, or `is.numeric`). Literally, you can use all the same techniques as with `dplyr::select()`. * **Data masking**: `expand()`, `crossing()` and `nesting()` let you refer to use data variables as if they were variables in the environment (i.e. you write `my_variable` not `df$my_variable`). We focus on tidy selection here, since it's the most common. You can learn more about data masking in the equivalent vignette in dplyr: . 
For other considerations when writing tidyr code in packages, please see `vignette("in-packages")`. We've pointed out that tidyr's tidy evaluation interface is optimized for interactive exploration. The flip side is that this adds some challenges to indirect use, i.e. when you're working inside a `for` loop or a function. This vignette shows you how to overcome those challenges. We'll first go over the basics of tidy selection and data masking, talk about how to use them indirectly, and then show you a number of recipes to solve common problems. Before we go on, we reveal the version of tidyr we're using and make a small dataset to use in examples. ```{r} packageVersion("tidyr") mini_iris <- as_tibble(iris)[c(1, 2, 51, 52, 101, 102), ] mini_iris ``` ## Tidy selection Underneath all functions that use tidy selection is the [tidyselect](https://tidyselect.r-lib.org/) package. It provides a miniature domain specific language that makes it easy to select columns by name, position, or type. For example: * `select(df, 1)` selects the first column; `select(df, last_col())` selects the last column. * `select(df, c(a, b, c))` selects columns `a`, `b`, and `c`. * `select(df, starts_with("a"))` selects all columns whose name starts with "a"; `select(df, ends_with("z"))` selects all columns whose name ends with "z". * `select(df, where(is.numeric))` selects all numeric columns. You can see more details in `?tidyr_tidy_select`. ### Indirection Tidy selection makes a common task easier at the cost of making a less common task harder. When you want to use tidy select indirectly with the column specification stored in an intermediate variable, you'll need to learn some new tools. There are three main cases where this comes up: * When you have the tidy-select specification in a function argument, you must **embrace** the argument by surrounding it in doubled braces. 
```{r} nest_egg <- function(df, cols) { nest(df, egg = {{ cols }}) } nest_egg(mini_iris, !Species) ``` * When you have a character vector of variable names, you must use `all_of()` or `any_of()` depending on whether you want the function to error if a variable is not found. These functions allow you to write for loops or a function that takes variable names as a character vector. ```{r} nest_egg <- function(df, cols) { nest(df, egg = all_of(cols)) } vars <- c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width") nest_egg(mini_iris, vars) ``` * In more complicated cases, you might want to use tidyselect directly: ```{r} sel_vars <- function(df, cols) { tidyselect::eval_select(rlang::enquo(cols), df) } sel_vars(mini_iris, !Species) ``` Learn more in `vignette("tidyselect")`. Note that many tidyr functions use `...` so you can easily select many variables, e.g. `fill(df, x, y, z)`. I now believe that the disadvantages of this approach outweigh the benefits, and that this interface would have been better as `fill(df, c(x, y, z))`. For new functions that select columns, please just use a single argument and not `...`. tidyr/inst/doc/rectangle.Rmd0000644000176200001440000002245214357024447015556 0ustar liggesusers--- title: "Rectangling" output: rmarkdown::html_vignette description: | Rectangling is the art and craft of taking a deeply nested list (often sourced from wild caught JSON or XML) and taming it into a tidy data set of rows and columns. This vignette introduces you to the main rectangling tools provided by tidyr: `unnest_longer()`, `unnest_wider()`, and `hoist()`. vignette: > %\VignetteIndexEntry{Rectangling} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction Rectangling is the art and craft of taking a deeply nested list (often sourced from wild caught JSON or XML) and taming it into a tidy data set of rows and columns. 
There are three functions from tidyr that are particularly useful for rectangling: * `unnest_longer()` takes each element of a list-column and makes a new row. * `unnest_wider()` takes each element of a list-column and makes a new column. * `hoist()` is similar to `unnest_wider()` but only plucks out selected components, and can reach down multiple levels. (Alternatively, for complex inputs where you need to rectangle a nested list according to a specification, see the [tibblify](https://github.com/mgirlich/tibblify) package.) A very large number of data rectangling problems can be solved by combining `jsonlite::read_json()` with these functions and a splash of dplyr (largely eliminating prior approaches that combined `mutate()` with multiple `purrr::map()`s). Note that jsonlite has another important function called `fromJSON()`. We don't recommend it here because it performs its own automatic simplification (`simplifyVector = TRUE`). This often works well, particularly in simple cases, but we think you're better off doing the rectangling yourself so you know exactly what's happening and can more easily handle the most complicated nested structures. To illustrate these techniques, we'll use the repurrrsive package, which provides a number of deeply nested lists originally mostly captured from web APIs. ```{r setup, message = FALSE} library(tidyr) library(dplyr) library(repurrrsive) ``` ## GitHub users We'll start with `gh_users`, a list which contains information about six GitHub users. To begin, we put the `gh_users` list into a data frame: ```{r} users <- tibble(user = gh_users) ``` This seems a bit counter-intuitive: why is the first step in making a list simpler to make it more complicated? But a data frame has a big advantage: it bundles together multiple vectors so that everything is tracked together in a single object. Each `user` is a named list, where each element represents a column. 
```{r} names(users$user[[1]]) ``` There are two ways to turn the list components into columns. `unnest_wider()` takes every component and makes a new column: ```{r} users %>% unnest_wider(user) ``` But in this case, there are many components and we don't need most of them so we can instead use `hoist()`. `hoist()` allows us to pull out selected components using the same syntax as `purrr::pluck()`: ```{r} users %>% hoist(user, followers = "followers", login = "login", url = "html_url" ) ``` `hoist()` removes the named components from the `user` list-column, so you can think of it as moving components out of the inner list into the top-level data frame. ## GitHub repos We start off `gh_repos` similarly, by putting it in a tibble: ```{r} repos <- tibble(repo = gh_repos) repos ``` This time the elements of `repos` are a list of repositories that belong to that user. These are observations, so should become new rows, so we use `unnest_longer()` rather than `unnest_wider()`: ```{r} repos <- repos %>% unnest_longer(repo) repos ``` Then we can use `unnest_wider()` or `hoist()`: ```{r} repos %>% hoist(repo, login = c("owner", "login"), name = "name", homepage = "homepage", watchers = "watchers_count" ) ``` Note the use of `c("owner", "login")`: this allows us to reach two levels deep inside of a list. An alternative approach would be to pull out just `owner` and then put each element of it in a column: ```{r} repos %>% hoist(repo, owner = "owner") %>% unnest_wider(owner) ``` ## Game of Thrones characters `got_chars` has a similar structure to `gh_users`: it's a list of named lists, where each element of the inner list describes some attribute of a GoT character. 
We start in the same way, first by creating a data frame and then by unnesting each component into a column: ```{r} chars <- tibble(char = got_chars) chars chars2 <- chars %>% unnest_wider(char) chars2 ``` This is more complex than `gh_users` because some components of `char` are themselves lists, giving us a collection of list-columns: ```{r} chars2 %>% select_if(is.list) ``` What you do next will depend on the purposes of the analysis. Maybe you want a row for every book and TV series that the character appears in: ```{r} chars2 %>% select(name, books, tvSeries) %>% pivot_longer(c(books, tvSeries), names_to = "media", values_to = "value") %>% unnest_longer(value) ``` Or maybe you want to build a table that lets you match title to name: ```{r} chars2 %>% select(name, title = titles) %>% unnest_longer(title) ``` (Note that the empty titles (`""`) are due to an infelicity in the input `got_chars`: ideally people without titles would have a title vector of length 0, not a title vector of length 1 containing an empty string.) ## Geocoding with google Next we'll tackle a more complex form of data that comes from Google's geocoding service, stored in the repurrrsive package ```{r} repurrrsive::gmaps_cities ``` `json` is a list-column of named lists, so it makes sense to start with `unnest_wider()`: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) ``` Notice that `results` is a list of lists. Most of the cities have 1 element (representing a unique match from the geocoding API), but Washington and Arlington have two. 
We can pull these out into separate rows with `unnest_longer()`: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) ``` Now these all have the same components, as revealed by `unnest_wider()`: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) %>% unnest_wider(results) ``` We can find the latitude and longitude by unnesting `geometry`: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) %>% unnest_wider(results) %>% unnest_wider(geometry) ``` And then location: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) %>% unnest_wider(results) %>% unnest_wider(geometry) %>% unnest_wider(location) ``` We could also just look at the first address for each city: ```{r} repurrrsive::gmaps_cities %>% unnest_wider(json) %>% hoist(results, first_result = 1) %>% unnest_wider(first_result) %>% unnest_wider(geometry) %>% unnest_wider(location) ``` Or use `hoist()` to dive deeply to get directly to `lat` and `lng`: ```{r} repurrrsive::gmaps_cities %>% hoist(json, lat = list("results", 1, "geometry", "location", "lat"), lng = list("results", 1, "geometry", "location", "lng") ) ``` ## Sharla Gelfand's discography We'll finish off with the most complex list, from [Sharla Gelfand's](https://sharla.party/post/discog-purrr/) discography. We'll start the usual way: putting the list into a single column data frame, and then widening so each component is a column. I also parse the `date_added` column into a real date-time[^readr]. [^readr]: I'd normally use `readr::parse_datetime()` or `lubridate::ymd_hms()`, but I can't here because it's a vignette and I don't want to add a dependency to tidyr just to simplify one example. 
```{r} discs <- tibble(disc = discog) %>% unnest_wider(disc) %>% mutate(date_added = as.POSIXct(strptime(date_added, "%Y-%m-%dT%H:%M:%S"))) discs ``` At this level, we see information about when each disc was added to Sharla's discography, not any information about the disc itself. To do that we need to widen the `basic_information` column: ```{r, error = TRUE} discs %>% unnest_wider(basic_information) ``` Unfortunately that fails because there's an `id` column inside `basic_information`. We can quickly see what's going on by setting `names_repair = "unique"`: ```{r} discs %>% unnest_wider(basic_information, names_repair = "unique") ``` The problem is that `basic_information` repeats the `id` column that's also stored at the top-level, so we can just drop that: ```{r} discs %>% select(!id) %>% unnest_wider(basic_information) ``` Alternatively, we could use `hoist()`: ```{r} discs %>% hoist(basic_information, title = "title", year = "year", label = list("labels", 1, "name"), artist = list("artists", 1, "name") ) ``` Here I quickly extract the name of the first label and artist by indexing deeply into the nested list. A more systematic approach would be to create separate tables for artist and label: ```{r} discs %>% hoist(basic_information, artist = "artists") %>% select(disc_id = id, artist) %>% unnest_longer(artist) %>% unnest_wider(artist) discs %>% hoist(basic_information, format = "formats") %>% select(disc_id = id, format) %>% unnest_longer(format) %>% unnest_wider(format) %>% unnest_longer(descriptions) ``` Then you could join these back on to the original dataset as needed. tidyr/inst/doc/in-packages.Rmd0000644000176200001440000003261614363604046015773 0ustar liggesusers--- title: "In packages" output: rmarkdown::html_vignette description: | Things to bear in mind when using tidyr in a package. 
vignette: > %\VignetteIndexEntry{In packages} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Introduction This vignette serves two distinct, but related, purposes: * It documents general best practices for using tidyr in a package, inspired by [using ggplot2 in packages][ggplot2-packages]. * It describes migration patterns for the transition from tidyr v0.8.3 to v1.0.0. This release includes breaking changes to `nest()` and `unnest()` in order to increase consistency within tidyr and with the rest of the tidyverse. Before we go on, we'll attach the packages we use, expose the version of tidyr, and make a small dataset to use in examples. ```{r setup} library(tidyr) library(dplyr, warn.conflicts = FALSE) library(purrr) packageVersion("tidyr") mini_iris <- as_tibble(iris)[c(1, 2, 51, 52, 101, 102), ] mini_iris ``` ## Using tidyr in packages Here we assume that you're already familiar with using tidyr in functions, as described in `vignette("programming.Rmd")`. There are two important considerations when using tidyr in a package: * How to avoid `R CMD CHECK` notes when using fixed variable names. * How to alert yourself to upcoming changes in the development version of tidyr. ### Fixed column names If you know the column names, this code works in the same way regardless of whether its inside or outside of a package: ```{r} mini_iris %>% nest( petal = c(Petal.Length, Petal.Width), sepal = c(Sepal.Length, Sepal.Width) ) ``` But `R CMD check` will warn about undefined global variables (`Petal.Length`, `Petal.Width`, `Sepal.Length`, and `Sepal.Width`), because it doesn't know that `nest()` is looking for the variables inside of `mini_iris` (i.e. `Petal.Length` and friends are data-variables, not env-variables). The easiest way to silence this note is to use `all_of()`. `all_of()` is a tidyselect helper (like `starts_with()`, `ends_with()`, etc.) 
that takes column names stored as strings: ```{r} mini_iris %>% nest( petal = all_of(c("Petal.Length", "Petal.Width")), sepal = all_of(c("Sepal.Length", "Sepal.Width")) ) ``` Alternatively, you may want to use `any_of()` if it is OK that some of the specified variables cannot be found in the input data. The [tidyselect](https://tidyselect.r-lib.org) package offers an entire family of select helpers. You are probably already familiar with them from using `dplyr::select()`. ### Continuous integration Hopefully you've already adopted continuous integration for your package, in which `R CMD check` (which includes your own tests) is run on a regular basis, e.g. every time you push changes to your package's source on GitHub or similar. The tidyverse team currently relies most heavily on GitHub Actions, so that will be our example. `usethis::use_github_action()` can help you get started. We recommend adding a workflow that targets the devel version of tidyr. When should you do this? * Always? If your package is tightly coupled to tidyr, consider leaving this in place all the time, so you know if changes in tidyr affect your package. * Right before a tidyr release? For everyone else, you could add (or re-activate an existing) tidyr-devel workflow during the period preceding a major tidyr release that has the potential for breaking changes, especially if you've been contacted during our reverse dependency checks. 
Example of a GitHub Actions workflow that tests your package against the development version of tidyr: ``` yaml on: push: branches: - main pull_request: branches: - main name: R-CMD-check-tidyr-devel jobs: R-CMD-check: runs-on: macOS-latest steps: - uses: actions/checkout@v2 - uses: r-lib/actions/setup-r@v1 - name: Install dependencies run: | install.packages(c("remotes", "rcmdcheck")) remotes::install_deps(dependencies = TRUE) remotes::install_github("tidyverse/tidyr") shell: Rscript {0} - name: Check run: rcmdcheck::rcmdcheck(args = "--no-manual", error_on = "error") shell: Rscript {0} ``` GitHub Actions are an evolving landscape, so you can always mine the workflows for tidyr itself ([tidyverse/tidyr/.github/workflows](https://github.com/tidyverse/tidyr/tree/main/.github/workflows)) or the main [r-lib/actions](https://github.com/r-lib/actions) repo for ideas. ## tidyr v0.8.3 -> v1.0.0 v1.0.0 makes considerable changes to the interface of `nest()` and `unnest()` in order to bring them in line with newer tidyverse conventions. I have tried to make the functions as backward compatible as possible and to give informative warning messages, but I could not cover 100% of use cases, so you may need to change your package code. This guide will help you do so with a minimum of pain. Ideally, you'll tweak your package so that it works with both tidyr 0.8.3 and tidyr 1.0.0. This makes life considerably easier because it means there's no need to coordinate CRAN submissions - you can submit your package that works with both tidyr versions, before I submit tidyr to CRAN. This section describes our recommend practices for doing so, drawing from the general principles described in . If you use continuous integration already, we **strongly** recommend adding a build that tests with the development version of tidyr; see above for details. 
This section briefly describes how to run different code for different versions of tidyr, then goes through the major changes that might require workarounds: * `nest()` and `unnest()` get new interfaces. * `nest()` preserves groups. * `nest_()` and `unnest_()` are defunct. If you're struggling with a problem that's not described here, please reach out via [github](https://github.com/tidyverse/tidyr/issues/new) or [email](mailto:hadley@posit.co) so we can help out. ### Conditional code Sometimes you'll be able to write code that works with v0.8.3 _and_ v1.0.0. But this often requires code that's not particularly natural for either version and you'd be better off to (temporarily) have separate code paths, each containing non-contrived code. You get to re-use your existing code in the "old" branch, which will eventually be phased out, and write clean, forward-looking code in the "new" branch. The basic approach looks like this. First you define a function that returns `TRUE` for new versions of tidyr: ```{r} tidyr_new_interface <- function() { packageVersion("tidyr") > "0.8.99" } ``` We highly recommend keeping this as a function because it provides an obvious place to jot any transition notes for your package, and it makes it easier to remove transitional code later on. Another benefit is that the tidyr version is determined at *run time*, not at *build time*, and will therefore detect your user's current tidyr version. Then in your functions, you use an `if` statement to call different code for different versions: ```{r, eval = FALSE} my_function_inside_a_package <- function(...) 
# my code here if (tidyr_new_interface()) { # Freshly written code for v1.0.0 out <- tidyr::nest(df, data = any_of(c("x", "y", "z"))) } else { # Existing code for v0.8.3 out <- tidyr::nest(df, x, y, z) } # more code here } ``` If your new code uses a function that only exists in tidyr 1.0.0, you will get a `NOTE` from `R CMD check`: this is one of the few notes that you can explain in your CRAN submission comments. Just mention that it's for forward compatibility with tidyr 1.0.0, and CRAN will let your package through. ### New syntax for `nest()` What changed: * The to-be-nested columns are no longer accepted as "loose parts". * The new list-column's name is no longer provided via the `.key` argument. * Now we use a construct like this: `new_col = `. Why it changed: * The use of `...` for metadata is a problematic pattern we're moving away from. * The `new_col = ` construct lets us create multiple nested list-columns at once ("multi-nest"). ```{r} mini_iris %>% nest(petal = matches("Petal"), sepal = matches("Sepal")) ``` Before and after examples: ```{r eval = FALSE} # v0.8.3 mini_iris %>% nest(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width, .key = "my_data") # v1.0.0 mini_iris %>% nest(my_data = c(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width)) # v1.0.0 avoiding R CMD check NOTE mini_iris %>% nest(my_data = any_of(c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width"))) # or equivalently: mini_iris %>% nest(my_data = !any_of("Species")) ``` If you need a quick and dirty fix without having to think, just call `nest_legacy()` instead of `nest()`. It's the same as `nest()` in v0.8.3: ```{r, eval = FALSE} if (tidyr_new_interface()) { out <- tidyr::nest_legacy(df, x, y, z) } else { out <- tidyr::nest(df, x, y, z) } ``` ### New syntax for `unnest()` What changed: * The to-be-unnested columns must now be specified explicitly, instead of defaulting to all list-columns. This also deprecates `.drop` and `.preserve`. 
* `.sep` has been deprecated and replaced with `names_sep`. * `unnest()` uses the [emerging tidyverse standard][name-repair] to disambiguate duplicated names. Use `names_repair = tidyr_legacy` to request the previous approach. * `.id` has been deprecated because it can be easily replaced by creating the column of names prior to `unnest()`, e.g. with an upstream call to `mutate()`. ```{r, eval = FALSE} # v0.8.3 df %>% unnest(x, .id = "id") # v1.0.0 df %>% mutate(id = names(x)) %>% unnest(x) ``` Why it changed: * The use of `...` for metadata is a problematic pattern we're moving away from. * The changes to details arguments relate to features rolling out across multiple packages in the tidyverse. For example, `ptype` exposes prototype support from the new [vctrs package](https://vctrs.r-lib.org). `names_repair` specifies what to do about duplicated or non-syntactic names, consistent with tibble and readxl. Before and after: ```{r, eval = FALSE} nested <- mini_iris %>% nest(my_data = c(Sepal.Length, Sepal.Width, Petal.Length, Petal.Width)) # v0.8.3 automatically unnests list-cols nested %>% unnest() # v1.0.0 must be told which columns to unnest nested %>% unnest(any_of("my_data")) ``` If you need a quick and dirty fix without having to think, just call `unnest_legacy()` instead of `unnest()`. It's the same as `unnest()` in v0.8.3: ```{r, eval = FALSE} if (tidyr_new_interface()) { out <- tidyr::unnest_legacy(df) } else { out <- tidyr::unnest(df) } ``` ### `nest()` preserves groups What changed: * `nest()` now preserves the groups present in the input. Why it changed: * To reflect the growing support for grouped data frames, especially in recent releases of dplyr. See, for example, `dplyr::group_modify()`, `group_map()`, and friends. If the fact that `nest()` now preserves groups is problematic downstream, you have a few choices: * Apply `ungroup()` to the result. This level of pragmatism suggests, however, you should at least consider the next two options. 
* You should never have grouped in the first place. Eliminate the `group_by()` call and specify which columns should be nested versus not nested directly in `nest()`. * Adjust the downstream code to accommodate grouping. Imagine we used `group_by()` then `nest()` on `mini_iris`, then we computed on the list-column *outside the data frame*. ```{r} (df <- mini_iris %>% group_by(Species) %>% nest()) (external_variable <- map_int(df$data, nrow)) ``` And now we try to add that back to the data *post hoc*: ```{r error = TRUE} df %>% mutate(n_rows = external_variable) ``` This fails because `df` is grouped and `mutate()` is group-aware, so it's hard to add a completely external variable. Other than pragmatically `ungroup()`ing, what can we do? One option is to work inside the data frame, i.e. bring the `map()` inside the `mutate()`, and design the problem away: ```{r} df %>% mutate(n_rows = map_int(data, nrow)) ``` If, somehow, the grouping seems appropriate AND working inside the data frame is not an option, `tibble::add_column()` is group-unaware. It lets you add external data to a grouped data frame. ```{r} df %>% tibble::add_column(n_rows = external_variable) ``` ### `nest_()` and `unnest_()` are defunct What changed: * `nest_()` and `unnest_()` no longer work Why it changed: * We are transitioning the whole tidyverse to the powerful tidy eval framework. Therefore, we are gradually removing all previous solutions: - Specialized standard evaluation versions of functions, e.g., `foo_()` as a complement to `foo()`. - The older lazyeval framework. 
Before and after: ```{r eval = FALSE} # v0.8.3 mini_iris %>% nest_( key_col = "my_data", nest_cols = c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width") ) nested %>% unnest_(~ my_data) # v1.0.0 mini_iris %>% nest(my_data = any_of(c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width"))) nested %>% unnest(any_of("my_data")) ``` [ggplot2-packages]: https://ggplot2.tidyverse.org/dev/articles/ggplot2-in-packages.html [name-repair]: https://www.tidyverse.org/blog/2019/01/tibble-2.0.1/#name-repair tidyr/inst/doc/pivot.R0000644000176200001440000002742014553746311014431 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) options(tibble.print_max = 10) ## ----setup, message = FALSE--------------------------------------------------- library(tidyr) library(dplyr) library(readr) ## ----------------------------------------------------------------------------- relig_income ## ----------------------------------------------------------------------------- relig_income %>% pivot_longer( cols = !religion, names_to = "income", values_to = "count" ) ## ----------------------------------------------------------------------------- billboard ## ----------------------------------------------------------------------------- billboard %>% pivot_longer( cols = starts_with("wk"), names_to = "week", values_to = "rank", values_drop_na = TRUE ) ## ----eval = FALSE------------------------------------------------------------- # billboard %>% # pivot_longer( # cols = starts_with("wk"), # names_to = "week", # names_prefix = "wk", # names_transform = as.integer, # values_to = "rank", # values_drop_na = TRUE, # ) ## ----eval = FALSE------------------------------------------------------------- # billboard %>% # pivot_longer( # cols = starts_with("wk"), # names_to = "week", # names_transform = readr::parse_number, # values_to = "rank", # values_drop_na = TRUE, # ) ## 
----------------------------------------------------------------------------- who ## ----------------------------------------------------------------------------- who %>% pivot_longer( cols = new_sp_m014:newrel_f65, names_to = c("diagnosis", "gender", "age"), names_pattern = "new_?(.*)_(.)(.*)", values_to = "count" ) ## ----eval = FALSE------------------------------------------------------------- # who %>% # pivot_longer( # cols = new_sp_m014:newrel_f65, # names_to = c("diagnosis", "gender", "age"), # names_pattern = "new_?(.*)_(.)(.*)", # names_transform = list( # gender = ~ readr::parse_factor(.x, levels = c("f", "m")), # age = ~ readr::parse_factor( # .x, # levels = c("014", "1524", "2534", "3544", "4554", "5564", "65"), # ordered = TRUE # ) # ), # values_to = "count", # ) ## ----------------------------------------------------------------------------- household ## ----------------------------------------------------------------------------- household %>% pivot_longer( cols = !family, names_to = c(".value", "child"), names_sep = "_", values_drop_na = TRUE ) ## ----------------------------------------------------------------------------- anscombe ## ----------------------------------------------------------------------------- anscombe %>% pivot_longer( cols = everything(), cols_vary = "slowest", names_to = c(".value", "set"), names_pattern = "(.)(.)" ) ## ----------------------------------------------------------------------------- pnl <- tibble( x = 1:4, a = c(1, 1,0, 0), b = c(0, 1, 1, 1), y1 = rnorm(4), y2 = rnorm(4), z1 = rep(3, 4), z2 = rep(-2, 4), ) pnl %>% pivot_longer( cols = !c(x, a, b), names_to = c(".value", "time"), names_pattern = "(.)(.)" ) ## ----------------------------------------------------------------------------- fish_encounters ## ----------------------------------------------------------------------------- fish_encounters %>% pivot_wider( names_from = station, values_from = seen ) ## 
----------------------------------------------------------------------------- fish_encounters %>% pivot_wider( names_from = station, values_from = seen, values_fill = 0 ) ## ----------------------------------------------------------------------------- warpbreaks <- warpbreaks %>% as_tibble() %>% select(wool, tension, breaks) warpbreaks ## ----------------------------------------------------------------------------- warpbreaks %>% count(wool, tension) ## ----------------------------------------------------------------------------- warpbreaks %>% pivot_wider( names_from = wool, values_from = breaks ) ## ----------------------------------------------------------------------------- warpbreaks %>% pivot_wider( names_from = wool, values_from = breaks, values_fn = mean ) ## ----------------------------------------------------------------------------- production <- expand_grid( product = c("A", "B"), country = c("AI", "EI"), year = 2000:2014 ) %>% filter((product == "A" & country == "AI") | product == "B") %>% mutate(production = rnorm(nrow(.))) production ## ----------------------------------------------------------------------------- production %>% pivot_wider( names_from = c(product, country), values_from = production ) ## ----------------------------------------------------------------------------- production %>% pivot_wider( names_from = c(product, country), values_from = production, names_sep = ".", names_prefix = "prod." 
) production %>% pivot_wider( names_from = c(product, country), values_from = production, names_glue = "prod_{product}_{country}" ) ## ----------------------------------------------------------------------------- us_rent_income ## ----------------------------------------------------------------------------- us_rent_income %>% pivot_wider( names_from = variable, values_from = c(estimate, moe) ) ## ----------------------------------------------------------------------------- weekdays <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") daily <- tibble( day = factor(c("Tue", "Thu", "Fri", "Mon"), levels = weekdays), value = c(2, 3, 1, 5) ) daily ## ----------------------------------------------------------------------------- daily %>% pivot_wider( names_from = day, values_from = value ) ## ----------------------------------------------------------------------------- daily %>% pivot_wider( names_from = day, values_from = value, names_expand = TRUE ) ## ----------------------------------------------------------------------------- percentages <- tibble( year = c(2018, 2019, 2020, 2020), type = factor(c("A", "B", "A", "B"), levels = c("A", "B")), percentage = c(100, 100, 40, 60) ) percentages percentages %>% pivot_wider( names_from = c(year, type), values_from = percentage, names_expand = TRUE, values_fill = 0 ) ## ----------------------------------------------------------------------------- daily <- mutate(daily, type = factor(c("A", "B", "B", "A"))) daily ## ----------------------------------------------------------------------------- daily %>% pivot_wider( names_from = type, values_from = value, values_fill = 0 ) ## ----------------------------------------------------------------------------- daily %>% pivot_wider( names_from = type, values_from = value, values_fill = 0, id_expand = TRUE ) ## ----------------------------------------------------------------------------- updates <- tibble( county = c("Wake", "Wake", "Wake", "Guilford", "Guilford"), date = 
c(as.Date("2020-01-01") + 0:2, as.Date("2020-01-03") + 0:1), system = c("A", "B", "C", "A", "C"), value = c(3.2, 4, 5.5, 2, 1.2) ) updates ## ----------------------------------------------------------------------------- updates %>% pivot_wider( id_cols = county, names_from = system, values_from = value ) ## ----------------------------------------------------------------------------- updates %>% pivot_wider( id_cols = county, names_from = system, values_from = value, unused_fn = list(date = max) ) ## ----------------------------------------------------------------------------- updates %>% pivot_wider( id_cols = county, names_from = system, values_from = value, unused_fn = list(date = list) ) ## ----------------------------------------------------------------------------- contacts <- tribble( ~field, ~value, "name", "Jiena McLellan", "company", "Toyota", "name", "John Smith", "company", "google", "email", "john@google.com", "name", "Huxley Ratcliffe" ) ## ----------------------------------------------------------------------------- contacts <- contacts %>% mutate( person_id = cumsum(field == "name") ) contacts ## ----------------------------------------------------------------------------- contacts %>% pivot_wider( names_from = field, values_from = value ) ## ----------------------------------------------------------------------------- world_bank_pop ## ----------------------------------------------------------------------------- pop2 <- world_bank_pop %>% pivot_longer( cols = `2000`:`2017`, names_to = "year", values_to = "value" ) pop2 ## ----------------------------------------------------------------------------- pop2 %>% count(indicator) ## ----------------------------------------------------------------------------- pop3 <- pop2 %>% separate(indicator, c(NA, "area", "variable")) pop3 ## ----------------------------------------------------------------------------- pop3 %>% pivot_wider( names_from = variable, values_from = value ) ## 
----------------------------------------------------------------------------- multi <- tribble( ~id, ~choice1, ~choice2, ~choice3, 1, "A", "B", "C", 2, "C", "B", NA, 3, "D", NA, NA, 4, "B", "D", NA ) ## ----------------------------------------------------------------------------- multi2 <- multi %>% pivot_longer( cols = !id, values_drop_na = TRUE ) %>% mutate(checked = TRUE) multi2 ## ----------------------------------------------------------------------------- multi2 %>% pivot_wider( id_cols = id, names_from = value, values_from = checked, values_fill = FALSE ) ## ----------------------------------------------------------------------------- spec <- relig_income %>% build_longer_spec( cols = !religion, names_to = "income", values_to = "count" ) pivot_longer_spec(relig_income, spec) ## ----------------------------------------------------------------------------- spec ## ----------------------------------------------------------------------------- us_rent_income %>% pivot_wider( names_from = variable, values_from = c(estimate, moe) ) ## ----------------------------------------------------------------------------- spec1 <- us_rent_income %>% build_wider_spec( names_from = variable, values_from = c(estimate, moe) ) spec1 ## ----------------------------------------------------------------------------- spec2 <- spec1 %>% mutate( .name = paste0(variable, ifelse(.value == "moe", "_moe", "")) ) spec2 ## ----------------------------------------------------------------------------- us_rent_income %>% pivot_wider_spec(spec2) ## ----------------------------------------------------------------------------- construction ## ----------------------------------------------------------------------------- spec <- tribble( ~.name, ~.value, ~units, ~region, "1 unit", "n", "1", NA, "2 to 4 units", "n", "2-4", NA, "5 units or more", "n", "5+", NA, "Northeast", "n", NA, "Northeast", "Midwest", "n", NA, "Midwest", "South", "n", NA, "South", "West", "n", NA, "West", ) ## 
----------------------------------------------------------------------------- construction %>% pivot_longer_spec(spec) ## ----------------------------------------------------------------------------- construction %>% pivot_longer_spec(spec) %>% pivot_wider_spec(spec) tidyr/inst/doc/nest.R0000644000176200001440000000262314553746310014236 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, message = FALSE--------------------------------------------------- library(tidyr) library(dplyr) library(purrr) ## ----------------------------------------------------------------------------- df1 <- tibble( g = c(1, 2, 3), data = list( tibble(x = 1, y = 2), tibble(x = 4:5, y = 6:7), tibble(x = 10) ) ) df1 ## ----------------------------------------------------------------------------- df2 <- tribble( ~g, ~x, ~y, 1, 1, 2, 2, 4, 6, 2, 5, 7, 3, 10, NA ) df2 %>% nest(data = c(x, y)) ## ----------------------------------------------------------------------------- df2 %>% group_by(g) %>% nest() ## ----------------------------------------------------------------------------- df1 %>% unnest(data) ## ----------------------------------------------------------------------------- mtcars_nested <- mtcars %>% group_by(cyl) %>% nest() mtcars_nested ## ----------------------------------------------------------------------------- mtcars_nested <- mtcars_nested %>% mutate(model = map(data, function(df) lm(mpg ~ wt, data = df))) mtcars_nested ## ----------------------------------------------------------------------------- mtcars_nested <- mtcars_nested %>% mutate(model = map(model, predict)) mtcars_nested tidyr/inst/doc/rectangle.R0000644000176200001440000001152614553746312015235 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup, message = 
FALSE--------------------------------------------------- library(tidyr) library(dplyr) library(repurrrsive) ## ----------------------------------------------------------------------------- users <- tibble(user = gh_users) ## ----------------------------------------------------------------------------- names(users$user[[1]]) ## ----------------------------------------------------------------------------- users %>% unnest_wider(user) ## ----------------------------------------------------------------------------- users %>% hoist(user, followers = "followers", login = "login", url = "html_url" ) ## ----------------------------------------------------------------------------- repos <- tibble(repo = gh_repos) repos ## ----------------------------------------------------------------------------- repos <- repos %>% unnest_longer(repo) repos ## ----------------------------------------------------------------------------- repos %>% hoist(repo, login = c("owner", "login"), name = "name", homepage = "homepage", watchers = "watchers_count" ) ## ----------------------------------------------------------------------------- repos %>% hoist(repo, owner = "owner") %>% unnest_wider(owner) ## ----------------------------------------------------------------------------- chars <- tibble(char = got_chars) chars chars2 <- chars %>% unnest_wider(char) chars2 ## ----------------------------------------------------------------------------- chars2 %>% select_if(is.list) ## ----------------------------------------------------------------------------- chars2 %>% select(name, books, tvSeries) %>% pivot_longer(c(books, tvSeries), names_to = "media", values_to = "value") %>% unnest_longer(value) ## ----------------------------------------------------------------------------- chars2 %>% select(name, title = titles) %>% unnest_longer(title) ## ----------------------------------------------------------------------------- repurrrsive::gmaps_cities ## 
----------------------------------------------------------------------------- repurrrsive::gmaps_cities %>% unnest_wider(json) ## ----------------------------------------------------------------------------- repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) ## ----------------------------------------------------------------------------- repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) %>% unnest_wider(results) ## ----------------------------------------------------------------------------- repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) %>% unnest_wider(results) %>% unnest_wider(geometry) ## ----------------------------------------------------------------------------- repurrrsive::gmaps_cities %>% unnest_wider(json) %>% unnest_longer(results) %>% unnest_wider(results) %>% unnest_wider(geometry) %>% unnest_wider(location) ## ----------------------------------------------------------------------------- repurrrsive::gmaps_cities %>% unnest_wider(json) %>% hoist(results, first_result = 1) %>% unnest_wider(first_result) %>% unnest_wider(geometry) %>% unnest_wider(location) ## ----------------------------------------------------------------------------- repurrrsive::gmaps_cities %>% hoist(json, lat = list("results", 1, "geometry", "location", "lat"), lng = list("results", 1, "geometry", "location", "lng") ) ## ----------------------------------------------------------------------------- discs <- tibble(disc = discog) %>% unnest_wider(disc) %>% mutate(date_added = as.POSIXct(strptime(date_added, "%Y-%m-%dT%H:%M:%S"))) discs ## ----error = TRUE------------------------------------------------------------- discs %>% unnest_wider(basic_information) ## ----------------------------------------------------------------------------- discs %>% unnest_wider(basic_information, names_repair = "unique") ## ----------------------------------------------------------------------------- discs %>% select(!id) 
%>% unnest_wider(basic_information) ## ----------------------------------------------------------------------------- discs %>% hoist(basic_information, title = "title", year = "year", label = list("labels", 1, "name"), artist = list("artists", 1, "name") ) ## ----------------------------------------------------------------------------- discs %>% hoist(basic_information, artist = "artists") %>% select(disc_id = id, artist) %>% unnest_longer(artist) %>% unnest_wider(artist) discs %>% hoist(basic_information, format = "formats") %>% select(disc_id = id, format) %>% unnest_longer(format) %>% unnest_wider(format) %>% unnest_longer(descriptions)