tmvtnorm/0000755000176200001440000000000012567714017012152 5ustar liggesuserstmvtnorm/inst/0000755000176200001440000000000012567600065013124 5ustar liggesuserstmvtnorm/inst/CITATION0000644000176200001440000000160512567600060014256 0ustar liggesuserscitHeader("To cite package tmvtnorm in publications use:") ## R >= 2.8.0 passes package metadata to citation(). if(!exists("meta") || is.null(meta)) meta <- packageDescription("tmvtnorm") year <- sub("-.*", "", meta$Date) note <- sprintf("R package version %s", meta$Version) citEntry( entry = "Manual", title = "{tmvtnorm}: Truncated Multivariate Normal and Student t Distribution", author = personList(as.person("Stefan Wilhelm"), as.person("Manjunath B G")), year = year, note = note, url = "http://CRAN.R-project.org/package=tmvtnorm", textVersion = paste("Stefan Wilhelm, Manjunath B G (", year, "). tmvtnorm: Truncated Multivariate Normal and Student t Distribution. ", note, ".", sep="") ) tmvtnorm/inst/doc/0000755000176200001440000000000012567600065013671 5ustar liggesuserstmvtnorm/inst/doc/GibbsSampler.pdf0000644000176200001440000060114112567600065016741 0ustar liggesusers%PDF-1.5 % 3 0 obj << /Length 3513 /Filter /FlateDecode >> stream xYs]DLfm4Ll?(RbC%wX, Rrb} `?|?CJ7N(ij> g̨AGW7r|.'W=Yϖ';xs3t~}5{솧ęwq]В@&+ty껑4VJ&J #/az?EuC;w@R@*Y =@P:zgm-D><7 B<8 M/l%섓qmD|睕 ,uhUK  Fb3D`)z/ʧeOT;iЁ2=͜og(6Ioy7([=#g 2vu[˄t8#ό!]DvM;794=i<9)k3^!3"DuAX$]uK m8GBت5ab>LznA"Ql@k!ӍI/)A҂>T)-`sQ̘tT@j0Et6omliDЂt,|X&ok[<E' >SmhœIТ1(J)U]hz3!=4TI`}!gj|{`tw/mQ}:I"T;?gE=Qy֯RuyFm Rk!Ǻ:N1ѝ,dx~)QaM"ֵ>zlʃ]x\tbL2ÛR {bdf܈#ˢ@ciG߯Pbi%PlP1g vYqnF갾ײM2BRluپ!\8߯QN|.oK&'2#KR*"U**ۤY`&o*Q_Ȳj9RAPR@B Ծ1+Y&Q<`M<@٨pImP[~4 8XM%cYž ;tb%6kl~ý-): D6Akdn#5%;^  13Vapߝ>H㇈VLI2 _ާȭQfeCȍFTD%3}VURzF#SC{Un89̺%$zZE>)9꫘SMe+Gy{9` ;}ߖGlć'v;8ڊeyC4ڥ 2I ;6=PڂG *9Xu"^L4P6 z =7WӮly]8kq_ݎ=*ZgVsߘ,s6W٥o(W510LDCPrJ1)$$zp۲[[8%d@3G~> sJ2O%օm93Ü", 3X>QPn&W$}f)¤DY>6҈ p:ɲϓAMKu "(1LI,9]£W]ԶlCd6;6)@jTV3.RxCGG**˨>5|USE}7\z&检)fOs}0 0z˞ : \x~Dv>ڟ X*:DҢvE§WasOf)PǴ^8XT{*|6+wӘ:K{\xQ+uo?gE,/{/*}a 
²ʠ^ڴLu3(=[>65$&d,;WC^8%G63zVIӨzba*[5Ub2M]0!h4_)|y)lrO͖ Slu2|LVH UAT;d'02dD L䛚S фPa&2kC9DQE󌉨gH^hk1ebaUFxPsZ#ZNT, P}5K#ޠ)rr)_N ux #CXiK$yx?؛ endstream endobj 2 0 obj << /Type /Page /Contents 3 0 R /Resources 1 0 R /MediaBox [0 0 595.276 841.89] /Parent 19 0 R >> endobj 1 0 obj << /Font << /F117 4 0 R /F122 5 0 R /F127 6 0 R /F8 7 0 R /F135 8 0 R /F139 9 0 R /F138 10 0 R /F132 11 0 R /F142 12 0 R /F1 13 0 R /F151 14 0 R /F153 15 0 R /F154 16 0 R /F104 17 0 R /F167 18 0 R >> /ProcSet [ /PDF /Text ] >> endobj 22 0 obj << /Length 4600 /Filter /FlateDecode >> stream x\Ys~ׯeI >\;éDW$"4t9\Jr΁/tϾzi8t(%Bh'ó\[~ Ono8ʿ"WoK&T֕%V | ϤY?Ng0QD+mPCrcƦn.}xk?5>}_TsG_:ԝXՔ,⪧3wX<@RFltvug I$; vLZt]XtW2#;Mg+LjM9:H7w$Oj=1JQG*9{l*Pq(#P1S ^Tuo據̾V6& 5 J߇}*KV I2m]EQг =}A6}t!i Y)A3<' $T`L@ζđP&MM#CQV 20~:>젽c$6K9V; _z] E6Cղ JDV5Z6Q~ m2F߆G}=K|{l:bƇ] `k(oC;q,WՌP{dDdO \$pl.4@q0X+r4ԟ"z9T|kv=.?sT9y ejSƃ5a\^<-H ǻ~3$L!&%dB4#?Z_\c&_6|f٧$ܱ]I``>;/6tZőnocz.`ַh̑qW2@\6"q͛pgnV3UHՈ!@eo2U}ok& JjWo&B$-A7࡬^q0i#P64RwDi $ls]w4|s|ji&9e88B(3>a= qyD+Qr!4i9o:#ޒ] ZlcOyg<YǷejyk<|G&4Oٓ"ǸʧQNJ,(%J ]Hh9NaX>"ͱ'06p`T3$7wd2]"˥]Zk*pJ)ݙwV6bP UDw邙g\A{NqhEXɇK$P#M\|qL@eEk]"r]E^&L(m`,|1 [Ѽ.۫y:G)p;g|49zC8̯ Sm 5H 5__t娏+V xn{`>VC;ըF\`spIRz:5ѳO93[J#Vj^+DFa&@6!H0roVbh]RQy$2_'rV`G\H65:!*?ߍ0 f|Q 0 `8p.kPqyPx NeuBjb1iXi/֖xn o S`slHZZrwƭ3,P5httj5m ̅p*<.C4FLZt;!N6UN^߁Z2N)?_= BSxc5Ť`W7Mf=UEKy.uސR80zEfWL0R XW1#ad.46OUє=2CrYU\ǒv02S\W9F$fwtN|BKDL5/V&|L:'u*Wh"lhnzFDǽ %6͗寫+ׂH̽qx֙ae,UV)^ ,e#uoO: {g}/[pi\-N 5g 3vq@BW[<5'+| A5BBZBτf=!ZWOI~BHȿqǓ] /p8ŦOѪBbd o}w##sBS -cA3x$x$4vΤe! 
E4gr_{>ϞދXt94ʧG+E~]0yk3#?)5"_U@PseZ7‰Y.5{rE-aR#h}و)G|O 1_ˠVÙ߽%Q,Yv<~mT3O/ `)OB,h{Rara퉑Wk2",on¥PoI)BY温8jź=CxN-w1Q0 iO߄KM-E2we&)7q75WP/ 7zZ90L.U穽ڷ -% 7aR}%Rh0@y2c&)Ҷ;۸m`^MR |YX)&tB"~2 /qL(żͰf+!4皵E;k4,}ǚ=iM;#857%kfe?JA endstream endobj 21 0 obj << /Type /Page /Contents 22 0 R /Resources 20 0 R /MediaBox [0 0 595.276 841.89] /Parent 19 0 R >> endobj 20 0 obj << /Font << /F8 7 0 R /F135 8 0 R /F142 12 0 R /F139 9 0 R /F138 10 0 R /F127 6 0 R /F132 11 0 R /F1 13 0 R /F172 23 0 R /F140 24 0 R /F136 25 0 R /F104 17 0 R >> /ProcSet [ /PDF /Text ] >> endobj 28 0 obj << /Length 4129 /Filter /FlateDecode >> stream x[[s6~[و&@v&ML&vMhؒW${.RlC,^@\sotFQUeAS(qp:;hN.;pem]>Q:04n ԥF[aMR!aKg0CUUL)1~ ,k+Sc<@bj49Lg4on HTS0JrrfBoe\l9'0LXS'Rdz#v6E3$o0'P"Javв>|t #G2#ʊB~&9oDv }{2~F f"K `XՔ.\oКl ؍(n@Қb ū+* -5Y.ucW<[nՌt%Q& ?u\"wx Q=.~‡~`7ˎ裑3  ܨbůgŅ;淬\i\3Oj-xLk?bXyQ*cKEƐLNF]z2HeJ-u9vud伽X⡴W"Ց'EN]UN;cV.y% OCTixӾ%&TR? :\K#LድzݙG$4gfŢ{2pgAA ܷ)K]RvbFˠ@CQ _lO?Q*]SbƟi&g$#, S8foPn,.r43h=vbLQ6GzE|D IckMf-mJ m cPUÎ)M z"3F\fS& UbwFOT)9"&,&33o6ۘX:G'xحsB=]Vzt"F= m!ƽr Д6B;E[%@-E'x%ATv9=iuj~!ܔ]_s إ.ַlyjm@oJ>rO( .%JNzװy!Z:Cݨ ([#ogzwh28fl:7G_Gc~]ptYl@+u[ )9gd5]~xǘ><'n=ʳ~4WnםA SP9IƂ{i Ó*p0T9T@u.8c]4u Y ٫۬+r.ex N:u9ѪG70k{+!8)#`y ٳMigJT;r.r%P.w0 w5qms f15S"aHl l3"* ^} g1f VvvM)~έMXJ3zeKhxVJ+(TseU iz ‹ g HP֑?gc!A_B1w_Bnq 8*&WFI|/9Lw|$)Q97;4<"< ")'qÉ|*  {H?K&Avlf֡rHE]Z%G6c MQlZ#9ؠI#;4@zRrCL3`_.B3PF0?؁t동n/$D!I^ ?S\zh߇L\.sm k6`<'ڹ&7ѨSDzY߅h27%νvưr!ynLʡ c,iOlǦ6Ct"(VebcDJd~"pB A@$χ{rN֙{eݷ+Z܏O=<9\Nߜs_PQ  J JU$?Pû U}~R`!|UGo'WʄN00BJ:T aEJN+1?]ғ{LQ|,!HI]c?a׉4wNE~a,~S`c8j'eVwð`%:H-5[.VoO'mbl O*쓨{;*~NՏ-BܞZx {F;NV6j0yC%jR>\sS3`fd6~Z93u<·4\m/75thL^WJ 26;Vb|qs*_K8l%Z3Juu ArtwwWj~on}C >:'C 3|؅z DW8#{:DJfZ}s 8Ԓ.,c_TzG/6%l ƃEU.gXxn_P>{n㹰Z;+g[;fA.&=`,D}P`eq^ m?h\IOqF6O ;W45evA.nA`FL7ˣo%9{̷Z Ɂ'Mm(ј >Ե/C$txoGD Ǵ[yX^^˜nVN5N-&jpJLj`Av@2yh}r^:Dw~ =FC] 0|ӕut}-~ +_= 1xY. 
fbnڧK6 @{ؘʠx!i]s?IbP:.i Ofؐ 4dqRS h!hOˏ?a!]:w=J+{=5݃|)G/nY+o__@!F{7DDVrj?V@MX7^F%6K=RwIR > (!ֶ="j+ѫ`#6?> endobj 26 0 obj << /Font << /F8 7 0 R /F132 11 0 R /F139 9 0 R /F138 10 0 R /F127 6 0 R /F135 8 0 R /F142 12 0 R /F104 17 0 R /F174 29 0 R /F153 15 0 R >> /ProcSet [ /PDF /Text ] >> endobj 30 0 obj [306.7 408.9 408.9 511.1 766.7 306.7 357.8 306.7 511.1 511.1 511.1 511.1 511.1 511.1 511.1 511.1 511.1 511.1 511.1 306.7 306.7 306.7 766.7 511.1 511.1 766.7 743.3 703.9 715.6 755 678.3 652.8 773.6 743.3 385.6 525 768.9 627.2 896.7 743.3 766.7 678.3 766.7 729.4 562.2 715.6 743.3 743.3 998.9 743.3 743.3 613.3 306.7 514.4 306.7 511.1 306.7 306.7 511.1 460 460 511.1 460 306.7 460 511.1 306.7 306.7 460 255.6 817.8 562.2 511.1 511.1 460 421.7 408.9 332.2 536.7 460 664.4 463.9 485.6] endobj 31 0 obj [583 500 0 678 444 500 563] endobj 32 0 obj [786] endobj 33 0 obj << /Length 149 /Filter /FlateDecode >> stream x3135R0P0Bc3csCB.c46K$r9yr+p{E=}JJS ]  b<]00 @0?`d=0s@f d'n.WO@.sud endstream endobj 23 0 obj << /Type /Font /Subtype /Type3 /Name /F172 /FontMatrix [0.01204 0 0 0.01204 0 0] /FontBBox [ 5 5 36 37 ] /Resources << /ProcSet [ /PDF /ImageB ] >> /FirstChar 136 /LastChar 136 /Widths 34 0 R /Encoding 35 0 R /CharProcs 36 0 R >> endobj 34 0 obj [41.52 ] endobj 35 0 obj << /Type /Encoding /Differences [136/a136] >> endobj 36 0 obj << /a136 33 0 R >> endobj 37 0 obj [590.3 590.3 885.4 885.4 295.1 324.7 531.3 531.3 531.3 531.3 531.3 795.8 472.2 531.3 767.4 826.4 531.3 958.7 1076.8 826.4 295.1 295.1 531.3 885.4 531.3 885.4 826.4 295.1 413.2 413.2 531.3 826.4 295.1 354.2 295.1 531.3 531.3 531.3 531.3 531.3 531.3 531.3 531.3 531.3 531.3 531.3 295.1 295.1 295.1 826.4 501.7 501.7 826.4 795.8 752.1 767.4 811.1 722.6 693.1 833.5 795.8 382.6 545.5 825.4 663.6 972.9 795.8 826.4 722.6 826.4 781.6 590.3 767.4 795.8 795.8 1091 795.8 795.8 649.3 295.1 531.3 295.1 531.3 295.1 295.1 531.3 590.3 472.2 590.3 472.2 324.7 531.3 590.3 295.1 324.7 560.8 295.1 885.4 590.3 531.3 590.3 560.8 414.1 419.1 
413.2 590.3 560.8 767.4] endobj 38 0 obj [562.5 562.5 562.5 562.5 562.5 562.5 562.5 562.5 562.5 312.5 312.5 342.6 875 531.2 531.2 875 849.5 799.8 812.5 862.3 738.4 707.2 884.3 879.6 419 581 880.8 675.9 1067.1 879.6 844.9 768.5 844.9 839.1 625 782.4 864.6 849.5 1162 849.5 849.5 687.5 312.5 581 312.5 562.5 312.5 312.5 546.9 625 500 625 513.3 343.7 562.5 625 312.5 343.7 593.7 312.5 937.5 625 562.5 625 593.7 459.5 443.8 437.5 625 593.7 812.5 593.7] endobj 39 0 obj [645.8] endobj 40 0 obj [525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525 525] endobj 41 0 obj [833.3 555.6 500 555.6 527.8 427.8 394.4 390.3 555.6 527.8] endobj 42 0 obj [597.2 597.2 736.1 736.1 527.8 527.8 583.3 583.3 583.3 583.3 750 750 750 750 1044.4 1044.4 791.7 791.7 583.3 583.3 638.9 638.9 638.9 638.9 805.6 805.6 805.6 805.6 1277.8 1277.8 811.1 811.1 875 875 666.7 666.7 666.7 666.7 666.7 666.7 888.9 888.9 888.9 888.9 888.9 888.9 888.9 666.7 875 875 875 875 611.1 611.1 833.3 1111.1 472.2 555.6 1111.1 1511.1 1111.1 1511.1 1111.1 1511.1 1055.6 944.5 472.2 833.3 833.3 833.3 833.3 833.3 1444.5 1277.8 555.6 1111.1 1111.1 1111.1 1111.1 1111.1 944.5 1277.8 555.6 1000 1444.5 555.6 1000 1444.5 472.2 472.2 527.8 527.8 527.8 527.8 666.7 666.7] endobj 43 0 obj [667] endobj 44 0 obj [550 589 632 551 529 738 621 534 360 738 559 634 566] endobj 46 0 obj [250 333 250 606 500 500 500 500 500 500 500 500 500 500 250 250 606 606 606 444 747 778 611 709 774 611 556 763 832 337 333 726 611 946 831 786 604 786 668 525 613 778 722 1000 667 667 667 333 606 333 606 500 278 500 553 444 611 479 333 556 582 291 234 556 291 883 582 546 601 560 395 424 326 603 565 834 516] endobj 47 0 obj [611 556 722 778 333 333 667 556 944 778 778 611 778 667 556 611 778 722 944 722 667 
667 333 606 333 606 500 278 444 463 407 500 389 278 500 500 278 278 444 278 778 556 444 500 463 389 389 333 556 500 722 500 500] endobj 48 0 obj [778 611 556 778 778 389 389 722 611 944 778 833 667 833 722 556 611 778 667 1000 722 611 667 333 606 333 606 500 278 556 537 444 556 444 333 500 556 333 333 556 333 833 556 556 556 537 389 444 389 556 556 833 500 556 500] endobj 49 0 obj [583.3 555.6 555.6 833.3 833.3 277.8 305.6 500 500 500 500 500 750 444.4 500 722.2 777.8 500 902.8 1013.9 777.8 277.8 277.8 500 833.3 500 833.3 777.8 277.8 388.9 388.9 500 777.8 277.8 333.3 277.8 500 500 500 500 500 500 500 500 500 500 500 277.8 277.8 277.8 777.8 472.2 472.2 777.8 750 708.3 722.2 763.9 680.6 652.8 784.7 750 361.1 513.9 777.8 625 916.7 750 777.8 680.6 777.8 736.1 555.6 722.2 750 750 1027.8 750 750 611.1 277.8 500 277.8 500 277.8 277.8 500 555.6 444.4 555.6 444.4 305.6 500 555.6 277.8 305.6 527.8 277.8 833.3 555.6 500 555.6 527.8 391.7 394.4 388.9 555.6 527.8 722.2 527.8 527.8 444.4 500] endobj 50 0 obj [777.8 277.8 777.8 500 777.8 500 777.8 777.8 777.8 777.8 777.8 777.8 777.8 1000 500 500 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 777.8 1000 1000 777.8 777.8 1000 1000 500 500 1000 1000 1000 777.8 1000 1000 611.1 611.1 1000 1000 1000 777.8 275 1000 666.7 666.7 888.9 888.9 0 0 555.6 555.6 666.7 500 722.2 722.2 777.8 777.8 611.1 798.5 656.8 526.5 771.4 527.8 718.7 594.9 844.5 544.5 677.8 761.9 689.7 1200.9 820.5 796.1 695.6 816.7 847.5 605.6 544.6 625.8 612.8 987.8 713.3 668.3 724.7 666.7 666.7 666.7 666.7 666.7 611.1 611.1 444.4 444.4 444.4 444.4 500 500 388.9 388.9 277.8] endobj 51 0 obj [272 326.4 272 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 489.6 272 272 272 761.6 462.4 462.4 761.6 734 693.4 707.2 747.8 666.2 639 768.3 734 353.2 503 761.2 611.8 897.2 734 761.6 666.2 761.6 720.6 544 707.2 734 734 1006 734 734 598.4 272 489.6 272 489.6 272 272 489.6 544 435.2 544 435.2 299.2 489.6 544 272 299.2 516.8 272 816 544 489.6 544 516.8 
380.8 386.2 380.8 544] endobj 52 0 obj [706.6 628.2 602.1 726.3 693.3 327.6 471.5 719.4 576 850 693.3 719.8 628.2 719.8 680.5 510.9 667.6 693.3 693.3 954.5 693.3 693.3 563.1 249.6 458.6 249.6 458.6 249.6 249.6 458.6 510.9 406.4 510.9 406.4 275.8 458.6 510.9 249.6 275.8 484.7 249.6 772.1 510.9 458.6 510.9 484.7 354.1 359.4 354.1 510.9 484.7] endobj 53 0 obj << /Length1 1437 /Length2 6866 /Length3 0 /Length 7839 /Filter /FlateDecode >> stream xڍwTSҶH.t{B@H &"EAA@@@;Ez{Z߷Z9{f=白NXxl6Pe$4A? /?!+> &d5`H (()& b"Q"f hHԍU⍂;1k C8ow@A@v:cv]v;"Q܀' Bݠ(-`2^BV@GC{QP Pa E=5 @: w!~;!  Cv08RE{0 wCb`l (`LA\nn0~V ECA!cY'`C*݅su)!~~~au^_]՘ |\.y=! hjC':F #cyf_ϿWz"pOIDUKO#aG@c,F +!쐀؟l1="_3#$P?$7`~@gv1W;0_ iјDbP#蟡Gmۦc@a!ܔa^P[m+f C@n_ /f N C&(fn5`B{bZfm^) "h )C/>DCME>?RPM!L_PB81H; n8)Ya]2J@5Hx8:Kj%^Pb?d٨!A*Nwh U@ކ\g;<˾NuXM꬯\EIs(N!E*v e]EVw`OO/⍱l[6on4qĚu\]'"C[b3ņI ah|8wgΰ4A:n/=]42⋀('ыFHzIhJ 肳~oC} żʳ7O#In{r팞;b=xIӽWʛlbY;Dlfa.0nu%Sڦcª=~$\e5O-4! ppc8^uQ>]Abra,8/5ӘnmaƄس&hZ .HG7Όyw*&k#F1d0`Qh XOx#iW-ly-2( %bIOS~zVX}#^_*fwJk9/W7eN#}A/9oDN'sԹvYV΃/?tK^.~vt4;9EFxmƼTGCn*_S8FX씲'w|֒ZƊ@Њk#OƆ=qGi3DR2Ɠ|r($3 =;`{(z2:ݳ#JaJlqFS܁̩[,wsR^a_ xcM ;֝E˔M6@nVtkK IUGYtWCt8\ܱ{ lnyX橎]<'3>U|*QPBIXzK]z2PUs#zF30^_(5=N1VF~4kvv5(\!2["W8g,iA&kj$Bj6je2o (_5B1/>0gVl\U ˕;"+Q|Q!/{6ɧ)bԦkꏻy=|ഏ;vbj&s$e U'eg ϯR_F= ЈG)$lVsh||sSqˠX*Ȯ뷞լΫG9xҶ$![_ܸӥ U0>2Ts'9'a@Gni#~?8nrRAz}B/!>0lK&C+C9uW8v.3&Hʶ6p?O^jL$DoOyx:'m8XFO@ػ-1hsGݛ\6|a<d1[q?]O0⴪_bc,З,}KC㤙K{7<17n)<~a;uRH̿_A YM6H}<)u pFWF49svƑ!&`sJq**>-wh&e{U7x2-\T;١y4/jQq_Xi~a5O…ppt-–w7qUg x@@5Wɼ7Tٽ1^rE Tif Xzq i9?Rm~PiX<2s"v9uZNVgFwey Rp@2 xl+Y+ݱh DB ?wy,^NN>ӇpXOu9t$ܴZC|J ƌgr`ݤJpf\~ʹp+}b`â̩dCǝٙ_ F/VɣLj iR9[3dSQ$KIRN0(m)P,xE.'cnmB HK%ݵ\]+zme ol[ Ƴd& /}mHz`@OmTo٬&,?o85гFD/OT5l罺cƉ/xH@ ⦠TDboO%Ku]JȈ 'qn5C}j-zXMTc}I§d"=>'윾}]ĻYVĈV`,;5xqgKbbEd/oïQ >)#m?Zs8Q-me.Y2Z~CE$)gEsKɗ̋uGpȄs@J= 'E'vcjXwN.:;K^9= /BH,Ң7kU[-7\f_DuOHp](W,”(?uJvl+$%cm[H[8|_\m&Gn= g<{O@o~լx6ynin-Oe'±o"='%8D41p\)*1g]n-%w>.ұk&[ZTB}7a>"YNYȍԑmm-G--΢EF(W2Յ=+CG[n5<{<)|W}[1—2摃xuMLEٟf)u: _8 J"rw%IF&QmϿ.xo >a|Kي1F_/fZ kT 9Eӥfo ~InJ3W Ntա/ 
6Q\35Gխk)&~MFgExQVp 7V7 ׀iȟ>f/9"# ~xԀ;kЅƯU0?Q|%1ae}adp5o+ܹ-6Ks7Adž~GaV딇~Q'vyѻ}^OhbP֭ѵgӧ>;:59gG}f-{PxeM!2(tߖ3FL#]^Lg"m0]7ٙ2rǩbİ{gGHͬYyq;19W:Ҧ&`n6 1R!jj4 8gU-G~mױpDƾH$~+!@Y%I^śk!#̓?Ƚ#Q%,[/V' ZfPZi oW7f5aV/Y7vӨG蛿qk<^gt0<XN)؃Nly춖jo7I,0ef`uB˽Cv)WPxN%2@Xb2,BĿ#LR8 ɣuQFGNnV3蘭FCei/\*n0N}#K66+gՙ8'*ptaM6\yE|szU*#O⑇ܭ́~S̵O*NtW%J8#{R ;i9'NQ-G/,h5xsZf7xިx\ WٖP+ >]u'/;Y^wmNFY ڻ驼=. ֺ;6Ǯ1TzE{;hohPiwƧz2daZz e֩9u5Nd`pX8BA)̷8M7AFYMA˫;{NA1C<#[k(DU^1,@ZEV SG=ɸ\ZxR̾u: mM.e@VsS'ojh0s6)bٺ=^$ޭ1y:9| .d;P &D}ʢ7hxMU ;_2i!Θ<z6ȣ?!L/q|ꔰy VRl.-9ӁŞw bҽ.wf U! ô0a.y_Ixd_?feך!7? djUa*z#V+*"ͫ:߮]ͻ~ G[%Z(ۛq0.U!<&3}( Zn)`!IڐBO#gW\?^8 #+94pvwT:5+ݓ=wYp\4&>S vzD% w~&mQfM<fm8tm|uJḂx<,TJc%\ޏ,$ا巤\>h? vUx *ys_A;)ڀh?_'`)Z?zq܅L}/8g67p\ V\.U/bϐ%˅2{CMvNI}M3a+U⍦syrRu_fy,ɎyEfT[MyT:*՞z$Wv:t׫:B#60fe|IYUqAXYneVͳ]d)ewq-I.К $̖5N#Ym؉7׸Qوɷp`")rc#TBD}ϭ9'f!ӯX1G%`}/lKF7dsl'"K)e߮'8X^Ri7 |.g|zw}RD4얀d۱gOT/^c8F=i'X8ZJbTe9$\>' %v@2XT|'{{NW?(%rؤe(<<EfDjk&rD:8`A>MOǕ8_;ox#?E1"I/*ucq2N g`NjEY޼iuH-JsgX]_d_ȝjسxY_:n ,l;:^d*wRiuBGm!7 endstream endobj 54 0 obj << /Type /FontDescriptor /FontName /ESYHOE+CMB10 /Flags 4 /FontBBox [-62 -250 1011 750] /Ascent 694 /CapHeight 686 /Descent -194 /ItalicAngle 0 /StemV 108 /XHeight 444 /CharSet (/m/n/o/r/t/v) /FontFile 53 0 R >> endobj 55 0 obj << /Length1 1728 /Length2 10556 /Length3 0 /Length 11658 /Filter /FlateDecode >> stream xڍT.{.݊C$)P(^R)^bSZ;3Zﭬvη#. sxDrF@^/# ;AEŐA6y#Qz8|(PH#" . yBl\U(sC/Ś# CAPXЅYC>EEˋ Ir   r5#@Cf'5*.`d? ߉ ?A0gB\7{y N G2f qA~;1+@m``!ppuP?= >Vhfv =z>.?\`.1[ p{ol +OG3Oxp7Q~@f:C㊹+Ȱ5NYY7S+|"!@AX- g=_ \G,ݔG ,?BT;Uۑ~? ?Q[{R ,}*]9}7E7F nm\^4's~Z@=n?\ Pk-pϣx~u{b7x.` cQ! [O V ?"[o$"V [ncVA~/_1? 
> O܏]  vx^N‹s{\3a+"=MJ&u#˥Ot۱&'d0)9w8""v2zjx}R29S+}_1R?4t1*g͝ ى7g4ر+3^M]u!c 3&B|6:'J2WVWbKe>,ىrG۽(^121Bo"W5h(;亂E,^t'ТMk'o\xToTfou%Z^^ڼYAH4sCC^|w~ ']HB7JP&z/%&(A@{\14÷5%)_N!9[S}|{`&d.s+>覆`SaJrUEQKϫ[Pg/Ghb(,PmTSLewQzE%:0U>oS+k?R1ylWS9?TpNwr Pgi }k UiM ϖl6vpe_0Ȟ˫t 뾁!k醡&\1e^32RgNn5"+|lu}6溭֟ZxP$_im&izX%<fC>hę6Р`},gBWx\E滭-4;N;1zڻГ4%+xBڗzO:yY{/RbEk e ̆D3|h1{Fq65 ,<]RPs&Bd 4C<2S8p9Ԏ1V{YvhB?ؾ {r]lm&1L1lN,mtw= A $H}@B,r/8$s͹mmga񵁼1gmڇ%4Z@=⓹@WU!l&!]>t6 E>a>lhND2P5PvchfnѓRL\I5\O_-G5碅_֫Y=װ,[!VXT~U-AڔA% F`W%=D%s|J#.ɡXc^9/Yj +IR.'E8LZ:MGsdv<ZOҸhtUj"aLxSy/ ,u1U-FHBKORM:y.xjm \6ZUBeW3ŒQ>!i[uCM[( FNIJ!  o.o/bdDCvX/qOE ]/j6!Њ !oT>VmH 9@EvO. 3g寳cbdH3I1fϓ[9%Cު;HhW:U膁=BA'_n}\N2 Nc}J%x5+gNǃR{'S)L/N:vzb֢ !RBL;jl+¨scērq$5 V]ݐKrZԃK~|WCރ*vn5NCJCt#Aœs&R~I `^OZ\b3 {O Ax c-Mf>Z?E56ei.o+eSH3Z[1țaב gUZ=3/ڇM7*d jF$`p~)kMa>ˣM5 Аq {U̟r*RŨz8n i ]tBoպ>c#]ﶟ}\y;;K*d%IIɗƐJFX z6֛#OҾY!&ޓQw蕼_1țj4<Q^8~1P*57JZ$2x1JKU؜!ښYAnCP߆WJ*;᳐OU-j~c60r%lTrH8'OZ+Y*ѿN[%:ȚnIquC!+#s^uL~R-g恣SD,Ƽ^3ΪT(kz< l.7[E詢VyGKhi=D۞*pJa"ᏏZhl~s7 5$FZ}Ý /:; ָ+K#EȞW < 0'5hQ "&7k@Cx)̜}XDO+▯v=Hysy-(ˊ7]^jT3F)>c[j1 {U6 /Q6l> <ƹdN9l_-}N9gw/R;._?&Ĭ鸝j {q唧=0[ՀodEŵ t0b`)z\7nz:0ƤijYG*lSUg 8Z^ " K?Pۍܐj)6|ϙUۣfKʞM/jb CYϖPک =EY-#|ե5S j܄Ts!Dgv M'VatQj0E$ywU[~%*d1e,_f`+Y)wCn b-ݲ'٣@ߝN26)1h,|)˕w|i \;PXta-2/5gnhwDY_22>uq_Ydyݐ ;Įa0A0 2]rceF_4&Y7Ϲ53}gvύ];˗h *! %r#zla}f?*vX/h i3f1!^-=\sjՊuȉߟgb[g$4EaEqm/t,Q?wODtk~32["drUYu{Ɉzo˧\lȊ )in?mӀJ-x>m-2ܩx[BXapgvsf #A /~ cCC1SYFvuFbOE6}ݘs ,[Vs1EЙg`UU_t0@2U#bR?%5dच1 ѝjb(q d&SoM:0.(#] {dP&rƗ@9O4ಝn0`P+_獚??8l>*$\Zy.j}螯xsS}ytwmn\ڂ޺!} XsЀ*ت8c(QT7uARV+JaHDS*X(xy^ʱO\kc6UNCH-l,+_P8fYB/[ \iTy0SH-dC~R]QF9 ɓ ^~<)oB[?3/B_^p 07c6|1< lJS3<S_-[=%A߃pKM U  1.e;r490Wz![ʦ>{ ۩GoN3det%q )\\ ْ>{ށr zGq;t\f,+TmE$ajPqg*N·=9JEKpfixmz}rEK>,?2!?ϯ|[W<? T GPH(v9򬎩h]1"B9cNN|G7"jY,#$hnѥCUY͓Z9M5/V_IB3(*/Kk61Oa 1ُE &-<=[1}gDzT{xhMWci@(\VPil*QR#eBB»7;`g{NN? dE$r{c_+īh^+aq~L },Jٲw*ũXKٵjGb@g@CG,B-x{ ._H iYGٶ&DNˑ|_BM֢|qha/~qLEw ^f,쵒+|FP5&m(22CD߷yf,خ*SCR!P*a3 V0Z=LUYׇ'^GkWEtSuщw<[fB+I̯_vTyEeX;l;B>ϼ`بABS^)P4 羛lgizFoܡ,vN3b/xglz4OI' i&%~yE|а=%B?d&,~\%KuRAj.8. 
݆m kѭ* эQՔ:⧃>IS=_|FAC$(68 5.+>8 hq"]a~LL(Gi1{n.x0qmA7He]]$'b%̪.ֹ!sn71m7|,ϩx.3/Xĸ=I@AcUr 07q"j K}%BA+ň}iN6&uNYshجL3bp)鉡]zϪCj?!&IlL!Ñ,yftۛWT/Bbh:ן/xz0* f"f7KGX-YH٦wΕ}Į37 Jg8{iw,:X@8:]'.񡫽z<9,B%Ų39@gZ曯2Eb=m U肄 Pܵn ʈ颙b_C U3"dG"oOjW jSd zO,`scRשO+ӶJw{7j z'J./Z&TPKۢq#TuJ~V |ҲY10`z+YxOM05/\PQٰ6H$6pcOJzyaW ,6hk>%i0+;㜝V iR=2<ňګ@E#T_Z*Ĭ<0H#(Y C(:,_eʘo]j.[M#vb'4 e=LKC"Y,!J,Z h(K`-:Z^VBg,0ps"Fw~IHQߑe]t_mŽ^*44}{\qMo2uj`RʎnCaQmnvVˉp.+JUp][`$O&[zc#H4~SCIٺs %"[Өo ]Z0462ڴyQKYUDǿ|j:nM J_^)LCgu݈b˯bj2CFI~C4#:ݎ&d:Ą`򳑹PFYԟwsA9ZH4*N޼wZ=wluLl?pFrZ1~Nr^0#su4>N+tG4zf=U:L)2$oվ8+93f=;7JHiPy#!Lnn*ɷkR1o[<%8X8'~`ڻMFHVx&ۜ^F(V{L̻ ='V3kk#i%Tp34$vwLKI#lsr+ߣ\q\i"mDn fx!lаxNr,8I3~&IÞrjTE|-\.Ok}PLc6 ݄f T߉NjMinT#9h'FxeHpDXe4ܤ!"S]r ]~l%.x~|U<{搑16ldSE,8wK@55UY[Å~6Ǭ*i>7/l7,NdEEt%vYfIL)o KS3eP8* ? H>J;=䞏<[Y7;\H7{uBN Y' ٪* 6[T WYv0!pSU~&vTɞ|h͵g:Oܺe5g,p?LSUC!n胧tL}h_J6O:eY|%2=D()m+-y J2 3*`Q,"&ұtJу*VWC6n~q4&Ry\ &ěiQTE#HB9 j]Vr˭ѪEJ]f~BuF=֞֬GIi`?$'}ρ]=oC!_U^īED%lR?$,j },#O%;`T9Ў/iʒ܅q_u# ilF H* he\;-R붕j G$H:q\!vռdk|#AF@{cFlPS$ \ ^IlO>"ۘͻ.3zF#&'Vؕg6/(#,{zL6 [CPZȈD>8wn4M%h_px3TUps)&8hc'Y2v/:?iYbVDW;1B =۫9 bbpv(R\0`ҕ3)McO{I O,$]D$֍(tŐƓl DEܪIů͎q9Jl3XK.ž*hG>fÐf[iWY׃3-Ǯ8Рނط&X]xQc=?K&VX??ۆ w}@`-fȒx`#h"~B期+FpWc)Z/c rNv Η :PE*߀ܧP! ^D9YҮF&5ket3m;CtJI .eWr=yrJpɊXx >J`,3wD?P\i9u#ř|HDxL }|9=`Y6OMP5#(dzV|8:O}A`xӞQerZ4bJx֓0Vt/ה'ʡ_ʅJ >H|k-jrbVySI tڻ5P$G۽A#UKdzbs+/Js-Ji9Aݫ-q3SuaZ쭟38>y`^^oȺ'צNY|7?co˰բVLHY=`Zw>W|Pp%-m[PRʼJɽ~A6ExCQ Tʚk8BTJ|Y96,5`'tx:D]-$ΆbEr4)=OбЪm&rg6^ouJ -|څ} ֣Y?@?Ӎf/Z:N)pίMVfU0]X3 ;:\ c_k|5f,v I/bY*AF \R f0{Qv]ABDX+I&AaKB#: zb=X`TX"c73TK(Ѻ#su2mzJm,;}CA|žұBE:8H͖.歺d?.n4 00z.DK$#7fá?~svVk C#ڵ_eWs55TҸ5Euji 0vkVlk`4@3)4$v8Fl21j 4L^q% ˮ+h3HJf(֌ݿ 0-ZH3KgG_xU5f'nVm@L\uUF+.6kwŎ:H6z @x.> endobj 57 0 obj << /Length1 1597 /Length2 7227 /Length3 0 /Length 8256 /Filter /FlateDecode >> stream xڍtT.)H5  1ҝ"RtJ" HI=x=޷{k֚ߟ71!Re]U @PP_PPˎfAxC01ΦBu0( IťB"R/ Ђ ޸lpy_`.PRR;@A0. 
y`(2H?ӛpA#7 qji ` 0;#@C`w.>0'p`"Ew?޿AaA`0  \P@_M`N o?9~)@wz!zn̪0'e'U =@~0'_m8x  | *8w&\ H J` z{ڀC!w?A _ 8AH# O;3#k;>g{0'8#?W,jJJp@OHRL Sߜ5ap_ _5 3Nn#(*M濢?xx9"zaIye7B>j"Aws jP vK1M1{C.>aw v{Ad?Sp_&$*!ܻk;w-d? sܵ p#p)N-qF*A]~%Ar PB p'4 䁄#L u Oy@ɿ#v>; qg`(Q t~|c6ͳ>? 2E|F*f /r(,0]6ažybvf8]ŗ 8|& [<4 wGoEb+|#AhPLvVeէ13[bxWU|ɦI6SlEy,| ܤDS'I >2i= K. ZJ\1OcÈ~B~=HklPers׭I8(>}ԚNiH)cO .>J }.^ݯ`FYI=$շs~OUTz"|a$~9o *c]gkLyTjW>Odg6.aE(bݦceԭo8ڕMWQ|z%7 fڎ_oT y& ;-;F v(B%?9nzznǹMb/bUZb+2u_tpsM]|4KH3&\{lX'GlW\:uBq+m`fS?#;ܬ(4^L4Mss( AX)mp9?ko6Q/ ;Űh4ѹ1(GhvoLjfFYMtn¹X'V"h:$&-|-Y21)Qfh .hy@߰VV0#}[jlWvq\ի؝R'O֏2?\ Zg mJ;2NN'dRN^yTNŕs_ Gfi[&Bj0&If5.9!O WH,F'Ϭ뚄/ZmB4/_b0G16TfsMݵՏ?ZJ"?ǪyPǁj#7tmPNvsChD5й{}bckǡ÷Y>q!OJU59t>&;^8WZwvэ.m.!g,ke5hΛT#Z%y;7Q5xĝK._^B}=ѝ^Ff֔3=4ԞNFwwH9SU[,EpV '\S~Kgye=ZX+kDR NkO{1fbB߱ɰMV| %rd`ӥY1\_6y|Ѕ*G .\`zT<{h*x\wn& OOmd!k艹p%hP\һ,(dt۳W)PQՋFc(3c^z5Alrk\&]_2YJ 3_(p̈mб`;$ EK ?1FJf}Y?UVp-p'[8&5?1^/w]bs&PIfY%4E>w` ON=RZ77g`g[GmE'Q}! }:~:;?$&3HI <8аe%$m( T[0d:~q>!C@Or6J}2܄{E]\Pq$wv7tp(g ف >ʡSxkC6|<' \. hځ'a[ݭ8v'iLFΩ{-76uEPY#R߬ >nJѻ8u\ug ~Ь :YGk#Jf]d~kWy'dүb+vwiu/E-$ͺ9{YL!t!džVUW_qk^voX:+ƞҝY[KqRg)3H(DIթ ꊓ$cG-w @S'} ڳQJo1 @#W}7hJ Qav eƬ|j6iNiv{z5OV".23mTv\Xt)Q0VGF!ZbhTK (zͯ<X'B,;ϭX6c?qGJIP:ENX$$ YVj} P^ %drUpfHb`ufq)偫ȩf)1NsH?ĘmL4rGCLqCrIS",/]}duXcRnMxebDvF my&*I͉@`s]琚|<&Ldcu#F =ռY Cb{ϫeeb6 ={HSI\Lo]$}A>Z0ǿ}tceIn}@_3ϸ'uPm̴Ilj'OzHOâFg>o#1q#KշruBN#hL20 sW[Q$be7OP͌WO?$s9xѠd4&HHӃbhnlsáB3B`% `B][YqAe5!&Y-vDg&Cc{&8a }))yӔЊN@i5h"8U oe\^Ð#qQgT!?"XʃFi>ۗrTd>IVu;ۏnC7l ճu=o:6A-F1<tڅ_f GWIh"\ttގnEN,._yY0wkO -i)JzD /!D~|~&(&0.%Ɨh$O?':E1 2mZ~fr f|t]n6ΩSтw 0I?-*.a+N|Df˰=+UR̯ҮMɣs/Z$ՏS$Se\ȈDuB28uH%ީzUM]]ZNaixˈ8bC.9atU{x+yZ2eT"/k*3`v?UiWRUq&TFQK 5Ry"۵au]llU+}_וN@Ւ3F9%N}?%phX5icAE${2h}@)B'=VN~8vJ 5~S3B)jQmO.$| `Ѳcnfdp ?頃?w9'B0˷ U\TI1s4APNO9dTYp; 5W %v~_MBE=qzIJ=9Ý/G'8cfQ|/>mqp_eKq]lnb}h1 F,ˍS~^ [xPo?" 
tKntʈz#k;aW(AHγ Xq'Wwh]3նfMSQ{τIU8̭ȑtfs=$UV̉ r?Rt*?ς*"7]/2(f6/F.ꐀTA 29c; §qg@/SߒdHVmN&}VQ#vqywO7(HF{w)8a`$z6m0ft:ו `ٵw&@£5;Zp":hl#~ sO{ Pd@fGF)Wʥt^E(iO΍5fO]Zub,Li՝cCB,Y,hm{3&\HGDzk}&>)ŃxY;}&3x~ gAbdxP[8Dާhb) 򨉍+/=}KG8V 1II5r~ENʬp.%U3+ } 1=b᠍8'=;=Th̲GGL 3N\LP~Ӱ:o"l>iGÇ=L^So((%&fE Ζl`Z;}Tڐ!# endstream endobj 58 0 obj << /Type /FontDescriptor /FontName /BKFEXL+CMEX10 /Flags 4 /FontBBox [-24 -2960 1454 772] /Ascent 40 /CapHeight 0 /Descent -600 /ItalicAngle 0 /StemV 47 /XHeight 431 /CharSet (/braceleftBig/bracerightBig/bracketleftbigg/bracketrightbigg/integraltext/parenleftBig/parenleftbigg/parenrightBig/parenrightbigg) /FontFile 57 0 R >> endobj 59 0 obj << /Length1 2496 /Length2 21432 /Length3 0 /Length 22869 /Filter /FlateDecode >> stream xڌP\ݺ- C.Kpڸ܃=>;罪{fOƧsEE$j`rwe2Y̬lTTjV4@.V|;]2 cW=@d|6VV1tpH[ *qG/g+ KWpyК܌rځL Ʈ ;pDSc[(h,]]XX<<<\-V ; `ʘjV.U]=Adp792%G?0{&Pgvteۛ64uq[ 1@JT` .幘:[90X.7 ˒fvv {WIX9Lmbg6>Vf濋0ssdQrrH,B#8YYYy 'Ԓ7#_Jo1?GG99puvo̬L]& +{?`1 '@{@<")..-Oщ9x|8XLl%?7տWO6O^:s):zoߖ'$fk/5׿ K > c`M5AZʸh"e 2Sr5g[>eV e <-S^@!%M~16N.x` Ϣ_K `awp~O"[A?a/E?",`8,8?ET pRh1A/N09CwpslbImK/GK_`_YuY3 w-U a9\ßdG &skK{7ßyqh?Xf{GW?@l~M',jۿw''eqt5Dp99঻{!`oϿ /L Bf n2od8`b=JЃiw\pjW Ϣs l]eFкhP7ʶ$qS-lxsJˣS- 8Ǣ5}DLj"{N6PMT9Nn<(ywҞ5}ˣas*{\rO?b?Pdø2c\x\LcdO~f@;a/`t?Z҉OG u1:E#v,;S\ҿ$PH̘„|^i>ھ}t!I.i2t&W®l1tfht[-y k[4v<: 3݆4 5 owi Yx%-uV҉SK] ;5A2XhKIAbVVQ3O~u3\&)Fa]k4*Au3,8%^[ 2=߆􂼬y4*[8UvNłC{yon 4/*<"Ć>mqFtQJz=oBq&Nf&=ɃTFpQOP wQA*@'!{ OT{PmC /bjwyozmLmSVq69?B~&HZbո!ZDT>Z?}+FΊ%4~PPB&_Ȁ:e)@(Eֵ@h^6庄pWRO\2´(J,)y7պYi迶 ?Б}4 3=@{-0K5=6=2d.Y_5: ˤ(,j%3bwq7o{|a⸪(5&{&;g&\E H[xKq&z\FPV: "N dYK|R_Zk2ֆ,/rB+ꪪ҅q Gsuگx<8Ww,"s659˹@[Ծs>5wG2oJBtܼǡNnx+ӜWu^SݿgMN\SA^![|4݇k/qN4FΎ/x C[yEhWzX8B\GL: k%J&MuZ#2%Cy#Sn' hl(n_nt"Ѥ;Vv?_{->5'|>+r_7r+oxpo\")ޅEkp_0jm {9fzKJ7s"l Tz8%cu<\dw(Y|3|Au;;-$ zqu.C9UzP0OWUfI{ Ў>(aPMgvR+4hΆUTحP.~0J;MXO`1ɴmRd#Sg(9,_ʹ3҉HKѼm/O˛ ]j;&/| 1S4_>!{(rTf‰(sY2H]&ֳ{Pn*(OJcyZ{ F¶6L6SwokiD\RɩDVS~vιG 3;,;LxhLpù h`Cw R31b'xl>卯>mX=կS$/U`0q Y-Pa9_mFAĬaS6G|j-'!B. 
'uksF'[Z[o]K&?*bVJggG69TU2-'ƢB@i٧t{]p|sDY/J J%ڣy)e^D,7m ec~ +(}vK6zN{wfqn0ugk; jIA8@lv/柺9M[@QL$G f]|6DSkI"ϒHb*by*N?^'>4*䷱l lÕd͋ZQ$̩3 "Õ)*OA/.As@)ωuŋ:O %Lr} o"G]șaSox#)+āf"*^E},4{)~<@{Zo݇Y WN:Y\(T0rm [%a\Awk =ei\;{2rRu6&kG8FpM}>O[+{y _(?`:ۉ$au_YOm,vpH25G&.Nk ǬAdq1E%ӔML:c[FT[L$&Zzob`Mb߆ó~ ϴ@]^χhA!$/Yx,"qJkx_fCV:'&g4DI%!?HF){r =|_&B$gŹxwN|L|z֌%Wl4MWrŨu*RL$k''-Ɨ:0-l dsQP P.–.ӈ[ r =Tu]$Te§0a~0 ;uЄU bh~^zوҝQ3mjڧi</ J6PF gێwHoI=2Yhbʰھ@X=ueqd1f)έV*g*USk|rCXM;]X^4I'y/WHB'tW +D}$ >+ wh++<$c jusIQ2 &Yd9*7J6I%221EY$8% d]J.jxS^9ƋLܜ5Ӑcdq.~)0`i,qy;ta^;DU} "dM'ii!:7 8ՐMinB*wW<@4{E>*w3M1Y-EAo`& ?=@B7 ׫JLA~OOW|&TaYs^25=`Zγl38ts'i0JUd~5I)w* uյZ-PNb9a$%`̼PIg =Dj̈(=)Q1=4λ{悽Bˋ&Q,agWL0zo4V|@x ijʍߐ#W^zV?:=rlsv%! słka:ɸԠ'ŒoQ#N3|fM+N.LM"rPܱ*{.A~򳵪Q|楳vEݢsltkd3VjpH*~☞,<ݣŒ[E_uN}{uf#Y:)]%VweM@KmҲ) F5bˊ*'o]pW DLiy\oFŁaꎁIf2wzJG:쨏 ۡXd=ޅaԸ)f2F,~L.o۟QrcrL󫚅cDKi{qzWpk&J-LS­$9b.5!y^?mP,r+%Jޖ&IՇS!oHx}GT Oq#*j*un 5J7+DSD !~[or] maSehBm #ٌ0fE GTJ2V1T+v3{>/w*[;(8}o}_^@..M fkQ:T\`G*C-կq5ŋ'<߱ĉBCmJWU Bs?8~3E!-y94#Nr`&V g?uwy\=y\,(5;پbe;n1BR 5׀*.Z9bG3צ甛7^|$Y@j5 Pe*SN3q9g12I2CM},PvyZoF$}To\"D(tdżTqO-CJO7_iM(U3Gu6abLduy0X_+ϹbgGͽ.h@zܹ/W*v,snEX 2ߑn#5RINֽXr 4hc<ѩf 4fW깕ract-e43$D" f#NQ' S{C_Fe3I D'3~Unf`q32.4LFX{?^?ihc۸crAS)72O}espuٕ ~k " >s\Mn!L{튌23 u $mS5J΀:gey.*L2܊ Y,{]tBo#ןNYU%?9( {ɰx{MO>XTq uE754 ~%nLʰ>88y È_ڎ `:*8㟛=崹%ebHTA/@y0K}.KCqGƤʼ,-6\[uȳ덡;(|:Ǩ+!431yr%2=Ecq^+lf ޹MFܬ0Ohs̥aUy&J? 
u|}stM}NcwIhI]ьPB'q-rPɼ* |1 إڽ{Zg.RVvǮ`z/0&I QQsqş3tcGj|H1U,C"ݐ}fl,0P`q5X0G`_za'ϧڒzv442-_$GSXtC&%DrV!yzV[IY7U_ nצέ ^%tU9A$Z%޲eN&z G},vJO,@ypf;Il𺥈My+?|]עx9ɫsC-,:ty)P_K^ }eK??&`Z)|UkL/oagk'~^oc\[ 焩!_#96H7\[1[ >OU\_R`8직Tv<_mW^>o>TnktQUrpoaIO~;lygB=.ԕ_K$,egd[:-2+wQ`B* ICd-86>vt[ G掂vT{85> )P [B SlEߴw]𲖄!lHn{Ҳ%tϩ'|{Ce㙱Ub#), )Z/12}U2jKI_yh{}jF\jG:΄rN#[Yq%tdc^_Qbalnr.o QN,5W2D֑`s^@SgMz~.77G =v(J'XVܙvy_QT:]~Z/Rr1qP%oɀҜc!z YxP 1|:f ߮Vҏj~ۯB_'tmv#Q$7"QHõ].Rbs#t)_+rB d&W \*1%ѡA&ڽ$zJ5h ,i4Se#IxN" my05[\A_~}38ưҞmÅx"rh_Qj/Wm?vB=i,$+kĮ }CyМ?8e{N@zRK'Mݟ&X[:NYޣVacfJ+GS"Mk7v^)N,C*lzo}KhSǧdNwv,uyW&|=l_ֻJOE]G]]khHگOX9P_d8Bqj&('{ٲ}0e_ap : ot&J }rw;UCۑV>AnT^Z(\癏qPLoQ/Y㐣& nb ]8WVOHBKndz5&fOmnXAM- JQ*;x' Ye0%L9} [\.* evVش}}MIPȲnDu $T . JRMac'ԅ X jO{v*I,ƺNAB:liXhTvtY X*uI v!+MtZЪ9ise]q&@ kE7ܮܖt7EJjKdc˚y*)yV,.DBN~5 ǹaaqSrL7 MX_`z]E`\͏elC7|T݄QA# %Ntkm.>ͪ..bThr[G[jQlc=Ve);bL{֝ ]_褱I_itq:CNę74[I?^]^,c ` Ʃ^'Pʟțo }OFǂiKYP6Om^Ѳ7j6K)#L[7!hϣuiC蒼ŗҥ0=37v͏HM)+RBˑxOj豏h)WDm4/uOt Nc r |;gt,TYf: J[Lr^cmߕ,o #曲׫]e1Y}Bw~$1Aq/ @AW!5Qd,RmيYd֪®GU|mÃ/3aW{K3N{W>fƬ ۜӈR/2=5an >pC! MWX~wVt.EZ)>SjJL(|SUG%lum \*c4`'_yiP𼟹2Ff0֔)*B@^X\no'7e'5IBI2#VH @vN~k`nXԸ")Q^q䂂F_@-DC$q\RꍉV/zQbЈ(}B~i/d]XڣKBL Ng0Op3ޣ#eH@mFw[)I(KUl^F?JE6(VoUи* n[d[`t]%; 8 sgj$9m?ß7ĝ(էȭՆRtm~|aqYeZE>\S-Fxg1\~sV&6w_Iܾdlj!Ie@V-~DY?J7" !;/}V6lR'HxOps{.n&ri@Z}ڹ S_,G|p{C ½t6*v3(*PDU#IGr' z U~<&c|ZV1*V6dٷHH F.DHfsϾe&T_bE3DSNU]2*̜vOndy9Tt,t~:5+4zIgEh$- .z -y'98$qg(jJpe}f'o[rXyY7xhM[|2'K?9ܸ,fYF'ӋЕ&@İ0wjmT^Fq7oDF>+(H?9e.eVUБ@M "K?8]Atnc^CX;(h/:sͺ} zw EuN?Vt l9fcq3>zd_R7 f-:&G<̦ć_/Wa\MCEOMN EԆS^٥'vʛ{vŐ~3'KA$ҡ8++pC 9t3? ?ItH =9ekL7 uj]w4He=gM7͜9X?dƸWzV}lpĥk؆ CoDM^{Lءa0%'|,ϢtѴb}XuU4ɺ8|(3Üzsڙ)uHx%_geL wjaNmjZ"Oy?9BUs7~%K]9ϦbǨmxF-f,5@W .kHQ-0>jd.ϒ;)˧?2CݸLyk/Ea-;L{[ {ETXeذ7G}d,X^tuA3/v Q rkWZyk*aRVzPBU}[D ' k PO\n^ 6rbDO\o"x,EcQ~gx ;Z;YWTvc7;2TљmP 7IOdIiw-۵8>uƪY,yRڄ( %G6~b":SAOPxj;KPX'Q˄9OI0dv}wh>>t8nͨ_F$qtXLJJk#TiGpP}řS5ϞPFߞpNsA[@ĔOc˱ ~]j3Ym+Я:HVZs >PдЦW}"UZzP.ÐDQ7ˊfڒK› c>ũ5xX& 6j[9-_e[$z銦ݯjNe1m>'bB#ݛ ? | x,tG/kET!gRI N @ H:..+ZOt;<+Ju^R90l' &ݙ//P? 
q6(B;WI){X4Ian5龡b!IƘq[1An|WuF֗g-ҟQaLA\ّˑ} @swv( \,&" ln]?6`}4hq8v(lp,SsW./o4j'G< K񢕴"JIҺu"VEYgGrE~×X%j\d-KaF'~]'̓z $:5FkSSBkk/'jWLLX~"`.e*8/d!1'N{\"KMXO<-91[BQ"j 2j7m:x/6!=eo=Wjf"v Ֆ0(D .7d*:e:*O:tw[QORbȍ=]_O˧ͽ\g#D"YNh B/x g_E*:;N k)u֕1-.;LR83R259ghG$ ޥzsП>p7ؕKKa.CwMP1ɱ}Ȱ]:‡E~/qZ_"]ۉպLnV+Yl|ȧ׍T$uAKA;n<|}+K! zƨy'gXqVQ$7P6N¸i[f gaoFM1 Z65}$or0Q)zR~Ecvy#ﺦQw=ϛoVto듂E{8\#g7}v 19#^a6VPIw:I8RaJne~,Df[HT%6ajV\bN@`%a^ct4ėӒ 73N%4k.@$ݱƎf5&n F=l+0dS+!Aݷƽ*8 o I5bҩgwV8[tͩ=7V,wFOX_KV"$i=sjesvv0FputECEUg6.AꗻtBhjh5r@axr:>JnGDW*N52+0bOEq,/ (8y;n5cAP۳ _1/qv7tFmԎYRժ$60Æӥˢ i1hCk|r7Ԍd`ߑw%[[\CbsǍ`=kUw*߷6Mbfܼ.JzH6U{6b2Sܸ# j2r#˒CC9J?%򢸡UIn %㱩%v?Ч2 @#ph<ѳ(/(NɥL .& k˨?snEiok~j#8gѮhvn⧛qɾ!:;I@&!vNqAZZAN]R&N bsw PḎ<((?TC=#|0IE@-Xm?|'4!~/qs hH~M)_Q^Ay1pIlc&. 6 }]BqI%5D/zț"]'.nMx{,lbUL5L2)sڶ4=RVl^-Q4Bls p.f&X3̙"@; SwiGb"5yqigG9Yr@! 2C1QqCՃ[OPP a Iަ^bI|$ "E_3QpBG%b̄ C9w(i:n/NѸh}!U8)k.G$!tǵH'n"VkCe#Re ee?Uc//WU@ GEG{a)e &n^GlD b~Q9iufWr'L0̖ 7zUsWWxmZ rOM+<+qOoLER(Hu\h w0(CM҄>hJ\Nj@BOMb,@mG=MD[c~RyvT9=z ྾9v=ɔN<ƻ;y ֽT-̹#0:l$BGZ~cf&+0ƣwqGpae@*-6ެ 4.:vȗBZjze,ө lS1-Z;n0nohf2Eh ahFp5ZQA2+(F[[0r(ad䜦'Y :`nE᲼Rdde}";Tb%y$Vf4^4SpVo:kVIZ1>ix_:!@f|BUՕT5 L3S3zb͔h Ȧ@iԭ(jKsT}=RSN0gi6=*Qc&١fFCq4h(=GC:w/xȆWPVg qFE{?SJU =hr'|DHe2Xl RCpaռ/M.k3&)0oq73xKV(`HXɐٟzi!m{TK۠5.IhD¦~]c~.$'`|<_av:O¯ґes8ud1sz->u**k"'+.&шbN^qAt[_*<ժ*LY2am}}`1}\y%2'xsT,Wuij$ $˴F&TYYA +{/z:>83=#DhMUAIZ.jYKAŜ $֏PdQ58͕~]'"~k>y,;(']52 ёB^:b(Bi@N#3ޔ)U[>W}-,6+ۥ}qjG2nh$lW= E20{_ mqKK! ŅL#)Gcbw(?ڧ>|&,`)ZX{-AEWPPF2諦*fM٣sW Mm PGL/o?SܵVEE*xgODc@^Ց}P)s/ܿ}Hk\ QrgTJ!boX ׸?c w<1׸w3s"Q qtU.qFs u"KTql|8vο.de%g2C;pWMlr7<͞ ;w! 
܎ ^ UrjY:ࣺuR ֠=M_Zٍݘ]k^a Fւ3m6:\dW0Gp) F^78$:wN3CQV~k ]xMcTEUpZ.S~ ʳ?GxvZǮ0w#!윳x& 2gӵ_LO^Xked8pw!qXm {<|G3cJ:iT}&ۺuk/l7L|jG/\ƈт:;DbvŹLX</n4(p,xSNdkROS;KgRk<Ypt,-?_.<%tDyu7#ݤ I=-Ȇ?+[a U _,u+ /"`V0}nqU3cih v:RU6}x_=ŽQ=p;tM$x`@yc +?5|"V]Kǯ6\Ba6nC1/T*\(*>8ЕgDFoWy6ߒ7Sˆat5ArԚt{!?C fd2u>:"ܱ`+#Z];͒)ϩ* 1bGɝS2mhunqlRzWGWC]*])?# GMqVF:z-r 3@IE> T2N.YW13?n(6Y],Ԝqkw;Xu,4Aens1 ݳ=&?C[4TX'ыH"*SZg0Zy5kO~Q䭛GEe9wd8,ݖM{ G("*3] SV֍|hZ,y8t'N ";ƭ'00ܸQ1& z4Ш Z؁5^NǺ-W)v[Łj\XyJ-,TˆQhXm_{|=ۥ:;l *'.@]?WoW}q(ޚ@;SiLxyI>bHL(KFx4\ m0-K$h=ߋv!PŗOkU~!&?PQl|M`>Fۙ:iR]̼rv֔y^&I -m[2ڦގdG}U4 }jPҠG($+&rg?i~Ϲ̣<=s](oSVgM,,&H6gI+TNT(]=5T;\T*9TnEpK4s~m4U\lǬqϏ(ymH@W+Omp@Rx+i\g:,Bc$A^P-W qBPʃ %K|]sP}\"!d:=趡_T= ~.֤>5rovl~4c$Po0^c}(O4ZAt+DTN1:q&*ʊ-iw|O4S7Ⱦg[C(J$j*%NT аLߝ^`&!s7=]H{b,5CD&2G;P#3>9H~3q!؈Jƪ\4:ṰtǪL +? їCl.(5uL^ÖQn& n M)mY ׊'B{Ц$u|鱘f[Gukq{.-??h܉] .5EZozQ }eU f li'=/Mߤsf(Q4FJ,ҐoFU / ^$7*O$9r#˳B$쟯Ch.XntEA+j GE7Ԇ- >ỲUșfRXN=DѦUbHLR__6VT􃿗%4Z D-Ŷp(-nS "g!y=8Bf0 uQv2/+M<]b.I%i:b>d]"=ŖZ'I^9{6k=9ITEoHwd15a! .i Ē?|LB~9ӗUڇ{w8fJXƼUybZR,%ǂ-O='%E%]B@pyܞO>#*Q2:]Mprp)(ILi;,^rpIɋ_5AhU1ak˵VK(%@Y|,u k \:υ#{P!bڭ2o Oe _p]xr3g;uBtm&}~ u,{p3c( IhBfaE̖)U#mN]T]{XG%ܐh=NK\A ^PcRYJC I4b+@ҮjUq_-';$X딸oғss@^X,iڹ=CInj" 3LP ({g`qB@;R!&;|#jӕd<WZu/|.G3{ji3'g>΂eK|sQp]Z% %`Õ}F[z {|i=Z$5GW*4FuI OMT=$r ;9 G-&3@ yi^EHn!*oIf> endobj 61 0 obj << /Length1 1671 /Length2 9612 /Length3 0 /Length 10697 /Filter /FlateDecode >> stream xڍT-˧& ~PK puvo^C0?!;C=F'q?3~?˩/'gsv^;7@HP w 0**,BtK)3̀Υ$Z͟Ggi wf`{_Ous}ҿUΪB,n*@ fe5B]䡞 bӮ{0 `{,sۧIP#`7vv{zZE pr\BO,O)pjz#'8 ד:9!N˿!SfK'տSBNA!? 
S~O%?l:w'S)?kǶ< ?񄘣/;4v]I{N1,9wFMf ^wJ|u[Rr稽 5#Iin'dёT%sn-b;|2} FGg@X^- ]4{N̫Ys$/P\)ę˝|VN`E;)1^v!#5$D#LSV<:G~|>̄~R@Mzb})M" oUuO3.lGu;F>nmn>*a>KȚ0A ȀYk5tKՐAse-5mUaSeHm;{.&buɹo)ݮɑbi2y50Lv03k;<Я# *t*jpQݴ"Dl)Lm ~T{` /8׫x>ĸ(Qa%f0G=L~PSpJ wyG(rD`ż:i/.b6@Tϣ́FoaQ]PRR/K6"UǯT\AZu~oKzT- >(srSn ìSOoB(ȅc'2h}& DP'~I沛(#{b NNZ NzT3F8K`^T w;-܉ އ|[(T(˜3VaԏYEA'vuKwIG(Wg> ?>1}#A?ң'X!&ͪ5D1P Rԫ Mg#XƮ$Vu3 Av@mdRD;b̳&[9"UXj=Xαb(zJ6|3g"uk Gf63и֛r㩨 7u.24|uڰt,TfS1tѿV](R5$#{, brFSS ($C "NN%oDݕc>I bH>2%RtqSL[3:VMoz&7HUN 6iyۜ?{{A{kbi+ye:K-cgb2$$yM57C59ɜ@h]i'GVV$B`H37Hz&e~WpMwe5$&*LM9eDPO;@;ӞA\mg:IUXtۄ 兇eHE1J۪ݨ8Y;=f[VzKFE4\<j[VI͜cf?%C5U*cJc4)I%X4ߑ~AI+uVaiG5I?RzRYj(=KܤO=w&ދQ޵mFB$5=03&΂=]f@dF3iJP^V̀bYk[ӡ ^?k>HEH~jb6VT~ƍbI\FLp5CeZO0ڨv3]NVV^hhkV%8Zo1EMM}g꼤{Pd\ZI4x}O@awJgެԽ@hd~S(pεCYdAИpvXQBA+R1msqԠ ,g?5gu94xzm[5;nNrSfY1:Ǧ#cLmqhs]t$;˘um3k #301j͈$6Nѯjo?.Q~S1ȯem71(g86|OO\Bԩgr`%PAiT {I^_y:˕c6RàB:vCױoq<~ӵ9rƔ 5oS|g͖4-yf̺ؒ>ZfǻttP1-S׽"_ݙ*z)]W9GﴍґH8 SMP0|)\|aUF!1k8"(7=D%šP|Y $P)Խݧd֭(‹bZ**טU5cŔg8S\7CΆwPt{Or$Z̟`_`ʬ 4 -!VR61L`[,&#4~9gߺU*'Z e@;~s>c9Bרig+ zd\R/ac\a ^uv"5M1!b+>oz&cLȞAM9rTuˮr/dGC%EuWfSwœ |A|a"O=Г$=l|th,;}tjPVL*7K9Sуvr91;Qt;kK/J΂QJ`jŵܝxLޝBw@c9+F \Xʨy=:&X?]h+2?H!jiu,N#1M_gn4q_ݪ)Ь%> Ɓ^A`!X):=PӲ# -5IfFk˗;.I~J$v>F#|`69clIQM H\c]8⥴[G`NAdV,З`_sС"&sDӲg&K]Gsgpz|o ~jX1\g?'>[Eim5ڄUv=ǴL}~7E&AI5F;:1~PՒg1<%ZRl"O rlYX%#!wz+ l>~VڮCIYl)"*l,mcsq,C(jHϬpG1St8ӧ&73޾cMۺJ۵,녡 Q̿,f=jXvUqFޜp`B#$v -H(A%PE&^k 0{Ѷw{#x_γeg^Xd%c5m5jۆGqЛ_}@HώiqSrPmv J >yLp,m+  L/`lU_6o#$O?CMe}HFRxL˥S3z@a|r%Q+W!I=QHbb GRКcЦ53{BMȲe1iX'yč:HaZKbx-a!Z]l26Cӗ>kLP"ZSϩՀ0ALuLOhijy+ͪbqbNM2'bPM-XrWjk˜B_j*_p1lΈvzI~AΎnd@7&9]xMq)o%HWՙ͂&gG ,:=kOp@dU[׮5y|QCxX 'Ƣ6tm i[8+TI{h`l!`. 
#H+uX}t«=ٯi Ծ;.-vٽ]uΠb698 ֝Sk>G+܁K'k&G$JYlit} 66\õh5@ޓ%4L.~'vٽ cL, BiG/lCPϴyfgAXQϧ22m[}=Qk F*XђSf^VzΕ/-ֆU%Sr!n .% txHG G#PR4ck Os~6@n k`) b-q{\UC/ٌ9tH PwyZR<[5Ɣ:1WX0%VϿ*0P%҇ۓ^E:11Y~PcХ|52Ln"]ToZ,pin";Qx,Aٛ:JּMb).7CC؜)+BGNvjiIJ`iLhz]LZ>݄\:]+ێjDtuԂw۩G@ F6+iwo4Zx,K3FiMi0hZOy8{g*7uq (cl;^)i]`G'}<6ϖorA7a394Ekz_%rKOSr:5ܲOz_V9EH=y3$3Bߊ"?ë<,#u#JD^z"bXwR0vJkЦ8aRcF)WT%Z`D$dBCG&{CBѲzMq7.V왘 |3=q>-Is!!6c;3%77aQVRY$DXuϷR 9Y*NomwxMY΁7JqFn& ,/A80 W`{D'Kd;b9486Sy!.<~ 'M`9.A"y05jo) D!GD5A}O{a9~9!nE1KE*ĄM3r{[V ,U*#v\<{ĭy2Ҭxq&e{&{6v=<=5j-i>.7Y_Y_(x?.z.9g2W_ih=S1ؕ˲_h$]bKHCG ;r_MD"f$aNM(**ztEp9F!_r7҈QC>{}{@3ȉ_J=j}F9 ,8;\8 F;EwEeeLjP ދJ.A*D.'}h+$^2!>Va!N(z۷蛑dނ4tV~q{ZIRX"C^i'Nx /ū&jrKjCkjjb]kDm#R%vr!t ۫TDYϤqd.s\Rzgm~j p@4C .$ѿۆvġUanA_bZ&![,} .c6ӣdw9 /Gl|ͻbKwĈr Gfb( R~DoysK|{axm7A돱!EI' su4/^&^H͈ݺŝݜ (_7d 5m0Xq'8y^JoDj0FUV7< "xUSrzV+uVL7t=}]Q/+>dM*rkDaB?)A ̓덉(M(o*h4[4Nmwk7_ v6yneU'jiLDC80ӯ+{8K^k =4=fUb87d%L jf QJU?ɠp$ޢ7LyT Kr1NX|sZ =/m%ˆL)~TRj[Y8XX79BZIxh-FmO=$ẗ́v gtߵCmE$YO">{iKQw  u~Eo-^o1R@? j+/ Mv&{_Kqf}84]>)a&E JW5 /m-\LfzcT)xl~}:MZLU$* . 
y>p[̒ڐ0}6 QG0|3'G)v@OcZ=/+!y☸yb(񞾥D7aOBMS'.:œ 0dr؄Qrb$C>SeCЌfAٕ8Bة+)ЩlM5B=:NS,`gPƛ;#SNYiIYV{z6&1b۫86Fu< /v45g([ʁ6: ZUސ("$aG z4bp[SDaL+.dWΧNlK[SGɸ(C$dz~O${Qe‹~_ߑ4 'Ba~3㎲6pq XNU j32cр#WG{q5E/lf00d+Լa-QQEТ\z=p <*s>7h?Fw B 9zǽC]; ;9i+&]N0l(2[/4EgKRm,bse-̮9 $Cz"vV vɚ|1z?tnd\fiw [\9S|ބi@UNpLwU>onlQLR [cXHN9NC#:ŦvQ(Z}c'i^LYB<͢Ѳ{m9n_y(We7L$XMW" gb_4z\w`  S%Rڿ{ұkM}b.0,/ ;L\6QFÞ bVt2 {d"Ҧ3%lf*_]q̇e L\]:nDٙ@>$1V:49EݶW랈ؐ܏m: |ӤC{ϊ< n*J )|_b}+ XԤOJR u+Wl?>݀>.cDwvzMpG\w1٘ (j7F&J _|B IʻnDS/ 1L78w/׹e3W7ti7D`38z:/"4.ӵ=Xs#ǡ'-nN IǞfҪdڠ|.ݴwX)d!P0a i,oz; $ GC@X}HVMr#W1#\w,YoDYBs/ov2 Q3cXI endstream endobj 62 0 obj << /Type /FontDescriptor /FontName /TKUXEF+CMR12 /Flags 4 /FontBBox [-34 -251 988 750] /Ascent 694 /CapHeight 683 /Descent -194 /ItalicAngle 0 /StemV 65 /XHeight 431 /CharSet (/A/S/W/a/comma/e/f/five/g/h/i/l/m/n/one/s/seven/t/two/u/zero) /FontFile 61 0 R >> endobj 63 0 obj << /Length1 1699 /Length2 10071 /Length3 0 /Length 11149 /Filter /FlateDecode >> stream xڍPM; 2%hp@pw@$ݻ{5U|}N)U5,!@iؕ `e`feeG#Qk]@0 $f/2I3;% f`qYYcqH,JyD-qrY۸Owda(^2Z4  quuga`6spa8[ 1<@6u h 0@ggHM˟r 3"Y./n`K3%9@CNi#௳1/߁@?,, f`/`T]=]f`߆f.3w3f/ ՞3ՅdEa^NY l)qp]]~' rZ˟7kx}+wn,Z`PN/?2k+ t=-lX~rd-~zi|!n@?+ W9F'h'\3`2{lߟ%l"#)g!&;+ (f`UlY)`ݠw,e̸!+IoG$foGmef\__ 5J@Kj\^@ lm1\A@KU͟\ك@U `bcefYؽ./ 8R la\3gg3/$֗Ab%0!/.Vg7 `-x,2EzTx9,'Eo{3^!>cK~࿐b/|b//y/ADK"ȿ%%%˿/|_R_dRI/Y=HK PۺЎ1" =4:&%N{4tYbF0VwhD~|omhKRk}4IPkGZ;4Y]~IStvtny<'7^4[=jnDziwZA%spL$XgW3Xd H~?qo^dw!"'z#~"S1y|я(EyrZ~ ^xכo=ᝣ7~pg 1:az2BmjĐLfݾsnS% @1֮oSqhs<5zά y0Ў6TgD>,ate Ƙ w`@sT~..U}E0/әl3\Fw+Ή/[ *n•O/1[p;;:sGWA6$r)_s$y83>OU#e~ {|]Ԫ3Rob|8ܦQ~3:M+8"{LZ{b9u>(QA'Pؤ\ke$3l♭ztŠ>1(xAj)!醓/Q.ֵYC'˞Rqɗ=~TVEmVF,vHMuqܶ_pV N}No!.1-ABޟ*/ '%D |Cr_bA/cn4n$O?A)lT*W7J4"&?{NrcZN1:YG T& a6/!+io9/VԂq:ҝFr5DwܘS1.ɲX*|J_)^滋c^U n Y,ė AO\ylY[XW7b1P 7a ^cQOV(Ԙ,q!J d+xU+tY1}JIß<#ޔd3GOQ_W0Xi⯮F8vZ99@?5XeBkMIXI2ףox|؈-^{=&NHiӾvԽ`qխty%^>C?~L?t;ЕSLzf+wE&NPq7[]/1IyCo70LSఐtnGLK[gzy,H}qɿptl:ΰS2QE<Wy 74O|lem0f|YMq(vI%}YO<'a#zC;BC bG^-mci(“P[ؒ 1Ӈ=ɏkBSku {v}y8ޝL- i 6Ի9iS< 
VWϲD2K}墩h#W@fd'X‰]*.F詰:ǭUwBHr1kZ]}7GXlJ"dγ@1 9|b^c/թ,Q* }*$EWFOdPh-d@?uq٣#K/7^Mg֋ z`T7(` U9^Oo˚ts"=zi<T#S-Pؐ{Rj ȘCCVxf{QIZ^$QxK"xٴ,ۢtC#)m-E46e7TIvg[5oSj4K-3'L ,ѠYb=9[x3^fs nI=q*_+1ۿv.aUyoqTI8@r|T\RAÚkj=6]{`ҫп }%9>l>hyMb#֘N6 Sg3Xɜ'2lm |b ; Sq Tj|+6aOSm®$V! SRى0Tz-ĕ Dn˰ &XEi2R]_+WAXorjIXLƍ=tIB֪YE?~4b!6ѣDtYwSMZ:,DOTz+bM?XtJ z!B?0lVK 2L(s`Vl>a弼`j &y56L >r`j?~AoРا 8&roE%:ImBD뮰vg>໯hE `=!{1Ƙ".#kd+A .X"IpauD͠xO?M\׫FdY~hv%Y:ԥTCδl\-,f"jNiFyg_.c@=.!@_ W Qm )q)|]j2bL2OFaƀE"KE<Xc_U+=}:fͲ9+gUtwC\17!&s]~-cqgg^ f j6z[O_11ݣgJ,莚.L;YC@ܻg0=#ϴbiUD=9Iz &9w; Mqoq&5 .IțCpL\$,ͮRAT9 3(-}c{scjEifRw!)qJ0~I%s=nMe֫C(8 )I]ID?*F|u=&$[]<ˣB!p1?,CJR4gM광Aq:76K"-`*.RduƅKh+Զ]. O ʨhqZVv[Hnz9SURyRo^[ Vq; \Ş.nLon?!:DE\L vTJh"kȈ8oZ#+,1rd觖oxeƱTF , S$l;8[|#\4]O&^W k/B<|bf.>} qSUs}PxNڸ˟{tORkz% *ۅv_]f G-ExdKv/j$rVHc릲?JILRo2+shK-_fod$p yZY#TGI01h㸜B QS\lT®&zBm4WBIN -F]#\&ǒ$yd8ύmrwǥ;o0l!^ F hELMO(v!bJ}A0jva(|#bsEGݯ=T&s5Yv<SޕEm@ }-HO߶yGsX׭]b0%pU͆ZM~3ˑMlĸ6+0=͗,عN"׸()ŝ85eghgpfj"X^ںLQ+#!_`6݈T9 Փ( ) rv\(_}d r5t':Tt(q#iX eh1\l? <L?maj&1N;s殷3PJwS)Z /rYFI_qЍz/xMD`eyUȌ_n2]:;.zRa8|t@j~)qX>lzm:$Y=#B6A6Ռ[^$k;n4^:Oێq'%7O@/kkS[SRw!"3%RZe a|[8qL,%9FM4>/Z[6 pLEO{VLU]rj_ݻ:OX^]\g5%0F~ / M䓾#ShEY}i/xg~րf&nS_z'|=xOO i^vV9NjoN]xU{~"_ɯyU'L3:y͈EG }Ee󪽪RN l6VDy֚H )W]0ZjrIFʑfnupsĿ$kvT(KF5hAj^K~5y}ocj;N'Ȩ=yk˔oILQy"Q[Ayɫn3ֲTO4RNjD%_N:5iYS Oaު8be0{CFM > ųO&<)twHsԏF%V䲢.d]0+OInwRnWrSoŸNÏ'ˑqGC@\c+,*_3ٽJ8Nl[}{$-bZL ,ѓzZ?QFB: <b&m>a5w{ʐx&֨,ۑ}%iCjg=S\N[xQ=з?mgbz60v:qIp]1=ft5В sn`}Z%I]YFxs(8%-KEt,]DNhiBC swg3cey]UF`>k] e?5b9k L\ תr;UI.a) ّvY`hXL3bIET8G>?M6hx<=;z':5\Y|v{.k>'ՀSkHk/@E w3D ?I"﭅.z۵׾V'ihDIKxb$%1sBSPb\H#Big %4RޣhdqhӫFT" *Qw3 Ɏda4z{+ak]^8 )~8èF(n Cb& 'ꉈTR_ Vq7I،);c Dj:%B6})Ij,J1΢7Œ6U.i5%A%gNjƌA  Wnyr&&ؕ kfɣyp͸a<ߏ|ZENBWҀX| %GjZ*ej5#$:.k!.A@+(|ȡSLӺDq[mGwja-X< Ro3|g,KS ѽ[8`?TϯR@wǕ3XPD^cWȤRe*u+gp6[ELiTN}p= K{/ wW%+>8:m!d汅a>Ee& =9TCù-'#ٔ`(!)(c!PC4[c`j*w|Lu-1f#Bp6hѬU9h7.A\RU|1DG~>0?}O©3\ġz*Y.JܢN$9GTD0 #-AY -݁p rZqʹYDGث|B[m`<9|]ua$ ڹFN洋e,SKÚA5kK}%z"L|ICM0h)R$'IukwېOLua;GԪLmot6Ofivta }zI59>ʮ90}mVȘՏ{j:91(ƨf'%O_qSf<&*ꊜ73}u 
V5ڪX[~Dяȶ>Fبij壆:oLP"Ir^};6G7:|N5Q!(ZZ{e 3n>ρ$Ps<9Tju S sot#<(&"H}Q mV;Tږi:ꂎM\O9E'Rfe==}pe^h?ߜtJ?# pƻ:#w6#z*(N=/ZƳ7%Ժ26g+qOFbU/%Xt תu1bp.T\O\cntIQ獺V r`|m!J4晚LOKHkS=JiJѥɶM䄡9~lⅉ^Y5Nk):~hI^8?ckfv?–$pѢ(Ryj.{ȸ T VÛ,)bQ nB: SCQv?ּ%A.:MהDVRW8'Tz8DŽ=0[ fW940 gL?Uir(F0"ji~_&yȃbSNGZgnc7K5״E'2J(@k 8p%Ǟo)[{A54ç2 n8ehy [Heq_gR3pYX7_IF~pA ҩ a=˄BQbTF!z|lNA.,m3QiTrXQbE{=ϱsYwYH%m~lHY 4 ;ŷ,y5r;Yáp*xSM쟫;: ||{\p׽Xhk]+np=VIu!./;azsD3s劌+}OCGatt[ 1}vzu552|-ǍjD,c<&ȇ} 03\C#qi-o^C.D*D|}Ő,*Z?szOWOaƇO7QKy5ZTԝMbgpNV$cHgȎ a yFkll'29tZ[7 e>WiGAڣKsg?[D~8$gGF|洊 SKOʐ"<[:ˌ)3[bcq<cjy,abZFfa``Q̏+^wjI|cנޫkI- u0gօvOrS_'T^7`MP=~6]/ "dP$*zGl-K1n}U$snuw1= pf:kN$2aBҍ+dpal4jta\CJJBsHVAHp ܙG߻ˁnh0`p^$LB*R.z:ljh-Oxy~2/cû|ְeNʦ}֨j_Pm8 k)\k l3K&:\GNqrȽ'Z#t-\Բ1.h:0hR*R1$2 :McK/7wԘ>nUG=c z0\`})fr1ygs}-ZЫ7&\r,υfBHyL=vˌg,Z?}[ˌ }MYoi%>F!c+U$ X[nBxk Shwds7y~Pmdg~x (~x^P,~g?l$ 7}*)?Sx19XʒzUz>EDu|9ݡ ܏aЋ',2qK&*Qd- oHLgz.THaɋd:f{C%L3)륻mnq7oa4PcG|IqUƑKuoTKy˜.1|aDLawʖ_;'m+4ԟ(U[ ݨTN8O0 Gs<N" wt^2K\5< Vy)w}yI,#6{61 P|Nf*G;!< ;>EX٭v |$kV7羠=-r)n =eƇEYwpF!CyjڅnlvwG!ó^mr\V{@vB%950?"$<6}WbžH؜z V ӳBߒ&"lESb )i˾meT% ?ގ{Rԇr~$t)㼊z/FlUZGBfZ$BO)sB_\Eױ]L~Ʀb[45~{&FmtC! Y//XC|g4+v.[G?%Λ̋u1: k1ZV0p_Y\]ķg%qq{,Yv'.E"!TZ랒r)SXL]fu"YS5`hls)DƉ$U գ!u;ˊN<3G޶|lWMGP7杴 o.4F0I73v x[<-[Rfxx).pa!&%痤|IQ^P V+ĩ{\\}ʦLoDKV0c{K? ʨoj( Ȝ)0hm7ﲥ$vv8Rhx )6dsuo4VZQыZ+^V ރ;puǠkz:^mGNv?"I endstream endobj 64 0 obj << /Type /FontDescriptor /FontName /FWHOTG+CMR17 /Flags 4 /FontBBox [-33 -250 945 749] /Ascent 694 /CapHeight 683 /Descent -195 /ItalicAngle 0 /StemV 53 /XHeight 430 /CharSet (/D/G/M/N/S/T/a/b/c/d/e/f/h/i/l/m/n/o/p/r/s/t/u/v) /FontFile 63 0 R >> endobj 65 0 obj << /Length1 1538 /Length2 8458 /Length3 0 /Length 9477 /Filter /FlateDecode >> stream xڍT6LtHtwwPCttH#) tK Hw{s9Yzj mvIKsHj ^rpqqca_R z= *3 $1=:@J%'psq Y cr  ziGOg L ?w=lal@O7ZA`B0`Bf.blw0r9,كƁAб%v9OuyrpZOwU _*n wC8YX8;A=Pk˩p<`l3oC3Óbfd'q3&쩾sp;\8\rdY= s duO?c:C:[VKtuԅ\A2[<0#\\\@ \GG#/ b]AT7` d b'd~3`D< ''nY:@!13\Ne]UYm}?[%%f`Oa; *B%ԥ%^ fRsxb,qrY<}4Q/|\!?Z [WWU6@*jZEHB!n"E,l_r CA. 
?{z4\GzZRj`{yffO#~BoZ<0u=X98c ?yBCAAſ|@n'_@^?N?Sd@ tÿ!'xwOo>_y,0g,mkۮ%ٷD^1{;`0We]qL}C\b5%I4AkbWɺ^Jt vm{'@;fJyNW=ue#[U|ʘwe1FͳgHhP`h,'8STJ 1/?xr^x-UptґP"L0xKSz>]ay$u}t>eRӺre@?.DJ‰HҢh_L“}]iC z"Q_msj=Q'E#5 ;Q֩5\P1/[G~e TD; at& NGt_1W.s~(>S,ԉ>VJHUmt0$Hx #aLatafUXް\|GL!&l^-UXtM Ơ;3ɼr|HWtr&3]$5,1C|_k}_A6;7$,ۂ:~ 7Wma5CMb(E2X!pwkGBFmt*C]yl:T5;aw9_qXO}'䳒wgWjgOAdx%l/ݛ__#f/f )>)UBN/2BZVC)Vξ(؆T=װ(~ʨwXQ!,[% ~8^ŃDa;^>큤պ;u:$<{zm-isHi6^d(8ѫ^BkEH=q|x!JΉVjuQWwz!xl{'i E%τ%;(;,L W71b]cUq -N[\<?)j"ed]=Ҙo̒rCIOgB19V`Ϸ9#c*cMқyfM9+On.:&Km.šЍ}aJz53GYx.lӉ6I.Mڀ5tbdBLR {zP3<>ZA6=HowUOnTü<+\ƏX!EߩйIZL]y]e*Ln7VmANW]7Ϫۘ_O%kʺZ{|eAd2wvzD o\8[ ?e[9G87pnY)[Sf۞ʡqRPvmW;O7ޚE/X %-S-Z$/؏a~4L3"\Tji~1p>3;U=kk{MSX\,o.U4W/q+G) mhC5纼wjώL3t쥍(xMgI9\%Ն84@Z)i3ȏBq Z &>A~P2vY׏y1zSjl:Iː\L+aŇ|E)ϯTU0dzy,f|0|ñDov F=̕"oMM^]"J*Xqkq?\ǮJ Lg(2Y>=h&-j%Lڀ;o"sAX- NdFsq}~Qg  ]⫳{}YooN6Ö־L;t7ߟUclz֯,Fܗ\0i,9"GXL~r~DR%uM~e1nt.$fbsJ9aH{4h&g (ץiDZmJ4J/-s aa7$:Bk=}S 3+% )uh'XZzѸP6سM (&C,u87|ޟM鍷/1љfVo>U0tb5hՃ c;႙{so>{)=?qI9enMC+?%oTQ ɣ meK:\82p>/Q]QJ/H41A.Dw|MB/Ƽ +/= Mis ̞[Mi\!]fT77W:%p+k_ up!. r cnyޒMᲶD6q 32_iEDQzK7"^ָO)y`/ yX ^vscJeyXX?԰e7+Ix)>{Z觑e\XCk<C4 M<(haަnCJo6"MS9٦2/䏎F 6䃧{GV /Vƀo9&1_7_Z8S%'njPO$`珴X!mhQZq7΋s͑>,vus6kdsrClZG..s1$rdy _$JaikM/`f2Wy&V^meo?6+'uLNRIz~Yk] s(5bUa>zMqF;_01$ZN6Ⱥ;dh֋)#,z\ YXlz پHܒD#Cx@5HuŮx,Nr_ >\WQ}erְq]{^a4}m)N;mQ! 1LY_9u]^+8c0!#N{"$''9qx%i9 \!U>! )$z,,|36<ł6XfĎV6Je䖶>Zʠ*4PbS,˒-I$~B5\U5?Fk/;ݻ[N0m3jHS;_;'iOJcjXiB. 
- N5ᆎ($+NzQ<~˛zŎ-όzPkġ)k#1^cul]'cU[=E{1YeME:H iHS*X[@B!䚴n,6{>Lˌ+eWTH\VU_T#ƾX;#~j+EtzotѶ Ŋ޾ y?lզmF`GJi0>!<+%gp_}[HʞN SMpܶNJU,4}W j7wF6wY4O>8l,0K [:Q_\c& hWI#ͤqq{s_\_9ՖFʊ̍N({w*Hޖ木o)Mv(Ya'ƥٔbZgh*CR&RS7pDm5AҭK)t Q?QdꌳyP" ȍ]m&hz} endstream endobj 66 0 obj << /Type /FontDescriptor /FontName /KUMESX+CMR8 /Flags 4 /FontBBox [-36 -250 1070 750] /Ascent 694 /CapHeight 683 /Descent -194 /ItalicAngle 0 /StemV 76 /XHeight 431 /CharSet (/a/at/c/e/fi/h/i/l/m/n/o/period/w) /FontFile 65 0 R >> endobj 67 0 obj << /Length1 1370 /Length2 5960 /Length3 0 /Length 6892 /Filter /FlateDecode >> stream xڍwT6R HK7 tw03 ] !-4()%!ݍ}[us]ZD%m(aH.^n @ ~nItma҇!p d `Ma @$7&ȁ=67 A8Lpo7=ߏ5+**;vA0@ 8V;:pk(H'7 wd؀R AC$w񃝡NuGފ@ ~+C )W buwo|+iӿ E(@ 6P_|ˮKiNPDz\ nex@ܒ rYm~ɌOPq@\|yohMcGކp7_w*h2#ۭ~_mͿϿ xAq&ա-gUT\˟0[z"_s}U?q)'Hќ, b92 KVA,qvAhlvS&hQ[$L\ wV\"VE7g脀. +ݺmDǸhdJGfꮫ5w*Cqd۷ޞ|Jp" be(H2(2'c](1G[iuiexE}gmF_CE)"W`|d}hF/jN~0(.5IҪSPbE,f촗oC!vv5!}Yw_,a!o.oqهW؁G[U,JLقdOhBS+B>1| 3^iAK c݇'EB/=${&Q%:(wDq"F4g]L21~by*WH 4:t8|-0B ja)-9'Vuj:0 @{<=- mE ݖJ6rJeCޖ7FcsC;۫MAU-gi@1 ELCӳВe # '%EIP?I{pC2bo7j9>B ]MbeFtsWc ?mO9uJКoD^):4$Fչݣ 9x)&UTǾi1 טmJrHƑH)z!%_B 2~Xrz]Z^|.̣8*oX!YI:4DF:ɢ85鵣v]E+ %r$s۱s(e3C$vol6 Gkч AI9*4Gv;?+$GvoK-$Y-^ayr+!@Yg)ǡ%,gAt\ZM~™ԴzgvQI0l72ʎ_9 LQ`gYS7޴Fwt~n0#7W&DX%/KRTH#P71v,3V\hj$\ۺd`8 XdM:$w*@^EWk'銳#], jL|1܋3iwcݹ7^݈n/Hn>}0Xy'A `?->P*t.WtPD:xX-dL.Z{|J Dr^x@ݻ@Pg ]h9sēSIa/ Id?A9[IP >=~fMk0#(3uVHw BGfo`3ZHڼ)͝۝R*c9kG{?LFOokw-qaKP_з fVd=џoK#3df½̭ eԜC ۂ.pjRUpY˻LXkP~+h;+ӱð<wE&\ǫ8{X͍pNX]ꛃW .s Ke6@FqO 5YH aQCs;N)v x8aN˕SdCЭuop,a2jL@GR+=_v7e2t=3h18P .Q̛dݲ:#cAN([ߦVV=>EN]ZyZL.dk*ƭٗ d:ep9xBr;֋p3V? O&-& |ga0$_/cY##Loz#< a~ɠ?IUD|GֱrwE "Y[7@f|,Lz2͜ߪP dΞ^hBOhggs$t8@6\AubTWj<,Ue_޴ͻ#p_ɂjͥ־3N*C&F:9Տދ:D-XW`/q.R.+DWzJR̾i}.zv:~P/F !-rMN *,P~ ߞ jV_ Yçb4%7h|}Z^O/=+ʊ٫O9XӕnegM^Э2KYTruÛ`T;e U"o6o)cSh4&l&"7%"a wã:mL*yloIkew͚XU@fù))o,].` gmc;uM) _0v! KҜ%G Z\ݯ7GJL|pu+!y]>KR,IyCUrUMӐm3[˲cV-CRJ V>Ԋ Dy>mtU >CH:\wX}s-#5{(^c+)RE;}two$P$$Zڶ膔E0Zq? 
2⦓L8uRI1mg21oL)˴R|îrC+`2?,KDIlK-9.hq,ܩ}fjs˨{sS<*{۟:#AZ؏DrZ+nt$% 0Pe+4M+?qbdJѦhi#IXԹ> &CP8vI!Cu3\CVݷ.У&%B]ϓ'>‚^ &sFt':z\͵srKO̺o(J|m=I!Jt.e6 n"V'Gq*OR{8O`̚AYrVD0EW1lL'KVT,IJDlεQNx3etr 8z ;I9kyW++mC\+iy63b6 = ]졯{xlPǽ l+Kz|,G^c ԟ2.j8$hF$\8! d)/de[ o r! mp Ű\2PfŸ4,*8F|Y_WmdL|;+fVll]Wcb$*F/jdZ%̄j,*eHFoTl֙.6ƃ<@;zB~tPV A>/zMY@i.[>wW/ҳ+QȾ: 3𨟿$r bj`Dz0Tq_~0=T$r ޳7 }?@Li eb % :{&22JG{j:&_Q:>/` 5uP]̰q>`}ì֊*Hm#PjV;?M2/&~N6fXHJctFCMʻ,n(ZRD^H3_hI(NY3sa^=nq0FphOLZIL&5Rpv]3S+7a/~Mg%S?Q]);"J^(SJȺT0V HH}<ϗ4Mg@Z/:.{,n5ܘU ?4\0Pb{2# G::6 >[dbAN;zv#&]zU>ص> '^ HDJ~F`7 Ҫ!gC?ʏ׺B7ǭFLZ Go`2*NZ[*&O4J_3֢pؖp]cF+ ajƼcuXameđMAl]5v]2I?T6WTa!+kY7lH "|~1-fv֫̀.b9(&#> endobj 69 0 obj << /Length1 1500 /Length2 6841 /Length3 0 /Length 7865 /Filter /FlateDecode >> stream xڍx4־{7D{n1 ft׈ !z Bh!wABI޼k}ߚf~gﳟ}fְ+8" 8_H(P1@ PAM!^H(. %/ƔA(4QhzB"!q)! )  JMDxIA>PG@ ٕ^Pgz\`now; : #!P!PR wY E !Hd.4|v yAhCH7@0y@"@H@_x vw w8AaA0$Aa 4w O}H@Baj}*pG%;BO_Os_x+'(W&p7DC sb@Iqq1!F8ˀC @$@yyC F P8a_kt~+ Z~B׿l sDaneTTD%@8@BB 8 `a[C [X$yYe{kL\c>&^*v$~_Jm@iV_xm49-E&KH -eJK$؅ReF2+m{4m{3REu=mnD2m$iMsbSdsĸ>aQ1p~ +.-1[pFnM\?MɐLQ\1dA\_.=>/iַý1'#Fd7{)/5.ޑ򨈣# fB _uI[KO&7ֽc!:CBDvT|F|LxLPåɓ[I$ w zb&;x jvYؗrjD-?v-N$V oLB+Z:=e\1=nlr !g򠯓3cO ð kd)%I]eHϫwԩ+եFN*OEe1|zN(d Ai2dp9S LNkxߊQk ۥ,4므n8zmcIӆk!Z~JZl4;Zlr}-n'v/D7MK1M AxuL0W%0-i;Gz6Y'u6)s&e10B+@YR/^$,"i&_g1&Ye= 1ٲr y-ԪNdѸbm/7v3ك\P<3wMb T:UC[2JU7S$Ȳh2yC>LahZi/8HmLgNoYX/fڒK=exc1_oOK4#oW0H5OFF qoY՘㻐K^1?v`4l5^9"8(:g泑Y&Vr}4H 2KBu54T18N:6=&4;QԝH5oΜvc&mI]l|Rٛ 3PvT:_To7|湼 ~ZN“3vVQ7=Ûj9@j2>B " uڜ֨JrA1ة!™LG P[ו`и|hiil`ne8Ʊ ;kIn Uf`D72|GnUKv!OkWc;(lbqyu2eekF̝ y&I7DfTi:4WCIHyjJrA,; cGJ wض^I|=ԙro~ԑo0 }?sbpB7}25HT7D'zGΚ' 5yڵZ z_[g*-̻On3pm/߈ $3Ź<ϟhDU ;}h0"犰zskls:`G11s׭qwkqvVx=F8.,=1R`6 yPZҢ31B7*w/n `GGɍ^Wj=fU&(; <-,i*RNIH ڮh3VQr7X*;Y%:m(h> \nlY jS4 Ou rOgiLap-/|FzNa\ 89SL*s*u[aRH>;g2߄5UAζu:t7}l>FY>4Lrr<Ӟ{K2զqJhr#ad€Ĭƣ"kɡkQ>+^ 4`E.[N8!+B{Y1K6JUl7U8L-9Q\|mӸor' m)X_U=7x eOdEb G$f{yظQHحz4CHu1=@]q3e>`= rFEۼy ]qJ̽~{%TޅzxԩTW|iX9қqKNv੣?0ҵ-;ؑ+39O2*H s{rQ!չ^g/\sɴ3`lTqbnqG:i_N*鮭2ڪD 
rp/goAypF;My_=+߇̕\ΔƃWZ&ȵZXTi(\",Aqf]s<Ij=P8e) !p&} ٘s8=ҌA47| iw0"Rɀr[!ۉJq;U #9@Γ@_:;b1R#F'9;y dk/&NFW{VDqbJ';ѼSs7g_(~ށԫ*k Sw],,ݛ.j;zyKu,'\ߐ.CSډ3Hg/Λ8\ r}Yɚuπ|Qd6UR߹zrl |]oBP:^iEg@1&JM,LIM 7qHQOx3]cY D`y O4?8^=X/ɺb;'Pś& myPП d7L҃Ů ܠcgnZv,fXML\V(@nA0TF*Vq',}G]Ccn"WOWˀг8^$eOmW6%ɔOUR)K-Ю QmС/lD(NNEH^)aeʟ}yr 3{fVA)qv)qg},Yn>;ܟZf pO׭TM>sP]{߂& U rL8f}*ǵ^gϾXEORa/@;m+Vz=:VY=BM-:i1<6x r{"`n*LYY v`u>i%gkC^Bׅt$ݱ($#*Puf7V-Ms.i1w,W۷::͍>-u×J"-ږF_c}*dGD~#^'E&T\鳾H2.Mnɸ,4`ϿIRno)LK5=f ``oĠT.-S6Պw trSYm+2{cp..Դ]6Ybi ņ*SL;پ`&3=G-{QМ&g6`;8ḋ!tݜAgoM '!${M>k{!ZKf:3+"rW|2 aN"6$Ȝz'ܜ;aY{Eu8:>\ԨUb y㒚$5]DE[2i:`UVn'פf9AZAZ߮z+Gq^/)p=WjہG+G0$V<7oӗ;PݖE'荃\ܭJ?VIȆ}s+-1xqXFOL{ݥG%{"Y{t3P* '8x9 mN۞LR}"e## h̋f_nK'r7ĒNkY^;)?I_?JwcJ9 9릱)l&)\@2y\Rȵ*/i(yppȊrF;ޥ3т{!&Fzv=W=o<Y;,U/xE3=vR~R|& Ýoeň̒,& ae_' XTەتFod 0EkE$nrvsꫳ$IeKڃUf ^ M y84G}{ar qjLkۂO)pޱ=74}^|J{cWcjJ6C 4ٱi{,~W̋uV9pf6[W>P8ܻȻe?:ˆYrr4T8ꏒL7>ݐR/dԥ'$*a,~I'13:71z0XC.9(YdDSu<}N=R/^o0aEo۝}r%.:TxTFAM 4X"5Mg <*;$m+qY/S)xtOn7Qn٩Uw~k{WsT~= J}Z79]Y%`[݋ґ+u3N@2 uw66z:!pFتQPzDu,PM~ǷvZ8f֜WWf׆ #\c&PDyəm9қ=(kj,.jɶc˛H3rcw۵xMvVkÐe𮟅A@9\ Ɇ84O̸^F+@iQFYCK%`j4]ې%d?Q<>D˅ m@8ЬBEet Uߩ) qTfyo˞"##lw [=r_Ii7 VB3"_ =`ΐ^{eD7teu77 im2_إQ񴌌$!s//!rgTF?Hy@Giۧɔ`o+f%:}V[n} ? 
wEObwﲉPD,{r8ϯ'O;:]4|_1ֵz+V~)<Q,t/l q4Wɠ{yD ߹3DvzhwtPsZQڼ$ɼB2AOzK0zwuL(?v7Yņ+i!J 0npW}to[Z2ik5ŧi_ds߄b+rǡfQzoMC@zW+gK5g_%E)WϙOEti4χXw=e|qxϘOfE;b~fgYkQwGB'^/Av,J5G9FdΞ4nsD=.m?Mgn[V9IJњ.49#멡2AH endstream endobj 70 0 obj << /Type /FontDescriptor /FontName /TQHDZA+CMSY10 /Flags 4 /FontBBox [-29 -960 1116 775] /Ascent 750 /CapHeight 683 /Descent -194 /ItalicAngle -14 /StemV 40 /XHeight 431 /CharSet (/asteriskmath/bar/lessequal/minus/multiply/prime/similar) /FontFile 69 0 R >> endobj 71 0 obj << /Length1 1943 /Length2 14161 /Length3 0 /Length 15359 /Filter /FlateDecode >> stream xڍP e-A5hp Ǒ{ϹW}_M̳{{~LEQ (WԐee3!PQi#Pi]\A|w$LrvVv++7 ?.| S@ tEwtvYYG9O:@27u(YW47;nV͉ӓޕJ r].@ -LƄ@аPwt4u v s;x_.Pv:W27?$;;:x ; @YJˍ``G;dgjg)QU{jrrser#i޷YBG} {3}A  ?ڰpwbt9e%y7!c8YXXxx@gܚ4:Y0|o zn.@;!,@n3f_]@^}wX]avy̲ _`eep?oSu+`}Oko.%w݀SY_+rOWdwĻrާ@}o6UZW}D*Z̭R_v?TqtqüXX}mowi+`hǰqrL]\Lq|YߧLn{KG?,/`qA<f/Y򿈛]w?蝧_`VAf",,f8}v[ {-ſ +/`|_=?߷w_"rCO6{';VG>N4pvw|Wކ˿{ͮ^{G@Fswk]@/99M}h}('I6䡫8=I|`qk˜%ў-ҷ[[M>hK]i Ww}n=v +V7"t Η?糏[WY4bBaG뒥t"7RI=%t~5sWϑݛڋUjނLJ ؂eFc[[rF;'My_E4<̯z/~ u#3IF9 cY*׾3c1!.q N@XXE;):͢~TD*2'`""?qi^P>vs sw$3~<cȁ:S$r'SH4m_|Բi(YWK.X[hD$s/[V]΅v[fݸeb4bTs9CHeodW_pzw}@-eJ ^BA&eDC86Jvr"ұIz4uLfW+v ۑIfZN~}̟#'-$XyR00Pjuxz@ Erh^ǃ'ejxkWM~bASRn>2Cۨ6_Sp3a>ɺg`wDUQn/~)y xw-ѢU>%l(YWESDHTܢs>\q@Odi+VgQ b?ӊ'GcJQ}g$_5DJ<̍LAp* *OR(ՉHm^ҭAV N1y9q%oإ:@MH'>kxkƨK ̀՘wCR'ʙ^howX1Cƀzp<k8=Heoy΢lZk۰PTCɧ$,1k>+BU̢SX7m.);1v `Eh 97̗3_Ay 2 8Ճ\(Zn OG=p*?m, j "iOmbV9"ȡ8qmvB^sbK輓0fY)i] =a=P2%<$bpCcO 0"'}Z/N*.Y}t ws $'{v6/.(*r1<:&K&im򉄨=܁Y(I(%~ETy.9PSDN8CXItO8h20YqFgQjo Я @e WJEef{cel/;f6JC7A/ihIawVoYuĬkKcElj5Z~gPlE7E, nshdS^?R$ҐAM㥶uDR~j¤d~`gQ5K5;ӵG*gz7G܆`td`RX|]0H{B=G3QA>JϑEH(?(٘OP-KpRѩQ,۔p&ڟcg59ÍI2"Oɷ̽I|j2 Tc~"];mWTLe֚hخ[;XyƧ<^#u-VgІ?D'3F-E״-f[fZKXêDd܇Wܨq2nfq> wıKU j"~W֌aξp!I >cASch Lp"~H)Sg84`A*A/qWGvWn<(Ql3v2(fPv?E"GbŘX~c,.Veya,C )M6jV/!z;-3cn6 QH'# ppzУKj9/hdFxf/c^a†nx$ rlܶ^eA/AرYsױh#u 7 ( Y5GN+n(C홓EO{[QOecg];󵇈MۖBHxr~0Rk:pmg1TLKO Έ.H鱳؜7*,-UBzMPihc֗gGF96_>Eoڝ5R1C~wYfE"w[5"X ;7xG-f?c{a'f;vk +Yjp^smC63 TͅtsSD<(٭yM͈kx̄ƹ|7Xfs 
{MA2E]dnxRiXWd D+] AcCM0Tcd$sJ^tUͦ74@LΝ͖1UҶwgE9~%j ^q Dhwmϗcs.}m>= '$J77R YlYmْ-co8u$sA[ӽyJܴ;GT- eilf787헙Iw"fxJ;#s5+-9"ċuزƴqz6@$L[^\:SPWBܠ"X,M$Yե+U/Bbޮ@g'h>wA[6W_ lk+mLrB(ڟ5^;" O׋ @ɦb;ff/bU'w#cܻ%(,EčZ f {TUU>9\n8N&&-tz :WXG2xWF݇ټɡ5(ؚgY4[cf}lWiYhTo¸Sr[~YY)̀Ϊ[^hs&Wk2C2 CSo+$G`㷳+o&Gz}x0`>S+1uJ>oxF߅u#(\`тEO|[Aka޷!Кzy93 go0v*5ћaW˝D8p&\KGrpBuL( =c)Sp:29I6]t^kT/4v.@y{5,uYN'rд*^*=LNbK2Hc-sTR]-sH_ITX;UϮ괏Se4&񤙚955}u,(xmO1›OC<2kßIoB>)Ac]p\eOXH@5.R{ȥno Awpv )ePݩV뒶7͆_mF~>1 UHFW]$U \=ZnF/0vAm9tb/4}7;|跷\j2Q 8$p582I-4>J[>L8_Ob<76 a$:ZȦhӬppler#͎\׃XY (a۶W6< 3GկmOG bGr].zf8CsR)V[P.2cJFܰ[W +}.G_ɚ" 2w $ӯbԤw&DÁ`ÃEGl~ue3jd!H'삸a$dD ی9K(fo]lttFkPwlVl[`P0Q|b!L=s$Yo$d~d!gC!Ixn<bt&ϫ 9*Wt +h}k? ' ܔ͢ 2a5n\ 5UmP-_c2=M'[0뽅#ς,"X:m=;~IbʊH;gֵuc zW!2Cf!ոkǏWSP^Wd77KW'L>AI3 DB=2XQX&\6GBLar)ZKvɝ˻3o's'ev3F쨯6 N~J~,$433y{&r=,23!?Owȉ;hnDt-n F llj j&:]{G IU2,eĀפI|p=#T.lXT[pXbg ,ŌUWk'~TYV8 ?9b[+=Fv7xB2.R" ~؄5VrJQxΛd?$a@~F?qnC}3VoJP*^6ЯHpd^R)/4RW]􅦙zXd6I=t='Hq=p3?UFO*NQuCHT Eu.W!1/Qv!|N(.X V.4y\HM}'6;BHM`]W#ŕ' !YaL9܉Q\/z,S(^$7H un&x1j۸hN G%~ D_B]ous9# 6z8Mג ,kF3꛿KI ۤ ?EI/2 h}!qE?a;n{NJ~Jև؍yڟ^d{ptBǑ!f(%ϾUms#%|U5e븬{1 ͭ{聼pl%}oZ/z@ad:+,%č,:2֮eSSnPOz|dOFչ|~[њT˄i&{֢H_(f7 }WaW@@!'$9''fDf ,0v,=#o6{ uԲ%"CDhx5ip#Ƴ>Xce%X'_2HvL?,i(OrYNq+ȬQ IB v7'sLXZm"X" Ӯ[d B},$]15s^GsR&xWҭ BR,l]C7YFtc|7WŊb<>:J!#B2)n* yzV._ؤ:2=gW++{{ِOI->V}pŒ*ո |w8:aKp􁞄pC4 !bUJۑW?SZ~DlU.B /Z>,-ۍ{aв%,YA^*|e[SSZr ̡f=[lM)y5_"c -5%EU)EF@8PtMIhvP9JS;p\@S,$藤K;6db2) bKR([q*BśOԻW47JRT_J?a&易<s+$ЍZVe5[7:vE-Ӻ?{IG D80'7鞜h~iV׆^q;B g"7`cg )/qhhkjL%aGӶP$gWXM`P`׺46m#Q])6m ґpo\aa&EɑUy A 脆$h23[&_͕n8]O)De䶀pr'2oY6I2;YL56U}&rwhڔ~8rvbI ;%GM+/lNQgXPf^xă׸P{2=p2f듊y6rzp9{􅉦 }YD= <4EB>EJSdՉ:tPǙw^OZ?ay*;k ~ akz5owic+D]^ݷR^Jh'Rst F!5'aNdQԮ-3L i#EC _O/֘&}dsJ2l=3|^ P<1>@/< z,/kT] $9;pEb SOW g"qd'lsvjTK| ˍl;dϽБ4#3$cwaJ[gpE% MQ1@;ݽ$R*!q0zrhFXaWyP͕vї+9!K?lJba_(7?Bu!'X4֒ݚy}skRdyHop~na>&ny}=5lY5C%Kˌc0V+.u20t+89}eb-:=[i6pSu7&G|a@ǽ uE?vow"3,fb;Eڄe%v⃢mX[ڭ?XY~>Mub!]מMߺ)s }&YiYh"$Fk}߭iNlե;d[ :~k^4ٲ #~*}0kPWC NeLvSAWFfI_Sw}8F,۳0:n² kVBïyP_1#~)E"A3bkQVQe(PK }@gX(OG~ͨ"yq q:P,~5[e56JO%q0Yorgd|bg!,XZ 
[NmNj ̺10S&V/6Λd$$+MJݹX5N- #$#TM*ъ: XEs`"}t|܉qcxyygjwYGIbo5/rBgg>#rVĭ&XܡLJW)~_~Ή] \O$</x]~C-4!~aF՜whۮJ$v,#r>2*QYÏIktZr#m:W ?  lǔI*b2sz"ڿ E|'ʑqk0w)%ټ _cfEo觚o\ǵgƘ>gӼe !穥#{rq%q6L2vTB!"t/۞jFJ/s7VG߰VozašcU.د|GDKpLW*nv&ބy73Pu/JΖ}{e#hd5TQ+z0 S}H}HJڭ+m k|#6 h:Ow'"^=nBTo buLss|~Cҁ:EUq:>Ľ;No*Pwث#WQd!T0PGF, 5QnzKTGkŠXV_YS2YIҞS;].ߔ38hc+IWRy#IZX64p'MSu5}W}pj*V>GP\`*dY԰tPT:PЩAm`w9w0tO-kɿGS+85<#-xȉh" ̂&3}*[:1dgWHr Ɓ|7IT&J.3;$ 60j 'P8_*lE_rŭѕǪ??#Iѱºjgʅ !|&Sv\4˴џZ5a׭@8HzH(.R㽚$nd0݌kjX1+!`2!h5뮲+5S(-=DPT'Zե7rD'β[Rs0?''μg!=Y4jwwܰs NlA4v˅5C< ]T}T@ҳӯgn~0( oYۤ‹U>7RjEL@pHrفb4% M]M K oit' ɯ8Qu1= 5nc$"oPc@?-:q#&%+´LҜbm_2myײ̓襬#YqN>q9_X)ZIbp[pg"X}Y(Z<ԀmZϻU&5H `h4-ji+pW.s#U~K> Nd%Fb{*6yX r6ØDhM&b|,J.%x imcY`|8U4bQ+NHR%H9g5rm% rFH^v%D”GnPԘpN@߭A_qf/Š(C%G?~~-7GFPJqfacȢA8A.)u ^kUmG/^ 1Z&ᔚ3Ƥy`1DZm>~fno0is /%( 4\GgGܙ4znU }4/_l* ~2@+TJ[x) ;_`Ⰽ( #lNglўb^ ++ K։} Ӵmh݁tw/FIϡu*&}#)|tXڬ_hhJ|OB5??B ۀ>wçJJa4>,$GP|.]ī;vN[L?W%劖TO3GM;D ) jX\ kfPR gk=<2"W#cz Rw,GѳQ<|=d_Hx}s=/B'A= G 3 X:d(YX_n澕'W#7;QF(cO0+! &ʳrQ6րbaALf5"ėS>&~@sa j܅o7hK_߾DYYfwu\+ttؤލq@[ۧj׼yIk bş6T6BqX&?$X7՝JP(0bveTv fg)41YJ&~vzB;t"~`nQ&0P6)T (gZ b5ǻ& l*7zG' \OB6'X^HRwpa ~̹:>cI~Qn ILȎٲ9 K<9/yHk{ ölFHQqd̘PaTfC9 ?2,hJ'+tǞFs8aErT!HyM1þVW* ^Φ]Kӄ}#Suzf5Sv8P[u¶RZm+UQ:"#>,w0xCP}PFmm,@ ;7f2 M^^)䋜CS6铡>T  gfcnVB _ ^mipht*:UbHa5Msh,(ȢtA !Tme5jVTᤀ֚.mȜǟQ^:TVx]rk#}Րbr%/ृL2`[+.6Pn^=-'h#NPq,CZtS+9aY]W%VbF$'J 3%LWf4ȪӫpCtcx85hm-P#ظ9!)s)DZR"/:t͹F3G ):.OkQNZ"kRAUDG3X۟'䁯C|ZsIp:tp==dlkBrF \y2kM8.lc];"N*|>ixﶉ%8Zl~*Z}.l4Jr2>3=q%j IK`Ik P#JĦ>+Dp$!s6WC7FcUá͙c|U#ξ hGAmK'TIBÅYjIHɌ6Dw.ˀxm,9b!x|xo2uI^8ߍpTwǽPi gR7KelˣaƜM:A.rPls%?x->smF4ϊ+g<6]}w)5#}sk~wB ~7%lS;N rsSDNH\T58Ci}.4D5MC}Eq?rl y0҇|p:K-~1v p~q&ݭ$a591vMTױ/X_>^USawfBH\uRϼuO}as5_ܒ6?!1ç@R#~lޒAK8G9DX6WL"bwVV+d7T?SUL*?)P:"@8:_u2Bm8i(F?uA:7ws"`yHhM;GhaFvM3©)j1ÓVE3#}QU$1e-U&cO&Bl~KC櫙ٷj|eSb.j KQ+5ĒJpzV 5uFK_xx*+paR0Vy$org$\}WWlZkjF$5PM ^lBBaG!Ku|2 rɌ<=g7|'҇qC,?{t $\jMᲷ"l؛0B^|-E毴2 jk4`'` h6Ѭ=$>Cp"v&6f&8tީri]M\ ~p+3͂4d@woXr+ 'Nߴ;Ri y22y)SqhAj#ϒ}tQ{Uv'}ǘF-X~M bպZ5$<#;L|7^j|9fs=dӫAjXDME̓(y{L[l Of鐞D$8 O#%ֺ6tdPusm&'b3RJpVr 
endstream endobj 72 0 obj << /Type /FontDescriptor /FontName /ITITLT+CMTI10 /Flags 4 /FontBBox [-35 -250 1124 750] /Ascent 694 /CapHeight 683 /Descent -194 /ItalicAngle -14 /StemV 68 /XHeight 431 /CharSet (/A/B/C/D/E/I/M/N/P/S/T/a/b/c/colon/comma/d/e/f/g/h/hyphen/i/l/m/n/nine/o/one/p/period/quoteright/r/s/t/u/v/y) /FontFile 71 0 R >> endobj 73 0 obj << /Length1 1755 /Length2 10851 /Length3 0 /Length 11979 /Filter /FlateDecode >> stream xڍP-@q%84XC'hpwww' XGf̽WW]k휵5$ sbfc$ـ dGV;ـcGA;1$ cg3Qbsqظx@;"4vYr;#  tz:Sz1[ljlP4v>W45AL 'JA'hdblZ3\NU#2dl k4j%O <l ;g;3\&PIVpl,l+w"Ʀ[{c;wl(K+891~m!.`cg'k>GS(ɑl{FiYLbk srDݟ$ 2}>wwֿ.jd33==$+لfj='o ޞ{ o9p:=o0:L@`;?Azg?VW**-;!nOfN3; ,*࿺W9gϧ]R_A\Jg݂t= h,?B4;Uۑ~? -/Ƴnw@ vK 3zewA;J@f*`'S?Yـ@*GyL_gI=W3^2v.n1j|V;y@nbqz7ino wFnտs? ׁ_ |b\Wd/럂܅!fJ;sc6Ǝ"<9 >n\{?R d81j#veJg\9ߢ#%WdA1.E<jBߵyƪNm|;{L¬.o !G̋s֭h nyiH&)߼d>uØΜx"eD>⩻kcTݱH %^ga~Ěg = hH1,6vʻ_rƶ=C!!^P(K4"¥qǰ&2&rC7""oWUzUgđێZ[ԉ3: R^b*&N^Z=Pe{ ]zCDKXv9GuSx<\]Im&eLȬLcuJSØy2懊o(E徙NH)^wgߙaQZ0V 5 !~ 1mlH" S a]ԭ鋇'72N)͹7w}1 ڼ!TI,_~hB[B5UFXڌJ@cFtMQEJJQݙ.ݯn!UZ4z304us|$-TyǮuL_[>Gf-ȐխDF(606' -@qJc=^ܢ%X+-&&Yn TV*gm3l iWK\o1ⲭaj,wm@SX9Sg#4W۟[&r#j3D,/RM26Q1 |x!#?&E@Y}Tԏs_%8uyN7fYB\c?[}NxO8XDS#05'c&VrDѦr؈zr TE?%~:wB2 c`pedl-|_`dczOp)PiS_!;vͶ9ySܽWW7{~&!a]Q糋 -;gdʤtEs#%j;jtZ^ɧ)ok2?BLds1B`Wn~&"mbLY0:ttA <[A,ͅaܸtS|B#utLו[]ת_$ *(/Ď7EvD hwC`"bƍg'0ę?`HQ;,ƚ20"K,EM}*O!9F+!į]]}v$߷K_i|˔:ęw6ŅJcFeei|U q,M\7_:O'Iv› k9][⍞"h- _G˖||j;2^OZXڇ !5\|*-d=R}BmDsDDG("qpcqٔt3" ݎZlKe\`iEk2b$sP@VR}{yaj^1n֌ rۖ܎1µCa;`%{=騸Hs݈7y6 vR[K"W1rM\B4:G`Eן#^]$*U@[UFҁrH%=R,bNF㐃Ńeh~F;^e cf1H,N$ @,* % >~u^?>ez=S.(g}ǻHkZ)$ }mQFոf ^ Aho.¤~U[ {)ȳY`sIOg=tɻn}Vz@ 4t3gߖo8r- AX6|6>xe-tj85gq aD&"`m-_g f߫o]qИ!wz##}߱z8F%q|G'WE)JΈ|OKֳF]z1l?n+k3,Xob.#{w]!X.T>\6[zT;!k?VY8鼜*,:1'yF䔻bxՕe^o Ui#fm`5%mc;QP7M]'Zu<2Ac0kɷG୘o? =Rvp΋5 DC՚Aa@;wIe ^!֡:`D]v9K" #KF[RZ\4LK:~%$,pynLKqcMnHyCp]ejL QY`a_{cx(D)dY_JE f }[|d昃.aZߒVpgkwg=ͬY 6lؖm Xmq0'!#\>mYL(,oF]#}?FNO*G Nx9rn5 3$6X{9<\P7qtF~? 
V#;x 8O}Ȓ..FyqޮlՑ|miI znұHqN39&%g:RܙH:oQ'[bE_I.tM 3ڶ TA ɱ\:Bjgf(y~H\s2wx֚$+;}O0i9m'EUt*ˮ5Oz x3ẢFny֦nf:k>mf&$j:42cjbҧ+ 0&̃xKv<M=1\j i@k֞8LѦ?T "oWM"ugץ^up~Z諾cÅblC'fNfUwId`LI5[OwDR,&uXsM>iXa9'DYĜo'N7ъ rQZ]4xM^Zt&eDwBy̔{rwƒcω*>;P#2P( "/lZ)ɾi9>>wRY. e;Z^rrRo?B3}i~lvI gJx~QdO&Q#NpJNjf,|Qk!8AmN /a*1!Y2M:') ?'%l9bnf@ek[Kvqȹ ;\J{oY$y_}L3b)?TUFɐTZy1hFNʡq*i6W cԃKYJYE^퉞NkkЖVI&+!n cC[9D~'w%Tp4gpz%ULTpO%ߴcyk2xoN/6Vxc4p2+CHQڂٮ1h"\86gþxB!!xֽ5wg59(gJ9-"C8Vg(Qi3ՌŰLBs6[id&$m/zx?9> ҕX5pSG4\%"Z׹ 9OqӘ@+bC=qAɨsv'78HuFzi §E&_@3tq컜+W} *A)Rp4uz_5XW5>O&zX"=g}x!kMRe\"eO"hߛpf |'FZ I{= kEsx'~Cd"?3Ye;VLZ8#~ٳ6ڎBfX3^YX˷LOFo3EM_LJ{ِG%nbY~$suAoR i=W4 ۘ&jf G3XocD&Go_}n. e?~_/5$.17rګ&CVmv*_OP޹LA![2BkR]9.4bq58iWwzwlT\ XXs '0l!fr|'4(}Qk -XexZvf:Ω&* e.i"\^ w̚i`x³_B21JS,xqDS2*#`o oFXzE$`[sڗhAZLpa00W}1*53X',u8}E.0r>1@ۡx]:c7ݤ#!_Q(".c7i$vxs5F봄cVv";>x^b0B16Q0* yH*.^ۦ*&Ghr%ԃN |":Y1Hul4HVᓊk"Zr\Ң@ B]&`a8<%M$-AW3_Oe-*4HC_ҩ\C2A} "_GiݸDqo6Г|M(K#3;0$_rpKw*[ bW7ݺě&u6HtH`n8;13p[uf6Nӥ9=켺qOlY X˭fRE IN2H`eVz!XQdqi#Kx)zX:/d=D[[fC"j)NS)Gd'c׊Bty]` g|:0ޒqeA")^*ZXcRUtgP qIǵEzy$ eu.uP͘? kTXu‘Ci DȌGo:jzNJk.F{rjO|ZMkc Knn ө|_LCH:uvr :Xk/w~- H3Hg&=c/(fʀ(pMQ}:/ZM. ?֙pգ^ٴrO:g9sP-`]Z5Fj4Ki.\8p; J=07!lasd;LmIS)z@XۓeڮO꧎>0b] 0.nRqi 5dWFVAΫISeje [zI2 BRa4.!EwRow sn?OLN.!l&ɷ')ͽ`5)' t9iyZa5#b`&1UÆdB<1c}R,zޣ<4 rBRcMPx"Cx%`k1ʂʇKHFZo=d![ މc;_INh`28i\,22^aم3Vt߽e6;4Vה4=W*q rӅ#Y |Q[F nU`Ez4%I^'B/PdN6t0!uT%tmZ%DJCsu:|5">I9 Cio#c| <ǥYeZdkxEx~2.4} $;}<,`gr_Xy8کC*44D")FlEx2v>}9-VNZ?&Mb2dH/ȭvNlslLSh)Szid&fu Nt.TxFи ̢͒<`\G9[?dEiL[.2°t qٟ̇#sPXsIȦ}Gm7WJYKk%a+:iJ;F&-:1S߽U9N[~f_aVꉘ& GmSj:ks&?:(V*+'>uqiBtrQޓYΛ-\I3>-dnr*z-򰙃Ce$0E]sJ_fg,!9q$i|¥xpd`C9>d@,pvj2j_E43#5y%; m8|g$ J7tGo{ 3yK뎒w.YZ}v{ y³3-2 ܈?? "bK&|,SRy~p+{Kj)[ީ^4Z[:+Y T|CM776U`֦y^Q_U=>4|pBaMX;,x˔;&z]'`.> endobj 75 0 obj << /Length1 1158 /Length2 1888 /Length3 0 /Length 2697 /Filter /FlateDecode >> stream xuS dBx PybQ< smC|2`13 X NٸjEp8S!|D! 
(f!&6Ƒ,)| 11Oρ2D8/Bs%ʾW@Y\?W  񸢹Xd7?A._qpp\I ;/#⊾"o1N/RP0A|H  \9x\q% 7>P[6BQ!-n`VfDXbhFE"9vEg} $'r15tIB\ܛBB_Xђ-"p1`J3͌d P܍RLNigSXrf{ i͜TuIq;b6j&?O,ҚuG++G4puRivؐ~^Iċ5#NGP񩒹kw?1tO)8vM(W4Ĝ m905%{Øw^|캡<dD=!bylΘp&//GU,g/k EeINi%^y(%5QH[xտ4gM\H|EfH WR1J?-J |ѕ"r:tҹ~>喜^r=ӯ+tX3O(P]r%!AU@L]G 7JƦ=W?ߡNãzk/3U$IlKg2|ui~%O7荚"650j̶۶*K<UfCca \Msf)G%/|?[rkqUj;bf}/o-xxpl_Iv ٭ҲEm Lާz;eEVWkrDrWSH߲e|zP(.c9:PoJ}Lw=V˩cR)c%5L_( yfs+vLO3 5M.kp n]n%w_jʥ->XeqT3l5.j_Y.? =Iкr1θwd{#gͥ{ZQ V/+ WMYxcz4_V첤AetW?Vjyuڕ|)}.zNVP꺠6O? 4r7kH VW}Aاx|?pn|yb[M 0s+c[Uן6R<=_m8Na~wQ<˜ή{$D\> endobj 77 0 obj << /Length1 1214 /Length2 3385 /Length3 0 /Length 4247 /Filter /FlateDecode >> stream xuW 8mۖ,YR$=ی}lc613f$Y"RT5R,^ɖ]*D(";?9yZ뼮9C6ml&lIr˙hzܠvO[3_ث۵hι\hCl|8Itq8rJ/1Ekٷя;k2}~'iFsi7X]ײ,>q \vgUBV[s>ع8.}QǓa)qڙ=cui+ͣv#,x4V{2N\#!\^Rj|iteȬ%q[>!5 #N8oL:B)Xby~gZ;kH2UO$RF[}CBϑ',,*\m (Rjy+dv#^> ͆r[3_B>Ynn`pI/[^jZ1鹬!U{ڌxUs Js;NJ7[ٍ^2`u6Zdݶo[I/}ZJsW.i:&aĝŶc&=ZeFb[= R!UkoBSXY ~S*J>CrYl"O3#OP >.v_:jp."sO#K65c`-b(:`|qƤ'pJsK]-ٟb\ty 78t?F)?_Inglzg \?ǝg~1|k\6wj֛3y0o&rC>WmXG`ԌWLȑ0O,鯶gKb2ɧX_qq̒r­i[?t97ԯì4aA&dԛ^yWGՍÏKs`@DȰBPȹ$O6">9~!Yv"%>9?jwRSFIq;1 n\9i<ใYPW渾p +li,Va:0Α_ajuN:oV(\"ifPN52ki&#cR`=WoZEKqRs @kJ<5Pr$3E,1txa9__Wَ)zm.#&]0ST]fjW3zWD5Ba@ #oz6μ8B=rպ,sYNnE]FL:w4x>U jYzn'{쵷ןc59:>qmu.+\XZLjV6&&~3՘p/4vG{eëo!GeXߦs<a0-تe>|"W=?m4TjNG:'і}2f;QA"ԛcٙ$-|HzOM5C Â8(b=HǨ*&|aȰsjmoY`HzR܏Qmdrчٓ'C10N^W7>TwjCϮ9+FlO ݕ-#"F'.$;E'6||o:|.<*.8DFi;7;U7-]t||cXWKq>qCqy/ ?Av=pѲ}nKV|b\\&ޞ拻DڄykiF J ;/`oBFZfsݩ0{yRr4LhTjxFHpo1#Vtt\FY ?gtuH {mʵ#AسCڋGS],*zl'Gcm7hަ/.F^ʢoio9c+ +rLYhIA/5>'#fyL!bxVmJ\ #Q3rIf|iHbPmUĤ\4ىK^ݚq"}؀ikآ:Ma꠸K(JX+O[UԺT r[QNڎrl3eJ*cFkҒxUha= DA oz fNfj-şl%~i=0_rCf;X ΑݬZ5H6¡>$ >{;D*kn5{ȩFCԎ}Q7SK̓9q-3E5'kQGRd)b+ *A,/r琾7?q#3W粻c@9f6e"ȥַl˱@^2S5py**Pۙkhln5/YUlB!kNZR:}B^O/A QΧ:fxY䱦̩b!Xtr҉/C/afફ .i9U;T҆e>􏕩{,nKa) xRO$Sؓ2LpXEIV:dƓ+bu_=bvIYŏ?5\<qZ|꒑P86gFޟ 1))9|17]iб̂Y K+'{(gm؟İh/SR坚q(Lw드d^IdeIb4ec'_'?P`0cONoVjzlM=E-D g)jUqX8=VS g:_i&ɗ >Y-^s5YVg5w) #>П t-C. 
!rL_?3!2^h+r&`w=!-!R`+.V endstream endobj 78 0 obj << /Type /FontDescriptor /FontName /JZSEYD+PazoMath-BoldItalic /Flags 4 /FontBBox [-64 -272 868 740] /Ascent 469 /CapHeight 0 /Descent -271 /ItalicAngle -9 /StemV 77 /XHeight 0 /CharSet (/alpha/beta/mu) /FontFile 77 0 R >> endobj 79 0 obj << /Length1 1144 /Length2 1556 /Length3 0 /Length 2342 /Filter /FlateDecode >> stream xuS XL&72xHYR7$4[TVCӜ3333gH\B*dɍ%%Pt۳?3{yfmχpč)sm |57DM ##g(] n$ !9Xǖcm˵\6K.I!!á l6X)Q>!3XJ,\@;,|E F&_M 8&0"` D" M  +Bتo.E`Lۥ(2FH@(#8BB|Eťt]EIlYիW3IBJ0&I2|)TQG$JQg BQmT $+)8 Oj@B_L{R瓨 |]܀Ŕ.ېB@G_&Fo> Bq)HJsJ0HNҥ$$ ,ZU!D"UUBׂQeRe"UH"\B&|dJ*(_!?+ƃhq! R|@MNeTCqQs|"Wvk|?D( '̚MřaTnWOJVDA Qz0\`1qSDFE ʛfUK%uPQ9?4K)W}~^Hen\ `cmXΎg$_FUMKZND{nӊ=ԉIY.K[XLr}?sˆ5Mx58xWQM5', gJI.*{FRw3eΔNzFVңÚ/W}\an#]F͉<|AS}a5g\I܎}lrRpW~1vlK2V|]w{|F:X00(zZdbS3ҫO&{K'3-3$ާNW/ɻ;0yδ~q>QQ~SNwV{lTSNjO&z[^$aENN}wPʲ]T@-jxz/oV. xJ^VNk9ܷn3&KjcYVmvpXdLέRT-Fbaɮ^Zit~@ʈ63(Zż:{fvnW}_3_lF6:=iApHѐ ;JngH_:fo6̜9zئ=[y3ƛUYc#uaAW&VɼyQuaONM ^u~n ;H2栳pXW]y ʴ-OΩ! IO`a8=RfTд=y}%{ӚW#,[>rBNP55GdW4ny}Fw8}*ɭ5?jvɈm ]ƌALpVwuAXögkOӿ\7o\3E͐ĝo'+fm֔-=ܵBqNܞ6ڔ͒m D𝡾ލL{DV GHڕ?CQYbuV9'.4l_=<*hK 3(b8H&M' uNݳHϳ|1AW`)sӵKe| >[eKc*zeݹ3덯$0Ŗ5f uNc/Ω/Z_zn'GFSqtƻ[^DѩVAY.OP{8jڜ`궉 {A:*opmkŷW}F_to_S nz<5aV}']qShY *-uqM+~1\д|Ix -zղkxMy5W^tm>pXy&p5z=a]wVo|xޒʛhM׈ClvWgO c_/%c/:vkt0lؕ+3Ѵ'q<^ʺ{9o9Q9$#r\ج&?9輐vD "c#z'˻-) endstream endobj 80 0 obj << /Type /FontDescriptor /FontName /DCWRCI+PazoMath /Flags 4 /FontBBox [-40 -283 878 946] /Ascent 0 /CapHeight 0 /Descent 0 /ItalicAngle 0 /StemV 95 /XHeight 0 /CharSet (/Phi) /FontFile 79 0 R >> endobj 81 0 obj << /Length1 1188 /Length2 2758 /Length3 0 /Length 3594 /Filter /FlateDecode >> stream xuT <cI$%[Jc杅13famDWB(J*B/""[vo;yy96W- <-Q!-6pc!>K!46uV  iTk,6 20c14:0}?% l@>`< LEadz߅ &]4*AeEsd" p5$ "@@߼3lgB r D['7 , *2$PaaaD*K p0$@(" !um `&đ ҹjtLfMfDeh L jA8 d: @ Sh,hT&\`$cwLeȆ< &p(:Ikª <d“e0 )u_:_d R !BLp~b_1?<-DX -#m}3iCfx2#,sm1La=K0h/wHd\[<t$(X/IT rٿo!_mXAk v< @iSiYQ= $w0h#1I&cM <!?S\?c~ d6+Q% Dh-CCO$`X' EÙ ݖyd[WWVGgdvhߖ zW=="Sk 
@HJrv=xgV%i䵶q+hBe4ef-кi -TNxRz(pߪʞ>l=d.|m*4m/eҡ #Qs6*@(S[Uʝ .]ޠ5)=BcAnH!md>edx =dd_|I$)e_! Ej=|ٰ5]l&ebSoX98hW^ҀwxZ3mcFμru׆# 1:Cv1z t34Wjq֘v?ZǧYN=~\7U"VOorC-^E^J#R6A835>V_בټ[Դ8*-Bڹ7ﺩ{2"T[t\~4ZBJjtrzJ~>Z'ҎW";aBm| [rccm36#Rb̿:.}ڴ+=phi B3}y2یngT 䠻OH$>_{\6=Iw)v+kFHA]c|*Ov;$Ѿ0k:XwfS :9UӖ謡 &av޾(15۱zY){6Va iǙCQQ''UB<@Q+^9'&mʞ+.tAuMUbYlxk Y`o^zpj\O|hKTs3wo@A~ce040 -o mO 1L?yym}x1GRY/:HoqwA=z$:FP>&)e'zcA]fuX3fIX 7=Ps,cQgks2bD7!]S~j `ZAQCZy>MUևS7 epUxrQ7+`\>)r!]CmҘAζW{zӚ]/Nm5j$)8:l|N{WKeW͌4ʢ#jdԙCywf6}-^vO(L04?XuYdb!YQv#PYOM27Znҷ7vr ͬ6<")<@mnUo c#|K t{תORͻ(rkK.Y'jhhtoW DقsBJ\c֤MEDƣ/Vl0pT扶' .|ߤ'un|O!DB /ӶaHq5$TDz'̖N̜<>) endstream endobj 82 0 obj << /Type /FontDescriptor /FontName /PMIEOT+PazoMath-Italic /Flags 4 /FontBBox [-70 -277 902 733] /Ascent 482 /CapHeight 0 /Descent -276 /ItalicAngle -9 /StemV 65 /XHeight 0 /CharSet (/mu/sigma) /FontFile 81 0 R >> endobj 83 0 obj << /Length1 1630 /Length2 7141 /Length3 0 /Length 7967 /Filter /FlateDecode >> stream xڭTeX;%ACRJf!f`fnN鐔.iI~g}_3{{+^KK)@!.>n}C-k''k %u*# , dC!rs!|8LY lg>qpp`!>ANPg@{% 2VP*jA-7'-@ l AlP ~p-! i r q\@0g0 v0k( ur` #}p~ȴpvAj)ao~З@`4(  O\6  wqz@CYÀN 8wwU'To'8%7CN[Cn;0(C^B|فn.A? 
b3l"PzãE<ߦ`Gg?.>ZI'w~4`c vwQ4%7߱!v3 +=A@-0[ .>^޿az`[G  k<*ʪFu=/C7Yn=ܼ|u2< 7kai ۺ`s ҃@ [YkԌ4D㜁9Ӟ.>ԁzWU΀7_E˭Cߏ>5sS}k܉-(ڏ;x0VE ~ھaiU4!^m; `X߯^1"et!M%k#z`o9%Kߧ'ݛTٱLb֏^Vz_?܅Uuqp^32\*6ЦMl@%,1P!99\?[8 e*+rҋ=>㸌R]c /(K&천5DYMSѰABղ8ů3:Lu-uw-p(" |TM?pϒ_wrJUԙzKHJ;U)!m,w9qog1x|j&e8g]Im>ımǣO[= v_h|aB{s4ʼn{F!$(d؍;912S*F.{w )AJ(,?.;A?Vئ\ŝMz)\$^)fzr2DjJ5)p0wd+.`V ]% Z.EU숪~ 4*lQ*lAbhkkqpXp*Uv 3 @t|a%"`Yec7i5eu~lv͜'F 8qIZɩ= ]j5߯ӴzgTt/@ƜK`zh|%C89PRNa::~JIW'Y"\:BMU eGdz<X*FSp,7HGju@bPV$ ܵI(Yх9lJva\h&.5rY}EOMcT9U>3?4cipgo0[G5L)6W B 8}ގ-osE[S1r ל?7*Id2_zt:04{~U}'qBe-zݫp_'q,zXL͞IYx'?JɶLV-QUu򸦷f\:DX#N?Bj&@$pdX 32kDd!s_7&,qRfŷ\~#)nW"/OM6~RA2pR-8I~Kr0hM rx"m|7yxlۋF~$CmKj}&o__Y7/60ߚ+LPQK' J1b<ֈxjy`%B%#dez|7m@ViF@dI1Z2C mK47kOSYuvoMfeUoWv}֎~1VYR$gh~cb;]Q33uC]=l$JY\:_mp\{8kةVumw\:,7^ؓr񓍜uׁ\3?D!)_johϤ ܻcŐIjiekk=:Q<۞x:Vz>'BBd$%k̪B%_3E#s }[QGCL}djMb`BmjS'7m#flĹ]Dq~ 8Љ?hp7 $϶ׯ=zy^fpZy*'&"? jزz1ڜDJ*+1:wʰ\,aJ^ơ-\8<؈Os_`&7畎,E1tP69}=M'V1z[w@ֻZvMhO"}>@̱ߎ,sWo~D2;vM/ۛbG[Z-;{P^R0z,9b^]fԘsw ޜ3Q67!s-Ȇт|!EɇzT9ʜUg!/Hw1 !Q=(=My)]Zbykc9SAḀYW+ზTUClN}QĿ}N*3k%rCو /*]9|uuU҅l%NHq#dbKxPP^'T3DMb)({U![_R.ep4驈2EkƦSX:@b) :V)LUҺ Z 7eV6븎Ħ;kKeRE.^G]}Q+18@\N2*_hc6?eHmQ(v'W .Ce'&ٸO_@YFkf:S1R2[0wC~ҫްT?b4ݚ1 öj2cwaЩY]B ǼZ$V[+ YyKl-?ns>48$@^h<1u%^5ZG8z`=iɢ%P] K| X5f JvFr{r ǎPbexD/_VĶ`nQkё"s]vãR^d$r>) ~8r`oEޙ MNRe`.=fcq;FWϽ"ZUHU͂.:ϿAek uOXL͏tX>QCyFǚBPww9u*3Ww>,}\U=c䣖SpQAǽK2bi}L"4 (%OIhi3%"|^襛7xb:״o(̓dQ6*rUisvIA-5GXCXsy 6tp [xQ/Eniɰ2cPA~/gfϨ#ZN8 Z'C\_=zQӾX__ތeFj_vyy9^ ḡ}0(D]5ZToܔ`7VJ^ŕv? 
bW NOS h$9=H%6FGS$YI~~Md{L3>e8F 'Qs+`0%0ӫhzoF2 Ui2><3+ѤC.ZaI0a3Y%oJw*O.ִwnkVtҸɻ1PX^|yy)9kYJ?\g$ ǒJpdwJM#ƳHMYкMNܿ=#nF)}bfwv#c:+sn8z2g421a dzhFxR *n>Q.#Bؐ'ᶯNݙ wl;|J]4|]{ElM3ctX=NaŹ%WUzsG{/Lv}㆗QkqI ^jE]&P蒲{-:QH⇰+rqҫ~s2e;Ns x>2,{dޮ1A{UOgs=Lt(Yzdx(ftT-V]] ե2ȏ8ˊokM:"/u]7(8 q)`>Y1{ בgz+G3OߝCݒrߑHNE.jy8禱qm}f#ɼנ@/EWkKHʞ`tQIJ~t<3e _YL k-'1Go"~i}rӰ +𙿬c&u[4T<َ)dWIRRf3d3j0q]b4c>H۱o t'ҀϹ ٹ+DXAF]~Fm$YF)p͊jm='#E v|~()>2 K9ao\Enw)luY&'W Lj?$4_ҁx\f$AEHF7:LyZӄIL4vb) ,V1*@3JĻ**~)ST#+|s2qIZZ{I'Mv.U!ys:E`D,TIF?t n-Ƹcxm](뫲ӆƽqt# 'dYK9-!=e¨ o **]{FҺ6Rk99=SWÔv-GwchiI4{|Sẇr(X_gv0 ߔ>!9vJr*Ӈ3:)~)T*m_,J:g(ەV B$h27$KN1>Y@%ć[cYeO(x%*4p~0cD0]x,k! /rpCp0o9oZz,ȽjK{-B#uxZZ+VEQקq\gW[NZUgPd&i4,6 E iХmІiE=wvܶ@ES/f\"dERw幝VE6 zȨy ɜF6yEݾؙ%;GmKC5d/;r.+#(]iSF˺;*xcY `xvA9J׈H̳NQws/ƴI/d;~`d{!;=Hqy*a"#k'qMnM)ZL:0&LYZ쓧D'*G)k-B2i@+-1!gpr ްR#f9Aa>L0%ϹT`Sc̩31V8 IWusO∱kNsotjCMVz^U$ձP3H1-sJ;މgcܒ#\zz(d=T'x6ӗ<=2P!I1'r#z ~x/"c^5+PtJSwDҲXQ4.['wK8A{1SYdwbئ 瑚Wi_߰}"~s!QGd-@Dn@\eH)b %_nps:&Hj2/&4f;+4ZO`%Z <æ _"•dĔH]_MO5)A*PRaD2sֹgu"e= .ҞS>H "&'@u%Q^}9J }LROQ_Խ.(%7x/xb*{xQwdfxzC.dd3(*I.;}h_~Gk4ߍO3OؚB>Ft3x3,ꊿ;j\GV^حil?!~!ÈeElr㻠m̈zGBڗw+ROِFr-K{n&<^fJLCd6ώ^kl__E$+8w*YWPe uBc8GtnC+ L*wL n endstream endobj 84 0 obj << /Type /FontDescriptor /FontName /JQUIKX+URWPalladioL-BoldItal /Flags 4 /FontBBox [-170 -300 1073 935] /Ascent 728 /CapHeight 669 /Descent -256 /ItalicAngle -9 /StemV 114 /XHeight 469 /CharSet (/D/H/T/a/b/x/y/z) /FontFile 83 0 R >> endobj 85 0 obj << /Length1 1616 /Length2 9349 /Length3 0 /Length 10167 /Filter /FlateDecode >> stream xڭxeT\ђ5Npw% 4ҍ45BА.=w yf_͏=Uuv]u/U VqK9PrquT,AEVũJKhAfP @h Z޾p C=A6P #33˿,BtYt//n@{ }_oP dWQՓS0(kd`=@dPY.@F`)ͅK`pqZ^=,\,GrX;/=B /v+_Bΐ *b r^JJ' ' XDZB,\⅚.('9` rq7| K3;_7st7?9.@{+6Tη/9-/A`T?"89atu A f񅄙%l Z+C/) ;D?Dyw!=-jol ;sɀ[H?H0rPV_`" Z6+3>k- 0EϿrrpOdax`"_RJN=T Ƚpsqkduy ^[{B`SOHzS@7og3C.'U,dM-Oa?۪^RI$ׇ+T(m79k]Z,(yU" 2ՖHAVEL^ VZFGձ]-eCg7#0d\8ZM>t4!YwbHEϏsape"F9뾠Fê͋ HF!_}[DlOc.-a7pA!fck_O׸;sTΑÀcRUML#nڀI7%V{Foq& f2 i?G.=4m|hCR8N#5$DZ ! 
.c(޵sU9@.xЧBV|g8^!@-ۙ qVM k'J l'LY- Bӯ qGS#$(Ĵ8c~=1딆7/e"OTc)Zt*و,ҬG+ܧ?odWjeբ\T'Am&0 sw~1e%p@|5ռvKf~{cN9-&5`l`;(41sBv#{Kv|F1MOw5U M8罝LJ twZ@avaƈ^U1bp"cTp`INҍ5i3LXS˨%/e=_v){GHϕ\[:I`Z,RA!hq};+fK7xhՑghn* :p2ǵ<8Їm*S{gޫTyiS4(c4I|B|ܚϩ)E/ޠ%t|e_2{AFkuayյ1#dO]$U5haJ⯧ݾ=Ӗ0s *Wݜhj,&Osmp b(k݈fvtT`c|b8gUq?/9s&O+OK'-Up % .3Pdit}b?.f^ʽHy ckEybxt a23T1rxS$8.ӹɷ#~\l**+7ӯWo{b{Ԫd&GcېT ˢuz:7^QI4#!Ć"}Ҥ$GHR؛NzxVvQUǤxL3l7_֨޶ fQ۷AQ)D( ѶJq:Zޑ܊Ub gr9 NǦYk l#xz|ZaC{X5m `QlOb:WS8*E_6$ڴE} ̬>ܒXpڏvÙ<1fyD[r6_N?n qRY>WR>m |f*`󔶏S.79>61+ꈣhc}&t,[/>E_& X$)ߙP+ Ɉ c3c+b8G9\6|4 d8(PDc1V`e,r{LެHZ[%*|X*I<@{{UV@ ;w>9`E>0Ry~P&[hB&2ʺ#޵ @I(YTuFA+}x&[ JQ pM So ~sGV S'MUCȰ"'=`Qka"l58- }f0M|e?ćOdtV۝uesDI^(,n]яDOf"x]w˟aDgH!a[Q "rVY~ 9 |j`J~܄zl>M~O8x@}@D}H% #SLZT _;j֎H?b Z35C1"iZ0ggI{%gSD&%+]x+w;t2M;~+L2KcGe,(6QEƋcrLៈl51=L`e嶽B9Lk<=AI jl»-?<|[Qq-NvN:{ r| x/}3y e\o wM D]ms)Pz }><@W綡k*?ƎW:Jň%FOZLBlc9[B6T?qs>!*OR:rޡFE1k˧gdKm HLdnwƲgw}np1IJ&sW6T$HAqz,qIEÇ5ᦲuZi |t-Oqy e4u0ooI`b W|HTE]mT{AVNac5~m{ ?^aRO|`/ҫJ,-mٓ5qJe: i,]*}jٖ0E<*i+b I[img Az7C avBx KβPZ %+5<&$\mHXiic斆Fߘbo>jOccf\Ld԰%w3yv;,\C47KS2UAn%=pL}BhDnE.,!ꐧ[/`q' sj|ob>2-h[Sͱj_Qz==#2> F-QqNX{S𓒺 0O|}tj4|ôユȟTJS$϶ȴ9;6L Мс:2M$+3Akv0#obђޟ+`&o\\e اYP]7yj(sl]Z/2~V~u"Sd'Yfw݂wWy,|Fz*lQSXB-pĵ#h"FxG\:>~KU/F.U(s)bd^`L* +ZK})teքdvWzZڀ؆ra5 1,Cͩkw&ibWr5ua-N,~+6@6L![ϢG@= BV4wtoʉ%qV &xw@]1&OUy* "%kD!FЩòP H7LMdͩޜ* ag۠]qî}ZpIJp~'BpAqyo~!, g̷{LK~7~\-FF$m1R'ߕ 7b]؛">N4@]}߭#*`A7n.ZQhF}F xz{;V4 ^7DA3>4p^ E;ӷ-r~dP Mꡕ>UY.EAd7{6TD݊oG< `lɒ<) O.9RaTO9KQQr{EW]dzPMUC?Zc1y5u5:c\AEޜ"I)t.$Wjk|x[/ɝ2̜ ?],OF,A0ۤaS%gzH5ٺ CGOfK~j=:H=sV";B.&FZgpY~*yu?-GxSWYߍZo/%v>J"%T%v\~ԥ~D: X`Ըyd`Ӭ䮙[u Bo#O/ G%Aak6)("]7J=ޚ)Bwd笱 XW fmuQZ_q1Z1v`g{!/4!Vt!+.[;J,U<1gT~|,7 p|_bHˆ)>;Y~'Tr'zbaEۏᗂ vOuB&N|kb%/SOxi qO/+.e9e))?w>D i\SlLlcՠk,xc`#MX%gӎWߩ`blWnlrHz7]]rا$Ukv۬mrUxy1qFo!`nٷLD5#ܲ3b &!tsޫk d蹳I-{'+gROK $SGTMW[HڭB_N0Uv#lhIiT -/!mIjfW=y3,m[NX:R 0WHe#ljJM4<x)1R<Ps"4-Z7c / ڻZ]nJ3|"8~eqLwuS24>~q058캮TyZ]W@zaymm}- 1N` DzaEP]J>\Fߜ_ n=IBWͧ>&Uxc+E*҅{:kng~/~@>q8]7Fy{p+\RߌP?5zHnLe׽/[v~vED Re..DO njWI*5tXsu5}[+M2WΑss4J~Qs`>xšݡ#8ɱR1v*d$䮎 
l$#ʃܐ<ɼzA:aK3 ~?5LK.L>+33(Kʙ%*VgѣSIV{plYQ l@̛-~oQքтFHh:ocq 2O̷ \֩K)0)Hք2;WN/t4n1/\Pf--'.ůemΤe2?p2_*uN܇wVP 3X(\".`ߡ&}oђTΖ*Z~c,}W Qw#×0K0?gz1=ML q%KS< /e[ydZV!xM%f:#I G{))Īd\7GxK{z{n~KɣvƱ%=O`JwBE#}:Y^C1W+~eKgHᡑ{~:dr-̐GPPP>\ .d!XXUgѡz$,èQ~v!YgEԛ;&)UGƪ~'n!S&S4R!w{0$Ƨ]ٶ0~}%\SHԺ$lO}UI;TT#I1#b"[Ȯ!9dDؽcM-%_p鰉B^s< )>DG=* :yH:"yU>%}.$bok^LR2d4^"̍M7dG}Y}qgXPHyz^}~vbŲE~gbzڦQȺka\c6&ֺ+^k^*cf r!ԡN&"PBƵltqm!ڬlqzbbEݘdt-Zy{OUo=[RV}\ɞ C-+Ohf(&`lxDlT*Cy794.}v'y͏ 8D0 ܁AǷPBbXN b::YJYŽZZJ)slN$IšSBsr|ȩ&ljȚYV2-IR *ffWߗ'c({$x?0wteG<٠02c>]W=bƔ0/b|SM&#y}PJj?{赥's_ulܱM2>O$NkG3SrtкȽ' nq/wX!w@RDnIl2wɜ~ 4s\53Tw@aU_x4~x(c嘭KjC+dSHhtCo/r6s@"MD55J/8}Z_+iMry$swͫ)N2}-|^_8yuv"1Rk/ox[j,8~sDkqË#(X}T̓ѝ2Rn7ZK@-Ð1ď|b*4-,(&Z;Ka?? ֟N.IZeU8S* cutΛ ƞNFЙ\P*Jf}K $Y滰7׎2x0 C]ťVi}l[$KxmJĂ;uZ{zxN& 6cpij`ODm+ޚRl.bE#H}ԅ1G[gٔ?]B_9"W*v3%Or|s[ϖ}XS'}"gQd"qRl5{mD&.^(鏹w$›w ="Rhr}&d1˒=SBQ9=lJ1-??L endstream endobj 86 0 obj << /Type /FontDescriptor /FontName /EZMALJ+URWPalladioL-Roma /Flags 4 /FontBBox [-166 -283 1021 943] /Ascent 715 /CapHeight 680 /Descent -282 /ItalicAngle 0 /StemV 84 /XHeight 469 /CharSet (/comma/e/one/p/period/two/x/zero) /FontFile 85 0 R >> endobj 87 0 obj << /Length1 1620 /Length2 9543 /Length3 0 /Length 10367 /Filter /FlateDecode >> stream xڭVUT\ۖ% ww( SPC4C 5iro~=^W8gZs{CMTg37K۹04Դ&66&f{Ef9 Z l4qfI0`CH;@ .zFFZL@^"!v7-\,s  ԑS(kdv`&6@9NOk,/\by { & 8,L\^fb؁l\b7 ' h r8^%Ong 74i/u19\.rfgK2'_e:C,Y ladfvv~y3 oݛ88@j8mYP9^r\^r[@PXl9;s{;v3W`n`DgпabfogQX]^Ro*D7Ho"OyWjiWe[_A1EK&Z࿫T[ژ8+7ŋ"|,\[!ؼ/b~q` Y>_Y555eEP0?h)ٛ r^{v%_D\+8A<zl,ll쀗? FdogǨؙl2ANN/u_{A(s3 /i.xYCz{ ?ٷ?Ԍ?5B76ж%}(;rhy7Y "rin? 
4s:!_SR]9RbۧIعi; b̌A0M#Ot;]TxGܵZ=D(yjbQ=On԰p!dW@gdB\RJz~ЍQ-]\%I6d^Iv {7 x}قBM\5?ՙ,%8.Dry:#OvvMݥf/!6'iWWS;DcCQ6?FaL cdwiQzXeW=4q"yX_iD4%^m`Ypx&G 6o5RDʓ'L #2KB6Ke:2.~^ 2hEe@6[J4м0=FfKL~a5("~]~w+TNw niW0Un<.H^xoPsLn* ]5%-"Z.c% ۅr/% Qa:Ҩ%-_/CBGLЏC3sXj SH3"|VvwUw"ǶJo}ū 3#t*q0}VS/M}U%]ɭ-j,v9hlVAZ\yFɛifsWɢB~'ES+t"%1DuswԛoհmBgY(M_l/^-$ O5 ]Z%!^ZfTV(s7]T"$HiU϶:ʧa1nl5΍Yc14~vbrV [ ?>BI6ш.fl!ٖ1cUSΝV)ښJ%>g $hlʚ39uh1W<#F(W<$[ݍ x'g!e {4:V%1P,oqK:v~QpBtc]Nc.$DJ4 w%}F~ I%{T:/Iq%}j#sE@D{z:t\@>.^sW\XuBc=ggowf+ܢؖ2Ե)+S+3uLkvѶuhYa~xgj|Q\5k~PgleZ~Z)h!PӬXi[V6yy9Oo6yӃcD!WORXb%?$0Ҩ36G6os?~4DROZuBoYY(!ER!ux4G|뤠kηq*oy@ڼ28>ngaݣ%Nu3FIDXb{.K.&G =ߓDhP/wSSp9zO"<6Â]'O57 @-cVNHܻbcd\b4CO;&p.riǝCtcGSds;`8ZF!dE)_,&+aNPƯd)s bщrs5讧ȓ6Kvy}x-a:oͺ޿T:挩eT 4.j.pG'"=2Y%> C)O<կFo*BB_E`G8CmaL1qڗٮ#,vf#ٞ$Jmv2 5^-^5DyPD#8+{Ynw JfQa4lnO])G$3kL煰VwK CtUlB0GZmcfщ1!P|7.J_~,;\͆?5/ B )']KÙsuW|1^TnE-j7O!f'Th>#g˘m܃H{_!QE.D%+=5*o#FLmYm~. [w4a:]4"uK k0BUv1̈́0|{EM0W!j!T0Cd6vMYlՠkDQ'9vfT=\W%+3hrΓdyOAja5Q'SJD^i0*(lt/hyŬC:K&'u TkK2;*ܜ]Y/_2_$eEeqQS΅[^;{Ec8LsR ԷNI5SޯFIn9 ~n<ԻH|96n񫮶o :`65‡x_4AF{_BɡɈ *go6F=m/}Jo0yGthіt9q0(t$=n/)S ]NItV`cIxd7{bߙ ?h;pЋ2XD|2vXrxCwS#'[*/OV-o;0?ۥ!N ~`uҺ:66^de?vҦ6x Z@HDWW2&ד X*g[]lБDp&5ƘAU7КH$Ub޼]t&|B*[d`r%ХK|rr5=%,D~ts5sFTdnerfD2BHζma;rNڙN/Jw3Nb|ֱ]{q7ѭ9ԋ[8ImC?=Iϱ-^Gu&|Z} t)Q^ӽzz=s&RiEv+jZ3Rs FK'-R5O)p+*JcD83/"aW| R\ˇ!9{x0 Dxݞx8 ==ըxf| o q2u4$rC}P*оŤS(jMr q%~Tgip팶5ҐE?v.r$@Hlkm>@7y s43YM # 4LAf¸.z {,>;n1 lF z&À| k1lS rj[?2n6 \XDҺ2,fPmQdYn5mл=ߒ^Gwp)\vp<`g = ;IΥn'Py4᧐&-S80Nr>LB5YYc{-${9I% gƪ[Iak*qI1@:F^YflQFщ Χ4(fCcV9iEVS\aۣraC;v eߨz^~{C(ceKeNL+umzX+5Qc ʇגPF!*}7{҄40bM͑ˑUdukmM >q&WuijRž_J}}oUy`G2~k٩!Fش,cԱ8"1 +W_;gލ[4,KL+4i枡`4xbu9N;-r_vy6瞹*۰[+mL:eC;;%ePizƜmn{<=NIժ!KvQ9fZ8-pyson+.^ʴB }vT"a.U׎fΩ>$\ } 3d:<5B{B02hoݻ߉-]'Opa˓x2-x5j3/VezliK.LבDͯvjN[M<;afb,\y7 F~R&`IE^Y $fmEK`B=-1Ð嗯-F8*8sCs>MFƍ jWr|ULQh$'rhhWtNN[Չg^=݈R ޠ\J\rd,vG(0G6N~6ܐz%c oR#wc/AŒ]v2²P|D9~b{º9M|i2޵CKIzo)vTL7 N3@ac c MJLr\wG(zn3Vö6"ýalt陭Պ$ <}~3@Q&|hM+ǰHx/|$'\캻)OԳM7ƞs)?\NZc s9$ҲGXO5i?gb&$:C WH(yd%G2(2/.8U 
gtDeھVr˞t6{Zw!H**DpiYq¦dJa.$RV;@'ଞ(`\y'r:!%\gyBrDbM;VɊtƷBu_p+k:"u*k|kwk!o1$ Y zgF4}\00]7ƽmqf95@LrqU?.f)zr3w=* RնO7+'t .iץ\mIQ|!mܯ}uKwط־ll&3y1!oR%H`6n_e֠(r=g>YU<w uECeH-z¼,I9D;P_@W5+ӯqatа)?>v +}U`+; {*LY{Uͳ>_,v"ZtvHx0ptq֣+sFbDKr-H5hcuQR PU4=TH"Րz4Y)w:c%AJ$8ސ m&~[0!_;vm)[2-#W{ ՞"`E}CEMzhUTuWR ]wPJN2"\2vhHɵ! H[b2(- fʊ@3'~'\ I`B#q |H)#tm,ԫ AqͰʹ-xooTǼF#۰ŅDUM6i={00 ,K8HۨLM8˝bFCM(':\1'CZ62ʥXJf8:4nP7.:9땃)y1qĀÝXx#X;D"wHuiM>m so-OR@[~CȴCPV['!s>9$GTwIz0o`#6uy"{%{X&~S0 u/}+D L Ҝ{$X">-[e0jӺy+qBbлb.S/}!m #  _U!,NO?fͮdK~o? JCz?(`?mz8yntVTe^ڐ|*ˊi %@MNTfy]7]B#F~ZAq ?ⱉ$ eE =7*ŧXLYqBMvS<ލl l̬c #yP;ynEe,mٿ3v_NWe8;Јֻ8EF"`~sXyÕd#Ce$!TsGrKhWsfO:`ZI9?oڲ6V+* rX$.o)1K9eqY3ۊ=&_28ԞUE9#a}[`!Agw1t] rd@⊈-+ BA``xM9L V-4 tK ?J6 0hym-n&K{Lۈ7G^{qY* 0Й(l մяȺZfQkqH8n4"L}+{eb-oKRs_+[ISW~bB [51vRT6:H}˵27b.^.nX{־^g)[X7[ Ew!)L0ʪ%"RwF0(m=n3=WKFadl0U]XZ]0˾ǹT/#XL، GcS>ev/:EXR^k7/j!@Y3:UnN[d}` &HdoOa7ﻵT93*3%jC[w}`4jC*yt|[4 =t.c$)TL p~*y߫\+arY9MeP|\{dLD>RWj(Q|."'< {lsZH'.FhT&$ުY1Gs:YpaWCp.S6V'šQO)MSje~ eBi}4ǪRq vTW !xVbVqKΜ󞚰% !-ձ5+iS# Au&%aؚ;疜F06نŴ9F ySD LS_PMܽ1'i|Vrp}P$:戇;x-0Sx٫)]%5cyI+HQ厄7eCz0.[^3HOgoG"Y~;Nt6se0L N*4 #&1?vL"c AEp1 tCA|]6u'Q?`xGq+t\vA,oT3؛6q-뮣a5oS! 
ȇb {o*Z> endobj 45 0 obj << /Type /Encoding /Differences [44/comma 46/period 48/zero/one/two 68/D/E 72/H 78/N 80/P 84/T/U/V 88/X/Y 97/a/b 100/d/e/f 105/i/j 110/n 112/p 114/r 120/x/y/z] >> endobj 14 0 obj << /Type /Font /Subtype /Type1 /BaseFont /ESYHOE+CMB10 /FontDescriptor 54 0 R /FirstChar 109 /LastChar 118 /Widths 41 0 R >> endobj 17 0 obj << /Type /Font /Subtype /Type1 /BaseFont /ZOHZIA+CMBX12 /FontDescriptor 56 0 R /FirstChar 49 /LastChar 120 /Widths 38 0 R >> endobj 13 0 obj << /Type /Font /Subtype /Type1 /BaseFont /BKFEXL+CMEX10 /FontDescriptor 58 0 R /FirstChar 16 /LastChar 111 /Widths 42 0 R >> endobj 7 0 obj << /Type /Font /Subtype /Type1 /BaseFont /DWCCGG+CMR10 /FontDescriptor 60 0 R /FirstChar 11 /LastChar 123 /Widths 49 0 R >> endobj 5 0 obj << /Type /Font /Subtype /Type1 /BaseFont /TKUXEF+CMR12 /FontDescriptor 62 0 R /FirstChar 44 /LastChar 117 /Widths 51 0 R >> endobj 4 0 obj << /Type /Font /Subtype /Type1 /BaseFont /FWHOTG+CMR17 /FontDescriptor 64 0 R /FirstChar 68 /LastChar 118 /Widths 52 0 R >> endobj 18 0 obj << /Type /Font /Subtype /Type1 /BaseFont /KUMESX+CMR8 /FontDescriptor 66 0 R /FirstChar 12 /LastChar 119 /Widths 37 0 R >> endobj 16 0 obj << /Type /Font /Subtype /Type1 /BaseFont /EAXHAV+CMSS10 /FontDescriptor 68 0 R /FirstChar 82 /LastChar 82 /Widths 39 0 R >> endobj 6 0 obj << /Type /Font /Subtype /Type1 /BaseFont /TQHDZA+CMSY10 /FontDescriptor 70 0 R /FirstChar 0 /LastChar 106 /Widths 50 0 R >> endobj 29 0 obj << /Type /Font /Subtype /Type1 /BaseFont /ITITLT+CMTI10 /FontDescriptor 72 0 R /FirstChar 39 /LastChar 121 /Widths 30 0 R >> endobj 15 0 obj << /Type /Font /Subtype /Type1 /BaseFont /ERBNXF+CMTT10 /FontDescriptor 74 0 R /FirstChar 40 /LastChar 118 /Widths 40 0 R >> endobj 12 0 obj << /Type /Font /Subtype /Type1 /BaseFont /GJUYUR+PazoMath-Bold /FontDescriptor 76 0 R /FirstChar 83 /LastChar 83 /Widths 43 0 R >> endobj 11 0 obj << /Type /Font /Subtype /Type1 /BaseFont /JZSEYD+PazoMath-BoldItalic /FontDescriptor 78 0 R /FirstChar 97 
/LastChar 109 /Widths 44 0 R >> endobj 24 0 obj << /Type /Font /Subtype /Type1 /BaseFont /DCWRCI+PazoMath /FontDescriptor 80 0 R /FirstChar 70 /LastChar 70 /Widths 32 0 R >> endobj 25 0 obj << /Type /Font /Subtype /Type1 /BaseFont /PMIEOT+PazoMath-Italic /FontDescriptor 82 0 R /FirstChar 109 /LastChar 115 /Widths 31 0 R >> endobj 8 0 obj << /Type /Font /Subtype /Type1 /BaseFont /JQUIKX+URWPalladioL-BoldItal /FontDescriptor 84 0 R /FirstChar 68 /LastChar 122 /Widths 48 0 R /Encoding 45 0 R >> endobj 10 0 obj << /Type /Font /Subtype /Type1 /BaseFont /EZMALJ+URWPalladioL-Roma /FontDescriptor 86 0 R /FirstChar 44 /LastChar 120 /Widths 46 0 R /Encoding 45 0 R >> endobj 9 0 obj << /Type /Font /Subtype /Type1 /BaseFont /VBSVVH+URWPalladioL-Ital /FontDescriptor 88 0 R /FirstChar 69 /LastChar 121 /Widths 47 0 R /Encoding 45 0 R >> endobj 19 0 obj << /Type /Pages /Count 3 /Kids [2 0 R 21 0 R 27 0 R] >> endobj 89 0 obj << /Type /Catalog /Pages 19 0 R >> endobj 90 0 obj << /Producer (MiKTeX pdfTeX-1.40.15) /Creator (TeX) /CreationDate (D:20150827141901+02'00') /ModDate (D:20150827141901+02'00') /Trapped /False /PTEX.Fullbanner (This is MiKTeX-pdfTeX 2.9.5496 (1.40.15)) >> endobj xref 0 91 0000000000 65535 f 0000003719 00000 n 0000003607 00000 n 0000000015 00000 n 0000192952 00000 n 0000192813 00000 n 0000193370 00000 n 0000192674 00000 n 0000194385 00000 n 0000194726 00000 n 0000194557 00000 n 0000193938 00000 n 0000193791 00000 n 0000192533 00000 n 0000192251 00000 n 0000193650 00000 n 0000193230 00000 n 0000192392 00000 n 0000193091 00000 n 0000194894 00000 n 0000008756 00000 n 0000008641 00000 n 0000003961 00000 n 0000014261 00000 n 0000194092 00000 n 0000194234 00000 n 0000013286 00000 n 0000013171 00000 n 0000008962 00000 n 0000193509 00000 n 0000013468 00000 n 0000013966 00000 n 0000014010 00000 n 0000014032 00000 n 0000014507 00000 n 0000014532 00000 n 0000014594 00000 n 0000014629 00000 n 0000015295 00000 n 0000015719 00000 n 0000015743 00000 n 0000016077 00000 n 
0000016153 00000 n 0000016747 00000 n 0000016769 00000 n 0000192072 00000 n 0000016839 00000 n 0000017166 00000 n 0000017396 00000 n 0000017635 00000 n 0000018261 00000 n 0000018880 00000 n 0000019285 00000 n 0000019605 00000 n 0000027563 00000 n 0000027791 00000 n 0000039569 00000 n 0000039844 00000 n 0000048219 00000 n 0000048562 00000 n 0000071551 00000 n 0000072025 00000 n 0000082841 00000 n 0000083115 00000 n 0000094384 00000 n 0000094646 00000 n 0000104242 00000 n 0000104489 00000 n 0000111500 00000 n 0000111717 00000 n 0000119701 00000 n 0000119975 00000 n 0000135454 00000 n 0000135780 00000 n 0000147879 00000 n 0000148182 00000 n 0000150998 00000 n 0000151218 00000 n 0000155584 00000 n 0000155823 00000 n 0000158284 00000 n 0000158496 00000 n 0000162209 00000 n 0000162439 00000 n 0000170525 00000 n 0000170775 00000 n 0000181061 00000 n 0000181321 00000 n 0000191807 00000 n 0000194966 00000 n 0000195017 00000 n trailer << /Size 91 /Root 89 0 R /Info 90 0 R /ID [<5E5784700D8F938132CF2E0ABEA675E0> <5E5784700D8F938132CF2E0ABEA675E0>] >> startxref 195239 %%EOF tmvtnorm/inst/doc/GibbsSampler.Rnw0000644000176200001440000002337412567600065016744 0ustar liggesusers%\VignetteIndexEntry{A short description of the Gibbs Sampler} \documentclass[a4paper]{article} \usepackage{Rd} \usepackage{amsmath} \usepackage{natbib} \usepackage{palatino,mathpazo} \usepackage{Sweave} %\newcommand{\pkg}[1]{\textbf{#1}} \newcommand{\vecb}[1]{\ensuremath{\boldsymbol{\mathbf{#1}}}} \def\bfx{\mbox{\boldmath $x$}} \def\bfy{\mbox{\boldmath $y$}} \def\bfz{\mbox{\boldmath $z$}} \def\bfalpha{\mbox{\boldmath $\alpha$}} \def\bfbeta{\mbox{\boldmath $\beta$}} \def\bfmu{\mbox{\boldmath $\mu$}} \def\bfa{\mbox{\boldmath $a$}} \def\bfb{\mbox{\boldmath $b$}} \def\bfu{\mbox{\boldmath $u$}} \def\bfSigma{\mbox{\boldmath $\Sigma$}} \def\bfD{\mbox{\boldmath $D$}} \def\bfH{\mbox{\boldmath $H$}} \def\bfT{\mbox{\boldmath $T$}} \def\bfX{\mbox{\boldmath $X$}} \def\bfY{\mbox{\boldmath $X$}} \title{Gibbs Sampler for 
the Truncated Multivariate Normal Distribution} \author{Stefan Wilhelm\thanks{wilhelm@financial.com}} \begin{document} \maketitle In this note we describe two ways of generating random variables with the Gibbs sampling approach for a truncated multivariate normal variable $\bfx$, whose density function can be expressed as: \begin{eqnarray*} f(\bfx,\bfmu,\bfSigma,\bfa,\bfb) & = & \frac{ \exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\}} } { \int_{\bfa}^{\bfb}{\exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\} } d\bfx } } \end{eqnarray*} for $\bfa \le \bfx \le \bfb$ and $0$ otherwise.\\ \par The first approach, as described by \cite{Kotecha1999}, uses the covariance matrix $\bfSigma$ and has been implemented in the R package \pkg{tmvtnorm} since version 0.9 (\cite{tmvtnorm-0.9}). The second way is based on the works of \cite{Geweke1991,Geweke2005} and uses the precision matrix $\bfH = \bfSigma^{-1}$. As will be shown below, the usage of the precision matrix offers some computational advantages, since it does not involve matrix inversions and is therefore favorable in higher dimensions and settings where the precision matrix is readily available. Applications are for example the analysis of spatial data, such as from telecommunications or social networks.\\ \par Both versions of the Gibbs sampler can also be used for general linear constraints $\bfa \le \bfD \bfx \le \bfb$, what we will show in the last section. The function \code{rtmvnorm()} in the package \pkg{tmvtnorm} contains the \R{} implementation of the methods described in this note (\cite{tmvtnorm-1.3}). \section{Gibbs Sampler with convariance matrix $\bfSigma$} We describe here a Gibbs sampler for sampling from a truncated multinormal distribution as proposed by \cite{Kotecha1999}. It uses the fact that conditional distributions are truncated normal again. 
Kotecha use full conditionals $f(x_i | x_{-i}) = f(x_i | x_1,\ldots,x_{i-1},x_{i+1},\ldots,x_{d})$.\\ \par We use the fact that the conditional density of a multivariate normal distribution is multivariate normal again. We cite \cite{Geweke2005}, p.171 for the following theorem on the Conditional Multivariate Normal Distribution.\\ Let $\bfz = \left( \begin{array}{c} \bfx \\ \bfy \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_x \\ \bfmu_y \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{xx} & \bfSigma_{xy} \\ \bfSigma_{yx} & \bfSigma_{yy} \end{array} \right]$\\ Denote the corresponding precision matrix \begin{equation} \bfH = \bfSigma^{-1} = \left[ \begin{array}{cc} \bfH_{xx} & \bfH_{xy} \\ \bfH_{yx} & \bfH_{yy} \end{array} \right] \end{equation} Then the distribution of $\bfy$ conditional on $\bfx$ is normal with variance \begin{equation} \bfSigma_{y.x} = \bfSigma_{yy} - \bfSigma_{yx} \bfSigma_{xx}^{-1} \bfSigma_{xy} = \bfH_{yy}^{-1} \end{equation} and mean \begin{equation} \bfmu_{y.x} = \bfmu_{y} + \bfSigma_{yx} \bfSigma_{xx}^{-1} (\bfx - \bfmu_x) = \bfmu_y - \bfH_{yy}^{-1} \bfH_{yx}(\bfx - \bfmu_x) \end{equation} \par In the case of the full conditionals $f(x_i | x_{-i})$, which we will denote as $i.-i$ this results in the following formulas: $\bfz = \left( \begin{array}{c} \bfx_i \\ \bfx_{-i} \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_i \\ \bfmu_{-i} \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{ii} & \bfSigma_{i,-i} \\ \bfSigma_{-i,i} & \bfSigma_{-i,-i} \end{array} \right]$ Then the distribution of $i$ conditional on $-i$ is normal with variance \begin{equation} \bfSigma_{i.-i} = \bfSigma_{ii} - \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} \bfSigma_{-i,i} = \bfH_{ii}^{-1} \end{equation} and mean \begin{equation} \bfmu_{i.-i} = \bfmu_{i} + \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} (\bfx_{-i} - \bfmu_{-i}) = \bfmu_i - \bfH_{ii}^{-1} 
\bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i}) \end{equation} We can then construct a Markov chain which continously draws from $f(x_i | x_{-i})$ subject to $a_i \le x_i \le b_i$. Let $\bfx^{(j)}$ denote the sample drawn at the $j$-th MCMC iteration. The steps of the Gibbs sampler for generating $N$ samples $\bfx^{(1)},\ldots,\bfx^{(N)}$ are: \begin{itemize} \item Since the conditional variance $\bfSigma_{i.-i}$ is independent from the actual realisation $\bfx^{(j)}_{-i}$, we can well precalculate it before running the Markov chain. \item Choose a start value $\bfx^{(0)}$ of the chain. \item In each round $j=1,\ldots,N$ we go from $i=1,\ldots,d$ and sample from the conditional density $x^{(j)}_i | x^{(j)}_1,\ldots,x^{(j)}_{i-1},x^{(j-1)}_{i+1},\ldots,x^{(j-1)}_{d}$. \item Draw a uniform random variate $U \sim Uni(0, 1)$. This is where our approach slightly differs from \cite{Kotecha1999}. They draw a normal variate $y$ and then apply $\Phi(y)$, which is basically uniform. \item We draw from univariate conditional normal distributions with mean $\mu$ and variance $\sigma^2$. See for example \cite{Greene2003} or \cite{Griffiths2004} for a transformation between a univariate normal random $y \sim N(\mu,\sigma^2)$ and a univariate truncated normal variate $x \sim TN(\mu,\sigma^2, a, b)$. 
For each realisation $y$ we can find a $x$ such as $P(Y \le y) = P(X \le x)$: \begin{equation*} \frac{ \Phi \left( \frac{x - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } { \Phi \left( \frac{b - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } = \Phi \left( \frac{y - \mu}{\sigma} \right) = U \end{equation*} \item Draw $\bfx_{i.-i}$ from conditional univariate truncated normal distribution \\ $TN(\bfmu_{i.-i}, \bfSigma_{i.-i}, a_i, b_i)$ by \begin{equation} \begin{split} \bfx_{i.-i} & = \bfmu_{i.-i} + \\ & \sigma_{i.-i} \Phi^{-1} \left[ U \left( \Phi \left( \frac{b_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) - \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right) + \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right] \end{split} \end{equation} \end{itemize} \section{Gibbs Sampler with precision matrix H} The Gibbs Sampler stated in terms of the precision matrix $\bfH = \bfSigma^{-1}$ instead of the covariance matrix $\bfSigma$ is much easier to write and to implement: Then the distribution of $i$ conditional on $-i$ is normal with variance \begin{equation} \bfSigma_{i.-i} = \bfH_{ii}^{-1} \end{equation} and mean \begin{equation} \bfmu_{i.-i} = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i}) \end{equation} Most importantly, if the precision matrix $\bfH$ is known, the Gibbs sampler does only involve matrix inversions of $\bfH_{ii}$ which in our case is a diagonal element/scalar. Hence, from the computational and performance perspective, especially in high dimensions, using $\bfH$ rather than $\bfSigma$ is preferable. When using $\bfSigma$ in $d$ dimensions, we have to solve for $d$ $(d-1) \times (d-1)$ matrices $\bfSigma_{-i,-i}$, $i=1,\ldots,d$, which can be quite substantial computations. \section{Gibbs Sampler for linear constraints} In this section we present the Gibbs sampling for general linear constraints based on \cite{Geweke1991}. 
We want to sample from $\bfx \sim N(\bfmu, \bfSigma)$ subject to linear constraints $\bfa \le \bfD \bfx \le \bfb$ for a full-rank matrix $\bfD$.\\ Defining \begin{equation} \bfz = \bfD \bfx - \bfD \bfmu, \end{equation} we have $E[\bfz] = \bfD E[\bfx] - \bfD \bfmu = 0$ and $Var[\bfz] = \bfD Var[\bfx] \bfD' = \bfD \bfSigma \bfD'$. Hence, this problem can be transformed to the rectangular case $\bfalpha \le \bfz \le \bfbeta$ with $\bfalpha = \bfa - \bfD \bfmu$ and $\bfbeta = \bfb - \bfD \bfmu$. It follows $\bfz \sim N(0, \bfT)$ with $\bfT = \bfD \bfSigma \bfD'$.\\ In the precision matrix case, the corresponding precision matrix of the transformed problem will be $\bfT^{-1} = ( \bfD \bfSigma \bfD' )^{-1} = \bfD'^{-1} \bfH \bfD^{-1}$. We can then sample from $\bfz$ the way described in the previous sections (either with covariance or precision matrix approach) and then transform $\bfz$ back to $\bfx$ by \begin{equation} \bfx = \bfmu + \bfD^{-1} \bfz \end{equation} \bibliographystyle{plainnat} \bibliography{tmvtnorm} \end{document}tmvtnorm/src/0000755000176200001440000000000012567600065012736 5ustar liggesuserstmvtnorm/src/Fortran2CWrapper.c0000644000176200001440000000105012567600065016237 0ustar liggesusers#include #include #include void F77_SUB(rndstart)(void) { GetRNGstate(); } void F77_SUB(rndend)(void) { PutRNGstate(); } double F77_SUB(normrnd)(void) { return norm_rand(); } double F77_SUB(unifrnd)(void) { return unif_rand(); } double F77_SUB(pnormr)(double *x, double *mu, double *sigma, int *lower_tail, int *log_p) { return pnorm(*x, *mu, *sigma, *lower_tail, *log_p); } double F77_SUB(qnormr)(double *p, double *mu, double *sigma, int *lower_tail, int *log_p) { return qnorm(*p, *mu, *sigma, *lower_tail, *log_p); } tmvtnorm/src/Makevars0000644000176200001440000000013312567600065014427 0ustar liggesusersPKG_LIBS=$(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) all: $(SHLIB) rtmvnormgibbs.o: linked_list.o tmvtnorm/src/linked_list.f900000644000176200001440000000331712567600060015556 
0ustar liggesusersmodule linked_list implicit none ! type matrix row, holds a pointer to the root element of the linked list type matrixrow type(node),pointer :: first ! pointer to first node in linked list type(node),pointer :: last ! pointer to last node in linked list end type matrixrow ! matrix element for sparse matrix elements H[i,j]=v type matrixelem integer :: i, j double precision :: v end type matrixelem ! define a linked list of matrix elements type node type(matrixelem) data ! data type(node),pointer::next ! pointer to the ! next element end type node CONTAINS ! insert the new matrix element H[i,j]=v to the linked list of row "i" subroutine insert_list_element(row, newelem) type(matrixrow) :: row type(matrixelem) :: newelem if (.not. associated(row%first)) then allocate(row%first) nullify(row%first%next) row%first%data = newelem row%last => row%first !print *,"added element to linked list i=",newelem%i," j=",newelem%j," v=",newelem%v else allocate(row%last%next) nullify(row%last%next%next) row%last%next%data = newelem row%last => row%last%next !print *,"added element to linked list i=",newelem%i," j=",newelem%j," v=",newelem%v endif end subroutine ! remove all elements of the linked list and free memory subroutine free_all(row) implicit none type(matrixrow) :: row type(node), pointer :: tmp do tmp => row%first if (associated(tmp) .eqv. .FALSE.) exit row%first => row%first%next deallocate(tmp) end do end subroutine free_all end module linked_list tmvtnorm/src/rtmvnormgibbs.f900000644000176200001440000006274712567600060016164 0ustar liggesusers! Gibbs sampling from a truncated multinormal distribution ! ! References ! 1. Kotecha et al. (1999): ! Kotecha, J. H. & Djuric, P. M. ! "Gibbs sampling approach for generation of truncated multivariate Gaussian random variables", ! IEEE Computer Society, IEEE Computer Society, 1999, 1757-1760 ! ! 2. Geweke (2005): Contemporary Bayesian Econometrics and ! Statistics. John Wiley and Sons, 2005, pp. 171-172 ! ! ! 
Code written by Stefan Wilhelm as part of the R package tmvtnorm. ! (http://CRAN.R-project.org/package=tmvtnorm) ! ! To cite package tmvtnorm in publications use: ! ! Stefan Wilhelm, Manjunath B G (2012). tmvtnorm: Truncated ! Multivariate Normal Distribution. R package version 1.4-5. ! ! A BibTeX entry for LaTeX users is ! ! @Manual{, ! title = {{tmvtnorm}: Truncated Multivariate Normal Distribution}, ! author = {Stefan Wilhelm and Manjunath B G}, ! year = {2012}, ! note = {R package version 1.4-5}, ! url = {http://CRAN.R-project.org/package=tmvtnorm}, ! } ! ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param sigma covariance matrix (d x d) ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnormgibbscov(n, d, mean, sigma, lower, upper, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, i, j, k, l, ind = 0, burnin, thinning ! subindex "-i" integer, dimension(d-1) :: minus_i double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d-1) :: s3 double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! Kovarianzmatrix sigma und Partitionen Sigma_i, sigma_ii und S double precision, dimension(d, d) :: sigma double precision, dimension(d, d-1) :: Sigma_i double precision :: sigma_ii double precision, dimension(d-1,d-1) :: S ! S_inv (d-1 x d-1) ist die Inverse von S double precision, dimension(d-1,d-1) :: S_inv ! Liste von d mal 1 x (d-1) Matrizen = d x (d-1) Matrix double precision, dimension(d, d-1) :: P ! 
Deklarationen frs Matrix-Invertieren mit LAPACK-Routinen (Dimension d-1) double precision, dimension( d-1 ) :: work ! ipiv = pivot indices integer, dimension( d-1 ) :: ipiv ! lda = leading dimension integer :: m, lda, lwork, info ! initialise R random number generator call rndstart() m =d-1 lda =d-1 lwork=d-1 ind = 0 ! Partitioning of sigma ! sigma = [ sigma_ii Sigma_i ] ! (d x d) [ (1 x 1) (1 x d-1) ] ! [ Sigma_i' S ] ! [ (d-1 x 1) (d-1 x d-1) ] ! List of conditional variances sd(i) can be precalculated do i = 1,d ! subindex "-i" minus_i = (/ (j, j=1,i-1), (j, j=i+1,d) /) S = sigma(minus_i, minus_i) ! Sigma_{-i,-i} : (d-1) x (d-1) sigma_ii = sigma(i,i) ! Sigma_{i,i} : 1 x 1 Sigma_i(i,:) = sigma(i, minus_i) ! Sigma_{i,-i} : 1 x (d-1) ! Matrix S --> S_inv umkopieren do k=1,(d-1) do l=1,(d-1) S_inv(k,l)=S(k,l) end do end do ! Matrix invertieren ! LU-Faktorisierung (Dreieckszerlegung) der Matrix S_inv call dgetrf( m, m, S_inv, lda, ipiv, info ) ! Inverse der LU-faktorisierten Matrix S_inv call dgetri( m, S_inv, lda, ipiv, work, lwork, info ) P(i,:) = pack(matmul(Sigma_i(i,:), S_inv), .TRUE.) ! (1 x d-1) %*% (d-1 x d-1) = (1 x d-1) s2 = 0 do j = 1,d-1 s2 = s2 + P(i,j) * Sigma_i(i,j) end do sd(i) = sqrt(sigma(i,i) - s2) ! (1 x d-1) * (d-1 x 1) --> sd[[i]] ist (1,1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! Berechnung von bedingtem Erwartungswert und bedingter Varianz: ! bedingte Varianz hngt nicht von x[-i] ab! ! subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! 
mu_i = mean(i) + P[[i]] %*% (x(-i) - mean(-i)) s3(1:(d-1))= xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i,k) * s3(k) end do mu_i = mean(i) + s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur fr j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbscov ! Gibbs sampling with p general linear constraints a <= Cx <= b ! with r >= d linear constraints. C is (r x d), x (d x 1), a,b (r x 1). ! x0 must satisfy the constraints a <= C x0 <= b. ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param r number of linear constraints ! @param mean mean vector of dimension d (d x 1) ! @param sigma covariance matrix (d x d) ! @param C matrix for linear constraints (r x d) ! @param a lower bound for linear constraints (r x 1) ! @param b upper bound for linear constraints (r x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnormgibbscov2(n, d, r, mean, sigma, C, a, b, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, r, i, j, k = 1, l, ind = 0, burnin, thinning ! 
subindex "-i" integer, dimension(d-1) :: minus_i double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d-1) :: s3 double precision, dimension(d) :: x0, xr, mean, sd double precision, dimension(r) :: a, b double precision, dimension(r, d) :: C double precision :: bound1, bound2, lower, upper ! Kovarianzmatrix sigma und Partitionen Sigma_i, sigma_ii und S double precision, dimension(d, d) :: sigma double precision, dimension(d, d-1) :: Sigma_12 double precision :: Sigma_11 double precision, dimension(d-1,d-1) :: Sigma_22 ! Sigma_22_inv (d-1 x d-1) ist die Inverse von Sigma_22 double precision, dimension(d-1,d-1) :: Sigma_22_inv ! Liste von d mal 1 x (d-1) Matrizen = d x (d-1) Matrix double precision, dimension(d, d-1) :: P ! Deklarationen frs Matrix-Invertieren mit LAPACK-Routinen (Dimension d-1) double precision, dimension( d-1 ) :: work ! ipiv = pivot indices integer, dimension( d-1 ) :: ipiv ! lda = leading dimension integer :: m, lda, lwork, info INTEGER, DIMENSION(1) :: seed seed(1) = 12345 ! initialise R random number generator call rndstart() !CALL RANDOM_SEED !CALL RANDOM_SEED (SIZE=K) ! Sets K = N !CALL RANDOM_SEED (PUT = SEED (1:K)) ! Uses the starting value ! ! given by the user m =d-1 lda =d-1 lwork=d-1 ind = 0 ! Partitioning of sigma ! sigma = [ Sigma_11 Sigma_12 ] = [ Sigma_{i,i} Sigma_{i,-i} ] ! (d x d) [ ] [ (1 x 1) (1 x d-1) ] ! [ Sigma_21 Sigma_22 ] [ Sigma_{-i,i} Sigma_{-i,-i}] ! [ ] [ (d-1 x 1) (d-1 x d-1) ] ! List of conditional variances sd(i) can be precalculated do i = 1,d ! subindex "-i" minus_i = (/ (j, j=1,i-1), (j, j=i+1,d) /) Sigma_22 = sigma(minus_i, minus_i) ! Sigma_{-i,-i} : (d-1) x (d-1) Sigma_11 = sigma(i,i) ! Sigma_{i,i} : 1 x 1 Sigma_12(i,:) = sigma(i, minus_i) ! Sigma_{i,-i} : 1 x (d-1) ! Matrix Sigma_22 --> Sigma_22_inv umkopieren do k=1,(d-1) do l=1,(d-1) Sigma_22_inv(k,l) = Sigma_22(k,l) end do end do ! Matrix invertieren ! 
LU-Faktorisierung (Dreieckszerlegung) der Matrix S_inv call dgetrf( m, m, Sigma_22_inv, lda, ipiv, info ) ! Inverse der LU-faktorisierten Matrix S_inv call dgetri( m, Sigma_22_inv, lda, ipiv, work, lwork, info ) P(i,:) = pack(matmul(Sigma_12(i,:), Sigma_22_inv), .TRUE.) ! (1 x d-1) %*% (d-1 x d-1) = (1 x d-1) s2 = 0 do j = 1,d-1 s2 = s2 + P(i,j) * Sigma_12(i,j) end do sd(i) = sqrt(sigma(i,i) - s2) ! (1 x d-1) * (d-1 x 1) --> sd[[i]] ist (1,1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d !print '("i=",I3)',i ! Berechnung von bedingtem Erwartungswert und bedingter Varianz: ! bedingte Varianz hngt nicht von x[-i] ab! ! subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! mu_i = mean(i) + P[[i]] %*% (x(-i) - mean(-i)) s3(1:(d-1))= xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i,k) * s3(k) end do mu_i = mean(i) + s2 ! TODO: Set to -Inf/+Inf lower = -1000.0d0 upper = 1000d0 ! Determine lower bounds for x[i] using all linear constraints relevant for x[i] do k = 1,r if (C(k,i) == 0 ) then CYCLE end if s2 = dot_product(C(k,minus_i), xr(minus_i)) bound1 = (a(k)- s2) /C(k, i) bound2 = (b(k)- s2) /C(k, i) if (C(k, i) > 0) then lower = max(lower, bound1) upper = min(upper, bound2) else lower = max(lower, bound2) upper = min(upper, bound1) end if end do !print '("mu_i = ",f6.3)', mu_i !print '("sd(i) = ",f6.3)', sd(i) !print '("lower = ",f6.3)', lower !print '("upper = ",f6.3)',upper Fa = pnormr(lower, mu_i, sd(i), 1, 0) Fb = pnormr(upper, mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q !print '("xr(i)=",f6.3)',xr(i) ! Nur fr j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. 
mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbscov2 ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param H precision matrix (d x d) ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnormgibbsprec(n, d, mean, H, lower, upper, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, i, j, k, ind = 0, burnin, thinning ! subindex "-i" integer, dimension(d-1) :: minus_i double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d, d) :: H ! Liste von d mal 1 x (d-1) Matrizen = d x (d-1) Matrix als H[i, -i] double precision, dimension(d, d-1) :: P double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d-1) :: s3 double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! initialise R random number generator call rndstart() ! initialise Fortran random number generator ! CALL RANDOM_SEED ! SW: I do not know why, but we have to reset ind each time!!! ! If we forget this line, ind will be incremented further and then Fortran crashes! ind = 0 ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do i = 1,d minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) H_inv_ii(i) = (1.0d0 / H(i, i)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! 
sd(i) is sqrt(H^{-1}(i,i)) P(i,:) = H(i, minus_i) ! 1 x (d-1) end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! subindex "-i" minus_i = (/ (k, k=1,i-1), (k, k=i+1,d) /) ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) ! mu_i <- mean[i] (1 / H[i,i]) * H[i,-i] %*% (x[-i] - mean[-i]) s3(1:(d-1)) = xr(minus_i) - mean(minus_i) s2 = 0 do k = 1,d-1 s2 = s2 + P(i, k) * s3(k) end do mu_i = mean(i) - H_inv_ii(i) * s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur fr j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) !call intpr("ind=", 4, ind, 1) !call dblepr("X(ind)=", 7, X(ind), 1) end if end do end do ! reset R random number generator call rndend() end subroutine rtmvnormgibbsprec ! populate map (row --> linked list of matrix elements) for with all entries in Hi, Hj and Hv ! if upper_triangular is TRUE, then we assume that only matrix elements with Hi <= Hj are given and we will ! put two elements in the (Hi,Hj,Hv) and (Hj,Hi,Hv) to the list for all Hi <= Hj subroutine populate_map(map, Hi, Hj, Hv, num_nonzero, d, upper_triangular) use linked_list integer :: num_nonzero, d integer, dimension(num_nonzero) :: Hi, Hj double precision, dimension(num_nonzero) :: Hv type(matrixrow), dimension(d), INTENT(INOUT) :: map type(matrixelem) :: newelem integer :: i, k logical :: upper_triangular !allocate(map(d)) ! and allocate our map do i=1,d nullify(map(i)%first) ! "zero out" our list nullify(map(i)%last) enddo ! 
populate map for with all entries in Hi, Hj and Hv do k=1,num_nonzero i = Hi(k) if (upper_triangular) then !if only upper triangular elements (i,j,v) with (i <= j) are given, !insert element (i, j, v) and (j, i, v) fr i <> j if (Hi(k) <= Hj(k)) then ! (i, j, v) element newelem%i = Hi(k) newelem%j = Hj(k) newelem%v = Hv(k) call insert_list_element(map(Hi(k)), newelem) end if if (Hi(k) < Hj(k)) then ! (j, i, v) element newelem%i = Hj(k) newelem%j = Hi(k) newelem%v = Hv(k) call insert_list_element(map(Hj(k)), newelem) end if else ! insert all elements given by (Hi, Hj, Hv) newelem%i = Hi(k) newelem%j = Hj(k) newelem%v = Hv(k) call insert_list_element(map(i), newelem) end if enddo end subroutine ! Gibbs sampling of the truncated multivariate normal distribution using a sparse matrix representation of the precision matrix H, ! represented in triplet form ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param Hi,Hj,Hv are the nonzero elements of the precision matrix H (d, d): H(i, j)=v, each a vector having the same length num_nonzero ! @param num_nonzero number of nonzero elements of the precision matrix H ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnorm_sparse_triplet(n, d, mean, Hi, Hj, Hv, num_nonzero, lower, upper, x0, burnin, thinning, X) use linked_list IMPLICIT NONE integer :: n, d, i, j, k, ind = 0, burnin, thinning, num_nonzero ! 
matrix representation of concentration matrix H integer, dimension(num_nonzero) :: Hi, Hj double precision, dimension(num_nonzero) :: Hv double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! in this map we store for every row i the non-zero entries (triplets) as a linked list of matrix elements ! example: i=1 --> (i=1,j=1,v=0.8), (i=1,j=2,v=0.2), (i=1,j=5,v=0.3) etc. ! The list will not be sorted ascending in j, so we can only iterate this list... type(matrixrow), dimension(d) :: map type(matrixelem) :: elem type( node ), pointer :: current ! initialise R random number generator call rndstart() ! initialise Fortran random number generator !CALL RANDOM_SEED ! We have to reset ind each time ! If we forget this line, ind will be incremented further and then Fortran crashes! ind = 0 ! loop through all elements and look for diagonal elements H[i,i], calculate conditional standard deviations sd(i | -i) ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do k=1,num_nonzero i = Hi(k) j = Hj(k) if (i == j) then H_inv_ii(i) = (1.0d0 / Hv(k)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! sd(i) is sqrt(H^{-1}(i,i)) end if end do ! populate map with linked lists of matrix elements H[i,j]=v and symmetric element H[j,i]=v call populate_map(map, Hi, Hj, Hv, num_nonzero, d, .TRUE.) ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! s2 will represent the term H[i,-i] (x[-i] - mean[-i]) s2 = 0 ! 
We avoid some n x d x d accesses to hash matrix H even for those elements that are zero... ! For n=30 and d=5000 this results in 30 x 5000 x 5000 = 75 million accesses to matrix H... ! Instead of iterating all (d-1) elements H[i,-i] we only iterate all m (m < d) NON-ZERO elements H[i,-i] which will dramatically reduce the number ! of hashtable accesses. This will scale as n x d x m and will be linear in d for a fixed m. current => map(i)%first do while (associated(current)) elem = current%data ! sum only non-zero H[i,-i] elements in H[i,-i] (x[-i] - mean[-i]) ! no summing for i = j elements! if (elem%j .ne. elem%i) then k = elem%j s2 = s2 + elem%v * (xr(k) - mean(k)) !TODO check end if current => current%next end do ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) ! we only loop through all non-zero elements in H[i,-i] = all indices j .ne. i in sparse matrix representation H[i,j]=v mu_i = mean(i) - H_inv_ii(i) * s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Nur fr j > burnin samples aufzeichnen, Default ist thinning = 1 ! bei Thinning nur jedes x-te Element nehmen if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) end if end do end do ! deallocate linked list at the end of the program and free memory do i=1,d call free_all(map(i)) nullify(map(i)%first) ! "zero out" our list nullify(map(i)%last) enddo nullify(current) ! reset R random number generator call rndend() end subroutine rtmvnorm_sparse_triplet ! Gibbs sampling of the truncated multivariate normal distribution using a sparse matrix representation of the precision matrix H (d x d). ! ! Instead of using a triplet representation H(i,j)=v, we use the compressed sparse column (csc) format with 3 vectors ! Hi : integer vector of row index, length num_nonzero; starting from zero ! 
Hp : integer vector of pointers, length d + 1; starting from zero; non-decreasing vector ! Hv : double vector of values, length num_nonzero ! ! This format is good at accessing all non-zero elements in one column j ! (and -as in our case- for symmetric matrices also to acess all elements in one row i) ! ! To access an element H(i,j), the following steps are necessary ! j ! v = Hv(Hp(j):Hp(j+1)) ! i = Hi(Hp(j):Hp(j+1)) ! ! @param n number of random sample to generate by Gibbs sampling ! @param d dimension (d >= 2) ! @param mean mean vector of dimension d (d x 1) ! @param Hi,Hp,Hv are the nonzero elements of the precision matrix H (d, d): H(i, j)=v, each a vector having the same length num_nonzero ! @param num_nonzero number of nonzero elements of the precision matrix H ! @param lower lower truncation points (d x 1) ! @param upper upper truncation points (d x 1) ! @param x0 Startvektor (d x 1) ! @param burnin Number of Burn-in samples to be discarded ! @param thinning thinning factor for thinning the Markov chain ! @return return value X --> vektor (n * d) --> can be coerced into a (n x d) matrix subroutine rtmvnorm_sparse_csc(n, d, mean, Hi, Hp, Hv, num_nonzero, lower, upper, x0, burnin, thinning, X) IMPLICIT NONE integer :: n, d, i, j, k, r, ind = 0, burnin, thinning, num_nonzero ! compressed sparse column (csc) matrix representation of concentration matrix H integer, dimension(num_nonzero) :: Hi integer, dimension(d+1) :: Hp double precision, dimension(num_nonzero) :: Hv double precision :: unifrnd, qnormr, pnormr, u, q, prob, Fa, Fb, mu_i, s2 double precision, dimension(d) :: H_inv_ii double precision, dimension(n*d), INTENT(INOUT) :: X double precision, dimension(d) :: x0, xr, mean, lower, upper, sd ! initialise R random number generator call rndstart() ! initialise Fortran random number generator !CALL RANDOM_SEED ! SW: I do not know why, but we have to reset ind each time!!! ! If we forget this line, ind will be incremented further and then Fortran crashes! 
ind = 0 ! loop through all elements and look for diagonal elements H[i,i], calculate conditional standard deviations sd(i | -i) ! List of conditional variances sd(i) can be precalculated ! Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H)) ! does not depend on x[-i] and can be precalculated before running the chain. do j=1,d do k=Hp(j),Hp(j+1)-1 ! k from 0..(d-1) i = Hi(k+1) + 1 ! Hi is index from 0..(d-1) --> need index i=1..d if (i == j) then H_inv_ii(i) = (1.0d0 / Hv(k+1)) ! H^{-1}(i,i) = 1 / H(i,i) sd(i) = sqrt(H_inv_ii(i)) ! sd(i) is sqrt(H^{-1}(i,i)) end if end do end do ! start value xr = x0 ! Actual number of samples to create: ! #burn-in-samples + n * #thinning-factor !For all samples n times the thinning factor do j = 1,(burnin + n * thinning) ! For all dimensions do i = 1,d ! conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i]) s2 = 0 ! For H[i,-i] (x[-i] - mean[-i]) we need to sum only all non-zero H[i,-i] elements! ! since H is symmetric, we can use the column sparse compressed (csc) format and sum all H[-i,i] elements instead do k=Hp(i),Hp(i+1)-1 ! loop all non-zero elements in column i, k is index 0..(d-1) r = Hi(k+1) + 1 ! row index r in column i is r=1..d if (i .ne. r) then s2 = s2 + Hv(k+1) * (xr(r) - mean(r)) end if end do mu_i = mean(i) - H_inv_ii(i) * s2 Fa = pnormr(lower(i), mu_i, sd(i), 1, 0) Fb = pnormr(upper(i), mu_i, sd(i), 1, 0) u = unifrnd() !call RANDOM_NUMBER(u) prob = u * (Fb - Fa) + Fa q = qnormr(prob, 0.0d0, 1.0d0, 1, 0) xr(i) = mu_i + sd(i) * q ! Only retain samples for j > burnin. Default is thinning = 1. ! If thinning>1 do retain only every x-th element if (j > burnin .AND. mod(j - burnin,thinning) == 0) then ind = ind + 1 X(ind) = xr(i) ! call intpr("ind=", 4, ind, 1) ! call dblepr("X(ind)=", 7, X(ind), 1) end if end do end do ! 
reset R random number generator call rndend() end subroutine rtmvnorm_sparse_csc tmvtnorm/NAMESPACE0000644000176200001440000000074212567600060013364 0ustar liggesusersuseDynLib(tmvtnorm) import(stats) import(utils) import(mvtnorm) import(stats4) import(gmm) import(Matrix) importFrom("methods", "as") export(ptmvnorm) export(rtmvnorm) export(rtmvnorm2) export(rtmvnorm.sparseMatrix) export(dtmvnorm) export(dtmvnorm.marginal) export(dtmvnorm.marginal2) export(qtmvnorm.marginal) export(ptmvnorm.marginal) export(mtmvnorm) export(dtmvt) export(rtmvt) export(ptmvt) export(ptmvt.marginal) export(mle.tmvnorm) export(gmm.tmvnorm)tmvtnorm/demo/0000755000176200001440000000000012567600065013073 5ustar liggesuserstmvtnorm/demo/demo1.R0000644000176200001440000000376012567600060014224 0ustar liggesusersrequire(tmvtnorm) library(utils) # Example 1 from Horrace (2005) x1<-seq(-2, 3, by=0.1) x2<-seq(-2, 3, by=0.1) density<-function(x) { sigma=matrix(c(1, -0.5, -0.5, 1), 2, 2) z=dtmvnorm(x, mean=c(0,0), sigma=sigma, lower=c(-1,-1)) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute the density function d=fgrid(x1, x2, density) # plot the density function as Contourplot contour(x1, x2, d, nlevels=5, main="Truncated Multivariate Normal Density", xlab=expression(x[1]), ylab=expression(x[2])) abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) # Example 2: X=rtmvnorm(n=100, mean=c(0,0), sigma=matrix(c(1, 0.8, 0.8, 2), 2, 2), lower=c(-Inf,-Inf), upper=c(0,0)) plot(X, xlim=c(-3,3), ylim=c(-3,3), main="Samples from Multivariate Normal Distribution", xlab=expression(x[1]), ylab=expression(x[2])) abline(v=0, lty=2) abline(h=0, lty=2) # Example 3: Profiling of rejection sampling: 10000 samples ~ 0.8 second Rprof("rtmvnorm.out") X=rtmvnorm(n=10000, mean=c(0,0), sigma=matrix(c(1, 0.8, 0.8, 2), 2, 2), lower=c(-Inf,-Inf), upper=c(0,0)) Rprof(NULL) summaryRprof("rtmvnorm.out") # 
Example 4: Profiling of Gibbs sampling: 10000 samples ~ 0.8 second Rprof("rtmvnorm.gibbs.out") m = 10 a = rep(-1, m) b = rep(1, m) # Erwartungswert und Kovarianzmatrix erzeugen mu = rep(0, m) sigma = matrix(0.8, m, m) diag(sigma) = rep(1, m) # Akzeptanzrate ausrechnen alpha = pmvnorm(lower=a, upper=b, mean=mu, sigma=sigma) alpha X=rtmvnorm(n=10000, mean=mu, sigma=sigma, lower=a, upper=b, algorithm="gibbs") Rprof(NULL) summaryRprof("rtmvnorm.gibbs.out") # Sampling from non-truncated normal distribution 10000 samples ~ 0.02 second Rprof("rmvnorm.out") X=rmvnorm(n=10000, mean=c(0,0), sigma=matrix(c(1, 0.8, 0.8, 2), 2, 2)) Rprof(NULL) summaryRprof("rmvnorm.out") tmvtnorm/demo/demo2.R0000644000176200001440000000112512567600060014216 0ustar liggesuserslibrary(tmvtnorm) library(rgl) # simulate x1, x2, x3 from truncated multivariate normal distribution sigma = matrix(c(1, 0, 0, 0, 1, 0, 0, 0, 1), 3, 3) # not truncated X = rmvnorm(n=2000, mean=c(0,0,0), sigma=sigma) # truncated X2 = rtmvnorm(n=2000, mean=c(0,0,0), sigma=sigma, lower=c(-Inf,-Inf,-Inf), upper=c(0,1,Inf)) # display as 3D scatterplot open3d() plot3d(X[,1], X[,2], X[,3], col="black", size=2, xlab=expression(x[1]), ylab=expression(x[2]), zlab=expression(x[3])) plot3d(X2[,1], X2[,2], X2[,3], col="red", size=2, add=TRUE) tmvtnorm/demo/00Index0000644000176200001440000000017312567600060014221 0ustar liggesusersdemo1 truncated multivariate normal densities demo2 3D scatterplot from a truncated trivariate normal distribution tmvtnorm/NEWS0000644000176200001440000001176112567600060012647 0ustar liggesusersUser visible changes in tmvtnorm package changes in tmvtnorm 1.4-10 (2015-08-24) # Fixed problem with build process in src/Makevars (parallel make) changes in tmvtnorm 1.4-9 (2014-03-03) # Moved package vignette to vignettes/ directory to be consistent with R 3.1.0 changes in tmvtnorm 1.4-8 (2013-03-29) # bugfix in dtmvnorm(...,margin=NULL). Introduced in 1.4-7. 
Reported by Julius.Vainora [julius.vainora@gmail.com] # bugfix in rtmvt(..., algorithm="gibbs"): Algorithm="gibbs" was not forwarded properly to rtmvnorm(). Reported by Aurelien Bechler [aurelien.bechler@agroparistech.fr] # allow non-integer degrees of freedom in rtmvt, e.g. rtmvt(..., df=3.2). Suggested by Aurelien Bechler [aurelien.bechler@agroparistech.fr] Rejection sampling does not work with non-integer df, only Gibbs sampling. changes in tmvtnorm 1.4-7 (2012-11-29) # new method rtmvnorm2() for drawing random samples with general linear constraints a <= Dx <= b with x (d x 1), D (r x d), a,b (r x 1) which can also handle the case r > d. Requested by Xiaojin Xu [xiaojinxu.fdu@gmail.com] Currently works with Gibbs sampling. # bugfix in dtmvnorm(...,log=TRUE). Reported by John Merrill [john.merrill@gmail.com] # optimization in mtmvnorm() to speed up the calculations # dtmvnorm.marginal2() can now be used with vectorized xq, xr. changes in tmvtnorm 1.4-6 (2012-03-23) # further optimization in mtmvnorm() and implementation of Johnson/Kotz-Formula when only a subset of variables is truncated changes in tmvtnorm 1.4-5 (2012-02-13) # rtmvnorm() can be used with both sparse triplet representation and (compressed sparse column) for H # dramatic performance gain in mtmvnorm() through optimization changes in tmvtnorm 1.4-4 (2012-01-10) # dramatic performance gain in rtmvnorm.sparseMatrix() through optimization # Bugfix in rtmvnorm() with linear constraints D: (reported by Claudia Kllmann [koellmann@statistik.tu-dortmund.de]) - forwarding "algorithm=" argument from rtmvnorm() to internal methods dealing with linear constraints was corrupt. 
- sampling with linear constraints D lead to wrong results due to missing t() changes in tmvtnorm 1.4-2 (2012-01-04) # Bugfix in rtmvnorm.sparseMatrix(): fixed a memory leak in Fortran code # Added a package vignette with a description of the Gibbs sampler changes in tmvtnorm 1.4-1 (2011-12-27) # Allow a sparse precision matrix H to be passed to rtmvnorm.sparseMatrix() which allows random number generation in very high dimensions (e.g. d >> 5000) # Rewritten the Fortran version of the Gibbs sampler for the use with sparse precision matrix H. changes in tmvtnorm 1.3-1 (2011-12-01) # Allow for the use of a precision matrix H rather than covariance matrix sigma in rtmvnorm() for both rejection and Gibbs sampling. (requested by Miguel Godinho de Matos from Carnegie Mellon University) # Rewritten both the R and Fortran version of the Gibbs sampler. # GMM estimation in gmm.tmvnorm(,method=c("ManjunathWilhelm","Lee")) can now be done using the Manjunath/Wilhelm and Lee moment conditions. changes in tmvtnorm 1.2-3 (2011-06-04) # rtmvnorm() works now with general linear constraints a<= Dx<=b, with x (d x 1), full-rank matrix D (d x d), a,b (d x 1). Implemented with both rejection sampling and Gibbs sampling (Geweke (1991)) # Added GMM estimation in gmm.tmvnorm() # Bugfix in dtmvt() thanks to Jason Kramer: Using type="shifted" in pmvt() (reported by Jason Kramer [jskramer@uci.edu]) changes in tmvtnorm 1.1-5 (2010-11-20) # Added Maximum Likelihood estimation method (MLE) mle.tmvtnorm() # optimized mtmvnorm(): precalcuted F_a[i] in a separate loop which improved the computation of the mean, suggested by Miklos.Reiter@sungard.com # added a flag doComputeVariance (default TRUE), so users which are only interested in the mean, can compute only the variance (BTW: this flag does not make sense for the mean, since the mean has to be calculated anyway.) # Fixed a bug with LAPACK and BLAS/FLIBS libraries: Prof. 
Ripley/Writing R extensions: "For portability, the macros @code{BLAS_LIBS} and @code{FLIBS} should always be included @emph{after} @code{LAPACK_LIBS}." changes in tmvtnorm 1.0-2 (2010-01-28) # Added methods for the truncated multivariate t-Distribution : rtmvt(), dtmvt() und ptmvt() and ptmvt.marginal() changes in tmvtnorm 0.9-2 (2010-01-03) # Implementation of "thinning technique" for Gibbs sampling: Added parameter thinning=1 to rtmvnorm.gibbs() for thinning of Markov chains, i.e. reducing autocorrelations of random samples # Documenting additional arguments "thinning", "start.value" and "burn.in", for rmvtnorm.gibbs() # Added parameter "burn-in" and "thinning" in the Fortran code for discarding burn-in samples and thinng the Markov chain. # Added parameter log=FALSE to dtmvnorm.marginal() # Added parameter margin=NULL to dtmvnorm() as an interface/wrapper to marginal density functions dtmvnorm.marginal() and dtmvnorm.marginal2() # Code polishing and review tmvtnorm/R/0000755000176200001440000000000012567600057012351 5ustar liggesuserstmvtnorm/R/dtmvnorm-marginal.R0000644000176200001440000001017112567600057016132 0ustar liggesusers# Dichtefunktion und Verteilung einer multivariate truncated normal # # Problem ist die Bestimmung der Randverteilung einer Variablen. # # 1. Im bivariaten Fall kann explizit eine Formel angegeben werden (vgl. Arnold (1993)) # 2. Im multivariaten Fall kann ein Integral angegeben werden (vgl. Horrace (2005)) # 3. Bestimmung der Dichtefunktion ber das Integral mglich? # 4. Kann die Verteilungsfunktion pmvnorm() helfen? Kann man dann nach einer Variablen differenzieren? # Literatur: # # Genz, A. (1992). Numerical computation of multivariate normal probabilities. Journal of Computational and Graphical Statistics, 1, 141-150 # Genz, A. (1993). Comparison of methods for the computation of multivariate normal probabilities. Computing Science and Statistics, 25, 400-405 # Horrace (2005). 
# Jack Cartinhour (1990): One-dimensional marginal density functions of a truncated multivariate normal density function # Communications in Statistics - Theory and Methods, Volume 19, Issue 1 1990 , pages 197 - 203 # Dichtefunktion fr Randdichte f(xn) einer Truncated Multivariate Normal Distribution, # vgl. Jack Cartinhour (1990) "One-dimensional marginal density functions of a truncated multivariate normal density function" # # @param xn Vektor der Lnge l von Punkten, an dem die Randdichte ausgewertet wird # @param i Index (1..n) dessen Randdichte berechnet werden soll # @param mean (nx1) Mittelwertvektor # @param sigma (nxn)-Kovarianzmatrix # @param lower,upper Trunkierungsvektor lower <= x <= upper dtmvnorm.marginal <- function(xn, n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE) { if (NROW(sigma) != NCOL(sigma)) { stop("sigma must be a square matrix") } if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } # Anzahl der Dimensionen k <- length(mean) if (n < 1 || n > length(mean) || !is.numeric(n) || length(n) > 1 || !n %in% 1:length(mean)) { stop("n must be a integer scalar in 1..length(mean)") } # Univariater Fall, vgl. 
Greene (2003), S.573 if (k == 1) { prob <- pnorm(upper, mean=mean, sd=sqrt(sigma)) - pnorm(lower, mean=mean, sd=sqrt(sigma)) density <- ifelse( lower[1]<=xn & xn<=upper[1], dnorm(xn, mean=mean, sd=sqrt(sigma)) / prob, 0) if (log == TRUE) { return(log(density)) } else { return(density) } } # Standardize sigma to correlation matrix, mean to zero vector # adjust xn, lower, upper #sd <- sqrt(diag(sigma)) #xn <- (xn - mean) / sd #lower <- (lower - mean) / sd #upper <- (upper - mean) / sd #mean <- rep(0, k) #sigma <- cov2cor(sigma) # Kovarianzmatrix; nach Standardisierung Korrelationsmatrix C <- sigma # Inverse Kovarianzmatrix, Precision matrix A <- solve(sigma) # Partitionierung von A und C A_1 <- A[-n,-n] # (n-1) x (n-1) #a_nn <- A[n, n] # 1x1 #a <- A[-n, n] # (n-1) x 1 A_1_inv <- solve(A_1) C_1 <- C[-n,-n] # (n-1) x (n-1) c_nn <- C[n, n] # 1x1 c <- C[-n, n] # (n-1) x 1 # Partitionierung von Mittelwertvektor mu mu <- mean mu_1 <- mean[-n] mu_n <- mean[n] # Skalierungsfaktor der Dichte p <- pmvnorm(lower=lower, upper=upper, mean=mu, sigma=C) f_xn <- c() for (i in 1:length(xn)) { if (!(lower[n]<=xn[i] && xn[i]<=upper[n]) || is.infinite(xn[i])) { f_xn[i] <- 0 next } # m(x_n) --> (n-1x1) # Aufpassen bei z.B. 
m=c(Inf, Inf, NaN) und c=0 m <- mu_1 + (xn[i] - mu_n) * c / c_nn # SW: Possibly optimize with vectorized version of pmvnorm() which accepts different bounds # for univariate density, pmvnorm() does not accept corr= f_xn[i] <- exp(-0.5*(xn[i]-mu_n)^2/c_nn) * pmvnorm(lower=lower[-n], upper=upper[-n], mean=m, sigma=A_1_inv) } density <- 1/p * 1/sqrt(2*pi*c_nn) * f_xn if (log == TRUE) { return(log(density)) } else { return(density) } } tmvtnorm/R/ptmvt.R0000644000176200001440000000367512567600057013661 0ustar liggesusers # Verteilungsfunktion der truncated multivariate t distribution # # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper ptmvt <- function( lowerx, upperx, mean=rep(0, length(lowerx)), sigma, df = 1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) { # check of standard tmvtnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check of additional arguments lowerx and upperx if (is.null(lowerx) || any(is.na(lowerx))) stop(sQuote("lowerx"), " not specified or contains NA") if (is.null(upperx) || any(is.na(upperx))) stop(sQuote("upperx"), " not specified or contains NA") if (!is.numeric(lowerx) || !is.vector(lowerx)) stop(sQuote("lowerx"), " is not a numeric vector") if (!is.numeric(upperx) || !is.vector(upperx)) stop(sQuote("upperx"), " is not a numeric vector") if (length(lowerx) != length(lower) || length(lower) != length(upperx)) stop("lowerx an upperx must have the same length as lower and upper!") if (any(lowerx>=upperx)) stop("lowerx must be smaller than or equal to upperx (lowerx<=upperx)") # Aufpassen: # Wir mssen garantieren, dass nur innerhalb des Support-Bereichs lower <= x <= upper integriert wird. Sonst kann Ergebnis >= 1 rauskommen. 
# Wenn einzelne Komponenten von lowerx <= lower sind, dann von der Untergrenze lower integrieren. Analog fr upperx >= upper f <- pmvt(lower=pmax(lowerx, lower), upper=pmin(upperx, upper), delta=mean, sigma=sigma, df=df, maxpts = maxpts, abseps = abseps, releps = releps, type="shifted") / pmvt(lower=lower, upper=upper, delta=mean, sigma=sigma, df=df, maxpts = maxpts, abseps = abseps, releps = releps, type="shifted") return(f) }tmvtnorm/R/rtmvnorm.R0000644000176200001440000007001412567600057014362 0ustar liggesusers################################################################################ # # Sampling from Truncated multivariate Gaussian distribution using # # a) Rejection sampling # b) Gibbs sampler # # for both rectangular constraints a <= x <= b and general linear constraints # a <= Dx <= b. For D = I this implies rectangular constraints. # The method can be used using both covariance matrix sigma and precision matrix H. # # Author: Stefan Wilhelm # # References: # (1) Jayesh H. Kotecha and Petar M. Djuric (1999) : # "GIBBS SAMPLING APPROACH FOR GENERATION OF TRUNCATED MULTIVARIATE GAUSSIAN RANDOM VARIABLES" # (2) Geweke (1991): # "Effcient simulation from the multivariate normal and Student-t distributions # subject to linear constraints and the evaluation of constraint probabilities" # (3) John Geweke (2005): Contemporary Bayesian Econometrics and Statistics, Wiley, pp.171-172 # (4) Wilhelm (2011) package vignette to package "tmvtnorm" # ################################################################################ # We need this separate method rtmvnorm.sparseMatrix() because # rtmvnorm() initialises dense d x d sigma and D matrix which will not work for high dimensions d. # It also does some sanity checks on sigma and D (determinant etc.) which will not # work for high dimensions. 
# returns a matrix X (n x d) with random draws # from a truncated multivariate normal distribution with d dimensionens # using Gibbs sampling # # @param n Anzahl der Realisationen # @param mean mean vector (d x 1) der Normalverteilung # @param lower lower truncation vector (d x 1) with lower <= x <= upper # @param upper upper truncation vector (d x 1) with lower <= x <= upper # @param H precision matrix (d x d) if given, defaults to identity matrix rtmvnorm.sparseMatrix <- function(n, mean = rep(0, nrow(H)), H = sparseMatrix(i=1:length(mean), j=1:length(mean), x=1), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), ...) { if (is.null(H) || !inherits(H, "sparseMatrix")) { stop("H must be of class 'sparseMatrix'") } rtmvnorm.gibbs.Fortran(n, mean, sigma=NULL, H, lower, upper, ...) } # Erzeugt eine Matrix X (n x d) mit Zufallsrealisationen # aus einer Trunkierten Multivariaten Normalverteilung mit d Dimensionen # ber Rejection Sampling oder Gibbs Sampler aus einer Multivariaten Normalverteilung. # If matrix D is given, it must be a (d x d) full rank matrix. # Therefore this method can only cover the case with only r <= d linear restrictions. # For r > d linear restrictions, please see rtmvnorm2(n, mean, sigma, D, lower, upper), # where D can be defined as (r x d). 
# Draw random samples from the truncated multivariate normal distribution
# subject to linear constraints lower <= Dx <= upper.
#
# @param n number of random draws
# @param mean mean vector (d x 1) of the normal distribution
# @param sigma covariance matrix (d x d) of the normal distribution
# @param lower lower truncation vector (d x 1) with lower <= Dx <= upper
# @param upper upper truncation vector (d x 1) with lower <= Dx <= upper
# @param D matrix for linear constraints, defaults to (d x d) diagonal matrix
# @param H precision matrix (d x d) if given
# @param algorithm c("rejection", "gibbs", "gibbsR")
rtmvnorm <- function(n,
  mean  = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  D     = diag(length(mean)),
  H     = NULL,
  algorithm=c("rejection", "gibbs", "gibbsR"), ...) {
  algorithm <- match.arg(algorithm)

  if (is.null(mean) && (is.null(sigma) || is.null(H))) {
    stop("Invalid arguments for ",sQuote("mean")," and ",sQuote("sigma"),"/",sQuote("H"),". Need at least mean vector and covariance or precision matrix.")
  }

  # check of standard tmvtnorm arguments
  cargs <- checkTmvArgs(mean, sigma, lower, upper)
  mean  <- cargs$mean
  sigma <- cargs$sigma
  lower <- cargs$lower
  upper <- cargs$upper

  # BUGFIX: the original tested "sigma != diag(length(mean))" inside if(),
  # which compares matrices elementwise and yields a logical matrix
  # (an error as an if() condition in current R). Use identical() instead.
  if (!is.null(H) && !identical(sigma, diag(length(mean)))) {
    stop("Cannot give both covariance matrix sigma and precision matrix H arguments at the same time")
  } else if (!is.null(H) && !inherits(H, "sparseMatrix")) {
    # check precision matrix H if it is symmetric and positive definite
    checkSymmetricPositiveDefinite(H, name="H")
    # H explicitly given, we will override sigma later if we need sigma
    # sigma <- solve(H)
  }
  # else sigma explicitly or implicitly given

  # check of additional arguments
  if (n < 1 || !is.numeric(n) || n != as.integer(n) || length(n) > 1) {
    stop("n must be a integer scalar > 0")
  }

  # check matrix D, must be square with full rank
  if (!is.matrix(D) || det(D) == 0) {
    stop("D must be a (n x n) matrix with full rank n!")
  }

  if (!identical(D, diag(length(mean)))) {
    # D <> I : general linear constraints, delegate to transformation wrapper
    retval <- rtmvnorm.linear.constraints(n=n, mean=mean, sigma=sigma, H=H,
      lower=lower, upper=upper, D=D, algorithm=algorithm, ...)
    return(retval)
  } else {
    # D == I : rectangular support region
    if (algorithm == "rejection") {
      if (!is.null(H)) {
        # precision matrix case H
        retval <- rtmvnorm.rejection(n, mean, sigma=solve(H), lower, upper, ...)
      } else {
        # covariance matrix case sigma
        retval <- rtmvnorm.rejection(n, mean, sigma, lower, upper, ...)
      }
    } else if (algorithm == "gibbs") {
      # precision matrix case H vs. covariance matrix case sigma
      # is handled inside the method
      retval <- rtmvnorm.gibbs.Fortran(n, mean, sigma, H, lower, upper, ...)
    } else if (algorithm == "gibbsR") {
      if (!is.null(H)) {
        # precision matrix case H
        retval <- rtmvnorm.gibbs.Precision(n, mean, H, lower, upper, ...)
      } else {
        # covariance matrix case sigma
        retval <- rtmvnorm.gibbs(n, mean, sigma, lower, upper, ...)
      }
    }
  }
  return(retval)
}

# Creates a matrix X (n x k) of random draws from a truncated multivariate
# normal distribution in k dimensions via rejection sampling from the
# untruncated multivariate normal, keeping draws with lower <= Dx <= upper.
#
# If D is not the identity, general linear restrictions
# lower <= Dx <= upper apply (see Geweke (1991)).
#
# @param n number of random draws
# @param mean mean vector (k x 1) of the normal distribution
# @param sigma covariance matrix (k x k) of the normal distribution
# @param lower lower truncation vector (k x 1) with lower <= x <= upper
# @param upper upper truncation vector (k x 1) with lower <= x <= upper
# @param D matrix for linear constraints, defaults to diagonal matrix
rtmvnorm.rejection <- function(n,
  mean  = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  D     = diag(length(mean))) {
  # No check of input parameters, checks are done in rtmvnorm()!

  # k = dimension
  k <- length(mean)

  # result matrix (n x k)
  Y <- matrix(NA, n, k)

  # number of samples still to be drawn
  numSamples <- n

  # total number of accepted samples so far
  numAcceptedSamplesTotal <- 0

  # determine the acceptance rate alpha from the multivariate normal distribution
  r <- length(lower)
  d <- length(mean)
  if (r == d & identical(D, diag(d))) {
    alpha <- pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma)
    if (alpha <= 0.01) warning(sprintf("Acceptance rate is very low (%s) and rejection sampling becomes inefficient. Consider using Gibbs sampling.", alpha))
    estimatedAlpha <- TRUE
  } else {
    # TODO: how to determine the acceptance rate alpha from lower <= Dx <= upper
    # for r > d restrictions?
    # Defer calculation of alpha. Assume for now that all samples will be accepted.
    alpha <- 1
    estimatedAlpha <- FALSE
  }

  # repeatedly draw from the multivariate normal and keep those draws
  # that survive the truncation
  while (numSamples > 0) {
    # Create N/alpha proposals from the multivariate normal distribution.
    # If alpha is too low, rejection sampling becomes inefficient and
    # N/alpha too large; in that case only create N proposals.
    nproposals <- ifelse(numSamples/alpha > 1000000, numSamples, ceiling(max(numSamples/alpha,10)))
    X <- rmvnorm(nproposals, mean=mean, sigma=sigma)

    # determine which proposals fall inside the support region after truncation
    # (row-wise loop; see historical note in the original about rowSums)
    X2 <- X %*% t(D)
    ind <- logical(nproposals)
    for (i in 1:nproposals) {
      ind[i] <- all(X2[i,] >= lower & X2[i,] <= upper)
    }

    # number of accepted samples in this pass (sum(ind) replaces the
    # equivalent but wasteful length(ind[ind==TRUE]))
    numAcceptedSamples <- sum(ind)

    # if nothing was accepted, try again
    if (numAcceptedSamples == 0) next

    if (!estimatedAlpha) {
      alpha <- numAcceptedSamples / nproposals
      if (alpha <= 0.01) warning(sprintf("Acceptance rate is very low (%s) and rejection sampling becomes inefficient. Consider using Gibbs sampling.", alpha))
    }
    #cat("numSamplesAccepted=",numAcceptedSamples," numSamplesToDraw = ",numSamples,"\n")
    numNeededSamples <- min(numAcceptedSamples, numSamples)
    Y[(numAcceptedSamplesTotal+1):(numAcceptedSamplesTotal+numNeededSamples),] <- X[which(ind)[1:numNeededSamples],]

    # total number of accepted samples
    numAcceptedSamplesTotal <- numAcceptedSamplesTotal + numAcceptedSamples
    # number of samples still needed
    numSamples <- numSamples - numAcceptedSamples
  }
  Y
}

# Gibbs sampler for the truncated univariate normal distribution
#
# Jayesh H. Kotecha and Petar M. Djuric (1999) : GIBBS SAMPLING APPROACH FOR GENERATION OF TRUNCATED MULTIVARIATE GAUSSIAN RANDOM VARIABLES
#
# In the univariate case the generated samples are independent,
# so there is no chain in the proper sense and no start value/burn-in/thinning.
#
# As a change to Kotecha, we do not draw a sample x from the Gaussian Distribution
# and then apply pnorm(x) - which is uniform - but rather draw directly from the
# uniform distribution u ~ U(0, 1).
#
# @param n number of realisations
# @param mu mean of the normal distribution
# @param sigma standard deviation
#   NOTE(review): multivariate callers pass variance terms (sigma[1,1]) here
#   while this function treats the value as a standard deviation — confirm
#   intended semantics against the package documentation.
# @param a lower truncation point
# @param b upper truncation point
rtnorm.gibbs <- function(n, mu=0, sigma=1, a=-Inf, b=Inf) {
  # Draw from Uni(0,1)
  F <- runif(n)

  # Phi(a) and Phi(b)
  Fa <- pnorm(a, mu, sd=sigma)
  Fb <- pnorm(b, mu, sd=sigma)

  # Truncated Normal Distribution, see equation (6), but F(x) ~ Uni(0,1),
  # so we directly draw from Uni(0,1) instead of doing:
  # x <- rnorm(n, mu, sigma)
  # y <- mu + sigma * qnorm(pnorm(x)*(Fb - Fa) + Fa)
  y <- mu + sigma * qnorm(F * (Fb - Fa) + Fa)
  y
}

# Gibbs Sampler Implementation in R for Truncated Multivariate Normal Distribution
# (covariance case with sigma)
# Jayesh H. Kotecha and Petar M.
# Djuric (1999) :
# GIBBS SAMPLING APPROACH FOR GENERATION OF TRUNCATED MULTIVARIATE
# GAUSSIAN RANDOM VARIABLES
#
# @param n number of realisations
# @param mean mean vector (k x 1) of the normal distribution
# @param sigma covariance matrix (k x k) of the normal distribution
# @param lower lower truncation vector (k x 1) with lower <= x <= upper
# @param upper upper truncation vector (k x 1) with lower <= x <= upper
# @param burn.in.samples number of burn-in samples to be discarded
# @param start.value start value for Gibbs sampling
# @param thinning thinning factor (record every thinning-th draw)
rtmvnorm.gibbs <- function(n,
  mean  = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  burn.in.samples = 0,
  start.value = NULL,
  thinning = 1) {
  # We check only additional arguments like "burn.in.samples", "start.value" and "thinning"
  if (thinning < 1 || !is.numeric(thinning) || length(thinning) > 1) {
    stop("thinning must be a integer scalar > 0")
  }

  # dimension of X
  d <- length(mean)

  # number of burn-in samples
  S <- burn.in.samples
  if (!is.null(S)) {
    if (S < 0) stop("number of burn-in samples must be non-negative")
  }

  # Take start value given by user or determine from lower and upper
  if (!is.null(start.value)) {
    if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size")
    # BUGFIX: the source had been garbled to "any(start.valueupper)";
    # restored the support-region check.
    if (any(start.value < lower) || any(start.value > upper)) stop("start value is not inside support region")
    x0 <- start.value
  } else {
    # Start value from support region, may be lower or upper bound, if they are finite,
    # if both are infinite, we take 0.
    x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0))
  }

  # Sample from univariate truncated normal distribution which is very fast.
  if (d == 1) {
    X <- rtnorm.gibbs(n, mu=mean[1], sigma=sigma[1,1], a=lower[1], b=upper[1])
    return(X)
  }

  # result matrix (n x d)
  X <- matrix(NA, n, d)

  # Draw all uniforms we will need in advance
  U <- runif((S + n*thinning) * d)
  l <- 1

  # List of conditional standard deviations can be pre-calculated
  sd <- list(d)
  # List of t(Sigma_i) %*% solve(Sigma) term
  P  <- list(d)

  for (i in 1:d) {
    # Partitioning of Sigma
    Sigma    <- sigma[-i,-i] # (d-1) x (d-1)
    sigma_ii <- sigma[i,i]   # 1 x 1
    Sigma_i  <- sigma[i,-i]  # 1 x (d-1)

    P[[i]]  <- t(Sigma_i) %*% solve(Sigma)        # (1 x (d-1)) * ((d-1) x (d-1)) = (1 x (d-1))
    sd[[i]] <- sqrt(sigma_ii - P[[i]] %*% Sigma_i) # (1 x (d-1)) * ((d-1) x 1)
  }

  x <- x0

  # Run chain from index (1 - #burn-in-samples):(n*thinning) and only record
  # samples from j >= 1, which discards the burn-in samples
  for (j in (1-S):(n*thinning)) {
    # For all dimensions
    for (i in 1:d) {
      # conditional mean and conditional variance;
      # the conditional variance does not depend on x[-i]!
      mu_i <- mean[i] + P[[i]] %*% (x[-i] - mean[-i])

      # transformation
      F.tmp <- pnorm(c(lower[i], upper[i]), mu_i, sd[[i]])
      Fa <- F.tmp[1]
      Fb <- F.tmp[2]
      x[i] <- mu_i + sd[[i]] * qnorm(U[l] * (Fb - Fa) + Fa)
      l <- l + 1
    }

    if (j > 0) {
      if (thinning == 1) {
        # no thinning, take all samples except for burn-in-period
        X[j,] <- x
      }
      else if (j %% thinning == 0){
        X[j %/% thinning,] <- x
      }
    }
  }
  return(X)
}

# R-Implementation of Gibbs sampler with precision matrix H
#
# @param n number of random draws
# @param mean mean vector (k x 1) of the normal distribution
# @param H precision matrix (k x k) of the normal distribution
# @param lower lower truncation vector (k x 1) with lower <= x <= upper
# @param upper upper truncation vector (k x 1) with lower <= x <= upper
# @param burn.in.samples number of burn-in samples to be discarded
# @param start.value start value for Gibbs sampling
# @param thinning thinning factor (record every thinning-th draw)
rtmvnorm.gibbs.Precision <- function(n,
  mean  = rep(0, nrow(H)),
  H     = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  burn.in.samples = 0,
  start.value = NULL,
  thinning = 1) {
  # We check only additional arguments like "burn.in.samples", "start.value" and "thinning"
  if (thinning < 1 || !is.numeric(thinning) || length(thinning) > 1) {
    stop("thinning must be a integer scalar > 0")
  }

  # dimension of X
  d <- length(mean)

  # number of burn-in samples
  S <- burn.in.samples
  if (!is.null(S)) {
    if (S < 0) stop("number of burn-in samples must be non-negative")
  }

  # Take start value given by user or determine from lower and upper
  if (!is.null(start.value)) {
    if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size")
    # BUGFIX: restored garbled support-region check (see rtmvnorm.gibbs).
    if (any(start.value < lower) || any(start.value > upper)) stop("start value is not inside support region")
    x0 <- start.value
  } else {
    # Start value from support region, may be lower or upper bound, if they are finite,
    # if both are infinite, we take 0.
    x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0))
  }

  # Sample from univariate truncated normal distribution which is very fast.
  if (d == 1) {
    X <- rtnorm.gibbs(n, mu=mean[1], sigma=1/H[1,1], a=lower[1], b=upper[1])
    return(X)
  }

  # result matrix (n x d)
  X <- matrix(NA, n, d)

  # Draw from U ~ Uni(0,1) for all iterations we need in advance
  U <- runif((S + n*thinning) * d)
  l <- 1

  # Vector of conditional standard deviations sd(i | -i) = H_ii^{-1} = 1 / H[i, i] = sqrt(1 / diag(H))
  # does not depend on x[-i] and can be precalculated before running the chain.
  sd <- sqrt(1 / diag(H))

  # start value of the chain
  x <- x0

  # Run chain from index (1 - #burn-in-samples):(n*thinning) and only record samples from j >= 1
  # which discards the burn-in-samples
  for (j in (1-S):(n*thinning)) {
    # For all dimensions
    for (i in 1:d) {
      # conditional mean mu[i] = E[i | -i] = mean[i] - H_ii^{-1} H[i,-i] (x[-i] - mean[-i])
      mu_i <- mean[i] - (1 / H[i,i]) * H[i,-i] %*% (x[-i] - mean[-i])

      # draw x[i | -i] from conditional univariate truncated normal distribution with
      # TN(E[i | -i], sd(i | -i), lower[i], upper[i])
      F.tmp <- pnorm(c(lower[i], upper[i]), mu_i, sd[i])
      Fa <- F.tmp[1]
      Fb <- F.tmp[2]
      x[i] <- mu_i + sd[i] * qnorm(U[l] * (Fb - Fa) + Fa)
      l <- l + 1
    }

    if (j > 0) {
      if (thinning == 1) {
        # no thinning, take all samples except for burn-in-period
        X[j,] <- x
      }
      else if (j %% thinning == 0){
        X[j %/% thinning,] <- x
      }
    }
  }
  return(X)
}

# Gibbs sampler with compiled Fortran code
# Depending on whether covariance matrix Sigma or precision matrix H (dense or sparse format)
# is specified as parameter, we call either
# Fortran routine "rtmvnormgibbscov" (dense covariance matrix sigma),
# "rtmvnormgibbsprec" (dense matrix H) or "rtmvnormgibbssparseprec" (sparse precision matrix H).
#
# @param H precision matrix in sparse triplet format (i, j, v)
# Memory issues: We want to increase dimension d, and return matrix X will be (n x d)
# so if we want to create a large number of random samples X (n x d) with high d then
# we will probably also run into memory problems (X is dense). In most MCMC applications,
# we only have to create a small number n in high dimensions,
# e.g. 1 random sample per iteration (+ burn-in-samples).
# In this case we will not experience any problems.
# Users should be aware of choosing n and d appropriately
#
# @param n number of random draws
# @param mean mean vector (d x 1)
# @param sigma covariance matrix (d x d), used when H is NULL
# @param H precision matrix (dense, or sparse dgCMatrix/dgTMatrix), optional
# @param lower,upper truncation vectors (d x 1)
# @param burn.in.samples number of burn-in samples to be discarded
# @param start.value start value for Gibbs sampling
# @param thinning thinning factor (record every thinning-th draw)
rtmvnorm.gibbs.Fortran <- function(n,
  mean  = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  H     = NULL,
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  burn.in.samples = 0,
  start.value = NULL,
  thinning = 1) {
  # No checks of input arguments, checks are done in rtmvnorm()

  # dimension of X
  d <- length(mean)

  # number of burn-in samples
  S <- burn.in.samples
  if (!is.null(S)) {
    if (S < 0) stop("number of burn-in samples must be non-negative")
  }

  # Take start value given by user or determine from lower and upper
  if (!is.null(start.value)) {
    if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size")
    # BUGFIX: the source had been garbled to "any(start.valueupper)";
    # restored the support-region check.
    if (any(start.value < lower) || any(start.value > upper)) stop("start value is not inside support region")
    x0 <- start.value
  } else {
    # Start value from support region, may be lower or upper bound, if they are finite,
    # if both are infinite, we take 0.
    x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0))
  }

  # Sample from univariate truncated normal distribution which is very fast.
  if (d == 1) {
    if (!is.null(H)) {
      # BUGFIX: the univariate variance for the precision case is 1/H[1,1];
      # the original used 1/sigma[1,1], which ignores H (sigma is the default
      # identity when only H is supplied) — cf. rtmvnorm.gibbs.Precision.
      X <- rtnorm.gibbs(n, mu=mean[1], sigma=1/H[1,1], a=lower[1], b=upper[1])
    } else {
      X <- rtnorm.gibbs(n, mu=mean[1], sigma=sigma[1,1], a=lower[1], b=upper[1])
    }
    return(X)
  }

  # result matrix (n x d)
  X <- matrix(0, n, d)

  # Call to Fortran subroutine
  if (!is.null(H)){
    if (!inherits(H, "sparseMatrix")) {
      # dense precision matrix H
      ret <- .Fortran("rtmvnormgibbsprec",
        n       = as.integer(n),
        d       = as.integer(d),
        mean    = as.double(mean),
        H       = as.double(H),
        lower   = as.double(lower),
        upper   = as.double(upper),
        x0      = as.double(x0),
        burnin  = as.integer(burn.in.samples),
        thinning= as.integer(thinning),
        X       = as.double(X),
        NAOK=TRUE, PACKAGE="tmvtnorm")
    } else if (inherits(H, "dgCMatrix")) {
      # H is given in compressed sparse column (csc) representation
      ret <- .Fortran("rtmvnorm_sparse_csc",
        n       = as.integer(n),
        d       = as.integer(d),
        mean    = as.double(mean),
        Hi      = as.integer(H@i),
        Hp      = as.integer(H@p),
        Hv      = as.double(H@x),
        num_nonzero = as.integer(length(H@x)),
        lower   = as.double(lower),
        upper   = as.double(upper),
        x0      = as.double(x0),
        burnin  = as.integer(burn.in.samples),
        thinning= as.integer(thinning),
        X       = as.double(X),
        NAOK=TRUE, PACKAGE="tmvtnorm")
    } else {
      # H is given in sparse matrix triplet representation.
      # Note that only the upper triangular matrix (i <= j) is passed on.
      sH <- as(H, "dgTMatrix")  # precision matrix as triplet representation
      # ATTENTION: sH@i and sH@j are zero-based (0..(n-1)), we need it as 1...n
      ind <- sH@i <= sH@j  # upper triangular matrix elements of H[i,j] with i <= j
      ret <- .Fortran("rtmvnorm_sparse_triplet",
        n       = as.integer(n),
        d       = as.integer(d),
        mean    = as.double(mean),
        Hi      = as.integer(sH@i[ind]+1),
        Hj      = as.integer(sH@j[ind]+1),
        Hv      = as.double(sH@x[ind]),
        num_nonzero = as.integer(sum(ind)),
        lower   = as.double(lower),
        upper   = as.double(upper),
        x0      = as.double(x0),
        burnin  = as.integer(burn.in.samples),
        thinning= as.integer(thinning),
        X       = as.double(X),
        NAOK=TRUE, PACKAGE="tmvtnorm")
    }
  } else {
    # dense covariance matrix sigma
    ret <- .Fortran("rtmvnormgibbscov",
      n       = as.integer(n),
      d       = as.integer(d),
      mean    = as.double(mean),
      sigma   = as.double(sigma),
      lower   = as.double(lower),
      upper   = as.double(upper),
      x0      = as.double(x0),
      burnin  = as.integer(burn.in.samples),
      thinning= as.integer(thinning),
      X       = as.double(X),
      NAOK=TRUE, PACKAGE="tmvtnorm")
  }
  # Fortran fills X row-wise; reshape into (n x d)
  X <- matrix(ret$X, ncol=d, byrow=TRUE)
  return(X)
}

# Gibbs sampling for Truncated Multivariate Normal Distribution
# with linear constraints based on Geweke (1991):
# This is simply a wrapper function around our rectangular sampling version...
#
# x ~ N(mu, sigma) subject to a <= Dx <= b
#
# alpha <= z <= beta
# with alpha = a - D * mu, beta = b - D * mu
# z ~ N(0, T), T = D Sigma D'
# x = mu + D^(-1) z
#
# @param n number of random draws
# @param mean mean vector (k x 1)
# @param sigma covariance matrix (k x k)
# @param lower lower truncation vector (k x 1) with lower <= x <= upper
# @param upper upper truncation vector (k x 1) with lower <= x <= upper
# @param D matrix for linear constraints, defaults to diagonal matrix
# @param burn.in number of burn-in samples to be discarded
# @param start start value for Gibbs sampling
# @param thinning
rtmvnorm.linear.constraints <- function(n,
  mean  = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  H     = NULL,
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  D     = diag(length(mean)),
  algorithm, ...) {
  # problem dimension
  k <- length(mean)

  # D has to be square with non-zero determinant (invertible)
  if (!is.matrix(D) || det(D) == 0) {
    stop("D must be a (n x n) matrix with full rank n!")
  }

  # Strategy: sample a truncated multi-normal variable Z ~ N(0, T)
  # restricted to alpha <= z <= beta, then map back to x.
  #
  # Given sigma:
  #   x ~ N(mean, sigma) subject to a <= Dx <= b
  #   define z = D x - D mu, so alpha <= z <= beta
  #   with alpha = a - D mu, beta = b - D mu,
  #   z ~ N(0, T) with T = D Sigma D', and x = mu + D^(-1) z.
  #
  # Given H:
  #   x ~ N(mean, H^{-1}); the precision matrix of z is
  #   T^{-1} = D'^{-1} H D^{-1}   (using (AB)^{-1} = B^{-1} A^{-1})
  shiftedLower <- as.vector(lower - D %*% mean)   # alpha
  shiftedUpper <- as.vector(upper - D %*% mean)   # beta
  invD <- solve(D)                                # D^(-1)
  if (!is.null(H)) {
    # precision-matrix route
    precZ <- t(invD) %*% H %*% invD
    Z <- rtmvnorm(n, mean=rep(0, k), sigma=diag(k), H=precZ,
                  lower=shiftedLower, upper=shiftedUpper,
                  algorithm=algorithm, ...)
  } else {
    # covariance-matrix route
    covZ <- D %*% sigma %*% t(D)
    Z <- rtmvnorm(n, mean=rep(0, k), sigma=covZ, H=NULL,
                  lower=shiftedLower, upper=shiftedUpper,
                  algorithm=algorithm, ...)
  }
  # back-transform every z: x = mu + D^(-1) z
  X <- sweep(Z %*% t(invD), 2, FUN="+", mean)
  return(X)
}

################################################################################

if (FALSE) {
checkSymmetricPositiveDefinite(matrix(1:4, 2, 2), name = "H")

lower <- c(-1, -1)
upper <- c(1, 1)
mean <- c(0.5, 0.5)
sigma <- matrix(c(1, 0.8, 0.8, 1), 2, 2)
H <- solve(sigma)
D <- matrix(c(1, 1, 1, -1), 2, 2)
checkSymmetricPositiveDefinite(H, name = "H")

# 1. covariance matrix sigma case
# 1.1. rectangular case D == I
X0 <- rtmvnorm(n=1000, mean, sigma, lower, upper, algorithm="rejection")
X1 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="rejection")
X2 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbsR")
X3 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbs")
par(mfrow=c(2,2))
plot(X1)
plot(X2)
plot(X3)
cov(X1)
cov(X2)
cov(X3)

# 1.2. general linear constraints case D <> I
X1 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, algorithm="rejection")
X2 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, algorithm="gibbsR")
X3 <- rtmvnorm(n=1000, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, algorithm="gibbs")
par(mfrow=c(2,2))
plot(X1)
plot(X2)
plot(X3)

# 2. precision matrix case H
# 2.1. rectangular case D == I
X1 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, algorithm="rejection")
X2 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, algorithm="gibbsR")
X3 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, algorithm="gibbs")
par(mfrow=c(2,2))
plot(X1)
plot(X2)
plot(X3)

# 2.2. general linear constraints case D <> I
X1 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="rejection")
X2 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="gibbsR")
X3 <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="gibbs")
par(mfrow=c(2,2))
plot(X1)
plot(X2)
plot(X3)
}
tmvtnorm/R/mtmvnorm.R0000644000176200001440000002066412567600057014363 0ustar liggesusers
# Expectation and covariance matrix computation
# based on the algorithms by Lee (1979), Lee (1983), Leppard and Tallis (1989)
# and Manjunath and Wilhelm (2009)
#
# References:
# Amemiya (1973) : Regression Analysis When the Dependent Variable is Truncated Normal
# Amemiya (1974) : Multivariate Regression and Simultaneous Equations Models When the Dependent Variables Are Truncated Normal
# Lee (1979) : On the first and second moments of the truncated multi-normal distribution and a simple estimator
# Lee (1983) : The Determination of Moments of the Doubly Truncated Multivariate Tobit Model
# Leppard and Tallis (1989) : Evaluation of the Mean and Covariance of the Truncated Multinormal
# Manjunath B G and Stefan Wilhelm (2009):
# Moments Calculation for the Doubly Truncated Multivariate Normal Distribution
# Johnson/Kotz (1972)

# Compute truncated mean and truncated variance in the case
# where only a subset of k < n x_1,..,x_k variables are truncated.
# In this case, computations simplify and we only have to deal with k-dimensions.
# Example: n=10 variables but only k=3 variables are truncated.
#
# Attention: Johnson/Kotz (1972), p.70 only works for zero mean vector!
# We have to demean all variables first
JohnsonKotzFormula <- function(mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean))) {
  # determine which variables are truncated
  idx <- which(!is.infinite(lower) | !is.infinite(upper))  # index of truncated variables
  n <- length(mean)
  k <- length(idx)  # number of truncated variables
  if (k >= n) stop(sprintf("Number of truncated variables (%s) must be lower than total number of variables (%s).", k, n))
  if (k == 0) {
    return(list(tmean=mean, tvar=sigma))  # no truncation
  }

  # transform to zero mean first (the Johnson/Kotz formula requires mean 0)
  lower <- lower - mean
  upper <- upper - mean

  # partitioning of sigma
  # sigma = [ V11  V12 ]
  #         [ V21  V22 ]
  V11 <- sigma[idx,idx]
  V12 <- sigma[idx,-idx]
  V21 <- sigma[-idx,idx]
  V22 <- sigma[-idx,-idx]

  # determine truncated mean xi and truncated variance U11
  # for the k-dimensional truncated sub-problem
  r <- mtmvnorm(mean=rep(0, k), sigma=V11, lower=lower[idx], upper=upper[idx])
  xi <- r$tmean
  U11 <- r$tvar

  invV11 <- solve(V11)  # V11^(-1)

  # See Johnson/Kotz (1972), p.70 formula
  tmean <- numeric(n)
  tmean[idx] <- xi
  tmean[-idx] <- xi %*% invV11 %*% V12
  tvar <- matrix(NA, n, n)
  tvar[idx, idx] <- U11
  tvar[idx, -idx] <- U11 %*% invV11 %*% V12
  tvar[-idx, idx] <- V21 %*% invV11 %*% U11
  tvar[-idx, -idx] <- V22 - V21 %*% (invV11 - invV11 %*% U11 %*% invV11) %*% V12
  # shift mean back
  tmean <- tmean + mean
  return(list(tmean=tmean, tvar=tvar))
}

# Mean and Covariance of the truncated multivariate distribution
# (double truncation, general sigma, general mean)
#
# @param mean mean vector (k x 1)
# @param sigma covariance matrix (k x k)
# @param lower lower truncation point (k x 1)
# @param upper upper truncation point (k x 1)
# @param doComputeVariance flag whether to compute variance (for performance reasons)
mtmvnorm <- function(mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  doComputeVariance=TRUE,
  pmvnorm.algorithm=GenzBretz()) {
  N <- length(mean)

  # Check input parameters
  cargs <- checkTmvArgs(mean, sigma, lower, upper)
  mean  <- cargs$mean
  sigma <- cargs$sigma
  lower <- cargs$lower
  upper <- cargs$upper

  # check number of truncated variables; if only a subset of variables is truncated
  # we can use the Johnson/Kotz formula together with mtmvnorm()

  # determine which variables are truncated
  idx <- which(!is.infinite(lower) | !is.infinite(upper))  # index of truncated variables
  k <- length(idx)  # number of truncated variables
  if (k < N) {
    return(JohnsonKotzFormula(mean=mean, sigma=sigma, lower=lower, upper=upper))
  }

  # Truncated Mean
  TMEAN <- numeric(N)
  # Truncated Covariance matrix
  TVAR  <- matrix(NA, N, N)

  # Shift the integration bounds by -mean so that the mean becomes 0
  a <- lower - mean
  b <- upper - mean
  lower <- lower - mean
  upper <- upper - mean

  # one-dimensional marginal density values at the bounds
  F_a <- numeric(N)
  F_b <- numeric(N)

  zero_mean <- rep(0,N)

  # pre-calculate one-dimensional marginals F_a[q] once
  for (q in 1:N) {
    tmp <- dtmvnorm.marginal(xn=c(a[q],b[q]), n = q, mean=zero_mean, sigma=sigma, lower=lower, upper=upper)
    F_a[q] <- tmp[1]
    F_b[q] <- tmp[2]
  }

  # 1. Determine E[X_i] = mean + Sigma %*% (F_a - F_b)
  TMEAN <- as.vector(sigma %*% (F_a - F_b))

  if (doComputeVariance) {
    # TODO:
    # calculating the bivariate densities is not necessary
    # in case of conditional independence.
    # calculate bivariate density only on first use and then cache it
    # so we can avoid this memory overhead.
    F2 <- matrix(0, N, N)
    for (q in 1:N) {
      for (s in 1:N) {
        if (q != s) {
          d <- dtmvnorm.marginal2(
            xq=c(a[q], b[q], a[q], b[q]),
            xr=c(a[s], a[s], b[s], b[s]),
            q=q, r=s, mean=zero_mean, sigma=sigma, lower=lower, upper=upper, pmvnorm.algorithm=pmvnorm.algorithm)
          F2[q,s] <- (d[1] - d[2]) - (d[3] - d[4])
        }
      }
    }

    # 2. Determine E[X_i, X_j]
    # Check if a[q] = -Inf or b[q]=+Inf, then F_a[q]=F_b[q]=0, but a[q] * F_a[q] = NaN and b[q] * F_b[q] = NaN
    F_a_q <- ifelse(is.infinite(a), 0, a * F_a)  # n-dimensional vector q=1..N
    F_b_q <- ifelse(is.infinite(b), 0, b * F_b)  # n-dimensional vector q=1..N

    for (i in 1:N) {
      for (j in 1:N) {
        sum <- 0
        for (q in 1:N) {
          sum <- sum + sigma[i,q] * sigma[j,q] * (sigma[q,q])^(-1) * (F_a_q[q] - F_b_q[q])
          if (j != q) {
            sum2 <- 0
            for (s in 1:N) {
              # this term tt will be zero if the partial correlation coefficient \rho_{js.q} is zero!
              # even for s == q will the term be zero, so we do not need s!=q condition here
              tt <- (sigma[j,s] - sigma[q,s] * sigma[j,q] * (sigma[q,q])^(-1))
              sum2 <- sum2 + tt * F2[q,s]
            }
            sum2 <- sigma[i, q] * sum2
            sum <- sum + sum2
          }
        }  # end for q
        TVAR[i, j] <- sigma[i, j] + sum
        # general mean case: TVAR[i, j] = mean[j] * TMEAN[i] + mean[i] * TMEAN[j] - mean[i] * mean[j] + sigma[i, j] + sum
      }
    }
    # 3. Determine variance Cov(X_i, X_j) = E[X_i, X_j] - E[X_i]*E[X_j] for the (0, sigma)-case
    TVAR <- TVAR - TMEAN %*% t(TMEAN)
  } else {
    TVAR = NA
  }

  # 4. Shift back by +mean for the (mu, sigma)-case
  TMEAN <- TMEAN + mean

  return(list(tmean=TMEAN, tvar=TVAR))
}

# Determination of expectation and covariance matrix via numerical integration
# of the one-dimensional marginal density, i.e.
# E[X_i]       = \int_{a_i}^{b_i}{x_i * f(x_i) d{x_i}}
# Var[x_i]     = \int_{a_i}^{b_i}{(x_i-\mu_i)^2 * f(x_i) d{x_i}}
# Cov[x_i,x_j] = \int_{a_i}^{b_i}\int_{a_j}^{b_j}{(x_i-\mu_i)(x_j-\mu_j) * f(x_i,x_j) d{x_i}d{x_j}}
#
# Expectation E[X_i] and variance Var[x_i] use the one-dimensional marginal density.
# The covariance Cov[x_i,x_j] requires the two-dimensional marginal density.
#
# @param mean mean vector (k x 1)
# @param sigma covariance matrix (k x k)
# @param lower, upper lower and upper truncation points (k x 1)
mtmvnorm.quadrature <- function(mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean))) {
  k = length(mean)

  # integrands for expectation/variance via numerical integration
  expectation <- function(x, n=1) {
    x * dtmvnorm.marginal(x, n=n, mean=mean, sigma=sigma, lower=lower, upper=upper)
  }
  variance <- function(x, n=1) {
    (x - m.integration[n])^2 * dtmvnorm.marginal(x, n=n, mean=mean, sigma=sigma, lower=lower, upper=upper)
  }

  # Determine expectation from one-dimensional marginal distribution using integration
  # i=1..k
  m.integration<-numeric(k)
  for (i in 1:k) {
    m.integration[i] <- integrate(expectation, lower[i], upper[i], n=i)$value
  }

  # Determine variances from one-dimensional marginal distribution using integration
  # i=1..k
  v.integration<-numeric(k)
  for (i in 1:k) {
    v.integration[i] <- integrate(variance, lower[i], upper[i], n=i)$value
  }

  return(list(m=m.integration, v=v.integration))
}
tmvtnorm/R/dtmvnorm.R0000644000176200001440000000770012567600057014346 0ustar liggesusers
source("R/rtmvnorm.R")

# Density function of the truncated multivariate normal distribution
# with truncation vectors lower and upper
#
# cf.
# Horrace (2005) "Some Results on the Multivariate Truncated Normal Distribution"
#
# @param x argument vector of the density of length n, or matrix (T x n) with T observations
# @param mean mean vector of length n
# @param sigma covariance matrix (n x n)
# @param lower lower truncation vector (n x 1) with lower <= x <= upper
# @param upper upper truncation vector (n x 1) with lower <= x <= upper
# @param margin if NULL then joint density, if MARGIN=1 then first marginal density, if MARGIN=c(1,2)
#  then bivariate marginal density for x_1 and x_2
dtmvnorm <- function(x,
  mean  = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep( Inf, length = length(mean)),
  log   = FALSE,
  margin=NULL) {
  # check of standard tmvnorm arguments
  cargs <- checkTmvArgs(mean=mean, sigma=sigma, lower=lower, upper=upper)
  mean  <- cargs$mean
  sigma <- cargs$sigma
  lower <- cargs$lower
  upper <- cargs$upper

  # Check of optional argument "margin"
  if (!is.null(margin)) {
    # Beware: dtmvnorm() also accepts a (T x n) matrix as argument,
    # dtmvnorm.marginal() only takes a vector,
    # dtmvnorm.marginal2() takes 2 vectors of the same length.
    # Beware of checks on the length of x and of the log=TRUE argument!
    if (!length(margin) %in% c(1, 2))
      stop("Length of margin must be either 1 (one-dimensional marginal density) or 2 (bivariate marginal density).")
    if (any(margin <= 0) || any(margin > length(mean))) {
      stop("All elements in margin must be in 1..length(mean).")
    }
    # one-dimensional marginal density f_{n}(x_n)
    if (length(margin) == 1) {
      return(dtmvnorm.marginal(xn=x, n=margin, mean = mean, sigma = sigma, lower = lower, upper = upper, log = log))
    }
    # for bivariate marginal density f_{q,r}(x_q, x_r) we need q <> r and "x" as (n x 2) matrix
    if (length(margin) == 2) {
      if(margin[1] == margin[2])
        stop("Two different margins needed for bivariate marginal density.")
      if (is.vector(x)) {
        x <- matrix(x, ncol = length(x))
      }
      if(!is.matrix(x) || ncol(x) != 2)
        stop("For bivariate marginal density x must be either a (n x 2) matrix or a vector of length 2.")
      # bivariate marginal density f_{q,r}(x_q, x_r)
      return(dtmvnorm.marginal2(xq=x[,1], xr=x[,2], q=margin[1], r=margin[2],
        mean = mean, sigma = sigma, lower = lower, upper = upper, log = log))
    }
  }

  # Check of additional inputs like x
  if (is.vector(x)) {
    x <- matrix(x, ncol = length(x))
  }

  # number of observations
  T <- nrow(x)

  # check for each row if in support region
  insidesupportregion <- logical(T)
  for (i in 1:T) {
    # BUGFIX: the original tested !any(is.infinite(x)) over the WHOLE matrix,
    # so a single infinite entry anywhere zeroed the density of every row.
    # The check must be per row i.
    insidesupportregion[i] = all(x[i,] >= lower & x[i,] <= upper & !any(is.infinite(x[i,])))
  }

  if(log) {
    # density value for points inside the support region
    dvin <- dmvnorm(x, mean=mean, sigma=sigma, log=TRUE) - log(pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma))
    # density value for points outside the support region
    dvout <- -Inf
  } else {
    dvin <- dmvnorm(x, mean=mean, sigma=sigma, log=FALSE) / pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma)
    dvout <- 0
  }

  f <- ifelse(insidesupportregion, dvin, dvout)
  return(f)
}

#dtmvnorm(x=c(0,0))
#dtmvnorm(x=c(0,0), sigma=diag(2))
#dtmvnorm(x=c(0,0), mean=c(0,0), sigma=diag(2))
#dmvnorm(x=c(0,0), mean=c(0,0), sigma=diag(2))
#dtmvnorm(x=matrix(c(0,0,1,1),2,2, byrow=TRUE),
# mean=c(0,0), sigma=diag(2))
#dtmvnorm(x=matrix(c(0,0,1,1),2,2, byrow=TRUE), mean=c(0,0), sigma=diag(2), lower=c(-1,-1), upper=c(0.5, 0.5))
#dtmvnorm(x=matrix(c(0,0,1,1),2,2, byrow=TRUE), mean=c(0,0), sigma=diag(2), lower=c(-1,-1), upper=c(0.5, 0.5), log=TRUE)
#dtmvnorm(as.matrix(seq(-1,2, by=0.1), ncol=1), mean=c(0.5), sigma=as.matrix(1.2^2), lower=0)
tmvtnorm/R/rtmvnorm2.R0000644000176200001440000002577712567600057014460 0ustar liggesusers
# Checks for lower <= Dx <= upper, where
# mean (d x 1), sigma (d x d), D (r x d), x (d x 1), lower (r x 1), upper (r x 1)
# Uses partly checks as in mvtnorm:::checkmvArgs!
#
# @param mean mean vector (d x 1)
# @param sigma covariance matrix (d x d)
# @param lower,upper truncation vectors (r x 1)
# @param D constraint matrix (r x d)
# @return list with the checked (and possibly coerced) arguments
checkTmvArgs2 <- function(mean, sigma, lower, upper, D) {
  if (is.null(lower) || any(is.na(lower)))
    stop(sQuote("lower"), " not specified or contains NA")
  if (is.null(upper) || any(is.na(upper)))
    stop(sQuote("upper"), " not specified or contains NA")
  if (!is.numeric(mean) || !is.vector(mean))
    stop(sQuote("mean"), " is not a numeric vector")
  if (is.null(sigma) || any(is.na(sigma)))
    stop(sQuote("sigma"), " not specified or contains NA")
  if (is.null(D) || any(is.na(D)))
    stop(sQuote("D"), " not specified or contains NA")

  if (!is.matrix(sigma)) {
    sigma <- as.matrix(sigma)
  }
  if (!is.matrix(D)) {
    D <- as.matrix(D)
  }

  # BUGFIX: the original compared NCOL(lower) != NCOL(upper), which is
  # always 1 == 1 for plain vectors and thus never triggered; compare
  # lengths so non-conforming truncation vectors are actually caught.
  if (length(lower) != length(upper)) {
    stop("lower and upper have non-conforming size")
  }

  checkSymmetricPositiveDefinite(sigma)

  # (removed unused locals d <- length(mean) and r <- length(lower);
  #  all subsequent checks use length()/NROW()/NCOL() directly)
  if (length(mean) != NROW(sigma)) {
    stop("mean and sigma have non-conforming size")
  }

  if (length(lower) != NROW(D) || length(upper) != NROW(D)) {
    stop("D (r x d), lower (r x 1) and upper (r x 1) have non-conforming size")
  }

  if (length(mean) != NCOL(D)) {
    stop("D (r x d) and mean (d x 1) have non-conforming size")
  }

  if (any(lower>=upper)) {
    stop("lower must be smaller than or equal to upper (lower<=upper)")
  }

  # checked arguments
  cargs <- list(mean=mean, sigma=sigma, lower=lower, upper=upper, D=D)
  return(cargs)
}

# Gibbs sampling with general linear constraints a <= Dx <= b
# with x (d x 1), D (r x
d), a,b (r x 1) requested by Xiaojin Xu [xiaojinxu.fdu@gmail.com] # which allows for (r > d) constraints! # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (d x 1) der Normalverteilung # @param sigma Kovarianzmatrix (d x d) der Normalverteilung # @param lower unterer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param upper oberer Trunkierungsvektor (d x 1) mit lower <= Dx <= upper # @param D Matrix for linear constraints, defaults to (d x d) diagonal matrix # @param H Precision matrix (d x d) if given # @param algorithm c("rejection", "gibbs", "gibbsR") rtmvnorm2 <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), D = diag(length(mean)), algorithm=c("gibbs", "gibbsR", "rejection"), ...) { algorithm <- match.arg(algorithm) # check of standard tmvtnorm arguments # Have to change check procedure to handle r > d case cargs <- checkTmvArgs2(mean, sigma, lower, upper, D) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper D <- cargs$D # check of additional arguments if (n < 1 || !is.numeric(n) || n != as.integer(n) || length(n) > 1) { stop("n must be a integer scalar > 0") } if (!identical(D,diag(length(mean)))) { # D <> I : general linear constraints if (algorithm == "gibbs") { # precision matrix case H vs. covariance matrix case sigma will be handled inside method retval <- rtmvnorm.gibbs2.Fortran(n, mean=mean, sigma=sigma, D=D, lower=lower, upper=upper, ...) } else if (algorithm == "gibbsR") { # covariance matrix case sigma retval <- rtmvnorm.gibbs2(n, mean=mean, sigma=sigma, D=D, lower=lower, upper=upper, ...) } else if (algorithm == "rejection") { retval <- rtmvnorm.rejection(n, mean=mean, sigma=sigma, D=D, lower=lower, upper=upper, ...) } return(retval) } else { # for D = I (d x d) forward to normal rtmvnorm() method retval <- rtmvnorm(n, mean=mean, sigma=sigma, lower=lower, upper=upper, D=D, ...) 
return(retval) } return(retval) } # Gibbs sampler implementation in R for general linear constraints # lower <= Dx <= upper where D (r x d), x (d x 1), lower, upper (r x 1) # which can handle the case r > d. # # @param n # @param mean # @param sigma # @param D # @param lower # @param upper # @param burn.in.samples # @param start.value # @param thinning rtmvnorm.gibbs2 <- function (n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), D = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { if (thinning < 1 || !is.numeric(thinning) || length(thinning) > 1) { stop("thinning must be a integer scalar > 0") } d <- length(mean) S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (any(D %*% start.value < lower || D %*% start.value > upper)) stop("start value does not suffice linear constraints lower <= Dx <= upper") x0 <- start.value } else { x0 <- ifelse(is.finite(lower), lower, ifelse(is.finite(upper), upper, 0)) } if (d == 1) { X <- rtnorm.gibbs(n, mu = mean[1], sigma = sigma[1, 1], a = lower[1], b = upper[1]) return(X) } # number of linear constraints lower/a <= Dx <= upper/b, D (r x n), a,b (r x 1), x (n x 1) r <- nrow(D) X <- matrix(NA, n, d) U <- runif((S + n * thinning) * d) l <- 1 sd <- list(d) P <- list(d) # [ Sigma_11 Sigma_12 ] = [ sigma_{i,i} sigma_{i,-i} ] # [ Sigma_21 Sigma_22 ] [ sigma_{-i,i} sigma_{-i,-i} ] for (i in 1:d) { Sigma_11 <- sigma[i, i] # (1 x 1) Sigma_12 <- sigma[i, -i] # (1 x (d - 1)) Sigma_22 <- sigma[-i, -i] # ((d - 1) x (d - 1)) P[[i]] <- t(Sigma_12) %*% solve(Sigma_22) sd[[i]] <- sqrt(Sigma_11 - P[[i]] %*% Sigma_12) } x <- x0 # for all draws for (j in (1 - S):(n * thinning)) { # for all x[i] for (i in 1:d) { lower_i <- -Inf upper_i <- +Inf # for all 
linear constraints k relevant for variable x[i]. # If D[k,i]=0 then constraint is irrelevant for x[i] for (k in 1:r) { if (D[k,i] == 0) next bound1 <- lower[k]/D[k, i] - D[k,-i] %*% x[-i] /D[k, i] bound2 <- upper[k]/D[k, i] - D[k,-i] %*% x[-i] /D[k, i] if (D[k, i] > 0) { lower_i <- pmax(lower_i, bound1) upper_i <- pmin(upper_i, bound2) } else { lower_i <- pmax(lower_i, bound2) upper_i <- pmin(upper_i, bound1) } } mu_i <- mean[i] + P[[i]] %*% (x[-i] - mean[-i]) F.tmp <- pnorm(c(lower_i, upper_i), mu_i, sd[[i]]) Fa <- F.tmp[1] Fb <- F.tmp[2] x[i] <- mu_i + sd[[i]] * qnorm(U[l] * (Fb - Fa) + Fa) l <- l + 1 } if (j > 0) { if (thinning == 1) { X[j, ] <- x } else if (j%%thinning == 0) { X[j%/%thinning, ] <- x } } } return(X) } rtmvnorm.gibbs2.Fortran <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), D = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { # No checks of input arguments, checks are done in rtmvnorm() # dimension of X d <- length(mean) # number of burn-in samples S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } # Take start value given by user or determine from lower and upper if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (NCOL(D) != length(start.value) || NROW(D) != length(lower) || NROW(D) != length(upper)) stop("D, start.value, lower, upper have non-conforming size") if (any(D %*% start.value < lower || D %*% start.value > upper)) stop("start value must lie in simplex defined by lower <= Dx <= upper") x0 <- start.value } else { stop("Must give start.value with lower <= D start.value <= upper") } # Sample from univariate truncated normal distribution which is very fast. 
if (d == 1) { X <- rtnorm.gibbs(n, mu=mean[1], sigma=sigma[1,1], a=lower[1], b=upper[1]) return(X) } # Ergebnismatrix (n x d) X <- matrix(0, n, d) # number of linear constraints lower/a <= Dx <= upper/b, D (r x n), a,b (r x 1), x (n x 1) r <- nrow(D) # Call to Fortran subroutine # TODO: Aufpassen, ob Matrix D zeilen- oder spaltenweise an Fortran bergeben wird! # Bei sigma ist das wegen Symmetrie egal. ret <- .Fortran("rtmvnormgibbscov2", n = as.integer(n), d = as.integer(d), r = as.integer(r), mean = as.double(mean), sigma = as.double(sigma), C = as.double(D), a = as.double(lower), b = as.double(upper), x0 = as.double(x0), burnin = as.integer(burn.in.samples), thinning = as.integer(thinning), X = as.double(X), NAOK=TRUE, PACKAGE="tmvtnorm") X <- matrix(ret$X, ncol=d, byrow=TRUE) return(X) } if (FALSE) { # dimension d=2 # number of linear constraints r=3 > d # linear restrictions a <= Dx <= b with x (d x 1); D (r x d); a,b (r x 1) D <- matrix( c( 1, 1, 1, -1, 0.5, -1), 3, 2, byrow=TRUE) a <- c(0, 0, 0) b <- c(1, 1, 1) # mark linear constraints as lines plot(NA, xlim=c(-0.5, 1.5), ylim=c(-1,1)) for (i in 1:3) { abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") } # Gibbs sampling: # determine lower and upper bounds for each index i given the remaining variables: x[i] | x[-i] ### Gibbs sampling for general linear constraints a <= Dx <= b x0 <- c(0.5, 0.2) sigma <- matrix(c(1, 0.2, 0.2, 1), 2, 2) X <- rtmvnorm.gibbs2(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b, start.value=x0) points(X, pch=20, col="black") X2 <- rtmvnorm.gibbs2(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b, start.value=x0) points(X2, pch=20, col="green") # Rejection sampling (rtmvnorm.rejection) funktioniert bereits mit beliebigen Restriktionen (r > d) X3 <- rtmvnorm.rejection(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b) points(X3, pch=20, col="red") rtmvnorm.gibbs2(n=1000, mean=c(0, 0), sigma, D, lower=a, upper=b, start.value=c(-1, 
-1)) colMeans(X) colMeans(X2) } tmvtnorm/R/bivariate-marginal-density.R0000644000176200001440000001600712567600057017713 0ustar liggesusers# SW: This method is private. It is the same as mvtnorm::dmvnorm() function, # but without sanity checks for sigma. We perform the sanity checks before. .dmvnorm <- function (x, mean, sigma, log = FALSE) { if (is.vector(x)) { x <- matrix(x, ncol = length(x)) } distval <- mahalanobis(x, center = mean, cov = sigma) logdet <- sum(log(eigen(sigma, symmetric = TRUE, only.values = TRUE)$values)) logretval <- -(ncol(x) * log(2 * pi) + logdet + distval)/2 if (log) return(logretval) exp(logretval) } # Computation of the bivariate marginal density F_{q,r}(x_q, x_r) (q != r) # of truncated multivariate normal distribution # following the works of Tallis (1961), Leppard and Tallis (1989) # # References: # Tallis (1961): # "The Moment Generating Function of the Truncated Multi-normal Distribution" # Leppard and Tallis (1989): # "Evaluation of the Mean and Covariance of the Truncated Multinormal" # Manjunath B G and Stefan Wilhelm (2009): # "Moments Calculation for the Doubly Truncated Multivariate Normal Distribution" # # (n-2) Integral, d.h. zweidimensionale Randdichte in Dimension q und r, # da (n-2) Dimensionen rausintegriert werden. # vgl. Tallis (1961), S.224 und Code Leppard (1989), S.550 # # f(xq=b[q], xr=b[r]) # # Attention: Function is not vectorized at the moment! # Idee: Vektorisieren xq, xr --> die Integration Bounds sind immer verschieden, # pmvnorm() kann nicht vektorisiert werden. Sonst spart man schon ein bisschen Overhead. # Der eigentliche bottleneck ist aber pmvnorm(). # Gibt es Unterschiede bzgl. der verschiedenen Algorithmen GenzBretz() vs. Miwa()? 
# pmvnorm(corr=) kann ich verwenden # # @param xq # @param xr # @param q index for dimension q # @param r Index fr Dimension r # @param mean # @param sigma # @param lower # @param upper # @param log=FALSE dtmvnorm.marginal2 <- function(xq, xr, q, r, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE, pmvnorm.algorithm=GenzBretz()) { # dimensionality n <- nrow(sigma) # number of xq values delivered N <- length(xq) # input checks if (n < 2) stop("Dimension n must be >= 2!") # TODO: Check eventuell rauslassen # SW; isSymmetric is sehr teuer #if (!isSymmetric(sigma, tol = sqrt(.Machine$double.eps))) { #if (!isTRUE(all.equal(sigma, t(sigma))) || any(diag(sigma) < 0)) { # stop("sigma must be a symmetric matrix") #} if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } if (!(q %in% 1:n && r %in% 1:n)) { stop("Indexes q and r must be integers in 1:n") } if (q == r) { stop("Index q must be different than r!") } # Skalierungsfaktor der gestutzten Dichte (Anteil nach Trunkierung) # Idee: dtmvnorm.marginal2() braucht 80% der Zeit von mtmvnorm(). Die meiste Zeit davon in pmvnorm(). # pmvnorm()-Aufrufe sind teuer, daher knnte man das alpha schon vorher berechnen # lassen (nur 2 pmvnorm()-Aufrufe in der Methode, wrde 50% sparen) # Da Methode jetzt vektorisiert ist, sparen wir die Aufrufe wg. 
alpha alpha <- pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma, algorithm=pmvnorm.algorithm) if (n == 2) { density <- numeric(N) indOut <- xq < lower[q] | xq > upper[q] | xr < lower[r] | xr > upper[r] | is.infinite(xq) | is.infinite(xr) density[indOut] <- 0 # dmvnorm() macht auch viele Checks; Definiere eine private Methode .dmvnorm() ohne Checks density[!indOut] <- .dmvnorm(x=cbind(xq, xr)[!indOut,], mean=mean[c(q,r)], sigma=sigma[c(q,r),c(q,r)]) / alpha if (log == TRUE) { return(log(density)) } else { return(density) } } # standard deviation for normalisation SD <- sqrt(diag(sigma)) # normalised bounds lower.normalised <- (lower - mean) / SD upper.normalised <- (upper - mean) / SD xq.normalised <- (xq - mean[q]) / SD[q] # (N x 1) xr.normalised <- (xr - mean[r]) / SD[r] # (N x 1) # Computing correlation matrix R from sigma (matrix (n x n)): # R = D % sigma %*% D with diagonal matrix D as sqrt(sigma) # same as cov2cor() D <- matrix(0, n, n) diag(D) <- sqrt(diag(sigma))^(-1) R <- D %*% sigma %*% D # # Determine (n-2) x (n-2) correlation matrix RQR # RQR <- matrix(NA, n-2, n-2) RINV <- solve(R) WW <- matrix(NA, n-2, n-2) M1 <- 0 for (i in 1:n) { if (i != q && i != r) { M1 <- M1 + 1 M2 <- 0 for (j in 1:n) { if (j != q && j != r) { M2 <- M2 + 1 WW[M1, M2] <- RINV[i,j] } } } } WW <- solve(WW[1:(n-2),1:(n-2)]) for(i in 1:(n-2)) { for(j in 1:(n-2)) { RQR[i, j] <- WW[i, j] / sqrt(WW[i,i] * WW[j,j]) } } # # Determine bounds of integration vector AQR and BQR (n - 2) x 1 # # lower and upper integration bounds AQR <- matrix(NA, N, n-2) BQR <- matrix(NA, N, n-2) M2 <- 0 # counter = 1..(n-2) for (i in 1:n) { if (i != q && i != r) { M2 <- M2 + 1 BSQR <- (R[q, i] - R[q, r] * R[r, i]) / (1 - R[q, r]^2) BSRQ <- (R[r, i] - R[q, r] * R[q, i]) / (1 - R[q, r]^2) RSRQ <- (1 - R[i, q]^2) * (1 - R[q, r]^2) RSRQ <- (R[i, r] - R[i, q] * R[q, r]) / sqrt(RSRQ) # partial correlation coefficient R[r,i] given q # lower integration bound AQR[,M2] <- (lower.normalised[i] - BSQR * 
xq.normalised - BSRQ * xr.normalised) / sqrt((1 - R[i, q]^2) * (1 - RSRQ^2)) AQR[,M2] <- ifelse(is.nan(AQR[,M2]), -Inf, AQR[,M2]) # upper integration bound BQR[,M2] <- (upper.normalised[i] - BSQR * xq.normalised - BSRQ * xr.normalised) / sqrt((1 - R[i, q]^2) * (1 - RSRQ^2)) BQR[,M2] <- ifelse(is.nan(BQR[,M2]), Inf, BQR[,M2]) } } # Correlation matrix for r and q R2 <- matrix(c( 1, R[q,r], R[q,r], 1), 2, 2) sigma2 <- sigma[c(q,r),c(q,r)] density <- ifelse ( xq < lower[q] | xq > upper[q] | xr < lower[r] | xr > upper[r] | is.infinite(xq) | is.infinite(xr), 0, { # SW: RQR is a correlation matrix, so call pmvnorm(...,corr=) which is faster than # pmvnorm(...,corr=) # SW: Possibly vectorize this loop if pmvnorm allows vectorized lower and upper bounds prob <- numeric(N) # (N x 1) for (i in 1:N) { if ((n - 2) == 1) { # univariate case: pmvnorm(...,corr=) does not work, will work with sigma= prob[i] <- pmvnorm(lower=AQR[i,], upper=BQR[i,], sigma=RQR, algorithm=pmvnorm.algorithm) } else { prob[i] <- pmvnorm(lower=AQR[i,], upper=BQR[i,], corr=RQR, algorithm=pmvnorm.algorithm) } } dmvnorm(x=cbind(xq, xr), mean=mean[c(q,r)], sigma=sigma2) * prob / alpha } ) if (log == TRUE) { return(log(density)) } else { return(density) } } tmvtnorm/R/ptmvt-marginal.R0000644000176200001440000000255512567600057015445 0ustar liggesusers# Verteilungsfunktion fr die eindimensionale Randdichte f(x_n) # einer Truncated Multivariate Student t Distribution, # by integrating out (n-1) dimensions. 
# # @param xn Vektor der Lnge l von Punkten, an dem die Verteilungsfunktion ausgewertet wird # @param i Index (1..n) dessen Randdichte berechnet werden soll # @param mean (nx1) Mittelwertvektor # @param sigma (nxn)-Kovarianzmatrix # @param df degrees of freedom parameter # @param lower,upper Trunkierungsvektor lower <= x <= upper ptmvt.marginal <- function(xn, n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), df = 1, lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean))) { # check of standard tmvnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (n < 1 || n > length(mean) || !is.numeric(n) || length(n) > 1 || !n %in% 1:length(mean)) { stop("n must be a integer scalar in 1..length(mean)") } # Anzahl der Dimensionen k = length(mean) Fx = numeric(length(xn)) upper2 = upper alpha = pmvt(lower = lower, upper = upper, delta = mean, sigma = sigma, df = df) for (i in 1:length(xn)) { upper2[n] = xn[i] Fx[i] = pmvt(lower=lower, upper=upper2, delta=mean, sigma=sigma, df = df) } return (Fx/alpha) }tmvtnorm/R/qtmvnorm-marginal.R0000644000176200001440000000267212567600057016156 0ustar liggesusers# Berechnet die Quantile der eindimensionalen Randverteilung ber uniroot() # # @param p probability # @param interval a vector containing the end-points of the interval to be searched by uniroot. # @param tail specifies which quantiles should be computed. lower.tail gives the quantile x for which P[X <= x] = p, upper.tail gives x with P[X > x] = p and both.tails leads to x with P[-x <= X <= x] = p. # @param n # @param mean # @param sigma # @param lower # @param upper # @param ... additional parameters to uniroot() qtmvnorm.marginal <- function (p, interval = c(-10, 10), tail = c("lower.tail", "upper.tail", "both.tails"), n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), ...) 
{ if (length(p) != 1 || (p <= 0 || p >= 1)) stop(sQuote("p"), " is not a double between zero and one") if (n > length(mean) || n < 1) stop(sQuote("n"), " is not a integer between 1 and ",length(mean)) pfct <- function(q) { switch(tail, both.tails = { low <- lower low[n] <- -abs(q) upp <- upper upp[n] <- abs(q) }, upper.tail = { low <- lower upp <- upper low[n] <- q }, lower.tail = { low <- lower upp <- upper upp[n] <- q }, ) ptmvnorm(low, upp, mean, sigma, lower, upper) - p } qroot <- uniroot(pfct, interval = interval, ...) qroot } tmvtnorm/R/tmvnorm-estimation-GMM.R0000644000176200001440000002442112567600057016771 0ustar liggesusers# Estimation of the parameters # of the truncated multivariate normal distribution using GMM # and # (1) the moment equations from Lee (1981) and Lee (1983) # (2) Our moment formula and equating mean and covariance matrix #library(gmm) #library(tmvtnorm) #source("rtmvnorm.R") # fr checkTmvArgs() #source("tmvnorm-estimation.R") # fr vec(), vech() und inv_vech() "%w/o%" <- function(x,y) x[!x %in% y] #-- x without y ################################################################################ # # Multivariater Fall # ################################################################################ # Definition einer Funktion mit Momentenbedingungen fr gmm() # nach den Lee (1979, 1983, 1981) moment conditions # # N dimensions, K = N + N*(N+1)/2 parameters # number of moment conditions L=(l_max + 1) * N # parameter vector tet = c(mu, vech(sigma)), length K # @param tet named parameter vector theta = c(mu, vech(sigma)) # @param x data matrix (T x N) gmultiLee <- function(tet, fixed=c(), fullcoefnames, x, lower, upper, l_max = ceiling((ncol(x)+1)/2), cholesky=FALSE) { fullcoef <- rep(NA, length(tet) + length(fixed)) names(fullcoef) <- fullcoefnames if (any(!names(fixed) %in% names(fullcoef))) stop("some named arguments in 'fixed' are not arguments in parameter vector theta") fullcoef[names(tet)] <- tet fullcoef[names(fixed)] <- fixed K <- 
length(tet) # Anzahl der zu schtzenden Parameter N <- ncol(x) # Anzahl der Dimensionen T <- nrow(x) # Anzahl der Beobachtungen #l_max <- ceiling((N+1)/2) # maximales l fr Momentenbedingungen X <- matrix(NA, T, (l_max+1)*N) # Rckgabematrix mit den Momenten # Parameter mean/sigma aus dem Parametervektor tet extrahieren mean <- fullcoef[1:N] # Matrix fr sigma bauen if (cholesky) { L <- inv_vech(fullcoef[-(1:N)]) L[lower.tri(L, diag=FALSE)] <- 0 # L entspricht jetzt chol(sigma), obere Dreiecksmatrix sigma <- t(L) %*% L } else { sigma <- inv_vech(fullcoef[-(1:N)]) } #cat("Call to gmultiLee with tet=",tet," sigma=",sigma," det(sigma)=",det(sigma),"\n") #flush.console() # if sigma is not positive definite we return some maximum value if (det(sigma) <= 0 || any(diag(sigma) < 0)) { X <- matrix(+Inf, T, N + N * (N+1) / 2) return(X) } sigma_inv <- solve(sigma) # inverse Kovarianzmatrix F_a = numeric(N) F_b = numeric(N) F <- 1 for (i in 1:N) { # one-dimensional marginal density in dimension i F_a[i] <- dtmvnorm.marginal(lower[i], n=i, mean=mean, sigma=sigma, lower=lower, upper=upper) F_b[i] <- dtmvnorm.marginal(upper[i], n=i, mean=mean, sigma=sigma, lower=lower, upper=upper) } k <- 1 for(l in 0:l_max) { for (i in 1:N) { sigma_i <- sigma_inv[i,] # i-te Zeile der inversen Kovarianzmatrix (1 x N) = entpricht sigma^{i'} a_il <- ifelse(is.infinite(lower[i]), 0, lower[i]^l) b_il <- ifelse(is.infinite(upper[i]), 0, upper[i]^l) # Lee (1983) moment equation for l #X[,k] <- sigma_i %*% mean * x[,i]^l - (x[,i]^l * x) %*% sigma_inv[,i] + l * (x[,i]^(l-1)) + (a_il * F_a[i] - b_il * F_b[i]) / F X[,k] <- sigma_i %*% mean * x[,i]^l - sweep(x, 1, x[,i]^l, FUN="*") %*% sigma_inv[,i] + l * (x[,i]^(l-1)) + (a_il * F_a[i] - b_il * F_b[i]) / F #T x 1 (1 x N) (N x 1) (T x 1) (T x N) (N x 1) (T x 1) (skalar) k <- k + 1 # Zhlvariable } } return(X) } # Definition einer Funktion mit Momentenbedingungen # mit Mean and Covariance-Matrix bauen anstatt mit Lee Bedingungen # # @param tet named parameter 
vector theta, should be part of c(vec(mu), vech(sigma)) # @param fixed a named list of fixed parameters # @param fullcoefnames # @param x data matrix (T x N) # @param lower # @param upper # @param cholesky flag whether we use Cholesky decompostion Sigma = LL' # of the covariance matrix in order to ensure positive-definiteness of sigma gmultiManjunathWilhelm <- function(tet, fixed=c(), fullcoefnames, x, lower, upper, cholesky=FALSE) { fullcoef <- rep(NA, length(tet) + length(fixed)) names(fullcoef) <- fullcoefnames if (any(!names(fixed) %in% names(fullcoef))) stop("some named arguments in 'fixed' are not arguments in parameter vector theta") fullcoef[names(tet)] <- tet fullcoef[names(fixed)] <- fixed N <- ncol(x) # Anzahl der Dimensionen T <- nrow(x) # Anzahl der Beobachtungen X <- matrix(NA, T, N + N * (N+1) / 2) # Rckgabematrix mit den Momenten # Parameter mean/sigma aus dem Parametervektor tet extrahieren mean <- fullcoef[1:N] # Matrix fr sigma bauen if (cholesky) { L <- inv_vech(fullcoef[-(1:N)]) L[lower.tri(L, diag=FALSE)] <- 0 # L entspricht jetzt chol(sigma), obere Dreiecksmatrix sigma <- t(L) %*% L } else { sigma <- inv_vech(fullcoef[-(1:N)]) } #cat("Call to gmultiManjunathWilhelm with tet=",tet," fullcoef=", fullcoef, " sigma=",sigma," det(sigma)=",det(sigma),"\n") #flush.console() # if sigma is not positive definite we return some maximum value if (det(sigma) <= 0 || any(diag(sigma) < 0)) { X <- matrix(+Inf, T, N + N * (N+1) / 2) return(X) } # Determine moments (mu, sigma) for parameters mean/sigma # experimental: moments <- mtmvnorm(mean=mean, sigma=sigma, lower=lower, upper=upper, doCheckInputs=FALSE) moments <- mtmvnorm(mean=mean, sigma=sigma, lower=lower, upper=upper) # Momentenbedingungen fr die Elemente von mean : mean(x) for(i in 1:N) { X[,i] <- (moments$tmean[i] - x[,i]) } # Momentenbedingungen fr alle Lower-Diagonal-Elemente von sigma k <- 1 for (i in 1:N) { for (j in 1:N) { # (1,1), (2, 1), (2,2) if (j > i) next #cat(sprintf("sigma[%d,%d]",i, 
j),"\n") X[,(N+k)] <- (moments$tmean[i] - x[,i]) * (moments$tmean[j] - x[,j]) - moments$tvar[i, j] k <- k + 1 } } return(X) } # GMM estimation method # # @param X data matrix (T x n) # @param lower, upper truncation points # @param start list of start values for mu and sigma # @param fixed a list of fixed parameters # @param method either "ManjunathWilhelm" or "Lee" moment conditions # @param cholesky flag, if TRUE, we use the Cholesky decomposition of sigma as parametrization # @param ... additional parameters passed to gmm() gmm.tmvnorm <- function(X, lower=rep(-Inf, length = ncol(X)), upper=rep(+Inf, length = ncol(X)), start=list(mu=rep(0,ncol(X)), sigma=diag(ncol(X))), fixed=list(), method=c("ManjunathWilhelm","Lee"), cholesky=FALSE, ... ) { method <- match.arg(method) # check of standard tmvtnorm arguments cargs <- checkTmvArgs(start$mu, start$sigma, lower, upper) start$mu <- cargs$mean start$sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check if we have at least one sample if (!is.matrix(X) || nrow(X) == 0) { stop("Data matrix X with at least one row required.") } # verify dimensions of x and lower/upper match n <- length(lower) if (NCOL(X) != n) { stop("data matrix X has a non-conforming size. 
Must have ",length(lower)," columns.") } # check if lower <= X <= upper for all rows ind <- logical(nrow(X)) for (i in 1:nrow(X)) { ind[i] = all(X[i,] >= lower & X[i,] <= upper) } if (!all(ind)) { stop("some of the data points are not in the region lower <= X <= upper") } # parameter vector theta theta <- c(start$mu, vech2(start$sigma)) # names for mean vector elements : mu_i nmmu <- paste("mu_",1:n,sep="") # names for sigma elements : sigma_i.j nmsigma <- paste("sigma_",vech2(outer(1:n,1:n, paste, sep=".")),sep="") names(theta) <- c(nmmu, nmsigma) fullcoefnames <- names(theta) # use only those parameters without the fixed parameters for gmm(), # since I do not know how to specify fixed=c() in gmm() theta2 <- theta[names(theta) %w/o% names(fixed)] # define a wrapper function with only 2 arguments theta and x (f(theta, x)) # that will be invoked by gmm() gManjunathWilhelm <- function(tet, x) { gmultiManjunathWilhelm(tet=tet, fixed=unlist(fixed), fullcoefnames=fullcoefnames, x=x, lower=lower, upper=upper, cholesky=cholesky) } # TODO: Allow for l_max parameter for Lee moment conditions gLee <- function(tet, x) { gmultiLee(tet = tet, fixed = unlist(fixed), fullcoefnames = fullcoefnames, x = x, lower = lower, upper = upper, cholesky = cholesky) } if (method == "ManjunathWilhelm") { gmm.fit <- gmm(gManjunathWilhelm, x=X, t0=theta2, ...) } else { gmm.fit <- gmm(gLee, x=X, t0=theta2, ...) } return(gmm.fit) } # deprecated # GMM mit Lee conditions gmm.tmvnorm2 <- function (X, lower = rep(-Inf, length = ncol(X)), upper = rep(+Inf, length = ncol(X)), start = list(mu = rep(0, ncol(X)), sigma = diag(ncol(X))), fixed = list(), cholesky = FALSE, ...) { cargs <- checkTmvArgs(start$mu, start$sigma, lower, upper) start$mu <- cargs$mean start$sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (!is.matrix(X) || nrow(X) == 0) { stop("Data matrix X with at least one row required.") } n <- length(lower) if (NCOL(X) != n) { stop("data matrix X has a non-conforming size. 
Must have ", length(lower), " columns.") } ind <- logical(nrow(X)) for (i in 1:nrow(X)) { ind[i] = all(X[i, ] >= lower & X[i, ] <= upper) } if (!all(ind)) { stop("some of the data points are not in the region lower <= X <= upper") } theta <- c(start$mu, vech2(start$sigma)) nmmu <- paste("mu_", 1:n, sep = "") nmsigma <- paste("sigma_", vech2(outer(1:n, 1:n, paste, sep = ".")), sep = "") names(theta) <- c(nmmu, nmsigma) fullcoefnames <- names(theta) theta2 <- theta[names(theta) %w/o% names(fixed)] gmultiwrapper <- function(tet, x) { gmultiLee(tet = tet, fixed = unlist(fixed), fullcoefnames = fullcoefnames, x = x, lower = lower, upper = upper, cholesky = cholesky) } gmm.fit <- gmm(gmultiwrapper, x = X, t0 = theta2, ...) return(gmm.fit) } tmvtnorm/R/rtmvt.R0000644000176200001440000002101512567600057013647 0ustar liggesusers# Sampling from Truncated multivariate t distribution using # # a) Rejection sampling # b) Gibbs sampling # # Author: Stefan Wilhelm, Manjunath B G # # Literatur: # (1) Rejection Sampling : None # (2) Gibbs Sampling : # Geweke (1991) "Efficient simulation from the multivariate normal and Student-t distributions # subject to linear constraints and the evaluation of constraint probabilities" ############################################################################### rtmvt <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), algorithm=c("rejection", "gibbs"), ...) 
{ algorithm <- match.arg(algorithm) # check of standard tmvtnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check of additional arguments : n and df if (n < 1 || !is.numeric(n) || n != as.integer(n) || length(n) > 1) { stop("n must be a integer scalar > 0") } if (df < 1 || !is.numeric(df) || length(df) > 1) { stop("df must be a numeric scalar > 0") } if (algorithm == "rejection") { if (df != as.integer(df)) stop("Rejection sampling currenly works only for integer degrees of freedom. Consider using algorithm='gibbs'.") retval <- rtmvt.rejection(n, mean, sigma, df, lower, upper) } else if (algorithm == "gibbs") { retval <- rtmvt.gibbs(n, mean, sigma, df, lower, upper, ...) } return(retval) } # Erzeugt eine Matrix X (n x k) mit Zufallsrealisationen aus einer Trunkierten Multivariaten t Verteilung # mit k Dimensionen # ber Rejection Sampling aus einer Multivariaten t-Verteilung # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der Normalverteilung # @param sigma Kovarianzmatrix (k x k) der Normalverteilung # @param df degrees of freedom parameter # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper rtmvt.rejection <- function(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean))) { # No check of input parameters, checks are done in rtmvnorm()! 
# k = Dimension k <- length(mean) # mean as (1 x k) matrix mmean <- matrix(mean, 1, k) # Ergebnismatrix (n x k) Y <- matrix(NA, n, k) # Anzahl der noch zu ziehenden Samples numSamples <- n # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- 0 # Akzeptanzrate alpha aus der Multivariaten t-Verteilung bestimmen alpha <- pmvt(lower=lower, upper=upper, delta=mean, sigma=sigma, df=df) if (alpha <= 0.01) warning("Acceptance rate is very low and rejection sampling becomes inefficient. Consider using Gibbs sampling.") # Ziehe wiederholt aus der Multivariaten Student-t und schaue, wieviel Samples nach Trunkierung brig bleiben while(numSamples > 0) { # Erzeuge N/alpha Samples aus einer multivariaten Normalverteilung: Wenn alpha zu niedrig ist, wird Rejection Sampling ineffizient und N/alpha zu gro. Dann nur N erzeugen nproposals <- ifelse (numSamples/alpha > 1000000, numSamples, ceiling(max(numSamples/alpha,10))) X <- rmvt(nproposals, sigma=sigma, df=df) # SW: rmvt() hat keinen Parameter delta # add mean : t(t(X) + mean) oder so: for (i in 1:k) { X[,i] = mean[i] + X[,i] } # Bestimme den Anteil der Samples nach Trunkierung # Bug: ind= rowSums(lower <= X & X <= upper) == k # wesentlich schneller als : ind=apply(X, 1, function(x) all(x >= lower & x<=upper)) ind <- logical(nproposals) for (i in 1:nproposals) { ind[i] = all(X[i,] >= lower & X[i,] <= upper) } # Anzahl der akzeptierten Samples in diesem Durchlauf numAcceptedSamples <- length(ind[ind==TRUE]) # Wenn nix akzeptiert wurde, dann weitermachen if (length(numAcceptedSamples) == 0 || numAcceptedSamples == 0) next #cat("numSamplesAccepted=",numAcceptedSamples," numSamplesToDraw = ",numSamples,"\n") numNeededSamples <- min(numAcceptedSamples, numSamples) Y[(numAcceptedSamplesTotal+1):(numAcceptedSamplesTotal+numNeededSamples),] <- X[which(ind)[1:numNeededSamples],] # Anzahl der akzeptierten Samples insgesamt numAcceptedSamplesTotal <- numAcceptedSamplesTotal + numAcceptedSamples # Anzahl der verbliebenden 
Samples numSamples <- numSamples - numAcceptedSamples } Y } # Gibbs sampler for the truncated multivariate Student-t # see Geweke (1991) # # @param n Anzahl der Realisationen # @param mean Mittelwertvektor (k x 1) der t-Verteilung # @param sigma Kovarianzmatrix (k x k) der t-Verteilung # @param df degrees of freedom parameter # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param burn.in number of burn-in samples to be discarded # @param start start value for Gibbs sampling # @param thinning rtmvt.gibbs <- function (n=1, mean=rep(0, ncol(sigma)), sigma = diag(length(mean)), df=1, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), burn.in.samples = 0, start.value = NULL, thinning = 1) { # dimension of X k = length(mean) # Mean Vector mu = mean # number of burn-in samples S <- burn.in.samples if (!is.null(S)) { if (S < 0) stop("number of burn-in samples must be non-negative") } # Ergebnismatrix X (n x k) # Random sample from truncated Student-t density X <- matrix(NA, n, k) # Realisation from truncated multivariate normal Z <- numeric(k) # Chi-Square variable w w <- numeric(1) # x is one realisation from truncated Student-t density conditioned on Z and w x <- numeric(k) # Take start value given by user or use random start value if (!is.null(start.value)) { if (length(mean) != length(start.value)) stop("mean and start value have non-conforming size") if (any(start.valueupper)) stop("start value is not inside support region") Z <- start.value - mu } else { # If no start value is specified, # the initial value/start value for Z drawn from TN(0,\Sigma) # with truncation point a = a-mu and b = b-mu Z <- rtmvnorm(1, mean=rep(0,k), sigma=sigma, lower=lower-mu, upper=upper-mu, algorithm="gibbs") } # Algorithm begins : # Draw from Uni(0,1) U <- runif((S + n*thinning) * k) indU <- 1 # Index for accessing U # List of conditional standard 
deviations can be pre-calculated sd <- list(k) # List of t(Sigma_i) %*% solve(Sigma) term P <- list(k) for(i in 1:k) { # Partitioning of Sigma Sigma <- sigma[-i,-i] # (k-1) x (k-1) sigma_ii <- sigma[i,i] # 1 x 1 Sigma_i <- sigma[i,-i] # (k-1) x 1 P[[i]] <- t(Sigma_i) %*% solve(Sigma) sd[[i]] <- sqrt(sigma_ii - P[[i]] %*% Sigma_i) } for(i in (1-S):(n*thinning)) { # Step 1: Simulation of w conditional on Z from Chi-square distribution by rejection sampling # so that (lower - mu) * w <= Z <= (upper - mu) * w acceptedW <- FALSE while (!acceptedW) { w <- (rchisq(1, df, ncp=0)/df)^(1/2) acceptedW <- all((lower - mu) * w <= Z & Z <= (upper - mu) * w) } # Transformed Chi-Square sample subject to condition on Z0 alpha <- (lower - mu) * w beta <- (upper - mu) * w # Step 2: Simulation from Truncated normal Gibbs sampling approach for(j in 1:k) { mu_j <- P[[j]] %*% (Z[-j]) Fa <- pnorm( (lower[j]-mu[j])*w, mu_j, sd[[j]]) Fb <- pnorm( (upper[j]-mu[j])*w, mu_j, sd[[j]]) Z[j] <- mu_j + sd[[j]] * qnorm(U[indU] * (Fb - Fa) + Fa) # changed on 22nd February 2010 by Manju indU <- indU + 1 } # Step 3: Student-t transformation x <- mu + ( Z / w ) if (i > 0) { if (thinning == 1) { # no thinning, take all samples except for burn-in-period X[i,] <- x } else if (i %% thinning == 0){ X[i %/% thinning,] <- x } } } return(X) } # Ziehe aus einer multi-t-Distribution ohne Truncation X <- rtmvt.rejection(n=10000, mean=rep(0, 3), df=2) # Teste mit Kolmogoroff-Smirnoff-Test auf Verteilung tmvtnorm/R/checkTmvArgs.R0000644000176200001440000000312412567600057015055 0ustar liggesuserscheckSymmetricPositiveDefinite <- function(x, name="sigma") { if (!isSymmetric(x, tol = sqrt(.Machine$double.eps))) { stop(sprintf("%s must be a symmetric matrix", name)) } if (NROW(x) != NCOL(x)) { stop(sprintf("%s must be a square matrix", name)) } if (any(diag(x) <= 0)) { stop(sprintf("%s all diagonal elements must be positive", name)) } if (det(x) <= 0) { stop(sprintf("%s must be positive definite", name)) } } # Uses 
partly checks as in mvtnorm:::checkmvArgs! checkTmvArgs <- function(mean, sigma, lower, upper) { if (is.null(lower) || any(is.na(lower))) stop(sQuote("lower"), " not specified or contains NA") if (is.null(upper) || any(is.na(upper))) stop(sQuote("upper"), " not specified or contains NA") if (!is.numeric(mean) || !is.vector(mean)) stop(sQuote("mean"), " is not a numeric vector") if (is.null(sigma) || any(is.na(sigma))) stop(sQuote("sigma"), " not specified or contains NA") if (!is.matrix(sigma)) { sigma <- as.matrix(sigma) } if (NCOL(lower) != NCOL(upper)) { stop("lower and upper have non-conforming size") } checkSymmetricPositiveDefinite(sigma) if (length(mean) != NROW(sigma)) { stop("mean and sigma have non-conforming size") } if (length(lower) != length(mean) || length(upper) != length(mean)) { stop("mean, lower and upper must have the same length") } if (any(lower>=upper)) { stop("lower must be smaller than or equal to upper (lower<=upper)") } # checked arguments cargs <- list(mean=mean, sigma=sigma, lower=lower, upper=upper) return(cargs) }tmvtnorm/R/tmvnorm-estimation.R0000644000176200001440000002344012567600057016353 0ustar liggesusers# estimation methods for the parameters of the truncated multivariate normal distribution # # Literatur: # # Amemiya (1974) : Instrumental Variables estimator # Lee (1979) # Lee (1983) # Griffiths (2002) : # "Gibbs Sampler for the parameters of the truncated multivariate normal distribution" # # Stefan Wilhelm, wilhelm@financial.com #library(tmvtnorm) library(stats4) # Hilfsfunktion : VECH() Operator vech=function (x) { # PURPOSE: creates a column vector by stacking columns of x # on and below the diagonal #---------------------------------------------------------- # USAGE: v = vech(x) # where: x = an input matrix #--------------------------------------------------------- # RETURNS: # v = output vector containing stacked columns of x #---------------------------------------------------------- # Written by Mike Cliff, UNC Finance 
mcliff@unc.edu # CREATED: 12/08/98 #if(!is.matrix(x)) #{ # #} rows = nrow(x) columns = ncol(x); v = c(); for (i in 1:columns) { v = c(v, x[i:rows,i]); } v } # Hilfsfunktion : Operator fr Namensgebung sigma_i.j (i <= j), d.h. wie vech(), nur Zeilenweise vech2 <- function (x) { # PURPOSE: creates a column vector by stacking columns of x # on and below the diagonal #---------------------------------------------------------- # USAGE: v = vech2(x) # where: x = an input matrix #--------------------------------------------------------- # RETURNS: # v = output vector containing stacked columns of x #---------------------------------------------------------- # Written by Mike Cliff, UNC Finance mcliff@unc.edu # CREATED: 12/08/98 rows = nrow(x) columns = ncol(x); v = c(); for (i in 1:rows) { v = c(v, x[i,i:columns]); } v } # Hilfsfunktion : Inverser VECH() Operator inv_vech=function(v) { #---------------------------------------------------------- # USAGE: x = inv_vech(v) # where: v = a vector #--------------------------------------------------------- # RETURNS: # x = a symmetric (m x m) matrix containing de-vectorized elements of v #---------------------------------------------------------- # Anzahl der Zeilen m = -0.5+sqrt(0.5^2+2*length(v)) x = matrix(0,nrow=m,ncol=m) if (length(v) != m*(m+1)/2) { # error stop("v must have m*(m+1)/2 elements") } for (i in 1:m) { #cat("r=",i:m," c=",i,"\n") x[ i:m, i] = v[((i-1)*(m-(i-2)*0.5)+1) : (i*(m-(i-1)*0.5))] x[ i, i:m] = v[((i-1)*(m-(i-2)*0.5)+1) : (i*(m-(i-1)*0.5))] } x } # 1. 
Maximum-Likelihood-Estimation of mu and sigma when truncation points are known # # TODO/Idee: Cholesky-Zerlegung der Kovarianzmatrix als Parametrisierung # # @param X data matrix (T x n) # @param lower, upper truncation points # @param start list of start values for mu and sigma # @param fixed a list of fixed parameters # @param method # @param cholesky flag, if TRUE, we use the Cholesky decomposition of sigma as parametrization # @param lower.bounds lower bounds for method "L-BFGS-B" # @param upper.bounds upper bounds for method "L-BFGS-B" mle.tmvnorm <- function(X, lower=rep(-Inf, length = ncol(X)), upper=rep(+Inf, length = ncol(X)), start=list(mu=rep(0,ncol(X)), sigma=diag(ncol(X))), fixed=list(), method="BFGS", cholesky=FALSE, lower.bounds=-Inf, upper.bounds=+Inf, ...) { # check of standard tmvtnorm arguments cargs <- checkTmvArgs(start$mu, start$sigma, lower, upper) start$mu <- cargs$mean start$sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check if we have at least one sample if (!is.matrix(X) || nrow(X) == 0) { stop("Data matrix X with at least one row required.") } # verify dimensions of x and lower/upper match n <- length(lower) if (NCOL(X) != n) { stop("data matrix X has a non-conforming size. 
Must have ",length(lower)," columns.") } # check if lower <= X <= upper for all rows ind <- logical(nrow(X)) for (i in 1:nrow(X)) { ind[i] = all(X[i,] >= lower & X[i,] <= upper) } if (!all(ind)) { stop("some of the data points are not in the region lower <= X <= upper") } if ((length(lower.bounds) > 1L || length(upper.bounds) > 1L || lower.bounds[1L] != -Inf || upper.bounds[1L] != Inf) && method != "L-BFGS-B") { warning("bounds can only be used with method L-BFGS-B") method <- "L-BFGS-B" } # parameter vector theta = mu_1,...,mu_n,vech(sigma) if (cholesky) { # if cholesky == TRUE use Cholesky decomposition of sigma # t(chol(sigma)) returns a lower triangular matrix which can be vectorized using vech() theta <- c(start$mu, vech2(t(chol(start$sigma)))) } else { theta <- c(start$mu, vech2(start$sigma)) } # names for mean vector elements : mu_i nmmu <- paste("mu_",1:n,sep="") # names for sigma elements : sigma_ij nmsigma <- paste("sigma_",vech2(outer(1:n,1:n, paste, sep=".")),sep="") names(theta) <- c(nmmu, nmsigma) # negative log-likelihood-Funktion dynamisch definiert mit den formals(), # damit mle() damit arbeiten kann # # Eigentlich wollen wir eine Funktion negloglik(theta) mit einem einzigen Parametersvektor theta. # Die Methode mle() braucht aber eine "named list" der Parameter (z.B. mu_1=0, mu_2=0, sigma_1=2,...) und entsprechend eine # Funktion negloglik(mu1, mu2, sigma1,...) # Da wir nicht vorher wissen, wie viele Parameter zu schtzen sind, definieren wir die formals() # dynamisch um # # @param x dummy/placeholder argument, will be overwritten by formals() with list of skalar parameters negloglik <- function(x) { nf <- names(formals()) # recover parameter vector from named arguments (mu1=...,mu2=...,sigma11,sigma12 etc). 
# stack all named arguments to parameter vector theta theta <- sapply(nf, function(x) {eval(parse(text=x))}) # mean vector herholen mean <- theta[1:n] # Matrix fr sigma bauen if (cholesky) { L <- inv_vech(theta[-(1:n)]) L[lower.tri(L, diag=FALSE)] <- 0 # L entspricht jetzt chol(sigma), obere Dreiecksmatrix sigma <- t(L) %*% L } else { sigma <- inv_vech(theta[-(1:n)]) } # if sigma is not positive definite, return MAXVALUE if (det(sigma) <= 0 || any(diag(sigma) < 0)) { return(.Machine$integer.max) } # Log-Likelihood # Wieso hier nur dmvnorm() : Wegen Dichte = Conditional density f <- -(sum(dmvnorm(X, mean, sigma, log=TRUE)) - nrow(X) * log(pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma))) if (is.infinite(f) || is.na(f)) { # cat("negloglik=",f," for parameter vector ",theta,"\n") # "L-BFGS-B" requires a finite function value, other methods can handle infinte values like +Inf # return a high finite value, e.g. integer.max, so optimize knows this is the wrong place to be # TODO: check whether to return +Inf or .Machine$integer.max, certain algorithms may prefer +Inf, others a finite value #return(+Inf) return(.Machine$integer.max) } f } formals(negloglik) <- theta # for method "L-BFGS-B" pass bounds parameter "lower.bounds" and "upper.bounds" # under names "lower" and "upper" if ((length(lower.bounds) > 1L || length(upper.bounds) > 1L || lower.bounds[1L] != -Inf || upper.bounds[1L] != Inf) && method == "L-BFGS-B") { mle.fit <- eval.parent(substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, lower=lower.bounds, upper=upper.bounds, ...))) #mle.call <- substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, lower=lower.bounds, upper=upper.bounds, ...)) #mle.fit <- mle(negloglik, start=as.list(theta), fixed=fixed, method = method, lower=lower.bounds, upper=upper.bounds, ...) 
#mle.fit@call <- mle.call return (mle.fit) } else { # we need evaluated arguments in the call for profile(mle.fit) mle.fit <- eval.parent(substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, ...))) #mle.call <- substitute(mle(negloglik, start=as.list(theta), fixed=fixed, method = method, ...)) #mle.fit <- mle(negloglik, start=as.list(theta), fixed=fixed, method = method, ...) #mle.fit@call <- mle.call return (mle.fit) } } # Beispiel: if (FALSE) { lower=c(-1,-1) upper=c(1, 2) mu =c(0, 0) sigma=matrix(c(1, 0.7, 0.7, 2), 2, 2) # generate random samples X <- rtmvnorm(n=500, mu, sigma, lower, upper) method <- "BFGS" # estimate mu and sigma from random samples # Standard-Startwerte mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper) mle.fit1a <- mle.tmvnorm(X, lower=lower, upper=upper, cholesky=TRUE) mle.fit1b <- mle.tmvnorm(X, lower=lower, upper=upper, method="L-BFGS-B", lower.bounds=c(-1, -1, 0.001, -Inf, 0.001), upper.bounds=c(2, 2, 2, 2, 3)) Rprof("mle.profile1.out") mle.profile1 <- profile(mle.fit1, X, method="BFGS", trace=TRUE) Rprof(NULL) summaryRprof("mle.profile1.out") confint(mle.profile1) par(mfrow=c(2,2)) plot(mle.profile1) summary(mle.fit1) logLik(mle.fit1) vcov(mle.fit1) #TODO: confint(mle.fit1) #profile(mle.fit1) # andere Startwerte, nher am wahren Ergebnis mle.fit2 <- mle.tmvnorm(x=X, lower=lower, upper=upper, start=list(mu=c(0.1, 0.1), sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2))) # --> funktioniert jetzt besser... summary(mle.fit2) # andere Startwerte, nimm mean und Kovarianz aus den Daten (stimmt zwar nicht, ist aber sicher # ein besserer Startwert als 0 und diag(n). 
mle.fit3 <- mle.tmvnorm(x=X, lower=lower, upper=upper, start=list(mu=colMeans(X), sigma=cov(X))) summary(mle.fit3) }tmvtnorm/R/dtmvt.R0000644000176200001440000000410312567600057013630 0ustar liggesusers# Density function for the truncated multivariate t-distribution # # Author: stefan ############################################################################### # Density function for the truncated multivariate t-distribution # @param x # @param mean # @param sigma # @param df degrees of freedom parameter # @param log dtmvt <- function(x, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower= rep( -Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), log = FALSE){ # Check of additional inputs like x if (is.vector(x)) { x <- matrix(x, ncol = length(x)) } # Anzahl der Beobachtungen T = nrow(x) # check for each row if in support region insidesupportregion <- logical(T) for (i in 1:T) { insidesupportregion[i] = all(x[i,] >= lower & x[i,] <= upper & !any(is.infinite(x))) } # density value for points outside the support region dv = if (log) { -Inf } else { 0 } # conditional density f <- ifelse(insidesupportregion, dmvt(x, delta=mean, sigma=sigma, df=df, log=log) / pmvt(lower=lower, upper=upper, delta=mean, sigma=sigma, df=df, type="shifted"), dv) return(f) } if (FALSE) { # Example x1<-seq(-2, 3, by=0.1) x2<-seq(-2, 3, by=0.1) mean=c(0,0) sigma=matrix(c(1, -0.5, -0.5, 1), 2, 2) lower=c(-1,-1) density<-function(x) { z=dtmvt(x, mean=mean, sigma=sigma, lower=lower) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute multivariate-t density d for grid d=fgrid(x1, x2, function(x) dtmvt(x, mean=mean, sigma=sigma, lower=lower)) # compute multivariate normal density d for grid d2=fgrid(x1, x2, function(x) dtmvnorm(x, mean=mean, sigma=sigma, lower=lower)) # plot density as contourplot contour(x1, x2, d, nlevels=5, main="Truncated 
Multivariate t Density", xlab=expression(x[1]), ylab=expression(x[2])) contour(x1, x2, d2, nlevels=5, add=TRUE, col="red") abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) } tmvtnorm/R/ptmvnorm-marginal.R0000644000176200001440000000270612567600057016153 0ustar liggesusers# Verteilungsfunktion fr die eindimensionale Randdichte f(xn) einer Truncated Multivariate Normal Distribution, # vgl. Jack Cartinhour (1990) "One-dimensional marginal density functions of a truncated multivariate normal density function" fr die Dichtefunktion # # @param xn Vektor der Lnge l von Punkten, an dem die Verteilungsfunktion ausgewertet wird # @param i Index (1..n) dessen Randdichte berechnet werden soll # @param mean (nx1) Mittelwertvektor # @param sigma (nxn)-Kovarianzmatrix # @param lower,upper Trunkierungsvektor lower <= x <= upper ptmvnorm.marginal <- function(xn, n=1, mean=rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean))) { # check of standard tmvnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper if (n < 1 || n > length(mean) || !is.numeric(n) || length(n) > 1 || !n %in% 1:length(mean)) { stop("n must be a integer scalar in 1..length(mean)") } # Anzahl der Dimensionen k = length(mean) Fx = numeric(length(xn)) upper2 = upper alpha = pmvnorm(lower = lower, upper = upper, mean = mean, sigma = sigma) for (i in 1:length(xn)) { upper2[n] = xn[i] Fx[i] = pmvnorm(lower=lower, upper=upper2, mean=mean, sigma=sigma) } return (Fx/alpha) } tmvtnorm/R/ptmvnorm.R0000644000176200001440000000361512567600057014363 0ustar liggesusers # Verteilungsfunktion der truncated multivariate normal distribution # # @param lower unterer Trunkierungsvektor (k x 1) mit lower <= x <= upper # @param upper oberer Trunkierungsvektor (k x 1) mit lower <= x <= upper ptmvnorm <- function(lowerx, upperx, mean=rep(0, length(lowerx)), sigma, lower = 
rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) { # check of standard tmvtnorm arguments cargs <- checkTmvArgs(mean, sigma, lower, upper) mean <- cargs$mean sigma <- cargs$sigma lower <- cargs$lower upper <- cargs$upper # check of additional arguments lowerx and upperx if (is.null(lowerx) || any(is.na(lowerx))) stop(sQuote("lowerx"), " not specified or contains NA") if (is.null(upperx) || any(is.na(upperx))) stop(sQuote("upperx"), " not specified or contains NA") if (!is.numeric(lowerx) || !is.vector(lowerx)) stop(sQuote("lowerx"), " is not a numeric vector") if (!is.numeric(upperx) || !is.vector(upperx)) stop(sQuote("upperx"), " is not a numeric vector") if (length(lowerx) != length(lower) || length(lower) != length(upperx)) stop("lowerx an upperx must have the same length as lower and upper!") if (any(lowerx>=upperx)) stop("lowerx must be smaller than or equal to upperx (lowerx<=upperx)") # Aufpassen: # Wir mssen garantieren, dass nur innerhalb des Support-Bereichs lower <= x <= upper integriert wird. Sonst kann Ergebnis >= 1 rauskommen. # Wenn einzelne Komponenten von lowerx <= lower sind, dann von der Untergrenze lower integrieren. Analog fr upperx >= upper f <- pmvnorm(lower=pmax(lowerx, lower), upper=pmin(upperx, upper), mean=mean, sigma=sigma, maxpts = maxpts, abseps = abseps, releps = releps) / pmvnorm(lower=lower, upper=upper, mean=mean, sigma=sigma, maxpts = maxpts, abseps = abseps, releps = releps) return(f) } tmvtnorm/vignettes/0000755000176200001440000000000012567600065014157 5ustar liggesuserstmvtnorm/vignettes/tmvtnorm.bib0000644000176200001440000001303712567600060016522 0ustar liggesusers% This file was created with JabRef 2.5. % Encoding: Cp1252 @BOOK{Geweke2005, title = {Contemporary Bayesian Econometrics and Statistics}, publisher = {John Wiley and Sons}, year = {2005}, author = {John F. Geweke}, file = {:John Geweke. 
Contemporary Bayesian Econometrics and Statistics (Wiley,2005)(ISBN 0471679321)(308s).pdf:PDF}, owner = {stefan}, timestamp = {2007.01.30} } @ELECTRONIC{Geweke1991, author = {John F. Geweke}, year = {1991}, title = {Effcient simulation from the multivariate normal and Student-t distributions subject to linear constraints and the evaluation of constraint probabilities}, howpublished = {http://www.biz.uiowa.edu/faculty/jgeweke/papers/paper47/paper47.pdf}, file = {:Geweke1991.pdf:PDF}, owner = {stefan}, timestamp = {2010.01.22} } @INPROCEEDINGS{Geweke1991a, author = {John F. Geweke}, title = {Effcient Simulation from the Multivariate Normal and Student-t Distributions Subject to Linear Constraints}, booktitle = {Computer Science and Statistics. Proceedings of the 23rd Symposium on the Interface. Seattle Washington, April 21-24, 1991}, year = {1991}, pages = {571-578}, file = {:Geweke1991a.pdf:PDF}, owner = {stefan}, timestamp = {2010.02.09} } @BOOK{Greene2003, title = {Econometric Analysis}, publisher = {Prentice-Hall}, year = {2003}, author = {William H. Greene}, edition = {5}, file = {Greene - Econometrics.pdf:Greene - Econometrics.pdf:PDF}, owner = {stefan}, timestamp = {2005.12.13} } @UNPUBLISHED{Griffiths2002, author = {William Griffiths}, title = {A {G}ibbs' Sampler for the Parameters of a Truncated Multivariate Normal Distribution}, note = {University of Melbourne}, year = {2002}, file = {:Griffiths2002.pdf:PDF}, institution = {The University of Melbourne}, number = {856}, owner = {stefan}, timestamp = {2012.01.04}, type = {Department of Economics - Working Papers Series}, url = {http://ideas.repec.org/p/mlb/wpaper/856.html} } @INBOOK{Griffiths2004, chapter = {A {G}ibbs' sampler for the parameters of a truncated multivariate normal distribution}, pages = {75 - 91}, title = {Contemporary Issues In Economics And Econometrics: Theory and Application}, publisher = {Edward Elgar Publishing}, year = {2004}, editor = {Ralf Becker and Stan Hurn}, author = {William E. 
Griffiths}, journal = {Contemporary issues in economics and econometrics}, owner = {stefan}, timestamp = {2009.09.09} } @INPROCEEDINGS{Kotecha1999, author = {Kotecha, J. H. and Djuric, P. M.}, title = {{G}ibbs sampling approach for generation of truncated multivariate Gaussian random variables}, booktitle = {ICASSP '99: Proceedings of the Acoustics, Speech, and Signal Processing, 1999. on 1999 IEEE International Conference}, year = {1999}, pages = {1757--1760}, address = {Washington, DC, USA}, publisher = {IEEE Computer Society}, doi = {http://dx.doi.org/10.1109/ICASSP.1999.756335}, file = {:Kotecha1999.pdf:PDF}, isbn = {0-7803-5041-3}, journal = {IEEE Computer Society}, owner = {stefan}, timestamp = {2009.04.16} } @MANUAL{tmvtnorm-0.7, title = {{tmvtnorm}: Truncated Multivariate Normal Distribution}, author = {Stefan Wilhelm}, year = {2009}, note = {R package version 0.7-2}, owner = {stefan}, timestamp = {2009.10.05}, url = {http://www.r-project.org} } @MANUAL{tmvtnorm-1.2, title = {{tmvtnorm}: Truncated Multivariate Normal and {S}tudent t Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2011}, note = {R package version 1.2-3}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @MANUAL{tmvtnorm-1.3, title = {{tmvtnorm}: Truncated Multivariate Normal and {S}tudent t Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2011}, note = {R package version 1.3-1}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @MANUAL{tmvtnorm-1.4, title = {{tmvtnorm}: Truncated Multivariate Normal and {S}tudent t Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2011}, note = {R package version 1.4-1}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @ARTICLE{RJournal:Wilhelm+Manjunath:2010, author = {Stefan Wilhelm and B. G. 
Manjunath}, title = {{tmvtnorm: A Package for the Truncated Multivariate Normal Distribution}}, journal = {The R Journal}, year = {2010}, volume = {2}, pages = {25--29}, number = {1}, month = {June}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://journal.r-project.org/archive/2010-1/RJournal_2010-1_Wilhelm+Manjunath.pdf} } @MANUAL{tmvtnorm-0.9, title = {{tmvtnorm}: Truncated Multivariate Normal Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2010}, note = {R package version 0.9-2}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @MANUAL{tmvtnorm-1.1, title = {{tmvtnorm}: Truncated Multivariate Normal Distribution}, author = {Stefan Wilhelm and B G Manjunath}, year = {2010}, note = {R package version 1.1-0}, owner = {stefan}, timestamp = {2012.01.04}, url = {http://CRAN.R-project.org/package=tmvtnorm} } @comment{jabref-meta: selector_publisher:} @comment{jabref-meta: selector_author:} @comment{jabref-meta: selector_journal:} @comment{jabref-meta: selector_keywords:} tmvtnorm/vignettes/GibbsSampler.Rnw0000644000176200001440000002337412567600060017225 0ustar liggesusers%\VignetteIndexEntry{A short description of the Gibbs Sampler} \documentclass[a4paper]{article} \usepackage{Rd} \usepackage{amsmath} \usepackage{natbib} \usepackage{palatino,mathpazo} \usepackage{Sweave} %\newcommand{\pkg}[1]{\textbf{#1}} \newcommand{\vecb}[1]{\ensuremath{\boldsymbol{\mathbf{#1}}}} \def\bfx{\mbox{\boldmath $x$}} \def\bfy{\mbox{\boldmath $y$}} \def\bfz{\mbox{\boldmath $z$}} \def\bfalpha{\mbox{\boldmath $\alpha$}} \def\bfbeta{\mbox{\boldmath $\beta$}} \def\bfmu{\mbox{\boldmath $\mu$}} \def\bfa{\mbox{\boldmath $a$}} \def\bfb{\mbox{\boldmath $b$}} \def\bfu{\mbox{\boldmath $u$}} \def\bfSigma{\mbox{\boldmath $\Sigma$}} \def\bfD{\mbox{\boldmath $D$}} \def\bfH{\mbox{\boldmath $H$}} \def\bfT{\mbox{\boldmath $T$}} \def\bfX{\mbox{\boldmath $X$}} \def\bfY{\mbox{\boldmath $X$}} \title{Gibbs Sampler for the Truncated 
Multivariate Normal Distribution} \author{Stefan Wilhelm\thanks{wilhelm@financial.com}} \begin{document} \maketitle In this note we describe two ways of generating random variables with the Gibbs sampling approach for a truncated multivariate normal variable $\bfx$, whose density function can be expressed as: \begin{eqnarray*} f(\bfx,\bfmu,\bfSigma,\bfa,\bfb) & = & \frac{ \exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\}} } { \int_{\bfa}^{\bfb}{\exp{\left\{ -\frac{1}{2} (\bfx-\bfmu)' \bfSigma^{-1} (\bfx-\bfmu) \right\} } d\bfx } } \end{eqnarray*} for $\bfa \le \bfx \le \bfb$ and $0$ otherwise.\\ \par The first approach, as described by \cite{Kotecha1999}, uses the covariance matrix $\bfSigma$ and has been implemented in the R package \pkg{tmvtnorm} since version 0.9 (\cite{tmvtnorm-0.9}). The second way is based on the works of \cite{Geweke1991,Geweke2005} and uses the precision matrix $\bfH = \bfSigma^{-1}$. As will be shown below, the usage of the precision matrix offers some computational advantages, since it does not involve matrix inversions and is therefore favorable in higher dimensions and settings where the precision matrix is readily available. Applications are for example the analysis of spatial data, such as from telecommunications or social networks.\\ \par Both versions of the Gibbs sampler can also be used for general linear constraints $\bfa \le \bfD \bfx \le \bfb$, what we will show in the last section. The function \code{rtmvnorm()} in the package \pkg{tmvtnorm} contains the \R{} implementation of the methods described in this note (\cite{tmvtnorm-1.3}). \section{Gibbs Sampler with convariance matrix $\bfSigma$} We describe here a Gibbs sampler for sampling from a truncated multinormal distribution as proposed by \cite{Kotecha1999}. It uses the fact that conditional distributions are truncated normal again. 
Kotecha use full conditionals $f(x_i | x_{-i}) = f(x_i | x_1,\ldots,x_{i-1},x_{i+1},\ldots,x_{d})$.\\ \par We use the fact that the conditional density of a multivariate normal distribution is multivariate normal again. We cite \cite{Geweke2005}, p.171 for the following theorem on the Conditional Multivariate Normal Distribution.\\ Let $\bfz = \left( \begin{array}{c} \bfx \\ \bfy \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_x \\ \bfmu_y \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{xx} & \bfSigma_{xy} \\ \bfSigma_{yx} & \bfSigma_{yy} \end{array} \right]$\\ Denote the corresponding precision matrix \begin{equation} \bfH = \bfSigma^{-1} = \left[ \begin{array}{cc} \bfH_{xx} & \bfH_{xy} \\ \bfH_{yx} & \bfH_{yy} \end{array} \right] \end{equation} Then the distribution of $\bfy$ conditional on $\bfx$ is normal with variance \begin{equation} \bfSigma_{y.x} = \bfSigma_{yy} - \bfSigma_{yx} \bfSigma_{xx}^{-1} \bfSigma_{xy} = \bfH_{yy}^{-1} \end{equation} and mean \begin{equation} \bfmu_{y.x} = \bfmu_{y} + \bfSigma_{yx} \bfSigma_{xx}^{-1} (\bfx - \bfmu_x) = \bfmu_y - \bfH_{yy}^{-1} \bfH_{yx}(\bfx - \bfmu_x) \end{equation} \par In the case of the full conditionals $f(x_i | x_{-i})$, which we will denote as $i.-i$ this results in the following formulas: $\bfz = \left( \begin{array}{c} \bfx_i \\ \bfx_{-i} \end{array} \right) \sim N(\bfmu, \bfSigma)$ with $\bfmu = \left( \begin{array}{c}\bfmu_i \\ \bfmu_{-i} \end{array} \right)$ and $\bfSigma = \left[ \begin{array}{cc} \bfSigma_{ii} & \bfSigma_{i,-i} \\ \bfSigma_{-i,i} & \bfSigma_{-i,-i} \end{array} \right]$ Then the distribution of $i$ conditional on $-i$ is normal with variance \begin{equation} \bfSigma_{i.-i} = \bfSigma_{ii} - \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} \bfSigma_{-i,i} = \bfH_{ii}^{-1} \end{equation} and mean \begin{equation} \bfmu_{i.-i} = \bfmu_{i} + \bfSigma_{i,-i} \bfSigma_{-i,-i}^{-1} (\bfx_{-i} - \bfmu_{-i}) = \bfmu_i - \bfH_{ii}^{-1} 
\bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i}) \end{equation} We can then construct a Markov chain which continously draws from $f(x_i | x_{-i})$ subject to $a_i \le x_i \le b_i$. Let $\bfx^{(j)}$ denote the sample drawn at the $j$-th MCMC iteration. The steps of the Gibbs sampler for generating $N$ samples $\bfx^{(1)},\ldots,\bfx^{(N)}$ are: \begin{itemize} \item Since the conditional variance $\bfSigma_{i.-i}$ is independent from the actual realisation $\bfx^{(j)}_{-i}$, we can well precalculate it before running the Markov chain. \item Choose a start value $\bfx^{(0)}$ of the chain. \item In each round $j=1,\ldots,N$ we go from $i=1,\ldots,d$ and sample from the conditional density $x^{(j)}_i | x^{(j)}_1,\ldots,x^{(j)}_{i-1},x^{(j-1)}_{i+1},\ldots,x^{(j-1)}_{d}$. \item Draw a uniform random variate $U \sim Uni(0, 1)$. This is where our approach slightly differs from \cite{Kotecha1999}. They draw a normal variate $y$ and then apply $\Phi(y)$, which is basically uniform. \item We draw from univariate conditional normal distributions with mean $\mu$ and variance $\sigma^2$. See for example \cite{Greene2003} or \cite{Griffiths2004} for a transformation between a univariate normal random $y \sim N(\mu,\sigma^2)$ and a univariate truncated normal variate $x \sim TN(\mu,\sigma^2, a, b)$. 
For each realisation $y$ we can find a $x$ such as $P(Y \le y) = P(X \le x)$: \begin{equation*} \frac{ \Phi \left( \frac{x - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } { \Phi \left( \frac{b - \mu}{\sigma} \right) - \Phi \left( \frac{a - \mu}{\sigma} \right) } = \Phi \left( \frac{y - \mu}{\sigma} \right) = U \end{equation*} \item Draw $\bfx_{i.-i}$ from conditional univariate truncated normal distribution \\ $TN(\bfmu_{i.-i}, \bfSigma_{i.-i}, a_i, b_i)$ by \begin{equation} \begin{split} \bfx_{i.-i} & = \bfmu_{i.-i} + \\ & \sigma_{i.-i} \Phi^{-1} \left[ U \left( \Phi \left( \frac{b_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) - \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right) + \Phi \left( \frac{a_i - \bfmu_{i.-i}}{\sigma_{i.-i}} \right) \right] \end{split} \end{equation} \end{itemize} \section{Gibbs Sampler with precision matrix H} The Gibbs Sampler stated in terms of the precision matrix $\bfH = \bfSigma^{-1}$ instead of the covariance matrix $\bfSigma$ is much easier to write and to implement: Then the distribution of $i$ conditional on $-i$ is normal with variance \begin{equation} \bfSigma_{i.-i} = \bfH_{ii}^{-1} \end{equation} and mean \begin{equation} \bfmu_{i.-i} = \bfmu_i - \bfH_{ii}^{-1} \bfH_{i,-i}(\bfx_{-i} - \bfmu_{-i}) \end{equation} Most importantly, if the precision matrix $\bfH$ is known, the Gibbs sampler does only involve matrix inversions of $\bfH_{ii}$ which in our case is a diagonal element/scalar. Hence, from the computational and performance perspective, especially in high dimensions, using $\bfH$ rather than $\bfSigma$ is preferable. When using $\bfSigma$ in $d$ dimensions, we have to solve for $d$ $(d-1) \times (d-1)$ matrices $\bfSigma_{-i,-i}$, $i=1,\ldots,d$, which can be quite substantial computations. \section{Gibbs Sampler for linear constraints} In this section we present the Gibbs sampling for general linear constraints based on \cite{Geweke1991}. 
We want to sample from $\bfx \sim N(\bfmu, \bfSigma)$ subject to linear constraints $\bfa \le \bfD \bfx \le \bfb$ for a full-rank matrix $\bfD$.\\ Defining \begin{equation} \bfz = \bfD \bfx - \bfD \bfmu, \end{equation} we have $E[\bfz] = \bfD E[\bfx] - \bfD \bfmu = 0$ and $Var[\bfz] = \bfD Var[\bfx] \bfD' = \bfD \bfSigma \bfD'$. Hence, this problem can be transformed to the rectangular case $\bfalpha \le \bfz \le \bfbeta$ with $\bfalpha = \bfa - \bfD \bfmu$ and $\bfbeta = \bfb - \bfD \bfmu$. It follows $\bfz \sim N(0, \bfT)$ with $\bfT = \bfD \bfSigma \bfD'$.\\ In the precision matrix case, the corresponding precision matrix of the transformed problem will be $\bfT^{-1} = ( \bfD \bfSigma \bfD' )^{-1} = \bfD'^{-1} \bfH \bfD^{-1}$. We can then sample from $\bfz$ the way described in the previous sections (either with covariance or precision matrix approach) and then transform $\bfz$ back to $\bfx$ by \begin{equation} \bfx = \bfmu + \bfD^{-1} \bfz \end{equation} \bibliographystyle{plainnat} \bibliography{tmvtnorm} \end{document}tmvtnorm/MD50000644000176200001440000000451312567714017012465 0ustar liggesusers5e527d36d2a10da9f5c3609646202c8f *DESCRIPTION ced85104fa3b2ca276c9b58fcfa393e7 *NAMESPACE 11d9def432117854dd9cbf2005289ef3 *NEWS 8a43a80501e4f0d284d8dad44b1ff01f *R/bivariate-marginal-density.R a1a7ec7c15e68c65e72badf55e0ddb4a *R/checkTmvArgs.R 8f899ed74ffca246bfdb1ab32b6cd4a5 *R/dtmvnorm-marginal.R 405b00c540e5a3196dc7da3208cc6ca3 *R/dtmvnorm.R a17c645dd934398b4640de0fd7f3a54a *R/dtmvt.R c96ae27fd43a21521adef2e6903591be *R/mtmvnorm.R a96cbc560f21e70baf7c20844adafd6c *R/ptmvnorm-marginal.R 00f8f84be42685f1a3f29c5719780191 *R/ptmvnorm.R 3e75d9a8d7fb267900d6d0a954f22b96 *R/ptmvt-marginal.R 09d837b4d67158a40df214bd77d14feb *R/ptmvt.R 6db4e4dd6eed11a0bfdfb2931f90ad6c *R/qtmvnorm-marginal.R 4a1ba3e6e53994fd23aa5f16253be134 *R/rtmvnorm.R 0fd54c003f22e8224e09b356360d903a *R/rtmvnorm2.R 919b77e2c112e757564e7c45de982b19 *R/rtmvt.R a49359a68b29c73b8ffb8d8ee1f06fcb 
*R/tmvnorm-estimation-GMM.R 77f5cbab362cfbfa34a5095eec2656a5 *R/tmvnorm-estimation.R ca06adb6608b2283d5848eed35aecab9 *build/vignette.rds 05f0324f7a52081a7faca3fe125d2879 *demo/00Index 43befa36b93301eb64f743780e3ceac8 *demo/demo1.R 8cb92af9f083a0e444c58f5c82d79ace *demo/demo2.R 0dc224491642cea64607b4d81b2a5abf *inst/CITATION f55b59ffbaa8c7e74942f7c32ee74430 *inst/doc/GibbsSampler.Rnw 2aa935a1910e5d9c3bb0e5947261c27e *inst/doc/GibbsSampler.pdf 3419ad200c683a01965d1d351d33c39a *man/dmvnorm.marginal.Rd 46aec3318f77c2054b554dd61d0d8773 *man/dtmvnorm.marginal2.Rd d5abcd1b9309d33ef4dff8a2abc8c0a3 *man/dtmvt.Rd 32edf9fceff1ff4387abd33872e72030 *man/gmm.tmvnorm.Rd 1827e59a9f4bfa78a87b4115a64c4ea5 *man/mle.tmvnorm.Rd cff6273c570a1521628e59817570af9b *man/mtmvnorm.Rd b774db13d8f8cd2cc45095571f6db206 *man/ptmvnorm.Rd c48a6ae141dea1f55e97ea86275ee3f5 *man/ptmvnorm.marginal.Rd 10385b05ca941f63b5d13015ad2b4c9f *man/ptmvt.Rd 342f510aae0e72a5d7600b4e3ca1db11 *man/qtmvnorm-marginal.Rd 760e93e78948d97c057faeab0a3ea195 *man/rtmvnorm.Rd 6679f9d0fc409cd57555b9a2f0af55d2 *man/rtmvnorm2.Rd 53a03043903568c0ea2662e21b6fe984 *man/rtmvt.Rd 56ff9ee8d7d9d645a83ccf47d5fe5b1e *man/tmvnorm.Rd ae25215cedbedbe2d9de559d8f4f5d66 *src/Fortran2CWrapper.c b250052c55dffdfaf7f35a9f61fb22d0 *src/Makevars 36b6e5d6d9569aa0771c5173f60e7027 *src/linked_list.f90 61b7c44d111fc315151ce5cca252c707 *src/rtmvnormgibbs.f90 f55b59ffbaa8c7e74942f7c32ee74430 *vignettes/GibbsSampler.Rnw 28065219dbfddba2e85a093c5a456c95 *vignettes/tmvtnorm.bib tmvtnorm/build/0000755000176200001440000000000012567600065013246 5ustar liggesuserstmvtnorm/build/vignette.rds0000644000176200001440000000033212567600065015603 0ustar liggesusers}O @^, w]:t]uuewA٬i 0]}6Dpd\-TIVTI˒ s*3ZxyUI#6/ ,>ˆTUD[z>[ńN/L cj%&3]8ihw=)449Q$,5'xtmvtnorm/DESCRIPTION0000644000176200001440000000160212567714017013657 0ustar liggesusersPackage: tmvtnorm Version: 1.4-10 Date: 2015-08-26 Title: Truncated Multivariate Normal and Student t Distribution Author: Stefan Wilhelm 
with contributions from Manjunath B G Maintainer: Stefan Wilhelm Imports: stats, methods Depends: R (>= 1.9.0), mvtnorm, utils, Matrix, stats4, gmm Encoding: latin1 Suggests: lattice Description: Random number generation for the truncated multivariate normal and Student t distribution. Computes probabilities, quantiles and densities, including one-dimensional and bivariate marginal densities. Computes first and second moments (i.e. mean and covariance matrix) for the double-truncated multinormal case. License: GPL (>= 2) URL: http://www.r-project.org NeedsCompilation: yes Packaged: 2015-08-27 12:19:01 UTC; stefan_admin Repository: CRAN Date/Publication: 2015-08-28 01:06:55 tmvtnorm/man/0000755000176200001440000000000012567600065012722 5ustar liggesuserstmvtnorm/man/rtmvnorm.Rd0000644000176200001440000003456212567600060015102 0ustar liggesusers\name{rtmvnorm} \alias{rtmvnorm} \alias{rtmvnorm.sparseMatrix} \title{Sampling Random Numbers From The Truncated Multivariate Normal Distribution} \description{ This function generates random numbers from the truncated multivariate normal distribution with mean equal to \code{mean} and covariance matrix \code{sigma} (or alternatively precision matrix \code{H}), lower and upper truncation points \code{lower} and \code{upper} with either rejection sampling or Gibbs sampling. } \usage{ rtmvnorm(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), D = diag(length(mean)), H = NULL, algorithm=c("rejection", "gibbs", "gibbsR"), ...) rtmvnorm.sparseMatrix(n, mean = rep(0, nrow(H)), H = sparseMatrix(i=1:length(mean), j=1:length(mean), x=1), lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), ...) } \arguments{ \item{n}{Number of random points to be sampled. 
Must be an integer \eqn{\ge 1}{>= 1}.} \item{mean}{Mean vector, default is \code{rep(0, length = ncol(x))}.} \item{sigma}{Covariance matrix, default is \code{diag(ncol(x))}.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{D}{Matrix for linear constraints, defaults to diagonal matrix.} \item{H}{Precision matrix, default is \code{NULL}.} \item{algorithm}{Method used, possible methods are rejection sampling ("rejection", default), the Fortan Gibbs sampler ("gibbs") and the old Gibbs sampler implementation in R ("gibbsR").} \item{...}{additional parameters for Gibbs sampling, given to the internal method \code{rtmvnorm.gibbs()}, such as \code{burn.in.samples}, \code{start.value} and \code{thinning}, see details} } \details{ The generation of random numbers from a truncated multivariate normal distribution is done using either rejection sampling or Gibbs sampling. \bold{Rejection sampling}\cr Rejection sampling is done from the standard multivariate normal distribution. So we use the function \code{\link[mvtnorm]{rmvnorm}} of the \pkg{mvtnorm} package to generate proposals which are either accepted if they are inside the support region or rejected. In order to speed up the generation of N samples from the truncated distribution, we first calculate the acceptance rate alpha from the truncation points and then generate N/alpha samples iteratively until we have got N samples. This typically does not take more than 2-3 iterations. Rejection sampling may be very inefficient when the support region is small (i.e. in higher dimensions) which results in very low acceptance rates alpha. In this case the Gibbs sampler is preferable. \bold{Gibbs sampling}\cr The Gibbs sampler samples from univariate conditional distributions, so all samples can be accepted except for a burn-in period. 
The number of burn-in samples to be discarded can be specified, as well as a start value of the chain. If no start value is given, we determine a start value from the support region using either lower bound or upper bound if they are finite, or 0 otherwise. The Gibbs sampler has been reimplemented in Fortran 90 for performance reasons (\code{algorithm="gibbs"}). The old R implementation is still accessible through \code{algorithm="gibbsR"}. The arguments to be passed along with \code{algorithm="gibbs"} or \code{algorithm="gibbsR"} are: \describe{ \item{\code{burn.in.samples}}{number of samples in Gibbs sampling to be discarded as burn-in phase, must be non-negative.} \item{\code{start.value}}{Start value (vector of length \code{length(mean)}) for the MCMC chain. If one is specified, it must lie inside the support region (\eqn{lower <= start.value <= upper}). If none is specified, the start value is taken componentwise as the finite lower or upper boundaries respectively, or zero if both boundaries are infinite. Defaults to NULL.} \item{\code{thinning}}{Thinning factor for reducing autocorrelation of random points in Gibbs sampling. Must be an integer >= 1. We create a Markov chain of length \code{(n*thinning)} and take only those samples \code{j=1:(n*thinning)} where \code{j \%\% thinning == 0} Defaults to 1 (no thinning of the chain).} } \bold{Sampling with linear constraints}\cr We extended the method to also simulate from a multivariate normal distribution subject to general linear constraints \eqn{lower <= D x <= upper}. For general D, both rejection sampling or Gibbs sampling according to Geweke (1991) are available. \bold{Gibbs sampler and the use of the precision matrix H}\cr Why is it important to have a random sampler that works with the precision matrix? 
Especially in Bayesian and spatial statistics, there are a number of high-dimensional applications where the precision matrix \code{H} is readily available, but is sometimes nearly singular and cannot be easily inverted to sigma. Additionally, it turns out that the Gibbs sampler formulas are much simpler in terms of the precision matrix than in terms of the covariance matrix. See the details of the Gibbs sampler implementation in the package vignette or for example Geweke (2005), pp.171-172. (Thanks to Miguel Godinho de Matos from Carnegie Mellon University for pointing me to this.) Therefore, we now provide an interface for the direct use of the precision matrix \code{H} in \code{rtmvnorm()}. \bold{Gibbs sampler with sparse precision matrix H}\cr The size of the covariance matrix \code{sigma} or precision matrix \code{H} - if expressed as a dense \code{\link[base]{matrix}} - grows quadratic with the number of dimensions d. For high-dimensional problems (such as d > 5000), it is no longer efficient and appropriate to work with dense matrix representations, as one quickly runs into memory problems.\cr It is interesting to note that in many applications the precision matrix, which holds the conditional dependencies, will be sparse, whereas the covariance matrix will be dense. Hence, expressing H as a sparse matrix will significantly reduce the amount of memory to store this matrix and allows much larger problems to be handled. In the current version of the package, the precision matrix (not \code{sigma} since it will be dense in most cases) can be passed to \code{rtmvnorm.sparseMatrix()} as a \code{\link[Matrix]{sparseMatrix}} from the \code{Matrix} package. See the examples section below for a usage example. 
} \section{Warning}{ A word of caution is needed for useRs that are not familiar with Markov Chain Monte Carlo methods like Gibbs sampling: Rejection sampling is exact in the sense that we are sampling directly from the target distribution and the random samples generated are independent. So it is clearly the default method. Markov Chain Monte Carlo methods are only approximate methods, which may suffer from several problems: \itemize{ \item{Poor mixing} \item{Convergence problems} \item{Correlation among samples} } Diagnostic checks for Markov Chain Monte Carlo include trace plots, CUSUM plots and autocorrelation plots like \code{\link{acf}}. For a survey see for instance Cowles (1996). That is, consecutive samples generated from \code{rtmvnorm(..., algorithm=c("gibbs", "gibbsR"))} are correlated (see also example 3 below). One way of reducing the autocorrelation among the random samples is "thinning" the Markov chain, that is recording only a subset/subsequence of the chain. For example, one could record only every 100th sample, which clearly reduces the autocorrelation and "increases the independence". But thinning comes at the cost of higher computation times, since the chain has to run much longer. We refer to autocorrelation plots in order to determine optimal thinning. } \author{Stefan Wilhelm , Manjunath B G } \seealso{\code{\link{ptmvnorm}}, \code{\link[mvtnorm]{pmvnorm}}, \code{\link[mvtnorm]{rmvnorm}}, \code{\link[mvtnorm]{dmvnorm}}} \references{ Alan Genz, Frank Bretz, Tetsuhisa Miwa, Xuefei Mi, Friedrich Leisch, Fabian Scheipl, Torsten Hothorn (2009). mvtnorm: Multivariate Normal and t Distributions. R package version 0.9-7. URL \url{http://CRAN.R-project.org/package=mvtnorm} Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions \emph{Wiley & Sons}, pp. 70--73 Horrace, W. (2005). Some Results on the Multivariate Truncated Normal Distribution. 
\emph{Journal of Multivariate Analysis}, \bold{94}, 209--221 Jayesh H. Kotecha and Petar M. Djuric (1999). Gibbs Sampling Approach For Generation of Truncated Multivariate Gaussian Random Variables \emph{IEEE Computer Society}, 1757--1760 Cowles, M. and Carlin, B. (1996). Markov Chain Monte Carlo Convergence Diagnostics: A Comparative Review \emph{Journal of the American Statistical Association}, \bold{91}, 883--904 Geweke, J. F. (1991). Effcient Simulation from the Multivariate Normal and Student-t Distributions Subject to Linear Constraints \emph{Computer Science and Statistics. Proceedings of the 23rd Symposium on the Interface. Seattle Washington, April 21-24, 1991}, 571--578 Geweke, J. F. (2005). Contemporary Bayesian Econometrics and Statistics, \emph{Wiley & Sons}, pp.171--172 } \examples{ ################################################################################ # # Example 1: # rejection sampling in 2 dimensions # ################################################################################ sigma <- matrix(c(4,2,2,3), ncol=2) x <- rtmvnorm(n=500, mean=c(1,2), sigma=sigma, upper=c(1,0)) plot(x, main="samples from truncated bivariate normal distribution", xlim=c(-6,6), ylim=c(-6,6), xlab=expression(x[1]), ylab=expression(x[2])) abline(v=1, lty=3, lwd=2, col="gray") abline(h=0, lty=3, lwd=2, col="gray") ################################################################################ # # Example 2: # Gibbs sampler for 4 dimensions # ################################################################################ C <- matrix(0.8, 4, 4) diag(C) <- rep(1, 4) lower <- rep(-4, 4) upper <- rep(-1, 4) # acceptance rate alpha alpha <- pmvnorm(lower=lower, upper=upper, mean=rep(0,4), sigma=C) alpha # Gibbs sampler X1 <- rtmvnorm(n=20000, mean = rep(0,4), sigma=C, lower=lower, upper=upper, algorithm="gibbs", burn.in.samples=100) # Rejection sampling X2 <- rtmvnorm(n=5000, mean = rep(0,4), sigma=C, lower=lower, upper=upper) colMeans(X1) colMeans(X2) 
plot(density(X1[,1], from=lower[1], to=upper[1]), col="red", lwd=2, main="Kernel density estimates from random samples generated by Gibbs vs. Rejection sampling") lines(density(X2[,1], from=lower[1], to=upper[1]), col="blue", lwd=2) legend("topleft",legend=c("Gibbs Sampling","Rejection Sampling"), col=c("red","blue"), lwd=2, bty="n") ################################################################################ # # Example 3: # Autocorrelation plot for Gibbs sampler # with and without thinning # ################################################################################ sigma <- matrix(c(4,2,2,3), ncol=2) X1 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0), algorithm="rejection") acf(X1) # no autocorrelation among random points X2 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0), algorithm="gibbs") acf(X2) # exhibits autocorrelation among random points X3 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0), algorithm="gibbs", thinning=2) acf(X3) # reduced autocorrelation among random points plot(density(X1[,1], to=1)) lines(density(X2[,1], to=1), col="blue") lines(density(X3[,1], to=1), col="red") ################################################################################ # # Example 4: Univariate case # ################################################################################ X <- rtmvnorm(100, mean=0, sigma=1, lower=-1, upper=1) ################################################################################ # # Example 5: Linear Constraints # ################################################################################ mean <- c(0, 0) sigma <- matrix(c(10, 0, 0, 1), 2, 2) # Linear Constraints # # a1 <= x1 + x2 <= b2 # a2 <= x1 - x2 <= b2 # # [ a1 ] <= [ 1 1 ] [ x1 ] <= [b1] # [ a2 ] [ 1 -1 ] [ x2 ] [b2] a <- c(-2, -2) b <- c( 2, 2) D <- matrix(c(1, 1, 1, -1), 2, 2) X <- rtmvnorm(n=10000, mean, sigma, lower=a, upper=b, D=D, algorithm="gibbsR") plot(X, main="Gibbs sampling for multivariate normal with linear 
constraints according to Geweke (1991)") # mark linear constraints as lines for (i in 1:nrow(D)) { abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") } ################################################################################ # # Example 6: Using precision matrix H rather than sigma # ################################################################################ lower <- c(-1, -1) upper <- c(1, 1) mean <- c(0.5, 0.5) sigma <- matrix(c(1, 0.8, 0.8, 1), 2, 2) H <- solve(sigma) D <- matrix(c(1, 1, 1, -1), 2, 2) X <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm="gibbs") plot(X, main="Gibbs sampling with precision matrix and linear constraints") ################################################################################ # # Example 7: Using sparse precision matrix H in high dimensions # ################################################################################ \dontrun{ d <- 1000 I_d <- sparseMatrix(i=1:d, j=1:d, x=1) W <- sparseMatrix(i=c(1:d, 1:(d-1)), j=c(1:d, (2:d)), x=0.5) H <- t(I_d - 0.5 * W) %*% (I_d - 0.5 * W) lower <- rep(0, d) upper <- rep(2, d) # Gibbs sampler generates n=100 draws in d=1000 dimensions X <- rtmvnorm.sparseMatrix(n=100, mean = rep(0,d), H=H, lower=lower, upper=upper, burn.in.samples=100) colMeans(X) cov(X) } } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/gmm.tmvnorm.Rd0000644000176200001440000001122612567600060015467 0ustar liggesusers\name{gmm.tmvnorm} \alias{gmm.tmvnorm} \title{ GMM Estimation for the Truncated Multivariate Normal Distribution } \description{ Generalized Method of Moments (GMM) Estimation for the Truncated Multivariate Normal Distribution } \usage{ gmm.tmvnorm(X, lower = rep(-Inf, length = ncol(X)), upper = rep(+Inf, length = ncol(X)), start = list(mu = rep(0, ncol(X)), sigma = diag(ncol(X))), fixed = list(), method=c("ManjunathWilhelm","Lee"), cholesky = FALSE, ...) 
} \arguments{ \item{X}{Matrix of quantiles, each row is taken to be a quantile.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = ncol(X))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = ncol(X))}.} \item{start}{Named list with elements \code{mu} (mean vector) and \code{sigma} (covariance matrix). Initial values for optimizer.} \item{fixed}{Named list. Parameter values to keep fixed during optimization.} \item{method}{Which set of moment conditions used, possible methods are "ManjunathWilhelm" (default) and "Lee".} \item{cholesky}{if TRUE, we use the Cholesky decomposition of \code{sigma} as parametrization} \item{\dots}{Further arguments to pass to \code{\link{gmm}}} } \details{ This method performs an estimation of the parameters \code{mean} and \code{sigma} of a truncated multinormal distribution using the Generalized Method of Moments (GMM), when the truncation points \code{lower} and \code{upper} are known. \code{gmm.tmvnorm()} is a wrapper for the general GMM method \code{\link[gmm]{gmm}}, so one does not have to specify the moment conditions. \bold{Manjunath/Wilhelm moment conditions}\cr Because the first and second moments can be computed thanks to the \code{\link{mtmvnorm}} function, we can set up a method-of-moments estimator by equating the sample moments to their population counterparts. This way we have an exactly identified case. \bold{Lee (1979,1983) moment conditions}\cr The recursive moment conditions presented by Lee (1979,1983) are defined for \eqn{l=0,1,2,\ldots} as \deqn{ \sigma^{iT} E(x_i^l \textbf{x}) = \sigma^{iT} \mu E(x_i^l) + l E(x_i^{l-1}) + \frac{a_i^l F_i(a_i)}{F} - \frac{b_i^l F_i(b_i)}{F} } where \eqn{E(x_i^l)} and \eqn{E(x_i^l \textbf{x})} are the moments of \eqn{x_i^l} and \eqn{x_i^l \textbf{x}} respectively and \eqn{F_i(c)/F} is the one-dimensional marginal density in variable \eqn{i} as calculated by \code{\link{dtmvnorm.marginal}}. 
\eqn{\sigma^{iT}} is the \eqn{i}-th column of the inverse covariance matrix \eqn{\Sigma^{-1}}. This method returns an object of class \code{gmm}, for which various diagnostic methods are available, like \code{profile()}, \code{confint()} etc. See examples. } \value{ An object of class \code{\link[gmm]{gmm}} } \author{ Stefan Wilhelm \email{wilhelm@financial.com} } \references{ Tallis, G. M. (1961). The moment generating function of the truncated multinormal distribution. \emph{Journal of the Royal Statistical Society, Series B}, \bold{23}, 223--229 Lee, L.-F. (1979). On the first and second moments of the truncated multi-normal distribution and a simple estimator. \emph{Economics Letters}, \bold{3}, 165--169 Lee, L.-F. (1983). The determination of moments of the doubly truncated multivariate normal Tobit model. \emph{Economics Letters}, \bold{11}, 245--250 Manjunath B G and Wilhelm, S. (2009). Moments Calculation For the Double Truncated Multivariate Normal Density. Working Paper. Available at SSRN: \url{http://ssrn.com/abstract=1472153} } \seealso{ \code{\link[gmm]{gmm}} } \examples{ \dontrun{ set.seed(1.234) # the actual parameters lower <- c(-1, -2) upper <- c(3, Inf) mu <- c(0, 0) sigma <- matrix(c(1, 0.8, 0.8, 2), 2, 2) # generate random samples X <- rtmvnorm(n=500, mu, sigma, lower, upper) # estimate mean vector and covariance matrix sigma from random samples X # with default start values gmm.fit1 <- gmm.tmvnorm(X, lower=lower, upper=upper) # diagnostic output of the estimated parameters summary(gmm.fit1) vcov(gmm.fit1) # confidence intervals confint(gmm.fit1) # choosing a different start value gmm.fit2 <- gmm.tmvnorm(X, lower=lower, upper=upper, start=list(mu=c(0.1, 0.1), sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2))) summary(gmm.fit2) # GMM estimation with Lee (1983) moment conditions gmm.fit3 <- gmm.tmvnorm(X, lower=lower, upper=upper, method="Lee") summary(gmm.fit3) confint(gmm.fit3) # MLE estimation for comparison mle.fit1 <- mle.tmvnorm(X, lower=lower, 
upper=upper) confint(mle.fit1) } } tmvtnorm/man/tmvnorm.Rd0000644000176200001440000001067712567600060014721 0ustar liggesusers% --- Source file: tmvtnorm.Rd --- \name{tmvnorm} \alias{dtmvnorm} \title{Truncated Multivariate Normal Density} \description{ This function provides the joint density function for the truncated multivariate normal distribution with mean equal to \code{mean} and covariance matrix \code{sigma}, lower and upper truncation points \code{lower} and \code{upper}. For convenience, it furthermore serves as a wrapper function for the one-dimensional and bivariate marginal densities \code{dtmvnorm.marginal()} and \code{dtmvnorm.marginal2()} respectively when invoked with the \code{margin} argument. } \usage{ dtmvnorm(x, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE, margin=NULL) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{mean}{Mean vector, default is \code{rep(0, nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} \item{margin}{if \code{NULL} then the joint density is computed (the default), if \code{MARGIN=1} then the one-dimensional marginal density in variate \code{q} (\code{q = 1..length(mean)}) is returned, if \code{MARGIN=c(q,r)} then the bivariate marginal density in variates \code{q} and \code{r} for \code{q,r = 1..length(mean)} and \eqn{q \ne r}{q != r} is returned.} } \details{ The computation of truncated multivariate normal probabilities and densities is done using conditional probabilities from the standard/untruncated multivariate normal distribution. 
So we refer to the documentation of the mvtnorm package and the methodology is described in Genz (1992, 1993). } \author{Stefan Wilhelm } \seealso{\code{\link{ptmvnorm}}, \code{\link[mvtnorm]{pmvnorm}}, \code{\link[mvtnorm]{rmvnorm}}, \code{\link[mvtnorm]{dmvnorm}}, \code{\link{dtmvnorm.marginal}} and \code{\link{dtmvnorm.marginal2}} for marginal density functions} \references{ Genz, A. (1992). Numerical computation of multivariate normal probabilities. \emph{Journal of Computational and Graphical Statistics}, \bold{1}, 141--150 Genz, A. (1993). Comparison of methods for the computation of multivariate normal probabilities. \emph{Computing Science and Statistics}, \bold{25}, 400--405 Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions \emph{Wiley & Sons}, pp. 70--73 Horrace, W. (2005). Some Results on the Multivariate Truncated Normal Distribution. \emph{Journal of Multivariate Analysis}, \bold{94}, 209--221 } \examples{ dtmvnorm(x=c(0,0), mean=c(1,1), upper=c(0,0)) ########################################### # # Example 1: # truncated multivariate normal density # ############################################ x1<-seq(-2, 3, by=0.1) x2<-seq(-2, 3, by=0.1) density<-function(x) { sigma=matrix(c(1, -0.5, -0.5, 1), 2, 2) z=dtmvnorm(x, mean=c(0,0), sigma=sigma, lower=c(-1,-1)) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute density d for grid d=fgrid(x1, x2, density) # plot density as contourplot contour(x1, x2, d, nlevels=5, main="Truncated Multivariate Normal Density", xlab=expression(x[1]), ylab=expression(x[2])) abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) ########################################### # # Example 2: # generation of random numbers # from a truncated multivariate normal distribution # ############################################ sigma <- matrix(c(4,2,2,3), ncol=2) x <- rtmvnorm(n=500, 
mean=c(1,2), sigma=sigma, upper=c(1,0)) plot(x, main="samples from truncated bivariate normal distribution", xlim=c(-6,6), ylim=c(-6,6), xlab=expression(x[1]), ylab=expression(x[2])) abline(v=1, lty=3, lwd=2, col="gray") abline(h=0, lty=3, lwd=2, col="gray") } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/ptmvnorm.Rd0000644000176200001440000000614212567600060015071 0ustar liggesusers% --- Source file: ptmvnorm.Rd --- \name{ptmvnorm} \alias{ptmvnorm} \title{ Truncated Multivariate Normal Distribution } \description{ Computes the distribution function of the truncated multivariate normal distribution for arbitrary limits and correlation matrices based on the \code{pmvnorm()} implementation of the algorithms by Genz and Bretz. } \usage{ ptmvnorm(lowerx, upperx, mean=rep(0, length(lowerx)), sigma, lower = rep(-Inf, length = length(mean)), upper = rep( Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) } \arguments{ \item{lowerx}{ the vector of lower limits of length n.} \item{upperx}{ the vector of upper limits of length n.} \item{mean}{ the mean vector of length n.} \item{sigma}{ the covariance matrix of dimension n. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}. } \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{maxpts}{ maximum number of function values as integer. } \item{abseps}{ absolute error tolerance as double. } \item{releps}{ relative error tolerance as double. } } \details{ The computation of truncated multivariate normal probabilities and densities is done using conditional probabilities from the standard/untruncated multivariate normal distribution. 
So we refer to the documentation of the \code{mvtnorm} package and the methodology is described in Genz (1992, 1993) and Genz/Bretz (2009). For properties of the truncated multivariate normal distribution see for example Johnson/Kotz (1970) and Horrace (2005). } \value{ The evaluated distribution function is returned with attributes \item{error}{estimated absolute error and} \item{msg}{status messages.} } \references{ Genz, A. (1992). Numerical computation of multivariate normal probabilities. \emph{Journal of Computational and Graphical Statistics}, \bold{1}, 141--150 Genz, A. (1993). Comparison of methods for the computation of multivariate normal probabilities. \emph{Computing Science and Statistics}, \bold{25}, 400--405 Genz, A. and Bretz, F. (2009). Computation of Multivariate Normal and t Probabilities. \emph{Lecture Notes in Statistics}, Vol. \bold{195}, Springer-Verlag, Heidelberg. Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions \emph{Wiley & Sons}, pp. 70--73 Horrace, W. (2005). Some Results on the Multivariate Truncated Normal Distribution. \emph{Journal of Multivariate Analysis}, \bold{94}, 209--221 } \examples{ sigma <- matrix(c(5, 0.8, 0.8, 1), 2, 2) Fx <- ptmvnorm(lowerx=c(-1,-1), upperx=c(0.5,0), mean=c(0,0), sigma=sigma, lower=c(-1,-1), upper=c(1,1)) } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/dtmvt.Rd0000644000176200001440000000712112567600060014343 0ustar liggesusers\name{dtmvt} \alias{dtmvt} \title{Truncated Multivariate Student t Density} \description{ This function provides the joint density function for the truncated multivariate Student t distribution with mean vector equal to \code{mean}, covariance matrix \code{sigma}, degrees of freedom parameter \code{df} and lower and upper truncation points \code{lower} and \code{upper}. 
} \usage{ dtmvt(x, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), log = FALSE) } \arguments{ \item{x}{Vector or matrix of quantiles. If \code{x} is a matrix, each row is taken to be a quantile.} \item{mean}{Mean vector, default is \code{rep(0, nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{df}{degrees of freedom parameter} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} } \details{ The Truncated Multivariate Student t Distribution is a conditional Multivariate Student t distribution subject to (linear) constraints \eqn{a \le \bold{x} \le b}. The density of the \eqn{p}-variate Multivariate Student t distribution with \eqn{\nu}{nu} degrees of freedom is \deqn{ f(\bold{x}) = \frac{\Gamma((\nu + p)/2)}{(\pi\nu)^{p/2} \Gamma(\nu/2) \|\Sigma\|^{1/2}} [ 1 + \frac{1}{\nu} (x - \mu)^T \Sigma^{-1} (x - \mu) ]^{- (\nu + p) / 2} } The density of the truncated distribution \eqn{f_{a,b}(x)} with constraints \eqn{(a \le x \le b)}{a <= x <= b} is accordingly \deqn{ f_{a,b}(x) = \frac{f(\bold{x})} {P(a \le x \le b)} } } \value{ a numeric vector with density values } \seealso{ \code{\link{ptmvt}} and \code{\link{rtmvt}} for probabilities and random number generation in the truncated case, see \code{\link[mvtnorm]{dmvt}}, \code{\link[mvtnorm]{rmvt}} and \code{\link[mvtnorm]{pmvt}} for the untruncated multi-t distribution. } \references{ Geweke, J. F. (1991) Efficient simulation from the multivariate normal and Student-t distributions subject to linear constraints and the evaluation of constraint probabilities. 
\url{http://www.biz.uiowa.edu/faculty/jgeweke/papers/paper47/paper47.pdf} Samuel Kotz, Saralees Nadarajah (2004). Multivariate t Distributions and Their Applications. \emph{Cambridge University Press} } \author{Stefan Wilhelm \email{wilhelm@financial.com}} \examples{ # Example x1 <- seq(-2, 3, by=0.1) x2 <- seq(-2, 3, by=0.1) mean <- c(0,0) sigma <- matrix(c(1, -0.5, -0.5, 1), 2, 2) lower <- c(-1,-1) density <- function(x) { z=dtmvt(x, mean=mean, sigma=sigma, lower=lower) z } fgrid <- function(x, y, f) { z <- matrix(nrow=length(x), ncol=length(y)) for(m in 1:length(x)){ for(n in 1:length(y)){ z[m,n] <- f(c(x[m], y[n])) } } z } # compute multivariate-t density d for grid d <- fgrid(x1, x2, function(x) dtmvt(x, mean=mean, sigma=sigma, lower=lower)) # compute multivariate normal density d for grid d2 <- fgrid(x1, x2, function(x) dtmvnorm(x, mean=mean, sigma=sigma, lower=lower)) # plot density as contourplot contour(x1, x2, d, nlevels=5, main="Truncated Multivariate t Density", xlab=expression(x[1]), ylab=expression(x[2])) contour(x1, x2, d2, nlevels=5, add=TRUE, col="red") abline(v=-1, lty=3, lwd=2) abline(h=-1, lty=3, lwd=2) } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/ptmvt.Rd0000644000176200001440000000431312567600060014357 0ustar liggesusers\name{ptmvt} \alias{ptmvt} \title{Truncated Multivariate Student t Distribution} \description{ Computes the distribution function of the truncated multivariate t distribution } \usage{ ptmvt(lowerx, upperx, mean = rep(0, length(lowerx)), sigma, df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), maxpts = 25000, abseps = 0.001, releps = 0) } \arguments{ \item{lowerx}{ the vector of lower limits of length n.} \item{upperx}{ the vector of upper limits of length n.} \item{mean}{ the mean vector of length n.} \item{sigma}{ the covariance matrix of dimension n. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. 
If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}. } \item{df}{Degrees of freedom parameter} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{maxpts}{ maximum number of function values as integer. } \item{abseps}{ absolute error tolerance as double. } \item{releps}{ relative error tolerance as double. } } \value{ The evaluated distribution function is returned with attributes \item{error}{estimated absolute error and} \item{msg}{status messages.} } \references{ Geweke, J. F. (1991) Efficient simulation from the multivariate normal and Student-t distributions subject to linear constraints and the evaluation of constraint probabilities. \url{http://www.biz.uiowa.edu/faculty/jgeweke/papers/paper47/paper47.pdf} Samuel Kotz, Saralees Nadarajah (2004). Multivariate t Distributions and Their Applications. \emph{Cambridge University Press} } \author{Stefan Wilhelm } \examples{ sigma <- matrix(c(5, 0.8, 0.8, 1), 2, 2) Fx <- ptmvt(lowerx=c(-1,-1), upperx=c(0.5,0), mean=c(0,0), sigma=sigma, df=3, lower=c(-1,-1), upper=c(1,1)) } \keyword{ math } \keyword{ multivariate } tmvtnorm/man/mle.tmvnorm.Rd0000644000176200001440000000716412567600060015472 0ustar liggesusers\name{mle.tmvnorm} \alias{mle.tmvnorm} \title{ Maximum Likelihood Estimation for the Truncated Multivariate Normal Distribution } \description{ Maximum Likelihood Estimation for the Truncated Multivariate Normal Distribution } \usage{ mle.tmvnorm(X, lower = rep(-Inf, length = ncol(X)), upper = rep(+Inf, length = ncol(X)), start = list(mu = rep(0, ncol(X)), sigma = diag(ncol(X))), fixed = list(), method = "BFGS", cholesky = FALSE, lower.bounds = -Inf, upper.bounds = +Inf, ...) 
} \arguments{ \item{X}{Matrix of quantiles, each row is taken to be a quantile.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = ncol(X))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = ncol(X))}.} \item{start}{Named list with elements \code{mu} (mean vector) and \code{sigma} (covariance matrix). Initial values for optimizer.} \item{fixed}{Named list. Parameter values to keep fixed during optimization.} \item{method}{Optimization method to use. See \code{\link{optim}}} \item{cholesky}{if TRUE, we use the Cholesky decomposition of \code{sigma} as parametrization} \item{lower.bounds}{lower bounds/box constraints for method "L-BFGS-B"} \item{upper.bounds}{upper bounds/box constraints for method "L-BFGS-B"} \item{\dots}{Further arguments to pass to \code{\link{optim}}} } \details{ This method performs a maximum likelihood estimation of the parameters \code{mean} and \code{sigma} of a truncated multinormal distribution, when the truncation points \code{lower} and \code{upper} are known. \code{mle.tmvnorm()} is a wrapper for the general maximum likelihood method \code{\link[stats4]{mle}}, so one does not have to specify the negative log-likelihood function. The log-likelihood function for a data matrix X (T x n) can be established straightforward as \deqn{ \log L(X | \mu,\Sigma) = -T \log{\alpha(\mu,\Sigma)} + {-T/2} \log{\|\Sigma\|} -\frac{1}{2} \sum_{t=1}^{T}{(x_t-\mu)' \Sigma^{-1} (x_t-\mu)} } As \code{\link[stats4]{mle}}, this method returns an object of class \code{mle}, for which various diagnostic methods are available, like \code{profile()}, \code{confint()} etc. See examples. In order to adapt the estimation problem to \code{\link[stats4]{mle}}, the named parameters for mean vector elements are "mu_i" and the elements of the covariance matrix are "sigma_ij" for the lower triangular matrix elements, i.e. (j <= i). 
} \value{ An object of class \code{\link[stats4]{mle-class}} } \author{ Stefan Wilhelm \email{wilhelm@financial.com} } \seealso{ \code{\link[stats4]{mle}} and \code{\link[stats4]{mle-class}} } \examples{ \dontrun{ set.seed(1.2345) # the actual parameters lower <- c(-1,-1) upper <- c(1, 2) mu <- c(0, 0) sigma <- matrix(c(1, 0.7, 0.7, 2), 2, 2) # generate random samples X <- rtmvnorm(n=500, mu, sigma, lower, upper) method <- "BFGS" # estimate mean vector and covariance matrix sigma from random samples X # with default start values mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper) # diagnostic output of the estimated parameters summary(mle.fit1) logLik(mle.fit1) vcov(mle.fit1) # profiling the log likelihood and confidence intervals mle.profile1 <- profile(mle.fit1, X, method="BFGS", trace=TRUE) confint(mle.profile1) par(mfrow=c(3,2)) plot(mle.profile1) # choosing a different start value mle.fit2 <- mle.tmvnorm(X, lower=lower, upper=upper, start=list(mu=c(0.1, 0.1), sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2))) summary(mle.fit2) } }tmvtnorm/man/dtmvnorm.marginal2.Rd0000644000176200001440000001007112567600060016724 0ustar liggesusers\name{dtmvnorm.marginal2} \Rdversion{1.1} \alias{dtmvnorm.marginal2} \title{ Bivariate marginal density functions from a Truncated Multivariate Normal distribution } \description{ This function computes the bivariate marginal density function \eqn{f(x_q, x_r)} from a k-dimensional Truncated Multivariate Normal density function (k>=2). The bivariate marginal density is obtained by integrating out (k-2) dimensions as proposed by Tallis (1961). This function is basically an extraction of the Leppard and Tallis (1989) Fortran code for moments calculation, but extended to the double truncated case. 
} \usage{ dtmvnorm.marginal2(xq, xr, q, r, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), log = FALSE, pmvnorm.algorithm=GenzBretz()) } \arguments{ \item{xq}{Value \eqn{x_q}} \item{xr}{Value \eqn{x_r}} \item{q}{Index position for \eqn{x_q} within mean vector to calculate the bivariate marginal density for.} \item{r}{Index position for \eqn{x_r} within mean vector to calculate the bivariate marginal density for.} \item{mean}{Mean vector, default is \code{rep(0, length = nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} \item{pmvnorm.algorithm}{Algorithm used for \code{\link[mvtnorm]{pmvnorm}}} } \details{ The bivariate marginal density function \eqn{f(x_q, x_r)} for \eqn{x \sim TN(\mu, \Sigma, a, b)} and \eqn{q \ne r} is defined as \deqn{F_{q,r}(x_q=c_q, x_r=c_r) = \int^{b_1}_{a_1}...\int^{b_{q-1}}_{a_{q-1}}\int^{b_{q+1}}_{a_{q+1}}...\int^{b_{r-1}}_{a_{r-1}}\int^{b_{r+1}}_{a_{r+1}}...\int^{b_{k}}_{a_{k}} \varphi{_{\alpha}}_{\Sigma}(x_s, c_q, c_r) dx_s} } \references{ Tallis, G. M. (1961). The moment generating function of the truncated multinormal distribution. \emph{Journal of the Royal Statistical Society, Series B}, \bold{23}, 223--229 Leppard, P. and Tallis, G. M. (1989). Evaluation of the Mean and Covariance of the Truncated Multinormal \emph{Applied Statistics}, \bold{38}, 543--553 Manjunath B G and Wilhelm, S. (2009). Moments Calculation For the Double Truncated Multivariate Normal Density. Working Paper. 
Available at SSRN: \url{http://ssrn.com/abstract=1472153} } \author{Stefan Wilhelm , Manjunath B G } \examples{ lower = c(-0.5, -1, -1) upper = c( 2.2, 2, 2) mean = c(0,0,0) sigma = matrix(c(2.0, -0.6, 0.7, -0.6, 1.0, -0.2, 0.7, -0.2, 1.0), 3, 3) # generate random samples from untruncated and truncated distribution Y = rmvnorm(10000, mean=mean, sigma=sigma) X = rtmvnorm(500, mean=mean, sigma=sigma, lower=lower, upper=upper, algorithm="gibbs") # compute bivariate marginal density of x1 and x2 xq <- seq(lower[1], upper[1], by=0.1) xr <- seq(lower[2], upper[2], by=0.1) grid <- matrix(NA, length(xq), length(xr)) for (i in 1:length(xq)) { for (j in 1:length(xr)) { grid[i,j] = dtmvnorm.marginal2(xq=xq[i], xr=xr[j], q=1, r=2, sigma=sigma, lower=lower, upper=upper) } } plot(Y[,1], Y[,2], xlim=c(-4, 4), ylim=c(-4, 4), main=expression("bivariate marginal density ("*x[1]*","*x[2]*")"), xlab=expression(x[1]), ylab=expression(x[2]), col="gray80") points(X[,1], X[,2], col="black") lines(x=c(lower[1], upper[1], upper[1], lower[1], lower[1]), y=c(lower[2],lower[2],upper[2],upper[2],lower[2]), lty=2, col="red") contour(xq, xr, grid, add=TRUE, nlevels = 8, col="red", lwd=2) # scatterplot matrices for untruncated and truncated points require(lattice) splom(Y) splom(X) } \keyword{distribution} \keyword{multivariate} tmvtnorm/man/qtmvnorm-marginal.Rd0000644000176200001440000000662212567600060016665 0ustar liggesusers\encoding{latin1} \name{qtmvnorm-marginal} \alias{qtmvnorm.marginal} \title{ Quantiles of the Truncated Multivariate Normal Distribution in one dimension} \description{ Computes the equicoordinate quantile function of the truncated multivariate normal distribution for arbitrary correlation matrices based on an inversion of the algorithms by Genz and Bretz. 
}
\usage{
qtmvnorm.marginal(p, interval = c(-10, 10),
  tail = c("lower.tail","upper.tail","both.tails"), n=1,
  mean=rep(0, nrow(sigma)), sigma=diag(length(mean)),
  lower=rep(-Inf, length = length(mean)),
  upper=rep( Inf, length = length(mean)), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
 \item{p}{ probability.}
 \item{interval}{ a vector containing the end-points of the interval to be searched by \code{\link{uniroot}}.}
 \item{tail}{ specifies which quantiles should be computed.
   \code{lower.tail} gives the quantile \eqn{x} for which
   \eqn{P[X \le x] = p}{P[X <= x] = p}, \code{upper.tail} gives \eqn{x} with
   \eqn{P[X > x] = p} and \code{both.tails} leads to \eqn{x}
   with \eqn{P[-x \le X \le x] = p}{P[-x <= X <= x] = p}.}
 \item{n}{ index (1..n) to calculate marginal quantile for}
 \item{mean}{ the mean vector of length n. }
 \item{sigma}{ the covariance matrix of dimension n. Either \code{corr} or
   \code{sigma} can be specified. If \code{sigma} is given, the problem is
   standardized. If neither \code{corr} nor \code{sigma} is given, the
   identity matrix is used for \code{sigma}. }
 \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.}
 \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.}
 \item{...}{ additional parameters to be passed to \code{\link{uniroot}}.}
}
\details{
Only equicoordinate quantiles are computed, i.e., the quantiles in each dimension coincide.
Currently, the distribution function is inverted by using the \code{\link{uniroot}}
function which may result in limited accuracy of the quantiles.
}
\value{
A list with four components: \code{quantile} and \code{f.quantile}
give the location of the quantile and the value of the function
evaluated at that point. \code{iter} and \code{estim.prec} give the number
of iterations used and an approximate estimated precision from \code{\link{uniroot}}.
} \seealso{\code{\link{ptmvnorm}}, \code{\link[mvtnorm]{pmvnorm}}} \examples{ # finite dimensional distribution of the Geometric Brownian Motion log-returns # with truncation # volatility p.a. sigma=0.4 # risk free rate r = 0.05 # n=3 points in time T <- c(0.5, 0.7, 1) # covariance matrix of Geometric Brownian Motion returns Sigma = sigma^2*outer(T,T,pmin) # mean vector of the Geometric Brownian Motion returns mu = (r - sigma^2/2) * T # lower truncation vector a (a<=x<=b) a = rep(-Inf, 3) # upper truncation vector b (a<=x<=b) b = c(0, 0, Inf) # quantile of the t_1 returns qtmvnorm.marginal(p=0.95, interval = c(-10, 10), tail = "lower.tail", n=1, mean = mu, sigma = Sigma, lower=a, upper=b) } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/mtmvnorm.Rd0000644000176200001440000000635112567600060015070 0ustar liggesusers\name{mtmvnorm} \alias{mtmvnorm} \alias{moments} \title{Computation of Mean Vector and Covariance Matrix For Truncated Multivariate Normal Distribution} \description{ Computation of the first two moments, i.e. mean vector and covariance matrix for the Truncated Multivariate Normal Distribution based on the works of Tallis (1961), Lee (1979) and Leppard and Tallis (1989), but extended to the double-truncated case with general mean and general covariance matrix. } \usage{ mtmvnorm(mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), doComputeVariance=TRUE, pmvnorm.algorithm=GenzBretz()) } \arguments{ \item{mean}{Mean vector, default is \code{rep(0, length = ncol(x))}.} \item{sigma}{Covariance matrix, default is \code{diag(ncol(x))}.} \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{doComputeVariance}{flag whether to compute the variance for users who are interested only in the mean. 
Defaults to \code{TRUE} for backward compatibility.} \item{pmvnorm.algorithm}{Algorithm used for \code{\link[mvtnorm]{pmvnorm}}} } \details{ Details for the moment calculation under double truncation and the derivation of the formula can be found in the Manjunath/Wilhelm (2009) working paper. If only a subset of variables are truncated, we calculate the truncated moments only for these and use the Johnson/Kotz formula for the remaining untruncated variables. } \value{ \item{tmean}{Mean vector of truncated variables} \item{tvar}{Covariance matrix of truncated variables} } \references{ Tallis, G. M. (1961). The moment generating function of the truncated multinormal distribution. \emph{Journal of the Royal Statistical Society, Series B}, \bold{23}, 223--229 Johnson, N./Kotz, S. (1970). Distributions in Statistics: Continuous Multivariate Distributions \emph{Wiley & Sons}, pp. 70--73 Lee, L.-F. (1979). On the first and second moments of the truncated multi-normal distribution and a simple estimator. \emph{Economics Letters}, \bold{3}, 165--169 Leppard, P. and Tallis, G. M. (1989). Evaluation of the Mean and Covariance of the Truncated Multinormal. \emph{Applied Statistics}, \bold{38}, 543--553 Manjunath B G and Wilhelm, S. (2009). Moments Calculation For the Double Truncated Multivariate Normal Density. Working Paper. 
Available at SSRN: \url{http://ssrn.com/abstract=1472153} } \author{Stefan Wilhelm , Manjunath B G } \examples{ mu <- c(0.5, 0.5, 0.5) sigma <- matrix(c( 1, 0.6, 0.3, 0.6, 1, 0.2, 0.3, 0.2, 2), 3, 3) a <- c(-Inf, -Inf, -Inf) b <- c(1, 1, 1) # compute first and second moments mtmvnorm(mu, sigma, lower=a, upper=b) # compare with simulated results X <- rtmvnorm(n=1000, mean=mu, sigma=sigma, lower=a, upper=b) colMeans(X) cov(X) } \keyword{distribution} \keyword{multivariate} tmvtnorm/man/ptmvnorm.marginal.Rd0000644000176200001440000000653412567600060016667 0ustar liggesusers\name{ptmvtnorm.marginal} \Rdversion{1.1} \alias{ptmvnorm.marginal} \alias{ptmvt.marginal} \title{One-dimensional marginal CDF function for a Truncated Multivariate Normal and Student t distribution} \description{ This function computes the one-dimensional marginal probability function from a Truncated Multivariate Normal and Student t density function using integration in \code{pmvnorm()} and \code{pmvt()}. } \usage{ ptmvnorm.marginal(xn, n = 1, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean))) ptmvt.marginal(xn, n = 1, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean))) } \arguments{ \item{xn}{Vector of quantiles to calculate the marginal probability for.} \item{n}{Index position (1..k) within the random vector xn to calculate the one-dimensional marginal probability for.} \item{mean}{ the mean vector of length k. } \item{sigma}{ the covariance matrix of dimension k. Either \code{corr} or \code{sigma} can be specified. If \code{sigma} is given, the problem is standardized. 
If neither \code{corr} nor \code{sigma} is given, the identity matrix is used for \code{sigma}.} \item{df}{degrees of freedom parameter} \item{lower}{Vector of lower truncation points, default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points, default is \code{rep( Inf, length = length(mean))}.} } \details{ The one-dimensional marginal probability for index i is \eqn{F_i(x_i) = P(X_i \le x_i)}{F_i(x_i) = P(X_i <= x_i)} \deqn{F_i(x_i) = \int_{a_1}^{b_1} \ldots \int_{a_{i-1}}^{b_{i-1}} \int_{a_{i}}^{x_i} \int_{a_{i+1}}^{b_{i+1}} \ldots \int_{a_k}^{b_k} f(x) dx = \alpha^{-1} \Phi_k(a, u, \mu, \Sigma)} where \eqn{u = (b_1,\ldots,b_{i-1},x_i,b_{i+1},\ldots,b_k)'}{u = (b_1,...,b_{i-1},x_i,b_{i+1},...,b_k)'} is the upper integration bound and \eqn{\Phi_k} is the k-dimensional normal probability (i.e. functions \code{pmvnorm()} and \code{pmvt()} in R package \code{mvtnorm}). } \value{ Returns a vector of the same length as xn with probabilities. } \author{Stefan Wilhelm } \examples{ ## Example 1: Truncated multi-normal lower <- c(-1,-1,-1) upper <- c(1,1,1) mean <- c(0,0,0) sigma <- matrix(c( 1, 0.8, 0.2, 0.8, 1, 0.1, 0.2, 0.1, 1), 3, 3) X <- rtmvnorm(n=1000, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) x <- seq(-1, 1, by=0.01) Fx <- ptmvnorm.marginal(xn=x, n=1, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) plot(ecdf(X[,1]), main="marginal CDF for truncated multi-normal") lines(x, Fx, type="l", col="blue") ## Example 2: Truncated multi-t X <- rtmvt(n=1000, mean=c(0,0,0), sigma=sigma, df=2, lower=lower, upper=upper) x <- seq(-1, 1, by=0.01) Fx <- ptmvt.marginal(xn=x, n=1, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) plot(ecdf(X[,1]), main="marginal CDF for truncated multi-t") lines(x, Fx, type="l", col="blue") } \keyword{distribution} \keyword{multivariate} tmvtnorm/man/rtmvnorm2.Rd0000644000176200001440000000735712567600060015166 0ustar liggesusers\name{rtmvnorm2} \alias{rtmvnorm2} \title{Sampling Random 
Numbers From The Truncated Multivariate Normal Distribution With Linear Constraints}
\description{
This function generates random numbers from the truncated multivariate normal distribution
with mean equal to \code{mean} and covariance matrix \code{sigma} and general linear constraints
\deqn{lower \le D x \le upper}{lower <= D x <= upper}
with either rejection sampling or Gibbs sampling.
}
\usage{
rtmvnorm2(n, mean = rep(0, nrow(sigma)),
  sigma = diag(length(mean)),
  lower = rep(-Inf, length = length(mean)),
  upper = rep(Inf, length = length(mean)),
  D = diag(length(mean)),
  algorithm = c("gibbs", "gibbsR", "rejection"), ...)
}
\arguments{
  \item{n}{Number of random points to be sampled. Must be an integer \eqn{\ge 1}{>= 1}.}
  \item{mean}{Mean vector (d x 1), default is \code{rep(0, length = ncol(x))}.}
  \item{sigma}{Covariance matrix (d x d), default is \code{diag(ncol(x))}.}
  \item{lower}{Vector of lower truncation points (r x 1),
              default is \code{rep(-Inf, length = length(mean))}.}
  \item{upper}{Vector of upper truncation points (r x 1),
              default is \code{rep( Inf, length = length(mean))}.}
  \item{D}{Matrix for linear constraints (r x d), defaults to diagonal matrix (d x d), i.e. r = d.}
  \item{algorithm}{Method used, possible methods are the Fortran Gibbs sampler ("gibbs", default),
   the Gibbs sampler implementation in R ("gibbsR") and rejection sampling ("rejection")}
  \item{\dots}{additional parameters for Gibbs sampling, given to the internal method \code{rtmvnorm.gibbs()},
   such as \code{burn.in.samples}, \code{start.value} and \code{thinning}, see details in \code{\link{rtmvnorm}}}
}
\details{
This method allows for \eqn{r > d}{r > d} linear constraints,
whereas \code{\link{rtmvnorm}} requires a full-rank matrix D \eqn{(d \times d)}{(d x d)}
and can only handle \eqn{r \le d}{r <= d} constraints at the moment.
The lower and upper bounds \code{lower} and \code{upper} are \eqn{(r \times 1)}{(r x 1)}, the matrix \code{D} is \eqn{(r \times d)}{(r x d)} and x is \eqn{(d \times 1)}{(d x 1)}. The default case is \eqn{r = d}{r = d} and \eqn{D = I_d}{D = I_d}. } \section{Warning}{This method will be merged with \code{\link{rtmvnorm}} in one of the next releases.} \author{ Stefan Wilhelm } \seealso{ \code{\link{rtmvnorm}} } \examples{ \dontrun{ ################################################################################ # # Example 5a: Number of linear constraints r > dimension d # ################################################################################ # general linear restrictions a <= Dx <= b with x (d x 1); D (r x d); a,b (r x 1) # Dimension d=2, r=3 linear constraints # # a1 <= x1 + x2 <= b2 # a2 <= x1 - x2 <= b2 # a3 <= 0.5x1 - x2 <= b3 # # [ a1 ] <= [ 1 1 ] [ x1 ] <= [b1] # [ a2 ] [ 1 -1 ] [ x2 ] [b2] # [ a3 ] [ 0.5 -1 ] [b3] D <- matrix( c( 1, 1, 1, -1, 0.5, -1), 3, 2, byrow=TRUE) a <- c(0, 0, 0) b <- c(1, 1, 1) # mark linear constraints as lines plot(NA, xlim=c(-0.5, 1.5), ylim=c(-1,1)) for (i in 1:3) { abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col="red") } ### Gibbs sampling for general linear constraints a <= Dx <= b mean <- c(0, 0) sigma <- matrix(c(1.0, 0.2, 0.2, 1.0), 2, 2) x0 <- c(0.5, 0.2) # Gibbs sampler start value X <- rtmvnorm2(n=1000, mean, sigma, lower=a, upper=b, D, start.value=x0) # show random points within simplex points(X, pch=20, col="black") } } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/rtmvt.Rd0000644000176200001440000001254212567600060014364 0ustar liggesusers\name{rtmvt} \alias{rtmvt} \title{Sampling Random Numbers From The Truncated Multivariate Student t Distribution} \description{ This function generates random numbers from the truncated multivariate Student-t distribution with mean equal to \code{mean} and covariance matrix \code{sigma}, lower and upper truncation 
points \code{lower} and \code{upper} with either rejection sampling or Gibbs sampling. } \usage{ rtmvt(n, mean = rep(0, nrow(sigma)), sigma = diag(length(mean)), df = 1, lower = rep(-Inf, length = length(mean)), upper = rep(Inf, length = length(mean)), algorithm=c("rejection", "gibbs"), ...) } \arguments{ \item{n}{Number of random points to be sampled. Must be an integer >= 1.} \item{mean}{Mean vector, default is \code{rep(0, length = ncol(x))}.} \item{sigma}{Covariance matrix, default is \code{diag(ncol(x))}.} \item{df}{Degrees of freedom parameter (positive, may be non-integer)} \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{algorithm}{Method used, possible methods are rejection sampling ("rejection", default) and the R Gibbs sampler ("gibbs").} \item{...}{additional parameters for Gibbs sampling, given to the internal method \code{rtmvt.gibbs()}, such as \code{burn.in.samples}, \code{start.value} and \code{thinning}, see details} } \details{ We sample \eqn{x \sim T(\mu, \Sigma, df)}{x ~ T(mean, Sigma, df)} subject to the rectangular truncation \eqn{lower \le x \le upper}{lower <= x <= upper}. Currently, two random number generation methods are implemented: rejection sampling and the Gibbs Sampler. For rejection sampling \code{algorithm="rejection"}, we sample from \code{\link[mvtnorm]{rmvt}} and retain only samples inside the support region. The acceptance probability will be calculated with \code{\link[mvtnorm]{pmvt}}. \code{\link[mvtnorm]{pmvt}} does only accept integer degrees of freedom \code{df}. For non-integer \code{df}, \code{algorithm="rejection"} will throw an error, so please use \code{algorithm="gibbs"} instead. 
The arguments to be passed along with \code{algorithm="gibbs"} are: \describe{ \item{\code{burn.in.samples}}{number of samples in Gibbs sampling to be discarded as burn-in phase, must be non-negative.} \item{\code{start.value}}{Start value (vector of length \code{length(mean)}) for the MCMC chain. If one is specified, it must lie inside the support region (\eqn{lower \le start.value \le upper}{lower <= start.value <= upper}). If none is specified, the start value is taken componentwise as the finite lower or upper boundaries respectively, or zero if both boundaries are infinite. Defaults to NULL.} \item{\code{thinning}}{Thinning factor for reducing autocorrelation of random points in Gibbs sampling. Must be an integer \eqn{\ge 1}{>= 1}. We create a Markov chain of length \code{(n*thinning)} and take only those samples \code{j=1:(n*thinning)} where \code{j \%\% thinning == 0} Defaults to 1 (no thinning of the chain).} } } \section{Warning}{ The same warnings for the Gibbs sampler apply as for the method \code{\link{rtmvnorm}}. } \author{Stefan Wilhelm , Manjunath B G } \references{ Geweke, John F. (1991) Efficient Simulation from the Multivariate Normal and Student-t Distributions Subject to Linear Constraints. \emph{Computer Science and Statistics. Proceedings of the 23rd Symposium on the Interface. Seattle Washington, April 21-24, 1991}, pp. 
571--578 An earlier version of this paper is available at \url{http://www.biz.uiowa.edu/faculty/jgeweke/papers/paper47/paper47.pdf} } \examples{ ########################################################### # # Example 1 # ########################################################### # Draw from multi-t distribution without truncation X1 <- rtmvt(n=10000, mean=rep(0, 2), df=2) X2 <- rtmvt(n=10000, mean=rep(0, 2), df=2, lower=c(-1,-1), upper=c(1,1)) ########################################################### # # Example 2 # ########################################################### df = 2 mu = c(1,1,1) sigma = matrix(c( 1, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 1), 3, 3) lower = c(-2,-2,-2) upper = c(2, 2, 2) # Rejection sampling X1 <- rtmvt(n=10000, mu, sigma, df, lower, upper) # Gibbs sampling without thinning X2 <- rtmvt(n=10000, mu, sigma, df, lower, upper, algorithm="gibbs") # Gibbs sampling with thinning X3 <- rtmvt(n=10000, mu, sigma, df, lower, upper, algorithm="gibbs", thinning=2) plot(density(X1[,1], from=lower[1], to=upper[1]), col="red", lwd=2, main="Gibbs vs. Rejection") lines(density(X2[,1], from=lower[1], to=upper[1]), col="blue", lwd=2) legend("topleft",legend=c("Rejection Sampling","Gibbs Sampling"), col=c("red","blue"), lwd=2) acf(X1) # no autocorrelation in Rejection sampling acf(X2) # strong autocorrelation of Gibbs samples acf(X3) # reduced autocorrelation of Gibbs samples after thinning } \keyword{distribution} \keyword{multivariate}tmvtnorm/man/dmvnorm.marginal.Rd0000644000176200001440000001262112567600060016461 0ustar liggesusers% --- Source file: dtmvnorm-marginal.Rd --- \encoding{latin1} \name{dtmvnorm.marginal} \alias{dtmvnorm.marginal} \title{One-dimensional marginal density functions from a Truncated Multivariate Normal distribution} \description{ This function computes the one-dimensional marginal density function from a Truncated Multivariate Normal density function using the algorithm given in Cartinhour (1990). 
} \usage{ dtmvnorm.marginal(xn, n=1, mean= rep(0, nrow(sigma)), sigma=diag(length(mean)), lower=rep(-Inf, length = length(mean)), upper=rep( Inf, length = length(mean)), log=FALSE) } \arguments{ \item{xn}{Vector of quantiles to calculate the marginal density for.} \item{n}{Index position (1..k) within the random vector x to calculate the one-dimensional marginal density for.} \item{mean}{Mean vector, default is \code{rep(0, length = nrow(sigma))}.} \item{sigma}{Covariance matrix, default is \code{diag(length(mean))}.} \item{lower}{Vector of lower truncation points,\\ default is \code{rep(-Inf, length = length(mean))}.} \item{upper}{Vector of upper truncation points,\\ default is \code{rep( Inf, length = length(mean))}.} \item{log}{Logical; if \code{TRUE}, densities d are given as log(d).} } \details{ The one-dimensional marginal density \eqn{f_i(x_i)} of \eqn{x_i} is \deqn{f_i(x_i) = \int_{a_1}^{b_1} \ldots \int_{a_{i-1}}^{b_{i-1}} \int_{a_{i+1}}^{b_{i+1}} \ldots \int_{a_k}^{b_k} f(x) dx_{-i}} Note that the one-dimensional marginal density is not truncated normal, but only conditional densities are truncated normal. } \author{Stefan Wilhelm } \references{ Cartinhour, J. (1990). One-dimensional marginal density functions of a truncated multivariate normal density function. \emph{Communications in Statistics - Theory and Methods}, \bold{19}, 197--203 Arnold et al. (1993). The Nontruncated Marginal of a Truncated Bivariate Normal Distribution. 
\emph{Psychometrika}, \bold{58}, 471--488 } \examples{ ############################################# # # Example 1: truncated bivariate normal # ############################################# # parameters of the bivariate normal distribution sigma = matrix(c(1 , 0.95, 0.95, 1 ), 2, 2) mu = c(0,0) # sample from multivariate normal distribution X = rmvnorm(5000, mu, sigma) # tuncation in x2 with x2 <= 0 X.trunc = X[X[,2]<0,] # plot the realisations before and after truncation par(mfrow=c(2,2)) plot(X, col="gray", xlab=expression(x[1]), ylab=expression(x[2]), main="realisations from a\n truncated bivariate normal distribution") points(X.trunc) abline(h=0, lty=2, col="gray") #legend("topleft", col=c("gray", "black") # marginal density for x1 from realisations plot(density(X.trunc[,1]), main=expression("marginal density for "*x[1])) # one-dimensional marginal density for x1 using the formula x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=1, mean=mu, sigma=sigma, lower=c(-Inf,-Inf), upper=c(Inf,0)) lines(x, fx, lwd=2, col="red") # marginal density for x2 plot(density(X.trunc[,2]), main=expression("marginal density for "*x[2])) # one-dimensional marginal density for x2 using the formula x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=2, mean=mu, sigma=sigma, lower=c(-Inf,-Inf), upper=c(Inf,0)) lines(x, fx, lwd=2, col="blue") ############################################# # # Example 2 : truncated trivariate normal # ############################################# # parameters of the trivariate normal distribution sigma = outer(1:3,1:3,pmin) mu = c(0,0,0) # sample from multivariate normal distribution X = rmvnorm(2000, mu, sigma) # truncation in x2 and x3 : x2 <= 0, x3 <= 0 X.trunc = X[X[,2]<=0 & X[,3]<=0,] par(mfrow=c(2,3)) plot(X, col="gray", xlab=expression(x[1]), ylab=expression(x[2]), main="realisations from a\n truncated trivariate normal distribution") points(X.trunc, col="black") abline(h=0, lty=2, col="gray") plot(X[,2:3], col="gray", 
xlab=expression(x[2]), ylab=expression(x[3]), main="realisations from a\n truncated trivariate normal distribution") points(X.trunc[,2:3], col="black") abline(h=0, lty=2, col="gray") abline(v=0, lty=2, col="gray") plot(X[,c(1,3)], col="gray", xlab=expression(x[1]), ylab=expression(x[3]), main="realisations from a\n truncated trivariate normal distribution") points(X.trunc[,c(1,3)], col="black") abline(h=0, lty=2, col="gray") # one-dimensional marginal density for x1 from realisations and formula plot(density(X.trunc[,1]), main=expression("marginal density for "*x[1])) x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=1, mean=mu, sigma=sigma, lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0)) lines(x, fx, lwd=2, col="red") # one-dimensional marginal density for x2 from realisations and formula plot(density(X.trunc[,2]), main=expression("marginal density for "*x[2])) x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=2, mean=mu, sigma=sigma, lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0)) lines(x, fx, lwd=2, col="red") # one-dimensional marginal density for x3 from realisations and formula plot(density(X.trunc[,3]), main=expression("marginal density for "*x[3])) x <- seq(-5, 5, by=0.01) fx <- dtmvnorm.marginal(x, n=3, mean=mu, sigma=sigma, lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0)) lines(x, fx, lwd=2, col="red") } \keyword{distribution} \keyword{multivariate}