wavethresh/NAMESPACE

import(MASS)
export(cthresh,find.parameters,test.dataCT,filter.select,make.dwwt)
export(accessc,accessC,accessC.mwd,accessC.wd,accessC.wp,accessC.wst)
export(accessD,accessD.mwd,accessD.wd,accessD.wd3D,accessD.wp,accessD.wpst,accessD.wst)
export(addpkt)
export(AutoBasis,av.basis,AvBasis,AvBasis.wst,AvBasis.wst2D)
export(basisplot,basisplot.BP,basisplot.wp)
export(BAYES.THR)
export(Best1DCols,bestm,BMdiscr)
export(c2to4,CanUseMoreThanOneColor)
export(checkmyews,cns,ewspec,ipndacw,LocalSpec,LocalSpec.wd,LocalSpec.wst,LSWsim)
export(Chires5,Chires6,CWavDE,dclaw,dencvwd,denplot,denproj,denwd,denwr,pclaw,rclaw)
export(compare.filters)
export(compgrot,guyrot,rotateback)
export(compress,compress.default,compress.imwd)
export(conbar)
export(convert,convert.wd,convert.wst,ConvertMessage)
export(Crsswav)
export(Cthreshold)
export(CWCV,FullWaveletCV,GetRSSWST,rsswav,WaveletCV,wstCV,wstCVl,wvcvlrss)
export(DJ.EX,doppler,example.1,simchirp)
export(dof)
export(draw,draw.default,draw.imwd,draw.imwdc,draw.mwd,draw.wd,draw.wp,draw.wst,drawbox,drawwp.default,ScalingFunction,support)
export(filter.select,first.last,first.last.dh)
export(firstdot)
export(GenW)
export(getarrvec,getpacket,getpacket.wp,getpacket.wpst,getpacket.wst,getpacket.wst2D)
export(HaarConcat,HaarMA)
export(image.wd,image.wst,imwd,imwr,imwr.imwd,imwr.imwdc)
export(InvBasis,InvBasis.wp,InvBasis.wst)
export(irregwd,makegrid)
export(IsEarly,IsEarly.default,IsEarly.wd)
export(IsPowerOfTwo)
export(l2norm,linfnorm)
export(levarr)
export(logabs,madmad,ssq)
export(lt.to.name)
export(makewpstDO,makewpstRO)
export(MaNoVe,MaNoVe.wp,MaNoVe.wst,numtonv)
export(mfilter.select,mfirst.last,mpostfilter,mprefilter,mwd,mwr,rcov)
export(modernise,modernise.wd)
export(newsure,sure)
export(nlevelsWT,nlevelsWT.default)
export(nullevels,nullevels.imwd,nullevels.wd,nullevels.wst)
export(plot.imwd,plot.imwdc,plot.irregwd,plot.mwd,plot.nvwp,plot.wd,plot.wp,plot.wst,plot.wst2D,plotdenwd,plotpkt)
export(print.BP,print.imwd,print.imwdc,print.mwd,print.nv,print.nvwp,print.w2d,print.w2m,print.wd,print.wd3D,print.wp,print.wpst,print.wpstCL,print.wpstDO,print.wpstRO,print.wst,print.wst2D)
export(PsiJ,PsiJmat,Psiname)
export(putC,putC.mwd,putC.wd,putC.wp,putC.wst,putD,putD.mwd,putD.wd,putD.wd3D,putD.wp,putD.wst,putDwd3Dcheck)
export(putpacket,putpacket.wp,putpacket.wst,putpacket.wst2D)
export(rfft,rfftinv,rfftwt)
export(rm.det,rmget,rmname)
export(Shannon.entropy)
export(summary.imwd,summary.imwdc,summary.mwd,summary.wd,summary.wd3D,summary.wp,summary.wpst,summary.wst,summary.wst2D)
export(threshold,threshold.imwd,threshold.imwdc,threshold.irregwd,threshold.mwd,threshold.wd,threshold.wd3D,threshold.wp,threshold.wst)
export(TOgetthrda1,TOgetthrda2,TOkolsmi.chi2,TOonebyone1,TOonebyone2,TOshrinkit,TOthreshda1,TOthreshda2)
export(tpwd,tpwr)
export(uncompress,uncompress.default,uncompress.imwdc)
export(wavegrow)
export(wd,wd.dh,wd.int,wd3D)
export(Whistory,Whistory.wst)
export(wp,wpst,wpst2discr,wpst2m,wpstCLASS,wpstREGR)
export(wr,wr.int,wr.mwd,wr.wd,wr3D)
export(wst,wst2D)
export(wvmoments)
export(wvrelease)
export(WTEnv)
useDynLib(wavethresh, .registration=TRUE, .fixes="C_")
S3method(AvBasis, wst)
S3method(AvBasis, wst2D)
S3method(InvBasis, wp)
S3method(InvBasis, wst)
S3method(IsEarly, default)
S3method(IsEarly, wd)
S3method(LocalSpec, wd)
S3method(LocalSpec, wst)
S3method(MaNoVe, wp)
S3method(MaNoVe, wst)
S3method(accessC, mwd)
S3method(accessC, wd)
S3method(accessC, wp)
S3method(accessC, wst)
S3method(accessD, mwd)
S3method(accessD, wd)
S3method(accessD, wd3D)
S3method(accessD, wp)
S3method(accessD, wpst)
S3method(accessD, wst)
S3method(basisplot, BP)
S3method(basisplot, wp)
S3method(compress, default)
S3method(compress, imwd)
S3method(convert, wd)
S3method(convert, wst)
S3method(draw, default)
S3method(draw, imwd)
S3method(draw, imwdc)
S3method(draw, mwd)
S3method(draw, wd)
S3method(draw, wp)
S3method(draw, wst)
S3method(getpacket, wp)
S3method(getpacket, wpst)
S3method(getpacket, wst)
S3method(getpacket, wst2D)
S3method(image, wd)
S3method(image, wst)
S3method(imwr, imwd)
S3method(imwr, imwdc)
S3method(modernise, wd)
S3method(nlevelsWT, default)
S3method(nullevels, imwd)
S3method(nullevels, wd)
S3method(nullevels, wst)
S3method(plot, imwd)
S3method(plot, imwdc)
S3method(plot, irregwd)
S3method(plot, mwd)
S3method(plot, nvwp)
S3method(plot, wd)
S3method(plot, wp)
S3method(plot, wst)
S3method(plot, wst2D)
S3method(print, BP)
S3method(print, imwd)
S3method(print, imwdc)
S3method(print, mwd)
S3method(print, nv)
S3method(print, nvwp)
S3method(print, w2d)
S3method(print, w2m)
S3method(print, wd)
S3method(print, wd3D)
S3method(print, wp)
S3method(print, wpst)
S3method(print, wpstCL)
S3method(print, wpstDO)
S3method(print, wpstRO)
S3method(print, wst)
S3method(print, wst2D)
S3method(putC, mwd)
S3method(putC, wd)
S3method(putC, wp)
S3method(putC, wst)
S3method(putD, mwd)
S3method(putD, wd)
S3method(putD, wd3D)
S3method(putD, wp)
S3method(putD, wst)
S3method(putpacket, wp)
S3method(putpacket, wst)
S3method(putpacket, wst2D)
S3method(summary, imwd)
S3method(summary, imwdc)
S3method(summary, mwd)
S3method(summary, wd)
S3method(summary, wd3D)
S3method(summary, wp)
S3method(summary, wpst)
S3method(summary, wst)
S3method(summary, wst2D)
S3method(threshold, imwd)
S3method(threshold, imwdc)
S3method(threshold, irregwd)
S3method(threshold, mwd)
S3method(threshold, wd)
S3method(threshold, wd3D)
S3method(threshold, wp)
S3method(threshold, wst)
S3method(uncompress, default)
S3method(uncompress, imwdc)
S3method(Whistory, wst)
S3method(wr, int)
S3method(wr, mwd)
S3method(wr, wd)
importFrom("grDevices", "dev.list", "dev.set")
importFrom("graphics", "axis", "image", "lines", "locator", "matplot", "par", "persp", "plot", "polygon", "segments", "text", "title")
importFrom("stats", "approx", "cor", "dnorm", "fft", "integrate", "mad", "median", "nlminb", "optim", "optimize", "pchisq", "pnorm", "predict", "qnorm", "quantile", "rnorm", "runif", "spectrum", "ts.plot", "var")
importFrom("utils", "object.size")

wavethresh/data/datalist

BabyECG
BabySS
ipd
lennon
teddy

wavethresh/data/BabySS.rda   [binary R data file (bzip2-compressed .rda); contents omitted]
wavethresh/data/ipd.rda      [binary R data file (bzip2-compressed .rda); contents omitted]
wavethresh/data/BabyECG.rda  [binary R data file (bzip2-compressed .rda); contents omitted]
wavethresh/data/teddy.rda    [binary R data file (bzip2-compressed .rda); contents omitted]
wavethresh/data/lennon.rda   [binary R data file (bzip2-compressed .rda); contents omitted]
;ZR2A@(8UVkYɏ+oZhlߑwam' 1Cz,c$A2smBi_`!GCqPuP {H`tcˁ⛼p^\SE%nnc{k֛3]KkFHB.ڣ3PS*LUTm@UXl@ΔUGi{5a7:P;7]r:eb#)"L A7d?g;?:c|=xu"̱ZC6L@)M#ñmEx7&8]G}b暏3,+0"L^YZll3U%7!MKT4VgŮ~'.7)ֲr$HB*Iڶ zbg *sWW0گw/ZLݡJPY+6c6C4j#CJX d5iIfpXi*?1xZj?е8E͕BX]iTfpޝn1\\uF&ԫ0.Jh: ^ ؊p0IdI''0WU{9;.j\Z=},Ԉ@!yaGl%Z2k|"DdfYfnu2NMydȧPꩁ݉=Ȱp4|̑Xqւ]Pbo'HUl|:8X^kEt7JD7,!*`-,1df9vm@`6D(y2hk4q؆Ʌ5~ZRi1Zwu[$zx}=*UnZF+%BeCQ",G|@cu{Ut8 /׳ב5Nk.;;ZUz''^ j yG,U^Zs*ݐT< ?$F;onќ]r% ܨQtI 7 <@Y8.L P%Jj.X:֣Y)dAlb BDJJU+#Un1Z[tҫ`$2!Z}e0R! HJe7sOH?3PʬJ۟ 3v昭/"Glg VC2y|WKc͝fh^{*izT];XiLwf s:rgZwF2LD'LYl[95%#R9c搋8rXzL5@W=rb sL9+_aM5"i{U7'k+ zcKZ'⧰koK_ڞӀɹ<=K U0gYlj v;;g+:}A96ӊn{Lf+A6+Sdj ofDB wvgKy~zqR'A- RB&+xe!r1(됼'o J +Fq &h֦mt^SCU>f0}?zT*|68K>9qL LvVHI Zr\{f'd =}5 U7kQ p?3]>nLU4|_!6ismv7vThb_&k*LzOɍPz1|ez8F&h!̬%ckPlf=Wx~z&xE E>ʻDmxK-@\Z}se+1ꜷ2]wޟX)ƤR@s_AXg3vyjw}AD#)Pe.+w0Øllֲ:Ii8$"xE+]B0ň1S{s&W#al.TJ D5ڰTd ?[b}ikv8RP6x얂lF[: BƐh}Bs7u 𬨲 <\KGbq4]T{9GuLեu"ăt9o qً֝"uy3/ $R9-)p *RqpDN1ANוUW#Y7kF^/0&> #3έt7Ua:rj,pc LVy}UKT}]WSĥE'\ʻ*XD,Tp?QУ=\٣A>5Q{_SxpA[^E),g{g>7= =}F%f8{ɆE IA f2W70IZ0V[z_ثjɌ~t\S =1,Y죾-pX-FDz0p|SbΑ` bTwԚ'N봯KGH^V}/8TC#\PAk*nkȏp |8iKEb]UQQNQ vqrTc5[du)ٟid@&`=Py,BfV6$ >_Y{H֩rM!=u 3ط(:#L ũ}LW? f:SrQ;WL[9?eS6 AyP"0uu $n NT r{N M+ !͏كPpFi0@ed &]МĝAדiѭ~&&os"*Sk[zQ>ij5^ȏ AxCmSʴՊ'B[$T;nxjSĀ_b]4awS:WBˏguqEI~*G3lomz{9XNa(y>DR!Vrxy:׫z$rLa<$Pa7Ehtc) :PկV•p")) d\uY\aB &)L E&:0RBUFLF`IWTz2/d0S[\]ry(a[lFa2!~뻉XI潅d3ɻzwWN4c4y;J-1bf fr\[%Zj'xΤ;}wO3{Ifsțhø lXCdKMw_x"'smݵ@%P𡱁J^^s8#20q ĨRHÉv"`Ƀ,V̀05Z2RatRYuVTlW֡!6 1 !(q;*QH6̞jJM<#ft94O a kP8X58NKācdh!\Ig~ٿSg)c>/Fvc^4x;01D!>9aSkM>]Xr9,y.%T<B{J EwJV*Z-2dPLʄJs!  E-/sXih' j?n~yYY++P,k<._Y7u>榁#` Tc/OK8$~H ;5np:OOIB<\8qtcnPfHB"@20H QY>=m =Ysye ;V LlbbDwo/{aRQA@K Ab+{tC|0p'0Wn7~~=Cwv ۶&BF-z]%A_JMRWe.0PgH6]ۻxNg4fsg!Vm M)سF"aݖ[z D۽aԧ vv8՞rGRuBE٤-rn0).8' } e Lc Y)iB,IBq&t yO/cFYf=bAL5~4׷[bwm KcS |Y0xۄ|he)܁IV[UR+* ;6_ x|Q3E<IKְͿ6+- 4a0ZشlBxN^CI{Yg\`aYH .-&LL3Yb$Hj4 )b8XaAXv`9[ӛ 25Ʃ%%ISnٕ4 +SG]XIpZY(tۉ%zm8N$8}r۫˖z")e.SE%I?O;.Iȵj(gu~>ϡ EK"~ҠfkeTpje*P46(vY2Y/wH؉5b٫R\d]R.8 JJRE`%gmXʵRvʯ*YLaYdXrv1[-MrCu%"1\Z1rsd':YmUFy/!LDs!*@e}2hؼX_syCfMi C/g:Osq!dCp3Q,Adi${ni SeD#Mp2 jRFlM%+h0d&,pBpi'&ażͣ6 pQ1XR0dX&g  5ua͵]tэ,9+Nlӄ8dN#&lL G S>96 dP"8gˋ @6ց $V2P5 BL+V  ib* G" AŃ`(u04b>/p`p yOw5`"r] u;c,!1t YHW/+cƖ)~hV%9ub\\6¢DƖw^<82w%S/M| YE]0.cE"bXF"E8e2֎D$(@WWܵ 4QՉ6= "9B !AFGMdevMٻ&C-pc~{n$VJZUX):8ݖk/:\̒0wc1ݸ Yu$B|Ggc_>{>z#q˛t@DE B]Z413؈me2n)v:Dqa] @H 5H`VFSkTrsλ5,V4Ğ[XHͳ,$NhE|/7w]м4;Ҋ.sKe@ٶXH& h*!IZ4&iP{O5t dc#L9+^IJrB6b Uرۖn\ֆQ\+Ѷ5mlbVڧuW2vlmU,j,QIcF4o;6R̚$!MA$$Q"HPi6ڍImAEFR/*6,@hb( 4&ѥ66(Ɗ(1c3cLI)4(B2hCA ynTQFf%))%Q^#Z)1PJ,V5I,m&0 chɢ 1$sk` 4fI(1j^Δ)[jUvӆI]Uʷ7ƱZ&ڤeDB]1lmU_~I_]N3hؤOOJ0l ^ऻb/ґ6MϠOIr;gy^2ml+N0.Ym]H]lȏ/43V [[:IH]e-Gf|^8J\:ExV񪃨Wr5EB4Q+-ki*ĥD\bϨ@ Jڱ3j;5ʢ6m_U8ZlGUJ6颗4EϳXm+8rWDd"\#"(H}4wavethresh/man/0000755000176200001440000000000014332764500013211 5ustar liggesuserswavethresh/man/levarr.rd0000644000176200001440000000140714211622540015026 0ustar liggesusers\name{levarr} \alias{levarr} \title{Subsidiary routine that generates a particular permutation} \usage{ levarr(v, levstodo) } \arguments{ \item{v}{the vector to permute} \item{levstodo}{the number of levels associated with the current level in the object you wish to permute} } \description{ Not intended for casual user use. This function is used to provide the partition to reorder \code{\link{wst.object}} into \code{\link{wd.object}} (nondecimated time ordered) objects. 
} \details{ Description says all } \value{ A permutation of the \code{v} vector according to the number of levels that need handling } \seealso{\code{\link{getarrvec}}, \code{\link{convert.wd}}, \code{\link{convert.wst}}} \examples{ levarr(1:4, 3) # [1] 1 3 2 4 } \author{G P Nason} \keyword{manip} wavethresh/man/getpacket.wst.rd0000644000176200001440000000661214211622540016321 0ustar liggesusers\name{getpacket.wst} \alias{getpacket.wst} \title{Get packet of coefficients from a packet ordered non-decimated wavelet object (wst).} \description{ This function extracts and returns a packet of coefficients from a packet-ordered non-decimated wavelet object (\code{\link{wst}}) object. The \code{\link{wst}} objects are computed by the \code{\link{wst}} function amongst others. } \usage{ \method{getpacket}{wst}(wst, level, index, type="D", aspect, \dots) } \arguments{ \item{wst}{Packet-ordered non-decimated wavelet object from which you wish to extract the packet from.} \item{level}{The resolution level of the coefficients that you wish to extract.} \item{index}{The index number within the resolution level of the packet of coefficients that you wish to extract.} \item{type}{This argument must be either "\code{C}" or "\code{D}". If the argument is "\code{C}" then non-decimated father wavelet coefficients corresponding to the packet that you want are returned. If the argument is "\code{D}" then non-decimated mother wavelet coefficients are returned. } \item{aspect}{Function applied to the coefficients before return. This is suppled as a character string which gets converted to a function to apply. For example, "Mod" for complex-valued coefficients returns the absolute values.} \item{\dots}{Other arguments} } \details{ The \code{\link{wst}} function produces a packet-ordered non-decimated wavelet object: \code{\link{wst}}. The coefficients in this structure can be organised into a binary tree with each node in the tree containing a packet of coefficients. Each packet is obtained by repeated application of the usual DG quadrature mirror filter with both even and odd dyadic decimation. See the detailed description given in Nason and Silverman, 1995. This function enables whole packets of coefficients to be extracted at any resolution level. The index argument chooses a particular packet within each level and thus ranges from 0 to \eqn{2^{J-j}} for j=0,..., J-1. Each packet corresponds to the wavelet coefficients with respect to different origins. Note that both mother and father wavelet coefficient at different shifts are available by using the type argument. } \value{ A vector containing the packet of packet-ordered non-decimated wavelet coefficients that you wished to extract. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{wst}}, \code{\link{wst.object}}, } \examples{ # # Take the packet-ordered non-decimated transform of some random data # MyWST <- wst(rnorm(1:512)) # # The above data set was 2^9 in length. Therefore there are # coefficients at resolution levels 0, 1, 2, ..., and 8. # # The high resolution coefficients are at level 8. # There should be 256 coefficients at level 8 in index location 0 and 1. 
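# (In this packet ordering, index 0 holds the coefficients that the standard
# decimated transform would produce and index 1 holds those computed after a
# unit cyclic shift of the data.)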
# length(getpacket(MyWST, level=8, index=0)) #[1] 256 length(getpacket(MyWST, level=8, index=1)) #[1] 256 # # There are also 256 FATHER wavelet coefficients at each of these two indices # (origins) # length(getpacket(MyWST, level=8, index=0, type="C")) #[1] 256 length(getpacket(MyWST, level=8, index=1, type="C")) #[1] 256 # # There should be 4 coefficients at resolution level 2 # getpacket(MyWST, level=2, index=0) #[1] -0.92103095 0.70125471 0.07361174 -0.43467375 # # Here are the equivalent father wavelet coefficients # getpacket(MyWST, level=2, index=0, type="C") #[1] -1.8233506 -0.2550734 1.9613138 1.2391913 } \keyword{manip} \author{G P Nason} wavethresh/man/l2norm.rd0000644000176200001440000000175514211622540014752 0ustar liggesusers\name{l2norm} \alias{l2norm} \title{Compute L2 distance between two vectors of numbers. } \description{ Compute L2 distance between two vectors of numbers (square root of sum of squares of differences between two vectors). } \usage{ l2norm(u,v) } \arguments{ \item{u}{first vector of numbers} \item{v}{second vector of numbers} } \details{ Function simply computes the L2 distance between two vectors and is implemented as \code{sqrt(sum((u-v)^2))} } \value{ A real number which is the L2 distance between two vectors. } \note{This function would probably be more accurate if it used the Splus function \code{vecnorm}.} \section{RELEASE}{Version 3.6 Copyright Guy Nason 1995} \seealso{ \code{\link{linfnorm}}, \code{\link{wstCV}}, \code{\link{wstCVl}}. } \examples{ # # What is the L2 norm between the following sets of vectors # p <- c(1,2,3,4,5) q <- c(1,2,3,4,5) r <- c(2,3,4,5,6) l2norm(p,q) # [1] 0 l2norm(q,r) # [1] 2.236068 l2norm(r,p) # [1] 2.236068 } \keyword{algebra} \author{G P Nason} wavethresh/man/convert.wd.rd0000644000176200001440000000623314211622540015626 0ustar liggesusers\name{convert.wd} \alias{convert.wd} \title{Convert a non-decimated wd object into a wst object. } \description{ Convert a time-ordered non-decimated wavelet transform object into a packet-ordered non-decimated wavelet transform object.} \usage{ \method{convert}{wd}(wd, \dots) } \arguments{ \item{wd}{The \code{\link{wd}} class object that you wish to convert.} \item{\dots}{any other arguments} } \details{ In WaveThresh3 a non-decimated wavelet transform can be ordered in two different ways: as a time-ordered or packet-ordered representation. The coefficients in the two objects are \emph{exactly the same} it is just their internal representation and ordering which is different. The two different representations are useful in different situations. The packet-ordering is useful for curve estimation applications and the time-ordering is useful for time series applications. See Nason, Sapatinas and Sawczenko, 1998 for further details on ordering and weaving. Note that the input object must be of the non-decimated type. In other words the type component of the input object must BE "\code{station}". Once the input object has been converted the output can be used with any of the functions suitable for the \code{\link{wst.object}}. The \code{\link{getarrvec}} function actually computes the permutation to weave coefficients from one ordering to another. } \value{ An object of class \code{\link{wst}} containing exactly the same information as the input object but ordered differently as a packet-ordered object. 
} \section{RELEASE}{Version 3.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{convert}}, \code{\link{getarrvec}}, \code{\link{levarr}}, \code{\link{wd}}, \code{\link{wd.object}}, \code{\link{wst}}, \code{\link{wst.object}}. } \examples{ # # Generate a sequence of 32 random normals (say) and take their # \code{time-ordered non-decimated wavelet transform} # myrand <- wd(rnorm(32), type="station") # # Print out the result (to verify the class and type of the object) # #myrand #Class 'wd' : Discrete Wavelet Transform Object: # ~~ : List with 8 components with names # C D nlevelsWT fl.dbase filter type bc date # #$ C and $ D are LONG coefficient vectors ! # #Created on : Tue Sep 29 12:17:53 1998 #Type of decomposition: station # #summary(.): #---------- #Levels: 5 #Length of original: 32 #Filter was: Daub cmpct on least asymm N=10 #Boundary handling: periodic #Transform type: station #Date: Tue Sep 29 12:17:53 1998 # # Yep, the myrand object is of class: \code{\link{wd.object}}. # # Now let's convert it to class \code{\link{wst}}. The object # gets returned and, as usual in S, is printed. # convert(myrand) #Class 'wst' : Stationary Wavelet Transform Object: # ~~~ : List with 5 components with names # wp Carray nlevelsWT filter date # #$wp and $Carray are the coefficient matrices # #Created on : Tue Sep 29 12:17:53 1998 # #summary(.): #---------- #Levels: 5 #Length of original: 32 #Filter was: Daub cmpct on least asymm N=10 #Date: Tue Sep 29 12:17:53 1998 # # Yes. The returned object is of class \code{\link{wst.object}}. # I.e. it has been converted successfully. } \author{G P Nason} \keyword{manip} wavethresh/man/wr.int.rd0000644000176200001440000000272714211622634014766 0ustar liggesusers\name{wr.int} \alias{wr.int} \title{Computes inverse "wavelets on the interval" transform. } \description{ This function actually computes the inverse of the "wavelets on the interval" transform. } \usage{ \method{wr}{int}(wav.int.object, \dots) } \arguments{ \item{wav.int.object}{A list with components defined by the return from the \code{\link{wd.int}} function.} \item{\dots}{any other arguments} } \details{ (The WaveThresh implementation of the ``wavelets on the interval transform'' was coded by Piotr Fryzlewicz, Department of Mathematics, Wroclaw University of Technology, Poland; this code was largely based on code written by Markus Monnerjahn, RHRK, Universitat Kaiserslautern; integration into WaveThresh by \code{GPN}). See the help on the "wavelets on the interval code" in the wd help page. } \value{ The inverse wavelet transform of the wav.int.object supplied. } \note{ It is not recommended that the casual user call this function. The "wavelets on the interval" transform is best called in \code{WaveThresh} via the \code{\link{wd}} function with the argument bc argument set to "\code{interval}". } \section{RELEASE}{Version 3.9.6 (Although Copyright Piotr Fryzlewicz and Markus Monnerjahn 1995-9).} \seealso{ \code{\link{wd.int}}, \code{\link{wd}}, \code{\link{wr}}. } \examples{ # # The user is expected to call the wr # for inverting a "wavelets on the interval transform". 
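#
# A minimal sketch, assuming a dyadic-length vector and a filter.number
# supported by the interval transform: decompose with wd(..., bc="interval")
# and then invert via the generic wr, which dispatches to this routine.
#
#y <- rnorm(64)
#ywdI <- wd(y, filter.number=2, family="DaubExPhase", bc="interval")
#yrec <- wr(ywdI)
#sum((y - yrec)^2) # should be essentially zero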
# } \keyword{smooth} \keyword{nonlinear} \author{Piotr Fryzlewicz and Markus Monnerjahn}
wavethresh/man/griddata.rd0000644000176200001440000000351514211622540015314 0ustar liggesusers\name{griddata objects} \alias{griddata objects} \title{Objects representing data interpolated to a grid.} \description{ These are objects of class \code{griddata}. These objects store the results of interpolating a 1-D regression data set to a grid which is a power of two in length. } \details{ The help page for \code{\link{makegrid}} and Kovac (1997), p.81 give further details about how a \code{griddata} object is constructed. } \value{ The following components must be included in a legitimate griddata object.
\item{gridt}{a vector containing the values of the grid on the "x" axis.}
\item{gridy}{a vector containing the values of the grid on the "y" axis. This vector has to be the same length as gridt. Typically the values in (\code{gridt, gridy}) are the results of interpolating arbitrary data (\code{x,y}) onto (\code{gridt, gridy}).}
\item{G}{Codes the value of the linear interpolant matrix for the corresponding entry in \code{gridt}. The value at each point corresponds to the proportion of the original data point pointed to by \code{Gindex} that contributes to the new value at the corresponding \code{gridt} value. See Kovac (1997), page 81 for further information.}
\item{Gindex}{Each entry in \code{Gindex} refers to one of the pairs in (\code{x,y}) which is contributing to the (\code{gridt, gridy}) interpolant. See the previous entry for \code{G}.} } \section{GENERATION}{This class of objects is returned from the \code{\link{makegrid}} function to represent the results of interpolating a 1-D regression data set to a grid.} \section{METHODS}{The \code{griddata} class of objects really only has one function that uses it: \code{\link{irregwd}}.} \section{RELEASE}{ Version 3.9.6 Copyright Arne Kovac 1997 Copyright Guy Nason (help pages) 1999. } \section{SEE ALSO}{ \code{\link{makegrid}}, \code{\link{irregwd}} } \keyword{manip} \author{Arne Kovac}
wavethresh/man/wr.wd.rd0000644000176200001440000000656514211622634014602 0ustar liggesusers\name{wr.wd} \alias{wr.wd} \title{Wavelet reconstruction for wd class objects (inverse discrete wavelet transform). } \description{ This function performs the reconstruction stage of Mallat's pyramid algorithm (Mallat 1989), i.e. the discrete inverse wavelet transform. The actual transform is performed by some C code, which is dynamically linked into S (if your machine can do this). } \usage{ \method{wr}{wd}(wd, start.level = 0, verbose = FALSE, bc = wd$bc, return.object = FALSE, filter.number = wd$filter$filter.number, family = wd$filter$family, \dots) } \arguments{ \item{wd}{A wavelet decomposition object as returned by \code{\link{wd}}, and described in the help for that function and the help for \code{\link{wd.object}}.} \item{start.level}{The level you wish to start reconstruction at. This is usually the first (level 0). This argument is ignored for a wd object computed using the ``wavelets on the interval'' transform (i.e. using the \code{bc="interval"} option of \code{\link{wd}}).} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so it is turned off by default.} \item{bc}{The boundary conditions used.
Usually these are determined by those used to create the supplied wd object, but you can sometimes change them, with possibly silly results.} \item{filter.number}{The filter number of the wavelet used to do the reconstruction. Again, as for bc, you should probably leave this argument alone. Ignored if the bc component of the \code{\link{wd}} object is "\code{interval}".} \item{family}{The type of wavelet used to do the reconstruction. You can change this argument from the default but it is probably NOT wise. Ignored if the bc component of the \code{\link{wd}} object is "\code{interval}".} \item{return.object}{If this is FALSE then the top level of the reconstruction is returned (this is the reconstructed function at the highest resolution). Otherwise, if it is TRUE, the whole reconstructed wd object is returned. Ignored if the \code{bc} component of the \code{\link{wd}} object is "\code{interval}".} \item{\dots}{any other arguments} } \details{ The code implements Mallat's inverse pyramid algorithm. In the reconstruction the quadrature mirror filters G and H are supplied with c0 and d0, d1, ..., d(m-1) (the wavelet coefficients) and rebuild c1, ..., cm. If the \code{bc} component of the \code{\link{wd}} object is "\code{interval}" then the \code{wr.int} function, which implements the inverse ``wavelet on the interval'' transform due to Cohen, Daubechies and Vial, 1993, is used instead. } \value{ Either a vector containing the top level reconstruction or an object of class wd containing the results of the reconstruction, details to be found in the help for \code{\link{wd.object}}. } \section{RELEASE}{Version 3 Copyright Guy Nason 1994 Integration of ``wavelets on the interval'' code by Piotr Fryzlewicz and Markus Monnerjahn was at Version 3.9.6, 1999. } \seealso{ \code{\link{wd}}, \code{\link{wr.int}}, \code{\link{accessC}}, \code{\link{accessD}}, \code{\link{filter.select}}, \code{\link{plot.wd}}, \code{\link{threshold}} } \examples{ # # Take the wd object generated in the examples for wd (called wds) # # Invert this wd object # #yans <- wr(wds) # # Compare it to the original, called y # #sum((yans-y)^2) #[1] 9.805676e-017 # # A very small number # } \keyword{smooth} \keyword{nonlinear} \author{G P Nason}
wavethresh/man/putpacket.rd0000644000176200001440000000237114211622634015540 0ustar liggesusers\name{putpacket} \alias{putpacket} \title{Insert a packet of coefficients into a wavelet object. } \description{ This generic function inserts packets of coefficients into various types of wavelet objects. This function is generic. Particular methods exist. For objects of class: \describe{ \item{wp}{use the \code{\link{putpacket.wp}} method.} \item{wst}{use the \code{\link{putpacket.wst}} method.} \item{wst2D}{use the \code{\link{putpacket.wst2D}} method.} } See individual method help pages for operation and examples. Use the \code{\link{putC}} and \code{\link{putD}} functions to insert whole resolution levels of coefficients simultaneously. } \usage{ putpacket(\dots) } \arguments{ \item{\dots}{See individual help pages for details.} } \value{ A wavelet object of the same class as \code{x}, the input object. The returned wavelet object is the same as the input except that the appropriate packet of coefficients supplied is replaced. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{putpacket.wp}}, \code{\link{putpacket.wst}}, \code{\link{putpacket.wst2D}}, \code{\link{putD}}, \code{\link{putC}}, \code{\link{wp.object}}, \code{\link{wst.object}}, \code{\link{wst2D.object}}.
} \keyword{manip} \author{G P Nason} wavethresh/man/plot.wp.rd0000644000176200001440000001401214211622540015132 0ustar liggesusers\name{plot.wp} \alias{plot.wp} \title{Plot wavelet packet transform coefficients} \usage{ \method{plot}{wp}(x, nvwp = NULL, main = "Wavelet Packet Decomposition", sub, first.level = 5, scaling = "compensated", dotted.turn.on = 5, color.force = FALSE, WaveletColor = 2, NodeVecColor = 3, fast = FALSE, SmoothedLines = TRUE, ...) } \arguments{ \item{x}{The wp object whose coefficients you wish to plot.} \item{nvwp}{An optional associated wavelet packet node vector class object of class \code{nvwp}. This object is a list of packets in the wavelet packet table. If this argument is specified then it is possible to highlight the packets in the nvwp objects in a different color using the \code{NodeVecColor} argument} \item{main}{The main title of the plot.} \item{sub}{A subtitle for the plot.} \item{first.level}{The first resolution level to begin plotting at. This argument can be quite useful when you want to supress some of the coarser levels in the diagram.} \item{scaling}{How you want the coefficients to be scaled. The options are: \code{global} - one scale factor is chosen for the whole plot. The scale factor depends on the coefficient to be included on the plot that has the largest absolute value. The global option is useful when comparing coefficients that might appear anywhere in the plot; \code{by.level} - a scale factor is chosen for each resolution level in the plot. The scale factor for a level depends on the coefficient in that level that has the largest absolute value. The \code{by.level} option is useful when you wish to compare coefficients within a resolution level. The other option is \code{compensated} which is the same as global except for that finer scales' coefficients are scaled up by a factor of SQRT(2) I don't know why compensated is the default option? That is probably silly!} \item{dotted.turn.on}{The plot usually includes some dotted vertical bars that separate wavelet packets to make it clearer which packets are which. This option controls the coarsest resolution level at which dotted lines appear. All levels equal to and finer than this level will receive the vertical dotted lines.} \item{color.force}{If FALSE then some "clever" code in CanUseMoreThanOneColor tries to figure out how many colours can be used (THIS HAS NOT BEEN MADE TO WORK IN R) and hence whether colour can be used to pick out wavelet packets or elements of a node vector. This option was designed to work with S. It doesn't work with R and so it is probably best to set \code{color.force=T}. In this way no interrogation is done and the lines/packets are plotted in the appropriate colours with no questions asked.} \item{WaveletColor}{A colour specification for the colour for wavelet coefficients. Wavelet coefficients are a component of wavelet packet coefficients and this option allows them to be drawn in a different color. In R you can use names like "red", "blue" to select the colors. In R you'll also need to set the color.force option to TRUE.} \item{NodeVecColor}{If a nvwp object is supplied this option can force coefficients that are part of that nvwp to be drawn in the specified color. See the explanation for the \code{WaveletColor} option above about specification in R.} \item{fast}{This option no longer does anything.} \item{SmoothedLines}{If TRUE then the scaling function coefficients are drawn using lines (and look like mini versions of the original). 
If FALSE then the scaling function coefficients are drawn using the \code{segments} function and look like a coarser shadowy version of the original.} \item{\dots}{Other arguments to the plot command} } \description{ This function plots wavelet packet transform coefficients arising from a \code{\link{wp.object}} object. } \details{ A wavelet packet object contains wavelet packet coefficients of a signal (usually obtained by the \code{\link{wp}} wavelet packet transform function). Given a wavelet packet object wp it possesses \code{nlevelsWT(wp)} resolution levels. In WaveThresh the coarsest level is level 0 and the finest is level nlevelsWT-1. For wavelet packets the number of packets at level j is 2^(nlevelsWT-j). This function plots the wavelet packet coefficients. At the bottom of the plot the original input function (if present) is plotted. Then levels above the original plot successively coarser wavelet packet coefficients. From the Mallat transform point of view smoothing goes up off the the left of the picture and detail to the right. The packets are indexed from 0 to the number of packets going from left to right within each resolution level. The function has the ability to draw wavelet coefficients in a different color using the \code{WaveletColor} argument. Optionally, if a node vector wavelet packet object is also supplied, which contains the specification of a basis selected from the packet table, then packets in that node vector can be highlighted in a another colour determined by the \code{NodeVecColor}. Packets are drawn on the plot and can be separated by vertical dotted lines. The resolution levels at which this happens can be controlled by the \code{dotted.turn.on} option. The coarsest resolution level to be drawn is controlled by the \code{first.level} option. } \value{ Nothing } \seealso{\code{\link{MaNoVe}}, \code{\link{wp}}, \code{\link{wp.object}}} \examples{ # # Generate some test data # v <- DJ.EX()$blocks # # Let's plot these to see what they look like # \dontrun{plot(v, type="l")} # # Do a wavelet packet transform # vwp <- wp(v) # # And create a node vector # vnv <- MaNoVe(vwp) # # Now plot the wavelet packets with the associated node vector # \dontrun{plot(vwp, vnv, color.force=T, WaveletColor="red", dotted.turn.on=7)} # # The wavelet coefficients are plotted in red. Packets from the node vector # are depicted in green. The node vector gets plotted after the wavelet # coefficients so the green packets overlay the red (retry the plot command # but without the vnv object to see just the # wavelet coefficients). The vertical dotted lines start at resolution # level 7. # # } \author{G P Nason} \keyword{hplot} wavethresh/man/DJ.EX.rd0000644000176200001440000000445114211622540014345 0ustar liggesusers\name{DJ.EX} \alias{DJ.EX} \title{Produce Donoho and Johnstone test functions} \usage{ DJ.EX(n=1024, signal=7, rsnr=7, noisy=FALSE, plotfn=FALSE) } \arguments{ \item{n}{Number of samples of the function required.} \item{signal}{A factor that multiples the function values.} \item{rsnr}{If Gaussian noise is to be added to the functions then this argument specifies the root signal to noise ratio.} \item{noisy}{If TRUE then Gaussian noise is added to the signal so that the root signal to noise ratio is \code{rsnr}. If FALSE then just the signals are returned.} \item{plotfn}{If TRUE then a plot is produced. If FALSE no plot is produced.} } \description{ Function to produce the blocks, bumps, Doppler and heavisine functions described by Donoho and Johnstone (1994). 
} \details{ The Donoho and Johnstone test functions were designed to reproduce various features to be found in real world signals such as jump discontinuities (blocks), spikes (the NMR-like bumps), varying frequency behaviour (Doppler) and jumps/spikes in smooth signals (heavisine). These functions are most often used for testing wavelet shrinkage methods and comparing them to other nonparametric regression techniques. (Donoho, D.L. and Johnstone, I.M. (1994), Ideal spatial adaptation by wavelet shrinkage. \emph{Biometrika}, \bold{81}, 425--455). Another version of the Doppler function can be found in the standalone \code{\link{doppler}} function. Another function for this purpose is the Piecewise Polynomial created in Nason and Silverman (1994) an encapsulated in WaveThresh by \code{\link{example.1}} (Nason, G.P. and Silverman, B.W. (1994) The discrete wavelet transform in S, \emph{J. Comput. Graph. Statist.}, \bold{3}, 163--191. \emph{NOTE: This function might not give exactly the same function values as the equivalent function in WaveLab} } \value{ A list with four components: blocks, bumps, heavi and doppler containing the sampled signal values for the four types of Donoho and Johnstone test functions. Each of these are deemed to be sampled on an equally spaced grid from 0 to 1. } \seealso{\code{\link{doppler}},\code{\link{example.1}}, \code{\link{threshold}}, \code{\link{wd}}} \examples{ # # Show a picture of the four test functions with the default args # \dontrun{DJ.EX(plotfn=TRUE)} } \author{Theofanis Sapatinas} \keyword{nonparametric} wavethresh/man/threshold.wst.rd0000644000176200001440000002135314211622634016351 0ustar liggesusers\name{threshold.wst} \alias{threshold.wst} \title{Threshold (NDWT) packet-ordered non-decimated wavelet decomposition object} \description{ This function provides various ways to threshold a \code{\link{wst}} class object } \usage{ \method{threshold}{wst}(wst, levels = 3:(nlevelsWT(wst) - 1), dev = madmad, policy = "universal", value = 0, by.level = FALSE, type = "soft", verbose = FALSE, return.threshold = FALSE, cvtol = 0.01, cvnorm = l2norm, add.history = TRUE, \dots) } \arguments{ \item{wst}{The packet ordered non-decimated wavelet decomposition object that you wish to threshold.} \item{levels}{a vector of integers which determines which scale levels are thresholded in the decomposition. Each integer in the vector must refer to a valid level in the \code{\link{wst}} object supplied. This is usually any integer from 0 to \code{\link{nlevelsWT}}(wst)-1 inclusive. Only the levels in this vector contribute to the computation of the threshold and its application. } \item{dev}{this argument supplies the function to be used to compute the spread of the absolute values coefficients. The function supplied must return a value of spread on the variance scale (i.e. not standard deviation) such as the \code{var()} function. A popular, useful and robust alternative is the \code{\link{madmad}} function} \item{policy}{selects the technique by which the threshold value is selected. Each policy corresponds to a method in the literature. At present the different policies are: "\code{universal}", "\code{LSuniversal}", "\code{\link{sure}}", "\code{cv}", "\code{manual}", The policies are described in detail \code{below}. } \item{value}{This argument conveys the user supplied threshold. 
If the \code{policy="manual"} then value is the actual threshold value.} \item{by.level}{If FALSE then a global threshold is computed on and applied to all scale levels defined in \code{levels}. If TRUE a threshold is computed and applied separately to each scale level.} \item{type}{determines the type of thresholding this can be "\code{hard}" or "\code{soft}".} \item{verbose}{if TRUE then the function prints out informative messages as it progresses.} \item{return.threshold}{If this option is TRUE then the actual \emph{value} of the threshold is returned. If this option is FALSE then a thresholded version of the input is returned.} \item{cvtol}{Parameter for the cross-validation "\code{cv}" policy.} \item{cvnorm}{A function to compute the distance between two vectors. Two useful possibilities are \code{\link{l2norm}} and \code{\link{linfnorm}}. Selection of different metrics causes the cross-validation denoising method to optimize for different criteria.} \item{add.history}{If \code{TRUE} then the thresholding operation details are add to the returned \code{\link{wst}} object. This can be useful when later tracing how an object has been treated.} \item{\dots}{any other arguments} } \details{ This function thresholds or shrinks wavelet coefficients stored in a \code{\link{wst}} object and returns the coefficients in a modified \code{\link{wst}} object. The thresholding step is an essential component of denoising using the \code{packet-ordered non-decimated wavelet transform}. If the denoising is carried out using the \code{\link{AvBasis}} basis averaging technique then this software is an implementation of the Coifman and Donoho translation-invariant (TI) denoising. (Although it is the denoising technique which is translation invariant, not the packet ordered non-decimated transform, which is translation equivariant). However, the \code{threshold.wst} algorithm can be used in other denoising techniques in conjunction with the basis selection and inversion functions \code{\link{MaNoVe}} and \code{\link{InvBasis}}. The basic idea of thresholding is very simple. In a signal plus noise model the wavelet transform of signal is very sparse, the wavelet transform of noise is not (in particular, if the noise is iid Gaussian then so if the noise contained in the wavelet coefficients). Thus since the signal gets concentrated in the wavelet coefficients and the noise remains "spread" out it is "easy" to separate the signal from noise by keeping large coefficients (which correspond to signal) and delete the small ones (which correspond to noise). However, one has to have some idea of the noise level (computed using the dev option in threshold functions). If the noise level is very large then it is possible, as usual, that no signal "sticks up" above the noise. Many of the pragmatic comments for successful thresholding given in the help for \code{\link{threshold.wd}} hold true here: after all non-decimated wavelet transforms are merely organized collections of standard (decimated) discrete wavelet transforms. We reproduce some of the issues relevant to thresholding \code{\link{wst}} objects. Some issues to watch for: \describe{ \item{levels}{The default of \code{levels = 3:(nlevelsWT(wd) - 1)} for the \code{levels} option most certainly does not work globally for all data problems and situations. The level at which thresholding begins (i.e. the given threshold and finer scale wavelets) is called the \code{primary resolution} and is unique to a particular problem. 
In some ways, choice of the primary resolution is very similar to choosing the bandwidth in kernel regression, albeit on a logarithmic scale. See Hall and Patil (1995) and Hall and Nason (1997) for more information. For each data problem you need to work out which is the best primary resolution. This can be done by gaining experience at what works best, or by using prior knowledge. It is possible to "automatically" choose a "best" primary resolution using cross-validation (but not yet in WaveThresh). Secondly, the function computes and applies the threshold at the levels specified in the \code{levels} argument. It does this for all the levels specified. Sometimes, in wavelet shrinkage, the threshold is computed using only the finest scale coefficients (or more precisely the estimate of the overall noise level). If you want your threshold variance estimate only to use the finest scale coefficients (e.g. with universal thresholding) then you will have to apply the \code{threshold.wd} function twice. Once, with levels set equal to \code{\link{nlevelsWT}}(wd)-1 and with \code{return.threshold=TRUE}, to return the threshold computed on the finest scale, and then apply the threshold function with the \code{manual} option, supplying the value of the previously computed threshold as the \code{value} option. } \item{by.level}{For a \code{\link{wd}} object which has come from data with correlated noise you should have a threshold computed for each resolution level. See the paper by Johnstone and Silverman, 1997.} } } \value{ An object of class \code{\link{wst}}. This object contains the thresholded wavelet coefficients. Note that if the \code{return.threshold} option is set to TRUE then the threshold values will be returned rather than the thresholded object. } \note{ This section gives a brief description of the different thresholding policies available. For further details \emph{see the associated papers}. If there is no paper available then a small description is provided here. More than one policy may be good for a problem, so experiment! Some of the policies here were specifically adapted to the \code{\link{wst.object}} but some weren't, so beware. They are arranged here in alphabetical order: \describe{ \item{cv}{See Nason, 1996.} \item{LSuniversal}{See Nason, von Sachs and Kroisandt, 1998. This is used for smoothing of a wavelet periodogram and shouldn't be used generally.} \item{manual}{specify a user supplied threshold using \code{value} to pass the value of the threshold. The \code{value} argument should be a vector. If it is of length 1 then it is replicated to be the same length as the \code{levels} vector, otherwise it is repeated as many times as is necessary to be the levels vector's length. In this way, different thresholds can be supplied for different levels.
Note that the \code{by.level} option has no effect with this policy.}\item{sure}{See Donoho and Johnstone, 1994 and Johnstone and Silverman, 1997.} \item{universal}{See Donoho and Johnstone, 1995.} } } \section{RELEASE}{Version 3.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{AvBasis}}, \code{\link{AvBasis.wst}}, \code{\link{InvBasis}}, \code{\link{InvBasis.wst}}, \code{\link{MaNoVe}},\code{\link{MaNoVe.wst}}, \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{threshold}}. } \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/logabs.rd0000644000176200001440000000111314211622540014774 0ustar liggesusers\name{logabs} \alias{logabs} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Take the logarithm of the squares of the argument } \description{ Take the log of the squares of the argument } \usage{ logabs(x) } \arguments{ \item{x}{A number } } \details{ Description says all } \value{ Just the logarithm of the square of the argument } \author{ G P Nason } \seealso{\code{\link{image.wd}}, \code{\link{image.wst}} } \examples{ logabs(3) # [1] 1.098612 } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{math} wavethresh/man/wst.rd0000644000176200001440000001030714211622634014353 0ustar liggesusers\name{wst} \alias{wst} \title{Packet-ordered non-decimated wavelet transform.} \description{ Computes the packet-ordered non-decimated wavelet transform (TI-transform). This algorithm is functionally equivalent to the time-ordered non-decimated wavelet transform (computed by \code{\link{wd}} with the \code{type="station"} argument). } \usage{ wst(data, filter.number=10, family="DaubLeAsymm", verbose=FALSE) } \arguments{ \item{data}{A vector containing the data you wish to decompose. The length of this vector must be a power of 2.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments. Note: as of version 4.6 you can use the Lina-Mayrand complex-valued compactly supported wavelets. } \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so it is turned off by default.} } \details{ The packet-ordered non-decimated wavelet transform is more properly known as the TI-transform described by Coifman and Donoho, 1995. A description of this implementation can be found in Nason and Silverman, 1995. The coefficients produced by this transform are exactly the same as those produced by the \code{\link{wd}} function with the \code{type="station"} option \emph{except} in that function the coefficients are \emph{time-ordered}. In the \code{wst} function the coefficients are produced by a wavelet packet like algorithm with a \emph{cyclic rotation} step instead of processing with the father wavelet mirror filter at each level. The coefficients produced by this function are useful in curve estimation problems in conjunction with the thresholding function \code{\link{threshold.wst}} and either of the inversion functions \code{\link{AvBasis.wst}} and \code{\link{InvBasis.wst}} The coefficients produced by the \code{time-ordered non-decimated wavelet transform} are more useful for time series applications: e.g. 
the evolutionary wavelet spectrum computation performed by \code{\link{ewspec}}. Note that a time-ordered non-decimated wavelet transform object may be converted into a packet-ordered non-decimated wavelet transform object (and vice versa) by using the \code{\link{convert}} function. } \value{ An object of class: \code{\link{wst}}. The help for the \code{\link{wst}} describes the intricate structure of this class of object. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1995 } \seealso{ \code{\link{wst.object}}, \code{\link{threshold.wst}}, \code{\link{AvBasis.wst}}, \code{\link{InvBasis.wst}}, \code{\link{filter.select}}, \code{\link{convert}}, \code{\link{ewspec}}, \code{\link{plot.wst}}, } \examples{ # # Let's look at the packet-ordered non-decimated wavelet transform # of the data we used to do the time-ordered non-decimated wavelet # transform exhibited in the help page for wd. # test.data <- example.1()$y # # Plot it to see what it looks like (piecewise polynomial) # \dontrun{ts.plot(test.data)} # # Now let's do the packet-ordered non-decimated wavelet transform. # TDwst <- wst(test.data) # # And let's plot it.... # \dontrun{plot(TDwst)} # # The coefficients in this plot at each resolution level are the same # as the ones in the non-decimated transform plot in the wd # help page except they are in a different order. For more information # about how the ordering works in each case see # Nason, Sapatinas and Sawczenko, 1998. # # Next examples # ------------ # The chirp signal is also another good examples to use. # # # Generate some test data # test.chirp <- simchirp()$y \dontrun{ts.plot(test.chirp, main="Simulated chirp signal")} # # Now let's do the packet-ordered non-decimated wavelet transform. # For a change let's use Daubechies extremal phase wavelet with 6 # vanishing moments (a totally arbitrary choice, please don't read # anything into it). # chirpwst <- wst(test.chirp, filter.number=6, family="DaubExPhase") \dontrun{plot(chirpwst, main="POND WT of Chirp signal")} } \keyword{math} \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/wst2D.object.rd0000644000176200001440000000743714211622634016020 0ustar liggesusers\name{wst2D.object} \alias{wst2D.object} \title{(Packet ordered) Two-dimensional nondecimated wavelet transform decomposition objects.} \description{ These are objects of class \code{wst2D} They represent a decomposition of a function with respect to a set of (all possible) shifted two-dimensional wavelets. They are a 2D extension of the \code{\link{wst.object}}. } \details{ To retain your sanity we recommend that the coefficients from a \code{wst2D} object be extracted or replaced using \itemize{ \item{\code{\link{getpacket.wst2D}} to obtain individual packets of either father or mother wavelet coefficients.} \item{\code{\link{putpacket.wst2D}} to insert coefficients.} } You can obtain the coefficients directly from the \code{wst2D$wst2D} component but you have to understand their organization described above.} \value{ The following components must be included in a legitimate \code{wst2D} object. \item{wst2D}{This a three-dimensional array. Suppose that the original image that created the \code{wst2D} object is n x n. Then the dimension of the \code{wst2D} array is [ nlevelsWT, 2n, 2n]. The first index of the array refers to the resolution level of the array with "resolution level = index - 1" (so, e.g. the coarsest scale detailed is stored at index 1 and the finest at index nlevels). 
For a given resolution level (selected first index) the associated 2n x 2n matrix contains the two-dimensional non-decimated wavelet coefficients for that level packed as follows. At the finest resolution level the 2n x 2n coefficient image may be broken up into four n x n subimages. Each of the four images corresponds to data shifts in the horizontal and vertical directions. The top left image corresponds to ``no shift'' and indeed the top left image corresponds to the coefficients obtained using the decimated 2D wavelet transform as obtained using the \code{\link{imwd}} function. The top right image corresponds to a horizontal data shift; the bottom left to a vertical data shift and the bottom right corresponds to both horizontal and vertical data shift. Within each of the four n x n images named in the previous paragraph are again 4 subimages each of dimension n/2 x n/2. These correspond to (starting at the top left and moving clockwise) the smooth (CC), horizontal detail (DC), diagonal detail (DD) and vertical detail (CD). At coarser resolution levels the coefficients are smaller submatrices corresponding to various levels of data shifts and types of detail (smooth, horizontal, vertical, diagonal). We strongly recommend the use of the \code{\link{getpacket.wst2D}} and \code{\link{putpacket.wst2D}} functions to remove and replace coefficients from \code{wst2D}} objects. \item{nlevelsWT}{The number of levels in the decomposition. If you raise 2 to the power of 2 \code{nlevels} you get the number of data points used in the decomposition.} \item{filter}{a list containing the details of the filter that did the decomposition (equivalent to the return value from the \code{\link{filter.select}} function).} \item{date}{The date that the transform was performed or the \code{wst2D} was modified.} } \section{GENERATION}{ This class of objects is returned from the \code{\link{wst2D}} function which computes the \emph{packets-ordered} two-dimensional non-decimated wavelet transform (effectively all possible shifts of the standard two-dimensional discrete wavelet transform). Many other functions return an object of class \code{wst2D}. } \section{METHODS}{ The wst2D class of objects has methods for the following generic functions: \code{\link{AvBasis}}, \code{\link{getpacket}}. \code{\link{plot}}, \code{\link{print}}, \code{\link{putpacket}}, \code{\link{summary}}, } \section{RELEASE}{ Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wst2D}} } \keyword{classes} \keyword{smooth} \author{G P Nason} wavethresh/man/compress.default.rd0000644000176200001440000000446214211622540017015 0ustar liggesusers\name{compress.default} \alias{compress.default} \title{Do "zero" run-length encoding compression of a vector of numbers. } \description{ Efficiently compress a vector containing many zeroes. } \usage{ \method{compress}{default}(v, verbose=FALSE,\dots) } \arguments{ \item{v}{The vector that you wish to compress. This compression function is efficient at compressing vectors with many zeroes, but is not a \emph{general} compression routine.} \item{verbose}{If\code{TRUE} then this routine prints out the degree of compression achieved. } \item{\dots}{any other arguments} } \details{ Images are large objects. Thresholded 2d wavelet objects (\code{\link{imwd}}) are also large, but many of their elements are zero. 
compress.default takes a vector, decides whether compression is necessary and if it is makes an object of class \code{compressed} containing the nonzero elements and their position in the original vector. The decision whether to compress the vector or not depends on two things, first the number of non-zero elements in the vector (r, say), and second the length of the vector (n, say). Since the position and value of the non-zero elements is stored we will need to store 2r values for the non-zero elements. So compression takes place if \code{2r < n}. This function is the default method for the generic function \code{\link{compress}}. It can be invoked by calling compress for an object of the appropriate class, or directly by calling compress.default regardless of the class of the object. } \value{ An object of class compressed if \code{compression} took place, otherwise a an object of class \code{uncompressed}. } \note{ Sometimes the compressed object can be larger than the original. This usually only happens for small objects, so it doesn't really matter. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{compress}}, \code{\link{imwd}}, \code{\link{threshold.imwd}}, \code{\link{uncompress}} } \examples{ # # Compress a vector with lots of zeroes # compress(c(rep(0,100),99)) #$position: #[1] 101 # #$values: #[1] 99 # #$original.length: #[1] 101 # #attr(, "class"): #[1] "compressed" # # Try to compress a vector with not many zeroes # compress(1:10) #$vector: #[1] 1 2 3 4 5 6 7 8 9 10 # #attr(, "class"): #[1] "uncompressed" # # } \keyword{manip} \author{G P Nason} wavethresh/man/lt.to.name.rd0000644000176200001440000000544514211622540015520 0ustar liggesusers\name{lt.to.name} \alias{lt.to.name} \title{Convert desired level and orientation into code used by imwd} \usage{ lt.to.name(level, type) } \arguments{ \item{level}{Resolution level of coefficients that you want to extract or manipulate.} \item{type}{One of CC, CD, DC or DD indicating smoothed, horizontal, vertical or diagonal coefficients} } \description{ Function codes the name of a desired level and wavelet coefficient orientation into a string which is used by the 2D DWT functions to access and manipulate wavelet coefficients. } \details{ For the 1D wavelet transform (and others) the \code{\link{accessC}} and \code{\link{accessD}} function extracts wavelet coefficients from 1D wavelet decomposition objects. For \code{\link{imwd.object}} class objects, which are the 2D wavelet transforms of lattice objects (images) the wavelet coefficients are stored within components of the list object that underlies the imwd object. This function provides an easy way to specify a resolution level and orientation in a human readable way and this function then produces the character string necessary to access the wavelet coefficients in an imwd object. Note that this function \emph{does not} actually extract any coefficients itself. } \value{ A character string which codes the level and type of coefficients. It reads wXLY X is the resolution level and Y is an integer corresponding to the orientation (1=horizontal, 2=vertical, 3=diagonal, 4=smoothed). } \seealso{ \code{\link{imwd}}, \code{\link{imwd.object}} } \examples{ # # Generate the character string for the component of the imwd object # # The string associated with the diagonal detail at the third level... # lt.to.name(3, "DD") # [1] "w3L3" # # Show how to access wavelet coefficients of imwd object. 
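# (Each packet of coefficients is stored as a component of the list
# underlying the imwd object; the component name is the string that
# lt.to.name constructs.)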
# # First, make up some data (using matrix/rnorm) and then subject it # to an image wavelet transform. # tmpimwd <- imwd(matrix(rnorm(64),64,64)) # # Get the horizontal coefficients at the 2nd level # tmpimwd[[ lt.to.name(2, "CD") ]] # [1] 6.962251e-13 4.937486e-12 3.712157e-12 -3.064831e-12 6.962251e-13 # [6] 4.937486e-12 3.712157e-12 -3.064831e-12 6.962251e-13 4.937486e-12 # [11] 3.712157e-12 -3.064831e-12 6.962251e-13 4.937486e-12 3.712157e-12 # [16] -3.064831e-12 # # # If you want the coefficients returned as a matrix use the matrix function, # i.e. # matrix(tmpimwd[[ lt.to.name(2, "CD") ]], 4,4) # [,1] [,2] [,3] [,4] #[1,] 6.962251e-13 6.962251e-13 6.962251e-13 6.962251e-13 #[2,] 4.937486e-12 4.937486e-12 4.937486e-12 4.937486e-12 #[3,] 3.712157e-12 3.712157e-12 3.712157e-12 3.712157e-12 #[4,] -3.064831e-12 -3.064831e-12 -3.064831e-12 -3.064831e-12 # # Note that the dimensions of the matrix depend on the resolution level # that you extract and dim = 2^level } \author{G P Nason} \keyword{manip} wavethresh/man/wr.rd0000644000176200001440000000131414211622634014164 0ustar liggesusers\name{wr} \alias{wr} \title{Wavelet reconstruction (inverse DWT).} \description{ Performs inverse discrete wavelet transform. This function is generic. Particular methods exist. For the \code{\link{wd}} class object this generic function uses \code{\link{wr.wd}}. } \usage{ wr(...) } \arguments{ \item{\dots}{See individual help pages for details.} } \details{ See individual method help pages for operation and examples. } \value{ Usually the wavelet reconstruction of x. Although the return value varies with the precise method used. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wd}}, \code{\link{wd.object}}, \code{\link{wr.wd}} } \keyword{manip} \author{G P Nason} wavethresh/man/nullevels.wd.rd0000644000176200001440000000426414211622540016161 0ustar liggesusers\name{nullevels.wd} \alias{nullevels.wd} \title{Sets whole resolution levels of coefficients equal to zero in a wd object.} \description{ Sets whole resolution levels of coefficients equal to zero in a \code{\link{wd.object}} } \usage{ \method{nullevels}{wd}(wd, levelstonull, \dots) } \arguments{ \item{wd}{An object of class \code{\link{wd}}.} \item{levelstonull}{An integer vector specifying which resolution levels of coefficients of \code{\link{wd}} that you wish to set to zero. } \item{\dots}{any other arguments} } \details{ Setting whole resolution levels of coefficients to zero can be very useful. For examples, one can construct a linear smoothing method by setting all coefficients above a particular resolution (the \emph{primary resolution} equal to zero. Also setting particular levels equal to zero can also be useful for removing noise which is specific to a particular resolution level (as long as important signal is not also contained at that level). Note that this function removes the horiztonal, diagonal and vertical detail coefficients at the resolution level specified. It does not remove the father wavelet coefficients at those resolution levels. To remove individual coefficients on a systematic basis you probably want to look at the \code{\link{threshold}} function. } \value{ An object of class \code{\link{wd}} where the coefficients in resolution levels specified by \code{levelstonull} have been set to zero. } \section{RELEASE}{Version 3.8.1 Copyright Guy Nason 1997 } \seealso{ \code{\link{nullevels}}, \code{\link{wd}}, \code{\link{wd.object}}, \code{\link{threshold}}. 
} \examples{ # # Generate some test data # test.data <- example.1()$y # # Do wavelet transform of test.data and plot the wavelet coefficients # wds <- wd(test.data) \dontrun{plot(wds)} # # Now let us set all the coefficients in ODD resolution levels equal to zero! # # This is just to illustrate the capabilities of the function. I cannot # imagine you wanting to do this in practice! ## wdsnl <- nullevels(wds, levelstonull = c(1, 3, 5, 7)) # # Now let's plot the result # \dontrun{plot(wdsnl, scaling = "by.level")} # # Lo and behold the odd levels have been set to zero! } \keyword{manip} \author{G P Nason} wavethresh/man/summary.wp.rd0000644000176200001440000000123114211622634015654 0ustar liggesusers\name{summary.wp} \alias{summary.wp} \title{Print out some basic information associated with a wp object} \usage{ \method{summary}{wp}(object, \dots) } \arguments{ \item{object}{The object to print a summary about} \item{\dots}{Other arguments} } \description{ Prints out the number of levels, the length of the original vector from which the object came, the type of wavelet filter associated with the decomposition. } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{wp}}} \examples{ vwp <- wp(rnorm(32)) summary(vwp) #Levels: 5 #Length of original: 32 #Filter was: Daub cmpct on least asymm N=10 } \author{G P Nason} \keyword{print} wavethresh/man/plot.wst.rd0000644000176200001440000001123714211622540015327 0ustar liggesusers\name{plot.wst} \alias{plot.wst} \title{Plot packet-ordered non-decimated wavelet transform coefficients. } \usage{ \method{plot}{wst}(x, main = "Nondecimated Wavelet (Packet) Decomposition", sub, first.level = 5, scaling = "compensated", dotted.turn.on = 5, aspect = "Identity", ...) } \arguments{ \item{x}{The wst object whose coefficients you wish to plot.} \item{main}{The main title of the plot.} \item{sub}{A subtitle for the plot.} \item{first.level}{The first resolution level to begin plotting at. This argument can be quite useful when you want to suppress some of the coarser levels in the diagram.} \item{scaling}{How you want the coefficients to be scaled. The options are: \code{global} - one scale factor is chosen for the whole plot. The scale factor depends on the coefficient to be included on the plot that has the largest absolute value. The global option is useful when comparing coefficients that might appear anywhere in the plot; \code{by.level} - a scale factor is chosen for each resolution level in the plot. The scale factor for a level depends on the coefficient in that level that has the largest absolute value. The \code{by.level} option is useful when you wish to compare coefficients within a resolution level. The other option is \code{compensated} which is the same as global except for that finer scales' coefficients are scaled up by a factor of SQRT(2) I don't know why compensated is the default option? It is a bit silly.} \item{dotted.turn.on}{The plot usually includes some dotted vertical bars that separate wavelet packets to make it clearer which packets are which. This option controls the coarsest resolution level at which dotted lines appear. All levels equal to and finer than this level will receive the vertical dotted lines.} \item{aspect}{A transform to apply to the coefficients before plotting. 
If the coefficients are complex-valued and aspect="Identity" then the modulus of the coefficients is plotted.} \item{\dots}{Other arguments to plot} } \description{ This function plots packet-ordered non-decimated wavelet transform coefficients arising from a \code{\link{wst.object}} object. } \details{ A packet-ordered non-decimated wavelet object contains coefficients of a signal (usually obtained by the \code{\link{wst}} packet-ordered non-decimated wavelet transform, but also functions that derive such objects, such as \code{\link{threshold.wst}}). A packet-ordered nondecimated wavelet object, x, possesses \code{nlevelsWT(x)} resolution levels. In WaveThresh the coarsest level is level 0 and the finest is level \code{nlevelsWT-1}. For a packet-ordered nondecimated wavelet object the number of blocks (packets) at level \code{j} is \code{2^(nlevelsWT-j)}. This function plots the coefficients. At the bottom of the plot the original input function (if present) is plotted. Then levels above the original plot successively coarser wavelet coefficients. Each packet of coefficients is plotted within dotted vertical lines. At the finest level there are two packets: one (the left one) corresponds to the wavelet coefficients that would be obtained using the (standard) decimated wavelet transform function, \code{\link{wd}}, and the other packet contains those coefficients that would have been obtained using the standard decimated wavelet transform after a unit cyclic shift. For coarser levels there are more packets corresponding to different cyclic shifts (although the computation is not performed using shifting operations the effect is the same). For full details see Nason and Silverman, 1995. Packets are drawn on the plot and can be separated by vertical dotted lines. The resolution levels at which this happens can be controlled by the \code{dotted.turn.on} option. The coarsest resolution level to be drawn is controlled by the \code{first.level} option. \emph{It should be noted that the packets referred to here are just the blocks of nondecimated wavelet coefficients in a packet-ordering. These are different to wavelet packets (produced by \code{\link{wp}}) and nondecimated wavelet packets (produced by \code{\link{wpst}}).} } \value{ Nothing } \seealso{ \code{\link{MaNoVe}},\code{\link{threshold.wst}}, \code{\link{wst}}, \code{\link{wst.object}}} \examples{ # # Generate some test data # v <- DJ.EX()$heavi # # Let's plot these to see what they look like # \dontrun{plot(v, type="l")} # # Do a packet-ordered non-decimated wavelet packet transform # vwst <- wst(v) # # Now plot the coefficients # \dontrun{plot(vwst)} # # Note that the "original" function is at the bottom of the plot. # The finest scale coefficients (two packets) are immediately above. # Increasingly coarser scale coefficients are above that! # } \author{G P Nason} \keyword{hplot} wavethresh/man/accessD.wp.rd0000644000176200001440000000206514211622540015526 0ustar liggesusers\name{accessD.wp} \alias{accessD.wp} \title{Obtain whole resolution level of wavelet packet coefficients from a wavelet packet object (wp).} \description{ Get a whole resolution level's worth of coefficients from a \code{\link{wp}} wavelet packet object. To obtain packets of coefficients from a wavelet packet object you should use the \code{\link{getpacket}} collection of functions. } \usage{ \method{accessD}{wp}(wp, level, \dots) } \arguments{ \item{wp}{Wavelet packet object.}
\item{level}{the resolution level that you wish to extract.} \item{\dots}{any other arguments} } \details{ The wavelet packet coefficients are actually stored in a straightforward manner in a matrix component of a \code{\link{wp}} object so it would not be too difficult to extract whole resolution levels yourself. However, this routine makes it easier to do. } \value{ A vector containing the coefficients that you wanted to extract. } \section{RELEASE}{ Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{accessD}}, \code{\link{getpacket}} } \keyword{manip} \author{G P Nason} wavethresh/man/putpacket.wst.rd0000644000176200001440000000510014211622634016350 0ustar liggesusers\name{putpacket.wst} \alias{putpacket.wst} \title{Put a packet of coefficients into a packet ordered non-decimated wavelet object (wst).} \description{ This function inserts a packet of coefficients into a packet-ordered non-decimated wavelet (\code{\link{wst}}) object. The \code{\link{wst}} objects are computed by the \code{\link{wst}} function amongst others. } \usage{ \method{putpacket}{wst}(wst, level, index, packet, \dots) } \arguments{ \item{wst}{Packet-ordered non-decimated wavelet object into which you wish to insert the packet.} \item{level}{The resolution level of the coefficients that you wish to insert.} \item{index}{The index number within the resolution level of the packet of coefficients that you wish to insert.} \item{packet}{A vector of coefficients that you wish to insert into the \code{\link{wst}} object. The required length of the packet may be determined by extracting the same packet of coefficients using the \code{\link{getpacket.wst}} function and then applying the \code{length} function to the result.} \item{\dots}{any other arguments} } \details{ This function actually calls the \code{\link{putpacket.wp}} function to do the insertion. In the future this function will be extended to insert father wavelet coefficients as well. } \value{ An object of class \code{\link{wst.object}} containing the packet ordered non-decimated wavelet coefficients that have been modified: i.e. with the packet inserted. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{getpacket.wst}}, \code{\link{putpacket}}, \code{\link{putpacket.wp}}, \code{\link{wst}}, \code{\link{wst.object}}. } \examples{ # # Take the packet-ordered non-decimated transform of some random data # MyWST <- wst(rnorm(512)) # # The above data set was 2^9 in length. Therefore there are # coefficients at resolution levels 0, 1, 2, ..., and 8. # # The high resolution coefficients are at level 8. # There should be 256 coefficients at level 8 in index locations 0 and 1. # length(getpacket(MyWST, level=8, index=0)) # [1] 256 length(getpacket(MyWST, level=8, index=1)) # [1] 256 # # There should be 4 coefficients at resolution level 2 # getpacket(MyWST, level=2, index=0) # [1] -0.92103095 0.70125471 0.07361174 -0.43467375 # # O.k. Let's insert the packet containing the numbers 19,42,31,32 # NewMyWST <- putpacket(MyWST, level=2, index=0, packet=c(19,42,31,32)) # # Let's check that it put the numbers in correctly by reaccessing that # packet... # getpacket(NewMyWST, level=2, index=0) # [1] 19 42 31 32 # # Yep. It inserted the packet correctly.
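#
# A further minimal sketch (not part of the original example): having modified
# a packet you may want to see its effect on the reconstruction. The AvBasis
# function inverts a wst object by basis averaging so, assuming the NewMyWST
# object created above is still available, you can do:
#
NewRecon <- AvBasis(NewMyWST)
#
# NewRecon is a vector of the same length as the original data (512) and
# reflects the modified packet. The object name NewRecon is purely illustrative.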
} \keyword{manip} \author{G P Nason} wavethresh/man/WTEnv.rd0000644000176200001440000000331314211622634014540 0ustar liggesusers\name{WTEnv} \alias{WTEnv} \title{Environment that exists to store intermediate calculations for re-use within the same R session.} \description{Environment that stores results of long calculations so that they can be made available for immediate reuse. } \details{This environment is created on package load by wavethresh. The results of some intermediate calculations get stored in here (notably by \code{\link{PsiJ}}, \code{\link{PsiJmat}} and \code{\link{ipndacw}}). The reason for this is that the calculations are typically lengthy and it saves wavethresh time to search the \code{WTEnv} for pre-computed results. For example, \code{\link{ipndacw}} computes matrices of various orders. Matrices of low order form the upper-left corner of matrices of higher order so higher order matrix calculations can make use of the lower order instances. A similar functionality was present in wavethresh in versions 4.6.1 and prior to this. In previous versions computations were saved in the users current data directory. However, the user was never notified about this nor permission sought. The environment \code{WTEnv} disappears when the package disappears and the R session stops - and results of all intermediate calculations disappear too. This might not matter if you never use the larger objects (as it will not take much time to recompute). } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{ipndacw}}, \code{\link{PsiJ}}, \code{\link{PsiJmat}}} \examples{ # # See what it is # WTEnv # # # Compute something that uses the environment # fred <- PsiJ(-5) # # Now let's see what got put in # ls(envir=WTEnv) #[1] "Psi.5.10.DaubLeAsymm" } \keyword{algebra} \author{G P Nason} wavethresh/man/drawbox.rd0000644000176200001440000000114514211622540015200 0ustar liggesusers\name{drawbox} \alias{drawbox} \title{Draw a shaded coloured box} \usage{ drawbox(x,y,w,h,density,col) } \arguments{ \item{x}{The bottom left x coordinate of the box} \item{y}{The bottom left y coordinate of the box} \item{w}{The width of the box} \item{h}{The height of the box} \item{density}{The shading density of the box} \item{col}{The colour of the box} } \description{ Simply draws a box with bottom left corner at (x,y), or width w and height h with shading of density and colour of col. } \details{ Description says all } \value{ None } \seealso{\code{\link{addpkt}}} \author{G P Nason} \keyword{dplot} wavethresh/man/modernise.rd0000644000176200001440000000114414211622540015516 0ustar liggesusers\name{modernise} \alias{modernise} \title{Generic function to upgrade a V2 WaveThresh object to V4} \usage{ modernise(...) } \arguments{ \item{\dots}{Other objects} } \description{ Not really used in practice. The function \code{\link{IsEarly}} can be used to tell if an object comes from an earlier version of wavethresh. Note that the earlier version only has a \code{\link{wd.object}} class object so there is only a method for that. } \details{ Description says all } \value{ A modernised version of the object. 
} \seealso{\code{\link{IsEarly}},\code{\link{modernise.wd}}} \author{G P Nason} \keyword{manip} wavethresh/man/compgrot.rd0000644000176200001440000000456314211622540015373 0ustar liggesusers\name{compgrot} \alias{compgrot} \title{Compute empirical shift for time ordered non-decimated transforms.} \description{ Computes the empirical shift required for time-ordered non-decimated transform coefficients to bring them into time order. } \usage{ compgrot(J, filter.number, family) } \arguments{ \item{J}{The \code{number of levels} in the non-decimated transform where coefficients are to be time-aligned.} \item{filter.number}{The wavelet filter number to be used, see \code{\link{filter.select}}} \item{family}{The wavelet family, see \code{\link{filter.select}}} } \details{ Time-ordered non-decimated transform coefficients when raw are not in exact time alignment due to the phase of the underlying wavelet. This function returns the shifts that are necessary to apply to each resolution level in the transform to bring back each set of time-ordered coefficients into time alignment. Note that the shifts returned are approximate shifts which work for any Daubechies wavelet. More accurate shifts can be computed using detailed knowledge of the particular wavelet used. Each shift is "to the left". I.e. higher indexed coefficients should take the place of lower-indexed coefficients. Periodic boundaries are assumed. This realignment is mentioned in Walden and Contreras Cristan, (1997) and Nason, Sapatinas and Sawczenko, (1998). } \value{ A vector containing the shifts that need to be applied to each scale level to return them to the correct time alignment. There are \code{J} entries in the vector. The first entry corresponds to the shift required for the finest level coefficients (i.e. level \code{J-1}) and the last entry corresponds to the coarsest level (i.e. level 0). Entry \code{j} corresponds to the shift required for scale level \code{J-j}. } \note{GROT was the shop started by Reginald Perrin. Unfortunately, GROT stands for "Guy ROTation". } \section{RELEASE}{Version 3.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{wpst}}, \code{wpst.object}. } \examples{ # # Let's see how the resolution levels have to be shifted # compgrot(4, filter.number=10, family="DaubExPhase") #[1] 2 6 15 31 # # In other words. Scale level 3 needs to be shifted two units. # Scale level 2 needs to be shifted 6 units # Scale level 1 needs to be shifted 15 units # Scale level 0 needs to be shifted 31 units. } \keyword{manip} \author{G P Nason} wavethresh/man/print.nvwp.rd0000644000176200001440000000455114211622540015663 0ustar liggesusers\name{print.nvwp} \alias{print.nvwp} \title{Print a wavelet packet node vector object, also used by several other functions to obtain packet list information} \usage{ \method{print}{nvwp}(x, printing = TRUE, ...) } \arguments{ \item{x}{The nvwp that you wish to print} \item{printing}{If FALSE then nothing is printed. This argument is here because the results of the printing are also useful to many other routines where you want the results but are not bothered by actually seeing the results} \item{\dots}{Other arguments} } \description{ Ostensibly prints out wavlet packet node vector information, but also produces packet indexing information for several functions. } \details{ A node vector contains selected basis information, but this is stored as a tree object. Hence, it is not immediately obvious which basis elements have been stored. 
This function produces a list of the packets at each resolution level that have been selected in the basis. This information is so useful to other functions that the function is used even when printing is not the primary objective. } \value{ A list containing two components: \code{level} and \code{pkt}. These are the levels and packet indices of the select packets in the basis. } \seealso{ \code{\link{InvBasis.wp}}, \code{\link{MaNoVe.wp}}, \code{\link{plot.nvwp}}, \code{\link{plot.wp}}} \examples{ v <- rnorm(128) vwp <- wp(v) vnv <- MaNoVe(vwp) print(vnv) #Level: 6 Packet: 1 #Level: 3 Packet: 0 #Level: 2 Packet: 4 #Level: 2 Packet: 13 #Level: 2 Packet: 15 #Level: 1 Packet: 5 #Level: 1 Packet: 10 #Level: 1 Packet: 13 #Level: 1 Packet: 14 #Level: 1 Packet: 15 #Level: 1 Packet: 16 #Level: 1 Packet: 20 #Level: 1 Packet: 21 #Level: 1 Packet: 24 #Level: 0 Packet: 8 #Level: 0 Packet: 9 #Level: 0 Packet: 12 #Level: 0 Packet: 13 #Level: 0 Packet: 14 #Level: 0 Packet: 15 #Level: 0 Packet: 22 #Level: 0 Packet: 23 #Level: 0 Packet: 24 #Level: 0 Packet: 25 #Level: 0 Packet: 34 #Level: 0 Packet: 35 #Level: 0 Packet: 36 #Level: 0 Packet: 37 #Level: 0 Packet: 38 #Level: 0 Packet: 39 #Level: 0 Packet: 44 #Level: 0 Packet: 45 #Level: 0 Packet: 46 #Level: 0 Packet: 47 #Level: 0 Packet: 50 #Level: 0 Packet: 51 #Level: 0 Packet: 56 #Level: 0 Packet: 57 #Level: 0 Packet: 58 #Level: 0 Packet: 59 } \author{G P Nason} \keyword{print} wavethresh/man/draw.wst.rd0000644000176200001440000000372214211622540015306 0ustar liggesusers\name{draw.wst} \alias{draw.wst} \title{Draw mother wavelet or scaling function associated with wst object.} \description{ This function draws the mother wavelet or scaling function associated with a \code{\link{wst.object}}. } \usage{ \method{draw}{wst}(wst, \dots) } \arguments{ \item{wst}{The \code{\link{wst}} class object whose associated wavelet or scaling function you wish to draw. } \item{\dots}{Additional arguments to pass to the \code{\link{draw.default}} function which does the drawing. In particular, arguments can be set to choose between drawing the mother wavelet and scaling function, to set the resolution of the plot, to choose between drawing one and two dimensional pictures. } } \details{ This function extracts the \code{filter} component from the \code{\link{wst}} object (which is constructed using the \code{\link{filter.select}} function) to decide which wavelet packet family to draw. Once decided the \code{\link{draw.default}} function is used to actually do the drawing. } \value{ If the \code{plot.it} argument is set to \code{TRUE} then nothing is returned. Otherwise, Otherwise, as with \code{\link{draw.default}}, the coordinates of what would have been plotted are returned. } \note{If the \code{plot.it} argument is \code{TRUE} (which it is by default) a plot of the appropriate wavelet packet is plotted on the active graphics device.} \section{RELEASE}{Version 3.6 Copyright Guy Nason 1995 } \seealso{ \code{\link{filter.select}}, \code{\link{wst.object}}, \code{\link{draw.default}}. } \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Now do the \code{packet-ordered non-decimated DWT} of the data using the Daubechies # least-asymmetric wavelet N=10 (the default arguments in \code{\link{wst}}). # tdwst <- wst(test.data) # # What happens if we try to draw this new tdwst object? 
# \dontrun{draw(tdwst)} # # We get a picture of the wavelet that did the transform # } \keyword{hplot} \author{G P Nason} wavethresh/man/rsswav.rd0000644000176200001440000000533614211622634015071 0ustar liggesusers\name{rsswav} \alias{rsswav} \title{Compute mean residual sum of squares for odd prediction of even ordinates and vice versa} \usage{ rsswav(noisy, value = 1, filter.number = 10, family = "DaubLeAsymm", thresh.type = "hard", ll = 3) } \arguments{ \item{noisy}{A vector of dyadic (power of two) length that contains the noisy data that you wish to compute the averaged RSS for.} \item{value}{The specified threshold.} \item{filter.number}{This selects the smoothness of wavelet that you want to perform wavelet shrinkage by cross-validation.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{thresh.type}{this option specifies the thresholding type which can be "hard" or "soft".} \item{ll}{The primary resolution that you wish to assume. No wavelet coefficients that are on coarser scales than ll will be thresholded.} } \description{ Compute mean of residual sum of squares (RSS) for odd prediction of even ordinates and vice versa using wavelet shrinkage with a specified threshold. This is a subsidiary routine of the \code{\link{WaveletCV}} cross validation function. A version implemented in C exists called \code{\link{Crsswav}}. } \details{ \bold{Note}: a faster C based implementation of this function called \code{\link{Crsswav}} is available. It takes the same arguments and returns the same values. Two-fold cross validation can be computed for a wd object using the "cv" policy option in \code{\link{threshold.wd}}. As part of this procedure for each threshold value that the CV optimisation algorithm selects a RSS value must be computed (the CV optimisation algorithm seeks to minimize this RSS value). The RSS value computed is this. First, the even and odd indexed values are separated. The even values are used to construct an estimate of the odd true values using wavelet shrinkage with the given threshold. The sum of squares between the estimate and the noisy odds is computed. An equivalent calculation is performed by swapping the odds and evens. The two RSS values are then averaged and the average returned. This algorithm is described more fully in Nason, (1996). } \value{ A list with the following components \item{ssq}{The RSS value that was computed} \item{df}{The dof value computed on the thresholded wavelet transform of the data with the given threshold and thresholding options. (Although this is not really used for anything).} \item{value}{The value argument that was specified.} \item{type}{the \code{thresh.type} argument that was specified.} \item{lev}{The vector \code{ll:(nlevelsWT(noisy)-1)} (i.e. the levels that were thresholded).} } \seealso{\code{\link{Crsswav}},\code{\link{threshold.wd}}, \code{\link{WaveletCV}}} \author{G P Nason} \keyword{smooth} wavethresh/man/summary.mwd.rd0000644000176200001440000000260614211622634016024 0ustar liggesusers\name{summary.mwd} \alias{summary.mwd} \title{Use summary() on a mwd object. } \description{ This function prints out more information about an \code{\link{mwd.object}} in a nice human-readable form. } \usage{ \method{summary}{mwd}(object, ...) } \arguments{ \item{object}{An object of class \code{\link{mwd}} that you wish to print out more information.} \item{...}{Any other arguments.} } \value{ Nothing of any particular interest. 
} \note{Prints out information about \code{\link{mwd}} objects in nice readable format.} \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1995-6) } \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ # # Generate an mwd object. # tmp <- mwd(rnorm(32)) # # Now get Splus to use summary.mwd # summary(tmp) # Length of original: 32 # Levels: 4 # Filter was: Geronimo Multiwavelets # Scaling fns: 2 # Wavelet fns: 2 # Prefilter: default # Scaling factor: 2 # Boundary handling: periodic # Transform type: wavelet # Date: Tue Nov 16 13:55:26 GMT 1999 } \keyword{nonlinear} \keyword{smooth} \author{Tim Downie} wavethresh/man/wvmoments.rd0000644000176200001440000000251714211622634015601 0ustar liggesusers\name{wvmoments} \alias{wvmoments} \title{Compute moments of wavelets or scaling function} \usage{ wvmoments(filter.number = 10, family = "DaubLeAsymm", moment = 0, scaling.function = FALSE) } \arguments{ \item{filter.number}{The smoothness of wavelet or scaling function to compute moments for, see \code{\link{filter.select}}} \item{family}{The wavelet family to use, see \code{\link{filter.select}}} \item{moment}{The moment to compute} \item{scaling.function}{If \code{FALSE} then a wavelet is used in the moment calculation, alternatively if \code{TRUE} the associated scaling function is used.} } \description{ Numerically compute moments of wavelets or scaling function } \details{ Given a wavelet \eqn{\psi(x)}{psi(x)} this function computes the mth moment \eqn{\int x^m \psi(x) \, dx}{int x^m psi(x) dx}. Note that for low order moments the integration function often fails for the usual numerical reasons (this never happened in S!). It might be that fiddling with the tolerances will improve this situation. } \value{ An object of class \code{integrate} containing the integral and other pieces of interesting information about the moments calculation. } \examples{ wvmoments(filter.number=5, family="DaubExPhase", moment=5) #-1.317600 with absolute error < 7.5e-05 } \seealso{\code{\link{draw.default}}} \author{G P Nason} \keyword{math} wavethresh/man/Psiname.rd0000644000176200001440000000364014211622540015130 0ustar liggesusers\name{Psiname} \alias{Psiname} \title{Return a PsiJ list object style name.} \description{ This function returns a character string according to a particular format for naming \code{\link{PsiJ}} objects. } \usage{ Psiname(J, filter.number, family) } \arguments{ \item{J}{A negative integer representing the order of the \code{\link{PsiJ}} object.} \item{filter.number}{The index number of the wavelet used to build the \code{\link{PsiJ}} object.} \item{family}{The wavelet family used to build the \code{\link{PsiJ}} object.} } \details{ Some of the objects computed by \code{\link{PsiJ}} take a long time to compute. Hence it is a good idea to store them and reuse them. This function generates a name according to a particular naming scheme that permits a search algorithm to easily find the matrices. Each object has three defining characteristics: its \emph{order}, \emph{filter.number} and \emph{family}. Each of these three characteristics are concatenated together to form a name. 
This function performs exactly the same role as \code{\link{rmname}} except for objects produced by \code{\link{PsiJ}}. } \value{ A character string containing the name of an object according to a particular naming scheme. } \references{Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. \emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern} \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{PsiJ}} } \examples{ # # What's the name of the order 4 Haar PsiJ object? # Psiname(-4, filter.number=1, family="DaubExPhase") #[1] "Psi.4.1.DaubExPhase" # # What's the name of the order 12 Daubechies least-asymmetric wavelet PsiJ # with 7 vanishing moments? # Psiname(-12, filter.number=7, family="DaubLeAsymm") #[1] "Psi.12.7.DaubLeAsymm" } \keyword{character} \author{G P Nason} wavethresh/man/putC.wd.rd0000644000176200001440000000730614211622540015063 0ustar liggesusers\name{putC.wd} \alias{putC.wd} \title{Puts a whole resolution level of father wavelet coefficients into wd wavelet object.} \description{ Makes a copy of the \code{\link{wd}} object, replaces some of the father wavelet coefficient data in the copy, and then returns the copy. } \usage{ \method{putC}{wd}(wd, level, v, boundary=FALSE, index=FALSE, \dots) } \arguments{ \item{wd}{Wavelet decomposition object into which you wish to insert the father wavelet coefficients.} \item{level}{the resolution level at which you wish to replace the father wavelet coefficients.} \item{v}{the replacement data; this should be of the correct length.} \item{boundary}{If \code{boundary} is \code{FALSE} then only "real" data is replaced. If boundary is \code{TRUE} then the boundary (bookkeeping) elements are replaced as well. Information about the lengths of the vectors can be found in the \code{\link{first.last}} database function and Nason and Silverman, 1994.} \item{index}{If index is \code{TRUE} then the index numbers into the 1D array where the coefficient insertion would take place are returned. If index is \code{FALSE} (default) then the modified \code{wavelet decomposition} object is returned.} \item{\dots}{any other arguments} } \details{ The function \code{\link{accessC}} obtains the father wavelet coefficients for a particular level. The function \code{putC.wd} replaces father wavelet coefficients at a particular resolution level and returns a modified wd object reflecting the change. The need for this function is a consequence of the pyramidal structure of Mallat's algorithm and the memory efficiency gain achieved by storing the pyramid as a linear vector. \code{putC.wd} obtains information about where the smoothed data appears from the \code{fl.dbase} component of an \code{\link{wd.object}}, in particular the array \code{fl.dbase$first.last.c} which gives a complete specification of index numbers and offsets for \code{wd.object$C}. Note that this function is a method for the generic function \code{\link{putC}}. When the \code{\link{wd.object}} is definitely a wd class object then you need only use the generic version of this function. Note also that this function only puts information into \code{\link{wd}} class objects. To extract coefficients from a \code{\link{wd}} object you have to use the \code{\link{accessC}} function (or more precisely, the \code{\link{accessC.wd}} method). } \value{ A \code{\link{wd}} class object containing the modified father wavelet coefficients.
} \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{putC}}, \code{\link{wd.object}}, \code{\link{wd}}, \code{\link{accessC}},\code{\link{putD}}, \code{\link{first.last}}, } \examples{ # # Generate an EMPTY wd object: # zero <- rep(0, 16) zerowd <- wd(zero) # # Put some random father wavelet coefficients into the object at # resolution level 2. For the decimated wavelet transform there # are always 2^i coefficients at resolution level i. So we have to # insert 4 coefficients # mod.zerowd <- putC( zerowd, level=2, v=rnorm(4)) # # If you use accessC on mod.zerowd you would see that there were only # coefficients at resolution level 2 where you just put the coefficients. # # Now, for a time-ordered non-decimated wavelet transform object the # procedure is exactly the same EXCEPT that there are going to be # 16 coefficients at each resolution level. I.e. # # Create empty TIME-ORDERED NON-DECIMATED wavelet transform object # zerowdS <- wd(zero, type="station") # # Now insert 16 random coefficients at resolution level 2 ## mod.zerowdS <- putC(zerowdS, level=2, v=rnorm(16)) # # Once more if you use accessC on mod.zerowdS you will see that there are # only coefficients at resolution level 2. } \keyword{manip} \author{G P Nason} wavethresh/man/image.wd.rd0000644000176200001440000000155314211622540015230 0ustar liggesusers\name{image.wd} \alias{image.wd} \usage{ \method{image}{wd}(x, strut = 10, type = "D", transform = I, ...) } \arguments{ \item{x}{The \code{\link{wd.object}} that you wish to image} \item{strut}{The width of each coefficient in the image} \item{type}{Either "C" or "D" depending if you wish to image scaling function or wavelet coefficients respectively} \item{transform}{Apply a numerical transform to the coefficients before display} \item{\dots}{Other arguments} } \title{Produce image representation of nondecimated wavelet transform} \description{ Produces a representation of a nondecimated wavelet transform (time-ordered) as an image. } \details{ Description says all } \value{ None } \seealso{\code{\link{logabs}}, \code{\link{nlevelsWT}},\code{\link{wd}}} \examples{ tmp <- wd(rnorm(256), type="station") \dontrun{image(tmp)} } \author{G P Nason} \keyword{hplot} wavethresh/man/draw.default.rd0000644000176200001440000001123614211622540016114 0ustar liggesusers\name{draw.default} \alias{draw.default} \title{Draw picture of a wavelet or scaling function. } \description{ This function can produce pictures of one- or two-dimensional wavelets or scaling functions at various levels of resolution. } \usage{ \method{draw}{default}(filter.number = 10, family = "DaubLeAsymm", resolution = 8192, verbose = FALSE, plot.it = TRUE, main = "Wavelet Picture", sub = zwd$ filter$name, xlab = "x", ylab = "psi", dimension = 1, twodplot = persp, enhance = TRUE, efactor = 0.05, scaling.function = FALSE, type="l", \dots) } \arguments{ \item{filter.number }{This selects the index number of the wavelet or scaling function you want to draw (from within the wavelet family).} \item{family}{specifies the family of wavelets that you want to draw. The options are "DaubExPhase" and "DaubLeAsymm".} \item{resolution }{specifies the resolution that the wavelet or scaling function is computed to. It does not necessarily mean that you see all of these points as if the enhance option is TRUE then some function points are omitted.} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. 
Such messages are generally annoying so it is turned off by default.} \item{plot.it }{If TRUE then this function attempts to plot the function (i.e. draw it on a graphics device which should be active). If FALSE then this function returns the coordinates of the object that would have been plotted.} \item{main}{a main title for the plot} \item{sub}{a subtitle for the plot.} \item{xlab}{a label string for the x-axis} \item{ylab}{a label string for the y-axis} \item{dimension }{whether to make a picture of the one-dimensional wavelet or the two-dimensional wavelet.} \item{twodplot }{which function to use to produce the two-dimensional plot if dimension=2. The function you supply should accept data just like the contour or persp functions supplied with S-Plus.} \item{enhance}{If this argument is TRUE then the plot is enhanced in the following way. Many of Daubechies' compactly supported wavelets are near to zero on a reasonable proportion of their support. So if such a wavelet is plotted quite a lot of it looks to be near zero and the interesting detail seems quite small. This function chooses a nice range on which to plot the central parts of the function and the function is plotted on this range.} \item{efactor}{Variable which controls the range of plotting associated with the enhance option above. Any observations smaller than efactor times the range of the absolute function values are deemed to be too small. Then the largest range of ``non-small'' values is selected to be plotted.} \item{scaling.function }{If this argument is TRUE the scaling function is plotted otherwise the mother wavelet is plotted.} \item{type}{The \code{type} argument supplied to the plot command} \item{\dots}{other arguments you can supply to the plot routine embedded within this function.} } \details{ The algorithm underlying this function produces a picture of the wavelet or scaling function by first building a \code{wavelet decomposition} object of the correct size (i.e. \code{correct resolution}) and setting all entries equal to zero. Then one coefficient at a carefully selected resolution level is set to one and the decomposition is inverted to obtain a picture of the wavelet. } \value{ If \code{plot.it=FALSE} then usually a list containing coordinates of the object that \emph{would} have been plotted is returned. This can be useful if you don't want S-Plus to do the plotting or you wish to use the coordinates of the wavelets for other purposes.} \note{A plot is produced of the wavelet or scaling function if \code{plot.it=TRUE}.} \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{filter.select}}, \code{\link{ScalingFunction}},\code{\link{wd}}, \code{\link{wd.object}}, \code{\link{wr}}, \code{\link{wr.wd}}. } \examples{ # # First make sure that your favourite graphics device is open # otherwise S-Plus will complain. # # Let's draw a one-dimensional Daubechies least-asymmetric wavelet # N=10 # \dontrun{draw.default(filter.number=10, family="DaubLeAsymm")} # # Wow. What a great picture! # # Now how about a one-dimensional Daubechies extremal-phase scaling function # with N=2 # \dontrun{draw.default(filter.number=2, family="DaubExPhase")} # # Excellent! Now how about a two-dimensional Daubechies least-asymmetric # N=6 wavelet # # N.b. we'll also reduce the resolution down a bit. If we used the default # resolution of 8192 this would be probably too much for most computers. # \dontrun{draw.default(filter.number=6, family="DaubLeAsymm", dimension=2, res=256)} # # What a pretty picture! 
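#
# A small additional sketch (not in the original examples): with plot.it=FALSE
# draw.default returns the coordinates of the wavelet instead of plotting it,
# so you can inspect or re-plot them yourself. The x and y component names
# used below are an assumption based on the Value section; check names(wv)
# on your system first.
#
wv <- draw.default(filter.number=2, family="DaubExPhase", plot.it=FALSE)
names(wv)
\dontrun{plot(wv$x, wv$y, type="l", xlab="x", ylab="psi")}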
} \keyword{hplot} \author{G P Nason} wavethresh/man/addpkt.rd0000644000176200001440000000211714211622540015001 0ustar liggesusers\name{addpkt} \alias{addpkt} \title{Add a wavelet packet box to an already set up time-frequency plot} \usage{ addpkt(level, index, density, col, yvals) } \arguments{ \item{level}{The level at which the box or yvals are plotted} \item{index}{The packet index at which the box of yvals are plotted} \item{density}{The density of the shading of the box} \item{col}{The color of the box} \item{yvals}{If this argument is missing then a shaded coloured box is drawn, otherwise a time series of \code{yvals} is plotted where the box would have been.} } \description{ This function assumes that a high-level plot has already been set up using \code{\link{plotpkt}}. Given that this function plots a wavelet packet box at a given level, packet index and with particular shading and color and optionally plotting a sequence of coefficients at that location rather than a shaded box. } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{basisplot}},\code{\link{basisplot.BP}}, \code{\link{basisplot.wp}}, \code{\link{plotpkt}}, \code{\link{plot.nvwp}}} \author{G P Nason} \keyword{dplot} wavethresh/man/draw.wp.rd0000644000176200001440000000506514211622540015121 0ustar liggesusers\name{draw.wp} \alias{draw.wp} \title{Draw wavelet packet associated with a wp object.} \description{ This function draws a wavelet packet associated with a \code{\link{wp.object}}. } \usage{ \method{draw}{wp}(wp, level, index, plot.it=TRUE, main, sub, xlab, ylab, \dots) } \arguments{ \item{wp}{The \code{\link{wp}} class object whose associated wavelet packet you wish to draw. } \item{level}{The resolution level of wavelet packet in the wavelet packet decomposition that you wish to draw (corresponds to scale).} \item{index}{The packet index of the wavelet packet in the wavelet packet decomposition that you wish to draw (corresponds to number of oscillations).} \item{plot.it}{If TRUE then the wavelet packet is plotted on the active graphics device. If FALSE then the y-coordinates of the packet are returned. Note that x-coordinates are not returned (the packet is periodic on its range anyway). } \item{main}{The main argument for the plot} \item{sub}{The subtitle for the plot} \item{xlab}{The labels for the x axis} \item{ylab}{The labels for the y axis} \item{\dots}{Additional arguments to pass to the \code{\link{drawwp.default}} function which does the drawing. In particular, arguments can be set to choose between drawing the mother wavelet and scaling function, to set the resolution of the plot, to choose between drawing one and two dimensional pictures. } } \details{ This function extracts the filter component from the \code{\link{wp}} object (which is constructed using the \code{\link{filter.select}} function) to decide which wavelet packet family to draw. Once decided the \code{\link{drawwp.default}} function is used to actually do the drawing. } \value{ If the \code{plot.it} argument is set to \code{TRUE} then nothing is returned. Otherwise, if \code{plot.it} is set to \code{FALSE} the coordinates of what would have been plotted are returned. } \note{If the \code{plot.it} argument is \code{TRUE} (which it is by default) a plot of the appropriate wavelet packet is plotted on the active graphics device.} \section{RELEASE}{Version 3.9.6 Copyright Guy Nason 1998 } \seealso{ \code{\link{filter.select}}, \code{\link{wp}}, \code{\link{wp.object}}, \code{\link{drawwp.default}}. 
} \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Now do the wavelet packet transform of the data using the Daubechies # least-asymmetric wavelet N=10 (the default arguments in # wp). # tdwp <- wp(test.data) # # What happens if we try to draw this new tdwp object? # \dontrun{draw(tdwp, level=4, index=12)} } \keyword{hplot} \author{G P Nason} wavethresh/man/mwd.rd0000644000176200001440000000620314211622540014321 0ustar liggesusers\name{mwd} \alias{mwd} \title{Discrete multiple wavelet transform (decomposition).} \description{ This function performs the discrete multiple wavelet transform (DMWT), using an adaptation of Mallat's pyramidal algorithm. The DMWT gives vector wavelet coefficients. } \usage{ mwd(data, prefilter.type = "default", filter.type = "Geronimo", bc ="periodic", verbose = FALSE) } \arguments{ \item{data}{A vector containing the data you wish to decompose. The length of this vector must be a power of 2 times the dimension of the DMWT (multiplicity of wavelets).} \item{prefilter.type}{This chooses the method of preprocessing required. The arguments will depend on filter.type, but "default" will always work.} \item{filter.type}{Specifies which multiwavelet filter to use. The options are "\code{Geronimo}" (dimension 2) or "\code{Donovan3}" (dimension 3). The latter has not been tested fully and may contain bugs. See the function \code{\link{mfilter.select}} for further details.} \item{bc}{specifies the boundary handling. If \code{bc=="periodic"}, the default, then the function you decompose is assumed to be periodic on its interval of definition; if \code{bc=="symmetric"} then the function beyond its boundaries is assumed to be a symmetric reflection of the function in the boundary.} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so it is turned off by default.} } \details{ The code implements Mallat's pyramid algorithm adapted for multiple wavelets using Xia, Geronimo, Hardin and Suter, 1996. The method takes a data vector of length \code{2^J*M}, and preprocesses it. This has two effects: firstly it puts the data into matrix form and then it filters it so that the DMWT can operate more efficiently. Most of the technical details are similar to the single wavelet transform except for the matrix algebra considerations, and the prefiltering process. See Downie and Silverman (1998) for further details and how this transform can be used in a statistical context. } \value{ An object of class \code{\link{mwd}}. } \author{ Tim Downie } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1996)} \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}.
} \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Decompose test.data with multiple wavelet transform and # plot the wavelet coefficients # tdmwd <- mwd(test.data) \dontrun{plot(tdmwd)} #[1] 1.851894 1.851894 1.851894 1.851894 1.851894 1.851894 1.851894 # # You should see a plot with wavelet coefficients like in #\code{\link{plot.wd}} but at each coefficient position # there are two coefficients in two different colours one for each of # the wavelets at that position. # # Note the scale for each level is returned by the function. } \keyword{math} wavethresh/man/accessD.wst.rd0000644000176200001440000000411014211622540015706 0ustar liggesusers\name{accessD.wst} \alias{accessD.wst} \title{Get mother wavelet coefficients from a packet ordered non-decimated wavelet object (wst).} \description{ The mother wavelet coefficients from a packet ordered non-decimated wavelet object, \code{\link{wst}}, are stored in a matrix. This function extracts all the coefficients corresponding to a particular resolution level. } \usage{ \method{accessD}{wst}(wst, level, aspect = "Identity", \dots) } \arguments{ \item{wst}{Packet ordered non-decimated wavelet object from which you wish to extract the mother wavelet coefficients.} \item{level}{The level that you wish to extract. This can range from zero (the coarsest coefficients) to nlevelsWT(wstobj) which returns the original data. } \item{aspect}{Function to apply to coefficient before return. Supplied as a character argument which gets converted to a function. For example, "Mod" which returns the absolute value of complex-valued coefficients.} \item{\dots}{Other arguments} } \details{ The \code{\link{wst}} function performs a packet-ordered non-decimated wavelet transform. This function extracts all the mother wavelet coefficients at a particular resolution level specified by \code{level}. Note that coefficients returned by this function are in \emph{packet order}. They can be used \emph{as is} but for many applications it might be more useful to deal with the coefficients in packets: see the function \code{\link{getpacket.wst}} for further details. Note that all the coefficients here are those of mother wavelets. The non-decimated transform efficiently computes all possible shifts of the discrete wavelet transform computed by \code{\link{wd}}. } \value{ A vector of the extracted coefficients. } \references{ Nason, G.P. and Silverman, B.W. The stationary wavelet transform and some statistical applications. 
} \seealso{ \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{accessD}}, \code{\link{getpacket.wst}} } \examples{ # # Get the 4th level of mother wavelet coefficients from a decomposition # dat <- rnorm(128) accessD(wst(dat), level=4) } \keyword{manip} \author{G P Nason} wavethresh/man/first.last.dh.rd0000644000176200001440000000214614211622540016217 0ustar liggesusers\name{first.last.dh} \alias{first.last.dh} \usage{ first.last.dh(LengthH, DataLength, type = "wavelet", bc = "periodic", firstk = c(0, DataLength - 1)) } \arguments{ \item{LengthH}{The length of the smoothing (C) filter} \item{DataLength}{The length of the data that you wish to transform} \item{type}{The type of wavelet transform, \code{wavelet} or \code{station} for decimated and nondecimated transforms respectively.} \item{bc}{Boundary conditions, \code{periodic} or \code{symmetric}} \item{firstk}{The first k index, leave as default} } \title{Build special first/last database for some wavelet density functions} \description{ This function builds a special first/last database for some of the wavelet density estimation functions written by David Herrick and described in his PhD thesis. See \code{\link{first.last}} to see what this kind of function does. } \details{ Description says all. } \value{ A list with several components in exactly the same format as for \code{\link{first.last}}. } \seealso{\code{\link{dencvwd}},\code{\link{first.last}},\code{\link{wd.dh}}} \author{David Herrick} \keyword{manip} wavethresh/man/mpostfilter.rd0000644000176200001440000000263314211622540016105 0ustar liggesusers\name{mpostfilter} \alias{mpostfilter} \title{Multiwavelet postfilter} \usage{ mpostfilter(C, prefilter.type, filter.type, nphi, npsi, ndecim, nlevels, verbose = FALSE) } \arguments{ \item{C}{The multivariate sequence you wish to turn back into a univariate one using the inverse of an earlier prefilter operation.} \item{prefilter.type}{Controls the type of prefilter (see Tim Downie's PhD thesis, or references therein. Types include \code{Minimal}, \code{Identity}, \code{Repeat}, \code{Interp}, \code{default}, \code{Xia}, \code{Roach1}, \code{Roach3}, \code{Donovan3} or \code{Linear}} \item{filter.type}{The type of multiwavelet: can be \code{Geronimo} or \code{Donovan3}} \item{nphi}{The number of father wavelets in the system} \item{npsi}{The number of mother wavelets in the system} \item{ndecim}{The ndecim parameter (not apparently used here)} \item{nlevels}{The number of levels in the multiwavelet transform} \item{verbose}{If TRUE then informative messages are printed as the function progresses} } \description{ A multiwavelet postfilter turns a multivariate sequence into a univariate sequence. As such, the postfilter is used on the inverse transform, it is the inverse of an earlier used prefilter. Not intended for direct user use. } \details{ Description says all } \value{ The appropriate postfiltered data. } \seealso{\code{\link{mprefilter}},\code{\link{mwd}}} \author{Tim Downie} \keyword{math} wavethresh/man/checkmyews.rd0000644000176200001440000000121714211622540015674 0ustar liggesusers\name{checkmyews} \alias{checkmyews} \usage{ checkmyews(spec, nsim=10) } \arguments{ \item{spec}{The LSW spectrum} \item{nsim}{The number of realizations} } \title{Check a LSW spectrum through repeated simulation and empirical averages} \description{ Given a LSW spectrum this function simulates \code{nsim} realizations, estimates the spectrum, and then averages the results. The large sample averages should converge to the original spectrum. 
} \value{A LSW spectrum obtained as the average of \code{nsim} simulations from the \code{spec} spectrum.} \seealso{\code{\link{cns}},\code{\link{LSWsim}}, \code{\link{ewspec}}} \keyword{ts} \author{G P Nason} wavethresh/man/lennon.rd0000644000176200001440000000125314211622540015023 0ustar liggesusers\name{lennon} \docType{data} \alias{lennon} \title{John Lennon image.} \description{ A 256x256 matrix. Each entry of the matrix contains an image intensity value. The whole matrix represents an image of John Lennon } \usage{ data(lennon) } \format{ A 256x256 matrix. Each entry of the matrix contains an image intensity value. The whole matrix represents an image of John Lennon } \source{ The John Lennon image was supplied uncredited on certain UNIX workstations as an examples image. I am not sure who the Copyright belongs to. Please let me know if you know } \examples{ # # This command produces the image seen above. # # image(lennon) # } \keyword{datasets} \author{G P Nason} wavethresh/man/print.wst2D.rd0000644000176200001440000000303314211622540015666 0ustar liggesusers\name{print.wst2D} \alias{print.wst2D} \title{Print out information about an wst2d object in a readable form.} \description{ This function prints out information about an \code{\link{wst2D.object}} in a nice human- readable form. Note that this function is automatically called by SPlus whenever the name of an \code{\link{wst2D.object}} is typed or whenever such an object is returned to the top level of the S interpreter } \usage{ \method{print}{wst2D}(x, ...) } \arguments{ \item{x}{An object of class \code{\link{wst2D}} that you wish to print out.} \item{\dots}{This argument actually does nothing in this function!} } \details{ Prints out information about \code{\link{wst2D}} objects in nice readable format. } \value{ The last thing this function does is call \code{\link{summary.wst2D}} so the return value is whatever is returned by this function.} \section{RELEASE}{Version 3.9.6 Copyright Guy Nason 1998 } \seealso{ \code{\link{wst2D.object}}, \code{\link{summary.wst2D}}. } \examples{ # # This examples uses the uawst2D object created in the EXAMPLES # section of the help page for wst2D # #uawst2D #Class 'wst2D' : 2D Stationary Wavelet Transform Object: # ~~~~~ : List with 4 components with names # wst2D nlevelsWT filter date # #$wst2D is the coefficient array # #Created on : Fri Nov 5 18:06:17 GMT 1999 # #summary(.): #---------- #Levels: 8 #Length of original: 256 x 256 #Filter was: Daub cmpct on least asymm N=10 #Date: Fri Nov 5 18:06:17 GMT 1999 } \keyword{utilities} \author{G P Nason} wavethresh/man/plotpkt.rd0000644000176200001440000000136714211622540015235 0ustar liggesusers\name{plotpkt} \alias{plotpkt} \title{Sets up a high level plot ready to show the time-frequency plane and wavelet packet basis slots} \usage{ plotpkt(J) } \arguments{ \item{J}{The number of resolution levels associated with the wavelet packet object you want to depict} } \description{ Sets up a high level plot ready to add wavelet packet slots using, e.g. \code{\link{addpkt}}. This function is used by several routines to begin plotting graphical representations of the time-frequency plane and spaces for packets. 
} \details{ Description says all } \value{ Nothing of interest } \seealso{\code{\link{addpkt}}, \code{\link{basisplot}}, \code{\link{basisplot.BP}}, \code{\link{basisplot.wp}}, \code{\link{plot.nvwp}}} \author{G P Nason} \keyword{dplot} wavethresh/man/draw.wd.rd0000644000176200001440000000360714211622540015105 0ustar liggesusers\name{draw.wd} \alias{draw.wd} \title{Draw mother wavelet or scaling function associated with wd object. } \description{ This function draws the mother wavelet or scaling function associated with a \code{\link{wd.object}}. } \usage{ \method{draw}{wd}(wd, ...) } \arguments{ \item{wd}{The \code{\link{wd}} class object whose associated wavelet or scaling function you wish to draw. } \item{\dots}{Additional arguments to pass to the \code{\link{draw.default}} function which does the drawing. In particular, arguments can be set to choose between drawing the mother wavelet and scaling function, to set the resolution of the plot, to choose between drawing one and two dimensional pictures.} } \details{ This function extracts the filter component from the \code{\link{wd}} object (which is constructed using the \code{\link{filter.select}} function) to decide which wavelet to draw. Once decided the \code{\link{draw.default}} function is used to actually do the drawing. } \value{ If the \code{plot.it} argument is set to TRUE then nothing is returned. Otherwise, as with \code{\link{draw.default}}, the coordinates of what would have been plotted are returned. } \note{If the \code{plot.it} argument is \code{TRUE} (which it is by default) a plot of the mother wavelet or scaling function is plotted on the active graphics device.} \section{RELEASE}{Version 2 Copyright Guy Nason 1993 } \seealso{ \code{\link{filter.select}}, \code{\link{wd.object}}, \code{\link{draw.default}}. } \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Now do the discrete wavelet transform of the data using the Daubechies # least-asymmetric wavelet N=10 (the default arguments in # wd). # tdwd <- wd(test.data) # # What happens if we try to draw this new tdwd object? # \dontrun{draw(tdwd)} # # We get a picture of the wavelet that did the transform # } \keyword{hplot} \author{G P Nason} wavethresh/man/ewspec.rd0000644000176200001440000002070714211622540015025 0ustar liggesusers\name{ewspec} \alias{ewspec} \title{Compute evolutionary wavelet spectrum estimate.} \description{ This function computes the evolutionary wavelet spectrum (EWS) estimate from a time series (or non-decimated wavelet transform of a time series). The estimate is computed by taking the non-decimated wavelet transform of the time series data, taking its modulus; smoothing using TI-wavelet shrinkage and then correction for the redundancy caused by use of the non-decimated wavelet transform. Options below beginning with smooth. are passed directly to the TI-wavelet shrinkage routines. } \usage{ ewspec(x, filter.number = 10, family = "DaubLeAsymm", UseLocalSpec = TRUE, DoSWT = TRUE, WPsmooth = TRUE, verbose = FALSE, smooth.filter.number = 10, smooth.family = "DaubLeAsymm", smooth.levels = 3:(nlevelsWT(WPwst) - 1), smooth.dev = madmad, smooth.policy = "LSuniversal", smooth.value = 0, smooth.by.level = FALSE, smooth.type = "soft", smooth.verbose = FALSE, smooth.cvtol = 0.01, smooth.cvnorm = l2norm, smooth.transform = I, smooth.inverse = I) } \arguments{ \item{x}{The time series that you want to analyze. 
(See DETAILS below on how to supply preprocessed versions of the time series which bypass early parts of the ewspec function). } \item{filter.number }{This selects the index of the wavelet used in the analysis of the time series (i.e. the wavelet basis functions used to model the time series). For Daubechies compactly supported wavelets the filter number is the number of vanishing moments. } \item{family }{This selects the wavelet family to use in the analysis of the time series (i.e. which wavelet family to use to model the time series). Only use the Daubechies compactly supported wavelets \code{DaubExPhase} and \code{DaubLeAsymm}. } \item{UseLocalSpec }{If you input a time series for \code{x} then this argument should always be \code{T}. (However, you can precompute the modulus of the non-decimated wavelet transform yourself and supply it as \code{x} in which case the \code{\link{LocalSpec}} call within this function is not necessary and you can set UseLocalSpec equal to \code{F}). } \item{DoSWT }{If you input a time series for \code{x} then this argument should always be \code{T}. (However, you can precompute the non-decimated wavelet transform yourself and supply it as \code{x} in which case the \code{wd} call within the function will not be necessary and you can set DoSWT equal to \code{F}). } \item{WPsmooth}{Normally a wavelet periodogram is smoothed before it is corrected. Use \code{WPsmooth=F} is you do not want any wavelet periodogram smoothing (correction is still done). } \item{verbose}{If this option is \code{T} then informative messages are printed as the function progresses. } \item{smooth.filter.number }{This selects the index number of the wavelet that smooths each scale of the wavelet periodogram. See \code{\link{filter.select}} for further details on which wavelets you can use. Generally speaking it is a good idea to use a smoother wavelet for smoothing than the one you used for analysis (above) but since one still wants local smoothing it is best not to use a wavelet that is much smoother. } \item{smooth.family}{This selects the wavelet family that smooths each scale of the wavelet periodogram. See \code{filter.select} for further details on which wavelets you can use. There is no need to use the same family as you used to analyse the time series. } \item{smooth.levels }{The levels to smooth when performing the TI-wavelet shrinkage smoothing. } \item{smooth.dev }{The method for estimating the variance of the empirical wavelet coefficients for smoothing purposes. } \item{smooth.policy }{The recipe for smoothing: determines how the threshold is chosen. See \code{\link{threshold}} for TI-smoothing and choice of potential policies. For EWS estimation \code{LSuniversal} is recommended for thi Chi-squared nature of the periodogram coefficients. However, if the coefficients are transformed (using \code{smooth.transform} and \code{smooth.inverse}) then other, more standard, policies may be appropriate. } \item{smooth.value }{When a manual policy is being used this argument is used to supply a threshold value. See \code{threshold} for more information. } \item{smooth.by.level }{If \code{TRUE} then the wavelet shrinkage is performed by computing and applying a separate threshold to each level in the non-decimated wavelet transform of each scale. Note that each scale in the EWS is smoothed separately and independently: and each smooth consists of taking the (second-stage) non-decimated wavelet transform and applying a threshold to each level of a wavelet transformed scale. 
If \code{FALSE} then the same threshold is applied to the non-decimated wavelet transform of a scale. Different thresholds may be computed for different scales (in the time series model) but the threshold will be the same for each level arising from the non-decimated transform of a scale. Note: a \code{scale} refers to a set of coefficients coming from a particular scale of the non-decimated wavelet transform of the time series data that \code{models} the time series. A \code{level} refers to the levels of wavelet coefficients obtained from taking the non-decimated wavelet transform of a particular scale.} \item{smooth.type }{The type of shrinkage: either "hard" or "soft". } \item{smooth.verbose }{If \code{T} then informative messages concerning the TI-transform wavelet shrinkage are printed.} \item{smooth.cvtol }{If cross-validated wavelet shrinkage (\code{smooth.policy="cv"}) is used then this argument supplies the cross-validation tolerance. } \item{smooth.cvnorm}{no description for object} \item{smooth.transform }{The transform function to use to transform the wavelet periodogram estimate. The wavelet periodogram coefficients are typically chi-squared in nature, a \code{log} transform can pull the coefficients towards normality so that a \code{smooth.policy} for Gaussian data could be used (e.g. \code{universal}). } \item{smooth.inverse}{the inverse transform of \code{smooth.transform}. } } \details{ This function computes an estimate of the evolutionary wavelet spectrum of a time series according to the paper by Nason, von Sachs and Kroisandt. The function works as follows: \describe{ \item{1}{The non-decimated wavelet transform of the series is computed.} \item{2}{The squared modulus of the non-decimated wavelet transform is computed (this is the raw wavelet periodogram, which is returned).} \item{3}{The squared modulus is smoothed using TI-wavelet shrinkage.} \item{4}{The smoothed coefficients are corrected using the inverse of the inner product matrix of the discrete non-decimated autocorrelation wavelets (produced using the ipndacw function).} } To display the EWS use the \code{plot}function on the \code{S} component, see the examples below. It is possible to supply the non-decimated wavelet transform of the time series and set \code{DoSWT=F} or to supply the squared modulus of the non-decimated wavelet transform using \code{\link{LocalSpec}} and setting \code{UseLocalSpec=F}. This facility saves time because the function is then only used for smoothing and correction. } \value{ A list with the following components: \item{S}{The evolutionary wavelet spectral estimate of the input \code{x}. This object is of class \code{\link{wd}} and so can be plotted, printed in the usual way. } \item{WavPer}{The raw wavelet periodogram of the input \code{x}. The EWS estimate (above) is the smoothed corrected version of the wavelet periodgram. The wavelet periodogram is of class \code{\link{wd}} and so can be plotted, printed in the usual way. } \item{rm}{This is the matrix A from the paper by Nason, von Sachs and Kroisandt. Its inverse is used to correct the raw wavelet periodogram. This matrix is computed using the \code{\link{ipndacw}} function. } \item{irm}{The inverse of the matrix A from the paper by Nason, von Sachs and Kroisandt. It is used to correct the raw wavelet periodogram.} } \references{Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. 
\emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{Baby Data}, \code{\link{filter.select}}, \code{\link{ipndacw}}, \code{\link{LocalSpec}}, \code{\link{threshold}} \code{\link{wd}} \code{\link{wd.object}} } \examples{ # # Apply the EWS estimate function to the baby data # } \keyword{manip} \author{G P Nason} wavethresh/man/WaveletCV.rd0000644000176200001440000000727714211622634015412 0ustar liggesusers\name{WaveletCV} \alias{WaveletCV} \title{Wavelet cross-validation} \usage{ WaveletCV(ynoise, x = 1:length(ynoise), filter.number = 10, family = "DaubLeAsymm", thresh.type = "soft", tol = 0.01, verbose = 0, plot.it = TRUE, ll=3) } \arguments{ \item{ynoise}{A vector of dyadic (power of two) length that contains the noisy data that you wish to apply wavelet shrinkage by cross-validation to.} \item{x}{This function is capable of producing informative plots. It can be useful to supply the x values corresponding to the \code{ynoise} values. Further this argument is returned by this function which can be useful for later processors.} \item{filter.number}{This selects the smoothness of wavelet that you want to perform wavelet shrinkage by cross-validation.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{thresh.type}{this option specifies the thresholding type which can be "hard" or "soft".} \item{tol}{this specifies the convergence tolerance for the cross-validation optimization routine (a golden section search).} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so it is turned off by default.} \item{plot.it}{If this is TRUE then plots of the universal threshold (used to obtain an upper bound on the cross-validation threshold) reconstruction and the resulting cross-validation estimate are produced.} \item{ll}{The primary resolution that you wish to assume. No wavelet coefficients that are on coarser scales than ll will be thresholded.} } \description{ Two-fold wavelet shrinkage cross-validation (there is a faster C based version \code{\link{CWCV}}.) } \details{ \bold{Note:} a faster C based implementation of this function called \code{\link{CWCV}} is available. It takes the same arguments (although it has one extra minor argument) and returns the same values. Compute the two-fold cross-validated wavelet shrunk estimate given the noisy data ynoise according to the description given in Nason, 1996. You must specify a primary resolution given by \code{ll}. This must be specified individually on each data set and can itself be estimated using cross-validation (although I haven't written the code to do this). Note. The two-fold cross-validation method performs very badly if the input data is correlated. In this case I would advise using other methods. } \value{ A list with the following components \item{x}{This is just the x that was input. It gets passed through more or less for convenience for the user.} \item{ynoise}{A copy of the input ynoise noisy data.} \item{xvwr}{The cross-validated wavelet shrunk estimate.} \item{yuvtwr}{The universal thresholded version (note this is merely a starting point for the cross-validation algorithm. It should not be taken seriously as an estimate. 
In particular its estimate of variance is likely to be inflated.)} \item{xvthresh}{The cross-validated threshold} \item{uvthresh}{The universal threshold (again, don't take this value too seriously. You might get better performance using the threshold function directly with specialist options.} \item{xvdof}{The number of non-zero coefficients in the cross-validated shrunk wavelet object (which is not returned).} \item{uvdof}{The number of non-zero coefficients in the universal threshold shrunk wavelet object (which also is not returned)} \item{xkeep}{always returns NULL!} \item{fkeep}{always returns NULL!} } \seealso{\code{\link{CWCV}},\code{\link{Crsswav}},\code{\link{rsswav}},\code{\link{threshold.wd}}} \examples{ # # This function is best used via the policy="cv" option in # the threshold.wd function. # See examples there. # } \author{G P Nason} \keyword{smooth} wavethresh/man/wp.rd0000644000176200001440000000347114211622634014170 0ustar liggesusers\name{wp} \alias{wp} \title{Wavelet packet transform. } \description{ This function computes a wavelet packet transform (computed by the complete binary application of the DH and DG packet operators, as opposed to the Mallat discrete wavelet transform which only recurses on the DH operator [low pass]). } \usage{ wp(data, filter.number=10, family="DaubLeAsymm", verbose=FALSE) } \arguments{ \item{data}{A vector containing the data you wish to decompose. The length of this vector must be a power of 2.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{verbose}{if \code{TRUE} then (un)helpful messages are printed during the execution.} } \details{ The paper by Nason, Sapatinas and Sawczenko, 1998 details this implementation of the wavelet packet transform. A more thorough reference is Wickerhauser, 1994. } \value{ An object of class \code{\link{wp}} which contains the (decimated) wavelet packet coefficients. } \section{RELEASE}{Version 3.0 Copyright Guy Nason 1994 } \seealso{ \code{\link{accessC.wp}}, \code{\link{accessD.wp}}, \code{\link{basisplot.wp}}, \code{\link{draw.wp}},\code{\link{drawwp.default}}, \code{\link{filter.select}}, \code{\link{getpacket.wp}}, \code{\link{InvBasis.wp}}, \code{\link{MaNoVe.wp}}, \code{\link{plot.wp}}, \code{\link{print.wp}}, \code{\link{putC.wp}}, \code{\link{putD.wp}}, \code{\link{putpacket.wp}}, \code{\link{summary.wp}}, \code{\link{threshold.wp}}, \code{\link{wp.object}}. } \examples{ v <- rnorm(128) vwp <- wp(v) } \keyword{math} \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/InvBasis.wst.rd0000644000176200001440000000357114211622540016071 0ustar liggesusers\name{InvBasis.wst} \alias{InvBasis.wst} \title{Invert a wst library representation with a basis specification} \usage{ \method{InvBasis}{wst}(wst, nv, \dots) } \arguments{ \item{wst}{The wst object that you wish to invert} \item{nv}{The node vector, basis spec, that you want to pick out} \item{...}{Other arguments, that don't do anything here} } \description{ Inverts a wst basis representation with a given basis specification, for example an output from the \code{\link{MaNoVe}} function. } \details{ Objects arising from a \code{\link{wst.object}} specification are a representation of a signal with respect to a library of basis functions. 
A particular basis specification can be obtained using the \code{\link{numtonv}} function which can pick an indexed basis function, or \code{\link{MaNoVe.wst}} which uses the Coifman-Wickerhauser minimum entropy method to select a basis. This function takes a \code{\link{wst.object}} and a particular basis description (in a \code{\link{nv.object}} node vector object) and inverts the representation with respect to that selected basis. } \value{ The inverted reconstruction } \seealso{\code{\link{numtonv}},\code{\link{nv.object}},\code{\link{MaNoVe.wst}},\code{\link{threshold.wst}},\code{\link{wst}}} \examples{ # # Let's generate a noisy signal # x <- example.1()$y + rnorm(512, sd=0.2) # # You can plot this if you like # \dontrun{ts.plot(x)} # # Now take the nondecimated wavelet transform # xwst <- wst(x) # # Threshold it # xwstT <- threshold(xwst) # # You can plot this too if you like # \dontrun{plot(xwstT)} # # Now use Coifman-Wickerhauser to get a "good" basis # xwstTNV <- MaNoVe(xwstT) # # Now invert the thresholded wst using this basis specification # xTwr <- InvBasis(xwstT, xwstTNV) # # And plot the result, and superimpose the truth in dotted # \dontrun{ts.plot(xTwr)} \dontrun{lines(example.1()$y, lty=2)} } \author{G P Nason} \keyword{smooth} wavethresh/man/BabySS.rd0000644000176200001440000000514214211622540014660 0ustar liggesusers\name{BabySS} \docType{data} \alias{BabySS} \title{Physiological data time series.} \description{ Two linked medical time series containing 2048 observations sampled every 16 seconds recorded from 21:17:59 to 06:27:18. Both these time series were recorded from the same 66 day old infant by Prof. Peter Fleming, Dr Andrew Sawczenko and Jeanine Young of the Institute of Child Health, Royal Hospital for Sick Children, Bristol. \code{BabyECG} is a record of the infant's heart rate (in beats per minute). \code{BabySS} is a record of the infant's sleep state on a scale of 1 to 4 as determined by a trained expert monitoring EEG (brain) and EOG (eye-movement). The sleep state codes are 1=quiet sleep, 2=between quiet and active sleep, 3=active sleep, 4=awake. } \format{ The \code{BabyECG} time series is a nice example of a non-stationary time series whose spectral (time-scale) properties vary over time. The function \code{\link{ewspec}} can be used to analyse this time series to inspect the variation in the power of the series over time and scales. The \code{BabySS} time series is a useful independent time series that can be associated with changing power in the \code{BabyECG} series. See the discussion in Nason, von Sachs and Kroisandt. } \source{Institute of Child Health, Royal Hospital for Sick Children, Bristol.} \section{RELEASE}{ Version 3.9 Copyright Guy Nason 1998 } \references{ Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. \emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern. } \section{SEE ALSO}{ \code{\link{ewspec}} } \examples{ data(BabyECG) data(BabySS) # # Plot the BabyECG data with BabySS overlaid # # Note the following code does some clever scaling to get the two # time series overlaid.
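#
# (Aside, not part of the original example: as mentioned in the format
# section above, ewspec can be applied to BabyECG directly because the
# series has 2048 = 2^11 observations. A minimal, hedged sketch using
# default arguments; the object name BabySpec is illustrative only.)
#
\dontrun{BabySpec <- ewspec(BabyECG)}
\dontrun{plot(BabySpec$S)}
#
# The overlay plotting code described in the comment above now follows.
#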
# myhrs <- c(22, 23, 24, 25, 26, 27, 28, 29, 30) mylab <- c("22", "23", "00", "01", "02", "03", "04", "05", "06") initsecs <- 59 + 60 * (17 + 60 * 21) mysecs <- (myhrs * 3600) secsat <- (mysecs - initsecs)/16 mxy <- max(BabyECG) mny <- min(BabyECG) ro <- range(BabySS) no <- ((mxy - mny) * (BabySS - ro[1]))/(ro[2] - ro[1]) + mny rc <- 0:4 nc <- ((mxy - mny) * (rc - ro[1]))/(ro[2] - ro[1]) + mny \dontrun{plot(1:length(BabyECG), BabyECG, xaxt = "n", type = "l", xlab = "Time (hours)", ylab = "Heart rate (beats per minute)")} \dontrun{lines(1:length(BabyECG), no, lty = 3)} \dontrun{axis(1, at = secsat, labels = mylab)} \dontrun{axis(4, at = nc, labels = as.character(rc))} # # Sleep state is the right hand axis # # } \keyword{datasets} \author{G P Nason} wavethresh/man/rfftinv.rd0000644000176200001440000000066114211622634015216 0ustar liggesusers\name{rfftinv} \alias{rfftinv} \title{Inverse real FFT, inverse of rfft} \usage{ rfftinv(rz, n = length(rz)) } \arguments{ \item{rz}{The Fourier coefficients to invert} \item{n}{The number of coefficients} } \description{ Inverse function of \code{\link{rfft}} } \details{ Just the inverse function of \code{\link{rfft}}. } \value{ The inverse FT of the input } \seealso{\code{\link{rfft}}} \author{Bernard Silverman} \keyword{math} wavethresh/man/print.nv.rd0000644000176200001440000000406014211622540015307 0ustar liggesusers\name{print.nv} \alias{print.nv} \title{Print a node vector object, also used by several other functions to obtain packet list information} \usage{ \method{print}{nv}(x, printing = TRUE, verbose = FALSE, ...) } \arguments{ \item{x}{The \code{\link{nv.object}} that you wish to print} \item{printing}{If FALSE then nothing is printed. This argument is here because the results of the printing are also useful to many other routines where you want the results but are not bothered by actually seeing the results} \item{verbose}{Not actually used} \item{\dots}{Other arguments} } \description{ Ostensibly prints out node vector information, but also produces packet indexing information for several functions. } \details{ A node vector contains selected basis information, but this is stored as a tree object. Hence, it is not immediately obvious which basis elements have been stored. This function produces a list of the packets at each resolution level that have been selected in the basis. This information is so useful to other functions that the function is used even when printing is not the primary objective. } \value{ A list containing two components: \code{indexlist} and \code{rvector}. The former is a list of packets that were selected at each resolution level. Rvector encodes a list of "rotate/non-rotate" instructions in binary. At each selected packet level a decision has to be made whether to select the LH or RH basis element, and this information is stored in \code{rvector}. 
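As a hedged sketch of using the returned value rather than the printed output (the object names here are illustrative only and mirror the examples below): setting \code{printing=FALSE} suppresses the printout but still returns the packet information.

v <- rnorm(128)
vnv <- MaNoVe(wst(v))
basisinfo <- print.nv(vnv, printing=FALSE)
basisinfo$indexlist   # packets selected at each resolution level
basisinfo$rvector     # binary left/right (rotate) choices at each step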
} \seealso{ \code{\link{InvBasis.wst}}, \code{\link{nv.object}}, \code{\link{plot.wp}}} \examples{ v <- rnorm(128) vwst <- wst(v) vnv <- MaNoVe(vwst) print(vnv) #Level : 6 Action is R (getpacket Index: 1 ) #Level : 5 Action is L (getpacket Index: 2 ) #Level : 4 Action is L (getpacket Index: 4 ) #Level : 3 Action is R (getpacket Index: 9 ) #Level : 2 Action is L (getpacket Index: 18 ) #There are 6 reconstruction steps # # The L or R indicate whether to move to the left or the right basis function # when descending the node tree # # } \author{G P Nason} \keyword{print} wavethresh/man/uncompress.rd0000644000176200001440000000154714211622634015742 0ustar liggesusers\name{uncompress} \alias{uncompress} \title{Uncompress objects} \description{ Uncompress objects. This function is generic. Particular methods exist. For the \code{\link{imwdc.object}} class object this generic function uses \code{\link{uncompress.imwdc}}. There is a default uncompression method: \code{\link{uncompress.default}} that works on vectors. } \usage{ uncompress(\dots) } \arguments{ \item{\dots}{See individual help pages for details. } } \details{ See individual method help pages for operation and examples } \value{ A uncompressed version of the input. } \section{RELEASE}{Version 2.0 Copyright Guy Nason 1993} \seealso{ \code{\link{uncompress.default}}, \code{\link{uncompress.imwdc}}, \code{\link{imwd}}, \code{\link{imwd.object}}, \code{\link{imwdc.object}}, \code{\link{threshold.imwd}} } \keyword{manip} \keyword{utilities} \author{G P Nason} wavethresh/man/putD.rd0000644000176200001440000000206414211622634014453 0ustar liggesusers\name{putD} \alias{putD} \title{Put mother wavelet coefficients into wavelet structure} \description{ This generic function inserts smooths into various types of wavelet objects. This function is generic. Particular methods exist. For objects of class: \describe{ \item{wd}{use the \code{\link{putD.wd}} method.} \item{wp}{use the \code{\link{putD.wp}} method. } \item{wst}{use the \code{\link{putD.wst}} method.} } See individual method help pages for operation and examples. See \code{\link{accessD}} if you wish to \emph{extract} mother wavelet coefficients. See \code{\link{putC}} if you wish to insert \emph{father} wavelet coefficients. } \usage{ putD(\dots) } \arguments{ \item{\dots}{See individual help pages for details.} } \value{ A wavelet object of the same class as \code{x} with the new mother wavelet coefficients inserted. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{putD.wd}}, \code{\link{putD.wp}}, \code{\link{putD.wst}}, \code{\link{accessD}}, \code{\link{putC}}. } \keyword{manip} \author{G P Nason} wavethresh/man/print.wpstCL.rd0000644000176200001440000000233614211622540016104 0ustar liggesusers\name{print.wpstCL} \alias{print.wpstCL} \title{Prints some information about a wpstCL object} \usage{ \method{print}{wpstCL}(x, \dots) } \arguments{ \item{x}{wpstCL object to print info about} \item{\dots}{Other arguments} } \description{ Prints basic information about a wpstCL object } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{makewpstDO}},\code{\link{wpstCLASS}}} \examples{ # # Use BabySS and BabyECG data for this example. 
# # Want to predict future values of BabySS from future values of BabyECG # # Build model on first 256 values of both # # See example in makewpstDO from which this one originates # data(BabyECG) data(BabySS) BabyModel <- makewpstDO(timeseries=BabyECG[1:256], groups=BabySS[1:256], mincor=0.5) # # Now, suppose we get some new data for the BabyECG time series. # For the purposes of this example, this is just the continuing example # ie BabyECG[257:512]. We can use our new discriminant model to predict # new values of BabySS # BabySSpred <- wpstCLASS(newTS=BabyECG[257:512], BabyModel) # BabySSpred #wpstCL class object #Results of applying discriminator to time series #Components: BasisMatrix BasisMatrixDM wpstDO PredictedOP PredictedGroups } \author{G P Nason} \keyword{print} wavethresh/man/putpacket.wp.rd0000644000176200001440000000756714211622634016201 0ustar liggesusers\name{putpacket.wp} \alias{putpacket.wp} \title{Inserts a packet of coefficients into a wavelet packet object (wp).} \description{ This function inserts a packet of coefficients into a wavelet packet (\code{\link{wp}}) object. } \usage{ \method{putpacket}{wp}(wp, level, index, packet , \dots) } \arguments{ \item{wp}{Wavelet packet object into which you wish to put the packet.} \item{level}{The resolution level of the coefficients that you wish to insert.} \item{index}{The index number within the resolution level of the packet of coefficients that you wish to insert.} \item{packet}{a vector of coefficients which is the packet you wish to insert.} \item{\dots}{any other arguments} } \details{ The coefficients in this structure can be organised into a binary tree with each node in the tree containing a packet of coefficients. Each packet of coefficients is obtained by chaining together the effect of the two \emph{packet operators} DG and DH: these are the high and low pass quadrature mirror filters of the Mallat pyramid algorithm scheme followed by decimation (see Mallat (1989b)). Starting with data \eqn{c^J} at resolution level J containing \eqn{2^J} data points the wavelet packet algorithm operates as follows. First DG and DH are applied to \eqn{c^J} producing \eqn{d^{J-1}} and \eqn{c^{J-1}} respectively. Each of these sets of coefficients is of length one half of the original data: i.e. \eqn{2^{J-1}}. Each of these sets of coefficients is a set of \emph{wavelet packet coefficients}. The algorithm then applies both DG and DH to both \eqn{d^{J-1}} and \eqn{c^{J-1}} to form a four sets of coefficients at level J-2. Both operators are used again on the four sets to produce 8 sets, then again on the 8 sets to form 16 sets and so on. At level j=J,...,0 there are \eqn{2^{J-j}} packets of coefficients each containing \eqn{2^j} coefficients. This function enables whole packets of coefficients to be inserted at any resolution level. The \code{index} argument chooses a particular packet within each level and thus ranges from 0 (which always refer to the father wavelet coefficients), 1 (which always refer to the mother wavelet coefficients) up to \eqn{2^{J-j}}. } \value{ An object of class \code{\link{wp.object}} which is the same as the input \code{\link{wp.object}} except it now has a modified packet of coefficients. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{wp}}, \code{\link{getpacket.wp}}. \code{\link{putpacket}}. } \examples{ # # Take the wavelet packet transform of some random data # MyWP <- wp(rnorm(1:512)) # # The above data set was 2^9 in length. 
Therefore there are # coefficients at resolution levels 0, 1, 2, ..., and 8. # # The high resolution coefficients are at level 8. # There should be 256 DG coefficients and 256 DH coefficients # length(getpacket(MyWP, level=8, index=0)) # [1] 256 length(getpacket(MyWP, level=8, index=1)) # [1] 256 # # The next command shows that there are only two packets at level 8 # #getpacket(MyWP, level=8, index=2) # Index was too high, maximum for this level is 1 # Error in getpacket.wp(MyWP, level = 8, index = 2): Error occured # Dumped # # There should be 4 coefficients at resolution level 2 # # The father wavelet coefficients are (index=0) getpacket(MyWP, level=2, index=0) # [1] -0.9736576 0.5579501 0.3100629 -0.3834068 # # The mother wavelet coefficients are (index=1) # getpacket(MyWP, level=2, index=1) # [1] 0.72871405 0.04356728 -0.43175307 1.77291483 # # Well, that exercised the getpacket.wp # function. Now that we know that level 2 coefficients have 4 coefficients # let's insert some into the MyWP object. # MyWP <- putpacket(MyWP, level=2, index=0, packet=c(21,32,67,89)) # # O.k. that was painless. Now let's check that the correct coefficients # were inserted. # getpacket(MyWP, level=2, index=0) #[1] 21 32 67 89 # # Yep. The correct coefficients were inserted. } \keyword{manip} \author{G P Nason} wavethresh/man/imwr.imwdc.rd0000644000176200001440000000406214211622540015613 0ustar liggesusers\name{imwr.imwdc} \alias{imwr.imwdc} \title{Inverse two-dimensional discrete wavelet transform. } \description{ Inverse two-dimensional discrete wavelet transform. } \usage{ \method{imwr}{imwdc}(imwd, verbose=FALSE, \dots) } \arguments{ \item{imwd}{An object of class \code{imwdc}. This type of object is returned by \code{\link{threshold.imwd}} and is a \code{\link{compress.imwd}} compressed version of an \code{\link{imwd}} object.} \item{verbose}{If this argument is true then informative messages are printed detailing the computations to be performed} \item{\dots}{other arguments to supply to the \code{\link{imwr}} function which is called after uncompressing the imwdc object.}} \details{ This function merely uncompresses the supplied \code{\link{imwdc.object}} and passes the resultant \code{\link{imwd}} object to the \code{\link{imwr.imwd}} function. This function is a method for the generic function \code{\link{imwr}} for class \code{\link{imwdc.object}}. It can be invoked by calling \code{\link{imwr}} for an object of the appropriate class, or directly by calling imwr.imwdc regardless of the class of the object. } \value{ A matrix, of dimension determined by the original data set supplied to the initial decomposition (more precisely, determined by the \code{\link{nlevelsWT}} component of the \code{\link{imwdc.object}}). This matrix is the highest resolution level of the reconstruction. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{compress.imwd}}, \code{\link{imwd}}, \code{\link{imwd.object}}, \code{\link{imwr}}. } \examples{ # # Do a decomposition, thresholding, then exact reconstruction # Look at the error # test.image <- matrix(rnorm(32*32), nrow=32) # Test image is just some sort of square matrix whose side length # is a power of two. # max( abs(imwr(threshold(imwd(test.image))) - test.image)) # [1] 62.34 # # The answer is not zero (see contrasting examples in the help page for # imwr.imwd because we have thresholded the # 2D wavelet transform here). 
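#
# As a hedged contrast (not part of the original example): without any
# thresholding the reconstruction is essentially exact, so the analogous
# quantity below should be close to machine precision.
#
max( abs(imwr(imwd(test.image)) - test.image))
#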
} \keyword{nonlinear} \keyword{smooth} \author{G P Nason} wavethresh/man/accessC.mwd.rd0000644000176200001440000000570114211622540015666 0ustar liggesusers\name{accessC.mwd} \alias{accessC.mwd} \title{Get Smoothed Data from Wavelet Structure} \description{ The smoothed and original data from a multiple wavelet decomposition structure, \code{\link{mwd.object}} etc. (e.g. returned from \code{\link{mwd}}) are packed into a single matrix in that structure. This function extracts the data corresponding to a particular resolution level. } \usage{ \method{accessC}{mwd}(mwd, level = nlevelsWT(mwd), \dots) } \arguments{ \item{mwd}{Multiple wavelet decomposition structure from which you wish to extract the smoothed or original data if the structure is from a wavelet decomposition, or the reconstructed data if the structure is from a wavelet reconstruction.} \item{level}{The level that you wish to extract. By default, this is the level with most detail (in the case of structures from a decomposition this is the original data, in the case of structures from a reconstruction this is the top-level reconstruction).} \item{\dots}{any other arguments} } \details{ The \link{mwd} function produces a wavelet decomposition structure. For decomposition, the top level contains the original data, and subsequent lower levels contain the successively smoothed data. So if there are \code{mwd$filter$npsi*2^m} original data points (\code{mwd$filter$npsi} is the multiplicity of wavelets), there will be \code{m+1} levels indexed 0,1,...,m. So \code{accessC.mwd(mwd, level=m)} pulls out the original data, as does \code{accessC.mwd(mwd)}. To get hold of lower levels just specify the level that you're interested in, e.g. \code{accessC.mwd(mwd, level=2)} gets hold of the second level. The need for this function is a consequence of the pyramidal structure of Mallat's algorithm and the memory efficiency gain achieved by storing the pyramid as a linear matrix of coefficients. AccessC obtains information about where the smoothed data appears from the fl.dbase component of mwd, in particular the array \code{fl.dbase$first.last.c} which gives a complete specification of index numbers and offsets for \code{mwd$C}. Note also that this function only gets information from \link{mwd} class objects. To \emph{put} coefficients into \link{mwd} structures you have to use the \link{putC.mwd} function. See Downie and Silverman, 1998. } \value{ A matrix with \code{mwd$filter$npsi} rows containing the extracted data of all the coefficients at that level. } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1995-6.)} \seealso{ \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}} } \examples{ # # Get the 3rd level of smoothed data from a decomposition # dat <- rnorm(32) accessC.mwd(mwd(dat), level=3) } \keyword{manip} \author{G P Nason} wavethresh/man/denproj.rd0000644000176200001440000000570714211622540015203 0ustar liggesusers\name{denproj} \alias{denproj} \title{Calculate empirical scaling function coefficients of a p.d.f.} \usage{ denproj(x, tau=1, J, filter.number=10, family="DaubLeAsymm", covar=FALSE, nT=20) } \arguments{ \item{x}{Vector containing the data.
This can be of any length.} \item{J}{The resolution level at which the empirical scaling function coefficients are to be calculated.} \item{tau}{This parameter allows non-dyadic resolutions to be used, since the resolution is specified as \code{tau * 2J}.} \item{filter.number}{The filter number of the wavelet basis to be used.} \item{family}{The family of wavelets to use, can be "DaubExPhase" or "DaubLeAsymm".} \item{covar}{Logical variable. If TRUE then covariances of the empirical scaling function coefficients are also calculated.} \item{nT}{The number of iterations to be performed in the Daubechies-Lagarias algorithm, which is used to evaluate the scaling functions of the specified wavelet basis at the data points.} } \description{ Calculates empirical scaling function coefficients of the probability density function from a sample of data from that density, usually at some "high" resolution. } \details{ This projection of data onto a high resolution wavelet space is described in detail in Chapter 3 of Herrick (2000). The maximum and minimum values of \code{k} for which the empirical scaling function coefficient is non-zero are determined and the coefficients calculated for all k between these limits as \code{sum(phiJk(xi))/n}. The scaling functions are evaluated at the data points efficiently, using the Daubechies-Lagarias algorithm (Daubechies & Lagarias (1992)). Coded kindly by Brani Vidakovic. Herrick, D.R.M. (2000) Wavelet Methods for Curve and Surface Estimation. PhD Thesis, University of Bristol. Daubechies, I. & Lagarias, J.C. (1992). Two-Scale Difference Equations II. Local Regularity, Infinite Products of Matrices and Fractals. SIAM Journal on Mathematical Analysis, 24(4), 1031--1079. } \value{ A list with components: \item{coef}{A vector containing the empirical scaling function coefficients. This starts with the first non-zero coefficient, ends with the last non-zero coefficient and contains all coefficients, including zeros, in between.} \item{covar}{Matrix containing the covariances, if requested.} \item{klim}{The maximum and minimum values of k for which the empirical scaling function coefficients cJk are non-zero.} \item{p}{The primary resolution \code{tau * 2J}.} \item{filter}{A list containing the filter.number and family specified in the function call.} \item{n}{The length of the data vector x.} \item{res}{A list containing the values of \code{p}, \code{tau} and \code{J}.} } \author{David Herrick} \seealso{\code{\link{Chires5}}, \code{\link{Chires6}}, \code{\link{denwd}}, \code{\link{denwr}}} \examples{ # Simulate data from the claw density and find the # empirical scaling function coefficients data <- rclaw(100) datahr <- denproj(data, J=8, filter.number=4,family="DaubLeAsymm") } \keyword{smooth} wavethresh/man/nullevels.imwd.rd0000644000176200001440000000417014211622540016503 0ustar liggesusers\name{nullevels.imwd} \alias{nullevels.imwd} \title{Sets whole resolution levels of coefficients equal to zero in an imwd object.} \description{ Sets whole resolution levels of coefficients equal to zero in an \code{\link{imwd.object}}. } \usage{ \method{nullevels}{imwd}(imwd, levelstonull, \dots) } \arguments{ \item{imwd}{An object of class \code{\link{imwd}}.} \item{levelstonull}{An integer vector specifying which resolution levels of coefficients of \code{\link{imwd}} you wish to set to zero. } \item{\dots}{any other arguments} } \details{ Setting whole resolution levels of coefficients to zero can be very useful.
For example, one can construct a linear smoothing method by setting all coefficients above a particular resolution (the \emph{primary resolution}) equal to zero. Setting particular levels equal to zero can also be useful for removing noise which is specific to a particular resolution level (as long as important signal is not also contained at that level). Note that this function removes the horizontal, diagonal and vertical detail coefficients at the resolution level specified. It does not remove the father wavelet coefficients at those resolution levels. To remove individual coefficients on a systematic basis you probably want to look at the \code{\link{threshold}} function. } \value{ An object of class \code{\link{imwd}} where the coefficients in resolution levels specified by levelstonull have been set to zero. } \section{RELEASE}{Version 3.9.5 Copyright Guy Nason 1998 } \seealso{ \code{\link{nullevels}}, \code{\link{imwd}}, \code{\link{imwd.object}}, \code{\link{threshold}}. } \examples{ # # Do the wavelet transform of the Lennon image # data(lennon) lenimwd <- imwd(lennon) # # Set scales (resolution levels) 2, 4 and 6 equal to zero. # lenwdNL <- nullevels(lenimwd, levelstonull=c(2,4,6)) # # Now let's plot the coefficients using a nice blue-heat colour map # # You will see that coefficients at levels 2, 4 and 6 are black (i.e. zero) # You can see that coefficients at other levels are unaffected and still # show the Lennon coefficients. # \dontrun{plot(lenwdNL)} } \keyword{manip} \author{G P Nason} wavethresh/man/AvBasis.wst.rd0000644000176200001440000001047214211622540015701 0ustar liggesusers\name{AvBasis.wst} \alias{AvBasis.wst} \title{Perform basis averaging for (packet-ordered) non-decimated wavelet transform.} \description{ Perform basis averaging for (packet-ordered) non-decimated wavelet transform. } \usage{ \method{AvBasis}{wst}(wst, Ccode=TRUE, \dots) } \arguments{ \item{wst}{An object of class \code{\link{wst}} that contains coefficients of a packet ordered non-decimated wavelet transform (e.g. produced by the \code{\link{wst}} function).} \item{Ccode}{If TRUE then fast compiled C code is used to perform the transform. If FALSE then S code is used. Almost always use the default TRUE option. (It is conceivable that some implementation cannot use the C code and so this option permits use of the slower S code).} \item{\dots}{any other arguments} } \details{ The packet-ordered non-decimated wavelet transform computed by \code{\link{wst}} computes the coefficients of an input vector with respect to a library of all shifts of wavelet basis functions at all scales. Here "all shifts" means all integral shifts with respect to the finest scale coefficients, and "all scales" means all dyadic scales from 0 (the coarsest) to J-1 (the finest) where \code{2^J = n} and \code{n} is the number of data points of the input vector. As such the packet-ordered non-decimated wavelet transform contains a library of all possible shifted wavelet bases. \bold{Basis selection}. It is possible to select a particular basis and invert that particular representation. In WaveThresh a basis is selected by creating a \code{nv} (node.vector) class object which identifies the basis. Then the function \code{\link{InvBasis}} takes the wavelet representation and the node.vector and inverts the representation with respect to that selected basis.
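As a minimal, hedged sketch of basis selection followed by inversion (the object names are illustrative only; the two node vector creating functions are described next):

y <- example.1()$y
ywst <- wst(y)                 # packet-ordered non-decimated transform
ynv <- MaNoVe(ywst)            # select a basis (a node vector object)
yinv <- InvBasis(ywst, ynv)    # invert with respect to the selected basis
max(abs(yinv - y))             # any basis from the library reconstructs y exactly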
The two functions \code{\link{MaNoVe}} and \code{\link{numtonv}} create a node.vector: the first by using a \code{Coifman-Wickerhauser} minimum entropy best-basis algorithm and the second by basis index. \bold{Basis averaging}. Rather than select a basis it is often useful to preserve information from all of the bases. For example, in curve estimation, after \link{threshold}ing a wavelet representation the coefficients are coefficients of an estimate of the truth with respect to all of the shifted basis functions. Rather than select \emph{one} of them we can average over all estimates. This sometimes gives a better curve estimate and can, for example, get rid of Gibbs effects. See Coifman and Donoho (1995) for more information on how to do curve estimation using the packet ordered non-decimated wavelet transform, thresholding and basis averaging. Further, it might seem that inverting each wavelet transform and averaging would be a computationally expensive operation: each wavelet inversion costs order \eqn{n} operations and there are n different bases, so you might think that the overall order is \eqn{n^2}. It turns out that since many of the coarser scale basis functions are duplicated between bases there is redundancy in the non-decimated transform. Coifman and Donoho's TI-denoising algorithm makes use of this redundancy which results in an algorithm which only takes order \eqn{n \log n}{n*logn} operations. For an example of denoising using the packet-ordered non-decimated wavelet transform and basis averaging see Johnstone and Silverman, 1997. The WaveThresh implementation of the basis averaging algorithm is to be found in Nason and Silverman, 1995. } \value{ A vector containing the average of the wavelet representation over all the basis functions. The length of the vector is \code{2^nlev} where \code{nlev} is the number of levels in the input \code{wst} object. } \section{RELEASE}{Version 3.6.0 Copyright Guy Nason 1995} \seealso{ \code{\link{av.basis}}, \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{MaNoVe}}, \code{\link{numtonv}}, \code{\link{InvBasis}}, \code{\link{wavegrow}} } \examples{ # # Generate some test data # test.data <- example.1()$y # # Now take the packet-ordered non-decimated wavelet transform # tdwst <- wst(test.data) # # Now "invert" it using basis averaging # tdwstAB <- AvBasis(tdwst) # # Let's compare it to the original # sum( (tdwstAB - test.data)^2) # # [1] 9.819351e-17 # # Very small. They're essentially the same. # # See the threshold.wst help page for # an example of using basis averaging in curve estimation. } \keyword{manip} \author{G P Nason} wavethresh/man/summary.wpst.rd0000644000176200001440000000134714211622634016233 0ustar liggesusers\name{summary.wpst} \alias{summary.wpst} \title{Print out some basic information associated with a wpst object} \usage{ \method{summary}{wpst}(object, \dots) } \arguments{ \item{object}{The object to print a summary about} \item{\dots}{Other arguments} } \description{ Prints out the number of levels, the length of the original vector from which the object came, the type of wavelet filter associated with the decomposition, and the date of production.
} \details{ Description says all } \value{ Nothing } \seealso{\code{\link{wpst}}} \examples{ vwpst <- wpst(rnorm(32)) summary(vwpst) #Levels: 5 #Length of original: 32 #Filter was: Daub cmpct on least asymm N=10 #Date: Mon Mar 8 21:54:47 2010 } \author{G P Nason} \keyword{print} wavethresh/man/newsure.rd0000644000176200001440000000102514211622540015217 0ustar liggesusers\name{newsure} \alias{newsure} \usage{ newsure(s, x) } \arguments{ \item{s}{Vector of standard deviations of coefficients} \item{x}{Vector of regular (ie non-normalized) coefficients} } \title{Version of sure that acts as subsidiary for threshold.irregwd} \description{ Version of the \code{\link{sure}} function used as a subsidiary for \code{\link{threshold.irregwd}}. } \details{ Description says all } \value{ The SURE threshold } \seealso{\code{\link{sure}}, \code{\link{threshold.irregwd}}} \author{Arne Kovac} \keyword{math} wavethresh/man/accessc.rd0000644000176200001440000000373514211622540015145 0ustar liggesusers\name{accessc} \alias{accessc} \title{Get variance information from irregularly spaced wavelet decomposition object. } \description{ This function gets information from the c component of an \code{\link{irregwd.objects}} an irregularly spaced wavelet decomposition object. Note that this function is \emph{not} the same as \code{\link{accessC}} which obtains father wavelet coefficients from an \code{\link{wd}} class object. } \usage{ accessc(irregwd.structure, level, boundary=FALSE) } \arguments{ \item{irregwd.structure}{Irregular wavelet decomposition object from which you wish to extract parts of the \code{c} component from.} \item{level}{The level that you wish to extract. This value ranges from 0 to the \code{\link{nlevelsWT}}(irregwd.structure)-1.} \item{boundary}{If this argument is T then all of the boundary correction values will be returned as well (note: the length of the returned vector may not be a power of 2). If boundary is false, then just the coefficients will be returned. If the decomposition (or reconstruction) was done with periodic boundary conditions then this option has no effect.} } \details{ The \code{\link{irregwd}} function produces a irregular wavelet decomposition (reconstruction) structure. The \code{c} component is stored in a similar way to the C and D vectors which store the father and mother wavelet coefficients respectively. Hence to access the information the accessc function plays a similar role to \code{\link{accessC}} and \code{\link{accessD}} functions. } \value{ A vector of the extracted data. } \section{RELEASE}{Version 3.9.4 Code Copyright Arne Kovac 1997. Help Copyright Guy Nason 2004. } \seealso{ \code{\link{irregwd}}, \code{\link{irregwd.objects}}, \code{\link{threshold.irregwd}},\code{\link{makegrid}}, \code{\link{plot.irregwd}}} \examples{ # # Most users will not need to use this function. However, see the main # examples for the irregular wavelet denoising in the examples for # makegrid. # } \keyword{manip} \author{G P Nason} wavethresh/man/wd.rd0000644000176200001440000003166414211622634014161 0ustar liggesusers\name{wd} \alias{wd} \title{Wavelet transform (decomposition).} \description{ This function can perform two types of discrete wavelet transform (DWT). The standard DWT computes the DWT according to Mallat's pyramidal algorithm (Mallat, 1989) (it also has the ability to compute the \emph{wavelets on the interval} transform of Cohen, Daubechies and Vial, 1993). The non-decimated DWT (NDWT) contains all possible shifted versions of the DWT. 
The order of computation of the DWT is O(n), and it is O(n log n) for the NDWT if n is the number of data points. } \usage{ wd(data, filter.number=10, family="DaubLeAsymm", type="wavelet", bc="periodic", verbose=FALSE, min.scale=0, precond=TRUE) } \arguments{ \item{data}{A vector containing the data you wish to decompose. The length of this vector must be a power of 2.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments. For the ``wavelets on the interval'' (\code{bc="interval"}) transform the filter number ranges from 1 to 8. See the table of filter coefficients indexed after the reference to Cohen, Daubechies and Vial, 1993.} \item{family}{specifies the family of wavelets that you want to use. Two popular options are "DaubExPhase" and "DaubLeAsymm" but see the help for \code{\link{filter.select}} for more possibilities. This argument is ignored for the ``wavelets on the interval'' transform (\code{bc="interval"}). Note that, as of version 4.6.1 you can use the Lina-Mayrand complex-valued wavelets. } \item{type}{specifies the type of wavelet transform. This can be "wavelet" (default) in which case the standard DWT is performed (as in previous releases of WaveThresh). If type is "station" then the non-decimated DWT is performed. At present, only periodic boundary conditions can be used with the non-decimated wavelet transform.} \item{bc}{specifies the boundary handling. If \code{bc="periodic"} the default, then the function you decompose is assumed to be periodic on it's interval of definition, if \code{bc="symmetric"} then the function beyond its boundaries is assumed to be a symmetric reflection of the function in the boundary. The symmetric option was the implicit default in releases prior to 2.2. If \code{bc=="interval"} then the ``wavelets on the interval algorithm'' due to Cohen, Daubechies and Vial is used. (The \code{WaveThresh} implementation of the ``wavelets on the interval transform'' was coded by Piotr Fryzlewicz, Department of Mathematics, Wroclaw University of Technology, Poland; this code was largely based on code written by Markus Monnerjahn, RHRK, Universitat Kaiserslautern; integration into \code{WaveThresh} by \code{GPN}. See the nice project report by Piotr on this piece of code). } \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so it is turned off by default.} \item{min.scale}{Only used for the ``wavelets on the interval transform''. The wavelet algorithm starts with fine scale data and iteratively coarsens it. This argument controls how many times this iterative procedure is applied by specifying at which scale level to stop decomposiing. } \item{precond }{Only used for the ``wavelets on the interval transform''. This argument specifies whether preconditioning is applied (called prefiltering in Cohen, Daubechies and Vial, 1993.) Preconditioning ensures that sequences like 1,1,1,1 or 1,2,3,4 map to zero high pass coefficients. } } \details{ If type=="wavelet" then the code implements Mallat's pyramid algorithm (Mallat 1989). For more details of this implementation see Nason and Silverman, 1994. Essentially it works like this: you start off with some data cm, which is a real vector of length \eqn{2^m}, say. Then from this you obtain two vectors of length \eqn{2^(m-1)}. 
One of these is a set of smoothed data, c(m-1), say. This looks like a smoothed version of cm. The other is a vector, d(m-1), say. This corresponds to the detail removed in smoothing cm to c(m-1). More precisely, they are the coefficients of the wavelet expansion corresponding to the highest resolution wavelets in the expansion. Similarly, c(m-2) and d(m-2) are obtained from c(m-1), etc. until you reach c0 and d0. All levels of smoothed data are stacked into a single vector for memory efficiency and ease of transport across the SPlus-C interface. The smoothing is performed directly by convolution with the wavelet filter (\code{filter.select(n)$H}, essentially low-pass filtering), and then dyadic decimation (selecting every other datum, see Vaidyanathan (1990)). The detail extraction is performed by the mirror filter of H, which we call G and is a bandpass filter. G and H are also known as quadrature mirror filters. There are now two methods of handling "boundary problems". If you know that your function is periodic (on its interval) then use the bc="periodic" option; if you think that the function is a symmetric reflection about each boundary then use bc="symmetric". You might also consider using the "wavelets on the interval" transform which is suitable for data arising from a function that is known to be defined on some compact interval, see Cohen, Daubechies, and Vial, 1993. If you don't know then it is wise to experiment with both methods. In any case, if you don't have very much data don't infer too much about your decomposition! If you have loads of data then don't infer too much about the boundaries. It can be easier to interpret the wavelet coefficients from a bc="periodic" decomposition, so that is now the default. Numerical Recipes implements some of the wavelets code, in particular we have compared our code to "wt1" and "daub4" on page 595. We are pleased to announce that our code gives the same answers! The only difference that you might notice is that one of the coefficients, at the beginning or end of the decomposition, always appears in the "wrong" place. This is not so: when you assume periodic boundaries you can imagine the function defined on a circle and you can basically place the coefficient at the beginning or the end (because there is no beginning or end, as it were). The non-decimated DWT contains all circular shifts of the standard DWT. Naively imagine that you do the standard DWT on some data using the Haar wavelets. Coefficients 1 and 2 are added and differenced, and also coefficients 3 and 4; 5 and 6 etc. If there is a discontinuity between 1 and 2 then you will pick it up within the transform. If it is between 2 and 3 you will lose it. So it would be nice to do the standard DWT using 2 and 3; 4 and 5 etc. In other words, pick up the data and rotate it by one position and you get another transform. You can do this in one transform that also does more shifts at lower resolution levels. There are a number of points to note about this transform. Note that a time-ordered non-decimated wavelet transform object may be converted into a \code{packet-ordered non-decimated wavelet transform} object (and vice versa) by using the \code{\link{convert}} function. The NDWT is translation equivariant. The DWT is neither translation invariant nor equivariant. The standard DWT is orthogonal, but the non-decimated transform is most definitely not. This has the added disadvantage that non-decimated wavelet coefficients are correlated, even if you supply independent normal noise.
This is unlike the standard DWT where the coefficients are independent (normal noise). You might like to consider growing wavelet syntheses using the \code{\link{wavegrow}} function. } \value{ An object of class \code{\link{wd}}. For boundary conditions apart from \code{bc="interval"} this object is a list with the following components. \item{C}{Vector of sets of successively smoothed data. The pyramid structure of Mallat is stacked so that it fits into a vector. The function \code{\link{accessC}} should be used to extract a set for a particular level.} \item{D}{Vector of sets of wavelet coefficients at different resolution levels. Again, Mallat's pyramid structure is stacked into a vector. The function \code{\link{accessD}} should be used to extract the coefficients for a particular resolution level.} \item{nlevelsWT}{The number of resolution levels. This depends on the length of the data vector. If \code{length(data)=2^m}, then there will be m resolution levels. This means there will be m levels of wavelet coefficients (indexed 0,1,2,...,(m-1)), and m+1 levels of smoothed data (indexed 0,1,2,...,m). } \item{fl.dbase}{There is more information stored in the C and D than is described above. In the decomposition ``extra'' coefficients are generated that help take care of the boundary effects, this database lists where these start and finish, so the "true" data can be extracted.} \item{filter}{A list containing information about the filter type: Contains the string "wavelet" or "station" depending on which type of transform was performed. } \item{date}{The date the transform was performed.} \item{bc}{How the boundaries were handled.} If the ``wavelets on the interval'' transform is used (i.e. \code{bc="interval"}) then the internal structure of the wd object is changed as follows. \itemize{ \item{The coefficient vectors C and D have been replaced by a single vector \code{transformed.vector}. The new single vector contains just the transformed coefficients: i.e. the wavelet coefficients down to a particular scale (determined by \code{min.scale} above). The scaling function coefficients are stored first in the array (there will be \code{2^min.scale} of them. Then the wavelet coefficients are stored as consecutive vectors coarsest to finest of length \code{2^min.scale}, \code{2^(min.scale+1)} up to a vector which is half of the length of the original data.) In any case the user is recommended to use the functions \code{\link{accessC}}, \code{\link{accessD}}, \code{\link{putC}} and \code{\link{putD}} to access coefficients from the \code{\link{wd}} object.} \item{The extra component \code{current.scale} records to which level the transform has been done (usually this is \code{min.scale} as specified in the arguments).} \item{The extra component \code{filters.used} is a vector of integers that record which filter index was used as each level of the decomposition. At coarser scales sometimes a wavelet with shorter support is needed. } \item{The extra logical component \code{preconditioned} specifies whether preconditioning was turned on or off.} \item{The component \code{fl.dbase} is still present but only contains data corresponding to the storage of the coefficients that are present in \code{transformed.vector}. 
In particular, since only one scale of the father wavelet coefficients is stored the component \code{first.last.c} of \code{fl.dbase} is now a three-vector containing the indices of the first and last entries of the father wavelet coefficients and the offset of where they are stored in \code{transformed.vector}. Likewise, the component \code{first.last.d} of \code{fl.dbase} is still a matrix but there are now only rows for each scale level in the \code{transformed.vector} (something like \code{nlevelsWT(wd)-wd$current.scale}). } \item{The \code{filter} coefficient is also slightly different as the filter coefficients are no longer stored here (since they are hard coded into the wavelets on the interval transform.)} } } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 Integration of ``wavelets on the interval'' code by Piotr Fryzlewicz and Markus Monnerjahn was at Version 3.9.6, 1999. } \seealso{ \code{\link{wd.int}}, \code{\link{wr}}, \code{\link{wr.int}}, \code{\link{wr.wd}}, \code{\link{accessC}}, \code{\link{accessD}}, \code{\link{putD}}, \code{\link{putC}}, \code{\link{filter.select}}, \code{\link{plot.wd}}, \code{\link{threshold}}, \code{\link{wavegrow}} } \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Decompose test.data and plot the wavelet coefficients # wds <- wd(test.data) \dontrun{plot(wds)} # # Now do the time-ordered non-decimated wavelet transform of the same thing # wdS <- wd(test.data, type="station") \dontrun{plot(wdS)} # # Next examples # ------------ # The chirp signal is also another good examples to use. # # Generate some test data # test.chirp <- simchirp()$y \dontrun{ts.plot(test.chirp, main="Simulated chirp signal")} # # Now let's do the time-ordered non-decimated wavelet transform. # For a change let's use Daubechies least-asymmetric phase wavelet with 8 # vanishing moments (a totally arbitrary choice, please don't read # anything into it). # chirpwdS <- wd(test.chirp, filter.number=8, family="DaubLeAsymm", type="station") \dontrun{plot(chirpwdS, main="TOND WT of Chirp signal")} # # Note that the coefficients in this plot are exactly the same as those # generated by the packet-ordered non-decimated wavelet transform # except that they are in a different order on each resolution level. # See Nason, Sapatinas and Sawczenko, 1998 # for further information. } \keyword{math} \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/test.dataCT.rd0000644000176200001440000000336414211622634015661 0ustar liggesusers\name{test.dataCT} \alias{test.dataCT} \title{Test functions for wavelet regression and thresholding } \description{ This function evaluates the "blocks", "bumps", "heavisine" and "doppler" test functions of Donoho & Johnstone (1994b) and the piecewise polynomial test function of Nason & Silverman (1994). The function also generates data sets consisting of the specified function plus uncorrelated normally distributed errors. } \usage{ test.dataCT(type = "ppoly", n = 512, signal = 1, rsnr = 7, plotfn = FALSE) } \arguments{ \item{type}{Test function to be computed. Available types are "ppoly" (piecewise polynomial), "blocks", "bumps", "heavi" (heavisine), and "doppler".} \item{n}{Number of equally spaced data points on which the function is evaluated. } \item{signal}{Scaling parameter; the function will be scaled so that the standard deviation of the data points takes this value.} \item{rsnr}{Root signal-to-noise ratio. 
Specifies the ratio of the standard deviation of the function to the standard deviation of the simulated errors.} \item{plotfn}{If \code{plotfn=TRUE}, then the test function and the simulated data set are plotted} } \value{ A list with the following components: \item{x}{The points at which the test function is evaluated.} \item{y}{The values taken by the test function.} \item{ynoise}{The simulated data set.} \item{type}{The type of function generated, identical to the input parameter type.} \item{rsnr}{The root signal-to-noise ratio of the simulated data set, identical to the input parameter rsnr.} } \section{Side effects}{ If \code{plotfn=T}, the test function and data set are plotted. } \section{RELEASE}{ Part of the CThresh addon to WaveThresh. Copyright Stuart Barber and Guy Nason 2004. } \keyword{manip} \author{Stuart Barber} wavethresh/man/putC.wp.rd0000644000176200001440000000130414211622540015067 0ustar liggesusers\name{putC.wp} \alias{putC.wp} \title{Warning function when trying to insert father wavelet coefficients into wavelet packet object (wp).} \description{ There are no real smooths to insert in a \code{\link{wp}} wavelet packet object. This function returns an error message. To insert coefficients into a wavelet packet object you should use the \code{\link{putpacket}} collection of functions. } \usage{ \method{putC}{wp}(wp, \dots) } \arguments{ \item{wp}{Wavelet packet object.} \item{\dots}{any other arguments} } \value{ An error message! } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{putpacket}}, \code{\link{putpacket.wp}}. } \keyword{error} \author{G P Nason} wavethresh/man/wavethresh-package.Rd0000644000176200001440000000145614332764500017257 0ustar liggesusers\name{wavethresh-package} \alias{wavethresh-package} \alias{wavethresh} \docType{package} \title{\packageTitle{wavethresh}} \description{\packageDescription{wavethresh}} \details{ \packageDESCRIPTION{wavethresh} \packageIndices{wavethresh} See book or individual help pages for main functions. For example, \code{\link{wd}} for the one-dimensional discrete wavelet transform. } \author{ \packageAuthor{wavethresh} Maintainer: \packageMaintainer{wavethresh} } \references{ Nason, G.P. (2008) Wavelet methods in Statistics with R. Springer, New York. \href{https://link.springer.com/book/10.1007/978-0-387-75961-6}{Book URL.} } \keyword{math} \seealso{\code{\link{ewspec}}, \code{\link{imwd}}, \code{\link{threshold}}, \code{\link{wd}}, \code{\link{wst}} } \examples{ # # See examples in individual help pages # } wavethresh/man/plot.irregwd.rd0000644000176200001440000000615514211622540016160 0ustar liggesusers\name{plot.irregwd} \alias{plot.irregwd} \title{Plot variance factors of wavelet transform coefficients for irregularly spaced wavelet transform object} \usage{ \method{plot}{irregwd}(x, xlabels, first.level = 1, main = "Wavelet Decomposition Coefficients", scaling = "by.level", rhlab = FALSE, sub, ...) } \arguments{ \item{x}{The \code{\link{irregwd.objects}} object whose coefficients you wish to plot.} \item{xlabels}{A vector containing the "true" x-axis numbers that went with the vector that was transformed to produce the irregwd object supplied as the first argument to this function. If this argument is missing then the function tries to make up a sensible set of x-axis labels.} \item{first.level}{The first resolution level to begin plotting at. 
This argument can be quite useful when you want to suppress some of the coarser levels in the diagram.} \item{main}{The main title of the plot.} \item{scaling}{How you want the coefficients to be scaled. The options are: \code{global} - one scale factor is chosen for the whole plot. The scale factor depends on the variance factor to be included on the plot that has the largest absolute value. The global option is useful when comparing factors that might appear anywhere in the plot; \code{by.level} - a scale factor is chosen for each resolution level in the plot. The scale factor for a level depends on the variance factor in that level that has the largest absolute value. The \code{by.level} option is useful when you wish to compare coefficients within a resolution level.} \item{rhlab}{If \code{TRUE} then a set of labels is produced on the right hand axis. The axis labels in this case refer to the scale factors used to scale each level and correspond to the value of the largest variance factor (in absolute value) in each scale (when \code{scaling="by.level"}) or overall (when \code{scaling="global"}). If \code{rhlab=FALSE} then no right hand axis labels are produced.} \item{sub}{A subtitle for the plot.} \item{\dots}{Other arguments supplied to the actual plot} } \description{ This function plots the variance factors associated with the wavelet coefficients arising from an \code{\link{irregwd.objects}} irregularly spaced wavelet decomposition object. } \details{ Produces a plot similar in style to the ones in Donoho and Johnstone, 1994. This function is basically the same as \code{\link{plot.wd}} except that variance factors and not coefficients are plotted. A variance factor is a number that quantifies the variability of a coefficient induced by the irregular design that was interpolated to a regular grid by the \code{\link{makegrid}} function, which is used by the \code{\link{irregwd}} irregular wavelet transform function. High values of the variance factor correspond to large variance in the wavelet coefficients due to the irregular design, not to the original noise structure on the coefficients. } \value{ If \code{rhlab==TRUE} then the scaling factors applied to each scale level are returned. Otherwise \code{NULL} is returned. } \examples{ # # The help for makegrid contains an example # of using this function. # } \author{Arne Kovac} \keyword{hplot} wavethresh/man/teddy.rd0000644000176200001440000000076614211622634014657 0ustar liggesusers\name{teddy} \docType{data} \alias{teddy} \title{Picture of a teddy bear's picnic.} \description{ A 512x512 matrix. Each entry of the matrix contains an image intensity value. } \usage{ data(teddy) } \format{ A 512x512 matrix. Each entry of the matrix contains an image intensity value. The whole matrix represents an image of a teddy bear's picnic. } \source{ Taken by Guy Nason. } \examples{ # # This command produces the image seen above. # # image(teddy) # } \keyword{datasets} \author{G P Nason} wavethresh/man/madmad.rd0000644000176200001440000000267314211622540014764 0ustar liggesusers\name{madmad} \alias{madmad} \title{Compute square of median absolute deviation (mad) function.} \description{ This function simply returns the square of the median absolute deviation (mad) of its argument. This is required by the \code{\link{threshold}} series of functions, which require estimates of spread on the variance scale (not the standard deviation scale). 
} \usage{ madmad(x) } \arguments{ \item{x}{The vector for which you wish to compute the square of the mad.} } \value{ The square of the median absolute deviation of the coefficients supplied in \code{x}. } \note{It's a MAD MAD world!} \section{RELEASE}{Version 3.4.1 Copyright Guy Nason 1994 } \seealso{ \code{\link{threshold}} } \examples{ # # # Generate some normal data with mean 0 and sd of 8 # and we'll also contaminate it with an outlier of 1000000 # This is akin to signal wavelet coefficients mixing with the noise. # ContamNormalData <- c(1000000, rnorm(1000, mean=0, sd=8)) # # What is the variance of the data? # var(ContamNormalData) # [1] 999000792 # # Wow, a seriously unrobust answer! # # How about the median absolute deviation? # mad(ContamNormalData) # [1] 8.14832 # # A much better answer! # # Now let's use madmad to get the answer on the variance scale # madmad(ContamNormalData) # [1] 66.39512 # # The true variance was 64, so the 66.39512 was a much better answer # than that returned by the call to the variance function. } \keyword{arith} \author{G P Nason} wavethresh/man/makewpstRO.rd0000644000176200001440000002432514211622540015633 0ustar liggesusers\name{makewpstRO} \alias{makewpstRO} \title{ Make a wavelet packet regression object from a dependent and independent time series variable. } \description{ The idea here is to try and build facilities to enable a transfer function model along the lines of that described by Nason and Sapatinas 2002 in \emph{Statistics and Computing}. The idea is to turn the \code{timeseries} variable into a set of nondecimated wavelet packets which are already pre-selected to have some semblance of relationship to the \code{response} time series. The function does not actually perform any regression, in contrast to the related \code{\link{makewpstDO}}, but returns a data frame which the user can use to build their own models. } \usage{ makewpstRO(timeseries, response, filter.number = 10, family = "DaubExPhase", trans = logabs, percentage = 10) } \arguments{ \item{timeseries}{ The explanatory (predictor) time series. This series is decomposed using the \code{\link{wpst}} function into nondecimated wavelet packets; it needs to be of power-of-two length. } \item{response}{ The response (dependent) time series. } \item{filter.number}{ The type of wavelet used within \code{family}, see \code{\link{filter.select}}. } \item{family}{ The family of wavelet, see \code{\link{filter.select}} } \item{trans}{ A transform to apply to the nondecimated wavelet packet coefficients before any selection } \item{percentage}{ The top \code{percentage} of nondecimated wavelet packets that correlate best with the \code{response} series will be preselected. } } \details{The idea behind this methodology is that a \code{response} time series might not be directly related to the explanatory \code{timeseries} series, but it might be related to the nondecimated wavelet packets of the \code{timeseries}; these packets can pick out various features of the \code{timeseries} including certain delays, oscillations and others. The best packets (the number is controlled by \code{percentage}), i.e. those that correlate best with \code{response}, are selected and returned. The \code{response} and the best nondecimated wavelet packets are returned in a data frame object and then any convenient form of statistical modelling can be used to build a model of the \code{response} in terms of the packet variables. 
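For orientation, a minimal sketch of the intended workflow might look like the following (here \code{x} and \code{y} are illustrative names for an explanatory and a response series of dyadic length; they are not objects supplied by the package): \preformatted{
ro  <- makewpstRO(timeseries=x, response=y, percentage=10)
fit <- lm(response ~ ., data=ro$df)  # regress response on the selected packets
summary(fit)
} Any other modelling tool that accepts a data frame (for example a GLM or a tree-based method) could be substituted for \code{lm} at the user's discretion.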
Once a model has been built it can be interpreted in the usual way, but with respect to nondecimated wavelet packets. Note that nondecimated wavelet packets are essential, as they are all of the same length as the original response series. If a decimated wavelet packet algorithm had been used then it is not clear what to do with the "gaps"! If new \code{timeseries} data comes along the \code{\link{wpstREGR}} function can be used to extract the identical packets as the ones produced by this function (as the result of this function stores the identities of these packets). Then the statistical modelling that build the model from the output of this function, can be used to predict future values of the \code{response} time series from future values of the \code{timeseries} series. } \value{ An object of class \code{wpstRO} containing the following items \item{df}{A data frame containing the \code{response} time series and a number of columns/variables/packets that correlated with response series. These are all entitled "Xn" where n is some integer} \item{ixvec}{A packet index vector. After taking the nondecimated wavelet packet transform, all the packets are stored in a matrix. This vector indicates those that were preselected} \item{level}{The original level from which the preselected vectors came from} \item{pktix}{Another index vector, this time referring to the original wavelet packet object, not the matrix in which they subsequently got stored} \item{nlevelsWT}{The number of resolution levels in the original wavelet packet object} \item{cv}{The correlation vector. These are the values of the correlations of the packets with the response, then sorted in terms of decreasing absolute correlation} \item{filter}{The wavelet filter details} \item{trans}{The transformation function actually used} } \references{ Nason, G.P. and Sapatinas, T. (2002) Wavelet packet transfer function modeling of nonstationary time series. \emph{Statistics and Computing}, \bold{12}, 45-56. } \author{ G P Nason } \seealso{ \code{\link{makewpstDO}}, \code{\link{wpst}}, \code{\link{wpstREGR}}} \examples{ data(BabyECG) baseseries <- BabyECG[1:256] # # Make up a FICTITIOUS response series! # response <- BabyECG[6:261]*3+52 # # Do the modeling # BabeModel <- makewpstRO(timeseries=baseseries, response=response) #Level: 0 .......... #1 .......... #2 .......... #3 .......... #4 ................ #5 #6 #7 # #Contains SWP coefficients #Original time series length: 256 #Number of bases: 25 #Some basis selection performed # Level Pkt Index Orig Index Score #[1,] 5 0 497 0.6729833 #[2,] 4 0 481 0.6120771 #[3,] 6 0 505 0.4550616 #[4,] 3 0 449 0.4309924 #[5,] 7 0 509 0.3779385 #[6,] 1 53 310 0.3275428 #[7,] 2 32 417 -0.3274858 #[8,] 2 59 444 -0.2912863 #[9,] 3 16 465 -0.2649679 #[10,] 1 110 367 0.2605178 #etc. etc. # # # Let's look at the data frame component # names(BabeModel$df) # [1] "response" "X1" "X2" "X3" "X4" "X5" # [7] "X6" "X7" "X8" "X9" "X10" "X11" #[13] "X12" "X13" "X14" "X15" "X16" "X17" #[19] "X18" "X19" "X20" "X21" "X22" "X23" #[25] "X24" "X25" # # Generate a formula including all of the X's (note we could use the . 
# argument, but we later want to be more flexible # xnam <- paste("X", 1:25, sep="") fmla1 <- as.formula(paste("response ~ ", paste(xnam, collapse= "+"))) # # Now let's fit a linear model, the response on all the Xs # Babe.lm1 <- lm(fmla1, data=BabeModel$df) # # Do an ANOVA to see what's what # anova(Babe.lm1) #Analysis of Variance Table # #Response: response # Df Sum Sq Mean Sq F value Pr(>F) #X1 1 214356 214356 265.7656 < 2.2e-16 *** #X2 1 21188 21188 26.2701 6.289e-07 *** #X3 1 30534 30534 37.8565 3.347e-09 *** #X4 1 312 312 0.3871 0.5344439 #X5 1 9275 9275 11.4999 0.0008191 *** #X6 1 35 35 0.0439 0.8343135 #X7 1 195 195 0.2417 0.6234435 #X8 1 94 94 0.1171 0.7324600 #X9 1 331 331 0.4103 0.5224746 #X10 1 0 0 0.0006 0.9810560 #X11 1 722 722 0.8952 0.3450597 #X12 1 0 0 0.0004 0.9850243 #X13 1 77 77 0.0959 0.7570769 #X14 1 2770 2770 3.4342 0.0651404 . #X15 1 6 6 0.0072 0.9326155 #X16 1 389 389 0.4821 0.4881649 #X17 1 44 44 0.0544 0.8157015 #X18 1 44 44 0.0547 0.8152640 #X19 1 4639 4639 5.7518 0.0172702 * #X20 1 490 490 0.6077 0.4364469 #X21 1 389 389 0.4823 0.4880660 #X22 1 85 85 0.1048 0.7463860 #X23 1 1710 1710 2.1198 0.1467664 #X24 1 12 12 0.0148 0.9033427 #X25 1 82 82 0.1019 0.7498804 #Residuals 230 185509 807 #--- #Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 # # Looks like X1, X2, X3, X5, X14 and X19 are "significant". Also throw in # X4 as it was a highly ranked preselected variable, and refit # fmla2 <- response ~ X1 + X2 + X3 + X4 + X5 + X14 + X19 Babe.lm2 <- lm(fmla2, data=BabeModel$df) # # Let's see the ANOVA table for this # anova(Babe.lm2) #Analysis of Variance Table # #Response: response # Df Sum Sq Mean Sq F value Pr(>F) #X1 1 214356 214356 279.8073 < 2.2e-16 *** #X2 1 21188 21188 27.6581 3.128e-07 *** #X3 1 30534 30534 39.8567 1.252e-09 *** #X4 1 312 312 0.4076 0.5238034 #X5 1 9275 9275 12.1075 0.0005931 *** #X14 1 3095 3095 4.0405 0.0455030 * #X19 1 4540 4540 5.9259 0.0156263 * #Residuals 248 189989 766 #--- #Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 # # So, let's drop X4, refit, and then do ANOVA # Babe.lm3 <- update(Babe.lm2, . ~ . -X4) anova(Babe.lm3) # # After viewing this, drop X14 # Babe.lm4 <- update(Babe.lm3, . ~ . 
-X14) anova(Babe.lm4) # # Let's plot the original series, and the "fitted" one # \dontrun{ts.plot(BabeModel$df[["response"]])} \dontrun{lines(fitted(Babe.lm4), col=2)} # # Let's plot the wavelet packet basis functions associated with the model # \dontrun{oldpar <- par(mfrow=c(2,2))} \dontrun{z <- rep(0, 256)} \dontrun{zwp <- wp(z, filter.number=BabeModel$filter$filter.number, family=BabeModel$filter$family)} \dontrun{draw(zwp, level=BabeModel$level[1], index=BabeModel$pktix[1], main="", sub="")} \dontrun{draw(zwp, level=BabeModel$level[2], index=BabeModel$pktix[2], main="", sub="")} \dontrun{draw(zwp, level=BabeModel$level[3], index=BabeModel$pktix[3], main="", sub="")} \dontrun{draw(zwp, level=BabeModel$level[5], index=BabeModel$pktix[5], main="", sub="") } \dontrun{par(oldpar)} # # Now let's do some prediction of future values of the response, given # future values of the baseseries # newseries <- BabyECG[257:512] # # Get the new data frame # newdfinfo <- wpstREGR(newTS = newseries, wpstRO=BabeModel) # # Now use the best model (Babe.lm4) with the new data frame (newdfinfo) # to predict new values of response # newresponse <- predict(object=Babe.lm4, newdata=newdfinfo) # # What is the "true" response, well we made up a response earlier, so let's # construct the true response for this future data (in your case you'll # have a separate genuine response variable) # trucfictresponse <- BabyECG[262:517]*3+52 # # Let's see them plotted on the same plot # \dontrun{ts.plot(trucfictresponse)} \dontrun{lines(newresponse, col=2)} # # On my plot they look tolerably close! # } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{multivariate} \keyword{ts} wavethresh/man/GenW.rd0000644000176200001440000001100314211622540014364 0ustar liggesusers\name{GenW} \alias{GenW} \title{Generate (inverse) discrete wavelet transform matrix.} \description{ This function generates a matrix that can perform the discrete wavelet transform (useful for understanding the DWT but use the fast algorithm coded in \code{\link{wd}} for general use). The function returns the matrix for the inverse transform. Since the matrix is orthogonal transpose the matrix to obtain the forward transform matrix. } \usage{ GenW(n=8, filter.number=10, family="DaubLeAsymm", bc="periodic") } \arguments{ \item{n}{The order of the DWT matrix will be n times n. n should be a power of two.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{bc}{boundary conditions to use. This can be \code{periodic} or \code{symmetric} depending on whether you want the returned matrix to assume periodic or symmetric end-reflection boundary conditions.} } \details{ The discrete wavelet transform is usually computed using the fast pyramid algorithm of Mallat. However, the transform can be written in a matrix form and this is useful for understanding what the fast transform does. One wouldn't normally use the matrix for performing the transform but use the fast transform function \code{\link{wd}} instead. The matrix returned by this function represents the inverse DWT. 
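As a quick numerical sanity check (a minimal sketch; the order 8 and the Haar wavelet are arbitrary illustrative choices), the returned matrix should be orthogonal: \preformatted{
W <- GenW(n=8, filter.number=1, family="DaubExPhase")
max(abs(W %*% t(W) - diag(8)))   # should be essentially zero
}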
Since the matrix (and transform) is orthogonal one can obtain the matrix representation of the forward transform simply by transposing the matrix using the \code{t} function in S-Plus. The returned matrix is organised as follows. The first column always corresponds to the linear combination corresponding to the scaling function coefficient (so the column is constant. The next \code{n/2} columns correspond to the finest scale wavelet coefficients; the next \code{n/4} columns to the next finest scale and so on until the last column which corresponds to the coarsest scale wavelet coefficients. The matrix is computed by performing successive fast DWTs on unit vectors. } \value{ A matrix of order \code{n} that contains the inverse discrete wavelet transform. } \section{RELEASE}{Version 3.2 Copyright Guy Nason 1998 } \seealso{ \code{\link{wd}}, \code{\link{wr}}. } \examples{ # # Generate the wavelet transform matrix corresponding to the Haar wavelet # transform of order 8 # haarmat <- GenW(8, filter.number=1, family="DaubExPhase") # # Let's look at this matrix # #haarmat # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] #[1,] 0.3535534 0.7071068 0.0000000 0.0000000 0.0000000 0.5 0.0 0.3535534 #[2,] 0.3535534 -0.7071068 0.0000000 0.0000000 0.0000000 0.5 0.0 0.3535534 #[3,] 0.3535534 0.0000000 0.7071068 0.0000000 0.0000000 -0.5 0.0 0.3535534 #[4,] 0.3535534 0.0000000 -0.7071068 0.0000000 0.0000000 -0.5 0.0 0.3535534 #[5,] 0.3535534 0.0000000 0.0000000 0.7071068 0.0000000 0.0 0.5 -0.3535534 #[6,] 0.3535534 0.0000000 0.0000000 -0.7071068 0.0000000 0.0 0.5 -0.3535534 #[7,] 0.3535534 0.0000000 0.0000000 0.0000000 0.7071068 0.0 -0.5 -0.3535534 #[8,] 0.3535534 0.0000000 0.0000000 0.0000000 -0.7071068 0.0 -0.5 -0.3535534 # # As noted above the first column is the l.c. corresponding to the scaling # function coefficient and then the l.c.s corresponding to the wavelet # coefficients from the finest to the coarsest. # # The above matrix represented the inverse DWT. Let's compute the forward # transform matrix representation: # #t(haarmat) # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] #[1,] 0.3535534 0.3535534 0.3535534 0.3535534 0.3535534 0.3535534 0.3535534 0.3535534 #[2,] 0.7071068 -0.7071068 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 #[3,] 0.0000000 0.0000000 0.7071068 -0.7071068 0.0000000 0.0000000 0.0000000 0.0000000 #[4,] 0.0000000 0.0000000 0.0000000 0.0000000 0.7071068 -0.7071068 0.0000000 0.0000000 #[5,] 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000 0.7071068 -0.7071068 #[6,] 0.5000000 0.5000000 -0.5000000 -0.5000000 0.0000000 0.0000000 0.0000000 0.0000000 #[7,] 0.0000000 0.0000000 0.0000000 0.0000000 0.5000000 0.5000000 -0.5000000 -0.5000000 #[8,] 0.3535534 0.3535534 0.3535534 0.3535534 -0.3535534 -0.3535534 -0.3535534 -0.3535534 # # } \keyword{array} \author{G P Nason} wavethresh/man/putpacket.wst2D.rd0000644000176200001440000001007014211622634016535 0ustar liggesusers\name{putpacket.wst2D} \alias{putpacket.wst2D} \title{Replace packet of coefficients in a two-dimensional non-decimated wavelet object (wst2D). } \description{ This function replaces a packet of coefficients from a two-dimensional non-decimated wavelet (\code{\link{wst2D}}) object and returns the modified object. } \usage{ \method{putpacket}{wst2D}(wst2D, level, index, type="S", packet, Ccode=TRUE, \dots) } \arguments{ \item{wst2D}{2D non-decimated wavelet object containing the coefficients you wish to replace.} \item{level}{The resolution level of the coefficients that you wish to replace. 
Can range from 0 to \code{nlevelsWT(wst2D)-1}.} \item{index}{The index number within the resolution level of the packet of coefficients that you wish to replace. Index is a base-4 number which is \code{r} digits long. Each digit can be 0, 1, 2 or 3 corresponding to no shifts, horizontal shift, vertical shift or horizontal and vertical shifts. The number \code{r} indicates the depth of the resolution level from the data resolution i.e. where \code{r = nlevelsWT - level}. Where there is a string of more than one digit the left most digits correspond to finest scale shift selection, the right most digits to the coarser scales (I think).} \item{packet}{A square matrix of dimension \code{2^level} which contains the new coefficients that you wish to insert.} \item{type}{This is a one letter character string: one of "S", "H", "V" or "D" for the smooth coefficients, horizontal, vertical or diagonal detail.} \item{Ccode}{If \code{TRUE} then fast C code is used to insert the packet, otherwise slow interpreted code is used. Unless you have some special reason always use the C code (and leave the argument at its default).} \item{\dots}{any other arguments} } \details{ The \code{\link{wst2D}} function creates a \code{\link{wst2D}} class object. Starting with a smooth the operators H, G, GS and HS (where G, H are the usual Mallat operators and S is the shift-by-one operator) are operated first on the rows and then the columns: i.e. so each of the operators HH, HG, GH, GG, HSH, HSG, GSH, GSG, HHS, GHS, HGS, GGS, HSHS, HSGS, GSHS and GSGS is applied. Then the same collection of operators is applied to all the derived smooths, i.e. HH, HSH, HHS and HSHS. So the next level is obtained from the previous level with basically HH, HG, GH and GG but with extra shifts in the horizontal, vertical and horizontal and vertical directions. The index provides a way to enumerate the paths through this tree where each smooth has 4 children, each indexed by a number between 0 and 3. Each of the 4 children has 4 components: a smooth, horizontal, vertical and diagonal detail, much in the same way as for the Mallat 2D wavelet transform implemented in the WaveThresh function \code{\link{imwd}}. } \value{ An object of class \code{\link{wst2D}} with the coefficients at resolution level \code{level}, packet \code{index} and orientation \code{type} replaced by the matrix \code{packet}. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{getpacket.wst2D}}, \code{\link{wst2D}}, \code{\link{wst2D.object}}. } \examples{ # # Create a random image. # myrand <- matrix(rnorm(16), nrow=4, ncol=4) #myrand # [,1] [,2] [,3] [,4] #[1,] 0.01692807 0.1400891 -0.38225727 0.3372708 #[2,] -0.79799841 -0.3306080 1.59789958 -1.0606204 #[3,] 0.29151629 -0.2028172 -0.02346776 0.5833292 #[4,] -2.21505532 -0.3591296 -0.39354119 0.6147043 # # Do the 2D non-decimated wavelet transform # myrwst2D <- wst2D(myrand) # # Let's access the finest scale detail, not shifted in the vertical # direction. # getpacket(myrwst2D, nlevelsWT(myrwst2D)-1, index=0, type="V") # [,1] [,2] #[1,] -0.1626819 -1.3244064 #[2,] 1.4113247 -0.7383336 # # Let's put some zeros in instead... # zmat <- matrix(c(0,0,0,0), 2,2) newwst2D <- putpacket(myrwst2D, nlevelsWT(myrwst2D)-1, index=0, packet=zmat, type="V") # # And now look at the same packet in the new object (note that putpacket # returns a modified copy; myrwst2D itself is unchanged) # getpacket(newwst2D, nlevelsWT(newwst2D)-1, index=0, type="V") # [,1] [,2] #[1,] 0 0 #[2,] 0 0 # # Yup, packet insertion o.k. 
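#
# A possible extension of the example above (a sketch): because putpacket()
# returns a modified copy, the original object myrwst2D still holds the old
# packet, so we can copy it back into newwst2D if we wish.
#
origpkt <- getpacket(myrwst2D, nlevelsWT(myrwst2D)-1, index=0, type="V")
restored <- putpacket(newwst2D, nlevelsWT(newwst2D)-1, index=0,
	packet=origpkt, type="V")
#
# getpacket(restored, nlevelsWT(restored)-1, index=0, type="V") now matches
# the original packet displayed earlier.
#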
} \keyword{manip} \author{G P Nason} wavethresh/man/ipndacw.rd0000644000176200001440000001241514211622540015161 0ustar liggesusers\name{ipndacw} \alias{ipndacw} \title{Compute inner product matrix of discrete non-decimated autocorrelation wavelets.} \description{ This function computes the inner product matrix of discrete non-decimated autocorrelation wavelets. } \usage{ ipndacw(J, filter.number = 10, family = "DaubLeAsymm", tol = 1e-100, verbose = FALSE, \dots) } \arguments{ \item{J}{Dimension of inner product matrix required. This number should be a negative integer.} \item{filter.number}{The index of the wavelet used to compute the inner product matrix.} \item{family}{The family of wavelet used to compute the inner product matrix.} \item{tol}{In the brute force computation for Daubechies compactly supported wavelets many inner product computations are performed. This tolerance discounts any results which are smaller than \code{tol} which effectively defines how long the inner product/autocorrelation products are.} \item{verbose}{If \code{TRUE} then informative messages are printed. Some of these can be quite fun as the function tells you whether precomputed matrices are being used, how much computation needs to be done and so forth. } \item{\dots}{any other arguments} } \details{ This function computes the inner product matrix of the discrete non-decimated autocorrelation wavelets. This matrix is used to correct the wavelet periodogram as a step to turning it into a evolutionary wavelet spectral estimate. The matrix returned by ipndacw is the one called A in the paper by Nason, von Sachs and Kroisandt. For the Haar wavelet the matrix is computed by using the analytical formulae in the paper by Nason, von Sachs and Kroisandt and is hence very fast and efficient and can be used for large values of -J. For other Daubechies compactly supported wavelets the matrix is computed directly by autocorrelating discrete non-decimated wavelets at different scales and then forming the inner products of these. A function that computes the autocorrelation wavelets themselves is \code{\link{PsiJ}}. This \emph{brute force} computation is slow and memory inefficient hence \code{ipndacw} contains a mechanism that stores any inner product matrix that it creates according to a naming scheme defined by the convention defined in \code{\link{rmname}}. The stored matrices are assigned to the user-visible environment \code{\link{WTEnv}}. These stored matrices can be used in future computations by the following automatic procedure: \describe{ \item{1}{The \code{\link{rmget}} looks to see whether previous computations have been performed that might be useful.} \item{2}{If a matrix of higher order is discovered then the appropriate top-left submatrix is returned, otherwise...} \item{3}{If the right order of matrix is found it is returned, otherwise ...} \item{4}{If a matrix of \emph{smaller} order is found it is used as the top-left submatrix of the answer. The remaining elements to the right of and below the submatrix are computed and then the whole matrix is returned, otherwise...} \item{5}{If none are found then the whole matrix is computed in C and returned.} } In this way a particular matrix for a given wavelet need only be computed once. } \value{ A matrix of order (-J)x(-J) containing the inner product matrix of the discrete non-decimated autocorrelation matrices. } \references{Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. 
\emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{ewspec}}, \code{\link{PsiJ}}, \code{\link{rmname}}, \code{\link{rmget}}, \code{\link{filter.select}}. } \examples{ # # Let us create the 4x4 inner product matrix for the Haar wavelet. # We'll turn on the jolly verbose messages as well. # ipndacw(-4, filter.number=1, family="DaubExPhase", verbose=TRUE) #Computing ipndacw #Calling haarmat #Took 0.0699999 seconds # -1 -2 -3 -4 #-1 1.5000 0.7500 0.3750 0.1875 #-2 0.7500 1.7500 1.1250 0.5625 #-3 0.3750 1.1250 2.8750 2.0625 #-4 0.1875 0.5625 2.0625 5.4375 # # If we do this again it will use the precomputed version # ipndacw(-4, filter.number=1, family="DaubExPhase", verbose=TRUE) #Computing ipndacw #Returning precomputed version: using 4 #Took 0.08 seconds # -1 -2 -3 -4 #-1 1.5000 0.7500 0.3750 0.1875 #-2 0.7500 1.7500 1.1250 0.5625 #-3 0.3750 1.1250 2.8750 2.0625 #-4 0.1875 0.5625 2.0625 5.4375 # # Let's use a smoother wavelet from the least-asymmetric family # and generate the 6x6 version. # ipndacw(-6, filter.number=10, family="DaubLeAsymm", verbose=TRUE) #Computing ipndacw #Took 0.95 seconds # -1 -2 -3 -4 -5 #-1 1.839101e+00 3.215934e-01 4.058155e-04 8.460063e-06 4.522125e-08 #-2 3.215934e-01 3.035353e+00 6.425188e-01 7.947454e-04 1.683209e-05 #-3 4.058155e-04 6.425188e-01 6.070419e+00 1.285038e+00 1.589486e-03 #-4 8.460063e-06 7.947454e-04 1.285038e+00 1.214084e+01 2.570075e+00 #-5 4.522125e-08 1.683209e-05 1.589486e-03 2.570075e+00 2.428168e+01 #-6 5.161675e-10 8.941666e-08 3.366416e-05 3.178972e-03 5.140150e+00 # -6 #-1 5.161675e-10 #-2 8.941666e-08 #-3 3.366416e-05 #-4 3.178972e-03 #-5 5.140150e+00 #-6 4.856335e+01 # } \keyword{algebra} \author{G P Nason} wavethresh/man/nullevels.rd0000644000176200001440000000167514211622540015553 0ustar liggesusers\name{nullevels} \alias{nullevels} \title{Set whole resolution levels of coefficients equal to zero.} \description{ Generic function which sets whole resolution levels of coefficients equal to zero. Particular methods exist. For objects of class: \describe{ \item{imwd}{use the \code{\link{nullevels.imwd}} method.} \item{wd}{use the \code{\link{nullevels.wd}} method. } \item{wst}{use the \code{\link{nullevels.wst}} method.} } See individual method help pages for operation and examples. } \usage{ nullevels(\dots) } \arguments{ \item{\dots}{See individual help pages for details.} } \value{ An object of the same class as x but with the specified levels set to zero. } \section{RELEASE}{Version 3.8.1 Copyright Guy Nason 1997 } \seealso{ \code{\link{nullevels.imwd}} \code{\link{nullevels.wd}} \code{\link{nullevels.wst}} \code{\link{wd.object}}, \code{\link{wd}} \code{\link{wst.object}} \code{\link{wst}} } \keyword{manip} \author{G P Nason} wavethresh/man/CWCV.rd0000644000176200001440000001014614211622540014275 0ustar liggesusers\name{CWCV} \alias{CWCV} \title{C Wavelet Cross-validation} \description{ Two-fold wavelet shrinkage cross-validation (in C) } \usage{ CWCV(ynoise, ll, x = 1:length(ynoise), filter.number = 10, family = "DaubLeAsymm", thresh.type = "soft", tol = 0.01, maxits=500, verbose = 0, plot.it = TRUE, interptype = "noise") } \arguments{ \item{ynoise}{A vector of dyadic (power of two) length that contains the noisy data that you wish to apply wavelet shrinkage by cross-validation to.} \item{ll}{The primary resolution that you wish to assume. 
No wavelet coefficients that are on coarser scales than ll will be thresholded.} \item{x}{This function is capable of producing informative plots. It can be useful to supply the x values corresponding to the ynoise values. Further, this argument is returned by this function which can be useful for later processors.} \item{filter.number}{This selects the smoothness of wavelet that you want to use to perform wavelet shrinkage by cross-validation.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{thresh.type}{this option specifies the thresholding type which can be "hard" or "soft".} \item{tol}{this specifies the convergence tolerance for the cross-validation optimization routine (a golden section search).} \item{maxits}{maximum number of iterations for the cross-validation optimization routine (a golden section search).} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so they are turned off by default.} \item{plot.it}{If this is TRUE then plots of the universal threshold reconstruction (used to obtain an upper bound on the cross-validation threshold) and the resulting cross-validation estimate are produced.} \item{interptype}{Can take two values, "noise" or "normal". This option controls how cross-validation compares the estimate formed by leaving out the data with the "left-out" data. If interptype="noise" then two noisy values are averaged to compare with the estimated curve in between, otherwise if interptype="normal" then the curve estimate is averaged either side of a noisy left-out point.} } \details{ Compute the two-fold cross-validated wavelet shrunk estimate given the noisy data ynoise according to the description given in Nason, 1996. You must specify a primary resolution given by \code{ll}. This must be specified individually on each data set and can itself be estimated using cross-validation (although I haven't written the code to do this). \bold{Note}. The two-fold cross-validation method performs very badly if the input data is correlated. In this case I would advise using the methods proposed in Donoho and Johnstone, 1995 or Johnstone and Silverman, 1997 which can be carried out in WaveThresh using the \code{\link{threshold}} function with the \code{policy="sure"} option. } \value{ A list with the following components \item{x}{This is just the x that was input. It gets passed through more or less for convenience for the user.} \item{ynoise}{A copy of the input ynoise noisy data.} \item{xvwr}{The cross-validated wavelet shrunk estimate.} \item{yuvtwr}{The universal thresholded version (note this is merely a starting point for the cross-validation algorithm. It should not be taken seriously as an estimate. In particular its estimate of variance is likely to be inflated.) } \item{xvthresh}{The cross-validated threshold} \item{xvdof}{The number of non-zero coefficients in the cross-validated shrunk wavelet object (which is not returned).} \item{uvdof}{The number of non-zero coefficients in the universal threshold shrunk wavelet object (which also is not returned)} \item{xkeep}{always returns NULL!} \item{fkeep}{always returns NULL!} } \note{Plots of the universal and cross-validated shrunk estimates might be plotted if \code{plot.it=TRUE}.} \section{RELEASE}{Version 3.0 Copyright Guy Nason 1994 } \seealso{ \code{\link{threshold}}. \code{\link{threshold.wd}}. 
} \examples{ # # This function is best used via the policy="cv" option in # the threshold.wd function. # See examples there. # } \keyword{smooth} \author{G P Nason} wavethresh/man/print.wpstDO.rd0000644000176200001440000000277314211622540016115 0ustar liggesusers\name{print.wpstDO} \alias{print.wpstDO} \title{Print information about a wpstDO class object} \usage{ \method{print}{wpstDO}(x, \dots) } \arguments{ \item{x}{wpstDO object to print out} \item{\dots}{Other information to print} } \description{ Prints out the type of object, prints out the object's names, then uses \code{\link{print.BP}} to print out the best single packets. } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{makewpstDO}}} \examples{ # # Use BabySS and BabyECG data for this example. # # Want to predict future values of BabySS from future values of BabyECG # # Build model on first 256 values of both # data(BabyECG) data(BabySS) BabyModel <- makewpstDO(timeseries=BabyECG[1:256], groups=BabySS[1:256], mincor=0.5) # # The results (ie print out answer) BabyModel #Stationary wavelet packet discrimination object #Composite object containing components:[1] "BPd" "BP" "filter" #Fisher's discrimination: done #BP component has the following information #BP class object. Contains "best basis" information #Components of object:[1] "nlevelsWT" "BasisMatrix" "level" "pkt" "basiscoef" #[6] "groups" #Number of levels 8 #List of "best" packets #Level id Packet id Basis coef #[1,] 4 0 0.7340580 #[2,] 5 0 0.6811251 #[3,] 6 0 0.6443167 #[4,] 3 0 0.6193434 #[5,] 7 0 0.5967620 #[6,] 0 3 0.5473777 #[7,] 1 53 0.5082849 # } \author{G P Nason} \keyword{print} wavethresh/man/wpst2discr.rd0000644000176200001440000000246014211622634015643 0ustar liggesusers\name{wpst2discr} \alias{wpst2discr} \title{Reshape/reformat packet coefficients into a multivariate data set} \description{ The packet coefficients of a nondecimated wavelet packet object are stored internally in an efficient form. This function takes the nondecimated wavelet packets and stores them as a matrix (multivariate data set). Each column in the returned matrix corresponds to an individual packet, each row corresponds to a time index in the original packet or time series. } \usage{ wpst2discr(wpstobj, groups) } \arguments{ \item{wpstobj}{A wpst class object, output from \code{\link{wpst}} say} \item{groups}{A time series containing the group membership at each time point} } \details{ Description says it all } \value{An object of class w2d which is a list containing the following items: \item{m}{The matrix containing columns of packet information.} \item{groups}{Passes through the \code{group} argument from input.} \item{level}{Each column corresponds to a packet, this vector contains the information on which resolution level each packet comes from} \item{pktix}{Like for \code{level} but for packet indices} \item{nlevelsWT}{The number of resolution levels in total, from the wpst object} } \seealso{\code{\link{makewpstDO}}, \code{\link{wpst}}} \author{G P Nason} \keyword{multivariate} \keyword{ts} wavethresh/man/accessD.mwd.rd0000644000176200001440000000444414211622540015672 0ustar liggesusers\name{accessD.mwd} \alias{accessD.mwd} \title{Get wavelet coefficients from multiple wavelet structure (mwd).} \description{ The wavelet coefficients from a multiple wavelet decomposition structure, \code{\link{mwd.object}}, (e.g. returned from \code{\link{mwd}}) are packed into a single matrix in that structure. 
This function extracts the coefficients corresponding to a particular resolution level. } \usage{ \method{accessD}{mwd}(mwd, level, \dots) } \arguments{ \item{mwd}{Multiple wavelet decomposition structure from which you wish to extract the expansion coefficients.} \item{level}{The level that you wish to extract. If the "original" data has \code{mwd$filter$npsi*2^m} data points (\code{mwd$filter$npsi} being the multiplicity of the multiple wavelets) then there are m possible levels that you could want to access, indexed by 0,1,...,(m-1) } \item{\dots}{any other arguments} } \details{ The \code{\link{mwd}} function produces a \code{multiple wavelet decomposition object} . The need for this function is a consequence of the pyramidal structure of \code{Mallats algorithm} and the memory efficiency gain achieved by storing the pyramid as a linear matrix. AccessD obtains information about where the coefficients appear from the fl.dbase component of \code{\link{mwd}}, in particular the array \code{fl.dbase$first.last.d} which gives a complete specification of index numbers and offsets for \code{mwd$D}. Note that this function and \code{\link{accessC}} only work on objects of class \code{\link{mwd}} to \emph{extract} coefficients. You have to use \code{\link{putD.mwd}} to insert wavelet coefficients into a \code{\link{mwd}} object. See Downie and Silverman, 1998. } \value{ A matrix with \code{mwd$filter$npsi} rows containing the extracted coefficients. } \section{RELEASE}{Tim Downie 1995-6} \seealso{ \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}} } \examples{ # # Get the 3rd level of smoothed data from a decomposition # data(ipd) accessD.mwd(mwd(ipd), level=3) } \keyword{manip} \author{G P Nason} wavethresh/man/denplot.rd0000644000176200001440000000421214211622540015175 0ustar liggesusers\name{denplot} \alias{denplot} \title{Calculate plotting information for a density estimate.} \usage{ denplot(wr, coef, nT=20, lims, n=50) } \arguments{ \item{wr}{Scaling function coefficients, usually at some high level and usually smoothed (thresholded).} \item{coef}{The output from \code{\link{denproj}} for this analysis, i.e. the object containing the empirical scaling function coefficients. This is required because of the information it contains about the wavelet filter used, the resolution of the projection, and the bounds on the translation index of the scaling function coefficients.} \item{lims}{Vector containing the minimum and maximum x values required on the plot.} \item{nT}{The number of iterations to be performed in the Daubechies-Lagarias algorithm, which is used to evaluate the scaling functions of the specified wavelet basis at the plotting points.} \item{n}{The number of points at which the density estimate is to be evaluated.} } \description{ Calculates plotting information for a wavelet density estimate from high level scaling function coefficients. } \details{ The density estimate is evaluated at \code{n} points between the values in \code{lims}. This function can be used to plot the empirical scaling function density estimate by entering \code{wr=coef$coef}, but since the empirical coefficients are usually found at some very high resolution, such a plot will be very noisy and not very informative. 
This function will be of much more use as and when thresholding function are included in this density estimation package. } \value{ A list with components: \item{x}{The points at which the density estimate is evaluated.} \item{y}{The values of the density estimate at the points in \code{x}.} } \examples{ # Simulate data from the claw density and find the # empirical scaling function coefficients at a lowish resolution and plot # the resulting density estimate data <- rclaw(100) datahr <- denproj(data, J=3, filter.number=2,family="DaubExPhase") datapl <- denplot(datahr$coef, datahr, lims=c(-3,3), n=1000) \dontrun{plot(datapl, type="l")} } \seealso{\code{\link{denproj}},\code{\link{rclaw}}} \author{David Herrick} \keyword{smooth} wavethresh/man/denwr.rd0000644000176200001440000000351714211622540014656 0ustar liggesusers\name{denwr} \alias{denwr} \title{Wavelet reconstruction for density estimation. } \usage{ denwr(wd, start.level=0, verbose=FALSE, bc=wd$bc, return.object=FALSE, filter.number=wd$filter$filter.number, family=wd$filter$family) } \arguments{ \item{wd}{Wavelet decomposition object to reconstruct} \item{start.level}{The level you wish to start the reconstruction at. This is usually the first level (level 0). Note that this option assumes the coarsest level is labelled 0, so it is best to think of this argument as "the number of levels up from the coarsest level to start the reconstruction".} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so it is turned off by default.} \item{bc}{The boundary conditions used. These should be determined by those used to create the supplied \code{\link{wd.object}} object. In the case of density estimation they are "zero".} \item{filter.number}{The filter number of the wavelet used to do the reconstruction. Again, as for bc, you should probably leave this argument alone.} \item{family}{The type of wavelet used to do the reconstruction. You can change this argument from the default but it is probably NOT wise.} \item{return.object}{If this is FALSE then the top level of the reconstruction is returned (this is the reconstructed function at the highest resolution). Otherwise, if it is TRUE, the whole wd reconstructed object is returned.} } \description{ Performs wavelet reconstruction for density estimation. } \details{ This is the same as \code{\link{wr.wd}}, except that it can handle zero boundary conditions. } \value{ Either a vector containing the top level reconstruction or an object of class \code{\link{wd.object}} containing the results of the reconstruction. } \author{David Herrick} \keyword{smooth} wavethresh/man/LSWsim.rd0000644000176200001440000001252414211622540014713 0ustar liggesusers\name{LSWsim} \alias{LSWsim} \title{Simulate arbitrary locally stationary wavelet process.} \description{ Simulates an arbitrary LSW process given a spectrum. } \usage{ LSWsim(spec) } \arguments{ \item{spec}{An object of class \code{\link{wd}} (the NDWT kind) which contains the spectral information for simulating your process. See examples below on how to create and manipulate this object.} } \details{ This function uses a spectral definition in spec to simulate a locally stationary wavelet process (defined by the Nason, von Sachs and Kroisandt, 2000, JRSSB paper). The input object, \code{spec}, is a \code{\link{wd}} class object which contains a spectral description. 
In particular, all coefficients must be nonnegative and \code{LSWsim()} checks for this and returns an error if it is not so. Other than that the spectrum can contain pretty much anything. An object of this type can be easily created by the convenience routine \code{\link{cns}}. This creates an object of the correct structure but all elements are initially set to zero. The spectrum structure \code{spec} can then be filled by using the \code{\link{putD}} function. The function works by first checking for non-negativity. Then it takes the square root of all coefficients. Then it multiplies all coefficients by a standard normal variate (from \code{rnorm()}) and multiplies the finest level by 2, the next finest by 4, the next by 8 and so on. (This last scalar multiplication is intended to undo the effect of the average basis averaging which combines coefficients but divides by two at each combination). Finally, the modified spectral object is subjected to the \code{\link{convert}} function which converts the object from a \code{\link{wd}} time-ordered NDWT object to a \code{\link{wst}} packet-ordered object which can then be inverted using \code{\link{AvBasis}}. Note that the NDWT transforms in WaveThresh are periodic so that the process that one simulates with this function is also periodic. } \value{ A vector simulated from the spectral description given in the \code{spec} argument. The returned vector will exhibit the spectral characteristics defined by \code{spec}. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 2004 } \seealso{ \code{\link{wd.object}}, \code{\link{putD}}, \code{\link{cns}}, \code{\link{AvBasis}}, \code{\link{convert}}, \code{\link{ewspec}}, \code{\link{plot.wst}} } \examples{ # # Suppose we want to create an LSW process of length 1024 and with a spectral # structure that has a squared sinusoidal character at level 4 and a burst of # activity from time 800 for 100 observations at scale 9 (remember for a # process of length 1024 there will be 10 resolution levels (since 2^10=1024) # where level 9 is the finest and level 0 is the coarsest). # # First we will create an empty spectral structure for a series of 1024 observations # # myspec <- cns(1024) # # If you plot it you'll get a null spectrum (since every spectral entry is zero) # \dontrun{plot(myspec, main="My Spectrum")} # # # Now let's add the desired spectral structure # # First the squared sine (remember spectra are positive) # myspec <- putD(myspec, level=4, sin(seq(from=0, to=4*pi, length=1024))^2) # # Let's create a burst of spectral info of size 1 from 800 to 900. Remember # the whole vector has to be of length 1024. # burstat800 <- c(rep(0,800), rep(1,100), rep(0,124)) # # Insert this (00000111000) type vector into the spectrum at fine level 9 # myspec <- putD(myspec, level=9, v=burstat800) # # Now it's worth plotting this spectrum # \dontrun{plot(myspec, main="My Spectrum")} # # The squared sinusoid at level 4 and the burst at level 9 can clearly # be seen # # # Now simulate a random process with this spectral structure. # myLSWproc <- LSWsim(myspec) # # Let's see what it looks like # \dontrun{ts.plot(myLSWproc)} # # # The burst is very clear but the sinusoidal structure is less apparent. # That's basically it. # # You could now play with the spectrum (ie alter it) or simulate another process # from it. # # [The following is somewhat of an aside but useful to those more interested # in the LSW scene. We could now ask, so what? So you can simulate an # LSW process. 
How can I be sure that it is doing so correctly? Well, here is # a partial, computational, answer. If you simulate many realisations from the # same spectral structure, estimate its spectrum, and then average those # estimates then the average should tend to the spectrum you supplied. Here is a # little function to do this (just for Haar but this function could easily be # developed to be more general): # checkmyews <- function(spec, nsim=10){ ans <- cns(2^nlevelsWT(spec)) for(i in 1:nsim) { cat(".") LSWproc <- LSWsim(spec) ews <- ewspec(LSWproc, filter.number=1, family="DaubExPhase", WPsmooth=F) ans$D <- ans$D + ews$S$D ans$C <- ans$C + ews$S$C } ans$D <- ans$D/nsim ans$C <- ans$C/nsim ans } # If you supply it with a spectral structure (like myspec) # from above and do enough simulations you'll get something looking like # the original myspec structure. E.g. try # \dontrun{plot(checkmyews(myspec, nsim=100))} ## # for fun. This type of check also gives you some idea of how much data # you really need for LSW estimation for given spectral structures.] # } \keyword{manip} \author{G P Nason} wavethresh/man/getarrvec.rd0000644000176200001440000000776514211622540015532 0ustar liggesusers\name{getarrvec} \alias{getarrvec} \title{Compute and return weaving permutation for conversion from wst objects to wd class objects. } \description{ Computes weaving permutation for conversion from \code{\link{wst}} objects to \code{\link{wd}} } \usage{ getarrvec(nlevels, sort=TRUE) } \arguments{ \item{nlevels}{The \code{number of levels} in the non-decimated transform for which the permutation is to be computed.} \item{sort}{If \code{TRUE} then compute permutation for indexing a \code{\link{wst}} object. If \code{FALSE} then compute permutation for indexing a \code{wd} object.} } \details{ Conversion of \code{\link{wst}} objects into \code{\link{wd}} objects and vice versa can be carried out using the \code{\link{convert.wst}} and \code{\link{convert.wd}} functions. These latter functions depend on this getarrvec function to compute the permutation which maps coefficients from one ordering to the other. This function returns a matrix which gives the necessary permutations for scale levels 1 to \code{nlevels-1}. If you want to get the permutation for the level 0 coefficients of the \code{\link{wst}} object you will have to call the \code{\link{levarr}} function directly. This permutation is described in Nason, Sapatinas and Sawczenko, 1998. The function that actually computes the permutations is \code{\link{levarr}}. This function just combines the results from \code{\link{levarr}}. } \value{ A matrix with \code{nlevel}s-1 columns. Column 1 corresponds to scale level \code{nlevels-1} in the \code{\link{wst}} object, and column \code{nlevels-1} corresponds to scale level 1 in the \code{\link{wst}} object. Replace \code{\link{wst}} by \code{\link{wd}} if \code{sort=FALSE}. } \section{RELEASE}{Version 3.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{convert}}, \code{\link{convert.wd}}, \code{\link{convert.wst}}, \code{\link{levarr}}, \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{wpst}}. } \examples{ # # What would the permutation be for a wst # object with 4 levels? 
# arrvec <- getarrvec(4) #arrvec # [,1] [,2] [,3] # [1,] 1 1 1 # [2,] 9 9 9 # [3,] 2 5 5 # [4,] 10 13 13 # [5,] 3 2 3 # [6,] 11 10 11 # [7,] 4 6 7 # [8,] 12 14 15 # [9,] 5 3 2 #[10,] 13 11 10 #[11,] 6 7 6 #[12,] 14 15 14 #[13,] 7 4 4 #[14,] 15 12 12 #[15,] 8 8 8 #[16,] 16 16 16 # # The permutation for level 3 is in column 1 # The permutation for level 2 is in column 2 # The permutation for level 1 is in column 3. # # The following shows that the above is the right permutation (for level 2 # at least. # # Start off with some random normal data! # myrand <- rnorm(1:16) # # Now take both the time ordered non-decimated wavelet # transform and the packet ordered non-decimated wavelet # transform. # myrwdS <- wd(myrand, type="station") myrwst <- wst(myrand) # # Let's look at the level 2 coefficients of myrwdS # accessD(myrwdS, level=2) # [1] -0.73280829 -0.97892279 1.33305777 1.46320165 -0.94790098 # [6] -1.39276215 0.40023757 0.82517249 -0.56317955 -0.89408713 #[11] 0.77166463 1.56204870 -0.34342230 -1.64133182 0.08235115 #[16] 1.05668106 # # Let's look at the level 2 coefficients of myrwst # accessD(myrwst, level=2) # [1] -0.73280829 -0.94790098 -0.56317955 -0.34342230 1.33305777 # [6] 0.40023757 0.77166463 0.08235115 -0.97892279 -1.39276215 #[11] -0.89408713 -1.64133182 1.46320165 0.82517249 1.56204870 #[16] 1.05668106 # # O.k. So the coefficients are the same, but they are not in the # same order as in myrwdS. So let's use the permutation in the # second column of arrvec to reorder the myrwst coefficients # to have the same order as the myrwdS ones # accessD(myrwst, level=2)[arrvec[,2]] # [1] -0.73280829 -0.97892279 1.33305777 1.46320165 -0.94790098 # [6] -1.39276215 0.40023757 0.82517249 -0.56317955 -0.89408713 #[11] 0.77166463 1.56204870 -0.34342230 -1.64133182 0.08235115 #[16] 1.05668106 # # These coefficients have the correct ordering. } \keyword{array} \author{G P Nason} wavethresh/man/ScalingFunction.rd0000644000176200001440000000201714211622634016623 0ustar liggesusers\name{ScalingFunction} \alias{ScalingFunction} \title{Compute scaling functions on internally predefined grid} \usage{ ScalingFunction(filter.number = 10, family = "DaubLeAsymm", resolution = 4096, itlevels = 50) } \arguments{ \item{filter.number}{The filter number of the associated wavelet. See \code{\link{filter.select}}} \item{family}{The family of the associated wavelet. See \code{\link{filter.select}}} \item{resolution}{The nominal resolution, the actual grid size might be larger than this} \item{itlevels}{The number of complete filtering operations to generate the answer} } \description{ This is a subsidiary routine not intended to be called by a user: use \code{\link{draw}} instead. Generates scaling functions by inserting a Kronecker delta function into the bottom of the inverse DWT and repeating the inverting steps. } \details{ Description says all } \value{ A list containing the \code{x} and \code{y} values of the required scaling function. } \seealso{\code{\link{draw}}} \author{G P Nason} \keyword{dplot} wavethresh/man/wvcvlrss.rd0000644000176200001440000000501714211622634015431 0ustar liggesusers\name{wvcvlrss} \alias{wvcvlrss} \title{Computes estimate of error for function estimate. } \description{ This function is merely a call to the \code{\link{GetRSSWST}} function. } \usage{ wvcvlrss(threshold, ndata, levels, type, filter.number, family, norm, verbose, InverseType) } \arguments{ \item{threshold}{the value of the threshold that you wish to compute the error of the estimate at} \item{ndata}{the noisy data. 
This is a vector containing the signal plus noise. The length of this vector should be a power of two.} \item{levels}{the levels over which you wish the threshold value to be computed (the threshold that is used in computing the estimate and error in the estimate). See the explanation for this argument in the \code{\link{threshold.wst}} function. } \item{type}{whether to use hard or soft thresholding. See the explanation for this argument in the \code{\link{threshold.wst}} function.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{norm}{which measure of distance to judge the dissimilarity between the estimates. The functions \code{\link{l2norm}} and \code{\link{linfnorm}} are suitable examples.} \item{verbose}{If \code{TRUE} then informative messages are printed during the progression of the function, otherwise they are not.} \item{InverseType}{The possible options are "average" or "minent". The former uses basis averaging to form estimates of the unknown function. The "minent" function selects a basis using the Coifman and Wickerhauser, 1992 algorithm to select a basis to invert.} } \details{ This function is merely a call to the \code{\link{GetRSSWST}} function with a few arguments interchanged. In particular, the first two arguments are interchanged. This is to make life easier for use with the \code{nlminb} function which expects the first argument of the function it is trying to optimise to be the variable that the function is optimised over. } \value{ A real number which is estimate of the error between estimate and truth at the given threshold. } \section{RELEASE}{Version 3.6 Copyright Guy Nason 1995 } \seealso{ \code{\link{GetRSSWST}}. } \examples{ # # This function performs the error estimation step for the # wstCVl function and so is not intended for # user use. # } \keyword{manip} \author{G P Nason} wavethresh/man/IsEarly.rd0000644000176200001440000000110714211622540015100 0ustar liggesusers\name{IsEarly} \alias{IsEarly} \title{Generic function to detect whether object is from an early version} \usage{ IsEarly(x) } \arguments{ \item{x}{The object you want to see whether its from an early version} } \description{ Generic function to detect whether object is from an early version of WaveThresh } \details{ Description says all } \value{ Returns TRUE if object is from an earlier version of WaveThresh, FALSE if not. } \seealso{\code{\link{ConvertMessage}},\code{\link{IsEarly.default}},\code{\link{IsEarly}}, \code{\link{IsEarly.wd}}} \author{G P Nason} \keyword{error} wavethresh/man/Whistory.wst.rd0000644000176200001440000000102114211622634016173 0ustar liggesusers\name{Whistory.wst} \alias{Whistory.wst} \title{Obsolete function: as Whistory, but for wst objects} \usage{ \method{Whistory}{wst}(wst, all=FALSE, \dots) } \arguments{ \item{wst}{The object that you want to display the history for} \item{all}{Print the whole history list} \item{\dots}{Other arguments} } \description{ Obsolete function, see \code{\link{Whistory}}. } \details{ Description says all } \value{ Nothing, but history information is printed. 
} \seealso{\code{\link{Whistory}}} \author{G P Nason} \keyword{utilities} wavethresh/man/BAYES.THR.rd0000644000176200001440000001132614211622540015033 0ustar liggesusers\name{BAYES.THR} \alias{BAYES.THR} \title{Bayesian wavelet thresholding.} \description{ This function carries out Bayesian wavelet thresholding of noisy data, using the BayesThresh method of Abramovich, Sapatinas, & Silverman (1998). } \usage{ BAYES.THR(data, alpha = 0.5, beta = 1, filter.number = 8, family = "DaubLeAsymm", bc = "periodic", dev = var, j0 = 5, plotfn = FALSE) } \arguments{ \item{data}{A vector of length a power of two, containing noisy data to be thresholded.} \item{alpha, beta}{Hyperparameters which determine the priors placed on the wavelet coefficients. Both alpha and beta take positive values; see Abramovich, Sapatinas, & Silverman (1998) or Chipman & Wolfson (1999) for more details on selecting \code{alpha} and \code{beta}.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments. For the ``wavelets on the interval'' (\code{bc="interval"}) transform the filter number ranges from 1 to 8. See the table of filter coefficients indexed after the reference to Cohen, Daubechies and Vial, (1993).} \item{family}{Specifies the family of wavelets that you want to use. Two popular options are "DaubExPhase" and "DaubLeAsymm" but see the help for \link{filter.select} for more possibilities. This argument is ignored for the ``wavelets on the interval'' transform (\code{bc="interval"}).} \item{bc}{Specifies the boundary handling. If \code{bc="periodic"} the default, then the function you decompose is assumed to be periodic on it's interval of definition, if \code{bc="symmetric"} then the function beyond its boundaries is assumed to be a symmetric reflection of the function in the boundary. The symmetric option was the implicit default in releases prior to 2.2. If\code{bc=="interval"} then the ``wavelets on the interval algorithm'' due to Cohen, Daubechies and Vial is used. (The \code{WaveThresh} implementation of the ``wavelets on the interval transform'' was coded by Piotr Fryzlewicz, Department of Mathematics, Wroclaw University of Technology, Poland; this code was largely based on code written by Markus Monnerjahn, RHRK, Universitat Kaiserslautern; integration into \code{WaveThresh} by \code{GPN}).} \item{dev}{This argument supplies the function to be used to compute the spread of the absolute values coefficients. The function supplied must return a value of spread on the variance scale (i.e. not standard deviation) such as the \code{var()} function. A popular, useful and robust alternative is the \code{madmad} function.} \item{j0}{The primary resolution level. While BayesThresh thresholds at all resolution levels, j0 is used in assessing the universal threshold which is used in the empirical Bayes estimation of hyperparameters.} \item{plotfn}{If TRUE, BAYES.THR draws the noisy data and the thresholded function estimate.} } \details{ A mixture prior consisting of a zero-mean normal distribution and a point mass at zero is placed on each wavelet coefficient. The empirical coefficients are then calculated and the priors updated to give posterior distributions for each coefficient. The thresholded value of each coefficient is the median of that coefficient's posterior distribution. 
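Concretely (a sketch only, using notation borrowed from the reference cited below; the constants involved and their empirical estimation are described there), the prior placed on each wavelet coefficient \eqn{d_{jk}} at resolution level \eqn{j} has the form \deqn{d_{jk} \sim \pi_j N(0, \tau_j^2) + (1-\pi_j)\delta_0,}{d_jk ~ pi_j N(0, tau_j^2) + (1 - pi_j) delta_0,} where \eqn{\delta_0} denotes a point mass at zero, and the level-dependent variance \eqn{\tau_j^2} and mixing weight \eqn{\pi_j} decay with increasing \eqn{j} at rates governed by the hyperparameters \code{alpha} and \code{beta} respectively.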
See Abramovich, Sapatinas, & Silverman (1998) for more details of the procedure; the help page for \code{\link{threshold.wd}} has more information about wavelet thresholding in general. The function \code{wave.band} uses the same priors to compute posterior credible intervals for the regression function, using the method described by Barber, Nason, & Silverman (2001). } \value{ A vector containing the thresholded estimate of the function from which the data was drawn. } \section{RELEASE}{3.9.5 Code by Fanis Sapatinas/Felix Abramovich Documentation by Stuart Barber } \seealso{ \code{\link{threshold.wd}}, \code{\link{wd}} } \examples{ # # Generate some noisy test data and plot it. # blocks.data <- DJ.EX(n=512, noisy=TRUE)$blocks # # Now try BAYES.THR with the default parameters. # blocks.thr <- BAYES.THR(blocks.data, plotfn=TRUE) # # The default wavelet is Daubechies' least asymmetric wavelet # with 8 vanishing moments; quite a smooth wavelet. Since the # flat sections are still rather noisy, try Haar wavelets: # blocks.thr <- BAYES.THR(blocks.data, plotfn=TRUE, filter.number=1, family = "DaubExPhase") # # To show the importance of a sensible prior, consider alpha = 4, # beta = 1 (which implies a smoother prior than the default). # blocks.thr <- BAYES.THR(blocks.data, plotfn=TRUE, filter.number=1, family = "DaubExPhase", alpha=4, beta=1) # # Here, the extreme values of the function are being smoothed towards zero. # } \keyword{smooth} \author{G P Nason} wavethresh/man/threshold.rd0000644000176200001440000000271114211622634015532 0ustar liggesusers\name{threshold} \alias{threshold} \title{Threshold coefficients} \description{ Modify coefficients by thresholding or shrinkage. This function is generic. Particular methods exist for the following objects: \describe{ \item{wd object}{the \code{\link{threshold.wd}} function is used;} \item{imwd object}{the \code{\link{threshold.imwd}} function is used;} \item{imwdc object}{the \code{\link{threshold.imwdc}} function is used;} \item{irregwd object}{the \code{\link{threshold.irregwd}} function is used;} \item{wd3D object}{the \code{\link{threshold.wd3D}} function is used;} \item{wp object}{the \code{\link{threshold.wp}} function is used;} \item{wst object}{the \code{\link{threshold.wst}} function is used.} } } \usage{ threshold(...) } \arguments{ \item{\dots}{See individual help pages for details.} } \details{ See individual method help pages for operation and examples. } \value{ Usually a copy of the input object but containing thresholded or shrunk coefficients. } \section{RELEASE}{Version 2 Copyright Guy Nason 1993 } \seealso{ \code{\link{imwd.object}}, \code{\link{imwdc.object}}, \code{irregwd object}, \code{\link{threshold.imwd}}, \code{\link{threshold.imwdc}}, \code{\link{threshold.irregwd}}, \code{\link{threshold.wd}}, \code{\link{threshold.wd3D}}, \code{\link{threshold.wp}}, \code{\link{threshold.wst}} \code{\link{wd.object}}, \code{\link{wd3D.object}}, \code{\link{wp.object}}, \code{\link{wst.object}}. } \keyword{manip} \author{G P Nason} wavethresh/man/putD.wp.rd0000644000176200001440000000470214211622634015101 0ustar liggesusers\name{putD.wp} \alias{putD.wp} \title{Puts a whole resolution level of wavelet packet coeffients into wp wavelet object.} \description{ Makes a copy of the \code{\link{wp}} object, replaces a whole resolution level of wavelet packet coefficients data in the copy, and then returns the copy. 
} \usage{ \method{putD}{wp}(wp, level, value, \dots) } \arguments{ \item{wp}{Wavelet packet object into which you wish to insert the wavelet packet coefficients.} \item{level}{the resolution level at which you wish to replace the wavelet packet coefficients.} \item{value}{the replacement data, this should be of the correct length.} \item{\dots}{any other arguments} } \details{ The function \code{\link{accessD.wp}} obtains the wavelet packet coefficients for a particular level. For wavelet packet transforms the number of coefficients at each resolution level is the same and equal to \code{2^nlevelsWT} where \code{nlevels} is the number of levels in the \code{\link{wp.object}}. The number of coefficients at each resolution level is also, of course, the number of data points used to initially form the \code{\link{wp}} object in the first place. Use the \code{\link{accessD.wp}} to extract whole resolution levels of wavelet packet coefficients. We don't recommend that you use this function unless you really know what you are doing. Usually it is more convenient to manipulate individual \emph{packets} of coefficients using \code{\link{getpacket}}/\code{\link{putpacket}} functions. If you must use this function to insert whole resolution levels of coefficients you must ensure that the data vector you supply is valid: i.e. contains packet coefficients in the right order. } \value{ A \code{\link{wp}} class object containing the modified wavelet packet coefficients. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wp.object}}, \code{\link{wp}}, \code{\link{accessD}}, \code{\link{accessD.wp}}, \code{\link{getpacket.wp}}, \code{\link{putpacket.wp}}. } \examples{ # # Generate an EMPTY wp object: # zero <- rep(0, 16) zerowp <- wp(zero) # # Put some random mother wavelet coefficients into the object at # resolution level 2. For the wavelet packet transform there # are always 16 coefficients at every resolution level. # mod.zerowp <- putD( zerowp, level=2, v=rnorm(16)) # # If you plot mod.zerowp you will see that there are only # coefficients at resolution level 2 where you just put the coefficients. } \keyword{manip} \author{G P Nason} wavethresh/man/basisplot.BP.rd0000644000176200001440000000154414211622540016035 0ustar liggesusers\name{basisplot.BP} \alias{basisplot.BP} \title{Plot time-frequency plane and basis slots associated with basis object} \usage{ \method{basisplot}{BP}(x, num=min(10, length(BP$level)), ...) } \arguments{ \item{x}{The \code{BP} class object, possibly coming from the BP component of the object returned by \code{\link{makewpstDO}} that you wish to plot} \item{num}{The number of packets that you wish to add to the plot} \item{\dots}{Other arguments} } \description{ The \code{x} objects store basis information obtained through the \code{\link{makewpstDO}} object. This function plots where the basis packets are on the time frequency plane. } \details{ Description says all } \value{ Nothing of note } \seealso{\code{\link{makewpstDO}},\code{\link{Best1DCols}}} \examples{ # # See example in help for \code{\link{makewpstDO}} # } \author{G P Nason} \keyword{hplot} wavethresh/man/uncompress.default.rd0000644000176200001440000000160214211622634017355 0ustar liggesusers\name{uncompress.default} \alias{uncompress.default} \title{Undo zero run-length encoding for a vector.} \usage{ \method{uncompress}{default}(v, verbose=FALSE, ...) 
} \arguments{ \item{v}{The object to uncompress} \item{verbose}{Print an informative message whilst executing} \item{\dots}{Other arguments} } \description{ This function inverts the action carried out by the \code{\link{compress.default}} function. } \details{ The inverse of \code{\link{compress.default}} } \value{ The uncompressed, reinstated, vector. } \seealso{\code{\link{compress.default}}, \code{\link{uncompress}}} \examples{ uncompress(compress(c(1, rep(0,99), 1))) #[1] 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #[38] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 #[75] 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 } \author{G P Nason} \keyword{manip} wavethresh/man/dclaw.rd0000644000176200001440000000150114211622540014620 0ustar liggesusers\name{dclaw} \alias{dclaw} \alias{rclaw} \alias{pclaw} \title{Claw distribution} \usage{ rclaw(n) dclaw(x) pclaw(q) } \arguments{ \item{n}{Number of draws from \code{rclaw} distribution} \item{x}{Vector of ordinates} \item{q}{Vector of quantiles} } \description{ Random generation, density and cumulative probability for the claw distribution. } \details{ The claw distribution is a normal mixture distribution, introduced in Marron & Wand (1992). Marron, J.S. & Wand, M.P. (1992). Exact Mean Integrated Squared Error. \emph{Ann. Stat.}, \bold{20}, 712--736. } \value{Random samples (rclaw), density (dclaw) or probability (pclaw) of the claw distribution. } \examples{ # Plot the claw density on the interval [-3,3] x <- seq(from=-3, to=3, length=500) \dontrun{plot(x, dclaw(x), type="l")} } \author{David Herrick} \keyword{smooth} wavethresh/man/rmname.rd0000644000176200001440000000347714211622634015027 0ustar liggesusers\name{rmname} \alias{rmname} \title{Return a ipndacw matrix style name.} \description{ This function returns a character string according to a particular format for naming \code{\link{ipndacw}} matrices. } \usage{ rmname(J, filter.number, family) } \arguments{ \item{J}{A negative integer representing the order of the \code{\link{ipndacw}} matrix.} \item{filter.number}{The index number of the wavelet used to build the \code{\link{ipndacw}} matrix.} \item{family}{The wavelet family used to build the \code{\link{ipndacw}} matrix.} } \details{ Some of the matrices computed by \code{\link{ipndacw}} take a long time to compute. Hence it is a good idea to store them and reuse them. This function generates a name according to a particular naming scheme that permits a search algorithm to easily find the matrices. Each matrix has three defining characteristics: its \emph{order}, \emph{filter.number} and \emph{family}. Each of these three characteristics are concatenated together to form a name. } \value{ A character string containing the name of a matrix according to a particular naming scheme. } \references{Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. \emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{ewspec}}, \code{\link{ipndacw}}, } \examples{ # # What's the name of the order 4 Haar matrix? # rmname(-4, filter.number=1, family="DaubExPhase") #[1] "rm.4.1.DaubExPhase" # # What's the name of the order 12 Daubechies least-asymmetric wavelet # with 7 vanishing moments? 
# rmname(-12, filter.number=7, family="DaubLeAsymm") #[1] "rm.12.7.DaubLeAsymm" } \keyword{manip} \author{G P Nason} wavethresh/man/av.basis.rd0000644000176200001440000000240114211622540015234 0ustar liggesusers\name{av.basis} \alias{av.basis} \title{Perform basis averaging for wst class object} \usage{ av.basis(wst, level, ix1, ix2, filter) } \arguments{ \item{wst}{The \code{\link{wst.object}} that you wish to basis average} \item{level}{The resolution level the function is currently operating at} \item{ix1}{Which "left" packet in the level you are accessing} \item{ix2}{Which "right" packet} \item{filter}{The wavelet filter details, see \code{\link{filter.select}}} } \description{ \bold{Note:} that this function is not for direct user use. This function is a helper routine for the \code{\link{AvBasis.wst}} function which is the one that should be used by users. This function works by recursion, essentially it merges the current levels C coefficients from one packet shift with its associated D coefficients, does the same for the other packet shift and then averages the two reconstructions to provide the C coefficients for the next level up. } \details{ Description says all, see help page for \code{\link{AvBasis.wst}}. } \value{ Returns the average basis reconstruction of a \code{\link{wst.object}}. } \seealso{\code{\link{AvBasis}}, \code{\link{AvBasis.wst}}, \code{\link{conbar}}, \code{\link{rotateback}}, \code{\link{getpacket}}} \author{G P Nason} \keyword{manip} wavethresh/man/draw.rd0000644000176200001440000000266614211622540014500 0ustar liggesusers\name{draw} \alias{draw} \title{Draw wavelets or scaling functions.} \description{ Draws the mother wavelet or scaling function associated with an object. This function is generic. Particular methods exist. The following functions are used for the following objects: \describe{ \item{imwd.object}{the \code{\link{draw.imwd}} function is used.} \item{imwdc.object}{the \code{\link{draw.imwdc}} function is used.} \item{wd.object}{the \code{\link{draw.wd}} function is used.} \item{wp.object}{the \code{\link{draw.wp}} function is used.} \item{wst.object}{the \code{\link{draw.wst}} function is used.} } All of the above method functions use the \code{\link{draw.default}} function which is the function which actually does the drawing. } \usage{ draw(...) } \arguments{ \item{\dots}{methods may have additional arguments} } \details{ See individual method help pages for operation and examples. } \value{ If the \code{plot.it} argument is supplied then the draw functions tend to return the coordinates of what they were meant to draw and don't actually draw anything. } \section{RELEASE}{Version 2 Copyright Guy Nason 1993} \seealso{ \code{\link{draw.default}}, \code{\link{draw.imwd}}, \code{\link{draw.imwdc}}, \code{\link{draw.wd}}, \code{\link{draw.wp}}, \code{\link{draw.wst}}, \code{\link{imwd.object}}, \code{\link{imwdc.object}}, \code{\link{wd.object}}, \code{\link{wp.object}}, \code{\link{wst.object}}. } \keyword{hplot} \author{G P Nason} wavethresh/man/print.mwd.rd0000644000176200001440000000370514211622540015460 0ustar liggesusers\name{print.mwd} \alias{print.mwd} \title{Use print() on a mwd object.} \description{ This function prints out information about an \code{\link{mwd.object}} in a nice human-readable form. Note that this function is automatically called by SPlus whenever the name of an \code{\link{mwd.object}} is typed or whenever such an object is returned to the top level of the S interpreter. } \usage{ \method{print}{mwd}(x, ...) 
} \arguments{ \item{x}{An object of class mwd that you wish to print out.} \item{\dots}{This argument actually does nothing in this function!} } \details{ Prints out information about \code{\link{mwd}} objects in nice readable format. } \value{ The last thing this function does is call \code{\link{summary.mwd}} so the return value is whatever is returned by this function. } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1995-6) } \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}},\code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ # # Generate an mwd object. # tmp <- mwd(rnorm(32)) # # Now get Splus to use print.mwd # tmp # Class 'mwd' : Discrete Multiple Wavelet Transform Object: # ~~~ : List with 10 components with names # C D nlevelsWT ndata filter fl.dbase type bc prefilter date # # $ C and $ D are LONG coefficient vectors ! # # Created on : Tue Nov 16 13:16:07 GMT 1999 # Type of decomposition: wavelet # # summary: # ---------- # Length of original: 32 # Levels: 4 # Filter was: Geronimo Multiwavelets # Scaling fns: 2 # Wavelet fns: 2 # Prefilter: default # Scaling factor: 2 # Boundary handling: periodic # Transform type: wavelet # Date: Tue Nov 16 13:16:07 GMT 1999 } \keyword{utilities} \author{G P Nason} wavethresh/man/BMdiscr.rd0000644000176200001440000000124414211622540015055 0ustar liggesusers\name{BMdiscr} \alias{BMdiscr} \title{Subsidiary routine for makewpstDO function} \description{ Function actually performs discrimination on reduced variable set supplied to it from \code{\link{Best1DCols}} function. } \usage{ BMdiscr(BP) } \arguments{ \item{BP}{An list of the same format as returned by \code{\link{Best1DCols}}} } \details{ Not intended for direct user use } \value{ Returns a list of objects: essentially the input argument \code{BP} and the return value from a call to the \code{lda} function which performs the discrimination operation. } \seealso{\code{\link{Best1DCols}},\code{\link{makewpstDO}}} \author{G P Nason} \keyword{ts} \keyword{multivariate} wavethresh/man/rotateback.rd0000644000176200001440000000124114211622634015652 0ustar liggesusers\name{rotateback} \alias{rotateback} \title{Cyclically shift a vector one place to the right} \usage{ rotateback(v) } \arguments{ \item{v}{The vector to shift} } \description{ Cyclically shifts the elements of a vector one place to the right. The right-most element becomes the first element. } \details{ Subsidiary function used by the \code{\link{av.basis}} function which is the R function component of the \code{\link{AvBasis.wst}} function. } \value{ The rotated vector } \examples{ # # Here is a test vector # v <- 1:10 # # Apply this function # rotateback(v) #[1] 10 1 2 3 4 5 6 7 8 9 # # A silly little function really! } \author{G P Nason} \keyword{math} wavethresh/man/nullevels.wst.rd0000644000176200001440000000315614211622540016363 0ustar liggesusers\name{nullevels.wst} \alias{nullevels.wst} \title{Sets whole resolution levels of coefficients equal to zero in a wst object.} \description{ Sets whole resolution levels of coefficients equal to zero in a \code{\link{wd}} object. 
} \usage{ \method{nullevels}{wst}(wst, levelstonull, \dots) } \arguments{ \item{wst}{An object of class \code{\link{wst}}.} \item{levelstonull}{An integer vector specifying which resolution levels of coefficients of \code{\link{wst}} that you wish to set to zero. } \item{\dots}{any other arguments} } \details{ Setting whole resolution levels of coefficients to zero can be very useful. For examples, one can construct a linear smoothing method by setting all coefficients above a particular resolution (the \emph{primary resolution} equal to zero. Also setting particular levels equal to zero can also be useful for removing noise which is specific to a particular resolution level (as long as important signal is not also contained at that level). To remove individual coefficients on a systematic basis you probably want to look at the \code{\link{threshold}} function. } \value{ An object of class \code{\link{wst}} where the coefficients in resolution levels specified by \code{levelstonull} have been set to zero. } \section{RELEASE}{Version 3.8.1 Copyright Guy Nason 1997 } \seealso{ \code{\link{nullevels}}, \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{threshold}}. } \examples{ # # Look at the examples for \code{\link{nullevels.wd}}. # The operation is almost identical except that \code{\link{wst}} # objects are replaced by \code{\link{wd}} ones. } \keyword{manip} \author{G P Nason} wavethresh/man/wp.object.rd0000644000176200001440000000531114211622634015430 0ustar liggesusers\name{wp.object} \alias{wp.object} \title{Wavelet Packet decomposition objects.} \description{ These are objects of classes \code{wp} They represent a decomposition of a function with respect to a set of wavelet packet functions. } \details{ To retain your sanity we recommend that wavelet packets be extracted in one of two ways: \itemize{ \item{use \code{\link{getpacket.wp}} to obtain individual packets.} \item{use \code{\link{accessD.wp}} to obtain all coefficients at a particular resolution level.} } You can obtain the coefficients directly from the \code{wp$wp} component but you have to understand their organization described above. } \value{ The following components must be included in a legitimate `wp' object. \item{wp}{a matrix containing the wavelet packet coefficients. Each row of the matrix contains coefficients with respect to a particular resolution level. There are \code{nlevelsWT(wp)+1} rows in the matrix. Row \code{nlevels(wp)+1} (the ``bottom'') row contains the ``original'' data used to produce the wavelet packet coefficients. Rows \code{nlevels}(wp) to row 1 contain coefficients at resolution levels \code{nlevels(wp)-1} to 0 (so the first row contains coefficients at resolution level 0). The columns contain the coefficients with respect to packets. A different packet length exists at each resolution level. The packet length at resolution level \code{i} is given by \code{2^i}. However, the \code{\link{getpacket.wp}} function should be used to access individual packets from a \code{\link{wp}} object.} \item{nlevelsWT}{The number of levels in the wavelet packet decomposition. If you raise 2 to the power of nlevels you get the number of data points used in the decomposition. } \item{filter}{a list containing the details of the filter that did the decomposition (equivalent to the return value from the \code{\link{filter.select}} function). 
} \item{date}{The date that the transform was performed or the wp was modified.} } \section{GENERATION}{ This class of objects is returned from the \code{\link{wp}} function to represent a wavelet packet decomposition of a function. Many other functions return an object of class wp. } \section{METHODS}{ The wp class of objects has methods for the following generic functions: \code{\link{InvBasis}}, \code{\link{MaNoVe}}, \code{\link{accessC}}, \code{\link{accessD}}, \code{\link{basisplot}}, \code{\link{draw}}. \code{\link{getpacket}}, \code{\link{nlevelsWT}}, \code{\link{plot}}, \code{\link{print}}, \code{\link{putC}}, \code{\link{putD}}, \code{\link{putpacket}}, \code{\link{summary}}, \code{\link{threshold}}. } \section{RELEASE}{ Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wp}} } \keyword{classes} \keyword{smooth} \author{G P Nason} wavethresh/man/putDwd3Dcheck.rd0000644000176200001440000000253714211622634016200 0ustar liggesusers\name{putDwd3Dcheck} \alias{putDwd3Dcheck} \title{Check argument list for putD.wd3D} \description{ This function checks the argument list for \code{\link{putD.wd3D}} and is not meant to be directly called by any user. } \usage{ putDwd3Dcheck(lti, dima, block, nlx) } \arguments{ \item{lti}{At which level of the \code{\link{wd3D.object}} you wish to insert a block of coefficients.} \item{dima}{A vector, of length 3, which specifies the dimension of the block to insert.} \item{block}{A character string which specifies which block is being inserted (one of GGG, GGH, GHG, GHH, HGG, HGH, HHG, or HHH).} \item{nlx}{The number of levels in the \code{\link{wd3D.object}} that you wish to insert the coefficients into (can be obtained using the \code{\link{nlevelsWT}} function). } } \details{ This function merely checks that the dimensions and sizes of the array to be inserted into a \code{\link{wd3D.object}} using the \code{\link{putD.wd3D}} function are correct. } \section{RELEASE}{Version 3.9.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{accessD}}, \code{\link{putD}}, \code{\link{accessD.wd3D}}, \code{\link{print.wd3D}}, \code{\link{putD}}, \code{\link{summary.wd3D}}, \code{\link{threshold.wd3D}}, \code{\link{wd3D}}, \code{\link{wd3D.object}}, \code{\link{wr3D}}. } \examples{ # # Not intended to be used by the user! # } \keyword{manip} \author{G P Nason} wavethresh/man/nv.object.rd0000644000176200001440000000516314211622540015426 0ustar liggesusers\name{nv.object} \alias{nv.object} \title{Node vector objects.} \description{ These are objects of classes \code{nv} They represent a basis in a packet-ordered non-decimated wavelet transform object. } \details{ A \code{nv} object is a description of a basis which is a path through a packet ordered non-decimated wavelet transform. To view the basis just print it! See the examples in \code{\link{numtonv}} for a print out of its structure. A similar object exists for describing a basis in a wavelet packet object see nvwp. } \value{ The following components must be included in a legitimate `nv' object. \item{node.list}{This is a complicated structure composed of one-dimensional array of \code{nv$nlevelsWT} lists. Each item in the array is itself a list having two components\code{$upperctrl} and \code{upperl}. Each component is described as follows: \describe{ \item{upperctrl}{The `upperctrl' item in each is the most important. It consists of a vector of characters. 
Each character refers to a node in the non-decimated wavelet tree at that level and can only be one of the characters L (for left), R (for right) and S (for stop). Each character in the vector informs reconstruction algorithms that, to do the best thing (whatever the best thing is in any particular case, e.g. select the minimum entropy node downwards), you should select the left/right node or stop at the current node.} \item{upperl}{The `upperl' vector is in one-to-one correspondence with the `upperctrl' vector. Each entry is a number related in some way to the L/R/S entry. (For the minimum entropy basis this is the minimum entropy achieved by this selection).} \item{nlevelsWT}{The number of levels in the \code{\link{wst}} object that was involved in the creation of the \code{nv} object. Nv objects describe a basis relative to a packet ordered non-decimated wavelet transform object and thus must know the number of levels in that object.} } } } \section{GENERATION}{ This class of objects is returned from the \code{\link{MaNoVe.wst}} and \code{\link{numtonv}} functions. The former returns the minimum entropy basis (most sparse basis) obtained using the Coifman-Wickerhauser, 1992 algorithm. The latter permits selection of a basis by an index number. } \section{METHODS}{ The \code{nv} class of objects has methods for the following generic functions: \code{\link{print}}, \code{\link{nlevelsWT}}, \code{\link{InvBasis}}. } \section{RELEASE}{ Version 3.6.0 Copyright Guy Nason 1995 } \seealso{ \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{numtonv}}, \code{\link{print}}, \code{\link{nlevelsWT}}, \code{\link{InvBasis}}, \code{\link{MaNoVe.wst}}. } \keyword{classes} \author{G P Nason} wavethresh/man/filter.select.rd0000644000176200001440000001402314211622540016274 0ustar liggesusers\name{filter.select} \alias{filter.select} \title{Provide wavelet filter coefficients.} \description{ This function stores the filter coefficients necessary for doing a discrete wavelet transform (and its inverse), including complex-valued compactly supported wavelets. } \usage{ filter.select(filter.number, family="DaubLeAsymm", constant=1) } \arguments{ \item{filter.number}{This selects the desired filter, an integer that takes a value dependent upon the family that you select. For the complex-valued wavelets in the Lina-Mayrand family, the filter number takes the form x.y where x is the number of vanishing moments (3, 4, or 5) and y is the solution number (1 for x = 3 or 4 vanishing moments; 1, 2, 3, or 4 for x = 5 vanishing moments). Note: this argument has a different meaning for Littlewood-Paley wavelets, see the note below in the Details section.} \item{family}{ This selects the basic family that the wavelet comes from. The choices are \bold{DaubExPhase} for Daubechies' extremal phase wavelets, \bold{DaubLeAsymm} for Daubechies' ``least-asymmetric'' wavelets, \bold{Coiflets} for Coiflets, \bold{Lawton} for Lawton's complex-valued wavelets (equivalent to Lina-Mayrand 3.1 wavelets), \bold{LittlewoodPaley} for an approximation to Littlewood-Paley wavelets, or \bold{LinaMayrand} for the Lina-Mayrand family of complex-valued Daubechies' wavelets.} \item{constant}{This constant is applied as a multiplier to all the coefficients. It can be a vector, and so you can adapt the filter coefficients to be whatever you want. (This is a feature of negative utility, or ``there is less to this than meets the eye'' as my old PhD supervisor would say [GPN]).} } \details{ This function contains at least three types of filter.
Two types can be selected with family set to DaubExPhase: these wavelets are the Haar wavelet (selected by filter.number=1 within this family) and Daubechies ``extremal phase'' wavelets (selected by filter.numbers ranging from 2 to 10). Setting family to DaubLeAsymm gives you Daubechies least asymmetric wavelets, but here the filter number ranges from 4 to 10. For Daubechies wavelets, filter.number corresponds to the N of Daubechies' original paper; the wavelets become more regular as the filter.number increases, but they are all of compact support. With family equal to ``Coiflets'' the function supports filter numbers ranging from 1 to 5. Coiflets are wavelets where the scaling function also has vanishing moments. With family equal to ``LinaMayrand'', the function returns complex-valued Daubechies wavelets. For odd numbers of vanishing moments, there are symmetric complex-valued wavelets in this family, and for five or more vanishing moments there are multiple distinct complex-valued wavelets, distinguished by their (arbitrary) solution number. At present, Lina-Mayrand wavelets 3.1, 4.1, 5.1, 5.2, 5.3, and 5.4 are available in WaveThresh. Setting family equal to ``Lawton'' chooses complex-valued wavelets. The only wavelet available is the one with ``filter.number'' equal to 3. With family equal to ``LittlewoodPaley'' the Littlewood-Paley wavelet is used. The scaling function is also the same as (or at least proportional to, depending on your normalization) the Shannon scaling function, so it is an approximation to the Shannon wavelet transform. The ``filter.number'' argument has a special meaning for the Littlewood-Paley wavelets: it does not represent vanishing moments here. Instead, it controls the number of filter taps in the quadrature mirror filter: typically longer values are better, up to the length of the series. Increasing it higher than the length of the series does not usually have much effect. Note: extreme caution should be taken with the Littlewood-Paley wavelet. This implementation is pure time-domain and as such can only be thought of as an approximation to a complete Shannon/LP implementation. For example, in actuality the wavelets are NOT finite impulse response filters as with Daubechies wavelets. This means that it is possible for an infinite number of Littlewood-Paley wavelet coefficients to be nonzero. However, computers cannot store an infinite number of coefficients and some will be lost. This is most noticeable with functions with discontinuities and other inhomogeneities, but it can also happen with some smooth functions. A way to check how "bad" it can be is to transform your desired function followed immediately by the inverse transform and compare the original with the resultant sequence. The function \code{\link{compare.filters}} can be used to compare two filters. } \value{ A list is returned with five components describing the filter: \item{H}{A vector containing the filter coefficients.} \item{G}{A vector containing filter coefficients (if Lawton or Lina-Mayrand wavelets are selected, otherwise this is NULL).} \item{name}{A character string containing the name of the filter.} \item{family}{A character string containing the family of the filter.} \item{filter.number}{The filter number used to select the filter from within a family.} } \note{The (Daubechies) filter coefficients should always sum to sqrt(2). This is a useful check on their validity.
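For example, a quick (purely illustrative) numerical check in R is \code{sum(filter.select(4)$H)}, which should return a value very close to \code{sqrt(2)}, i.e. approximately 1.414214.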
} \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994, This version originally part of the cthresh release which was merged into wavethresh in Oct 2012. Original cthresh version due to Stuart Barber } \seealso{ \code{\link{wd}}, \code{\link{wr}}, \code{\link{wr.wd}}, \code{\link{accessC}}, \code{\link{accessD}}, \code{\link{compare.filters}}, \code{\link{imwd}}, \code{\link{imwr}}, \code{\link{threshold}}, \code{\link{draw}}. } \examples{ #This function is usually called by others. #However, on occasion you may wish to look at the coefficients themselves. # # look at the filter coefficients for N=4 (by default Daubechies' # least-asymmetric wavelets.) # filter.select(4) #$H: #[1] -0.07576571 -0.02963553 0.49761867 0.80373875 0.29785780 #[6] -0.09921954 -0.01260397 0.03222310 # #$G: #NULL # #$name: #[1] "Daub cmpct on least asymm N=4" # #$family: #[1] "DaubLeAsymm" # #$filter.number: #[1] 4 } \keyword{utilities} \author{Stuart Barber and G P Nason} wavethresh/man/tpwr.rd0000644000176200001440000000154214211622634014533 0ustar liggesusers\name{tpwr} \alias{tpwr} \title{Inverse tensor product 2D wavelet transform.} \usage{ tpwr(tpwdobj, verbose = FALSE) } \arguments{ \item{tpwdobj}{An object which is a list which contains the items indicated in the return value of \code{\link{tpwd}}} \item{verbose}{Whether informative messages are printed} } \description{ Performs the inverse transform to \code{\link{tpwd}}. } \details{ Performs the inverse transform to \code{\link{tpwd}}. } \value{ A matrix, or image, containing the inverse tensor product wavelet transform of the image contained in the \code{tpwd} component of the \code{tpwdobj} object. } \seealso{\code{\link{imwr}},\code{\link{tpwd}}} \examples{ data(lennon) ltpwd <- tpwd(lennon) # # now perform the inverse and compare to the original # ltpwr <- tpwr(ltpwd) sum((ltpwr - lennon)^2) # [1] 9.22802e-10 } \author{G P Nason} \keyword{math} wavethresh/man/IsEarly.default.rd0000644000176200001440000000105614211622540016526 0ustar liggesusers\name{IsEarly.default} \alias{IsEarly.default} \title{Detects whether object is from an earlier version of WaveThresh} \usage{ \method{IsEarly}{default}(x) } \arguments{ \item{x}{Object to discern} } \description{ Detects whether object is from an earlier version of WaveThresh. } \details{ The default method always returns FALSE, i.e. unless the object is of a specific type handled by a particular method then it won't be from an earlier version. } \value{ Always FALSE for the generic } \seealso{\code{\link{IsEarly}}} \author{G P Nason} \keyword{error} wavethresh/man/summary.wst.rd0000644000176200001440000000133714211622634016052 0ustar liggesusers\name{summary.wst} \alias{summary.wst} \title{Print out some basic information associated with a wst object} \usage{ \method{summary}{wst}(object, \dots) } \arguments{ \item{object}{The object to print a summary about} \item{\dots}{Other arguments} } \description{ Prints out the number of levels, the length of the original vector from which the object came, the type of wavelet filter associated with the decomposition, and the date of production. 
} \details{ Description says all } \value{ Nothing } \seealso{\code{\link{wst}}} \examples{ vwst <- wst(rnorm(32)) summary(vwst) #Levels: 5 #Length of original: 32 #Filter was: Daub cmpct on least asymm N=10 #Date: Mon Mar 8 21:56:12 2010 } \keyword{print} \author{G P Nason} wavethresh/man/TOthreshda1.rd0000644000176200001440000000362614211622634015672 0ustar liggesusers\name{TOthreshda1} \alias{TOthreshda1} \title{Data analytic wavelet thresholding routine} \usage{ TOthreshda1(ywd, alpha = 0.05, verbose = FALSE, return.threshold = FALSE) } \arguments{ \item{ywd}{The \code{\link{wd.object}} that you wish to threshold.} \item{alpha}{The smoothing parameter which is a p-value } \item{verbose}{Whether messages get printed} \item{return.threshold}{If TRUE then the threshold value gets returned rather than the actual thresholded object} } \description{ This function might be better called using the regular \code{\link{threshold}} function using the \code{op1} policy. Corresponds to the wavelet thresholding routine developed by Ogden and Parzen (1994) Data dependent wavelet thresholding in nonparametric regression with change-point applications. \emph{Tech Rep 176}, University of South Carolina, Department of Statistics. } \details{ The TOthreshda1 method operates by testing the max of each set of squared wavelet coefficients to see if it behaves as the nth order statistic of a set of independent chi^2(1) r.v.'s. If not, it is removed, and the max of the remaining subset is tested, continuing in this fashion until the max of the subset is judged not to be significant. In this situation, the level of the hypothesis tests, alpha, has default value 0.05. Note that the choice of alpha controls the smoothness of the resulting wavelet estimator -- in general, a relatively large alpha makes it easier to include coefficients, resulting in a more wiggly estimate; a smaller alpha will make it more difficult to include coefficients, yielding smoother estimates. } \value{ Returns the threshold value if \code{return.threshold==TRUE} otherwise returns the shrunk set of wavelet coefficients. } \seealso{\code{\link{threshold}},\code{\link{TOthreshda2}}, \code{\link{wd}}} \author{Todd Ogden} \keyword{smooth} wavethresh/man/AutoBasis.rd0000644000176200001440000000112414211622540015421 0ustar liggesusers\name{AutoBasis} \alias{AutoBasis} \title{Run Coifman-Wickerhauser best basis algorithm on wavelet packet object} \description{ Runs the Coifman-Wickerhauser best basis algorithm on a wavelet packet object. Packets not in the basis are replaced by vectors of NAs. Superceded by the \code{\link{MaNoVe}} functions. } \details{ Superceded by the \code{\link{MaNoVe}} functions (which run in C code). } \value{A wp class object which contains the select basis. All packets that are not in the basis get replaced by vectors of NAs. } \seealso{\code{\link{MaNoVe}}} \author{G P Nason} \keyword{math} wavethresh/man/putD.wd.rd0000644000176200001440000000724714211622634015074 0ustar liggesusers\name{putD.wd} \alias{putD.wd} \title{Puts a whole resolution level of mother wavelet coeffients into wd wavelet object.} \description{ Makes a copy of the \code{\link{wd}} object, replaces some mother wavelet coefficients data in the copy, and then returns the copy. 
} \usage{ \method{putD}{wd}(wd, level, v, boundary=FALSE, index=FALSE, \dots) } \arguments{ \item{wd}{Wavelet decomposition object into which you wish to insert the mother wavelet coefficients.} \item{level}{the resolution level at which you wish to replace the mother wavelet coefficients.} \item{v}{the replacement data; this should be of the correct length.} \item{boundary}{If \code{boundary} is \code{FALSE} then only "real" data is replaced. If boundary is \code{TRUE} then the boundary (bookkeeping) elements are replaced as well. Information about the lengths of the vectors can be found in the \code{\link{first.last}} database function and Nason and Silverman, 1994.} \item{index}{If index is \code{TRUE} then the index numbers into the 1D array where the coefficient insertion would take place are returned. If index is \code{FALSE} (default) then the modified \code{wavelet decomposition} object is returned.} \item{\dots}{any other arguments} } \details{ The function \code{\link{accessD}} obtains the mother wavelet coefficients for a particular level. The function \code{putD.wd} replaces mother wavelet coefficients at a particular resolution level and returns a modified wd object reflecting the change. The need for this function is a consequence of the pyramidal structure of Mallat's algorithm and the memory efficiency gain achieved by storing the pyramid as a linear vector. \code{putD.wd} obtains information about where the mother wavelet coefficients appear from the \code{fl.dbase} component of a \code{\link{wd.object}}, in particular the array \code{fl.dbase$first.last.d} which gives a complete specification of index numbers and offsets for \code{wd.object$D}. Note that this function is a method for the generic function \code{\link{putD}}. When the \code{\link{wd.object}} is definitely a wd class object then you only need use the generic version of this function. Note also that this function only puts information into \code{\link{wd}} class objects. To extract coefficients from a \code{\link{wd}} object you have to use the \code{\link{accessD}} function (or more precisely, the \code{\link{accessD.wd}} method). } \value{ A \code{\link{wd}} class object containing the modified mother wavelet coefficients. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{putD}}, \code{\link{wd.object}}, \code{\link{wd}}, \code{\link{accessD}}, \code{\link{first.last}} } \examples{ # # Generate an EMPTY wd object: # zero <- rep(0, 16) zerowd <- wd(zero) # # Put some random mother wavelet coefficients into the object at # resolution level 2. For the decimated wavelet transform there # are always 2^i coefficients at resolution level i. So we have to # insert 4 coefficients # mod.zerowd <- putD( zerowd, level=2, v=rnorm(4)) # # If you plot mod.zerowd you will see that there are only # coefficients at resolution level 2 where you just put the coefficients. # # Now, for a time-ordered non-decimated wavelet transform object the # procedure is exactly the same EXCEPT that there are going to be # 16 coefficients at each resolution level. I.e. # # Create empty TIME-ORDERED NON-DECIMATED wavelet transform object # zerowdS <- wd(zero, type="station") # # Now insert 16 random coefficients at resolution level 2 # mod.zerowdS <- putD(zerowdS, level=2, v=rnorm(16)) # # Once more if you plot mod.zerowdS then there will only be # coefficients at resolution level 2.
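#
# As an extra illustration (not part of the original example, and assuming
# the objects created above still exist) the inserted coefficients can be
# read back with accessD to confirm where they went:
#
accessD(mod.zerowd, level=2)	# the 4 coefficients inserted at level 2
accessD(mod.zerowd, level=3)	# other levels are still all zero
accessD(mod.zerowdS, level=2)	# the 16 inserted non-decimated coefficients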
} \keyword{manip} \author{G P Nason} wavethresh/man/GetRSSWST.rd0000644000176200001440000000640114211622540015237 0ustar liggesusers\name{GetRSSWST} \alias{GetRSSWST} \title{Computes estimate of error for function estimate. } \description{ Computes estimate of error for function estimate. Given noisy data and a threshold value this function uses Nason's 1996 two-fold cross-validation algorithm, but using packet ordered non-decimated wavelet transforms to compute two estimates of an underlying ``true'' function and uses them to compute an estimate of the error in estimating the truth. } \usage{ GetRSSWST(ndata, threshold, levels, family = "DaubLeAsymm", filter.number = 10, type = "soft", norm = l2norm, verbose = 0, InverseType = "average") } \arguments{ \item{ndata}{the noisy data. This is a vector containing the signal plus noise. The length of this vector should be a power of two.} \item{threshold}{the value of the threshold that you wish to compute the error of the estimate at} \item{levels}{the levels over which you wish the threshold value to be computed (the threshold that is used in computing the estimate and error in the estimate). See the explanation for this argument in the \code{\link{threshold.wst}} function. } \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{type}{whether to use hard or soft thresholding. See the explanation for this argument in the \code{\link{threshold.wst}} function.} \item{norm}{which measure of distance to judge the dissimilarity between the estimates. The functions \code{\link{l2norm}} and \code{\link{linfnorm}} are suitable examples.} \item{verbose}{If \code{TRUE} then informative messages are printed during the progression of the function, otherwise they are not.} \item{InverseType}{The possible options are "average" or "minent". The former uses basis averaging to form estimates of the unknown function. The "minent" function selects a basis using the Coifman and Wickerhauser, 1992 algorithm to select a basis to invert.} } \details{ This function implements the component of the cross-validation method detailed by Nason, 1996 for computing an estimate of the error between an estimate and the ``truth''. The difference here is that it uses the packet ordered non-decimated wavelet transform rather than the standard Mallat \code{\link{wd}} discrete wavelet transform. As such it is an examples of the translation-invariant denoising of Coifman and Donoho, 1995 but uses cross-validation to choose the threshold rather than SUREshrink. Note that the procedure outlined above can use \code{\link{AvBasis}} basis averaging or basis selection and inversion using the Coifman and Wickerhauser, 1992 best-basis algorithm } \value{ A real number which is estimate of the error between estimate and truth at the given threshold. } \section{RELEASE}{Version 3.6 Copyright Guy Nason 1995 } \seealso{ \code{\link{linfnorm}}, \code{\link{linfnorm}}, \code{\link{wstCV}}, \code{\link{wstCVl}}. } \examples{ # # This function performs the error estimation step for the # \code{\link{wstCV}} function and so is not intended for # user use. 
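#
# (Purely for illustration, with invented argument values: a direct call
# might look something like
#     GetRSSWST(ndata = mynoisydata, threshold = 3, levels = 3:7)
# where mynoisydata would be a noisy vector of dyadic length; in ordinary
# use the wstCV function calls this routine internally.)
#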
# } \keyword{manip} \author{G P Nason} wavethresh/man/imwr.rd0000644000176200001440000000160714211622540014513 0ustar liggesusers\name{imwr} \alias{imwr} \title{Inverse two-dimensional wavelet transform. } \description{ Perform inverse two-dimensional wavelet transform using Mallat's, 1989 algorithm. This function is generic. Particular methods exist. For the \code{\link{imwd}} class object this generic function uses \code{\link{imwr.imwd}}. For the \code{imwdc} class object this generic function uses \code{\link{imwr.imwdc}}. } \usage{ imwr(...) } \arguments{ \item{\dots}{See individual help pages for details.} } \details{ See individual method help pages for operation and examples. } \value{ A square matrix whose side length is a power of two that represents the inverse 2D wavelet transform of the input object x. } \section{RELEASE}{Version 2 Copyright Guy Nason 1993 } \seealso{ \code{\link{imwd}}, \code{\link{imwr.imwd}}, \code{\link{imwr.imwdc}}. } \keyword{nonlinear} \keyword{smooth} \author{G P Nason} wavethresh/man/print.wpst.rd0000644000176200001440000000177014211622540015666 0ustar liggesusers\name{print.wpst} \alias{print.wpst} \title{Prints out basic information about a wpst class object} \usage{ \method{print}{wpst}(x, \dots) } \arguments{ \item{x}{The wpst object that you wish to print info about} \item{\dots}{Other arguments} } \description{ Prints out basic information about a wpst class object generated by the, e.g., \code{\link{wpst}} function. \emph{Note:} stationary wavelet packet objects are now known as nondecimated wavelet packet objects. } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{wpst}}} \examples{ v <- rnorm(128) vwpst <- wpst(v) \dontrun{print(vwpst)} #Class 'wpst' : Stationary Wavelet Packet Transform Object: # ~~~ : List with 5 components with names # wpst nlevelsWT avixstart filter date # #$wpst is a coefficient vector # #Created on : Fri Mar 5 15:06:56 2010 # #summary(.): #---------- #Levels: 7 #Length of original: 128 #Filter was: Daub cmpct on least asymm N=10 #Date: Fri Mar 5 15:06:56 2010 } \author{G P Nason} \keyword{print} wavethresh/man/makegrid.rd0000644000176200001440000000634114211622540015320 0ustar liggesusers\name{makegrid} \alias{makegrid} \title{Interpolate data to a grid.} \description{ This function takes a set of univariate (x,y) data with x arbitrary in (0,1) and linearly interpolates (x,y) to an equally spaced dyadic grid. } \usage{ makegrid(t, y, gridn = 2^(floor(log(length(t)-1,2)) + 1)) } \arguments{ \item{t}{A vector of \code{x} data. Each of the entries of \code{x} must lie between 0 and 1.} \item{y}{A vector of \code{y} data. Each entry of \code{y} corresponds to the same-positioned entry in \code{x} and so\code{y} must be of the same length as \code{x}. } \item{gridn}{The number of grid points in the dyadic grid that the (x,y) gets interpolated to. By default this is the next power of two larger than the length of {x}.} } \details{ One method for performing wavelet regression on data that is not equally spaced nor of power of two length is that described in Kovac, (1997) and Kovac and Silverman, (2000). The Kovac-Silverman algorithm linearly interpolates arbitrarily spaced (x,y) data to a dyadic grid and applies wavelet shrinkage to the interpolated data. However, if one assumes that the original data obeys a signal+noise model with iid data the interpolated data will be correlated due to the interpolation. 
This fact needs to be taken into account after taking the DWT and before thresholding one realizes that each coefficient has its own variance. The Kovac-Silverman algorithm computes this variance efficiently using knowledge of the interpolation scheme. } \value{ An object of class \code{griddata}. } \section{RELEASE}{Version 3.9.6 Copyright Arne Kovac 1997 Copyright Guy Nason (help pages) 1999} \seealso{ \code{\link{accessc}}, \code{\link{irregwd}}, \code{\link{newsure}}, \code{\link{plot.irregwd}}, \code{\link{threshold.irregwd}}, } \examples{ # # Generate some values in (0,1), then sort them (for plotting) # tt <- sort(runif(100)) # # Now evaluate the \code{\link{doppler}} function and add # some noise. # yy <- doppler(tt) + rnorm(100, 0, 0.15) # # Now make the grid with this data # yygrid <- makegrid(t=tt, y=yy) # # Jolly good. Now let's take the wavelet transform of this gridded data. # Note that we have to use the \code{\link{irregwd}} function # of the gridded data as it computes the variances of the coefficients # as well as the coefficients themselves. # yyirregwd <- irregwd(yygrid) # # You might want to plot the coefficients # # If you want to see the actual coefficients you have to first convert # the class of the yyirregwd object to a wd object and then use # \code{\link{plot.wd}} like this # yyirregwd2 <- yyirregwd class(yyirregwd2) <- "wd" \dontrun{plot(yyirregwd2)} # # If you want to see the variance factors (essentially the coefficient # variances divided by the overall variance). Then just use # \code{\link{plot.irregwd}} # \dontrun{plot(yyirregwd)} # # Ok. So you've seen the coefficients. Now let's do some thresholding. # yy.thresh.sure <- threshold(yyirregwd, policy="sure", type="soft", dev=madmad) # # And now do the reconstruct # yy.wr <- wr(yy.thresh.sure) # # And you can even plot the answer on the new grid! # \dontrun{plot(yygrid$gridt, yy.wr, type="l")} # # And superimpose the original data! # \dontrun{points(tt, yy)} # # This is sort of \code{Doppler} like! } \keyword{dplot} \author{Arne Kovac} wavethresh/man/PsiJmat.rd0000644000176200001440000001212214211622540015076 0ustar liggesusers\name{PsiJmat} \alias{PsiJmat} \title{Compute discrete autocorrelation wavelets but return result in matrix form.} \description{ This function computes discrete autocorrelation wavelets using the \code{\link{PsiJ}} function but it returns the results as a matrix rather than a list object. } \usage{ PsiJmat(J, filter.number = 10, family = "DaubLeAsymm", OPLENGTH=10^7) } \arguments{ \item{J}{Discrete autocorrelation wavelets will be computed for scales -1 up to scale J. This number should be a negative integer.} \item{filter.number}{The index of the wavelet used to compute the discrete autocorrelation wavelets.} \item{family}{The family of wavelet used to compute the discrete autocorrelation wavelets.} \item{OPLENGTH}{This integer variable defines some workspace of length OPLENGTH. The code uses this workspace. If the workspace is not long enough then the routine will stop and probably tell you what OPLENGTH should be set to.} } \details{ The discrete autocorrelation wavelet values are computed using the \code{\link{PsiJ}} function. This function merely organises them into a matrix form. } \value{ A matrix containing -J rows and a number of columns less than OPLENGTH. Each row contains the values of the discrete autocorrelation wavelet for a different scale. Row one contains the scale -1 coefficients, row two contains the scale -2, and so on. The number of columns is an odd number. 
The middle position of each row is the value of the discrete autocorrelation wavelet at zero --- this is always 1. The discrete autocorrelation wavelet is symmetric about this point. \emph{Important} Apart from the central element none of the other columns line up in this way. This could be improved upon. } \references{ Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. \emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{PsiJ}} } \examples{ # # As a simple first examples we shall compute the matrix containing # the discrete autocorrelation wavelets up to scale 3. # PsiJmat(-3, filter.number=1, family="DaubExPhase") #Computing PsiJ #Took 0.25 seconds # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11] #[1,] 0.000 0.00 0.000 0.0 0.000 0.00 -0.500 1 -0.500 0.00 0.000 #[2,] 0.000 0.00 0.000 0.0 -0.250 -0.50 0.250 1 0.250 -0.50 -0.250 #[3,] -0.125 -0.25 -0.375 -0.5 -0.125 0.25 0.625 1 0.625 0.25 -0.125 # [,12] [,13] [,14] [,15] #[1,] 0.0 0.000 0.00 0.000 #[2,] 0.0 0.000 0.00 0.000 #[3,] -0.5 -0.375 -0.25 -0.125 # # Note that this contains 3 rows (since J=-3). # Each row contains the same discrete autocorrelation wavelet at different # scales and hence different resolutions. # Compare to the output given by PsiJ for the # equivalent wavelet and scales. # Note also that apart from column 8 which contains 1 (the value of the # ac wavelet at zero) none of the other columns line up. E.g. the value of # this wavelet at 1/2 is -0.5: this appears in columns 9, 10 and 12 # we could have written it differently so that they should line up. # I might do this in the future. # # # Let's compute the matrix containing the discrete autocorrelation # wavelets up to scale 6 using Daubechies N=10 least-asymmetric # wavelets. # P6mat <- PsiJmat(-6, filter.number=10, family="DaubLeAsymm") # # What is the dimension of this matrix? # dim(P6mat) #[1] 6 2395 # # Hmmm. Pretty large, so we shan't print it out. # # However, these are the ac wavelets... Therefore if we compute their # inner product we should get the same as if we used the ipndacw # function directly. 
# P6mat %*% t(P6mat) # [,1] [,2] [,3] [,4] [,5] #[1,] 1.839101e+00 3.215934e-01 4.058155e-04 8.460063e-06 4.522125e-08 #[2,] 3.215934e-01 3.035353e+00 6.425188e-01 7.947454e-04 1.683209e-05 #[3,] 4.058155e-04 6.425188e-01 6.070419e+00 1.285038e+00 1.589486e-03 #[4,] 8.460063e-06 7.947454e-04 1.285038e+00 1.214084e+01 2.570075e+00 #[5,] 4.522125e-08 1.683209e-05 1.589486e-03 2.570075e+00 2.428168e+01 #[6,] 5.161675e-10 8.941666e-08 3.366416e-05 3.178972e-03 5.140150e+00 # [,6] #[1,] 5.161675e-10 #[2,] 8.941666e-08 #[3,] 3.366416e-05 #[4,] 3.178972e-03 #[5,] 5.140150e+00 #[6,] 4.856335e+01 # # Let's check it against the ipndacw call # ipndacw(-6, filter.number=10, family="DaubLeAsymm") # -1 -2 -3 -4 -5 #-1 1.839101e+00 3.215934e-01 4.058155e-04 8.460063e-06 4.522125e-08 #-2 3.215934e-01 3.035353e+00 6.425188e-01 7.947454e-04 1.683209e-05 #-3 4.058155e-04 6.425188e-01 6.070419e+00 1.285038e+00 1.589486e-03 #-4 8.460063e-06 7.947454e-04 1.285038e+00 1.214084e+01 2.570075e+00 #-5 4.522125e-08 1.683209e-05 1.589486e-03 2.570075e+00 2.428168e+01 #-6 5.161675e-10 8.941666e-08 3.366416e-05 3.178972e-03 5.140150e+00 # -6 #-1 5.161675e-10 #-2 8.941666e-08 #-3 3.366416e-05 #-4 3.178972e-03 #-5 5.140150e+00 #-6 4.856335e+01 # # Yep, they're the same. # } \keyword{manip} \author{G P Nason} wavethresh/man/print.w2m.rd0000644000176200001440000000166114211622540015375 0ustar liggesusers\name{print.w2m} \alias{print.w2m} \title{ Print a w2m class object } \description{ These objects are the matrix representation of a nondecimated wavelet packet object } \usage{ \method{print}{w2m}(x, maxbasis = 10, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ The w2m object to print } \item{maxbasis}{ The maximum number of basis functions to report on } \item{\dots}{ Other arguments } } \details{ Prints out information about a w2m object. This function gets called during \code{\link{makewpstRO}}, and so you can see its output in the example code in that help function } \value{ None } \author{ G P Nason } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{makewpstRO}},\code{\link{wpst2m}}} \examples{ # # See example in makewpstRO # } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{print} wavethresh/man/wstCVl.rd0000644000176200001440000001151714211622634014764 0ustar liggesusers\name{wstCVl} \alias{wstCVl} \title{Performs two-fold cross-validation estimation using packet-ordered non-decimated wavelet transforms and a (vector) level-dependent threshold. } \description{ Performs Nason's 1996 two-fold cross-validation estimation using packet-ordered non-decimated wavelet transforms and a (vector) level-dependent threshold. } \usage{ wstCVl(ndata, ll = 3, type = "soft", filter.number = 10, family = "DaubLeAsymm", tol = 0.01, verbose = 0, plot.it = FALSE, norm = l2norm, InverseType = "average", uvdev = madmad) } \arguments{ \item{ndata}{the noisy data. This is a vector containing the signal plus noise. The length of this vector should be a power of two.} \item{ll}{the primary resolution for this estimation. Note that the primary resolution is \emph{problem-specific}: you have to find out which is the best value.} \item{type}{whether to use hard or soft thresholding. See the explanation for this argument in the \code{\link{threshold.wst}} function.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. 
By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{tol}{the cross-validation tolerance which decides when an estimate is sufficiently close to the truth (or estimated to be so).} \item{verbose}{If \code{TRUE} then informative messages are printed during the progression of the function, otherwise they are not.} \item{plot.it}{Whether or not to produce a plot indicating progress.} \item{norm}{which measure of distance to judge the dissimilarity between the estimates. The functions \code{\link{l2norm}} and \code{\link{linfnorm}} are suitable examples.} \item{InverseType}{The possible options are "average" or "minent". The former uses basis averaging to form estimates of the unknown function. The "minent" option selects a basis to invert using the Coifman and Wickerhauser, 1992 algorithm.} \item{uvdev}{Universal thresholding is used to generate an upper bound for the ideal threshold. This argument provides the function that computes an estimate of the variance of the noise for use with the universal threshold calculation (see \code{\link{threshold.wst}}).} } \details{ This function implements a modified version of the cross-validation method detailed by Nason, 1996 for computing an estimate of the error between an estimate and the ``truth''. The difference here is that it uses the packet-ordered non-decimated wavelet transform rather than the standard Mallat \code{wd} discrete wavelet transform. As such it is an example of the translation-invariant denoising of Coifman and Donoho, 1995 but uses cross-validation to choose the threshold rather than SUREshrink. Further, this function computes level-dependent thresholds. That is, it can compute a different threshold for each resolution level. Note that the procedure outlined above can use \code{\link{AvBasis}} basis averaging or basis selection and inversion using the Coifman and Wickerhauser, 1992 best-basis algorithm. } \value{ A list returning the results of the cross-validation algorithm. The list includes the following components: \item{ndata}{a copy of the input noisy data} \item{xvwr}{a reconstruction of the best estimate computed using this algorithm. It is the inverse (computed depending on what the InverseType argument was) of the \code{xvwrWSTt} component.} \item{xvwrWSTt}{a thresholded version of the packet-ordered non-decimated wavelet transform of the noisy data using the best threshold discovered by this cross-validation algorithm.} \item{uvt}{the universal threshold used as the upper bound for the algorithm that tries to discover the optimal cross-validation threshold. The lower bound is always zero.} \item{xvthresh}{the best threshold as discovered by cross-validation. Note that this is a vector, a level-dependent threshold with one threshold value for each resolution level. The first entry corresponds to level \code{ll}, the last entry corresponds to level \code{nlevelsWT(ndata)-1} and the entries in between correspond linearly to the levels in between. The \code{\link{wstCV}} function should be used to compute a global threshold.} \item{optres}{The results from performing the optimisation using the \code{nlminb} function from Splus. This object contains many interesting components with information about how the optimisation went.
See the \code{nlminb} help page for information. } } \section{RELEASE}{ Version 3.6 Copyright Guy Nason 1995 } \seealso{ \code{\link{GetRSSWST}}, \code{\link{linfnorm}}, \code{\link{threshold.wst}}, \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{wstCV}} } \examples{ # # Example PENDING # } \keyword{math} \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/AvBasis.wst2D.rd0000644000176200001440000000475714211622540016100 0ustar liggesusers\name{AvBasis.wst2D} \alias{AvBasis.wst2D} \title{Perform basis averaging for (packet-ordered) 2D non-decimated wavelet transform.} \description{ Perform basis averaging for (packet-ordered) 2D non-decimated wavelet transform. } \usage{ \method{AvBasis}{wst2D}(wst2D, \dots) } \arguments{ \item{wst2D}{An object of class \code{\link{wst2D}} that contains coefficients of a packet ordered 2D non-decimated wavelet transform (e.g. produced by the \code{\link{wst2D}} function).} \item{\dots}{any other arguments} } \details{ The packet-ordered 2D non-decimated wavelet transform computed by \code{\link{wst2D}} computes the coefficients of an input matrix with respect to a library of all shifts of wavelet basis functions at all scales. Here "all shifts" means all integral shifts with respect to the finest scale coefficients with shifts in both the horizontal and vertical directions, and "all scales" means all dyadic scales from 0 (the coarsest) to J-1 (the finest) where \code{2^J = n} where \code{n} is the dimension of the input matrix. As such the packet-ordered 2D non-decimated wavelet transform contains a library of all possible shifted wavelet bases. \bold{Basis averaging}. Rather than select \emph{a} basis it is often useful to preserve information from all of the bases. For example, in curve estimation, after thresholding, the coefficients are coefficients of an estimate of the truth with respect to all of the shifted basis functions. Rather than select one of them we can average over all estimates. This sometimes gives a better curve estimate and can, for example, get rid of Gibbs effects. See Coifman and Donoho (1995) for more information on how to do curve estimation using the packet ordered non-decimated wavelet transform, thresholding and basis averaging. See Lang et al. (1995) for further details of surface/image estimation using the 2D non-decimated DWT. } \value{ A square matrix of dimension \code{2^nlevelsWT} containing the average-basis ``reconstruction'' of the \code{\link{wst2D}} object. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998} \seealso{ \code{\link{wst2D}}, \code{\link{wst2D.object}} } \examples{ # # Generate some test data # #test.data <- matrix(rnorm(16), 4,4) # # Now take the 2D packet ordered DWT # #tdwst2D <- wst2D(test.data) # # Now "invert" it using basis averaging # #tdwstAB <- AvBasis(tdwst2D) # # Let's compare it to the original # #sum( (tdwstAB - test.data)^2) # # [1] 1.61215e-17 # # Very small. They're essentially the same. # } \keyword{manip} \author{G P Nason} wavethresh/man/plot.nvwp.rd0000644000176200001440000000146114211622540015502 0ustar liggesusers\name{plot.nvwp} \alias{plot.nvwp} \title{Depict wavelet packet basis specification} \usage{ \method{plot}{nvwp}(x, \dots) } \arguments{ \item{x}{The wavelet packet node vector you wish to plot, nvwp class object} \item{\dots}{Other arguments to the central plot function} } \description{ The nvwp class object (generated from \code{\link{MaNoVe.wp}} for example) contains a wavelet packet basis specification.
This function produces a graphical depiction of such a basis. } \details{ The vertical axis indicates the resolution level, the horizontal axis indicates the packet index for the finest scales. } \value{ Nothing } \seealso{\code{\link{MaNoVe.wp}},\code{\link{print.nvwp}},\code{\link{wp}}} \examples{ v <- rnorm(512) vwp <- wp(v) vnv <- MaNoVe(vwp) \dontrun{plot(vnv)} } \author{G P Nason} \keyword{hplot} wavethresh/man/threshold.wd3D.rd0000644000176200001440000001703014211622634016332 0ustar liggesusers\name{threshold.wd3D} \alias{threshold.wd3D} \title{Threshold 3D DWT object} \description{ This function provides various ways to threshold a \code{\link{wd3D}} class object. } \usage{ \method{threshold}{wd3D}(wd3D, levels = 3:(nlevelsWT(wd3D) - 1), type = "hard", policy = "universal", by.level = FALSE, value = 0, dev = var, verbose = FALSE, return.threshold = FALSE, \dots) } \arguments{ \item{wd3D}{The 3D DWT wavelet decomposition object that you wish to threshold.} \item{levels}{a vector of integers which determines which scale levels are thresholded in the decomposition. Each integer in the vector must refer to a valid level in the \code{\link{wd3D}} object supplied. This is usually any integer from 0 to \code{\link{nlevelsWT}}(wd3D)-1 inclusive. Only the levels in this vector contribute to the computation of the threshold and its application. } \item{type}{determines the type of thresholding: this can be "\code{hard}" or "\code{soft}".} \item{policy}{selects the technique by which the threshold value is selected. Each policy corresponds to a method in the literature. At present the different policies are: "\code{universal}" and "\code{manual}". The policies are described in detail below.} \item{by.level}{If FALSE then a global threshold is computed on and applied to all scale levels defined in \code{levels}. If TRUE a threshold is computed and applied separately to each scale level.} \item{value}{This argument conveys the user supplied threshold. If \code{policy="manual"} then value is the actual threshold value.} \item{dev}{this argument supplies the function to be used to compute the spread of the absolute values of the coefficients. The function supplied must return a value of spread on the variance scale (i.e. not standard deviation) such as the \code{var()} function. A popular, useful and robust alternative is the \code{\link{madmad}} function.} \item{verbose}{if TRUE then the function prints out informative messages as it progresses.} \item{return.threshold}{If this option is TRUE then the actual \emph{value} of the threshold is returned. If this option is FALSE then a thresholded version of the input is returned.} \item{\dots}{any other arguments} } \details{ This function thresholds or shrinks wavelet coefficients stored in a \code{\link{wd3D}} object and returns the coefficients in a modified \code{\link{wd3D}} object. See the seminal papers by Donoho and Johnstone for explanations about thresholding. For a gentle introduction to wavelet thresholding (or shrinkage as it is sometimes called) see Nason and Silverman, 1994. For more details on each technique see the descriptions of each method below. The basic idea of thresholding is very simple. In a signal plus noise model the wavelet transform of signal is very sparse, the wavelet transform of noise is not (in particular, if the noise is iid Gaussian then so is the noise contained in the wavelet coefficients).
Thus since the signal gets concentrated in the wavelet coefficients and the noise remains "spread" out it is "easy" to separate the signal from noise by keeping large coefficients (which correspond to signal) and deleting the small ones (which correspond to noise). However, one has to have some idea of the noise level (computed using the dev option in threshold functions). If the noise level is very large then it is possible, as usual, that no signal "sticks up" above the noise. There are many components to a successful thresholding procedure. Some components have a larger effect than others but the effect is not the same in all practical data situations. Here we give some rough practical guidance, although \emph{you must refer to the papers below when using a particular technique.} \bold{You cannot expect to get excellent performance on all signals unless you fully understand the rationale and limitations of each method below.} I am not in favour of the "black-box" approach. The thresholding functions of WaveThresh3 are not a black box: experience and judgement are required! Some issues to watch for: \describe{ \item{levels}{The default of \code{levels = 3:(wd$nlevelsWT - 1)} for the \code{levels} option most certainly does not work globally for all data problems and situations. The level at which thresholding begins (i.e. the given threshold and finer scale wavelets) is called the \emph{primary resolution} and is unique to a particular problem. In some ways choice of the primary resolution is very similar to choosing the bandwidth in kernel regression albeit on a logarithmic scale. See Hall and Patil (1995) and Hall and Nason (1997) for more information. For each data problem you need to work out which is the best primary resolution. This can be done by gaining experience at what works best, or using prior knowledge. It is possible to "automatically" choose a "best" primary resolution using cross-validation (but not in WaveThresh). Secondly, the levels argument computes and applies the threshold at the levels specified in the \code{levels} argument. It does this for all the levels specified. Sometimes, in wavelet shrinkage, the threshold is computed using only the finest scale coefficients (or more precisely the estimate of the overall noise level). If you want your threshold variance estimate only to use the finest scale coefficients (e.g. with universal thresholding) then you will have to apply the \code{threshold.wd} function twice. Once (with levels set equal to \code{\link{nlevelsWT}}(wd)-1 and with \code{return.threshold=TRUE}) to return the threshold computed on the finest scale, and then apply the threshold function with the manual option supplying the value of the previously computed threshold as the value option. } \item{by.level}{for a \code{\link{wd}} object which has come from data with correlated noise you should have a threshold computed for each resolution level. See the paper by Johnstone and Silverman, 1997.} } } \value{ An object of class \code{\link{wd3D}}. This object contains the thresholded wavelet coefficients. Note that if the return.threshold option is set to TRUE then the threshold values will be returned rather than the thresholded object. } \note{ POLICIES This section gives a brief description of the different thresholding policies available. For further details \emph{see the associated papers}. If there is no paper available then a small description is provided here. More than one policy may be good for a problem, so experiment!
They are arranged here in alphabetical order: \describe{ \item{manual}{specify a user supplied threshold using value to pass the value of the threshold. The value argument should be a vector. If it is of length 1 then it is replicated to be the same length as the \code{levels} vector, otherwise it is repeated as many times as is necessary to be the \code{levels} vector's length. In this way, different thresholds can be supplied for different levels. Note that the \code{by.level} option has no effect with this policy. } \item{universal}{See Donoho and Johnstone, 1995.} } } \section{RELEASE}{Version 3.9.6 Copyright Guy Nason 1997.} \seealso{ \code{\link{threshold}}, \code{\link{accessD.wd3D}}, \code{\link{print.wd3D}}, \code{\link{putD.wd3D}}, \code{\link{putDwd3Dcheck}}, \code{\link{summary.wd3D}}, \code{\link{threshold.wd3D}}, \code{\link{wd3D.object}}, \code{\link{wr3D}}. } \examples{ # # Generate some test data # test.data <- array(rnorm(8*8*8), dim=c(8,8,8)) testwd3D <- wd3D(test.data) # # Now let's threshold # testwd3DT <- threshold(testwd3D, levels=1:2) # # That's it, one can apply wr3D now to reconstruct # if you like! # } \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/wpstREGR.rd0000644000176200001440000000300014211622634015203 0ustar liggesusers\name{wpstREGR} \alias{wpstREGR} \title{Construct data frame using new time series using information from a previously constructed wpstRO object } \description{The \code{\link{makewpstRO}} function takes two time series, performs a nondecimated wavelet packet transform with the "dependent" variable one, stores the "best" packets (those that individually correlate with the response series) and returns the data frame that contains the response and the best packets. The idea is that the user then performs some kind of modelling between response and packets. This function takes a new "dependent" series and returns the best packets in a new data frame in the same format as the old one. The idea is that the model and the new data frame can be used together to predict new values for the response } \usage{ wpstREGR(newTS, wpstRO) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{newTS}{The new "dependent" time series } \item{wpstRO}{The previously constructed wpstRO object made by \code{\link{makewpstRO}} } } \details{ Description says it all } \value{ New values of the response time series } \references{ See reference to Nason and Sapatinas paper in the help for \code{\link{makewpstRO}}. } \author{ G P Nason } \seealso{\code{\link{makewpstRO}}, \code{\link{wpst}} } \examples{ # # See extended example in makewpstRO help, includes example of using this fn # } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{multivariate} \keyword{ts} wavethresh/man/make.dwwt.rd0000644000176200001440000000213314211622540015431 0ustar liggesusers\name{make.dwwt} \alias{make.dwwt} \title{Compute diagonal of the matrix WWT} \description{ Computes the values which specify the covariance structure of complex-valued wavelet coefficients. 
} \usage{ make.dwwt(nlevels, filter.number = 3.1, family = "LinaMayrand") } \arguments{ \item{nlevels}{The number of levels of the wavelet decomposition.} \item{filter.number, family}{Specifies the wavelet used; see \code{\link{filter.select}} for more details.} } \details{ If real-valued signals are decomposed by a discrete wavelet transform using a complex-valued Daubechies wavelet (as described by Lina & Mayrand (1995)), the resulting coefficients are complex-valued. The covariance structure of these coefficients is determined by the diagonal entries of the matrix \eqn{WW^T}. This function computes these values for use in shrinkage. For more details, see Barber & Nason (2004). } \value{ A vector giving the diagonal elements of \eqn{WW^T}. } \section{RELEASE}{ Part of the CThresh addon to WaveThresh. Copyright Stuart Barber and Guy Nason 2004. } \seealso{ \code{\link{cthresh}} } \keyword{manip} \author{Stuart Barber} wavethresh/man/LocalSpec.wst.rd0000644000176200001440000000135514211622540016216 0ustar liggesusers\name{LocalSpec.wst} \alias{LocalSpec.wst} \title{Obsolete function (use ewspec)} \usage{ \method{LocalSpec}{wst}(wst, \dots) } \arguments{ \item{wst}{The wst object to perform local spectral analysis on} \item{\dots}{Other arguments to \code{\link{LocalSpec.wd}}. } } \description{ This function computes a local spectrum as described in Nason and Silverman (1995). However, the function is obsolete and superseded by \code{\link{ewspec}}. } \details{ Description says it all. However, this function converts the \code{\link{wst.object}} object to a nondecimated \code{\link{wd.object}} and then calls \code{\link{LocalSpec.wd}}. } \value{ Same value as \code{\link{LocalSpec.wd}}. } \seealso{\code{\link{ewspec}}} \author{G P Nason} \keyword{ts} wavethresh/man/putD.wst.rd0000644000176200001440000000504314211622634015267 0ustar liggesusers\name{putD.wst} \alias{putD.wst} \title{Puts a whole resolution level of mother wavelet coefficients into wst wavelet object.} \description{ Makes a copy of the \code{\link{wst}} object, replaces a whole resolution level of mother wavelet coefficients in the copy, and then returns the copy. } \usage{ \method{putD}{wst}(wst, level, value, \dots) } \arguments{ \item{wst}{Packet-ordered non-decimated wavelet object into which you wish to insert the mother wavelet coefficients.} \item{level}{the resolution level at which you wish to replace the mother wavelet coefficients.} \item{value}{the replacement data; this should be of the correct length.} \item{\dots}{any other arguments}} \details{ The function \code{\link{accessD.wst}} obtains the mother wavelet coefficients for a particular level. The function \code{putD.wst} replaces mother wavelet coefficients at a particular resolution level and returns a modified wst object reflecting the change. For the non-decimated wavelet transforms the number of coefficients at each resolution level is the same and equal to \code{2^nlevelsWT} where \code{nlevelsWT} is the number of levels in the \code{\link{wst.object}}. The number of coefficients at each resolution level is also, of course, the number of data points used to initially form the \code{wst} object in the first place. Use the \code{\link{accessD.wst}} function to extract whole resolution levels of mother wavelet coefficients. Use \code{\link{accessC.wst}} and \code{\link{putC.wst}} to extract/insert whole resolution levels of father wavelet coefficients.
Use the \code{\link{getpacket.wst}} and \code{\link{putpacket.wst}} functions to extract/insert packets of coefficients into a packet-ordered non-decimated wavelet object. } \value{A \code{\link{wst}} class object containing the modified mother wavelet coefficients. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wst.object}}, \code{\link{wst}}, \code{\link{putD}}, \code{\link{accessD.wst}}, \code{\link{putC.wst}}, \code{\link{getpacket.wst}}, \code{\link{putpacket.wst}}. } \examples{ # # Generate an EMPTY wst object: # zero <- rep(0, 16) zerowst <- wst(zero) # # Put some random mother wavelet coefficients into the object at # resolution level 2. For the non-decimated wavelet transform there # are always 16 coefficients at every resolution level. # mod.zerowst <- putD( zerowst, level=2, v=rnorm(16)) # # If you plot mod.zerowst you will see that there are only # coefficients at resolution level 2 where you just put the coefficients. } \keyword{manip} \author{G P Nason} wavethresh/man/print.imwdc.rd0000644000176200001440000000320614211622540015770 0ustar liggesusers\name{print.imwdc} \alias{print.imwdc} \title{Print out information about an imwdc object in readable form. } \description{ This function prints out information about an \code{\link{imwdc.object}} in a nice human-readable form. Note that this function is automatically called by SPlus whenever the name of an \code{\link{imwdc.object}} is typed or whenever such an object is returned to the top level of the S interpreter. } \usage{ \method{print}{imwdc}(x, ...) } \arguments{ \item{x}{An object of class imwdc that you wish to print out.} \item{\dots}{This argument actually does nothing in this function! } } \details{ Prints out information about \code{imwdc} objects in nice readable format. } \value{ The last thing this function does is call \code{\link{summary.imwdc}} so the return value is whatever is returned by this function. } \section{RELEASE}{Version 2.2 Copyright Guy Nason 1994 } \seealso{ \code{\link{imwdc.object}}, \code{\link{summary.imwdc}}. } \examples{ # # Generate an imwd object. # tmp <- imwd(matrix(0, nrow=32, ncol=32)) # # Now get R to use print.imwd # tmp # Class 'imwd' : Discrete Image Wavelet Transform Object: # ~~~~ : List with 27 components with names # nlevelsWT fl.dbase filter type bc date w4L4 w4L1 w4L2 w4L3 # w3L4 w3L1 w3L2 w3L3 w2L4 w2L1 w2L2 w2L3 w1L4 w1L1 w1L2 w1L3 w0L4 w0L1 # w0L2 w0L3 w0Lconstant # # $ wNLx are LONG coefficient vectors ! # # summary(.): # ---------- # UNcompressed image wavelet decomposition structure # Levels: 5 # Original image was 32 x 32 pixels. # Filter was: Daub cmpct on least asymm N=10 # Boundary handling: periodic } \keyword{utilities} \author{G P Nason} wavethresh/man/imwd.rd0000644000176200001440000001167714211622540014505 0ustar liggesusers\name{imwd} \alias{imwd} \title{Two-dimensional wavelet transform (decomposition). } \description{ This function can perform two types of two-dimensional discrete wavelet transform (DWT). The standard transform (\code{type="wavelet"}) computes the 2D DWT according to Mallat's pyramidal algorithm (Mallat, 1989). The spatially ordered non-decimated 2D DWT (NDWT) (\code{type="station"}) contains all possible spatially shifted versions of the DWT. The order of computation of the DWT is O(n), and it is O(n log n) for the NDWT if n is the number of pixels. 
} \usage{ imwd(image, filter.number=10, family="DaubLeAsymm", type="wavelet", bc="periodic", RetFather=TRUE, verbose=FALSE) } \arguments{ \item{image}{A square matrix containing the image data you wish to decompose. The sidelength of this matrix must be a power of 2.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{type}{specifies the type of wavelet transform. This can be "wavelet" (default) in which case the standard 2D DWT is performed (as in previous releases of WaveThresh). If type is "station" then the 2D spatially-ordered non-decimated DWT is performed. At present, only periodic boundary conditions can be used with the 2D spatially ordered non-decimated wavelet transform.} \item{bc}{specifies the boundary handling. If bc=="periodic" (the default) then the function you decompose is assumed to be periodic on its interval of definition; if bc=="symmetric" then the function beyond its boundaries is assumed to be a symmetric reflection of the function in the boundary. The symmetric option was the implicit default in releases prior to 2.2. Note that only periodic boundary conditions are valid for the 2D spatially-ordered non-decimated wavelet transform.} \item{RetFather}{If \code{TRUE} then this argument causes the scaling function coefficients at each resolution level to be returned as well as the wavelet coefficients. If \code{FALSE} then no scaling function coefficients are returned. The opportunity of returning father wavelet coefficients has been added since previous versions of WaveThresh.} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so it is turned off by default.} } \details{ The 2D algorithm is essentially the application of many 1D filters. First, the columns are attacked with the smoothing (H) and bandpass (G) filters, and the rows of each of these resultant images are attacked again with each of G and H; this results in 4 images. Three of them, GG, GH, and HG correspond to the highest resolution wavelet coefficients. The HH image is a smoothed version of the original and can be further attacked in exactly the same way as the original image to obtain GG(HH), GH(HH), and HG(HH), the wavelet coefficients at the second highest resolution level and HH(HH) the twice-smoothed image, which then goes on to be further attacked. If \code{RetFather=TRUE} then the results of the HH smooth (the scaling function coefficients) are returned additionally. There are now two methods of handling "boundary problems". If you know that your function is periodic (on its interval) then use the bc="periodic" option; if you think that the function is a symmetric reflection about each boundary then use bc="symmetric". If you don't know then it is wise to experiment with both methods; in any case, if you don't have very much data don't infer too much about your decomposition! If you have loads of data then don't worry too much about the boundaries. It can be easier to interpret the wavelet coefficients from a bc="periodic" decomposition, so that is now the default. The spatially-ordered non-decimated DWT contains all spatial (toroidal circular) shifts of the standard DWT.
The standard DWT is orthogonal; the spatially-ordered non-decimated transform is most definitely not. This has the added disadvantage that the non-decimated wavelet coefficients are correlated, even if you supply independent normal noise. This is unlike the standard DWT where the coefficients are independent (for normal noise). The two-dimensional packet-ordered non-decimated discrete wavelet transform is computed by the \code{\link{wst2D}} function. } \value{ An object of class \code{\link{imwd.object}} containing the two-dimensional wavelet transform (possibly spatially-ordered non-decimated). } \section{RELEASE}{Version 3.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wd}}, \code{\link{imwd.object}}, \code{\link{filter.select}} } \examples{ data(lennon) # # Let's use the lennon test image # \dontrun{image(lennon)} # # Now let's do the 2D discrete wavelet transform # lwd <- imwd(lennon) # # Let's look at the coefficients # \dontrun{plot(lwd)} } \author{G P Nason} \keyword{smooth} wavethresh/man/nlevelsWT.default.rd0000644000176200001440000000260114211622540017076 0ustar liggesusers\name{nlevelsWT.default} \alias{nlevelsWT.default} \title{Returns number of levels associated with an object} \description{ This function returns the number of scale levels associated with either a wavelet type object or an atomic object. } \usage{ \method{nlevelsWT}{default}(object, \dots) } \arguments{ \item{object}{An object for which you wish to determine how many levels it has or is associated with.} \item{\dots}{any other arguments} } \details{ This function first checks to see whether the input object has a component called nlevelsWT. If it does then it returns the value of this component. If it does not then it takes the length of the object and then uses the \code{\link{IsPowerOfTwo}} function to return the power of two which equals the length (if any) or NA if the length of the object is not a power of two. } \value{ The number of resolution (scale) levels associated with the object. } \author{Version 3.6.0 Copyright Guy Nason 1995 } \seealso{ \code{\link{nlevelsWT}} } \examples{ # # Generate some test data # test.data <- example.1()$y # # Now, this vector is 512 elements long. What number of levels would any # wavelet object be that was associated with this vector? # nlevelsWT(test.data) # [1] 9 # # I.e. 2^9=512. Let's check by taking the wavelet transform of the # test data and seeing how many levels it actually has # nlevelsWT(wd(test.data)) # [1] 9 } \keyword{arith} wavethresh/man/mwr.rd0000644000176200001440000000566214211622540014347 0ustar liggesusers\name{mwr} \alias{mwr} \title{Multiple discrete wavelet transform (reconstruction). } \description{ This function performs the reconstruction stage of Mallat's pyramid algorithm adapted for multiple wavelets (see Xia et al. (1996)), i.e. the discrete inverse \emph{multiple} wavelet transform. } \usage{ mwr(mwd, prefilter.type = mwd$prefilter, verbose = FALSE, start.level = 0, returnC = FALSE) } \arguments{ \item{mwd}{A multiple wavelet decomposition object as returned by \code{\link{mwd}}.} \item{prefilter.type}{Usually best not to change this (i.e. not to use a different prefilter on the reconstruction to the one used on decomposition).} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so it is turned off by default.} \item{start.level}{The level you wish to start reconstruction at.
This is usually the first (level 0).} \item{returnC}{If this is FALSE then a vector of the same length as the argument data supplied to the function \code{\link{mwd}} (that constructed the supplied \code{\link{mwd.object}}) is returned, i.e. the reconstructed data. If TRUE then the last level (highest resolution) C coefficients are returned in matrix form. This matrix has not been postprocessed. } } \details{ The code implements Mallat's pyramid algorithm adapted for multiple wavelet decompositions (Xia et al. 1996). In the reconstruction the quadrature mirror filters G and H are supplied with C0 and D0, D1, ... D(J-1) (the wavelet coefficients) and rebuild C1,..., CJ. The matrix CJ is postprocessed which returns the full reconstruction. If the \code{\link{mwd.object}} was obtained directly from \code{\link{mwd}} then the original function can be reconstructed exactly. Usually, the \code{\link{mwd.object}} has been modified in some way, for example, some coefficients set to zero by \code{\link{threshold}}. \code{mwr} then reconstructs the function with that set of wavelet coefficients. See also Downie and Silverman, 1998. } \value{ Either a vector containing the final reconstruction or a matrix containing unpostprocessed coefficients. } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1996)} \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ # # Decompose and then exactly reconstruct test.data # test.data <- rnorm(128) tdecomp <- mwd(test.data) trecons <- mwr(tdecomp) # # Look at accuracy of reconstruction max(abs(trecons - test.data)) #[1] 2.266631e-12 # # See also the examples of using \code{\link{wr}} or mwr in # the \code{examples} section of # the help for \code{\link{threshold.mwd}}. } \keyword{manip} \author{Tim Downie} wavethresh/man/imwdc.object.rd0000644000176200001440000001140114211622540016100 0ustar liggesusers\name{imwdc.object} \alias{imwdc.object} \title{Two-dimensional compressed wavelet decomposition objects.} \description{ These are objects of class \code{imwdc}. They represent a decomposition of an image with respect to a two-dimensional wavelet basis. } \details{ In previous releases the original image was stored as the "original" component of an imwd object. This is not done now as the resulting objects were excessively large. To uncompress this class of object back into an object of class \code{\link{imwd.object}} use the \code{\link{uncompress.imwdc}} function. } \value{ The following components must be included in a legitimate `imwdc' object. \item{nlevelsWT}{number of levels in wavelet decomposition. If you raise 2 to the power of nlevels then you get the dimension of the image that you originally started with. } \item{type}{If \code{type="wavelet"} then the image was decomposed according to the 2D Mallat pyramidal algorithm. If \code{type="station"} then the image was decomposed using the 2D spatially ordered non-decimated wavelet transform.} \item{fl.dbase}{The first last database associated with the decomposition. For images, this list is not very useful as each level's components are stored as list components, rather than being packaged up in a single vector as in the 1D case.
Nevertheless the internals still need to know about fl.dbase to get the computations correct. See the help for \code{\link{first.last}} if you are a masochist. } \item{filter}{A filter object as returned by the \code{\link{filter.select}} function. This component records the filter used in the decomposition. The reconstruction routines use this component to find out what filter to use in reconstruction. } \item{wNLx}{The object will probably contain many components with names of this form. These are all the wavelet coefficients of the decomposition. In "wNLx" the "N" refers to the level number and the "x" refers to the direction of the coefficients with "1" being horizontal, "2" being vertical and "3" being diagonal. Note that imwdc objects do not contain scaling function coefficients. This would negate the point of having a compressed object. Each vector stores its coefficients using an object of class compressed, i.e. the vector is run-length encoded on zeroes. Note that the levels should be in numerically decreasing order, so if nlevelsWT is 5, then there will be w5L1, w5L2, w5L3 first, then down to w1L1, w1L2, and w1L3. Note that these coefficients store their data according to the \code{\link{first.last}} database \code{fl.dbase$first.last.d}, so refer to them using this. Note that if \code{type="wavelet"} then images at level N are subimages of side length \code{2^N} pixels. If the type component is \code{"station"} then each coefficient subimage is of the same dimension as the input image used to create this object.} \item{w0Lconstant}{This is the coefficient of the bottom level scaling function coefficient. So for examples, if you used Haar wavelets this would be the sample mean of the data (scaled by some factor depending on the number of levels, nlevelsWT).} \item{bc}{This component details how the boundaries were treated in the decomposition.} } \section{GENERATION}{ This class of objects is returned from the \code{\link{threshold.imwd}} function to represent a thresholded two-dimensional wavelet decomposition of a function. Some other functions return an object of class imwdc. } \section{METHODS}{ The imwd class of objects has methods for the following generic functions: \code{\link{draw}}, \code{\link{imwr}}, \code{\link{nullevels}}, \code{\link{plot}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{threshold.imwdc}}. } \section{RELEASE}{ Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{imwd}} \code{\link{imwd.object}}, \code{\link{threshold.imwd}}, \code{\link{uncompress.imwdc}}. } \examples{ # # Perform the standard two-dimensional DWT # on the lennon image. # data(lennon) lwd <- imwd(lennon) # # Now let's see how many horizontal detail coefficients there are at # scale 6 # length(lwd$w6L1) # [1] 4096 # # So the horizontal detail ``image'' at scale contains 64x64=4096 coefficients. # A lot! # # Now, suppose we threshold this # two-dimensional wavelet decomposition object # lwdT <- threshold(lwd) # # First of all. What is the class of the detail coefficients now? # class(lwdT$w6L1) # [1] "compressed" # # Aha. So this set of coefficients got compressed using the # compress.default function. # # How many coefficients are being stored here? # lwdT$w6L1 # $position: # [1] 173 2829 2832 2846 # # $values: # [1] 141.5455 -190.2810 -194.5714 -177.1791 # # $original.length: # [1] 4096 # # attr(, "class"): # [1] "compressed" # # Wow! Only 4 coefficients are not zero. Wicked compression! 
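#
# (An additional illustrative sketch, not part of the original example;
# the object name lwdU below is hypothetical.)  As noted in the Details
# section above, a compressed imwdc object can be expanded back into a
# full imwd object with the uncompress generic:
#
lwdU <- uncompress(lwdT)
class(lwdU)
#
# This should report "imwd": the coefficient vectors are stored
# uncompressed again (mostly zeroes after the thresholding above).
#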
} \keyword{classes} \keyword{smooth} \author{G P Nason} wavethresh/man/IsPowerOfTwo.rd0000644000176200001440000000173514211622540016106 0ustar liggesusers\name{IsPowerOfTwo} \alias{IsPowerOfTwo} \title{Decides whether vector elements are integral powers of two (returns NA if not). } \description{ This function checks to see whether its input is a power of two. If it is then it returns that power otherwise it returns NA. } \usage{ IsPowerOfTwo(n) } \arguments{ \item{n}{Vector of numbers that are to be checked to see whether they are powers of two.} } \details{ Function takes the log of the input, divides this by log(2) and if the result is integral then it knows the input is a true power of two. } \value{ If \code{n} is a power of two, then the power is returned otherwise \code{NA} is returned. } \section{RELEASE}{Version 3.6.0 Copyright Guy Nason 1995 } \seealso{ \code{\link{nlevelsWT.default}}. } \examples{ # # Try and see whether 1,2,3 or 4 are powers of two! # IsPowerOfTwo(1:4) # [1] 0 1 NA 2 # # Yes, 1, 2 and 4 are the 0th, 1st and 2nd powers of 2. However, 3 is not an # integral power of two. } \keyword{arith} \author{G P Nason} wavethresh/man/linfnorm.rd0000644000176200001440000000177514211622540015367 0ustar liggesusers\name{linfnorm} \alias{linfnorm} \title{Compute L infinity distance between two vectors of numbers. } \description{ Compute L infinity distance between two vectors of numbers (maximum absolute difference between two vectors). } \usage{ linfnorm(u,v) } \arguments{ \item{u}{first vector of numbers} \item{v}{second vector of numbers} } \details{ Function simply computes the L infinity distance between two vectors and is implemented as \code{max(abs(u-v))}. } \value{ A real number which is the L infinity distance between two vectors. } \note{ This function would probably be more accurate if it used the Splus function \code{vecnorm}.} \section{RELEASE}{Version 3.6 Copyright Guy Nason 1995 } \seealso{ \code{\link{l2norm}}, \code{\link{wstCV}}, \code{\link{wstCVl}}. } \examples{ # # What is the L infinity norm between the following sets of vectors # p <- c(1,2,3,4,5) q <- c(1,2,3,4,5) r <- c(2,3,4,5,6) linfnorm(p,q) # [1] 0 linfnorm(q,r) # [1] 1 linfnorm(r,p) # [1] 1 } \keyword{algebra} \author{G P Nason} wavethresh/man/imwr.imwd.rd0000644000176200001440000000456714211622540015460 0ustar liggesusers\name{imwr.imwd} \alias{imwr.imwd} \title{Inverse two-dimensional discrete wavelet transform.} \description{ This function performs the reconstruction stage of Mallat's pyramid algorithm (i.e. the inverse discrete wavelet transform) for images. } \usage{ \method{imwr}{imwd}(imwd, bc=imwd$bc, verbose=FALSE, \dots) } \arguments{ \item{imwd}{An object of class `\code{\link{imwd}}'. This type of object is returned by `\code{\link{imwd}}'.} \item{bc}{This argument specifies the boundary handling; it is best left to be the boundary handling specified by that in the supplied imwd (as is the default).} \item{verbose}{If this argument is TRUE then informative messages are printed detailing the computations to be performed.} \item{\dots}{any other arguments} } \details{ Details of the algorithm are to be found in Mallat (1989). Similarly to the decomposition function \code{\link{imwd}}, the inverse algorithm works by applying many 1D reconstruction algorithms to the coefficients. The filters in these 1D reconstructions are incorporated in the supplied \code{\link{imwd.object}} and originally created by the \code{\link{filter.select}} function in WaveThresh3.
This function is a method for the generic function \code{\link{imwr}} for class \code{\link{imwd.object}}. It can be invoked by calling \code{\link{imwr}} for an object of the appropriate class, or directly by calling imwr.imwd regardless of the class of the object. } \value{ A matrix, of dimension determined by the original data set supplied to the initial decomposition (more precisely, determined by the \code{\link{nlevelsWT}} component of the \code{\link{imwd.object}}). This matrix is the highest resolution level of the reconstruction. If an \code{\link{imwd}} two-dimensional wavelet transform is followed immediately by an \code{\link{imwr}} inverse two-dimensional wavelet transform then the returned matrix will be exactly the same as the original image. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{imwd}}, \code{\link{imwd.object}}, \code{\link{imwr}}. } \examples{ # # Do a decomposition, then exact reconstruction # Look at the error # test.image <- matrix(rnorm(32*32), nrow=32) # # Test image is just some sort of square matrix whose side length # is a power of two. # max( abs(imwr(imwd(test.image)) - test.image)) # [1] 1.014611e-11 } \keyword{nonlinear} \keyword{smooth} \author{G P Nason} wavethresh/man/wr3D.rd0000644000176200001440000000176014211622634014360 0ustar liggesusers\name{wr3D} \alias{wr3D} \title{Inverse DWT for 3D DWT object. } \description{ Performs the inverse DWT for \code{\link{wd3D.object}}, i.e. 3D DWT objects. } \usage{ wr3D(obj) } \arguments{ \item{obj}{A \code{\link{wd3D.object}} 3D DWT object as returned by \code{\link{wd3D}}. } } \details{ The code implements a 3D version of Mallat's inverse pyramid algorithm. } \value{ A 3D array containing the inverse 3D DWT of obj. } \section{RELEASE}{Version 3.9.6 Copyright Guy Nason 1997} \seealso{ \code{\link{wr}}, \code{\link{accessD.wd3D}}, \code{\link{print.wd3D}}, \code{\link{putD.wd3D}}, \code{\link{putDwd3Dcheck}}, \code{\link{summary.wd3D}}, \code{\link{threshold.wd3D}}, \code{\link{wd3D}}, \code{\link{wd3D.object}}. } \examples{ # # Now let's take the object generated by the last stage in the EXAMPLES # section of threshold.wd3D and invert it! # #testwr <- wr3D(testwd3DT) # # You'll find that testwr is an array of dimension 8x8x8! # } \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/Whistory.rd0000644000176200001440000000155014211622634015366 0ustar liggesusers\name{Whistory} \alias{Whistory} \usage{ Whistory(\dots) } \arguments{ \item{\dots}{Arguments to pass to method} } \title{Obsolete function that supposedly detailed the history of an object} \description{ The original idea behind this obsolete function was to interrogate an object and return the modifications that had been successively applied to the function. The reason for this was that after a long data analysis session one would end up with a whole set of, e.g., thresholded or otherwise modified objects and it would have been convenient for each object not only to store its current value but also the history of how it got to be that value. } \details{ Description says all } \value{ No return value, although the function was meant to print out a list of times and dates when the object was modified.
} \seealso{\code{\link{Whistory.wst}}} \author{G P Nason} \keyword{utilities} wavethresh/man/conbar.rd0000644000176200001440000000503614211622540015001 0ustar liggesusers\name{conbar} \alias{conbar} \title{Performs inverse DWT reconstruction step} \usage{ conbar(c.in, d.in, filter) } \arguments{ \item{c.in}{The father wavelet coefficients that you wish to reconstruct in this level's convolution.} \item{d.in}{The mother wavelet coefficients that you wish to reconstruct in this level's convolution.} \item{filter}{A given filter that you wish to use in the level reconstruction. This should be the output from the \code{\link{filter.select}} function.} } \description{ Wrapper to the C function \code{conbar} which is the main function in WaveThresh to do filter convolution/reconstruction with data. Although users use the \code{\link{wr}} function to perform a complete inverse discrete wavelet transform (DWT) this function repeatedly uses the \code{conbar} routine, once for each level to reconstruct the next finest level. The C \code{conbar} routine is possibly the most frequently utilized by WaveThresh. } \details{ The \code{\link{wr}} function performs the inverse wavelet transform on an \code{\link{wd.object}} class object. Internally, the \code{\link{wr}} function uses the C \code{conbar} function. Other functions also make use of \code{conbar} and some R functions also would benefit from using the fast C code of the \code{conbar} reconstruction hence this WaveThresh function. Some of the other functions that use conbar are listed in the SEE ALSO section. Many other functions call C code that then uses the C version of \code{conbar}. } \value{ A vector containing the reconstructed coefficients. } \seealso{ \code{\link{av.basis}} \code{\link{InvBasis.wp}} \code{\link{wr}} } \examples{ # # Let's generate some test data, just some 32 normal variates. # v <- rnorm(32) # # Now take the wavelet transform with default filter arguments (which # are filter.number=10, family="DaubLeAsymm") # vwd <- wd(v) # # Now, let's take an arbitrary level, say 2, and reconstruct level 3 # scaling function coefficients # c.in <- accessC(vwd, lev=2) d.in <- accessD(vwd, lev=2) # conbar(c.in, d.in, filter.select(filter.number=10, family="DaubLeAsymm")) #[1] -0.50368115 0.04738620 -0.90331807 1.08497622 0.90490528 0.06252717 #[7] 2.55894899 -1.26067508 # # Ok, this was the pure reconstruction from using only level 2 information. # # Let's check this against the "original" level 3 coefficients (which get # stored on the decomposition step in wd) # accessC(vwd, lev=3) #[1] -0.50368115 0.04738620 -0.90331807 1.08497622 0.90490528 0.06252717 #[7] 2.55894899 -1.26067508 # # Yep, the same numbers! # } \author{G P Nason} \keyword{math} wavethresh/man/threshold.imwdc.rd0000644000176200001440000000337714211622634016645 0ustar liggesusers\name{threshold.imwdc} \alias{threshold.imwdc} \title{Threshold two-dimensional compressed wavelet decomposition object} \description{ This function provides various ways to threshold a \code{imwdc} class object. } \usage{ \method{threshold}{imwdc}(imwdc, verbose=FALSE, ...) 
} \arguments{ \item{imwdc}{The two-dimensional compressed wavelet decomposition object that you wish to threshold.} \item{verbose}{if TRUE then the function prints out informative messages as it progresses.} \item{\dots}{other arguments passed to the \code{\link{threshold.imwd}} function to control the thresholding characteristics such as policy, type of thresholding etc.} } \details{ This function performs exactly the same function as \code{\link{threshold.imwd}} except it accepts objects of class \code{imwdc} rather than \code{imwd}. Indeed, this function physically calls the \code{\link{threshold.imwd}} function after using the \code{\link{uncompress}} function to convert the input \code{imwdc} object into an \code{\link{imwd}} object. } \value{ An object of class \code{imwdc} if the compression option is supplied and set to TRUE, otherwise an \code{\link{imwd}} object is returned. In either case the returned object contains the thresholded coefficients. Note that if the \code{return.threshold} option is set to TRUE then the threshold values will be returned rather than the thresholded object. } \references{The FDR code segments were kindly donated by Felix Abramovich. } \section{RELEASE}{Version 3.6 Copyright Guy Nason and others 1997 } \seealso{ \code{\link{imwd}}, \code{\link{imwd.object}}, \code{\link{imwdc.object}}, \code{\link{threshold}}, \code{\link{uncompress}}. } \examples{ # # See examples in \code{\link{threshold.imwd}}. # } \keyword{nonlinear} \keyword{smooth} \author{G P Nason} wavethresh/man/print.wd.rd0000644000176200001440000000307114211622540015277 0ustar liggesusers\name{print.wd} \alias{print.wd} \title{Print out information about a wd object in readable form. } \description{ This function prints out information about a \code{\link{wd.object}} in a nice human-readable form. Note that this function is automatically called by SPlus whenever the name of a \code{\link{wd.object}} is typed or whenever such an object is returned to the top level of the S interpreter. } \usage{ \method{print}{wd}(x, ...) } \arguments{ \item{x}{An object of class \code{\link{wd}} that you wish to print out.} \item{\dots}{This argument actually does nothing in this function!} } \details{ Prints out information about \code{\link{wd}} objects in nice readable format. } \value{ The last thing this function does is call \code{\link{summary.wd}} so the return value is whatever is returned by this function.} \section{RELEASE}{Version 3.0 Copyright Guy Nason 1994 } \seealso{ \code{\link{wd.object}}, \code{\link{summary.wd}}. } \examples{ # # Generate a wd object. # tmp <- wd(rnorm(32)) # # Now get R to use print.wd # tmp # Class 'wd' : Discrete Wavelet Transform Object: # ~~ : List with 8 components with names # C D nlevelsWT fl.dbase filter type bc date # # $ C and $ D are LONG coefficient vectors ! # # Created on : Fri Oct 23 19:56:00 1998 # Type of decomposition: wavelet # # summary(.): # ---------- # Levels: 5 # Length of original: 32 # Filter was: Daub cmpct on least asymm N=10 # Boundary handling: periodic # Transform type: wavelet # Date: Fri Oct 23 19:56:00 1998 # # } \keyword{print} \author{G P Nason} wavethresh/man/accessD.wd3D.rd0000644000176200001440000000616314211622540015704 0ustar liggesusers\name{accessD.wd3D} \alias{accessD.wd3D} \title{Get wavelet coefficients from 3D wavelet object} \description{ This function extracts and returns arrays of wavelet coefficients, corresponding to a particular resolution level, from a \code{\link{wd3D}} wavelet decomposition object.
The pyramid of coefficients in a wavelet decomposition (returned from the \code{\link{wd3D}} function, say) is packed into a single array in \code{WaveThresh3}. } \usage{ \method{accessD}{wd3D}(obj, level = nlevelsWT(obj)-1, block, \dots) } \arguments{ \item{obj}{3D Wavelet decomposition object from which you wish to extract the wavelet coefficients.} \item{level}{The resolution level at which you wish to extract coefficients. The minimum level you can enter is 0, the largest is \code{nlevelsWT(obj)-1}, one less than the number of levels stored in the \code{obj} object.} \item{block}{if block is missing then a list containing all of the wavelet coefficient blocks GGG, GGH, GHG, GHH, HGG, HGH, HHG (and HHH, if level=0) is returned. Otherwise block should be one of the character strings GGG, GGH, GHG, GHH, HGG, HGH, HHG and then only that sub-block is returned from the resolution level specified.} \item{\dots}{any other arguments} } \details{ The need for this function is a consequence of the pyramidal structure of Mallat's algorithm and the memory efficiency gain achieved by storing the pyramid as an array. Note that this function is a method for the generic function \code{\link{accessD}}. } \value{ If the block is missing then a list is returned containing all the sub-blocks of coefficients for the specified resolution \code{level}. Otherwise the block character string specifies which sub-block of coefficients to return. } \section{RELEASE}{Version 3.9.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{accessD}}, \code{\link{print.wd3D}}, \code{\link{putD.wd3D}}, \code{\link{putDwd3Dcheck}}, \code{\link{summary.wd3D}}, \code{\link{threshold.wd3D}}, \code{\link{wd3D}}, \code{\link{wd3D.object}}, \code{\link{wr3D}}. } \examples{ # # Generate some test data # a <- array(rnorm(8*8*8), dim=c(8,8,8)) # # Perform the 3D DWT # awd3D <- wd3D(a) # # How many levels does this object have? # nlevelsWT(awd3D) # [1] 3 # # So conceivably we could access levels 0, 1 or 2. # # Ok. Let's get the level 1 HGH sub-block coefficients: # accessD(awd3D, level=1, block="HGH") # #, , 1 # [,1] [,2] #[1,] 0.8359289 1.3596832 #[2,] -0.1771688 0.2987303 # #, , 2 # [,1] [,2] #[1,] -1.2633313 1.00221652 #[2,] -0.3004413 0.04728019 # # This was a 3D array of dimension size 2 (8 -> 4 -> 2, level 3, 2 and then 1) # # # Let's do the same call except this time don't specify the block arg. # alllev1 <- accessD(awd3D, level=1) # # This new object should be a list containing all the subblocks at this level. # What are the components? # names(alllev1) #[1] "GHH" "HGH" "GGH" "HHG" "GHG" "HGG" "GGG" # # O.k. Let's look at HGH again # alllev1$HGH # #, , 1 # [,1] [,2] #[1,] 0.8359289 1.3596832 #[2,] -0.1771688 0.2987303 # #, , 2 # [,1] [,2] #[1,] -1.2633313 1.00221652 #[2,] -0.3004413 0.04728019 # # Same as before.
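#
# (Illustrative addition, not part of the original example: at the coarsest
# level, level 0, each sub-block contains a single coefficient and the
# overall smooth block "HHH" is also returned, so we just list the names.)
#
names(accessD(awd3D, level=0))
#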
# } \keyword{manip} \author{G P Nason} wavethresh/man/image.wst.rd0000644000176200001440000000206114211622540015426 0ustar liggesusers\name{image.wst} \alias{image.wst} \usage{ \method{image}{wst}(x, nv, strut = 10, type = "D", transform = I, \dots) } \arguments{ \item{x}{The wst object you wish to image} \item{nv}{An associated node vector, this argument is no longer used and should be omitted (in the S version it permitted coloration of particular bases)} \item{strut}{The number of pixels/width that each coefficient should be drawn with} \item{type}{Either "C" or "D" depending on whether you wish to image scaling function coefficients or wavelet ones} \item{transform}{A numerical transform you wish to apply to the coefficients before imaging} \item{\dots}{Other arguments} } \title{Produce image representation of a wst class object} \description{ Produces an image representation of the coefficients contained within a \code{\link{wst.object}} class object. } \details{ Description says all } \value{ None } \seealso{\code{\link{logabs}},\code{\link{wst}}} \examples{ tmp <- wst(rnorm(1024)) \dontrun{image(tmp)} \dontrun{image(tmp, transform=logabs)} } \author{G P Nason} \keyword{hplot} wavethresh/man/mfirst.last.rd0000644000176200001440000001021314211622540015774 0ustar liggesusers\name{mfirst.last} \alias{mfirst.last} \title{Build a first/last database for multiple wavelet transforms. } \description{ This function is not intended for user use, but is used by various functions involved in computing and displaying multiple wavelet transforms. } \usage{ mfirst.last(LengthH, nlevels, ndecim, type = "wavelet", bc = "periodic") } \arguments{ \item{LengthH}{Number of filter matrix coefficients.} \item{nlevels}{Number of levels in the decomposition} \item{ndecim}{The decimation scale factor for the multiple wavelet basis.} \item{type}{Whether the transform is non-decimated or ordinary (wavelet). The non-decimated multiple wavelet transform is not yet supported.} \item{bc}{This argument determines how the boundaries of the the function are to be handled. The permitted values are periodic or \code{symmetric} } } \details{ Suppose you begin with \code{2^m}=2048 coefficient vectors. At the next level you would expect 1024 smoothed data vectors, and 1024 wavelet vectors, and if \code{bc="periodic"} this is indeed what happens. However, if \code{bc="symmetric"} you actually need more than 1024 (as the wavelets extend over the edges). The first last database keeps track of where all these "extras" appear and also where they are located in the packed vectors C and D of pyramidal coefficients within wavelet structures. For examples, given a \code{first.last.c} row of \deqn{-2 3 20}{-2 3 20} The `position' of the coefficient vectors would be \deqn{c_{-2}, c_{-1}, c_{0}, c_{1}, c_{2}, c_{3}}{c_{-2}, c_{-1}, c_{0}, c_{1}, c_{2}, c_{3}} In other words, there are 6 coefficients, starting at -2 and ending at 3, and the first of these (\eqn{c_{-2}}) appears at column 20 of the \code{$C} component matrix of the wavelet structure. You can ``do'' first.last in your head for periodic boundary handling but for more general boundary treatments (e.g. symmetric) first.last is indispensable. The numbers in first last databases were worked out from inequalities derived from: Daubechies, I. (1988). } \value{ A first/last database structure, a list containing the following information: \item{first.last.c}{A \code{(m+1)x3} matrix. 
The first column specifies the real index of the first coefficient vector of the smoothed data at a level, the 2nd column is the real index of the last coefficient vector, the last column specifies the offset of the first smoothed datum at that level. The offset is used by the C code to work out where the beginning of the sequence is within a packed vector of the pyramid structure. The first and 2nd columns can be used to work out how many numbers there are at a level. If bc="periodic" then the pyramid is a true power of 2 pyramid, that is it starts with a power of 2, and the next level is half of the previous. If bc="symmetric" then the pyramid is nearly exactly a power of 2, but not quite, see the Details section for why this is so.} \item{nvecs.c}{The number of C coefficient vectors.} \item{first.last.d}{A \code{mx3} matrix. As for \code{first.last.c} but for the wavelet coefficients packed as the D component of a wavelet structure.} \item{nvecs.d}{The number of \code{D} coefficient vectors.} } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1995-6)} \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mwd.object}}, \code{\link{mwd}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ # #To see the housekeeping variables for a decomposition with # 4 filter coefficient matices # 5 resolution levels and a decimation scale of two # use: mfirst.last(4,5,2) # $first.last.c: # First Last Offset # [1,] 0 0 62 # [2,] 0 1 60 # [3,] 0 3 56 # [4,] 0 7 48 # [5,] 0 15 32 # [6,] 0 31 0 # # $nvecs.c: # [1] 63 # # $first.last.d: # First Last Offset # [1,] 0 0 30 # [2,] 0 1 28 # [3,] 0 3 24 # [4,] 0 7 16 # [5,] 0 15 0 # # $nvecs.d: # [1] 31 } \keyword{datagen} \author{Tim Downie} wavethresh/man/threshold.wd.rd0000644000176200001440000003076014211622634016150 0ustar liggesusers\name{threshold.wd} \alias{threshold.wd} \title{Threshold (DWT) wavelet decomposition object} \description{ This function provides various ways to threshold a \code{\link{wd}} class object. } \usage{ \method{threshold}{wd}(wd, levels = 3:(nlevelsWT(wd) - 1), type = "soft", policy = "sure", by.level = FALSE, value = 0, dev = madmad, boundary = FALSE, verbose = FALSE, return.threshold = FALSE, force.sure = FALSE, cvtol = 0.01, cvmaxits=500, Q = 0.05, OP1alpha = 0.05, alpha = 0.5, beta = 1, C1 = NA, C2 = NA, C1.start = 100, al.check=TRUE, \dots) } \arguments{ \item{wd}{The DWT wavelet decomposition object that you wish to threshold.} \item{levels}{a vector of integers which determines which scale levels are thresholded in the decomposition. Each integer in the vector must refer to a valid level in the \code{\link{wd}} object supplied. This is usually any integer from 0 to \code{\link{nlevelsWT}}(wd)-1 inclusive. Only the levels in this vector contribute to the computation of the threshold and its application.} \item{type}{determines the type of thresholding this can be "hard" or "soft".} \item{policy}{selects the technique by which the threshold value is selected. Each policy corresponds to a method in the literature. At present the different policies are: "\code{universal}", "\code{LSuniversal}", "\code{\link{sure}}", "\code{BayesThresh}", "\code{cv}", "\code{fdr}", "\code{op1}", "\code{op2}", "\code{manual}", "\code{mannum}" and "\code{probability}". 
The policies are described in detail below.} \item{by.level}{If FALSE then a global threshold is computed on and applied to all scale levels defined in \code{levels}. If TRUE a threshold is computed and applied separately to each scale level.} \item{value}{This argument conveys the user supplied threshold. If the \code{policy="manual"} then \code{value} is the actual threshold value; if the \code{policy="mannum"} then \code{value} conveys the total number of ordered coefficients kept (from the largest); if \code{policy="probability"} then \code{value} conveys the user supplied quantile level.} \item{dev}{this argument supplies the function to be used to compute the spread of the absolute values of the coefficients. The function supplied must return a value of spread on the variance scale (i.e. not standard deviation) such as the \code{var()} function. A popular, useful and robust alternative is the \code{\link{madmad}} function.} \item{boundary}{If this argument is TRUE then the boundary bookkeeping values are included for thresholding, otherwise they are not.} \item{verbose}{if TRUE then the function prints out informative messages as it progresses.} \item{return.threshold}{If this option is TRUE then the actual \emph{value} of the threshold is returned. If this option is FALSE then a thresholded version of the input is returned.} \item{force.sure}{If TRUE then the \code{\link{sure}} threshold is computed on a vector even when that vector is very sparse. If FALSE then the normal SUREshrink procedure is followed whereby the universal threshold is used for sparse vectors of coefficients.} \item{cvtol}{Parameter for the cross-validation \code{"cv"} policy.} \item{cvmaxits}{Maximum number of iterations allowed for the cross-validation \code{"cv"} policy.} \item{Q}{Parameter for the false discovery rate \code{"fdr"} policy.} \item{OP1alpha}{Parameter for Ogden and Parzen's first "\code{op1}" and \code{"op2"} policies.} \item{alpha}{Parameter for BayesThresh \code{"BayesThresh"} policy.} \item{beta}{Parameter for BayesThresh \code{"BayesThresh"} policy.} \item{C1}{Parameter for BayesThresh \code{"BayesThresh"} policy.} \item{C2}{Parameter for BayesThresh \code{"BayesThresh"} policy.} \item{C1.start}{Parameter for BayesThresh \code{"BayesThresh"} policy.} \item{al.check}{If TRUE then the function checks that the levels are in ascending order. If they are not then this can be an indication that the default level arguments are not appropriate for this data set (\code{wd} object). However, a strange order might be appropriate for some reason if deliberately set, so setting this argument equal to FALSE turns off the check and warning.} \item{\dots}{any other arguments} } \details{ This function thresholds or shrinks wavelet coefficients stored in a \code{\link{wd}} object and returns the coefficients in a modified \code{\link{wd}} object. See the seminal papers by Donoho and Johnstone for explanations about thresholding. For a gentle introduction to wavelet thresholding (or shrinkage as it is sometimes called) see Nason and Silverman, 1994. For more details on each technique see the descriptions of each method below. The basic idea of thresholding is very simple. In a signal plus noise model the wavelet transform of the signal is very sparse, the wavelet transform of noise is not (in particular, if the noise is iid Gaussian then so is the noise contained in the wavelet coefficients).
Thus since the signal gets concentrated in the wavelet coefficients and the noise remains "spread" out it is "easy" to separate the signal from noise by keeping large coefficients (which correspond to signal) and deleting the small ones (which correspond to noise). However, one has to have some idea of the noise level (computed using the dev option in threshold functions). If the noise level is very large then it is possible, as usual, that no signal "sticks up" above the noise. There are many components to a successful thresholding procedure. Some components have a larger effect than others but the effect is not the same in all practical data situations. Here we give some rough practical guidance, although \emph{you must refer to the papers below when using a particular technique.} \bold{You cannot expect to get excellent performance on all signals unless you fully understand the rationale and limitations of each method below.} I am not in favour of the "black-box" approach. The thresholding functions of WaveThresh3 are not a black box: experience and judgement are required! Some issues to watch for: \describe{ \item{levels}{The default of \code{levels = 3:(wd$nlevelsWT - 1)} for the \code{levels} option most certainly does not work globally for all data problems and situations. The level at which thresholding begins (i.e. the given threshold and finer scale wavelets) is called the \emph{primary resolution} and is unique to a particular problem. In some ways choice of the primary resolution is very similar to choosing the bandwidth in kernel regression albeit on a logarithmic scale. See Hall and Patil, (1995) and Hall and Nason (1997) for more information. For each data problem you need to work out which is the best primary resolution. This can be done by gaining experience at what works best, or using prior knowledge. It is possible to "automatically" choose a "best" primary resolution using cross-validation (but not in WaveThresh). Secondly, the function computes and applies the threshold at the levels specified in the \code{levels} argument. It does this for all the levels specified. Sometimes, in wavelet shrinkage, the threshold is computed using only the finest scale coefficients (or more precisely the estimate of the overall noise level). If you want your threshold variance estimate only to use the finest scale coefficients (e.g. with universal thresholding) then you will have to apply the \code{threshold.wd} function twice. Once (with levels set equal to \code{\link{nlevelsWT}}(wd)-1 and with \code{return.threshold=TRUE}) to return the threshold computed on the finest scale, and then apply the threshold function with the manual option supplying the value of the previously computed threshold as the \code{value} option. Thirdly, if you apply wavelet shrinkage to a small data set then you need to ensure you've chosen the \code{levels} argument appropriately. For example, if your original data was of length 8, then the associated \code{wd} wavelet decomposition object will only have levels 0, 1 and 2. So, the default argument for levels (starting at 3 and higher) will almost certainly be wrong. The code now warns for these situations. } \item{by.level}{For a \code{\link{wd}} object which has come from data with correlated noise you should have a threshold computed for each resolution level. See the paper by Johnstone and Silverman, 1997.} } } \value{ An object of class \code{\link{wd}}. This object contains the thresholded wavelet coefficients.
Note that if the \code{return.threshold} option is set to TRUE then the threshold values will be returned rather than the thresholded object. } \references{Various code segments detailed above were kindly donated by Felix Abramovich, Theofanis Sapatinas and Todd Ogden. } \note{ POLICIES This section gives a brief description of the different thresholding policies available. For further details see \emph{the associated papers}. If there is no paper available then a small description is provided here. More than one policy may be good for problem, so experiment! They are arranged here in alphabetical order: \describe{ \item{BayesThresh}{See Abramovich, Silverman and Sapatinas, (1998). Contributed by Felix Abramovich and Fanis Sapatinas.} \item{cv}{See Nason, 1996.} \item{fdr}{See Abramovich and Benjamini, 1996. Contributed by Felix Abramovich.} \item{LSuniversal}{See Nason, von Sachs and Kroisandt, 1998. This is used for smoothing of a wavelet periodogram and shouldn't be used generally.} \item{manual}{specify a user supplied threshold using \code{value} to pass the value of the threshold. The \code{value} argument should be a vector. If it is of length 1 then it is replicated to be the same length as the \code{levels} vector, otherwise it is repeated as many times as is necessary to be the \code{levels} vector's length. In this way, different thresholds can be supplied for different levels. Note that the \code{by.level} option has no effect with this policy.} \item{mannum}{You decided how many of the largest (in absolute value) coefficients that you want to keep and supply this number in value.} \item{op1}{See Ogden and Parzen, 1996. Contributed by Todd Ogden.} \item{op2}{See Ogden and Parzen, 1996. Contributed by Todd Ogden.} \item{probability}{The \code{probability} policy works as follows. All coefficients that are smaller than the valueth quantile of the coefficients are set to zero. If \code{by.level} is false, then the quantile is computed for all coefficients in the levels specified by the "levels" vector; if \code{by.level} is true, then each level's quantile is estimated separately. The probability policy is pretty stupid - do not use it.} \item{sure}{See Donoho and Johnstone, 1994.} \item{universal}{See Donoho and Johnstone, 1995.} } } \section{RELEASE}{Version 3.6 Copyright Guy Nason and others 1997 } \seealso{ \code{\link{wd}}, \code{\link{wd.object}}, \code{\link{wr}}, \code{\link{wr.wd}}, \code{\link{threshold}}. } \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Generate some noisy data # ynoise <- test.data + rnorm(512, sd=0.1) # # Plot it # \dontrun{ts.plot(ynoise)} # # Now take the discrete wavelet transform # N.b. I have no idea if the default wavelets here are appropriate for # this particular examples. # ynwd <- wd(ynoise) \dontrun{plot(ynwd)} # # Now do thresholding. We'll use a universal policy, # and madmad deviance estimate on the finest # coefficients and return the threshold. We'll also get it to be verbose # so we can watch the process. # ynwdT1 <- threshold(ynwd, policy="universal", dev=madmad, levels= nlevelsWT(ynwd)-1, return.threshold=TRUE, verbose=TRUE) # threshold.wd: # Argument checking # Universal policy...All levels at once # Global threshold is: 0.328410967430135 # # Why is this the threshold? Well in this case n=512 so sqrt(2*log(n)), # the universal threshold, # is equal to 3.53223. Since the noise is about 0.1 (because that's what # we generated it to be) the threshold is about 0.353. 
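#
# (Illustrative check, not part of the original example: the universal
# threshold is sqrt(2*log(n)) multiplied by an estimate of the noise
# standard deviation. Using the madmad-based estimate from the finest
# level coefficients, and n=512 as explained above, this should roughly
# reproduce the threshold value printed above.)
#
sqrt(2 * log(512)) * mad(accessD(ynwd, level = nlevelsWT(ynwd) - 1))
#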
# # Now let's apply this threshold to all levels in the noisy wavelet object # ynwdT1obj <- threshold(ynwd, policy="manual", value=ynwdT1, levels=0:(nlevelsWT(ynwd)-1)) # # And let's plot it # \dontrun{plot(ynwdT1obj)} # # You'll see that a lot of coefficients have been set to zero, or shrunk. # # Let's try a Bayesian examples this time! # ynwdT2obj <- threshold(ynwd, policy="BayesThresh") # # And plot the coefficients # \dontrun{plot(ynwdT2obj)} # # Let us now see what the actual estimates look like # ywr1 <- wr(ynwdT1obj) ywr2 <- wr(ynwdT2obj) # # Here's the estimate using universal thresholding # \dontrun{ts.plot(ywr1)} # # Here's the estimate using BayesThresh # \dontrun{ts.plot(ywr2)} } \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/print.wst.rd0000644000176200001440000000203614211622540015502 0ustar liggesusers\name{print.wst} \alias{print.wst} \title{Print out information about an wst object in readable form.} \usage{ \method{print}{wst}(x, \dots) } \arguments{ \item{x}{The \code{\link{wst.object}} object to print info on} \item{\dots}{Other arguments} } \description{ This function prints out information about an \code{\link{wst.object}} object in a nice human-readable form. } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{wst}}, \code{\link{wst.object}}} \examples{ # # Generate an wst object (a "nonsense" one for # the example). # vwst <- wst(DJ.EX()$heavi) # # Now get Splus/R to use print.wst # vwst #Class 'wst' : Stationary Wavelet Transform Object: # ~~~ : List with 5 components with names # wp Carray nlevelsWT filter date # #$wp and $Carray are the coefficient matrices # #Created on : Wed Sep 08 09:24:03 2004 # #summary(.): #---------- #Levels: 10 #Length of original: 1024 #Filter was: Daub cmpct on least asymm N=10 #Date: Wed Sep 08 09:24:03 2004 } \author{G P Nason} \keyword{print} wavethresh/man/accessC.wst.rd0000644000176200001440000000371714211622540015721 0ustar liggesusers\name{accessC.wst} \alias{accessC.wst} \title{Get smoothed data from packet ordered non-decimated wavelet object (wst)} \description{ The smoothed data from a packet ordered non-decimated wavelet object (returned from \code{\link{wst}}) are stored in a matrix. This function extracts all the coefficients corresponding to a particular resolution level. } \usage{ \method{accessC}{wst}(wst, level, aspect, \dots) } \arguments{ \item{wst}{Packet ordered non-decimated wavelet object from which you wish to extract the smoothed or original data (if the object is directly from a packet ordered non-decimated wavelet transform of some data).} \item{level}{The level that you wish to extract. This can range from zero (the coarsest coefficients) to nlevelsWT(wstobj) which returns the original data.} \item{aspect}{Applies function to coefficients before return. Supplied as a character string which gets converted to a function. For example "Mod" which returns the absolute values of the coefficients} \item{\dots}{Other arguments} } \value{ A vector of the extracted data. } \details{ The \code{\link{wst}} function performs a packet-ordered non-decimated wavelet transform. This function extracts all the father wavelet coefficients at a particular resolution level specified by \code{level}. Note that coefficients returned by this function are in emph{packet order}. They can be used \emph{as is} but for many applications it might be more useful to deal with the coefficients in packets: see the function \code{\link{getpacket.wst}} for further details. } \references{ Nason, G. 
P. and Silverman, B. W. (1994). The discrete wavelet transform in S. \emph{Journal of Computational and Graphical Statistics}, \bold{3}, 163--191. } \seealso{ \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{accessC}}, \code{\link{getpacket.wst}} } \examples{ # # Get the 3rd level of smoothed data from a decomposition # dat <- rnorm(64) accessC(wst(dat), level=3) } \keyword{manip} \author{G P Nason} wavethresh/man/sure.rd0000644000176200001440000000254114211622634014515 0ustar liggesusers\name{sure} \alias{sure} \usage{ sure(x) } \arguments{ \item{x}{Vector of (normalized) wavelet coefficients. Coefficients should be supplied divided by their standard deviation, or some robust measure of scale} } \title{Computes the minimum of the SURE thresholding function} \description{ Computes the minimum of the SURE thresholding function for wavelet shrinkage as described in Donoho, D.L. and Johnstone, I.M. (1995) Adapting to unknown smoothness via wavelet shrinkage. \emph{J. Am. Statist. Ass.}, \bold{90}, 1200-1224. } \details{ SURE is a method for unbiasedly estimating the risk of an estimator. Stein (1981) showed that for a nearly arbitrary, nonlinear biased estimator, one can estimate its loss unbiasedly. See the Donoho and Johnstone, 1995 for further references and explanation. This function minimizes formula (11) from that paper. } \seealso{\code{\link{threshold}}} \value{ The absolute value of the wavelet coefficient that minimizes the SURE criteria } \examples{ # # Let's create "pretend" vector of wavelet coefficients contaminated with # "noise". # v <- c(0.1, -0.2, 0.3, -0.4, 0.5, 99, 12, 6) # # Now, what's sure of this? # sure(v) # # [1] 0.5 # # # I.e. the large significant coefficients are 99, 12, 6 and the noise is # anything less than this in abs value. So sure(v) is a good point to threshold # at. } \author{G P Nason} \keyword{math} wavethresh/man/print.imwd.rd0000644000176200001440000000320314211622540015622 0ustar liggesusers\name{print.imwd} \alias{print.imwd} \title{Print out information about an imwd object in readable form. } \description{ This function prints out information about an \code{\link{imwd.object}} in a nice human-readable form. Note that this function is automatically called by SPlus whenever the name of an \code{\link{imwd.object}} is typed or whenever such an object is returned to the top level of the S interpreter. } \usage{ \method{print}{imwd}(x, ...) } \arguments{ \item{x}{An object of class imwd that you wish to print out.} \item{\dots}{This argument actually does nothing in this function! } } \details{ Prints out information about \code{\link{imwd}} objects in nice readable format. } \value{ The last thing this function does is call \code{\link{summary.imwd}} so the return value is whatever is returned by this function. } \section{RELEASE}{Version 3.0 Copyright Guy Nason 1994 } \seealso{ \code{\link{imwd.object}}, \code{\link{summary.imwd}}. } \examples{ # # Generate an imwd object. # tmp <- imwd(matrix(0, nrow=32, ncol=32)) # # Now get R to use print.imwd # tmp # Class 'imwd' : Discrete Image Wavelet Transform Object: # ~~~~ : List with 27 components with names # nlevelsWT fl.dbase filter type bc date w4L4 w4L1 w4L2 w4L3 # w3L4 w3L1 w3L2 w3L3 w2L4 w2L1 w2L2 w2L3 w1L4 w1L1 w1L2 w1L3 w0L4 w0L1 # w0L2 w0L3 w0Lconstant # # $ wNLx are LONG coefficient vectors ! # # summary(.): # ---------- # UNcompressed image wavelet decomposition structure # Levels: 5 # Original image was 32 x 32 pixels. 
# Filter was: Daub cmpct on least asymm N=10 # Boundary handling: periodic } \keyword{utilities} \author{G P Nason} wavethresh/man/LocalSpec.rd0000644000176200001440000000153614211622540015403 0ustar liggesusers\name{LocalSpec} \alias{LocalSpec} \title{Compute Nason and Silverman smoothed wavelet periodogram.} \description{ This function is obsolete. Use the function \code{\link{ewspec}}. Performs the Nason and Silverman smoothed wavelet periodogram as described in Nason and Silverman (1995). This function is generic. Particular methods exist. For the wd class object this generic function uses \code{\link{LocalSpec.wd}}. } \usage{ LocalSpec(...) } \arguments{ \item{\dots}{See individual help pages for details.} } \details{ See individual method help pages for operation and examples. } \value{ The LocalSpec of the wavelet object supplied. See method help files for examples. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1997 } \seealso{ \code{\link{wd}}, \code{\link{wd.object}}, \code{\link{LocalSpec.wd}} } \keyword{methods} \author{G P Nason} wavethresh/man/convert.rd0000644000176200001440000000163214211622540015213 0ustar liggesusers\name{convert} \alias{convert} \title{Convert one type of wavelet object into another. } \description{ Convert one type of wavelet object into another. This function is generic. Particular methods exist: \code{\link{convert.wd}} is used to convert non-decimated \code{\link{wd}} objects into \code{\link{wst}} objects. \code{\link{convert.wst}} is used to convert \code{\link{wst}} objects into non-decimated \code{\link{wd}} objects. } \usage{ convert(...) } \arguments{ \item{\dots}{See individual help pages for details.} } \details{ See individual method help pages for operation and examples. } \value{ An object containing the converted representation. } \section{RELEASE}{Version 3.6.0 Copyright Guy Nason 1995 } \seealso{ \code{\link{convert.wd}}, \code{\link{convert.wst}}, \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{wst}}, \code{\link{wst.object}}. } \keyword{manip} \author{G P Nason} wavethresh/man/putD.mwd.rd0000644000176200001440000000770514211622634015250 0ustar liggesusers\name{putD.mwd} \alias{putD.mwd} \title{Put wavelet coefficients into multiple wavelet structure } \description{ The wavelet coefficients from a multiple wavelet decomposition structure, \code{\link{mwd.object}}, (e.g. returned from \code{\link{mwd}}) are packed into a single matrix in that structure. This function copies the \code{\link{mwd.object}}, replaces some wavelet coefficients in the copy, and then returns the copy. } \usage{ \method{putD}{mwd}(mwd, level, M, boundary = FALSE, index = FALSE, \dots) } \arguments{ \item{mwd}{Multiple wavelet decomposition structure whose coefficients you wish to replace.} \item{level}{The level that you wish to replace.} \item{M}{Matrix of replacement coefficients.} \item{boundary}{If \code{boundary} is \code{FALSE} then only the "real" data is replaced (and it is easy to predict the required length of \code{M}). If \code{boundary} is \code{TRUE} then you can replace the boundary values at a particular level as well (but it is hard to predict the required length of\code{M}, and the information has to be obtained from the \code{mfirst.last} database component of \code{mwd}).} \item{index}{If index is \code{TRUE} then the index numbers into the \code{mwd$D} array where the matrix \code{M} would be stored is returned. Otherwise, (default) the modified \code{\link{mwd.object}} is returned. 
} \item{\dots}{any other arguments} } \details{ The \code{\link{mwd}} function produces a wavelet decomposition structure. The need for this function is a consequence of the pyramidal structure of Mallat's algorithm and the memory efficiency gain achieved by storing the pyramid as a linear matrix of coefficients. PutD obtains information about where the wavelet coefficients appear from the fl.dbase component of mwd, in particular the array \code{fl.dbase$first.last.d} which gives a complete specification of index numbers and offsets for \code{mwd$D}. Note also that this function only puts information into mwd class objects. To extract coefficients from mwd structures you have to use the accessD.mwd function. See Downie and Silverman, 1998. } \value{ An object of class \code{\link{mwd.object}} if index is \code{FALSE}, otherwise the index numbers indicating where the \code{M} matrix would have been inserted into the \code{mwd$D} object are returned. } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1995-6). } \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ # # Generate an mwd object # tmp <- mwd(rnorm(32)) # # Now let's examine the finest resolution detail... # accessD(tmp, level=3) # [,1] [,2] [,3] [,4] [,5] [,6] #[1,] 0.8465672 0.4983564 0.3408087 0.1340325 0.5917774 -0.06804291 #[2,] 0.6699962 -0.2535760 -1.0344445 0.2068644 -0.4912086 1.16039885 # [,7] [,8] #[1,] -0.6226445 0.2617596 #[2,] -0.4956576 -0.5555795 # # # A matrix. There are two rows one for each mother wavelet in this # two-ple multiple wavelet transform and at level 3 there are 2^3 columns. # # Let's set the coefficients of the first mother wavelet all equal to zero # for this examples # newdmat <- accessD(tmp, level=3) newdmat[1,] <- 0 # # Ok, let's insert it back at level 3 # tmp2 <- putD(tmp, level=3, M=newdmat) # # And check it # accessD(tmp2, level=3) # [,1] [,2] [,3] [,4] [,5] [,6] [,7] #[1,] 0.0000000 0.000000 0.000000 0.0000000 0.0000000 0.000000 0.0000000 #[2,] 0.6699962 -0.253576 -1.034445 0.2068644 -0.4912086 1.160399 -0.4956576 # [,8] #[1,] 0.0000000 #[2,] -0.5555795 # # # Yep, all the first mother wavelet coefficients at level 3 are now zero. } \keyword{manip} \author{Tim Downie} wavethresh/man/plot.wst2D.rd0000644000176200001440000000460014211622540015511 0ustar liggesusers\name{plot.wst2D} \alias{plot.wst2D} \title{Plot packet-ordered 2D non-decimated wavelet coefficients.} \description{ This function plots packet-ordered 2D non-decimated wavelet coefficients arising from a \code{\link{wst2D}} object. } \usage{ \method{plot}{wst2D}(x, plot.type="level", main="", ...) } \arguments{ \item{x}{The \code{\link{wst2D}} object whose coefficients you wish to plot.} \item{plot.type}{So far the only valid argument is "level" which plots coefficients a level at a time.} \item{main}{The main title of the plot.} \item{...}{Any other arguments.} } \details{ The coefficients in a \code{\link{wst2D}} object are stored in a three-dimensional subarray called \code{wst2D}. The first index of the 3D array indexes the resolution level of coefficients: this function with \code{plot.type="level"} causes an image of coefficients to be plotted one for each resolution level. 
The following corresponds to images produced on S+ graphics devices (e.g. image on \code{motif()}). Given a resolution level there are \code{4^(nlevelsWT-level)} packets within a level. Each packet can be addressed by a base-4 string of length \code{nlevels-level}. A zero corresponds to no shift, a 1 to a horizontal shift, a 2 to a vertical shift and a 3 to both a horizontal and vertical shift. So, for examples, at resolution level \code{nlevelsWT-1} there are 4 sub-images each containing 4 sub-images. The main subimages correspond to (clockwise from bottom-left) no shift, horizontal shift, both shift and vertical shifts. The sub-images of the sub-images correspond to the usual smooth, horizontal detail, diagonal detail and vertical detail (clockwise, again from bottom left). Coarser resolution levels correspond to finer shifts! The following figure demonstrates the \code{nlevels-1} resolution level for the \code{ua} image (although the whole image has been rotated by 90 degrees clockwise for display here!): } \value{ A plot of the coefficients contained within the \code{\link{wst2D}} object is produced. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{getpacket.wst2D}}, \code{\link{putpacket.wst2D}}, \code{\link{wst2D}}, \code{\link{wst2D.object}}. } \examples{ # # The above picture is one of a series produced by # #plot(uawst2D) # # Where the uawst2D object was produced in the EXAMPLES section # of the help for \code{\link{wst2D}} } \keyword{hplot} \author{G P Nason} wavethresh/man/wd.dh.rd0000644000176200001440000000273614211622634014551 0ustar liggesusers\name{wd.dh} \alias{wd.dh} \title{Compute specialized wavelet transform for density estimation} \usage{ wd.dh(data, filter.number = 10, family = "DaubLeAsymm", type = "wavelet", bc = "periodic", firstk = NULL, verbose = FALSE) } \arguments{ \item{data}{The father wavelet coefficients} \item{filter.number}{The smoothness of the underlying wavelet to use, see \code{\link{filter.select}}} \item{family}{The wavelet family to use, see \code{\link{filter.select}}} \item{type}{The type of wavelet to use} \item{bc}{Type of boundarie conditions} \item{firstk}{A parameter that originates from \code{\link{denproj}}} \item{verbose}{If \code{TRUE} then informative messages are printed.} } \description{ Computes the discrete wavelet transform, but with zero boundary conditions especially for density estimation. } \details{ This is a subsidiary routine, not intended for direct user use for density estimation. The main routines for wavelet density estimation are \code{\link{denwd}}, \code{\link{denproj}}, \code{\link{denwr}}. The input to this function should be projected father wavelet coefficients as computed by \code{\link{denproj}}, but usually supplied to this function by \code{\link{denwd}}. Thresholding should be carried out by the user independently of these functions. } \seealso{ \code{\link{denproj}}, \code{\link{denwd}}} \value{ An object of class \code{\link{wd}}, but assumed on the basis of zero boundary conditions. } \author{David Herrick} \keyword{math} \keyword{smooth} wavethresh/man/getpacket.wp.rd0000644000176200001440000000705214211622540016131 0ustar liggesusers\name{getpacket.wp} \alias{getpacket.wp} \title{Get packet of coefficients from a wavelet packet object (wp).} \description{ This function extracts and returns a packet of coefficients from a wavelet packet (\code{\link{wp}}) object. 
} \usage{ \method{getpacket}{wp}(wp, level, index, \dots ) } \arguments{ \item{wp}{Wavelet packet object from which you wish to extract the packet.} \item{level}{The resolution level of the coefficients that you wish to extract.} \item{index}{The index number within the resolution level of the packet of coefficients that you wish to extract.} \item{\dots}{any other arguments} } \details{ The \code{\link{wp}} function produces a wavelet packet object. The coefficients in this structure can be organised into a binary tree with each node in the tree containing a packet of coefficients. Each packet of coefficients is obtained by chaining together the effect of the \emph{two packet operators} DG and DH: these are the high and low pass quadrature mirror filters of the Mallat pyramid algorithm scheme followed by decimation (see Mallat~(1989b)). Starting with data \eqn{c^J} at resolution level J containing \eqn{2^J} data points the wavelet packet algorithm operates as follows. First DG and DH are applied to \eqn{c^J} producing \eqn{d^{J-1}} and \eqn{c^{J-1}} respectively. Each of these sets of coefficients is of length one half of the original data: i.e. \eqn{2^{J-1}}. Each of these sets of coefficients is a set of \emph{wavelet packet coefficients}. The algorithm then applies both DG and DH to both \eqn{d^{J-1}} and \eqn{c^{J-1}} to form four sets of coefficients at level J-2. Both operators are used again on the four sets to produce 8 sets, then again on the 8 sets to form 16 sets and so on. At level j=J,...,0 there are \eqn{2^{J-j}} packets of coefficients each containing \eqn{2^j} coefficients. This function enables whole packets of coefficients to be extracted at any resolution level. The index argument chooses a particular packet within each level and thus ranges from 0 (which always refers to the father wavelet coefficients), 1 (which always refers to the mother wavelet coefficients) up to \eqn{2^{J-j}-1}. } \value{ A vector containing the packet of wavelet packet coefficients that you wished to extract. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{wp}}, \code{\link{putpacket.wp}}, \code{\link{basisplot.wp}}, \code{\link{draw.wp}}, \code{\link{InvBasis.wp}}, \code{\link{MaNoVe.wp}}, \code{nlevelsWT.wp}, \code{\link{plot.wp}}, \code{\link{threshold.wp}}. } \examples{ # # Take the wavelet packet transform of some random data # MyWP <- wp(rnorm(1:512)) # # The above data set was 2^9 in length. Therefore there are # coefficients at resolution levels 0, 1, 2, ..., and 8. # # The high resolution coefficients are at level 8. # There should be 256 DG coefficients and 256 DH coefficients # length(getpacket(MyWP, level=8, index=0)) #[1] 256 length(getpacket(MyWP, level=8, index=1)) #[1] 256 # # The next command shows that there are only two packets at level 8 # \dontrun{getpacket(MyWP, level=8, index=2)} #Index was too high, maximum for this level is 1 #Error in getpacket.wp(MyWP, level = 8, index = 2): Error occured #Dumped # # There should be 4 coefficients at resolution level 2 # # The father wavelet coefficients are (index=0) getpacket(MyWP, level=2, index=0) #[1] -0.9736576 0.5579501 0.3100629 -0.3834068 # # The mother wavelet coefficients are (index=1) # #[1] 0.72871405 0.04356728 -0.43175307 1.77291483 # # There will be 128 packets at this level (indexed 0 to 127).
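#
# (Illustrative addition, not in the original example: the last valid packet
# index at level 2 is therefore 127, and each packet there has 2^2 = 4
# coefficients.)
#
length(getpacket(MyWP, level=2, index=127))
#[1] 4
#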
# } \keyword{manip} \author{G P Nason} wavethresh/man/wavegrow.rd0000644000176200001440000000657214211622634015410 0ustar liggesusers\name{wavegrow} \alias{wavegrow} \title{Interactive graphical tool to grow a wavelet synthesis} \usage{ wavegrow(n = 64, filter.number = 10, family = "DaubLeAsymm", type = "wavelet", random = TRUE, read.value = TRUE, restart = FALSE) } \arguments{ \item{n}{Number of points in the decomposition} \item{filter.number}{The wavelet filter.number to use, see \code{\link{filter.select}}} \item{family}{The wavelet family to use in the reconstruction} \item{type}{If \code{"wavelet"} then carry out the regular wavelet transform, otherwise if \code{"station"} do the nondecimated transform.} \item{random}{If \code{TRUE} then iid Gaussian coefficients are inserted into the tableaux. If \code{FALSE} and \code{read.value=TRUE} then the user is promoted for a value, otherwise the value 1 is inserted into the tableaux at the selected point.} \item{read.value}{If \code{TRUE} then a value is read and used to insert that size of wavelet coefficient at the selected point. If \code{FALSE} then a coefficient of size 1 is inserted.} \item{restart}{If \code{TRUE} then after a coefficient has been inserted, and plots done, the next selection causes all the coefficients to be reset to zero and a single coefficient inserted. This actually has the overall action of being able to select a coefficient location and view the size and shape of the wavelet produced.} } \description{ Use mouse to select which wavelets to enter a wavelet synthesis, continually plot the reconstruction and the wavelet tableaux. } \details{ This function can perform many slightly different actions. However, the basic idea is for a tableaux of wavelet coefficients to be displayed in one graphics window, and the reconstruction of those coefficients to be displayed in another graphics window. Hence, two graphics windows, capable of plotting and mouse interaction (e.g. X11, windows or quartz) with the locator function, are required to be active. When the function starts up an initial random tableaux is displayed and its reconstruction. The next step is for the user to select coefficients on the tableaux. What happens next specifically depends on the arguments above. By default selecting a coefficient causes that coefficient scale and location to be identified, then a random sample is taken from a N(0,1) random variable and assigned to that coefficient. Hence, the tableaux is updated, the reconstruction with the new coefficient computed and both are plotted. If \code{type="wavelet"} is used then decimated wavelets are used, if \code{type="station"} then the time-ordered non-decimated wavelets are used. If \code{random=FALSE} then new values for the coefficients are either selected (by asking the user for input) if \code{read.value=TRUE} or the value of 1 is input. If \code{restart=TRUE} then the function merely displays the wavelet associated with the selected coefficient. Hence, this option is useful to demonstrate to people how wavelets from different points of the tableaux have different sizes, scales and locations. If the mouse locator function is exited (this can be a right-click in some windowing systems, or pressing ESCAPE) then the function asks whether the user wishes to continue. If not then the function returns the current tableux. Hence, this function can be useful for users to build their own tabeleaux. } \value{ The final tableaux. 
} \seealso{\code{\link{wd}}} \author{G P Nason} \keyword{hplot} \keyword{iplot} wavethresh/man/first.last.rd0000644000176200001440000001012414211622540015620 0ustar liggesusers\name{first.last} \alias{first.last} \title{Build a first/last database for wavelet transforms.} \description{ This function is not intended for user use, but is used by various functions involved in computing and displaying wavelet transforms. It basically constructs "bookeeping" vectors that \code{WaveThresh} uses for working out where coefficient vectors begin and end. } \usage{ first.last(LengthH, DataLength, type, bc="periodic", current.scale=0) } \arguments{ \item{LengthH}{Length of the filter used to produce a wavelet decomposition.} \item{DataLength}{Length of the data before transforming. This must be a power of 2, say \eqn{2^m}.} \item{type}{The type of wavelet transform. Can be "wavelet" or "periodic"} \item{bc}{This character string argument determines how the boundaries of the the function are to be handled. The permitted values are \code{periodic} or \code{symmetric}. } \item{current.scale}{Can handle a different initial scale, but usually left at the default} } \details{ Suppose you begin with \eqn{2^m=2048} coefficients. At the next level you would expect 1024 smoothed data coefficients, and 1024 wavelet coefficients, and if \code{bc="periodic"} this is indeed what happens. However, if \code{bc="symmetric"} you actually need more than 1024 (as the wavelets extend over the edges). The first last database keeps track of where all these "extras" appear and also where they are located in the packed vectors C and D of pyramidal coefficients within wavelet structures. For examples, given a \code{first.last.c row} of \deqn{-2 3 20}{-2 3 20} The actual coefficients would be \deqn{c_{-2}, c_{-1}, c_{0}, c_{1}, c_{2}, c_{3}}{c_{-2}, c_{-1}, c_{0}, c_{1}, c_{2}, c_{3}} In other words, there are 6 coefficients, starting at -2 and ending at 3, and the first of these (\eqn{c_{-2}}) appears at an offset of 20 from the beginning of the \code{$C} component vector of the wavelet structure. You can ``do'' \code{first.last} in your head for \code{periodic} boundary handling but for more general boundary treatments (e.g. \code{symmetric}) \code{first.last} is indispensable. } \value{ A first/last database structure, a list containing the following information: \item{first.last.c}{A (m+1)x3 matrix. The first column specifies the real index of the first coefficient of the smoothed data at a level, the 2nd column is the real index of the last coefficient, the last column specifies the offset of the first smoothed datum at that level. The offset is used by the C code to work out where the beginning of the sequence is within a packed vector of the pyramid structure. The first and 2nd columns can be used to work out how many numbers there are at a level. If \code{bc="periodic"} then the pyramid is a true power of 2 pyramid, that is it starts with a power of 2, and the next level is half of the previous. If \code{bc="symmetric"} then the pyramid is nearly exactly a power of 2, but not quite, see the Details section for why this is so. } \item{ntotal}{The total number of smoothed data/original data points.} \item{first.last.d}{A mx3 matrix. As for \code{first.last.c} but for the wavelet coefficients packed as the D component of a wavelet structure.} \item{ntotal.d}{The total number of wavelet coefficients.} } \references{Nason, G.P. and Silverman, B.W. (1994). 
The discrete wavelet transform in S.} \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wd}}, \code{\link{wr}}, \code{\link{wr.wd}}, \code{\link{accessC}}, \code{\link{accessD}}, \code{\link{filter.select}}. \code{\link{imwd}}. } \examples{ # #If you're twisted then you may just want to look at one of these. # first.last(length(filter.select(2)), 64) #$first.last.c: #First Last Offset #[1,] 0 0 126 #[2,] 0 1 124 #[3,] 0 3 120 #[4,] 0 7 112 #[5,] 0 15 96 #[6,] 0 31 64 #[7,] 0 63 0 # #$ntotal: #[1] 127 # #$first.last.d: #First Last Offset #[1,] 0 0 62 #[2,] 0 1 60 #[3,] 0 3 56 #[4,] 0 7 48 #[5,] 0 15 32 #[6,] 0 31 0 # #$ntotal.d: #[1] 63 # # } \keyword{manip} \author{G P Nason} wavethresh/man/MaNoVe.wst.rd0000644000176200001440000001157514211622540015503 0ustar liggesusers\name{MaNoVe.wst} \alias{MaNoVe.wst} \title{Make Node Vector (using Coifman-Wickerhauser best-basis type algorithm) on nondecimated wavelet transform object} \usage{ \method{MaNoVe}{wst}(wst, entropy=Shannon.entropy, verbose=FALSE, stopper=FALSE, alg="C", \dots) } \arguments{ \item{wst}{The wst object for which you wish to find the best basis for.} \item{entropy}{The function used for computing the entropy of a vector} \item{verbose}{Whether or not to print out informative messages} \item{stopper}{Whether the computations are temporarily stopped after each packet. This can be useful in conjunction with the \code{verbose} argument so as to see computations proceed one packet at a time.} \item{alg}{If "C" then fast compiled C code is used (in which case the \code{entropy} function is ignored and the C code uses an internal Shannon entropy. Otherwise, slower R code is used but an arbitrary \code{entropy} argument can be used} \item{\dots}{Other arguments} } \description{ This method chooses a "best-basis" using the Coifman-Wickerhauser (1992) algorithm applied to nondecimated wavelet transform, \code{\link{wst.object}}, objects. } \details{ Description says all } \value{ A wavelet node vector object, of class \code{nv}, a basis description. This can be fed into a basis inversion using, say, the function \code{\link{InvBasis}}. } \seealso{ \code{\link{InvBasis}}, \code{\link{MaNoVe}}, \code{\link{MaNoVe.wp}}, \code{\link{Shannon.entropy}}, \code{\link{wst.object}}, \code{\link{wst}} } \examples{ # # What follows is a simulated denoising example. We first create our # "true" underlying signal, v. Then we add some noise to it with a signal # to noise ratio of 6. Then we take the packet-ordered non-decimated wavelet # transform and then threshold that. # # Then, to illustrate this function, we compute a "best-basis" node vector # and use that to invert the packet-ordered NDWT using this basis. As a # comparison we also use the Average Basis method # (cf Coifman and Donoho, 1995). # # NOTE: It is IMPORTANT to note that this example DOES not necessarily # use an appropriate or good threshold or necessarily the right underlying # wavelet. I am trying to show the general idea and please do not "quote" this # example in literature saying that this is the way that WaveThresh (or # any of the associated authors whose methods it attempts to implement) # does it. Proper denoising requires a lot of care and thought. # # # Here we go.... 
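#
# (Illustrative addition, not part of the original example: fix the random
# seed first so that the simulated noise below is reproducible.)
#
set.seed(1)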
# # Create an example vector (the Donoho and Johnstone heavisine function) # v <- DJ.EX()$heavi # # Add some noise with a SNR of 6 # vnoise <- v + rnorm(length(v), 0, sd=sqrt(var(v))/6) # # Take packet-ordered non-decimated wavelet transform (note default wavelet # used which might not be the best option for denoising performance). # vnwst <- wst(vnoise) # # Let's take a look at the wavelet coefficients of vnoise # \dontrun{plot(vnwst)} # # Wow! A huge number of coefficients, but mostly all noise. # # # Threshold the resultant NDWT object. # (Once again default arguments are used which are certainly not optimal). # vnwstT <- threshold(vnwst) # # Let's have a look at the thresholded wavelet coefficients # \dontrun{plot(vnwstT)} # # Ok, a lot of the coefficients have been removed as one would expect with # universal thresholding # # # Now select packets for a basis using a Coifman-Wickerhauser algorithm # vnnv <- MaNoVe(vnwstT) # # Let's have a look at which packets got selected # vnnv # Level : 9 Action is R (getpacket Index: 1 ) # Level : 8 Action is L (getpacket Index: 2 ) # Level : 7 Action is L (getpacket Index: 4 ) # Level : 6 Action is L (getpacket Index: 8 ) # Level : 5 Action is R (getpacket Index: 17 ) # Level : 4 Action is L (getpacket Index: 34 ) # Level : 3 Action is L (getpacket Index: 68 ) # Level : 2 Action is R (getpacket Index: 137 ) # Level : 1 Action is R (getpacket Index: 275 ) # There are 10 reconstruction steps # # So, its not the regular decimated wavelet transform! # # Let's invert the representation with respect to this basis defined by # vnnv # vnwrIB <- InvBasis(vnwstT, vnnv) # # And also, for completeness let's do an Average Basis reconstruction. # vnwrAB <- AvBasis(vnwstT) # # Let's look at the Integrated Squared Error in each case. # sum( (v - vnwrIB)^2) # [1] 386.2501 # sum( (v - vnwrAB)^2) # [1] 328.4520 # # So, for this limited example the average basis method does better. Of course, # for *your* simulation it could be the other way round. "Occasionally", the # inverse basis method does better. When does this happen? A good question. # # Let's plot the reconstructions and also the original # \dontrun{plot(vnwrIB, type="l")} \dontrun{lines(vnwrAB, lty=2)} \dontrun{lines(v, lty=3)} # # The dotted line is the original. Neither reconstruction picks up the # spikes in heavisine very well. The average basis method does track the # original signal more closely though. # } \author{G P Nason} \keyword{smooth} wavethresh/man/MaNoVe.wp.rd0000644000176200001440000000205714211622540015307 0ustar liggesusers\name{MaNoVe.wp} \alias{MaNoVe.wp} \title{Make Node Vector (using Coifman-Wickerhauser best-basis type algorithm) on wavelet packet object} \usage{ \method{MaNoVe}{wp}(wp, verbose=FALSE, \dots) } \arguments{ \item{wp}{The wp object for which you wish to find the best basis for.} \item{verbose}{Whether or not to print out informative messages} \item{\dots}{Other arguments} } \description{ This method chooses a "best-basis" using the Coifman-Wickerhauser (1992) algorithm applied to wavelet packet, \code{\link{wp.object}}, objects. } \details{ Description says all } \value{ A wavelet packet node vector object of class \code{nvwp}, a basis description. This can be fed into a basis inversion using, say, the function \code{\link{InvBasis}}. 
} \seealso{ \code{\link{InvBasis}}, \code{\link{MaNoVe}}, \code{\link{MaNoVe.wst}}, \code{\link{wp.object}}, \code{\link{wp}} } \examples{ # # See example of use of this function in the examples section # of the help of plot.wp # # A node vector vnv is created there that gets plotted. # } \author{G P Nason} \keyword{smooth} wavethresh/man/Chires5.rd0000644000176200001440000000235214211622540015035 0ustar liggesusers\name{Chires5} \alias{Chires5} \title{Subsidiary routine for denproj (calculates scaling function coefficients without covariances)} \usage{ Chires5(x, tau=1, J, filter.number=10, family="DaubLeAsymm", nT=20) } \arguments{ \item{x}{The data (random sample for density estimation)} \item{tau}{Fine tuning parameter} \item{J}{Resolution level} \item{filter.number}{The smoothness of the wavelet, see \code{\link{filter.select}}} \item{family}{The family of the wavelet, see \code{\link{filter.select}}} \item{nT}{The number of iterations in the Daubechies-Lagarias algorithm} } \description{ A subsidiary routine for \code{\link{denproj}}. Not intended for direct user use. } \details{ As description } \value{ A list with the following components: \item{coef}{The scaling function coefficients} \item{klim}{The integer translates of the scaling functions used} \item{p}{The primary resolution, calculated in code as tau*2^J} \item{filter}{The usual filter information, see \code{\link{filter.select}}} \item{n}{The length of the data \code{x}} \item{res}{A list containing components: \code{p}, as above, \code{tau} as input and \code{J} as above. This summarizes the resolution information} } \seealso{\code{\link{Chires6}},\code{\link{denproj}}} \author{David Herrick} \keyword{smooth} wavethresh/man/basisplot.wp.rd0000644000176200001440000000347314211622540016165 0ustar liggesusers\name{basisplot.wp} \alias{basisplot.wp} \title{Function to graphically select a wavelet packet basis} \usage{ \method{basisplot}{wp}(x, draw.mode=FALSE, \dots) } \arguments{ \item{x}{The \code{\link{wp.object}} for which you wish to select a basis graphically.} \item{draw.mode}{If TRUE then TWO graphics windows have to be open. Every time a packet is selected in the packet selection window, a representation of the wavelet packet basis function is drawn in the other window} \item{\dots}{Other arguments} } \description{ Note, one or two (depending on the state of \code{draw.mode}) graphics windows with mouse-clickable interfaces have to be open to use this function. Graphically select a wavelet packet basis associated with a wavelet packet object. Left-click selects packets, right click exits the routine. } \details{ A wavelet packet basis is described in WaveThresh by a node vector object (the class returned by \code{\link{MaNoVe.wp}}), which for wavelet packets is \code{nvwp}. This function takes a \code{\link{wp.object}} object and graphically depicts all possible basis function locations. The user is then invited to click on different packets, which then change colour. When finished, the user right clicks on the graphic and the selected basis is returned. \emph{Note that the routine does not check to see whether the basis is legal. You have to do this.} A legal basis can select packets from different levels, but you cannot select packets that cover the same packet index more than once, and every packet index has to be covered. A better function \emph{would} check basis legality! } \value{ An object of class \code{nvwp} which contains the specification for the basis.
} \seealso{\code{\link{addpkt}}, \code{\link{InvBasis}}, \code{\link{MaNoVe.wp}}, \code{\link{plotpkt}}, \code{\link{wp}}} \author{G P Nason} \keyword{hplot} wavethresh/man/imwd.object.rd0000644000176200001440000000713514211622540015744 0ustar liggesusers\name{imwd.object} \alias{imwd.object} \title{Two-dimensional wavelet decomposition objects.} \description{ These are objects of class \code{imwd}. They represent a decomposition of an image with respect to a two-dimensional wavelet basis (or tight frame in the case of the two-dimensional (space-ordered) non-decimated wavelet decomposition). } \details{ In previous releases the original image was stored as the "original" component of an imwd object. This is not done now as the resulting objects were excessively large. } \value{ The following components must be included in a legitimate `imwd' object. \item{nlevelsWT}{number of levels in wavelet decomposition. If you raise 2 to the power of nlevels then you get the dimension of the image that you originally started with. } \item{type}{If \code{type="wavelet"} then the image was decomposed according to the 2D Mallat pyramidal algorithm. If \code{type="station"} then the image was decomposed using the 2D spatially ordered non-decimated wavelet transform.} \item{fl.dbase}{The first last database associated with the decomposition. For images, this list is not very useful as each level's components are stored as list components, rather than being packaged up in a single vector as in the 1D case. Nevertheless the internals still need to know about fl.dbase to get the computations correct. See the help for \code{\link{first.last}} if you are a masochist. } \item{filter}{A filter object as returned by the \code{\link{filter.select}} function. This component records the filter used in the decomposition. The reconstruction routines use this component to find out what filter to use in reconstruction. } \item{wNLx}{The object will probably contain many components with names of this form. These are all the wavelet coefficients of the decomposition. In "wNLx" the "N" refers to the level number and the "x" refers to the direction of the coefficients with "1" being horizontal, "2" being vertical, "3" being diagonal and "4" corresponding to scaling function coefficients at the given resolution level. Note that the levels should be in numerically decreasing order, so if nlevelsWT is 5, then there will be w5L1, w5L2, w5L3 first, then down to w1L1, w1L2, and w1L3. Note that these coefficients store their data according to the \code{\link{first.last}} database \code{fl.dbase$first.last.d}, so refer to them using this. Note that if \code{type="wavelet"} then images at level N are subimages of side length \code{2^N} pixels. If the type component is \code{"station"} then each coefficient subimage is of the same dimension as the input image used to create this object.} \item{w0Lconstant}{This is the bottom level scaling function coefficient. So, for example, if you used Haar wavelets this would be the sample mean of the data (scaled by some factor depending on the number of levels, nlevelsWT).} \item{bc}{This component details how the boundaries were treated in the decomposition.} } \section{GENERATION}{ This class of objects is returned from the \code{\link{imwd}} function to represent a two-dimensional (possibly space-ordered non-decimated) wavelet decomposition of a function. Many other functions return an object of class imwd.
} \section{METHODS}{ The imwd class of objects has methods for the following generic functions: \code{\link{compress}}, \code{\link{draw}}, \code{\link{imwr}}, \code{\link{nullevels.imwd}}, \code{\link{plot}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{threshold.imwd}}. } \section{RELEASE}{ Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{imwd}} } \keyword{classes} \keyword{smooth} \author{G P Nason} wavethresh/man/print.wp.rd0000644000176200001440000000267314211622540015322 0ustar liggesusers\name{print.wp} \alias{print.wp} \title{Print out information about a wp object in readable form. } \description{ This function prints out information about an \code{\link{wp.object}} in a nice human-readable form. Note that this function is automatically called by SPlus whenever the name of an \code{\link{wp.object}} is typed or whenever such an object is returned to the top level of the S interpreter. } \usage{ \method{print}{wp}(x, ...) } \arguments{ \item{x}{An object of class \code{\link{wp}} that you wish to print out.} \item{\dots}{This argument actually does nothing in this function!} } \details{ Prints out information about \code{\link{wp}} objects in nice readable format. } \value{ The last thing this function does is call \code{\link{summary.wp}} so the return value is whatever is returned by this function.} \section{RELEASE}{Version 3.0 Copyright Guy Nason 1994 } \seealso{ \code{\link{wp.object}}, \code{\link{summary.wp}}. } \examples{ # # Generate a wp object. # tmp <- wp(rnorm(32)) # # Now get Splus to use print.wp # tmp # # Now get Splus to use print.wp # # tmp # Class 'wp' : Wavelet Packet Object: # ~~ : List with 4 components with names # wp nlevelsWT filter date # # $wp is the wavelet packet matrix # # Created on : Fri Oct 23 19:59:01 1998 # # summary(.): # ---------- # Levels: 5 # Length of original: 32 # Filter was: Daub cmpct on least asymm N=10 } \keyword{manip} \author{G P Nason} wavethresh/man/dencvwd.rd0000644000176200001440000000470614211622540015172 0ustar liggesusers\name{dencvwd} \alias{dencvwd} \title{ Calculate variances of wavelet coefficients of a p.d.f. } \usage{ dencvwd(hrproj, filter.number=hrproj$filter$filter.number, family=hrproj$filter$family, type="wavelet", bc="zero", firstk=hrproj$klim, RetFather=TRUE, verbose=FALSE) } \arguments{ \item{hrproj}{Output from \code{\link{denproj}} with the \code{covar=TRUE} argument.} \item{filter.number}{The filter number of the wavelet basis to be used. This argument should not be altered from the default, as it is tied to the \code{hrproj} argument} \item{family}{The family of wavelets to use. This argument should not be altered.} \item{type}{The type of decomposition to be performed. This argument should not be altered.} \item{bc}{The type of boundary conditions to be used. For density estimation this should always be zero.} \item{firstk}{The bounds on the translation index of the empirical scaling function coefficients.} \item{RetFather}{Ignore this.} \item{verbose}{If TRUE the function will be chatty. Note that comments are only available for part of the algorithm, so might not be very enlightening.} } \description{ Calculates the variances of the empirical wavelet coefficients by performing a 2D wavelet decomposition on the covariance matrix of the empirical scaling function coefficients of the probability density function. } \details{ This function is basically \code{\link{imwd}} adapted to handle zero boundary conditions, except that only the variances are returned, i.e.
the diagonals of the covariance matrices produced. Note that this code is not very efficient. The full covariance matrices of all levels of coefficients are calculated, and then the diagonals are extracted. } \value{ An object of class \code{\link{wd.object}}, but the contents are not a standard wavelet transform, i.e. the object is used to hold other information which organisationally is arranged like a wavelet transform, i.e. variances of coefficients. } \seealso{\code{\link{denproj}},\code{\link{imwd}}} \examples{ # Simulate data from the claw density, find the # empirical scaling function coefficients and covariances and then decompose # both to give wavelet coefficients and their variances. data <- rclaw(100) datahr <- denproj(data, J=8, filter.number=2,family="DaubExPhase", covar=TRUE) data.wd <- denwd(datahr) \dontrun{plotdenwd(data.wd, top.level=(datahr$res$J-1))} datavar <- dencvwd(datahr) \dontrun{plotdenwd(datavar, top.level=(datahr$res$J-1))} } \author{David Herrick} \keyword{smooth} wavethresh/man/PsiJ.rd0000644000176200001440000001536214211622540014405 0ustar liggesusers\name{PsiJ} \alias{PsiJ} \title{Compute discrete autocorrelation wavelets.} \description{ This function computes discrete autocorrelation wavelets. The inner products of the discrete autocorrelation wavelets are computed by the routine \code{\link{ipndacw}}. } \usage{ PsiJ(J, filter.number = 10, family = "DaubLeAsymm", tol = 1e-100, OPLENGTH=10^7, verbose=FALSE) } \arguments{ \item{J}{Discrete autocorrelation wavelets will be computed for scales -1 up to scale J. This number should be a negative integer.} \item{filter.number}{The index of the wavelet used to compute the discrete autocorrelation wavelets.} \item{family}{The family of wavelet used to compute the discrete autocorrelation wavelets.} \item{tol}{In the brute force computation for Daubechies compactly supported wavelets many inner product computations are performed. This tolerance discounts any results which are smaller than \code{tol} which effectively defines how long the inner product/autocorrelation products are.} \item{OPLENGTH}{This integer variable defines some workspace of length OPLENGTH. The code uses this workspace. If the workspace is not long enough then the routine will stop and probably tell you what OPLENGTH should be set to.} \item{verbose}{If \code{TRUE} then informative error messages are printed.} } \details{ This function computes the discrete autocorrelation wavelets. It does not have any direct use for time-scale analysis (e.g. \code{\link{ewspec}}). However, it is useful to be able to numerically compute the discrete autocorrelation wavelets for arbitrary wavelets and scales as there are still unanswered theoretical questions concerning the wavelets. The method is brute force -- a more elegant solution would probably be based on interpolatory schemes. \bold{Horizontal scale}. This routine returns only the values of the discrete autocorrelation wavelets and not their horizontal positions. Each discrete autocorrelation wavelet is compactly supported with the support determined from the compactly supported wavelet that generates it. See the paper by Nason, von Sachs and Kroisandt which defines the horizontal scale (but basically the finer scale discrete autocorrelation wavelets are interpolated versions of the coarser ones. When one goes from scale j to j-1 (negative j remember) an extra point is inserted between all of the old points and the discrete autocorrelation wavelet value is computed there.)
Thus as J tends to negative infinity the numerical approximation tends towards the continuous autocorrelation wavelet. This function stores any discrete autocorrelation wavelet sets that it computes. The storage mechanism is not as advanced as that for \code{\link{ipndacw}} and its subsidiary routines \code{\link{rmget}} and \code{\link{firstdot}} but helps a little bit. The \code{\link{Psiname}} function defines the naming convention for objects returned by this function. Sometimes it is useful to have the discrete autocorrelation wavelets stored in matrix form. The \code{\link{PsiJmat}} does this. Note: intermediate calculations are stored in a user-visible environment called \code{\link{WTEnv}}. Previous versions of wavethresh stored this in the user's default data space (\code{.GlobalEnv}) but wavethresh did not ask permission nor notify the user. You can make these objects persist if you wish. } \value{ A list containing -J components, numbered from 1 to -J. The [[j]]th component contains the discrete autocorrelation wavelet at scale j. } \references{Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. \code{echnical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern.} \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{ewspec}}, \code{\link{ipndacw}}, \code{\link{PsiJmat}}, \code{\link{Psiname}}. } \examples{ # # Let us create the discrete autocorrelation wavelets for the Haar wavelet. # We shall create up to scale 4. # PsiJ(-4, filter.number=1, family="DaubExPhase") #Computing PsiJ #Returning precomputed version #Took 0.00999999 seconds #[[1]]: #[1] -0.5 1.0 -0.5 # #[[2]]: #[1] -0.25 -0.50 0.25 1.00 0.25 -0.50 -0.25 # #[[3]]: # [1] -0.125 -0.250 -0.375 -0.500 -0.125 0.250 0.625 1.000 0.625 0.250 #[11] -0.125 -0.500 -0.375 -0.250 -0.125 # #[[4]]: # [1] -0.0625 -0.1250 -0.1875 -0.2500 -0.3125 -0.3750 -0.4375 -0.5000 -0.3125 #[10] -0.1250 0.0625 0.2500 0.4375 0.6250 0.8125 1.0000 0.8125 0.6250 #[19] 0.4375 0.2500 0.0625 -0.1250 -0.3125 -0.5000 -0.4375 -0.3750 -0.3125 #[28] -0.2500 -0.1875 -0.1250 -0.0625 # # You can plot the fourth component to get an idea of what the # autocorrelation wavelet looks like. # # Note that the previous call stores the autocorrelation wavelet # in Psi.4.1.DaubExPhase. This is mainly so that it doesn't have to # be recomputed. # # Note that the x-coordinates in the following are approximate. # \dontrun{plot(seq(from=-1, to=1, length=length(Psi.4.1.DaubExPhase[[4]])), Psi.4.1.DaubExPhase[[4]], type="l", xlab = "t", ylab = "Haar Autocorrelation Wavelet")} # # # Now let us repeat the above for the Daubechies Least-Asymmetric wavelet # with 10 vanishing moments. # We shall create up to scale 6, a higher resolution version than last # time. # p6 <- PsiJ(-6, filter.number=10, family="DaubLeAsymm", OPLENGTH=5000) p6 ##[[1]]: # [1] 3.537571e-07 5.699601e-16 -7.512135e-06 -7.705013e-15 7.662378e-05 # [6] 5.637163e-14 -5.010016e-04 -2.419432e-13 2.368371e-03 9.976593e-13 #[11] -8.684028e-03 -1.945435e-12 2.605208e-02 6.245832e-12 -6.773542e-02 #[16] 4.704777e-12 1.693386e-01 2.011086e-10 -6.209080e-01 1.000000e+00 #[21] -6.209080e-01 2.011086e-10 1.693386e-01 4.704777e-12 -6.773542e-02 #[26] 6.245832e-12 2.605208e-02 -1.945435e-12 -8.684028e-03 9.976593e-13 #[31] 2.368371e-03 -2.419432e-13 -5.010016e-04 5.637163e-14 7.662378e-05 #[36] -7.705013e-15 -7.512135e-06 5.699601e-16 3.537571e-07 # #[[2]] # scale 2 etc. etc. 
# #[[3]] scale 3 etc. etc. # #scales [[4]] and [[5]]... # #[[6]] #... # remaining scale 6 elements... #... #[2371] -1.472225e-31 -1.176478e-31 -4.069848e-32 -2.932736e-41 6.855259e-33 #[2376] 5.540202e-33 2.286296e-33 1.164962e-42 -3.134088e-35 3.427783e-44 #[2381] -1.442993e-34 -2.480298e-44 5.325726e-35 9.346398e-45 -2.699644e-36 #[2386] -4.878634e-46 -4.489527e-36 -4.339365e-46 1.891864e-36 2.452556e-46 #[2391] -3.828924e-37 -4.268733e-47 4.161874e-38 3.157694e-48 -1.959885e-39 ## # Let's now plot the 6th component (6th scale, this is the finest # resolution, all the other scales will be coarser representations) # # # Note that the x-coordinates in the following are non-existent! # \dontrun{ts.plot(p6[[6]], xlab = "t", ylab = "Daubechies N=10 least-asymmetric Autocorrelation Wavelet")} } \keyword{manip} \author{G P Nason} wavethresh/man/plot.imwd.rd0000644000176200001440000000515014211622540015447 0ustar liggesusers\name{plot.imwd} \alias{plot.imwd} \alias{plot.imwdc} \title{Draw a picture of the 2D wavelet coefficients using image} \usage{ \method{plot}{imwd}(x, scaling = "by.level", co.type = "abs", package = "R", plot.type = "mallat", arrangement = c(3, 3), transform = FALSE, tfunction = sqrt, ...) \method{plot}{imwdc}(x, verbose=FALSE, ...) } \arguments{ \item{x}{The 2D imwd object you wish to depict} \item{scaling}{How coefficient scaling is performed. The options are \code{by.level} to scale the coefficients independently by level, anything else causes coefficients to be scaled globally} \item{co.type}{Can be \code{"abs"} for the absolute values of the coefficients to be plotted, can be \code{"mabs"} for the negative absolute values or \code{"none"} for none of this.} \item{package}{Can be \code{"R"} for the R package, or \code{"S"}. The latter does less interesting things and results in a simpler plot} \item{plot.type}{If this argument is \code{"mallat"} the coefficients at different scales and orientations are packed into one image and plotted, a format originating from Mallat's early papers on this. The other possibility is \code{"cols"} which plots each combination of scale and direction on a separate plot. This latter format is useful for examining coefficients, especially at the coarser scales.} \item{arrangement}{If \code{plot.type="cols"} then this argument specifies how many rows and columns there are in the plot array.} \item{transform}{If FALSE then the coefficients are plotted as they are (subject to the \code{co.type} argument above), if TRUE then the transform function supplied by \code{tfunction} is applied to the coefficients.} \item{tfunction}{If \code{transform=TRUE} then this function gets applied to transform the coefficients before plotting} \item{verbose}{Print out informative messages} \item{...}{Supply other arguments to the call to the \code{image} function. This is very useful to, e.g., change the colours or other aspects of the image} } \description{ This function images the absolute values of the 2D discrete wavelet transform coefficients arising from an \code{\link{imwd.object}} object. } \details{ Description says all } \value{ If the \code{package="S"} argument is set then a matrix is returned containing the image that would have been plotted (and this only works if the \code{plot.type="mallat"} argument is set also).
} \seealso{\code{\link{imwd}}, \code{\link{imwd.object}}, \code{\link{threshold.imwd}}} \examples{ data(lennon) lwd <- imwd(lennon) \dontrun{plot(lwd)} \dontrun{plot(lwd, col=grey(seq(from=0, to=1, length=100)), transform=TRUE)} } \author{G P Nason} \keyword{hplot} wavethresh/man/accessD.wd.rd0000644000176200001440000000627514211622540015521 0ustar liggesusers\name{accessD.wd} \alias{accessD.wd} \title{Get detail (mother wavelet) coefficients from wavelet object (wd).} \description{ This function extracts and returns a vector of mother wavelet coefficients, corresponding to a particular resolution level, from a \code{\link{wd}} wavelet decomposition object. The pyramid of coefficients in a wavelet decomposition (returned from the \code{\link{wd}} function, say) are packed into a single vector in WaveThresh. } \usage{ \method{accessD}{wd}(wd, level, boundary=FALSE, aspect="Identity", \dots) } \arguments{ \item{wd}{Wavelet decomposition object from which you wish to extract the mother wavelet coefficients.} \item{level}{The resolution level at which you wish to extract coefficients.} \item{boundary}{some methods of wavelet transform computation handle the boundaries by keeping some extra bookkeeping coefficients at either end of a resolution level. If this argument is TRUE then these bookkeeping coefficients are returned when the mother wavelets are returned. Otherwise, if FALSE, these coefficients are not returned.} \item{aspect}{The aspect argument permits the user to supply a function to modify the returned coefficients. The function is applied to the vector of coefficients before it is returned. This can be useful, say, with the complex DWT where you could supply aspect="Mod" if you wanted to return the modulus of the coefficients at a given resolution level. The default argument, "Identity", ensures that the coefficients are not modified before returning.} \item{\dots}{any other arguments} } \details{ The need for this function is a consequence of the pyramidal structure of Mallat's algorithm and the memory efficiency gain achieved by storing the pyramid as a linear vector. AccessD obtains information about where the smoothed data appears from the \code{fl.dbase} component of an \code{\link{wd}} object, in particular the array \code{fl.dbase$first.last.d} which gives a complete specification of index numbers and offsets for \code{wd.object$D}. Note that this function is a method for the generic function \code{\link{accessD}}. Note also that this function only retrieves information from \code{\link{wd}} class objects. To insert coefficients into \code{\link{wd}} objects you have to use the \code{\link{putD}} function (or more precisely, the \code{\link{putD.wd}} method). } \value{ A vector containing the mother wavelet coefficients at the required resolution level (the coefficients might have been modified depending on the value of the aspect argument). } \references{ Mallat, S. G. (1989) A theory for multiresolution signal decomposition: the wavelet representation. \emph{IEEE Transactions on Pattern Analysis and Machine Intelligence}. \bold{11}, 674--693. Nason, G. P. and Silverman, B. W. (1994). The discrete wavelet transform in S. \emph{Journal of Computational and Graphical Statistics}, \bold{3}, 163--191 } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wr}}, \code{\link{wd}}, \code{\link{accessD}}, \code{\link{filter.select}}, \code{\link{threshold}} } \examples{ # # Get the 4th resolution level of wavelet coefficients. 
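# (In the example below, rnorm(128) simulates a data vector of length 128 = 2^7, wd() takes its
# discrete wavelet transform, and accessD() then extracts the mother wavelet coefficients at
# resolution level 4; for the default periodic transform there are 2^4 = 16 of them, and the
# detail levels for this object run from 0, the coarsest, to 6, the finest.)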
# dat <- rnorm(128) accessD(wd(dat), level=4) } \keyword{manip} \author{G P Nason} wavethresh/man/bestm.rd0000644000176200001440000000405214211622540014644 0ustar liggesusers\name{bestm} \alias{bestm} \title{ Function called by makewpstRO to identify which packets are individually good for correlating with a response } \description{ This function is used when you have a huge number of packets and you want to identify which ones are, individually, good candidates for predicting a response. } \usage{ bestm(w2mobj, y, percentage = 50) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{w2mobj}{ The w2m object that contains the packets you wish to preselect } \item{y}{ The response time series } \item{percentage}{ The percentage of the w2m packets that you wish to select } } \details{ This function naively addresses a very common problem. The object w2mobj contains a huge number of variables which might shed some light on the response object \code{y}. The problem is that the dimensionality of \code{w2mobj} is larger than the length of the series \code{y}. The solution here is to choose a large, but not huge, subset of the variables that might be potentially useful in correlating with \code{y}, discard the rest, and return the "best" or preselected variables. Then the dimensionality is reduced and more sophisticated methods can be used to perform better quality modelling of the response \code{y} on the packets in \code{w2mobj}. } \value{ A list of class w2m with the following components: \item{m}{A matrix containing the selected packets (as columns), reordered so that the best packets come first} \item{ixvec}{A vector which indexes the best packets into the original supplied matrix} \item{pktix}{The original wavelet packet indices corresponding to each packet} \item{level}{As \code{pktix} but for the wavelet packet levels} \item{nlevelsWT}{The number of resolution levels in the original wavelet packet object} \item{cv}{The ordered correlations} } \author{ G P Nason } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{\code{\link{makewpstRO}}} % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{manip} wavethresh/man/wr.mwd.rd0000644000176200001440000000105414211622634014753 0ustar liggesusers\name{wr.mwd} \alias{wr.mwd} \title{Multiple wavelet reconstruction for mwd objects} \usage{ \method{wr}{mwd}(...) } \arguments{ \item{\dots}{Arguments to the \code{\link{mwr}} function.} } \description{ This function is a method for the generic \code{\link{wr}} function to apply the inverse multiple wavelet transform to \code{\link{mwd.object}} objects. } \details{ The function is merely a wrapper for \code{\link{mwr}}. } \value{ The same return value as for \code{\link{mwr}}. } \seealso{ \code{\link{mwd}}, \code{\link{mwr}}} \author{Tim Downie} \keyword{math} wavethresh/man/numtonv.rd0000644000176200001440000000734714211622540015246 0ustar liggesusers\name{numtonv} \alias{numtonv} \title{Convert an index number into a node vector object.} \description{ Convert an index number into a \code{node vector} object. } \usage{ numtonv(number, nlevels) } \arguments{ \item{number}{The index number of a particular basis within a wavelet object.} \item{nlevels}{The number of levels that the wavelet object has (can often be discovered using the \code{\link{nlevels}} function). } } \details{ A basis within a (e.g.
non-decimated) wavelet object (such as a \code{\link{wst.object}}) is represented in WaveThresh by a \code{nv} or node vector. A packet-ordered non-decimated wavelet transform object (\code{\link{wst}} for short) which is the transform of a vector of length \code{n} contains \code{n} bases. Each basis can be indexed from 0 to \code{(n-1)}. A \code{\link{wst.object}} is simply a fully populated binary tree. There are nlevels levels in the tree with a split at each level. The root of the tree is at level 0, there are two branches at level 1, four at level 2, eight at level 3 and so on. A path through the tree can be constructed by starting at the root and choosing "left" or "right" at each possible branch. For certain data situations this path is constructed using minimum entropy algorithms (for example \code{\link{MaNoVe}}). This function (numtonv) takes the numerical representation of a path and converts it into a \code{node.vector} form suitable for passing to \code{\link{InvBasis}} to invert the representation according to a basis specified by number. The least significant digit in number corresponds to deciding on the left/right decision at the fine leaves of the tree (high-frequency structure) and the most significant digit in number corresponds to deciding on the left/right decision at the root. Therefore gradually incrementing number from 0 to \code{2^{nlevels}-1} steps through all possible bases in the \code{\link{wst}} object ranging from all decisions being made "left" to all decisions being made "right". The "number" divided by \code{2^{nlevels}} corresponds exactly to the binary number epsilon in Nason and Silverman (1995). } \value{ An object of class \code{nv} (node vector). This contains information about a path through a wavelet object (a basis in a wavelet object). } \section{RELEASE}{Version 3.6.0 Copyright Guy Nason 1995 } \seealso{ \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{MaNoVe}}, \code{\link{nv.object}}, \code{\link{InvBasis}}, \code{\link{nlevels}}. } \examples{ # # Generate some test data # test.data <- example.1()$y # # Make it noisy # ynoise <- test.data + rnorm(512, sd=0.1) # # Do packet ordered non-decimated wavelet transform # ynwst <- wst(ynoise) # # Now threshold the coefficients # ynwstT <- threshold(ynwst) # # Select basis number 9 (why not?) # NodeVector9 <- numtonv(9, nlevelsWT(ynwstT)) # # Let's print it out to see what it looks like # (nb, if you're repeating this example, the basis might be different # as you may have generated different pseudo random noise to me) # NodeVector9 # Level : 8 Action is R (getpacket Index: 1 ) # Level : 7 Action is L (getpacket Index: 2 ) # Level : 6 Action is L (getpacket Index: 4 ) # Level : 5 Action is R (getpacket Index: 9 ) # Level : 4 Action is L (getpacket Index: 18 ) # Level : 3 Action is L (getpacket Index: 36 ) # Level : 2 Action is L (getpacket Index: 72 ) # Level : 1 Action is L (getpacket Index: 144 ) # Level : 0 Action is L (getpacket Index: 288 ) # There are 9 reconstruction steps # # The print-out describes the tree through ynwstT that corresponds to # basis 9. # # The NodeVector9 and ynwstT objects could now be supplied to # InvBasis.wst for inverting ynwstT according # to the NodeVector9 or basis number 9. } \keyword{algebra} \author{G P Nason} wavethresh/man/plot.mwd.rd0000644000176200001440000001267514211622540015306 0ustar liggesusers\name{plot.mwd} \alias{plot.mwd} \title{Use plot on an mwd object. } \description{ Plots the wavelet coefficients of a \code{\link{mwd}} class object.
} \usage{ \method{plot}{mwd}(x, first.level = 1, main = "Wavelet Decomposition Coefficients", scaling = "compensated", rhlab = FALSE, sub = x$filter$name, NotPlotVal = 0.05, xlab = "Translate", ylab = "Resolution level", return.scale = TRUE, colour = (2:(npsi + 1)), \dots) } \arguments{ \item{x}{The \code{\link{mwd}} object whose coefficients you wish to plot.} \item{first.level}{The first resolution level to begin plotting at. This argument can be quite useful when you want to suppress some of the coarser levels in the diagram.} \item{main}{The main title of the plot.} \item{scaling}{How you want the coefficients to be scaled. The options are: "\code{global}" - one scale factor is chosen for the whole plot. The scale factor depends on the coefficient to be included on the plot that has the largest absolute value. The global option is useful when comparing coefficients that might appear anywhere in the plot; "\code{by.level}" - a scale factor is chosen for each resolution level in the plot. The scale factor for a level depends on the coefficient in that level that has the largest absolute value. The "\code{by.level}" option is useful when you wish to compare coefficients within a resolution level. The other option is "\code{compensated}", which is the same as "\code{global}" except that finer scales' coefficients are scaled up by a factor of SQRT(2). This latter option is sometimes useful. } \item{rhlab}{If \code{TRUE} then a set of labels is produced on the right hand axis. The axis labels in this case refer to the scale factors used to scale each level and correspond to the value of the largest coefficient (in absolute value) in each scale (when \code{scaling=="by.level"}) or absolutely (when \code{scaling="global"}). If the \code{rhlab} argument is \code{FALSE} then no right hand axis labels are produced.} \item{sub}{A subtitle for the plot.} \item{NotPlotVal}{Doesn't seem to be implemented.} \item{xlab}{A title for the x-axis} \item{ylab}{A title for the y-axis} \item{return.scale}{If true (default) the scale for each resolution level is returned} \item{colour}{A vector of length \code{mwd$npsi}, the values of which are the colours used to plot the coefficients, one for each distinct type of wavelet (with apologies to our American cousins for spelling colour correctly!)} \item{\dots}{other arguments to be supplied to plot.} } \details{ Produces a plot similar to the ones in Donoho and Johnstone, 1994. Wavelet coefficients for each resolution level are plotted one above the other, with the high resolution coefficients at the bottom, and the low resolution at the top. Each vector is represented by \code{mwd$npsi} lines, one for each element in the coefficient vector. If colour is supported by the device each element will be represented by a different colour. The coefficients are plotted using the \code{segments} function, with a large positive coefficient being plotted above an imaginary horizontal centre line, and a large negative coefficient plotted below it. The position of a coefficient along a line is indicative of the wavelet basis function's translate number. The resolution levels are labelled on the left-hand side axis, and the maximum values of the absolute values of the coefficients for the particular level form the right-hand side axis. The levels of coefficients can be scaled in three ways. If you are not interested in comparing the relative scales of coefficients from different levels, then the default scaling option, "\code{by.level}" is what you need.
This computes the maximum of the absolute value of the coefficients at a particular level and scales them so that they fit nicely onto the plot. For this option, each level is scaled \bold{DIFFERENTLY}. To obtain a uniform scale for all the levels specify the "\code{global}" option to the \code{scaling} argument. This will allow you to make inter-level comparisons. } \value{ Axis labels for each resolution level unless \code{return.scale=FALSE} when \code{NULL} is returned. The axis values are the maximum of the absolute value of the coefficients at that resolution level. They are returned because they are sometimes hard to read on the plot. } \note{A plot of the coefficients contained within the \code{\link{mwd}} object at each resolution level is produced.} \section{RELEASE}{ Version 3.9.6 (Although Copyright Tim Downie 1995-6). } \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Decompose test.data with multiple wavelet transform and # plot the wavelet coefficients # tdmwd <- mwd(test.data) \dontrun{plot(tdmwd)} #[1] 1.851894 1.851894 1.851894 1.851894 1.851894 1.851894 1.851894 # # You should see a plot with wavelet coefficients like in # plot.wd but at each coefficient position # there are two coefficients in two different colours one for each of # the wavelets at that position. # # Note the scale for each level is returned by the function. } \keyword{hplot} \author{G P Nason} wavethresh/man/summary.wst2D.rd0000644000176200001440000000142314211622634016234 0ustar liggesusers\name{summary.wst2D} \alias{summary.wst2D} \title{Print out some basic information associated with a wst2D object} \usage{ \method{summary}{wst2D}(object, \dots) } \arguments{ \item{object}{The object to print a summary about} \item{\dots}{Other arguments} } \description{ Prints out the number of levels, the dimensions of the original image from which the object came, the type of wavelet filter associated with the decomposition, and the date of production. } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{wst2D}}} \examples{ m <- matrix(rnorm(32*32), nrow=32) mwst2D <- wst2D(m) summary(mwst2D) #Levels: 5 #Length of original: 32 x 32 #Filter was: Daub cmpct on least asymm N=10 #Date: Mon Mar 8 21:57:55 2010 } \author{G P Nason} \keyword{print} wavethresh/man/CWavDE.rd0000644000176200001440000000425614211622540014611 0ustar liggesusers\name{CWavDE} \alias{CWavDE} \title{Simple wavelet density estimator with hard thresholding} \usage{ CWavDE(x, Jmax, threshold=0, nout=100, primary.resolution=1, filter.number=10, family="DaubLeAsymm", verbose=0, SF=NULL, WV=NULL) } \arguments{ \item{x}{Vector of real numbers.
This is the data for which you want a density estimate for} \item{Jmax}{The maximum resolution of wavelets} \item{threshold}{The hard threshold value for the wavelet coefficients} \item{nout}{The number of ordinates in the density estimate} \item{primary.resolution}{The usual wavelet density estimator primary resolution} \item{filter.number}{The wavelet filter number, see \code{\link{filter.select}}} \item{family}{The wavelet family, see \code{\link{filter.select}}} \item{verbose}{The level of reporting performed by the function, legit values are 0, 1 or 2, with 2 being more reports} \item{SF}{Scaling function values in format as returned by \code{\link{draw.default}}} \item{WV}{Wavelet function values in format as returned by \code{\link{draw.default}}} } \description{ This function implements the density estimator with hard thresholding described by Hall, P. and Patil, P. (1995) Formulae for mean integrated squared error of nonlinear wavelet-based density estimators, \emph{Ann. Statist.}, \bold{23}, 905-928. } \details{ As the description. } \value{ A list containing the following components: \item{x}{A vector of length \code{nout} that covers the range of the input data \code{x}, plus some more depending on the support of the wavelet and the primary resolution.} \item{y}{A vector of length \code{nout} that contains the output wavelet density estimate} \item{sfix}{The integer values of the translates of the scaling functions used in the estimate} \item{wvixmin}{As for sfix, but a vector of length \code{Jmax} which contains the minimum integer wavelet translates} \item{wvixmax}{As for wvixmin, but with the maxima} } \examples{ # # Let's generate a bi-modal artificial set of data. # x <- c( rnorm(100), rnorm(100, 10)) # # Now perform simple wavelet density estimate # wde <- CWavDE(x, Jmax=10, threshold=1) # # Plot results # \dontrun{plot(wde$x, wde$y, type="l")} } \author{G P Nason} \keyword{smooth} wavethresh/man/MaNoVe.rd0000644000176200001440000000140414211622540014655 0ustar liggesusers\name{MaNoVe} \alias{MaNoVe} \title{Make Node Vector (using Coifman-Wickerhauser best-basis type algorithm)} \usage{ MaNoVe(\dots) } \arguments{ \item{\dots}{Methods may have other arguments} } \description{ This generic function chooses a ``best-basis'' using the Coifman-Wickerhauser (1992) algorithm. This function is generic. Particular methods exist: \code{\link{MaNoVe.wp}} and \code{\link{MaNoVe.wst}}. } \details{ Description says all. } \value{ A node vector, which describes a particular basis specification relevant to the kind of object that the function was applied to. } \seealso{ \code{\link{MaNoVe.wp}}, \code{\link{MaNoVe.wst}}, \code{\link{wp.object}}, \code{\link{wst.object}}, \code{\link{wp}}, \code{\link{wst}}} \author{G P Nason} \keyword{smooth} wavethresh/man/irregwd.objects.rd0000644000176200001440000000360414211622540016627 0ustar liggesusers\name{irregwd.objects} \alias{irregwd.objects} \title{Irregular wavelet decomposition objects.} \description{ These are objects of classes \code{wd} They represent a decomposition of a function with respect to a wavelet basis. The function will have been interpolated to a grid and these objects represent the discrete wavelet transform \code{\link{wd}}. } \section{GENERATION}{ This class of objects is returned from the \code{\link{irregwd}} function. Some other functions that process these kinds of objects also return this class of object (such as \code{\link{threshold.irregwd}}.) 
} \section{METHODS}{ The \code{irregwd} class of objects has methods for the following generic functions: \code{\link{plot}}, \code{\link{threshold}}, } \section{STRUCTURE}{ All components in a legitimate `irregwd' are identical to the components in an ordinary \code{\link{wd.object}} with the exception of \code{type} component and with the addition of the following component: \describe{ \item{c}{vector that aids in the calculation of variances of wavelet coefficients (used by \code{\link{threshold.irregwd}}).} } } \details{ To retain your sanity the C and D coefficients should be extracted by the \code{\link{accessC}} and \code{\link{accessD}} functions and inserted using the \code{\link{putC}} and \code{\link{putD}} functions (or more likely, their methods), rather than by the \code{$} operator. One can use the \code{\link{accessc}} function to obtain the \code{c} component. Mind you, if you want to muck about with coefficients directly, then you'll have to do it yourself by working out what the fl.dbase list means (see \code{\link{first.last}} for a description.) } \section{RELEASE}{ Version 3.9.4 Copyright Arne Kovac 1997, Help Copyright Guy Nason 2004 } \seealso{ \code{\link{irregwd}}, \code{\link{threshold.irregwd}}, \code{\link{plot.irregwd}},\code{\link{wd}} } \keyword{smooth} \author{G P Nason} wavethresh/man/wpst.rd0000644000176200001440000000401314211622634014530 0ustar liggesusers\name{wpst} \alias{wpst} \title{Non-decimated wavelet packet transform. } \description{ This function computes the non-decimated wavelet packet transform as described by Nason, Sapatinas and Sawczenko, 1998. The non-decimated wavelet packet transform (NWPT) contains all possible shifted versions of the wavelet packet transform. } \usage{ wpst(data, filter.number=10, family="DaubLeAsymm", FinishLevel) } \arguments{ \item{data}{A vector containing the data you wish to decompose. The length of this vector must be a power of 2.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{FinishLevel}{At which level to stop decomposing. The full decomposition decomposes to level 0, but you could stop earlier.} } \details{ This function computes the packet-ordered non-decimated wavelet packet transform of data as described by Nason, Sapatinas and Sawczenko, 1998. It assumes periodic boundary conditions. The order of computation of the NWPT is \eqn{O(n^2)} if n is the number of input data points. Packets can be extracted from the \code{wpst.object} produced by this function using the \code{\link{getpacket.wpst}} function. Whole resolution levels of non-decimated wavelet packet coefficients in time order can be obtained by using the \code{\link{accessD.wpst}} function. } \value{ An object of class \code{\link{wpst}} containing the discrete packet-ordered non-decimated wavelet packet coefficients. 
} \section{RELEASE}{Version 3.8.8 Copyright Guy Nason 1997 } \seealso{ \code{\link{accessD}}, \code{\link{accessD.wpst}}, \code{\link{filter.select}}, \code{\link{getpacket}}, \code{\link{getpacket.wpst}}, \code{\link{makewpstDO}} } \examples{ v <- rnorm(128) vwpst <- wpst(v) } \keyword{math} \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/threshold.imwd.rd0000644000176200001440000002231114211622634016467 0ustar liggesusers\name{threshold.imwd} \alias{threshold.imwd} \title{Threshold two-dimensional wavelet decomposition object} \description{ This function provides various ways to threshold an \code{\link{imwd}} class object. } \usage{ \method{threshold}{imwd}(imwd, levels = 3:(nlevelsWT(imwd) - 1), type = "hard", policy = "universal", by.level = FALSE, value = 0, dev = var, verbose = FALSE, return.threshold = FALSE, compression = TRUE, Q = 0.05, \dots) } \arguments{ \item{imwd}{The two-dimensional wavelet decomposition object that you wish to threshold.} \item{levels}{a vector of integers which determines which scale levels are thresholded in the decomposition. Each integer in the vector must refer to a valid level in the \code{\link{imwd}} object supplied. This is usually any integer from 0 to \code{\link{nlevelsWT}}(wd)-1 inclusive. Only the levels in this vector contribute to the computation of the threshold and its application (except for the \code{fdr} policy). } \item{type}{determines the type of thresholding: this can be "\code{hard}" or "\code{soft}".} \item{policy}{selects the technique by which the threshold value is selected. Each policy corresponds to a method in the literature. At present the different policies are: "\code{universal}", "\code{manual}", "\code{fdr}", "\code{probability}". The policies are described in detail below.} \item{by.level}{If FALSE then a global threshold is computed on and applied to all scale levels defined in levels. If TRUE a threshold is computed and applied separately to each scale level.} \item{value}{This argument conveys the user supplied threshold. If the \code{policy="manual"} then value is the actual threshold value; if \code{policy="probability"} then \code{value} conveys the user supplied quantile level.} \item{dev}{this argument supplies the function to be used to compute the spread of the absolute values of the coefficients. The function supplied must return a value of spread on the variance scale (i.e. not standard deviation) such as the \code{var()} function. A popular, useful and robust alternative is the \code{\link{madmad}} function.} \item{verbose}{if TRUE then the function prints out informative messages as it progresses.} \item{return.threshold}{If this option is TRUE then the actual \emph{value} of the threshold is returned. If this option is FALSE then a thresholded version of the input is returned.} \item{compression}{If this option is TRUE then this function returns a compressed two-dimensional wavelet transform object of class \code{imwdc}. This can be useful as the resulting object will be smaller than if it was not compressed. The compression makes use of the fact that many coefficients in a thresholded object will be exactly zero. If this option is FALSE then a larger \code{\link{imwd}} object will be returned.} \item{Q}{Parameter for the false discovery rate \code{"fdr"} policy. } \item{\dots}{any other arguments} } \details{ This function thresholds or shrinks wavelet coefficients stored in an \code{\link{imwd}} object and by default returns the coefficients in a modified \code{imwdc} object.
See the seminal papers by Donoho and Johnstone for explanations about thresholding. For a gentle introduction to wavelet thresholding (or shrinkage as it is sometimes called) see Nason and Silverman, 1994. For more details on each technique see the descriptions of each method below. The basic idea of thresholding is very simple. In a signal plus noise model the wavelet transform of an image is very sparse, the wavelet transform of noise is not (in particular, if the noise is iid Gaussian then so is the noise contained in the wavelet coefficients). Thus, since the image gets concentrated in few wavelet coefficients and the noise remains "spread" out it is "easy" to separate the signal from noise by keeping large coefficients (which correspond to true image) and deleting the small ones (which correspond to noise). However, one has to have some idea of the noise level (computed using the dev option in threshold functions). If the noise level is very large then it is possible, as usual, that no image coefficients "stick up" above the noise. There are many components to a successful thresholding procedure. Some components have a larger effect than others but the effect is not the same in all practical data situations. Here we give some rough practical guidance, although \emph{you must refer to the papers below when using a particular technique}. \bold{You cannot expect to get excellent performance on all signals unless you fully understand the rationale and limitations of each method below}. I am not in favour of the "black-box" approach. The thresholding functions of WaveThresh3 are not a black box: experience and judgement are required! Some issues to watch for: \describe{ \item{levels}{The default of \code{levels = 3:(wd$nlevelsWT - 1)} for the \code{levels} option most certainly does not work globally for all data problems and situations. The level at which thresholding begins (i.e. the given threshold and finer scale wavelets) is called the \emph{primary resolution} and is unique to a particular problem. In some ways choice of the primary resolution is very similar to choosing the bandwidth in kernel regression albeit on a logarithmic scale. See Hall and Patil, (1995) and Hall and Nason (1997) for more information. For each data problem you need to work out which is the best primary resolution. This can be done by gaining experience at what works best, or using prior knowledge. It is possible to "automatically" choose a "best" primary resolution using cross-validation (but not in WaveThresh). Secondly the levels argument computes and applies the threshold at the levels specified in the \code{levels} argument. It does this for all the levels specified. Sometimes, in wavelet shrinkage, the threshold is computed using only the finest scale coefficients (or more precisely the estimate of the overall noise level). If you want your threshold variance estimate only to use the finest scale coefficients (e.g. with universal thresholding) then you will have to apply the \code{threshold.imwd} function twice. Once (with levels set equal to \code{\link{nlevelsWT}}(wd)-1) and with \code{return.threshold=TRUE} to return the threshold computed on the finest scale and then apply the threshold function with the \code{manual} option supplying the value of the previously computed threshold as the \code{value} option. } Note that the fdr policy does its own thing.
\item{by.level}{for a \code{\link{wd}} object which has come from data with noise that is correlated then you should have a threshold computed for each resolution level. See the paper by Johnstone and Silverman, 1997.} } } \value{ An object of class \code{imwdc} if the \code{compression} option above is TRUE, otherwise a \code{\link{imwd}} object is returned. In either case the returned object contains the thresholded coefficients. Note that if the \code{return.threshold} option is set to TRUE then the threshold values will be returned rather than the thresholded object. } \references{The FDR code segments were kindly donated by Felix Abramovich. } \note{ This section gives a brief description of the different thresholding policies available. For further details see the \emph{associated papers}. If there is no paper available then a small description is provided here. More than one policy may be good for problem, so experiment! They are arranged here in alphabetical order: \describe{ \item{fdr}{See Abramovich and Benjamini, 1996. Contributed by Felix Abramovich.} \item{manual}{specify a user supplied threshold using \code{value} to pass the value of the threshold. The \code{value} argument should be a vector. If it is of length 1 then it is replicated to be the same length as the \code{levels} vector, otherwise it is repeated as many times as is necessary to be the \code{levels} vector's length. In this way, different thresholds can be supplied for different levels. Note that the \code{by.level} option has no effect with this policy.} \item{probability}{The \code{probability} policy works as follows. All coefficients that are smaller than the valueth quantile of the coefficients are set to zero. If \code{by.level} is false, then the quantile is computed for all coefficients in the levels specified by the "levels" vector; if \code{by.level} is true, then each level's quantile is estimated separately. The probability policy is pretty stupid - do not use it.} \item{universal}{See Donoho and Johnstone, 1995.} } } \section{RELEASE}{Version 3.6 Copyright Guy Nason and others 1997 } \seealso{ \code{\link{imwd}}, \code{\link{imwd.object}}, \code{\link{imwdc.object}}. \code{\link{threshold}}. } \examples{ # # Let's use the lennon test image # data(lennon) \dontrun{image(lennon)} # # Now let's do the 2D discrete wavelet transform # lwd <- imwd(lennon) # # Let's look at the coefficients # \dontrun{plot(lwd)} # # Now let's threshold the coefficients # lwdT <- threshold(lwd) # # And let's plot those the thresholded coefficients # \dontrun{plot(lwdT)} # # Note that the only remaining coefficients are down in the bottom # left hand corner of the plot. All the others (black) have been set # to zero (i.e. thresholded). } \keyword{nonlinear} \keyword{smooth} \author{G P Nason} wavethresh/man/example.1.rd0000644000176200001440000000266714211622540015336 0ustar liggesusers\name{example.1} \alias{example.1} \title{Compute and return piecewise polynomial coordinates. } \usage{ example.1() } \arguments{ None } \description{ This function computes and returns the coordinates of the piecewise polynomial described by Nason and Silverman, 1994. This function is a useful test function for evaluating wavelet shrinkage methodology as it contains smooth parts, a discontinuity and it is periodic. (Nason, G.P. and Silverman, B.W. (1994) The discrete wavelet transform in S, \emph{J. Comput. Graph. Statist.}, \bold{3}, 163--191.) 
} \details{ This function computes and returns the x and y coordinates of the piecewise polynomial function described in Nason and Silverman, 1994. The formula for the piecewise polynomial (which is piecewise cubic) is given in Nason and Silverman, 1994. The piecewise polynomial returned is a discrete sample on 512 equally spaced points between 0 and 1 (including 0 but excluding 1). The Donoho and Johnstone test functions can be generated using the \code{\link{DJ.EX}} function. } \value{ A list with two components: \item{x}{a vector of length 512 containing the ordered x ordinates of the piecewise polynomial.} \item{y}{a vector of length 512 containing the corresponding y ordinates of the piecewise polynomial.} } \seealso{\code{\link{DJ.EX}}} \examples{ # # Generate the piecewise polynomial # test.data <- example.1()$y \dontrun{ts.plot(test.data)} } \author{G P Nason} \keyword{nonparametric} wavethresh/man/print.w2d.rd0000644000176200001440000000073314211622540015363 0ustar liggesusers\name{print.w2d} \alias{print.w2d} \title{Print method for printing w2d class objects} \usage{ \method{print}{w2d}(x, \dots) } \arguments{ \item{x}{The w2d class object that you wish to print info about} \item{\dots}{Other arguments} } \description{ Prints information about a w2d class object. These objects are not typically directly used by a user. } \details{ Description says all } \seealso{\code{\link{wpst2discr}}} \value{ Nothing } \author{G P Nason} \keyword{print} wavethresh/man/putD.wd3D.rd0000644000176200001440000000447514211622634015263 0ustar liggesusers\name{putD.wd3D} \alias{putD.wd3D} \title{Put wavelet coefficient array into a 3D wavelet object} \description{ This function put an array of wavelet coefficients, corresponding to a particular resolution level into a \code{\link{wd}} wavelet decomposition object. The pyramid of coefficients in a wavelet decomposition (returned from the \code{\link{wd3D}} function, say) are packed into a single array in \code{WaveThresh3}. } \usage{ \method{putD}{wd3D}(x, v, \dots) } \arguments{ \item{x}{3D Wavelet decomposition object into which you wish to insert the wavelet coefficients.} \item{v}{This argument is a list with the following components: \describe{ \item{a}{A 3-dimensional array with each dimension of length equal to two to the power of lev which is the level at which you wish to insert the coefficients into x.} \item{lev}{The level at which you wish to insert the coefficients into \code{x}.} \item{block}{A character string indicating which coefficient block you wish to insert the coefficients into. This can be one of GGG, GGH, GHG, GHH, HGG, HGH, HHG. Additionally this can be HHH when the lev argument above is zero.}}} \item{\dots}{Other arguments} } \details{ The need for this function is a consequence of the pyramidal structure of Mallat's algorithm and the memory efficiency gain achieved by storing the pyramid as an array. Note that this function is a method for the generic function \code{\link{putD}}. } \value{ A new \code{\link{wd3D.object}} is returned with the coefficients at level \code{lev} in block given by block are replaced by the contents of \code{a}, if \code{a} is of the correct dimensions! } \section{RELEASE}{Version 3.9.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{accessD}}, \code{\link{accessD.wd3D}}, \code{\link{print.wd3D}}, \code{\link{putD}}, \code{\link{putDwd3Dcheck}}, \code{\link{summary.wd3D}}, \code{\link{threshold.wd3D}}, \code{\link{wd3D}}, \code{\link{wd3D.object}}, \code{\link{wr3D}}. 
} \examples{ # # Generate some test data # a <- array(rnorm(8*8*8), dim=c(8,8,8)) # # Perform the 3D DWT # awd3D <- wd3D(a) # # Replace the second level coefficients by uniform random variables # in block GGG (for some reason) # # newsubarray <- list(a = array(runif(4*4*4), dim=c(4,4,4)), lev=2, block="GGG") awd3D <- putD(awd3D, v=newsubarray) } \keyword{manip} \author{G P Nason} wavethresh/man/print.wd3D.rd0000644000176200001440000000325514211622540015472 0ustar liggesusers\name{print.wd3D} \alias{print.wd3D} \title{Print out information about an wd3D object in a readable form.} \description{ This function prints out information about an \code{\link{wd3D.object}} in a readable form. Note that this function is automatically called by SPlus whenever the name of an \code{\link{wd3D.object}} is typed or whenever such an object is returned to the top level of the S interpreter } \usage{ \method{print}{wd3D}(x, ...) } \arguments{ \item{x}{An object of class \code{\link{wd3D}} that you wish to print out.} \item{\dots}{This argument actually does nothing in this function!} } \details{ Prints out information about \code{\link{wd3D}} objects in nice readable format. } \value{ The last thing this function does is call \code{\link{summary.wd3D}} so the return value is whatever is returned by this function.} \section{RELEASE}{Version 3.9.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{accessD.wd3D}}, \code{\link{print.wd3D}}, \code{\link{putD.wd3D}}, \code{\link{putDwd3Dcheck}}, \code{\link{summary.wd3D}}, \code{\link{threshold.wd3D}}, \code{\link{wd3D}}, \code{\link{wd3D.object}}, \code{\link{wr3D}}. } \examples{ # # Generate an wd3D object. # tmp <- wd3D(array(rnorm(512), dim=c(8,8,8))) # # Now get R to use print.wd # tmp #Class 'wd3d' : 3D DWT Object: # ~~~~ : List with 5 components with names # a filter.number family date nlevelsWT # #$ a is the wavelet coefficient array #Dimension of a is [1] 8 8 8 # #Created on : Wed Oct 20 17:24:15 BST 1999 # #summary(.): #---------- #Levels: 3 #Filter number was: 10 #Filter family was: DaubLeAsymm #Date: Wed Oct 20 17:24:15 BST 1999 } \keyword{manip} \author{G P Nason} wavethresh/man/uncompress.imwdc.rd0000644000176200001440000000304614211622634017040 0ustar liggesusers\name{uncompress.imwdc} \alias{uncompress.imwdc} \title{Uncompress an imwdc class object} \usage{ \method{uncompress}{imwdc}(x, verbose=FALSE, \dots) } \arguments{ \item{x}{The object to uncompress} \item{verbose}{If TRUE then print out messages} \item{\dots}{Other arguments} } \description{ An \code{\link{imwdc.object}} is a run-length encoded object, essentially has all zeroes removed and only non-zero elements stored. This function undoes the compression. } \details{ Description says all, inverse of \code{\link{compress.imwd}} function. } \value{ The uncompressed \code{\link{imwd.object}}. } \seealso{\code{\link{imwd}}, \code{\link{compress.imwd}}} \examples{ data(lennon) # # Do 2D wavelet transform on lennon image # lwd <- imwd(lennon) # # Do threshold the wavelet coefficients, a lot of zeroes are present # lmdT <- threshold(lwd) # # What is the class of the thresholded object? 
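# (Note: threshold() above used its default compression=TRUE argument, so, as the output
# below shows, the result is the run-length encoded imwdc form rather than a plain imwd
# object; uncompress() converts it back, as the rest of this example demonstrates.)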
# class(lmdT) #[1] "imwdc" # # note that the coefficients are stored efficiently in the imwdc class object # uncompress(lmdT) #Class 'imwd' : Discrete Image Wavelet Transform Object: #~~~~ : List with 30 components with names #nlevelsWT fl.dbase filter w0Lconstant bc type w0L1 w0L2 w0L3 w1L1 w1L2 #w1L3 w2L1 w2L2 w2L3 w3L1 w3L2 w3L3 w4L1 w4L2 w4L3 w5L1 w5L2 w5L3 w6L1 #w6L2 w6L3 w7L1 w7L2 w7L3 # #$ wNLx are LONG coefficient vectors ! # #summary(.): #---------- #UNcompressed image wavelet decomposition structure #Levels: 8 #Original image was 256 x 256 pixels. #Filter was: Daub cmpct on least asymm N=10 #Boundary handling: periodic } \author{G P Nason} \keyword{manip} wavethresh/man/doppler.rd0000644000176200001440000000170314211622540015177 0ustar liggesusers\name{doppler} \alias{doppler} \title{Evaluate the Donoho and Johnstone Doppler signal. } \usage{ doppler(t) } \arguments{ \item{t}{The domain of the Doppler function (where you wish to evaluate this Doppler function)} } \description{ This function evaluates and returns the Doppler signal from Donoho and Johnstone (1994). } \details{ This function evaluates and returns the Doppler signal from Donoho and Johnstone (1994). (Donoho, D.L. and Johnstone, I.M. (1994), Ideal spatial adaptation by wavelet shrinkage. \emph{Biometrika}, \bold{81}, 425--455). Another version of this function can be found in \code{\link{DJ.EX}}. } \value{ A vector of the same length as the input vector containing the Doppler signal at \code{t} } \seealso{\code{\link{DJ.EX}}} \examples{ # # Evaluate the Doppler signal at 100 arbitrarily spaced points. # tt <- sort(runif(100)) dopp <- doppler(tt) \dontrun{plot(tt, dopp, type="l")} } \author{Arne Kovac} \keyword{nonparametric} wavethresh/man/TOgetthrda1.rd0000644000176200001440000000176214211622634015671 0ustar liggesusers\name{TOgetthrda1} \alias{TOgetthrda1} \alias{TOgetthrda2} \alias{TOkolsmi.chi2} \alias{TOonebyone1} \alias{TOonebyone2} \alias{TOshrinkit} \title{Subsidiary routines for Ogden and Parzen's wavelet shrinkage methods} \usage{ TOgetthrda1(dat, alpha) TOgetthrda2(dat, alpha) TOkolsmi.chi2(dat) TOonebyone1(dat, alpha) TOonebyone2(dat, alpha) TOshrinkit(coeffs, thresh) } \arguments{ \item{dat}{data} \item{alpha}{a p-value, generally a smoothing parameter} \item{coeffs}{Some coefficients to be shrunk} \item{thresh}{a threshold} } \description{ Corresponds to the wavelet thresholding routine developed by Ogden and Parzen (1994) Data dependent wavelet thresholding in nonparametric regression with change-point applications. \emph{Tech Rep 176}, University of South Carolina, Department of Statistics. } \details{ Not intended for direct use. } \value{ Various depending on the function } \seealso{\code{\link{TOthreshda1}},\code{\link{TOthreshda2}},\code{\link{threshold}}} \author{Todd Ogden} \keyword{smooth} wavethresh/man/accessC2.rd0000644000176200001440000000224114211622540015160 0ustar liggesusers\name{accessC} \alias{accessC} \title{Get smoothed (father wavelet) coefficients from wavelet object} \description{ This generic function extracts smoothed (father wavelet) coefficients from various types of wavelet objects. It extracts and returns a whole resolution level of coefficients. To obtain individual packets from relevant transforms use the \link{getpacket}() series of functions. This function is generic. Particular methods exist.
For objects of class: \describe{ \item{wd}{use the \code{\link{accessC.wd}} method} %\item{wd3D}{use the \code{\link{accessD.wd3D}} method} \item{wp}{use the \code{\link{accessC.wp}} method} %\item{wpst}{use the \code{\link{accessC.wpst}} method} \item{wst}{use the \code{\link{accessC.wst}} method} } See individual method help pages for operation and examples. } \usage{ accessC(\dots) } \arguments{ \item{\dots}{See individual help for details.} } \value{ A vector of coefficients representing the smoothed (father wavelet) coefficients for the requested resolution level. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994} \seealso{ \code{\link{accessC.wd}}, \code{\link{accessC.wp}},\code{\link{accessC.wst}},\code{\link{accessD}} } \keyword{manip} \author{G P Nason} wavethresh/man/firstdot.rd0000644000176200001440000000365414211622540015377 0ustar liggesusers\name{firstdot} \alias{firstdot} \title{Return the location of the first period character within a character string (for a vector of strings of arbitrary length). } \description{ Returns the index of the location of the first period character within each character string of a vector of character strings (of arbitrary length). This is a subsidiary routine for \code{\link{rmget}} and not really intended for user use. } \usage{ firstdot(s) } \arguments{ \item{s}{Vector of character strings.} } \details{ A very simple function. It searches through a character string for the first period character and then returns the position of that period character. It performs this search for each of the character strings in the input vector. } \value{ A vector of integers of the same length as the input vector. Each integer in the output vector is the index position of the first period character in the corresponding character string in the input vector. If a character string does not contain a period character then the corresponding output integer is zero. } \references{Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. \emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern.} \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{rmget}} } \examples{ # # Let's find the first dot in the following strings... # firstdot("mary.had.a.little.lamb") #[1] 5 # # I.e. the first period was after "mary" -- the fifth character # # The following string doesn't have any periods in it. # firstdot("StellaArtois") #[1] 0 # # The function works on vectors of character strings # TopCricketAve <- c("Don.Bradman", "Graeme.Pollock", "George.Headley", "Herbert.Sutcliffe", "Vinod.Kambli", "Javed.Miandad") firstdot(TopCricketAve) #[1] 4 7 7 8 6 6 } \keyword{utilities} \author{G P Nason} wavethresh/man/threshold.wp.rd0000644000176200001440000001756014211622634016167 0ustar liggesusers\name{threshold.wp} \alias{threshold.wp} \title{Threshold wavelet packet decomposition object} \description{ This function provides various ways to threshold a \code{\link{wp}} class object. } \usage{ \method{threshold}{wp}(wp, levels = 3:(nlevelsWT(wp) - 1), dev = madmad, policy = "universal", value = 0, by.level = FALSE, type = "soft", verbose = FALSE, return.threshold = FALSE, cvtol = 0.01, cvnorm = l2norm, add.history = TRUE, \dots) } \arguments{ \item{wp}{The wavelet packet object that you wish to threshold.} \item{levels}{a vector of integers which determines which scale levels are thresholded in the decomposition.
Each integer in the vector must refer to a valid level in the \code{\link{wp}} object supplied. This is usually any integer from 0 to \code{\link{nlevelsWT}}(wp)-1 inclusive. Only the levels in this vector contribute to the computation of the threshold and its application. } \item{policy}{selects the technique by which the threshold value is selected. Each policy corresponds to a method in the literature. At present the different policies are: "\code{universal}" and "\code{manual}". The policies are described in detail below.} \item{by.level}{If FALSE then a global threshold is computed on and applied to all scale levels defined in \code{levels}. If TRUE a threshold is computed and applied separately to each scale level.} \item{value}{This argument conveys the user supplied threshold. If \code{policy="manual"} then \code{value} is the actual threshold value.} \item{dev}{this argument supplies the function to be used to compute the spread of the absolute values of the coefficients. The function supplied must return a value of spread on the variance scale (i.e. not standard deviation) such as the \code{var()} function. A popular, useful and robust alternative is the \code{\link{madmad}} function.} \item{type}{determines the type of thresholding; this can be "\code{hard}" or "\code{soft}".} \item{verbose}{if TRUE then the function prints out informative messages as it progresses.} \item{return.threshold}{If this option is TRUE then the actual \emph{value} of the threshold is returned. If this option is FALSE then a thresholded version of the input is returned.} \item{cvtol}{Not used, but reserved for future use} \item{cvnorm}{Not used, but reserved for future use} \item{add.history}{if \code{TRUE} then a history statement is added to the object for displaying.} \item{\dots}{any other arguments} } \details{ This function thresholds or shrinks wavelet coefficients stored in a \code{\link{wp}} object and returns the coefficients in a modified \code{\link{wp}} object. See the seminal papers by Donoho and Johnstone for explanations about thresholding. For a gentle introduction to wavelet thresholding (or shrinkage as it is sometimes called) see Nason and Silverman, 1994. For more details on each technique see the descriptions of each method below. The basic idea of thresholding is very simple. In a signal plus noise model the wavelet transform of signal is very sparse, the wavelet transform of noise is not (in particular, if the noise is iid Gaussian then so is the noise contained in the wavelet coefficients). Thus, since the signal gets concentrated in the wavelet coefficients and the noise remains "spread" out, it is "easy" to separate the signal from noise by keeping large coefficients (which correspond to signal) and deleting the small ones (which correspond to noise). However, one has to have some idea of the noise level (computed using the dev option in threshold functions). If the noise level is very large then it is possible, as usual, that no signal "sticks up" above the noise. There are many components to a successful thresholding procedure. Some components have a larger effect than others but the effect is not the same in all practical data situations. Here we give some rough practical guidance, although \emph{you must refer to the papers below when using a particular technique.} \bold{You cannot expect to get excellent performance on all signals unless you fully understand the rationale and limitations of each method below.} I am not in favour of the "black-box" approach.
The thresholding functions of WaveThresh3 are not a black box: experience and judgement are required! Some issues to watch for: \describe{ \item{levels}{The default of \code{levels = 3:(nlevelsWT(wp) - 1)} for the \code{levels} option most certainly does not work globally for all data problems and situations. The level at which thresholding begins (i.e. the given threshold and finer scale wavelets) is called the \emph{primary resolution} and is unique to a particular problem. In some ways choice of the primary resolution is very similar to choosing the bandwidth in kernel regression albeit on a logarithmic scale. See Hall and Patil (1995) and Hall and Nason (1997) for more information. For each data problem you need to work out which is the best primary resolution. This can be done by gaining experience at what works best, or using prior knowledge. It is possible to "automatically" choose a "best" primary resolution using cross-validation (but not in WaveThresh). Secondly, the threshold is computed using, and applied to, all of the levels specified in the \code{levels} argument. Sometimes, in wavelet shrinkage, the threshold is computed using only the finest scale coefficients (or more precisely the estimate of the overall noise level). If you want your threshold variance estimate only to use the finest scale coefficients (e.g. with universal thresholding) then you will have to apply the \code{threshold.wp} function twice. Once with \code{levels} set equal to \code{\link{nlevelsWT}}(wp)-1 and with \code{return.threshold=TRUE} to return the threshold computed on the finest scale, and then again with the manual policy, supplying the value of the previously computed threshold as the \code{value} argument. } \item{by.level}{for a \code{\link{wp}} object which has come from data with correlated noise you should have a threshold computed for each resolution level. See the paper by Johnstone and Silverman, 1997.} } } \value{ An object of class \code{\link{wp}}. This object contains the thresholded wavelet coefficients. Note that if the \code{return.threshold} option is set to TRUE then the threshold values will be returned rather than the thresholded object. } \note{ POLICIES This section gives a brief description of the different thresholding policies available. For further details \emph{see the associated papers}. If there is no paper available then a small description is provided here. More than one policy may be good for a problem, so experiment! They are arranged here in alphabetical order: \describe{ \item{universal}{See Donoho and Johnstone, 1995.} } } \section{RELEASE}{Version 3.6 Copyright Guy Nason and others 1997.} \seealso{ \code{\link{wp}}, \code{\link{wp.object}}, \code{\link{InvBasis}}, \code{\link{MaNoVe}}, \code{\link{threshold}}. } \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Generate some noisy data # ynoise <- test.data + rnorm(512, sd=0.1) # # Plot it # \dontrun{ts.plot(ynoise)} # # Now take the discrete wavelet packet transform # N.b. I have no idea if the default wavelets here are appropriate for # this particular example. # ynwp <- wp(ynoise) # # Now do thresholding. We'll use a universal policy and a madmad deviance # estimate of the noise level. # ynwpT1 <- threshold(ynwp, policy="universal", dev=madmad) # # This is just another wp object. Is it sensible?
# Probably not as we have just thresholded the scaling function coefficients # as well. So the threshold might be more sensibly computed on the wavelet # coefficients at the finest scale and then this threshold applied to the # whole wavelet tree?? } \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/putC.wst.rd0000644000176200001440000000506614211622540015267 0ustar liggesusers\name{putC.wst} \alias{putC.wst} \title{Puts a whole resolution level of father wavelet coeffients into wst wavelet object.} \description{ Makes a copy of the \code{\link{wst}} object, replaces a whole resolution level of father wavelet coefficients data in the copy, and then returns the copy. } \usage{ \method{putC}{wst}(wst, level, value, \dots) } \arguments{ \item{wst}{Packet-ordered non-decimated wavelet object into which you wish to insert the father wavelet coefficients.} \item{level}{the resolution level at which you wish to replace the father wavelet coefficients.} \item{value}{the replacement data, this should be of the correct length.} \item{\dots}{any other arguments} } \details{ The function \code{\link{accessC.wst}} obtains the father wavelet coefficients for a particular level. The function \code{putC.wst} replaces father wavelet coefficients at a particular resolution level and returns a modified wst object reflecting the change. For the non-decimated wavelet transforms the number of coefficients at each resolution level is the same and equal to \code{2^nlevelsWT} where \code{nlevels} is the number of levels in the \code{\link{wst.object}}. The number of coefficients at each resolution level is also, of course, the number of data points used to initially form the \code{\link{wst}} object in the first place. Use the \code{\link{accessC.wst}} to extract whole resolution levels of father wavelet coefficients. Use \code{\link{accessD.wst}} and \code{\link{putD.wst}} to extract/insert whole resolution levels of mother wavelet coefficients. Use the \code{\link{getpacket.wst}} and \code{\link{putpacket.wst}} functions to extract/insert packets of coefficients into a packet-ordered non-decimated wavelet object. } \value{ A \code{\link{wst}} class object containing the modified father wavelet coefficients } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wst.object}}, \code{\link{wst}}, \code{\link{putC}}, \code{\link{accessD.wst}}, \code{\link{putD.wst}}, \code{\link{getpacket.wst}}, \code{\link{putpacket.wst}}. } \examples{ # # Generate an EMPTY wst object: # zero <- rep(0, 16) zerowst <- wst(zero) # # Put some random father wavelet coefficients into the object at # resolution level 2. For the non-decimated wavelet transform there # are always 16 coefficients at every resolution level. # mod.zerowst <- putC( zerowst, level=2, v=rnorm(16)) # # If you use accessC on mod.zerowd you would see that there were only # coefficients at resolution level 2 where you just put the coefficients. } \keyword{manip} \author{G P Nason} wavethresh/man/accessD.wpst.rd0000644000176200001440000000342314211622540016074 0ustar liggesusers\name{accessD.wpst} \alias{accessD.wpst} \title{Get coefficients from a non-decimated wavelet packet object (wpst) in time order.} \description{ The coefficients from a non-decimated wavelet packet object, \code{\link{wpst}}, are stored in a particular order in the wpst component of the wpstobj object. This function extracts all the coefficients corresponding to a particular wavelet packet in time order. 
} \usage{ \method{accessD}{wpst}(wpst, level, index, \dots) } \arguments{ \item{wpst}{Non-decimated wavelet packet object from which you wish to extract time-ordered coefficients.} \item{level}{The resolution level that you wish to extract. This can range from zero (the coarsest coefficients) to nlevelsWT-1(wstobj) which are the finest scale coefficients.} \item{index}{The wavelet packet index that you require (sequency ordering). This can range from 0 (father wavelet coeffcients) to \code{2^(nlevelsWT - level) - 1}, i.e. the maximum is dependent on the resolution level.} \item{\dots}{any other arguments} } \details{The \code{\link{wpst}} function performs a non-decimated wavelet packet transform. This function extracts the coefficients at a particular resolution level specified by level in time order. It is possible to extract the individual packets (before interweaving, i.e. the direct result of multiple applications of the packet operators) by using the \code{\link{getpacket.wpst}} function.} \references{ Nason, G.P., Sapatinas, T. and Sawczenko, A. Statistical modelling using undecimated wavelet transforms. } \seealso{ \code{\link{wpst}}, \code{wpst.object}, \code{\link{accessD}}, \code{\link{getpacket.wpst}} } \examples{ # # Get the 4th level of coefficients from a decomposition # dat <- rnorm(128) accessD(wpst(dat), level=4, index=3) } \keyword{manip} \author{G P Nason} wavethresh/man/print.wpstRO.rd0000644000176200001440000000136514232245366016141 0ustar liggesusers\name{print.wpstRO} \alias{print.wpstRO} \title{ Print a wpstRO class object } \description{ Prints out a representation of an wpstRO object } \usage{ \method{print}{wpstRO}(x, maxbasis = 10, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ The wpstRO object to print } \item{maxbasis}{ The maximum number of basis packets to report on } \item{\dots}{ Other arguments } } \details{ Description says all } \value{ None } \author{ G P Nason } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{\code{\link{makewpstRO}} } \examples{ # # See example in makewpstRO function # } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{print} wavethresh/man/summary.wd.rd0000644000176200001440000000147414211622634015651 0ustar liggesusers\name{summary.wd} \alias{summary.wd} \title{Print out some basic information associated with a wd object} \usage{ \method{summary}{wd}(object, \dots) } \arguments{ \item{object}{The object to print a summary about} \item{\dots}{Other arguments} } \description{ Prints out the number of levels, the length of the original vector from which the object came, the type of wavelet filter associated with the decomposition, the type of boundary handling, the transform type and the date of production. } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{wd}}} \examples{ vwd <- wd(1:8) summary(vwd) #Levels: 3 #Length of original: 8 #Filter was: Daub cmpct on least asymm N=10 #Boundary handling: periodic #Transform type: wavelet #Date: Mon Mar 8 21:30:32 2010 } \author{G P Nason} \keyword{print} wavethresh/man/Cthreshold.rd0000644000176200001440000000370514211622540015635 0ustar liggesusers\name{Cthreshold} \alias{Cthreshold} \title{Calls C code to threshold wd class object.} \usage{ Cthreshold(wd, thresh.type = "soft", value = 0, levels = 3:(nlevelsWT(wd) - 1)) } \arguments{ \item{wd}{The wavelet object that you wish to threshold.} \item{thresh.type}{The type of thresholding. 
This can be "soft" or "hard". See \code{\link{threshold}} and methods for further details.} \item{value}{The threshold value that you want to be used (e.g. for hard thresholding wavelet coefficients whose absolute value is less than} \item{levels}{The resolution levels that you wish to compute the threshold on and apply the threshold to.} } \description{ A routine that calls a C code function to do thresholding. This is really a test routine to call a C thresholding function (Cthreshold) and the user is advised to use the R based generic thresholding function \code{\link{threshold}} and/or its methods as they contain a wider range of thresholding options. } \details{ For general use it is recommended to use the \code{\link{threshold}} functions as they have a wider variety of options and also work for more complex varieties of wavelet transforms (i.e. non-decimated, complex-valued, etc). However, in the right, limited, situation this function can be useful. This function directly calls the C thresholding function Cthreshold(). The C function is used by routines that operate on behalf of the function that carries out two-fold cross validation in C (\code{\link{CWCV}}) which is also accessible using the \code{policy="cv"} option too \code{\link{threshold.wd}} This function can be used by the user. It might be a bit faster than \code{\link{threshold.wd}} but mostly because it is simpler and does less checking than \code{\link{threshold.wd}}. } \value{ A \code{\link{wd.object}} class object, but containing thresholded coefficients. } \seealso{\code{\link{threshold}}} \examples{ # # See copious examples in the help to threshold.wd # } \author{G P Nason} \keyword{smooth} wavethresh/man/Crsswav.rd0000644000176200001440000000223514211622540015163 0ustar liggesusers\name{Crsswav} \alias{Crsswav} \title{Wrapper to C code version of rsswav} \usage{ Crsswav(noisy, value = 1, filter.number = 10, family = "DaubLeAsymm", thresh.type = "hard", ll = 3) } \arguments{ \item{noisy}{A vector of dyadic (power of two) length that contains the noisy data that you wish to compute the averaged RSS for.} \item{value}{The specified threshold.} \item{filter.number}{This selects the smoothness of wavelet that you want to perform wavelet shrinkage by cross-validation.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{thresh.type}{this option specifies the thresholding type which can be "hard" or "soft".} \item{ll}{The primary resolution that you wish to assume. No wavelet coefficients that are on coarser scales than ll will be thresholded.} } \description{ Crsswav is called by \code{\link{WaveletCV}} which is itself called by \code{\link{threshold.wd}} to carry out its cross-validation policy. } \details{ Description says all } \value{ Same value as for \code{\link{rsswav}} } \seealso{\code{\link{rsswav}}, \code{\link{WaveletCV}}} \author{G P Nason} \keyword{smooth} wavethresh/man/wpst2m.rd0000644000176200001440000000266014211622634014775 0ustar liggesusers\name{wpst2m} \alias{wpst2m} \title{ Converts a nondecimated wavelet packet object to a (large) matrix with packets stored as columns } \description{ Takes a nondecimated wavelet packet transform, takes the packets one packet at a time and stores them in a matrix. The packets are rotated on extraction and storage in the matrix in an attempt to align them, they are also optionally transformed by \code{trans}. The rotation is performed by \code{\link{compgrot}}. 
Note that the coefficients are of some series, not the basis functions themselves. } \usage{ wpst2m(wpstobj, trans = identity) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{wpstobj}{ The nondecimated wavelet packet object to store } \item{trans}{ The optional transform to apply to the coefficients } } \details{ Description says all } \value{ A list, of class w2m, with the following components: \item{m}{The matrix containing the packets} \item{level}{A vector containing the levels from where the packets in m come from} \item{pktix}{A vector containing the packet indices from where the packets in m come from} \item{nlevelsWT}{The number of resolution levels from the original wpst object} } \author{ G P Nason } \seealso{\code{\link{makewpstRO}}, \code{\link{print.w2m}}} \examples{ # # Not intended to be directly used by users # } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{manip} wavethresh/man/rmget.rd0000644000176200001440000000645714211622634014667 0ustar liggesusers\name{rmget} \alias{rmget} \title{Search for existing ipndacw matrices. } \description{ Returns the integer corresponding to the smallest order \code{\link{ipndacw}} matrix of greater than or equal to order than the order, J requested. Not really intended for user use. } \usage{ rmget(requestJ, filter.number, family) } \arguments{ \item{requestJ}{A positive integer representing the order of the \code{\link{ipndacw}} matrix that is \emph{required}.} \item{filter.number}{The index number of the wavelet used to build the \code{\link{ipndacw}} matrix that is required.} \item{family}{The wavelet family used to build the \code{\link{ipndacw}} matrix that is required.} } \details{ Some of the matrices computed by \code{\link{ipndacw}} take a long time to compute. Hence it is a good idea to store them and reuse them. This function is asked to find an \code{\link{ipndacw}} matrix of a particular order, \emph{filter.number} and \emph{family}. The function steps through all of the directories in the \code{search()} list collecting names of all \code{\link{ipndacw}} matrices having the same \emph{filter.number} and \emph{family} characteristics. It then keeps any names where the \emph{order} is larger than, or equal to, the requested order. This means that a suitable \code{\link{ipndacw}} matrix of the same or larger order is visible in one of the \code{search()} directories. The matrix name with the smallest \code{order} is selected and the \emph{order} of the matrix is returned. The routine that called this function can then \code{get()} the matrix and either use it "as is" or extract the top-left hand corner of it if \code{requestJ} is less than the order returned by this function. If no such matrix, as described by the previous paragraph, exists then this function returns \code{NULL}. This function calls the subsidiary routine \code{\link{firstdot}}. } \value{ If a matrix of order larger than or equal to the requested order exists somewhere on the search path \emph{and} the \code{filter.number} and \code{\link{family}} is as specified then its order is returned. If more than one such matrix exists then the order of the smallest one larger than or equal to the requested one is returned. If no such matrix exists the function returns NULL. } \references{Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. 
\emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern.} \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{firstdot}}, \code{\link{ipndacw}}, \code{\link{rmname}}. } \examples{ # # Suppose there are no matrices in the search path. # # Let's look for the matrix rm.4.1.DaubExPhase (Haar wavelet matrix of # order 4) # rmget(requestJ=4, filter.number=1, family="DaubExPhase") #NULL # # I.e. a NULL return code. So there were no suitable matrices. # #If we create two Haar ipndacw matrix of order 7 and 8 # ipndacw(-7, filter.number=1, family="DaubExPhase") ipndacw(-8, filter.number=1, family="DaubExPhase") # # Now let's repeat the earlier search # rmget(requestJ=4, filter.number=1, family="DaubExPhase") #[1] 7 # # So, as we the smallest Haar ipndacw matrix available larger than # the requested order of 4 is "7". # } \author{G P Nason} \keyword{manip} wavethresh/man/guyrot.rd0000644000176200001440000000236214211622540015065 0ustar liggesusers\name{guyrot} \alias{guyrot} \title{Cyclically rotate elements of a vector} \usage{ guyrot(v, n) } \arguments{ \item{v}{Vector whose elements you wish to rotate} \item{n}{Integer determining the amount to rotate, can be negative} } \description{ This function shifts (or rotates) the elements of the input vector in a cyclic fashion (end periodicity is used). } \details{ A very simple function which cyclically shifts the elements of a vector. Not necessarily intended as a top level user function but it is a useful little function. } \value{ A vector containing the shifted or rotated coefficients. } \seealso{\code{\link{wpst2discr}}, \code{\link{wpstCLASS}}} \examples{ # # Start off with an example vector # v <- c(1,2,3,4,5,6) # # Rotate it one element to the right, rightmost element gets rotated round # to be first element. # guyrot(v,1) # [1] 6 1 2 3 4 5 # # Rotate v two spaces to the left, leftmost two elements get rotated around # to be new last elements guyrot(v, -2) # # [1] 3 4 5 6 1 2 # # # Now issue a larger rotation, e.g. 19! # guyrot(v,19) # [1] 6 1 2 3 4 5 # # Its just the same as rotating by 1 since the input vector is of length 6 # and so rotating by 19 is the same as rotating by 6,6,6, and then 1! # } \author{G P Nason} \keyword{math} wavethresh/man/c2to4.rd0000644000176200001440000000116214211622540014464 0ustar liggesusers\name{c2to4} \alias{c2to4} \usage{ c2to4(index) } \arguments{ \item{index}{The integer you wish to convert} } \title{Take integer, represent in binary, then think of and return that representation in base 4} \description{ Not designed, or really useful, for casual user use! For example: take the integer 5. In binary this is 101. Then, this representation in base 4 is 16+1 =17. This function is used by \code{\link{accessD.wpst}} to help it access coefficients. 
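As an informal check of the arithmetic just described (a sketch using base R only; \code{intToBits} is not part of wavethresh and is used here purely for illustration):
# binary digits of 5, most significant first: 1 0 1
digits <- rev(as.integer(intToBits(5))[1:3])
# read those digits in base 4: 1*16 + 0*4 + 1 = 17, which is what c2to4(5) returns
sum(digits * 4^(rev(seq_along(digits)) - 1))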
} \details{ Description says all } \value{ The converted number } \seealso{\code{\link{accessD.wpst}}} \examples{ c2to4(5) } \author{G P Nason} \keyword{misc} wavethresh/man/summary.imwdc.rd0000644000176200001440000000160014211622634016331 0ustar liggesusers\name{summary.imwdc} \alias{summary.imwdc} \title{Print out some basic information associated with an imwdc object} \usage{ \method{summary}{imwdc}(object, \dots) } \arguments{ \item{object}{The object to print a summary about} \item{\dots}{Other arguments} } \description{ Prints out the number of levels, the dimensions of the original image from which the object came, the type of wavelet filter associated with the decomposition, the type of boundary handling. } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{imwd}}, \code{\link{threshold.imwd}}} \examples{ m <- matrix(rnorm(32*32),nrow=32) mimwd <- imwd(m) mimwdc <- threshold(mimwd) summary(mimwdc) #Compressed image wavelet decomposition structure #Levels: 5 #Original image was 32 x 32 pixels. #Filter was: Daub cmpct on least asymm N=10 #Boundary handling: periodic } \author{G P Nason} \keyword{print} wavethresh/man/getpacket.wpst.rd0000644000176200001440000000657114211622540016505 0ustar liggesusers\name{getpacket.wpst} \alias{getpacket.wpst} \title{Get packet of coefficients from a non-decimated wavelet packet object (wpst). } \description{ This function extracts and returns a packet of coefficients from a non-decimated wavelet packet (\code{\link{wpst}}) object. } \usage{ \method{getpacket}{wpst}(wpst, level, index, \dots ) } \arguments{ \item{wpst}{Non-decimated wavelet packet object from which you wish to extract the packet from.} \item{level}{The resolution level of the coefficients that you wish to extract. Can range from 0 to \code{\link{nlevelsWT}}(wpst). The coefficients at level \code{\link{nlevels}} are the data the created the \code{wpst.object}. } \item{index}{The index number within the resolution level of the packet of coefficients that you wish to extract. Index ranges from 0 to \eqn{(4^r)-1} where \code{r = nlevelsWT - level}. } \item{\dots}{any other arguments} } \details{ The \code{\link{wpst}} transform produces a non-decimated wavelet packet object. This is a "cross" between a \code{wavelet packet} object and a \code{non-decimated wavelet} object. In other words the transform produces \emph{wavelet packet} coefficients at every possible integer shift (unlike the ordinary wavelet packet transform which is aligned to a dyadic grid). Each packet of coefficients is obtained by chaining together the effect of the two \emph{packet operators} DG and DH: these are the high and low pass quadrature mirror filters of the Mallat pyramid algorithm scheme followed by both even \emph{and} odd decimation. For a full description of this algorithm and how coefficients are stored within see Nason, Sapatinas and Sawczenko, 1998. Note that this function extracts \emph{packets}. If you want to obtain the wavelet packet coefficients for each shift you need to use the \code{\link{accessD.wpst}}function. This function extracts particular wavelet packet coefficients for a particular shift. In particular, this function returns a number of coefficients dependent on the scale level requested whereas \code{\link{accessD.wpst}} always returns a vector of coefficients of length equal to the input data that created the \code{wpst.object} initially. } \value{ A vector containing the packet of non-decimated wavelet packet coefficients that you wished to extract. 
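(A worked illustration of the index and length rules described above: in the example below the object is built from 16 data points, so \code{nlevelsWT} is 4; asking for \code{level=3} gives r = 4 - 3 = 1, so the valid indices are 0 to 4^1 - 1 = 3 and, as the example notes, the returned packet at that level contains 8 (= 2^3) coefficients.)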
} \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{accessD.wpst}}, \code{\link{wpst}}, } \examples{ # # Create some random data # myrand <- rnorm(16) #myrand # [1] 0.19268626 -0.41737181 -0.30806613 0.07435407 0.99871757 # [6] -0.58935121 -1.38049759 -0.13346631 1.55555403 -1.60581265 #[11] 0.14353621 1.21277774 1.13762337 -1.08577934 -0.29745609 #[16] 0.50977512 # # Do the non-decimated wavelet packet transform # myrwpst <- wpst(myrand) # # Let's access what is a level nlevelsWT(myrwpst) # getpacket(myrwpst, nlevelsWT(myrwpst), index=0) # [1] 0.19268626 -0.41737181 -0.30806613 0.07435407 0.99871757 # [6] -0.58935121 -1.38049759 -0.13346631 1.55555403 -1.60581265 #[11] 0.14353621 1.21277774 1.13762337 -1.08577934 -0.29745609 #[16] 0.50977512 # # I.e. the data that created the object. # # How about extracting the 3rd (last) packet at level 3? # getpacket(myrwpst, 3, index=3) #[1] -2.660657144 0.688415755 -1.764060698 0.717267105 -0.206916242 #[6] -0.659983747 0.005836952 -0.196874007 # # Of course, there are only 8 coefficients at this level. } \keyword{manip} \author{G P Nason} wavethresh/man/InvBasis.wp.rd0000644000176200001440000000373214211622540015701 0ustar liggesusers\name{InvBasis.wp} \alias{InvBasis.wp} \title{Invert a wp library representation with a particular basis spec} \usage{ \method{InvBasis}{wp}(wp, nvwp, pktlist, verbose=FALSE, \dots) } \arguments{ \item{wp}{The wavelet packet object you wish to invert.} \item{nvwp}{A basis specification in the format of a node vector (wp) object, obtained, eg by the \code{\link{MaNoVe.wp}} function} \item{pktlist}{Another way of specifying the basis. If this argument is not specified then it is generated automatically from the \code{nvwp} argument. If it is specified then it overrides the one generated by \code{nvwp}} \item{verbose}{If TRUE then informative messages are printed.} \item{...}{Other arguments, not used} } \description{ Inverts a wp basis representation with a given basis specification, for example an output from the \code{\link{MaNoVe}} function.} \details{ Objects arising from a \code{\link{wp.object}} specification are a representation of a signal with respect to a library of wavelet packet basis functions. A particular basis specification can be obtained using the \code{\link{numtonv}} function which can pick an indexed basis function, or \code{\link{MaNoVe.wp}} which uses the Coifman-Wickerhauser minimum entropy method to select a basis. This function takes a \code{\link{wp.object}} and a particular basis description (in a \code{\link{nv.object}} node vector object) and inverts the representation with respect to that selected basis. The function can alternatively take a packet list \code{pktlist} specification which overrides the node vector if supplied. If the \code{pktlist} is missing then one is generated internally from the \code{nvwp} object using the \code{\link{print.nvwp}} function. } \value{ The inverted reconstruction } \seealso{\code{\link{InvBasis}},\code{\link{MaNoVe.wp}},\code{\link{numtonv}},\code{\link{print.nvwp}},\code{\link{wp}}} \examples{ # # The example in InvBasis.wst can be used here, but replaced wst by wp # } \author{G P Nason} \keyword{smooth} wavethresh/man/HaarMA.rd0000644000176200001440000000461614211622540014631 0ustar liggesusers\name{HaarMA} \alias{HaarMA} \title{Generate Haar MA processes.} \description{ This function generates an arbitrary number of observations from a Haar MA process of any order with a particular variance. 
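As an informal sketch only (an assumption spelt out from the Details section below, not the package's own code): an order-1 Haar MA path is the MA(2) filter with coefficients 1/sqrt(2) and -1/sqrt(2) applied to iid Gaussian innovations, so the following base R fragment behaves like \code{HaarMA(n=128, sd=2, order=1)}:
# order-1 Haar MA sketch via stats::filter; use HaarMA() itself in practice
z <- rnorm(129, sd = 2)
x <- stats::filter(z, c(1, -1)/sqrt(2), method = "convolution", sides = 1)[-1]
# x now has length 128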
} \usage{ HaarMA(n, sd=1, order=5) } \arguments{ \item{n}{The number of observations in the realization that you want to create. Note that n does NOT have to be a power of two.} \item{sd}{The standard deviation of the innovations.} \item{order}{The order of the Haar MA process.} } \details{ A Haar MA process is a special kind of time series moving-average (MA) process. A Haar MA process of order k is a MA process of order \eqn{2^k}. The coefficients of the Haar MA process are given by the filter coefficients of the discrete Haar wavelet at different scales. For examples: the Haar MA process of order 1 is an MA process of order 2. The coefficients are \eqn{1/\sqrt{2}}{1/sqrt(2)} and \eqn{-1/\sqrt{2}}{-1/sqrt(2)}. The Haar MA process of order 2 is an MA process of order 4. The coefficients are 1/2, 1/2, -1/2, -1/2 and so on. It is possible to define other processes for other wavelets as well. Any Haar MA process is a good examples of a (stationary) LSW process because it is sparsely representable by the locally-stationary wavelet machinery defined in Nason, von Sachs and Kroisandt. } \value{ A vector containing a realization of a Haar MA process of the specified order, standard deviation and number of observations. } \references{Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. \emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern.} \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{HaarConcat}}, \code{\link{ewspec}}, } \examples{ # # Generate a Haar MA process of order 1 (high frequency series) # MyHaarMA <- HaarMA(n=151, sd=2, order=1) # # Plot it # \dontrun{ts.plot(MyHaarMA)} # # Generate another Haar MA process of order 3 (lower frequency), but of # smaller variance # MyHaarMA2 <- HaarMA(n=151, sd=1, order=3) # # Plot it # \dontrun{ts.plot(MyHaarMA2)} # # Let's plot them next to each other so that you can really see the # differences. # # Plot a vertical dotted line which indicates where the processes are # joined # \dontrun{ts.plot(c(MyHaarMA, MyHaarMA2))} \dontrun{abline(v=152, lty=2)} } \keyword{manip} \author{G P Nason} wavethresh/man/getpacket.wst2D.rd0000644000176200001440000001054714211622540016511 0ustar liggesusers\name{getpacket.wst2D} \alias{getpacket.wst2D} \title{Get packet of coefficients from a two-dimensional non-decimated wavelet object (wst2D).} \description{ This function extracts and returns a packet of coefficients from a two-dimensional non-decimated wavelet (\code{\link{wst2D}}) object. } \usage{ \method{getpacket}{wst2D}(wst2D, level, index, type="S", Ccode=TRUE, \dots) } \arguments{ \item{wst2D}{2D non-decimated wavelet object from which you wish to extract a packet from.} \item{level}{The resolution level of the coefficients that you wish to extract. Can range from 0 to \code{\link{nlevelsWT}}(wpst)-1.} \item{index}{The index number within the resolution level of the packet of coefficients that you wish to extract. Index is a base-4 number which is r digits long. Each digit can be 0, 1, 2 or 3 corresponding to no shifts, horizontal shift, vertical shift or horizontal and vertical shifts. The number r indicates the depth of the resolution level from the data resolution i.e. where \code{r = nlevelsWT - level}. 
Where there is a string of more than one digit the left most digits correspond to finest scale shift selection, the right most digits to the coarser scales (I think).} \item{type}{This is a one letter character string: one of "S", "H", "V" or "D" for the smooth coefficients, horizontal, vertical or diagonal detail.} \item{Ccode}{If \code{T} then fast C code is used to obtain the packet, otherwise slow SPlus code is used. Unless you have some special reason always use the C code (and leave the argument at its default).} \item{\dots}{any other arguments} } \details{ The \code{\link{wst2D}} function creates a \code{\link{wst2D}} class object. Starting with a smooth the operators H, G, GS and HS (where G, H are the usual Mallat operators and S is the shift-by-one operator) are operated first on the rows and then the columns: i.e. so each of the operators HH, HG, GH, GG, HSH, HSG, GSH, GSG HHS, GHS, HGS, GGS HSHS, HSGS, GSHS and GSGS are applied. Then the same collection of operators is applied to all the derived smooths, i.e. HH, HSH, HHS and HSHS. So the next level is obtained from the previous level with basically HH, HG, GH and GG but with extra shifts in the horizontal, vertical and horizontal and vertical directions. The index provides a way to enumerate the paths through this tree where each smooth has 4 children and indexed by a number between 0 and 3. Each of the 4 children has 4 components: a smooth, horizontal, vertical and diagonal detail, much in the same way as for the Mallat 2D wavelet transform implemented in the WaveThresh function \code{\link{imwd}}. } \value{ A matrix containing the packet of the 2D non-decimated wavelet coefficients that you require. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{putpacket.wst2D}}, \code{\link{wst2D}}, \code{\link{wst2D.object}}. } \examples{ # # Create a random image. # myrand <- matrix(rnorm(16), nrow=4, ncol=4) #myrand # [,1] [,2] [,3] [,4] #[1,] 0.01692807 0.1400891 -0.38225727 0.3372708 #[2,] -0.79799841 -0.3306080 1.59789958 -1.0606204 #[3,] 0.29151629 -0.2028172 -0.02346776 0.5833292 #[4,] -2.21505532 -0.3591296 -0.39354119 0.6147043 # # Do the 2D non-decimated wavelet transform # myrwst2D <- wst2D(myrand) # # Let's access the finest scale detail, not shifted in the vertical # direction. # getpacket(myrwst2D, nlevelsWT(myrwst2D)-1, index=0, type="V") # [,1] [,2] #[1,] -0.1626819 -1.3244064 # # Compare this to the ordinary 2D DWT for the vertical detail at this # resolution level imwd(myrand)[[lt.to.name( 1, "DC")]] #[1] -0.1626819 -1.3244064 1.4113247 -0.7383336 # # The same numbers but they're not in matrix format because # imwd returns vectors not matrices. # # Now back to the wst2D object. Let's # extract vertical detail again at level 1 but this time the horizontally # shifted data. # getpacket(myrwst2D, level=1, index=1, type="V") # [,1] [,2] #[1,] -0.5984427 0.2599445 #[2,] -0.6502002 1.8027955 # # So, yes, different data. Now how about at a deeper resolution level. 
# Lets have a horizontal shift, as before, for the level 1 but follow it # with a diagonal shift and this time extract the smooth component: # getpacket(myrwst2D, level=0, index=13, type="S") # [,1] #[1,] -0.5459394 # # Of course, only one number because this is at level 0 } \keyword{manip} \author{G P Nason} wavethresh/man/accessC.wd.rd0000644000176200001440000000772614211622540015522 0ustar liggesusers\name{accessC.wd} \alias{accessC.wd} \title{Get smoothed data from wavelet object (wd)} \description{ The smoothed and original data from a wavelet decomposition structure (returned from \code{\link{wd}}) are packed into a single vector in that structure. This function extracts the data corresponding to a particular resolution level. } \usage{ \method{accessC}{wd}(wd, level = nlevelsWT(wd), boundary=FALSE, aspect, \dots) } \arguments{ \item{wd}{ wavelet decomposition structure from which you wish to extract the smoothed or original data if the structure is from a wavelet decomposition, or the reconstructed data if the structure is from a wavelet reconstruction. } \item{level}{ the level that you wish to extract. By default, this is the level with most detail (in the case of structures from a decomposition this is the original data, in the case of structures from a reconstruction this is the top-level reconstruction). } \item{boundary}{logical; if \code{TRUE} then all of the boundary correction values will be returned as well (note: the length of the returned vector may not be a power of 2).\cr If \code{boundary} is false, then just the coefficients will be returned. If the decomposition (or reconstruction) was done with periodic boundary conditions, this option has no effect.} \item{aspect}{Applies a function to the coefficients before return. Supplied as a text string which gets converted to a function. For example, "Mod" for complex-valued arguments} \item{\dots}{any other arguments} } \value{A vector of the extracted data. } \details{ The \link{wd} (\code{\link{wr.wd}}) function produces a wavelet decomposition (reconstruction) structure. For decomposition, the top level contains the original data, and subsequent lower levels contain the successively smoothed data. So if there are \eqn{2^m} original data points, there will be m+1 levels indexed 0,1,\dots{},m. So \code{accessC.wd(wdobj, level=m)} pulls out the original data, as does \code{accessC.wd(wdobj)} To get hold of lower levels just specify the level that you're interested in, e.g. \code{accessC.wd(wdobj, level=2)} gets hold of the second level. For reconstruction, the top level contains the ultimate step in the Mallat pyramid reconstruction algorithm, lower levels are intermediate steps. The need for this function is a consequence of the pyramidal structure of Mallat's algorithm and the memory efficiency gain achieved by storing the pyramid as a linear vector. AccessC obtains information about where the smoothed data appears from the fl.dbase component of an \link{wd.object}, in particular the array \code{fl.dbase$first.last.c} which gives a complete specification of index numbers and offsets for \code{wd.object$C}. Note that this function is method for the generic function \code{\link{accessC}}. When the \code{\link{wd.object}} is definitely a wd class object then you only need use the generic version of this function. Note that this function only gets information from \code{\link{wd}} class objects. To insert coefficients etc. 
into \code{\link{wd}} structures you have to use the \code{\link{putC}} function (or more precisely, the \code{\link{putC.wd}} method). } \references{ Mallat, S. G. (1989) A theory for multiresolution signal decomposition: the wavelet representation. \emph{IEEE Transactions on Pattern Analysis and Machine Intelligence} \bold{11}, 674--693. Nason, G. P. and Silverman, B. W. (1994). The discrete wavelet transform in S. \emph{Journal of Computational and Graphical Statistics,} \bold{3}, 163--191. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994} \seealso{ \code{\link{wr}}, \code{\link{wd}}, \code{\link{accessD}}, \code{\link{accessD.wd}}, \code{\link{filter.select}}, \code{\link{threshold}}, \code{\link{putC.wd}}, \code{\link{putD.wd}}.} \examples{ ## Get the 3rd level of smoothed data from a decomposition dat <- rnorm(64) accessC(wd(dat), level=3) } \keyword{manip} \author{G P Nason} wavethresh/man/InvBasis.rd0000644000176200001440000000101014211622540015237 0ustar liggesusers\name{InvBasis} \alias{InvBasis} \title{Generic basis inversion for libraries} \usage{ InvBasis(...) } \arguments{ \item{...}{Usually a library representation and a basis specification} } \description{ Will invert either a \code{wst} or \code{wp} object given that object and some kind of basis specification. } \details{ Description says it all } \value{ The reconstruction. } \seealso{\code{\link{InvBasis.wp}},\code{\link{InvBasis.wst}},\code{\link{MaNoVe}},\code{\link{numtonv}}} \author{G P Nason} \keyword{smooth} wavethresh/man/HaarConcat.rd0000644000176200001440000000240614211622540015536 0ustar liggesusers\name{HaarConcat} \alias{HaarConcat} \usage{ HaarConcat() } \arguments{ None } \title{Generate a concatenated Haar MA process} \description{ This function generates a particular set of four concatenated Haar MA processes. } \details{ This function generates a realization of particular kind of non-stationary time series probability model. The returned time series is the result of concatenating 4 time series each of length 128 from the Haar MA process generator (\code{\link{HaarMA}}) of orders 1, 2, 3 and 4. The standard deviation of the innovations is 1. This function was used to generate the figure of the concatenated Haar MA process in Nason, von Sachs and Kroisandt. It produces a kind of time series that can be sparsely represented by the wavelet machinery but at the same time is non-stationary. See Nason, von Sachs and Kroisandt (2000) Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. \emph{J R Statist Soc, B}, \bold{62}, 271-292. } \value{ A vector containing 512 observations from four concatenated Haar MA processes } \seealso{\code{\link{HaarMA}},\code{\link{ewspec}}} \examples{ # # Generate the concatenated Haar MA process. # MyHaarCC <- HaarConcat() # # Plot it # \dontrun{ts.plot(MyHaarCC)} } \author{G P Nason} \keyword{smooth} wavethresh/man/wst2D.rd0000644000176200001440000000660214211622634014544 0ustar liggesusers\name{wst2D} \alias{wst2D} \title{(Packet-ordered) 2D non-decimated wavelet transform.} \description{ This function computes the (packet-ordered) 2D non-decimated wavelet transform } \usage{ wst2D(m, filter.number=10, family="DaubLeAsymm") } \arguments{ \item{m}{A matrix containing the image data that you wish to decompose. Each dimension of the matrix must be the same power of 2.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. 
By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. Two popular options are "DaubExPhase" and "DaubLeAsymm" but see the help for \code{\link{filter.select}} for more possibilities.} } \details{ The \code{wst2D} computes the (packet-ordered) 2D non-decimated discrete wavelet transform. Such a transform may be used in wavelet shrinkage of images using the \code{\link{AvBasis.wst2D}} function to perform an "average-basis" inverse. Such a transform was used to denoise images in the paper by Lang, Guo, Odegard, Burrus and Wells, 1995. The algorithm works by mixing the HH, GH, HG and GG image operators of the 2D (decimated) discrete wavelet transform (see Mallat, 1989 and the implementation in WaveThresh called \code{\link{imwd}}) with the shift operator S (as documented in Nason and Silverman, 1995) to form new operators (as given in the help to \code{\link{getpacket.wst2D}}). Subimages can be obtained and replaced using the \code{\link{getpacket.wst2D}} and \code{\link{putpacket.wst2D}} functions. This function is a 2D analogue of the (packet-ordered) non-decimated discrete wavelet transform implemented in WaveThresh as \code{\link{wst}}. } \value{ An object of class \code{\link{wst2D}}. } \section{RELEASE}{Version 3.9.5 Copyright Guy Nason 1998} \seealso{ \code{\link{AvBasis.wst2D}}, \code{\link{getpacket.wst2D}}, \code{\link{imwd}}, \code{\link{plot.wst2D}}, \code{\link{print.wst2D}}, \code{\link{putpacket.wst2D}}, \code{\link{summary.wst2D}}, \code{\link{wst2D.object}}. } \examples{ # # We shall use the lennon image. # data(lennon) # # # Now let's apply the (packet-ordered) 2D non-decimated DWT to it... # (using the default wavelets) # uawst2D <- wst2D(lennon) # # One can use the function plot.wst2D to get # a picture of all the resolution levels. However, let's just look at them # one at a time. # # How many levels does our uawst2D object have? # nlevelsWT(uawst2D) #[1] 8 # # O.k. Let's look at resolution level 7 # \dontrun{image(uawst2D$wst2D[8,,])} # # # There are four main blocks here (each of 256x256 pixels) which themselves # contain four sub-blocks. The primary blocks correspond to the no shift, # horizontal shift, vertical shift and "horizontal and vertical" shifts # generated by the shift S operator. Within each of the 256x256 blocks # we have the "usual" Mallat smooth, horizontal, vertical and diagonal # detail, with the smooth in the top left of each block. # # Let's extract the smooth, with no shifts at level 7 and display it # \dontrun{image(getpacket(uawst2D, level=7, index=0, type="S"))} # # # Now if we go two more resolution levels deeper we have now 64x64 blocks # which contain 32x32 subblocks corresponding to the smooth, horizontal, # vertical and diagonal detail. # # # Groovy eh? } \keyword{math} \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/wst.object.rd0000644000176200001440000000650714211622634015627 0ustar liggesusers\name{wst.object} \alias{wst.object} \title{(Packet ordered) Nondecimated wavelet transform decomposition objects.} \description{ These are objects of class \code{wst} They represent a decomposition of a function with respect to a set of (all possible) shifted wavelets. } \value{ The following components must be included in a legitimate `wst' object. \item{wp}{a matrix containing the packet ordered non-decimated wavelet coefficients. 
Each row of the matrix contains coefficients with respect to a particular resolution level. There are \code{nlevelsWT(wst)+1} rows in the matrix. Row \code{nlevels(wst)+1} (the ``bottom'') row contains the ``original'' data used to produce the wavelet packet coefficients. Rows \code{nlevels(wst)} to row 1 contain coefficients at resolution levels \code{nlevels(wst)-1} to 0 (so the first row contains coefficients at resolution level 0). The columns contain the coefficients with respect to packets. A different packet length exists at each resolution level. The packet length at resolution level \code{i} is given by \code{2^i}. However, the \code{\link{getpacket.wst}} function should be used to access individual packets from a \code{\link{wst}} object.} \item{Carray}{A matrix of the same dimensions and format as \code{wp} but containing the father wavelet coefficients.} \item{nlevelsWT}{The number of levels in the decomposition. If you raise 2 to the power of \code{nlevels} you get the number of data points used in the decomposition.} \item{filter}{a list containing the details of the filter that did the decomposition (equivalent to the return value from the \code{\link{filter.select}} function).} \item{date}{The date that the transform was performed or the wst was modified.} } \details{ To retain your sanity we recommend that the coefficients from a \code{wst} object be extracted in one of two ways: \itemize{ \item{use \code{\link{getpacket.wst}} to obtain individual packets of either father or mother wavelet coefficients.} \item{use \code{\link{accessD.wst}} to obtain all mother coefficients at a particular resolution level.} \item{use \code{\link{accessC.wst}} to obtain all father coefficients at a particular resolution level.} } You can obtain the coefficients directly from the \code{wst$wp} component (mother) or \code{wst$Carray} component (father) but you have to understand their organization described above. } \section{GENERATION}{ This class of objects is returned from the \code{\link{wst}} function which computes the \emph{packets-ordered} non-decimated wavelet transform (effectively all possible shifts of the standard discrete wavelet transform). Many other functions return an object of class \code{wst}. } \section{METHODS}{ The wst class of objects has methods for the following generic functions: \code{\link{AvBasis}}, \code{\link{InvBasis}}, \code{\link{LocalSpec}}, \code{\link{MaNoVe}}, \code{\link{accessC}}, \code{\link{accessD}}, \code{\link{convert}}, \code{\link{draw}}. \code{\link{getpacket}}. \code{\link{image}}. \code{\link{nlevelsWT}}, \code{\link{nullevels}}, \code{\link{plot}}, \code{\link{print}}, \code{\link{putC}}, \code{\link{putD}}, \code{\link{putpacket}}, \code{\link{summary}}, \code{\link{threshold}}. } \section{RELEASE}{ Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wst}} } \keyword{classes} \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/rfft.rd0000644000176200001440000000217114211622634014477 0ustar liggesusers\name{rfft} \alias{rfft} \title{Real Fast Fourier transform} \usage{ rfft(x) } \arguments{ \item{x}{The vector whose Fourier transform you wish to take} } \description{ Compute a real Fast Fourier transform of \code{x}. } \details{ Given a vector x this function computes the real continuous Fourier transform of \code{x}, i.e. 
it regards \code{x} as points on a periodic function on [0,1] starting at 0, and finds the coefficients of the functions 1, \eqn{\sqrt{2}\cos(2\pi t)}{sqrt(2) cos(2 pi t)}, \eqn{\sqrt{2}\sin(2\pi t)}{sqrt(2) sin(2 pi t)}, etc. that give the expansion of the interpolant of \code{x}. The number of terms in the expansion is the length of \code{x}. If \code{x} is of even length, the last coefficient will be that of a cosine term with no matching sine. } \value{ Returns the Fourier coefficients } \seealso{ \code{\link{LocalSpec.wd}}, \code{\link{rfftinv}}} \examples{ x <- seq(from=0, to=2*pi, length=150) s1 <- sin(10*x) s2 <- sin(7*x) s <- s1 + s2 w <- rfft(s) \dontrun{ts.plot(w)} # # Should see two peaks, corresponding to the two sines at different frequencies # } \author{Bernard Silverman} \keyword{math} wavethresh/man/rcov.rd0000644000176200001440000000161314211622634014507 0ustar liggesusers\name{rcov} \alias{rcov} \title{Computes robust estimate of covariance matrix} \usage{ rcov(x) } \arguments{ \item{x}{Matrix that you wish to find the robust covariance of. Number of variables is number of rows, number of observations is number of columns. This is the opposite way round to the convention expected by \code{var}, for example} } \description{ Computes a robust covariance matrix from \code{x}. } \details{ The method originates from Huber's "Robust Statistics" book. Note that the columns of \code{x} must be observations; this is the opposite way around to the usual convention for functions like \code{var}. } \value{ The robust covariance matrix } \seealso{\code{\link{threshold.mwd}}} \examples{ # # A standard normal data matrix with 3 variables, 100 observations # v <- matrix(rnorm(100*3), nrow=3, ncol=100) # # Robust covariance # rcov(v) } \author{Tim Downie} \keyword{robust} \keyword{multivariate} wavethresh/man/tpwd.rd0000644000176200001440000000327214211622634014517 0ustar liggesusers\name{tpwd} \alias{tpwd} \title{Tensor product 2D wavelet transform} \usage{ tpwd(image, filter.number = 10, family = "DaubLeAsymm", verbose = FALSE) } \arguments{ \item{image}{The image you wish to subject to the tensor product WT} \item{filter.number}{The smoothness of wavelet, see \code{\link{filter.select}}} \item{family}{The wavelet family you wish to use} \item{verbose}{Whether or not you wish to print out informative messages} } \description{ Performs the tensor product 2D wavelet transform. This is a related, but different, 2D wavelet transform compared to \code{\link{imwd}}. } \details{ The transform works by first taking the regular 1D wavelet transform across all columns in the image and storing these coefficients line by line back into the image. Then to this new image we apply the regular 1D wavelet transform across all rows in the image. Hence, the top-left coefficient is the smoothed version both horizontally and vertically. The left-most row contains the image smoothed horizontally, but then detail picked up amongst the horizontal smooths vertically. Suggested by Rainer von Sachs.
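As an informal sketch only (this is not the actual implementation; \code{dwt1d} below is a hypothetical placeholder for any one-dimensional transform returning a coefficient vector of the same length as its input, and the real routine stores its coefficients in its own ordering), the tensor product operation amounts to: \preformatted{
tensor2d <- function(image, dwt1d) {
    # transform every column, keeping each result as a column
    coef <- apply(image, 2, dwt1d)
    # then transform every row of the intermediate result
    t(apply(coef, 1, dwt1d))
}
}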
} \value{ A list with the following components: \item{tpwd}{A matrix with the same dimensions as the input \code{image}, but containing the tensor product wavelet transform coefficients.} \item{filter.number}{The filter number used} \item{family}{The wavelet family used} \item{type}{The type of transform used} \item{bc}{The boundary conditions used} \item{date}{When the transform occurred} } \seealso{\code{\link{imwd}},\code{\link{tpwr}}} \examples{ data(lennon) ltpwd <- tpwd(lennon) \dontrun{image(log(abs(ltpwd$tpwd)), col=grey(seq(from=0, to=1, length=100)))} } \author{G P Nason} \keyword{math} wavethresh/man/print.BP.rd0000644000176200001440000000113614211622540015166 0ustar liggesusers\name{print.BP} \alias{print.BP} \title{Print top best basis information for BP class object} \usage{ \method{print}{BP}(x, \dots) } \arguments{ \item{x}{The BP object you wish to print} \item{\dots}{Other arguments} } \description{ The function \code{\link{Best1DCols}} works out what are the best packets in a selection of packets. This function prints out what the best packets are. The \code{\link{Best1DCols}} function is not intended for user use, and hence neither is this print method. } \details{ Description says all } \value{ None. } \seealso{\code{\link{Best1DCols}}} \author{G P Nason} \keyword{print} wavethresh/man/AvBasis.rd0000644000176200001440000000144514211622540015065 0ustar liggesusers\name{AvBasis} \alias{AvBasis} \title{Basis averaging ("inversion")} \description{ Average of a whole collection of basis functions. This function is generic. Particular methods exist. For the \code{\link{wst}} class object this generic function uses \code{\link{AvBasis.wst}}. In the future we hope to add methods for \code{\link{wp}} and \code{\link{wpst}} class objects. } \usage{ AvBasis(...) } \arguments{ \item{\dots}{See individual help pages for details} } \details{ See individual method help pages for operation and examples. } \value{ A vector containing the average of the representation over all bases. } \section{RELEASE}{ Version 3.6.0 Copyright Guy Nason 1995 } \seealso{ \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{AvBasis.wst}} } \keyword{manip} \author{G P Nason} wavethresh/man/Best1DCols.rd0000644000176200001440000000372014211622540015436 0ustar liggesusers\name{Best1DCols} \alias{Best1DCols} \title{Extract the best (one-dimensional) nondecimated WP packets} \description{ This function takes the whole set of nondecimated wavelet packets and selects those packets that correlate best with the "response" groups. The idea is to reduce the large dimensionality (number of packets) into something more manageable which can then be fed into a proper discriminator. } \usage{ Best1DCols(w2d, mincor = 0.7) } \arguments{ \item{w2d}{An object that gets returned from a call to the \code{\link{wpst2discr}} function which turns a wpst class object into a regular multivariate matrix} \item{mincor}{The threshold value: packets whose correlation with the groups variable exceeds this value are included in the final mix.} } \details{This function is not intended for direct user use. In this function, the w2d object contains a matrix where each column contains the coefficients of a single packet from a non-decimated wavelet packet transform. The number of rows of the matrix is the same as the original time series and hence each column can be correlated with a separate groups variable that contains the group membership of a separate variable which changes over time.
Those packet columns that have correlation greater than the \code{mincor} value are extracted and returned in the \code{BasisMatrix} item of the returned list. } \value{A list with the following components: \item{nlevelsWT}{The number of levels of the nondecimated wavelet packet encapsulator, w2d} \item{BasisMatrix}{The highest correlating packets, sorted according to decreasing correlation} \item{level}{The levels corresponding to the selected packets} \item{pkt}{The packet indices corresponding to the selected packets} \item{basiscoef}{The sorted correlations} \item{groups}{The groups time series} } \seealso{\code{\link{makewpstDO}},\code{\link{wpst2discr}}} \author{G P Nason} \keyword{ts} \keyword{multivariate} wavethresh/man/FullWaveletCV.rd0000644000176200001440000000305214211622540016214 0ustar liggesusers\name{FullWaveletCV} \alias{FullWaveletCV} \title{Perform whole wavelet cross-validation in C code} \usage{ FullWaveletCV(noisy, ll = 3, type = "soft", filter.number = 10, family = "DaubLeAsymm", tol = 0.01, verbose = 0) } \arguments{ \item{noisy}{A vector of dyadic (power of two) length that contains the noisy data that you wish to apply wavelet shrinkage by cross-validation to.} \item{ll}{The primary resolution that you wish to assume. No wavelet coefficients that are on coarser scales than ll will be thresholded.} \item{type}{this option specifies the thresholding type which can be "hard" or "soft".} \item{filter.number}{This selects the smoothness of the wavelet that you want to use to perform wavelet shrinkage by cross-validation.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{tol}{this specifies the convergence tolerance for the cross-validation optimization routine (a golden section search).} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so they are turned off by default.} } \description{ Perform whole wavelet cross-validation in C code. This routine is equivalent to \code{\link{CWCV}} except that more preparatory material is passed to C code for speed. The major difference is that \bold{only} the cross-validated wavelet threshold is returned. } \details{ Description says all } \value{ The cross-validated wavelet threshold. } \seealso{\code{\link{CWCV}}} \author{G P Nason} \keyword{smooth} wavethresh/man/CanUseMoreThanOneColor.rd0000644000176200001440000000105714211622540020011 0ustar liggesusers\name{CanUseMoreThanOneColor} \alias{CanUseMoreThanOneColor} \title{Deprecated function} \usage{ CanUseMoreThanOneColor() } \description{ Not used any more. This function used to interrogate the display device to see whether more than one color could be used. The function is set to return \code{TRUE} whether or not the display device actually has this capability. It is used in the \code{\link{plot.wp}} function. } \details{ Description says it all. } \value{ This function always returns TRUE } \seealso{\code{\link{plot.wp}}} \author{G P Nason} \keyword{misc} wavethresh/man/summary.wd3D.rd0000644000176200001440000000132314211622634016031 0ustar liggesusers\name{summary.wd3D} \alias{summary.wd3D} \title{Print out some basic information associated with a wd3D object} \usage{ \method{summary}{wd3D}(object, \dots) } \arguments{ \item{object}{The object to print a summary about} \item{\dots}{Other arguments} } \description{ Prints out the number of levels, the type of wavelet filter associated with the decomposition, and the date of production.
} \details{ Description says all } \value{ Nothing } \seealso{\code{\link{wd3D}}} \examples{ test.data.3D <- array(rnorm(8*8*8), dim=c(8,8,8)) tdwd3D <- wd3D(test.data.3D) summary(tdwd3D) #Levels: 3 #Filter number was: 10 #Filter family was: DaubLeAsymm #Date: Mon Mar 8 21:48:00 2010 } \author{G P Nason} \keyword{print} wavethresh/man/draw.imwd.rd0000644000176200001440000000367614211622540015441 0ustar liggesusers\name{draw.imwd} \alias{draw.imwd} \title{Draw mother wavelet associated with an imwd object. } \description{ This function draws the mother wavelet associated with an \code{\link{imwd.object}} --- a two-dimensional wavelet decomposition. } \usage{ \method{draw}{imwd}(wd, resolution=128, ...) } \arguments{ \item{wd}{The \code{\link{imwd}} class object whose associated wavelet you wish to draw. } \item{resolution}{The resolution at which the computation is done to compute the wavelet picture. Generally the resolution should be lower for two-dimensional wavelets since the number of computations is proportional to the square of the resolution (the DWT is still O(n) though).} \item{\dots}{Additional arguments to pass to the \code{\link{draw.default}} function which does the drawing.} } \details{ This function extracts the \code{filter} component from the \code{\link{imwd}} object (which is constructed using the \code{\link{filter.select}} function) to decide which wavelet to draw. Once decided the \code{\link{draw.default}} function is used to actually do the drawing. } \value{ If the plot.it argument is set to \code{TRUE} then nothing is returned. Otherwise, as with \code{\link{draw.default}}, the coordinates of what would have been plotted are returned. } \note{ If the \code{plot.it} argument is\code{TRUE} (which it is by default) a plot of the mother wavelet or scaling function is plotted on the active graphics device. } \section{RELEASE}{Version 2 Copyright Guy Nason 1993 } \seealso{ \code{\link{filter.select}}, \code{\link{imwd.object}}, \code{\link{draw.default}}. } \examples{ # # Let's use the lennon test image # data(lennon) \dontrun{image(lennon)} # # Now let's do the 2D discrete wavelet transform using Daubechies' # least-asymmetric wavelet N=6 # lwd <- imwd(lennon, filter.number=6) # # And now draw the wavelet that did this transform # \dontrun{draw(lwd)} # # A nice little two-dimensional wavelet! # } \keyword{hplot} \author{G P Nason} wavethresh/man/wstCV.rd0000644000176200001440000001220114211622634014577 0ustar liggesusers\name{wstCV} \alias{wstCV} \title{Performs two-fold cross-validation estimation using packet-ordered non-decimated wavelet transforms and one, global, threshold. } \description{ Performs Nason's 1996 two-fold cross-validation estimation using packet-ordered non-decimated wavelet transforms and one, global, threshold. } \usage{ wstCV(ndata, ll = 3, type = "soft", filter.number = 10, family = "DaubLeAsymm", tol = 0.01, verbose = 0, plot.it = FALSE, norm = l2norm, InverseType = "average", uvdev = madmad) } \arguments{ \item{ndata}{the noisy data. This is a vector containing the signal plus noise. The length of this vector should be a power of two.} \item{ll}{the primary resolution for this estimation. Note that the primary resolution is \emph{problem-specific}: you have to find out which is the best value.} \item{type}{whether to use hard or soft thresholding. See the explanation for this argument in the \code{\link{threshold.wst}} function.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. 
By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} \item{tol}{the cross-validation tolerance which decides when an estimate is sufficiently close to the truth (or estimated to be so).} \item{verbose}{If \code{TRUE} then informative messages are printed during the progression of the function, otherwise they are not.} \item{plot.it}{If \code{TRUE} then a plot of the progress of optimising the error estimate for different values of the threshold is generated as the algorithm proceeds. The algorithm tries to minimize the error estimate so you should see a ``bowl'' developing. After each iteration the error estimate is plotted with the iteration number so you should see the numbers tend to the bottom of the bowl.} \item{norm}{which measure of distance to judge the dissimilarity between the estimates. The functions \code{\link{l2norm}} and \code{\link{linfnorm}} are suitable examples.} \item{InverseType}{The possible options are "average" or "minent". The former uses basis averaging to form estimates of the unknown function. The "minent" option selects a basis to invert using the Coifman and Wickerhauser, 1992 algorithm.} \item{uvdev}{Universal thresholding is used to generate an upper bound for the ideal threshold. This argument provides the function that computes an estimate of the variance of the noise for use with the universal threshold calculation (see \code{\link{threshold.wst}}).} } \details{ This function implements the cross-validation method detailed by Nason, 1996 for computing an estimate of the error between an estimate and the ``truth''. The difference here is that it uses the \code{packet ordered non-decimated wavelet transform} rather than the standard Mallat \code{\link{wd}} discrete wavelet transform. As such it is an example of the translation-invariant denoising of Coifman and Donoho, 1995 but uses cross-validation to choose the threshold rather than SUREshrink. Note that the procedure outlined above can use \code{\link{AvBasis}} basis averaging or basis selection and inversion using the Coifman and Wickerhauser, 1992 best-basis algorithm. } \value{ A list returning the results of the cross-validation algorithm. The list includes the following components: \item{ndata}{a copy of the input noisy data} \item{xvwr}{a reconstruction of the best estimate computed using this algorithm. It is the inverse (computed depending on what the InverseType argument was) of the \code{xvwrWSTt} component. } \item{xvwrWSTt}{a thresholded version of the packet-ordered non-decimated wavelet transform of the noisy data using the best threshold discovered by this cross-validation algorithm.} \item{uvt}{the universal threshold used as the upper bound for the algorithm that tries to discover the optimal cross-validation threshold. The lower bound is always zero.} \item{xvthresh}{the best threshold as discovered by cross-validation. Note that this is one number, the global threshold. The \code{\link{wstCVl}} function should be used to compute a level-dependent threshold. } \item{xkeep}{a vector containing the various thresholds used by the optimisation algorithm in trying to determine the best one. The length of this vector cannot be pre-determined but depends on the noisy data, thresholding method, and optimisation tolerance.
} \item{fkeep}{a vector containing the value of the estimated error used by the optimisation algorithm in trying to minimize the estimated error. The length, like that of xkeep, cannot be predetermined for the same reasons.} } \note{ If \code{plot.it} is \code{TRUE} then a plot indicating the progression of the optimisation algorithm is plotted. } \section{RELEASE}{ Version 3.6 Copyright Guy Nason 1995} \seealso{ \code{\link{GetRSSWST}}, \code{\link{l2norm}}, \code{\link{linfnorm}}, \code{\link{threshold.wst}}, \code{\link{wst}}, \code{\link{wst.object}}, \code{\link{wstCVl}}. } \examples{ # # Example PENDING # } \keyword{math} \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/draw.mwd.rd0000644000176200001440000000403514211622540015256 0ustar liggesusers\name{draw.mwd} \alias{draw.mwd} \title{Draws a wavelet or scaling function used to compute an `mwd' object } \description{ Draws a picture of one wavelet or scaling function associated with the multiple wavelet decomposition object \code{\link{mwd.object}}. } \usage{ \method{draw}{mwd}(mwd, phi = 0, psi = 0, return.funct = FALSE, \dots) } \arguments{ \item{mwd}{The \code{\link{mwd}} class object whose associated wavelet or scaling function you wish to draw. } \item{phi}{description not yet available} \item{psi}{If \code{phi} is non-zero then the `phi'-th scaling function of the wavelet family used for mwd will be plotted. \code{phi} must be between 0 and \code{mwd$filter$nphi}.} \item{return.funct}{If \code{TRUE} then the vector used as phi/psi in the plot command is returned.} \item{\dots}{Additional arguments to pass to the \code{plot} function} } \details{ It is usual to specify just one of phi and psi. If neither phi nor psi is specified then phi=1 is the default. An error is generated if both phi=0 and psi=0 or if both are nonzero. } \value{ If the \code{return.funct} argument is set to \code{TRUE} then the function values in the plot are returned otherwise \code{NULL} is returned. } \note{If the \code{return.funct} argument is \code{FALSE} a plot of the mother wavelet or scaling function is plotted on the active graphics device. } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1995-6).} \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ # # Do a multiple wavelet decomposition on vector: ynoise # ynoise <- rnorm(512, sd = 0.1) ymwd <- mwd(ynoise,filter.type="Geronimo") # # Draw a picture of the second Geronimo wavelet. # \dontrun{draw(ymwd,psi=2)} # # } \keyword{hplot} \author{G P Nason} wavethresh/man/wvrelease.rd0000644000176200001440000000053514211622634015535 0ustar liggesusers\name{wvrelease} \alias{wvrelease} \title{Prints out the release number of the WaveThresh package} \usage{ wvrelease() } \arguments{ None.} \description{ Prints out the release number of the WaveThresh package, and some copyright info.
} \details{ Description says all } \value{ Nothing } \examples{ wvrelease() } \author{G P Nason} \keyword{misc} wavethresh/man/accessD.rd0000644000176200001440000000223714211622540015102 0ustar liggesusers\name{accessD} \alias{accessD} \title{Get "detail" (mother wavelet) coefficients data from wavelet object} \description{ This generic function extracts detail from various types of wavelet objects. It extracts and returns a whole resolution level of coefficients. To obtain individual packets from relevant transforms use the \link{getpacket}() series of functions. This function is generic. Particular methods exist. For objects of class: \describe{ \item{wd}{use the \code{\link{accessD.wd}} method} \item{wd3D}{use the \code{\link{accessD.wd3D}} method} \item{wp}{use the \code{\link{accessD.wp}} method} \item{wpst}{use the \code{\link{accessD.wpst}} method} \item{wst}{use the \code{\link{accessD.wst}} method} } See individual method help pages for operation and examples. } \usage{ accessD(\dots) } \arguments{ \item{\dots}{See individual help for details.} } \value{ A vector coefficients representing the detail coefficients for the requested resolution level. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994} \seealso{ \code{\link{accessD.wd}}, \code{\link{accessD.wp}},\code{\link{accessD.wst}},\code{\link{accessC}} } \keyword{manip} \author{G P Nason} wavethresh/man/IsEarly.wd.rd0000644000176200001440000000120714211622540015512 0ustar liggesusers\name{IsEarly.wd} \alias{IsEarly.wd} \title{Function to detect whether a wd object is from WaveThresh2 or not} \usage{ \method{IsEarly}{wd}(x) } \arguments{ \item{x}{The wd object that you are trying to check} } \description{ Function to detect whether a wd object is from WaveThresh2 or not. } \details{ The function merely looks to see whether the wd object has a component called date. If it does not then it is from version 2. This routine is legacy and not very important anymore. } \value{ Returns TRUE if from an earlier version of WaveThresh (v2), returns FALSE if not. } \seealso{\code{\link{IsEarly}}} \author{G P Nason} \keyword{misc} wavethresh/man/rfftwt.rd0000644000176200001440000000115014211622634015046 0ustar liggesusers\name{rfftwt} \alias{rfftwt} \title{Weight a Fourier series sequence by a set of weights} \usage{ rfftwt(xrfft, wt) } \arguments{ \item{xrfft}{The Fourier series sequence to weight} \item{wt}{The weights} } \description{ Weight the real Fourier series \code{xrfft} of even length by a weight sequence \code{wt}. The first term of \code{xrfft} is left alone, and the weights are then applied to pairs of terms in \code{xrfft}. Note: \code{wt} is half the length of \code{xrfft}. } \details{ Description says all } \value{ The weighted sequence } \seealso{\code{\link{rfft}}} \author{Bernard Silverman} \keyword{math} wavethresh/man/basisplot.rd0000644000176200001440000000101614211622540015527 0ustar liggesusers\name{basisplot} \alias{basisplot} \title{Generic basis plot function} \usage{ basisplot(x, \dots) } \arguments{ \item{x}{basis to plot} \item{\dots}{various arguments to methods} } \description{ Plots a representation of a time-frequency plane and then plots the locations, and sometimes time series representations of coefficients, for the packets in the basis. 
} \details{ Description says all } \value{ Nothing, usually } \seealso{\code{\link{basisplot.BP}}, \code{\link{basisplot.wp}}} \author{G P Nason} \keyword{hplot} wavethresh/man/simchirp.rd0000644000176200001440000000230014211622634015346 0ustar liggesusers\name{simchirp} \alias{simchirp} \title{Compute and return simulated chirp function.} \description{ This function computes and returns the coordinates of the reflected simulated chirp function described in Nason and Silverman, 1995. This function is a useful test function for evaluating wavelet shrinkage and time-scale analysis methodology as its frequency changes over time. } \usage{ simchirp(n=1024) } \arguments{ \item{n}{The number of ordinates from which to sample the chirp signal.} } \details{ This function computes and returns the x and y coordinates of the reflected chirp function described in Nason and Silverman, 1995. The formula for the reflected simulated chirp is *formula* The chirp returned is a discrete sample on \code{n} equally spaced points between -1 and 1. } \value{ A list with two components: \item{x}{a vector of length \code{n} containing the ordered x ordinates of the chirp from -1 to 1.} \item{y}{a vector of length \code{n} containing the corresponding y ordinates of the chirp.} } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \examples{ # # Generate the chirp # test.data <- simchirp()$y \dontrun{ts.plot(test.data)} } \keyword{manip} \author{G P Nason} wavethresh/man/wd.object.rd0000644000176200001440000001020214211622634015407 0ustar liggesusers\name{wd.object} \alias{wd.object} \title{Wavelet decomposition objects} \description{ These are objects of classes \code{wd} They represent a decomposition of a function with respect to a wavelet basis (or tight frame in the case of the (time-ordered) non-decimated wavelet decomposition). } \value{ The following components must be included in a legitimate `wd' object. \item{C}{a vector containing each level's smoothed data. The wavelet transform works by applying both a smoothing filter and a bandpass filter to the previous level's smoothed data. The top level contains data at the highest resolution level. Each of these levels are stored one after the other in this vector. The matrix \code{fl.dbase$first.last.c} determines exactly where each level is stored in the vector. Likewise, coefficients stored when the NDWT has been used should only be extracted using the ``access'' and ``put'' functions below.} \item{D}{wavelet coefficients. If you were to write down the discrete wavelet transform of a function then these D would be the coefficients of the wavelet basis functions. Like the C, they are also formed in a pyramidal manner, but stored in a linear array. The storage details are to be found in \code{fl.dbase$first.last.d} Likewise, coefficients stored when the NDWT has been used should only be extracted using the ``access'' and ``put'' functions below. } \item{nlevelsWT}{The number of levels in the pyramidal decomposition that produces the coefficients. If you raise 2 to the power of nlevels you get the number of data points used in the decomposition.} \item{fl.dbase}{The first last database associated with this decomposition. This is a list consisting of 2 integers, and 2 matrices. The matrices detail how the coefficients are stored in the C and D components of the `wd.object'. See the help on \code{\link{first.last}} for more information. 
} \item{filter}{a list containing the details of the filter that did the decomposition} \item{type}{either \bold{wavelet} indicating that the ordinary wavelet transform was performed or \bold{station} indicating that the time-ordered non-decimated wavelet transform was done.} \item{date}{The date that the transform was performed or the wd was modified.} \item{bc}{how the boundaries were handled} } \details{ To retain your sanity the C and D coefficients should be extracted by the \code{\link{accessC}} and \code{\link{accessD}} functions and inserted using the \code{\link{putC}} and \code{\link{putD}} functions (or more likely, their methods), rather than by the \code{$} operator. Mind you, if you want to muck about with coefficients directly, then you'll have to do it yourself by working out what the fl.dbase list means (see \code{\link{first.last}} for a description). Note the \emph{time-ordered non-decimated wavelet transform} used to be called the \emph{stationary wavelet transform}. In fact, the non-decimated transform has several possible names and has been reinvented many times. There are two versions of the non-decimated transform: the coefficients are the same in each version, just ordered differently within a resolution level. The two transforms are \itemize{ \item{The function \code{\link{wd}}() with an argument \code{type="station"} computes the \emph{time-ordered} non-decimated transform (see Nason and Silverman, 1995) which is useful in time-series applications (see e.g. Nason, von Sachs and Kroisandt, 1998).} \item{The function \code{\link{wst}}() computes the packet-ordered non-decimated transform which is useful for curve estimation type applications (see e.g. Coifman and Donoho, 1995). } } } \section{GENERATION}{ This class of objects is returned from the \code{\link{wd}} function to represent a (possibly time-ordered non-decimated) wavelet decomposition of a function. Many other functions return an object of class wd. } \section{METHODS}{ The wd class of objects has methods for the following generic functions: \code{\link{plot}}, \code{\link{threshold}}, \code{\link{summary}}, \code{\link{print}}, \code{\link{draw}}. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wd}}, \code{\link{wst}} } \keyword{classes} \keyword{smooth} \author{G P Nason} wavethresh/man/ConvertMessage.rd0000644000176200001440000000064214211622540016460 0ustar liggesusers\name{ConvertMessage} \alias{ConvertMessage} \title{Print out a text message about an object which is from an old version of WaveThresh} \usage{ConvertMessage()} \arguments{None} \description{ Print out a text message about an object being from an old version of WaveThresh. } \details{ Description says all! } \value{ None } \seealso{\code{\link{IsEarly.default}},\code{\link{IsEarly}}} \author{G P Nason} \keyword{error} wavethresh/man/threshold.irregwd.rd0000644000176200001440000001636214211622634017203 0ustar liggesusers\name{threshold.irregwd} \alias{threshold.irregwd} \title{Threshold irregularly spaced wavelet decomposition object} \description{ This function provides various ways to threshold an \code{\link{irregwd}} class object.
} \usage{\method{threshold}{irregwd}(irregwd, levels = 3:(nlevelsWT(wd) - 1), type = "hard", policy = "universal", by.level = FALSE, value = 0, dev = var, boundary = FALSE, verbose = FALSE, return.threshold = FALSE, force.sure=FALSE, cvtol = 0.01, Q = 0.05, alpha=0.05, \dots) } \arguments{ \item{irregwd}{The irregularly spaced wavelet decomposition object that you wish to threshold.} \item{levels}{a vector of integers which determines which scale levels are thresholded in the decomposition. Each integer in the vector must refer to a valid level in the \code{\link{irregwd}} object supplied. This is usually any integer from 0 to \code{\link{nlevelsWT}}(irregwd)-1 inclusive. Only the levels in this vector contribute to the computation of the threshold and its application.} \item{type}{determines the type of thresholding: this can be "hard" or "soft".} \item{policy}{selects the technique by which the threshold value is selected. Each policy corresponds to a method in the literature. At present the different policies are: \code{"universal"}, \code{"LSuniversal"}, \code{"sure"}, \code{"cv"}, \code{"fdr"}, \code{"op1"}, \code{"op2"}, \code{"manual"}, \code{"mannum"}, \code{"probability"}. A description of the policies can be found in the help for \code{\link{threshold.wd}}.} \item{by.level}{If \code{FALSE} then a global threshold is computed on and applied to all scale levels defined in levels. If \code{TRUE} a threshold is computed and applied separately to each scale level.} \item{value}{This argument conveys the user supplied threshold. If \code{policy="manual"} then value is the actual threshold value.} \item{dev}{this argument supplies the function to be used to compute the spread of the absolute values of the coefficients. The function supplied must return a value of spread on the variance scale (i.e. not standard deviation) such as the \code{var()} function. A popular, useful and robust alternative is the \code{\link{madmad}} function.} \item{boundary}{If this argument is \code{TRUE} then the boundary bookkeeping values are included for thresholding, otherwise they are not.} \item{verbose}{if \code{TRUE} then the function prints out informative messages as it progresses.} \item{return.threshold}{If this option is \code{TRUE} then the actual \emph{value} of the threshold is returned. If this option is \code{FALSE} then a thresholded version of the input is returned.} \item{force.sure}{If \code{TRUE} then the \code{SURE} threshold is computed on a vector even when that vector is very sparse. If \code{FALSE} then the normal SUREshrink procedure is followed whereby the universal threshold is used for sparse vectors of coefficients.} \item{cvtol}{Parameter for the cross-validation \code{"cv"} policy.} \item{Q}{Parameter for the false discovery rate \code{"fdr"} policy.} \item{alpha}{Parameter for Ogden and Parzen's first \code{"op1"} and \code{"op2"} policies.} \item{\dots}{other arguments} } \details{ This function thresholds or shrinks wavelet coefficients stored in an \code{\link{irregwd}} object and returns the coefficients in a modified \code{\link{irregwd}} object. The thresholding step is an essential component of denoising. The basic idea of thresholding is very simple. In a signal plus noise model the wavelet transform of the signal is very sparse, the wavelet transform of noise is not (in particular, if the noise is iid Gaussian then so is the noise contained in the wavelet coefficients).
Thus since the signal gets concentrated in the wavelet coefficients and the noise remains "spread" out it is "easy" to separate the signal from noise by keeping large coefficients (which correspond to signal) and deleting the small ones (which correspond to noise). However, one has to have some idea of the noise level (computed using the dev option in threshold functions). If the noise level is very large then it is possible, as usual, that no signal "sticks up" above the noise. For thresholding of an \emph{irregularly spaced wavelet decomposition} things are a little different. The original data are irregularly spaced (i.e. [x,y] where the \eqn{x_i} are irregularly spaced) and even if one assumes iid error on the original data, once this has been interpolated to a grid by the \code{\link{makegrid}} function the interpolated data values are not independent. The \code{\link{irregwd}} function computes the wavelet transform of the interpolated data but also computes the variance of each coefficient using a fast transform. This variance information is stored in the c component of \code{\link{irregwd}} objects and this function, \code{threshold.irregwd}, makes use of this variance information when thresholding each coefficient. For more details see Kovac and Silverman, 2000. Some issues to watch for: \describe{ \item{levels}{The default of \code{levels = 3:(wd$nlevelsWT - 1)} for the \code{levels} option most certainly does not work globally for all data problems and situations. The level at which thresholding begins (i.e. the level at and above which the given threshold is applied) is called the \emph{primary resolution} and is unique to a particular problem. In some ways choice of the primary resolution is very similar to choosing the bandwidth in kernel regression, albeit on a logarithmic scale. See Hall and Patil (1995) and Hall and Nason (1997) for more information. For each data problem you need to work out which is the best primary resolution. This can be done by gaining experience at what works best, or using prior knowledge. It is possible to "automatically" choose a "best" primary resolution using cross-validation (but not yet in WaveThresh). Secondly the levels argument computes and applies the threshold at the levels specified in the levels argument. It does this for all the levels specified. Sometimes, in wavelet shrinkage, the threshold is computed using only the finest scale coefficients (or more precisely the estimate of the overall noise level). If you want your threshold variance estimate only to use the finest scale coefficients (e.g. with universal thresholding) then you will have to apply the \code{threshold.wd} function twice. Once, with levels set equal to \code{nlevelsWT}(wd)-1 and with \code{return.threshold=TRUE}, to return the threshold computed on the finest scale, and then a second time with the manual option, supplying the value of the previously computed threshold as the value option.} \item{by.level}{For a \code{\link{wd}} object which has come from data with correlated noise you should have a threshold computed for each resolution level. See the paper by Johnstone and Silverman, 1997.} } }
} \section{RELEASE}{Version 3.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{makegrid}}, \code{\link{irregwd}}, \code{\link{irregwd}} object, \code{\link{accessc}}, } \examples{ # # See main examples of these functions in the help to makegrid # } \keyword{manip} \author{Arne Kovac} wavethresh/man/compress.rd0000644000176200001440000000151014211622540015361 0ustar liggesusers\name{compress} \alias{compress} \title{Compress objects} \description{ Compress objects. This function is generic. Particular methods exist. For the \code{\link{imwd}} class object this generic function uses \code{\link{compress.imwd}}. There is a default compression method: \code{\link{compress.default}} that works on vectors. } \usage{ compress(\dots) } \arguments{ \item{\dots}{See individual help pages for details. } } \details{ See individual method help pages for operation and examples } \value{ A compressed version of the input. } \section{RELEASE}{Version 2.0 Copyright Guy Nason 1993} \seealso{ \code{\link{compress.default}}, \code{\link{compress.imwd}}, \code{\link{imwd}}, \code{\link{imwd.object}}, \code{\link{imwdc.object}}, \code{\link{threshold.imwd}} } \keyword{manip} \keyword{utilities} \author{G P Nason} wavethresh/man/TOthreshda2.rd0000644000176200001440000000346714211622634015676 0ustar liggesusers\name{TOthreshda2} \alias{TOthreshda2} \title{Data analytic wavelet thresholding routine} \usage{ TOthreshda2(ywd, alpha = 0.05, verbose = FALSE, return.threshold = FALSE) } \arguments{ \item{ywd}{The \code{\link{wd.object}} that you wish to threshold.} \item{alpha}{The smoothing parameter which is a p-value } \item{verbose}{Whether messages get printed} \item{return.threshold}{If TRUE then the threshold value gets returned rather than the actual thresholded object} } \description{ This function might be better called using the regular \code{\link{threshold}} function using the \code{op2} policy. Corresponds to the wavelet thresholding routine developed by Ogden and Parzen (1994) Data dependent wavelet thresholding in nonparametric regression with change-point applications. \emph{Tech Rep 176}, University of South Carolina, Department of Statistics. } \details{ The TOthreshda2 method operates in a similar fashion to \code{\link{TOthreshda1}} except that it takes the cumulative sum of squared coefficients, creating a sample "Brownian bridge" process, and then using the standard Kolmogorov-Smirnov statistic in testing. In this situation, the level of the hypothesis tests, alpha, has default value 0.05. Note that the choice of alpha controls the smoothness of the resulting wavelet estimator -- in general, a relatively large alpha makes it easier to include coefficients, resulting in a more wiggly estimate; a smaller alpha will make it more difficult to include coefficients, yielding smoother estimates. } \value{ Returns the threshold value if \code{return.threshold==TRUE} otherwise returns the shrunk set of wavelet coefficients. } \seealso{\code{\link{threshold}},\code{\link{TOthreshda1}}, \code{\link{wd}}} \author{Todd Ogden} \keyword{smooth} wavethresh/man/threshold.mwd.rd0000644000176200001440000001303314211622634016317 0ustar liggesusers\name{threshold.mwd} \alias{threshold.mwd} \title{Use threshold on an mwd object. } \description{ Applies hard or soft thresholding to multiple wavelet decomposition object mwd.object. 
} \usage{ \method{threshold}{mwd}(mwd, levels = 3:(nlevelsWT(mwd) - 1), type = "hard", policy = "universal", boundary = FALSE, verbose = FALSE, return.threshold = FALSE, threshold = 0, covtol = 1e-09, robust = TRUE, return.chisq = FALSE, bivariate = TRUE, \dots) } \arguments{ \item{mwd}{The multiple wavelet decomposition object that you wish to threshold.} \item{levels}{a vector of integers which determines which scale levels are thresholded in the decomposition. Each integer in the vector must refer to a valid level in the \code{\link{mwd}} object supplied. This is usually any integer from 0 to \code{\link{nlevelsWT}}(wd)-1 inclusive. Only the levels in this vector contribute to the computation of the threshold and its application. } \item{type}{determines the type of thresholding this can be "\code{hard}" or "\code{soft}".} \item{policy}{selects the technique by which the threshold value is selected. Each policy corresponds to a method in the literature. At present the different policies are "\code{universal}", "\code{manual}", "\code{single}". The policies are described in detail below. } \item{boundary}{If this argument is \code{TRUE} then the boundary bookeeping values are included for thresholding, otherwise they are not. } \item{verbose}{if \code{TRUE} then the function prints out informative messages as it progresses. } \item{return.threshold}{If this option is \code{TRUE} then the actual \emph{value} of the threshold is returned. If this option is FALSE then a thresholded version of the input is returned.} \item{threshold}{This argument conveys the user supplied threshold. If the \code{policy="manual"} then \code{value} is the actual threshold value. Any other \code{policy} means that the \code{threshold} value is ignored.} \item{covtol}{The tolerance for what constitutes a singular variance matrix. If smallest eigenvalue of the estimated variance matrix is less than \code{covtol} then it is assumed to be singular and no thresholding is done at that level. Note: do not confuse \code{covtol} with \code{cvtol} an argument in \code{\link{threshold.wd}}.} \item{robust}{If TRUE the variance matrix at each level is estimated using a robust method (mad) otherwise it is estimated using var().} \item{return.chisq}{If TRUE the vector of values to be thresholded is returned. These values are a quadratic form of each coefficient vector, and under normal assumptions the noise component will have a chi-squared distribution (see Downie and Silverman 1996). } \item{bivariate}{this line is in construction} \item{\dots}{any other arguments} } \details{ Thresholding modifies the coefficients within a \code{\link{mwd.object}}. The modification can be performed either with a "hard" or "soft" thresholding selected by the type argument. Unless policy="single", the following method is applied. The columns of \code{mwd$D} are taken as coefficient vectors \eqn{D_{j,k}}. From these \eqn{\chi^2_{j,k}=D_{j,k} \cdot V_j^{-1}}. \eqn{D_{j,k}} is computed, where \eqn{V_j^{-1}} is the inverse of the estimated variance of the coefficient vectors in that level (j). \eqn{\chi^2_{j,k}} is a positive scalar which is to be thresholded in a similar manner to univariate hard or soft thresholding. To obtain the new values of \eqn{D_{j,k}} shrink the vector by the same proportion as was the corresponding \eqn{\chi^2_{j,k}} term. i } \value{ An object of class \code{\link{mwd}}. This object contains the thresholded wavelet coefficients. 
Note that if the \code{return.threshold} option is set to \code{TRUE} then the threshold values will be returned, or if \code{return.chisq} is \code{TRUE} the vector of values to be thresholded will be returned, rather than the thresholded object.} \note{ POLICIES \describe{ \item{single}{If \code{policy="single"} then univariate thresholding is applied to each element of D as in Strela et al. (1999).} \item{universal}{The \code{universal} threshold is computed using 2log(n) (see Downie & Silverman 1996) where n is the number of coefficient vectors to be thresholded.} \item{manual}{The "\code{manual}" policy is simple. You supply a \code{threshold} value to the threshold argument and hard or soft thresholding is performed using that value.} } } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1995-6). } \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Generate some noisy data # ynoise <- test.data + rnorm(512, sd=0.1) # # Plot it # \dontrun{ts.plot(ynoise)} # # Now take the discrete multiple wavelet transform # N.b. I have no idea if the default wavelets here are appropriate for # this particular example. # ynmwd <- mwd(ynoise) \dontrun{plot(ynmwd)} # [1] 2.020681 2.020681 2.020681 2.020681 2.020681 2.020681 2.020681 # # Now do thresholding. We'll use the default arguments. # ynmwdT <- threshold(ynmwd) # # And let's plot it # \dontrun{plot(ynmwdT)} # # Let us now see what the actual estimate looks like # ymwr <- wr(ynmwdT) # # Here's the estimate... # \dontrun{ts.plot(ymwr)} } \keyword{smooth} \keyword{nonlinear} \author{Tim Downie} wavethresh/man/ipd.rd0000644000176200001440000000435714211622540014316 0ustar liggesusers\name{ipd} \docType{data} \alias{ipd} \title{Inductance plethysmography data.} \description{ Inductance plethysmography trace. } \usage{data(ipd)} \source{ This data set contains 4096 observations of inductance plethysmography data sampled at 50Hz starting at 1229.98 seconds. This is a regular time series object. I am grateful to David Moshal and Andrew Black of the Department of Anaesthesia, University of Bristol for permission to include this data set. This data set was used in Nason, 1996 to illustrate noise reduction with wavelet shrinkage and using cross-validation for choosing the threshold. A plethysmograph is an apparatus for measuring variations in the size of parts of the body. In this experiment the inductance plethysmograph consists of a coil of wire encapsulated in a belt. A radio-frequency carrier signal is passed through the wire and size variations change the inductance of the coil, which can be detected as a change in voltage. When properly calibrated the output voltage of the inductance plethysmograph is proportional to the change in volume of the part of the body under examination. It is of both clinical and scientific interest to discover how anaesthetics or analgesics may alter normal breathing patterns post-operatively. Sensors exist that measure blood oxygen saturation but by the time they indicate critically low levels the patient is often apnoeic (i.e. has stopped breathing) and in considerable danger.
It is possible for a nurse to continually observe a patient but this is expensive, prone to error and requires training. In this example the plethysmograph is arranged around the chest and abdomen of a set of patients and is used to measure the flow of air during breathing. The recordings below were made by the Department of Anaesthesia at the Bristol Royal Infirmary after the patients had undergone surgery under general anaesthetic. The data set (plotted in the example below) shows a section of plethysmograph recording lasting approximately 80 seconds. The two main sets of regular oscillations correspond to normal breathing. The disturbed behaviour in the centre of the plot where the normal breathing pattern disappears corresponds to the patient vomiting. } \examples{ # data(ipd) \dontrun{ts.plot(ipd)} } \keyword{datasets} \author{G P Nason} wavethresh/man/accessC.wp.rd0000644000176200001440000000123114211622540015527 0ustar liggesusers\name{accessC.wp} \alias{accessC.wp} \title{Warning function when trying to access smooths from wavelet packet object (wp).} \description{ There are no real smooths to access in a \code{\link{wp}} wavelet packet object. This function returns an error message. To obtain coefficients from a wavelet packet object you should use the \code{\link{getpacket}} collection of functions. } \usage{ \method{accessC}{wp}(wp, \dots) } \arguments{ \item{wp}{Wavelet packet object.} \item{\dots}{any other arguments} } \value{An error message!} \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994} \seealso{ \code{\link{getpacket}} } \keyword{manip} \author{G P Nason} wavethresh/man/plotdenwd.rd0000644000176200001440000000600114211622540015530 0ustar liggesusers\name{plotdenwd} \alias{plotdenwd} \title{Plot the wavelet coefficients of a p.d.f.} \usage{ plotdenwd(wd, xlabvals, xlabchars, ylabchars, first.level=0, top.level=nlevelsWT(wd)-1, main="Wavelet Decomposition Coefficients", scaling="global", rhlab=FALSE, sub, NotPlotVal=0.005, xlab="Translate", ylab="Resolution Level", aspect="Identity", \dots) } \arguments{ \item{wd}{Wavelet decomposition object, usually output from \code{\link{denwd}}, possibly thresholded.} \item{xlabvals}{X-axis values at which the \code{xlabchars} will be printed} \item{xlabchars}{The x-label characters to be plotted at \code{xlabvals}} \item{ylabchars}{The y-label characters} \item{first.level}{This specifies how many of the coarse levels of coefficients are omitted from the plot. The default value of 0 means that all levels are plotted.} \item{top.level}{This tells the plotting routine the true resolution level of the finest level of coefficients. The default results in the coarsest level being labelled 0. The "correct" value can be determined from the empirical scaling function coefficient object (output from denproj) as in the example below.} \item{main}{The title of the plot.} \item{scaling}{The type of scaling applied to levels within the plot. This can be "compensated", "by.level" or "global".
See \code{\link{plot.wd}} for further details.} \item{rhlab}{Determines whether the scale factors applied to each level before plotting are printed as the right hand axis.} \item{sub}{The plot subtitle} \item{NotPlotVal}{If the maximum coefficient in a particular level is smaller than \code{NotPlotVal}, then the level is not plotted.} \item{xlab}{The x-axis label} \item{ylab}{The y-axis label} \item{aspect}{Function to apply to coefficients before plotting} \item{\dots}{Other arguments to the main plot routine} } \description{ Plots the wavelet coefficients of a density function. } \details{ Basically the same as \code{\link{plot.wd}} except that it copes with the zero boundary conditions used in density estimation. Note that for large filter number wavelets the high level coefficients will appear very squashed compared with the low level coefficients. This is a consequence of the zero boundary conditions and the use of the convention that each coefficient is plotted midway between two coefficients at the next highest level, as in \code{\link{plot.wd}}. } \value{ Axis labels to the right of the picture (scale factors). These are returned as they are sometimes hard to read on the plot. } \examples{ # Simulate data from the claw density, find the empirical # scaling function coefficients, decompose them and plot # the resulting wavelet coefficients data <- rclaw(100) datahr <- denproj(data, J=8, filter.number=2, family="DaubExPhase") data.wd <- denwd(datahr) \dontrun{plotdenwd(data.wd, top.level=(datahr$res$J-1))} # # Now use a smoother wavelet # datahr <- denproj(data, J=8, filter.number=10, family="DaubLeAsymm") data.wd <- denwd(datahr) \dontrun{plotdenwd(data.wd, top.level=(datahr$res$J-1))} } \author{David Herrick} \keyword{hplot} wavethresh/man/BabyECG.rd0000644000176200001440000000513014211622540014724 0ustar liggesusers\name{BabyECG} \docType{data} \alias{BabyECG} \title{Physiological data time series.} \description{ Two linked medical time series containing 2048 observations sampled every 16 seconds recorded from 21:17:59 to 06:27:18. Both these time series were recorded from the same 66 day old infant by Prof. Peter Fleming, Dr Andrew Sawczenko and Jeanine Young of the Institute of Child Health, Royal Hospital for Sick Children, Bristol. \code{BabyECG}, is a record of the infant's heart rate (in beats per minute). BabySS is a record of the infant's sleep state on a scale of 1 to 4 as determined by a trained expert monitoring EEG (brain) and EOG (eye-movement). The sleep state codes are 1=quiet sleep, 2=between quiet and active sleep, 3=active sleep, 4=awake. } \format{ The \code{BabyECG} time series is a nice examples of a non-stationary time series whose spectral (time-scale) properties vary over time. The function \code{\link{ewspec}} can be used to anaylse this time series to inspect the variation in the power of the series over time and scales. The \code{BabySS} time series is a useful independent time series that can be associated with changing power in the \code{BabyECG} series. See the discussion in Nason, von Sachs and Kroisandt. } \source{Institute of Child Health, Royal Hospital for Sick Children, Bristol.} \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \references{ Nason, G.P., von Sachs, R. and Kroisandt, G. (1998). Wavelet processes and adaptive estimation of the evolutionary wavelet spectrum. \emph{Technical Report}, Department of Mathematics University of Bristol/ Fachbereich Mathematik, Kaiserslautern. 
} \section{SEE ALSO}{\code{\link{ewspec}}} \examples{ data(BabyECG) data(BabySS) # # Plot the BabyECG data with BabySS overlaid # # Note the following code does some clever scaling to get the two # time series overlaid. # myhrs <- c(22, 23, 24, 25, 26, 27, 28, 29, 30) mylab <- c("22", "23", "00", "01", "02", "03", "04", "05", "06") initsecs <- 59 + 60 * (17 + 60 * 21) mysecs <- (myhrs * 3600) secsat <- (mysecs - initsecs)/16 mxy <- max(BabyECG) mny <- min(BabyECG) ro <- range(BabySS) no <- ((mxy - mny) * (BabySS - ro[1]))/(ro[2] - ro[1]) + mny rc <- 0:4 nc <- ((mxy - mny) * (rc - ro[1]))/(ro[2] - ro[1]) + mny \dontrun{plot(1:length(BabyECG), BabyECG, xaxt = "n", type = "l", xlab = "Time (hours)", ylab = "Heart rate (beats per minute)")} \dontrun{lines(1:length(BabyECG), no, lty = 3)} \dontrun{axis(1, at = secsat, labels = mylab)} \dontrun{axis(4, at = nc, labels = as.character(rc))} # # Sleep state is the right hand axis # # } \keyword{datasets} \author{G P Nason} wavethresh/man/nlevelsWT.rd0000644000176200001440000000155114211622540015456 0ustar liggesusers\name{nlevelsWT} \alias{nlevelsWT} \title{Returns number of scale (resolution) levels.} \description{ Returns the number of scales (or resolutions) in various wavelet objects and for some objects returns the number of scales that would result if processed by a wavelet routine. This function is generic. One method exists at present as most wavelet objects store the number of levels as the \code{nlevelsWT} component. The method that exists is \code{\link{nlevelsWT.default}}. } \usage{ nlevelsWT(...) } \arguments{ \item{\dots}{See individual help pages for details.} } \details{ See individual method help pages for operation and examples. } \value{ An integer representing the number of levels associated with the object. } \section{RELEASE}{Version 3.6.0 Copyright Guy Nason 1995 } \seealso{ \code{\link{nlevelsWT.default}} } \keyword{arith} \author{G P Nason} wavethresh/man/summary.imwd.rd0000644000176200001440000000154214211622634016173 0ustar liggesusers\name{summary.imwd} \alias{summary.imwd} \title{Print out some basic information associated with an imwd object} \usage{ \method{summary}{imwd}(object, \dots) } \arguments{ \item{object}{The object to print a summary about} \item{\dots}{Other arguments} } \description{ Prints out the number of levels, the dimensions of the original image from which the object came, the type of wavelet filter associated with the decomposition, and the type of boundary handling. } \details{ Description says all } \value{ Nothing } \seealso{\code{\link{imwd}}, \code{\link{threshold.imwd}}} \examples{ m <- matrix(rnorm(32*32),nrow=32) mimwd <- imwd(m) summary(mimwd) #UNcompressed image wavelet decomposition structure #Levels: 5 #Original image was 32 x 32 pixels. #Filter was: Daub cmpct on least asymm N=10 #Boundary handling: periodic } \author{G P Nason} \keyword{print} wavethresh/man/wd3D.object.rd0000644000176200001440000000351714211622634015611 0ustar liggesusers\name{wd3D.object} \alias{wd3D.object} \title{Three-dimensional wavelet object} \description{ These are objects of class \code{wd3D}. They contain the 3D discrete wavelet transform of a 3D array (with each dimension being the same dyadic size). } \details{ To retain your sanity the wavelet coefficients at any resolution level in directions GGG, GGH, GHG, GHH, HGG, HGH and HHG should be extracted by the \code{\link{accessD}}() function and inserted using the \code{\link{putD}} function rather than by the \code{$} operator.
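For instance, a minimal sketch (the precise argument names of the access function are documented in \code{\link{accessD.wd3D}} and are assumed here, in particular the \code{block} direction selector): \preformatted{
# Decompose a small dyadic array and extract one direction block of
# level 0 detail coefficients
tdwd3D <- wd3D(array(rnorm(8*8*8), dim=c(8,8,8)))
accessD(tdwd3D, level=0, block="GGG")
}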
} \value{ The following components must be included in a legitimate `wd3D' object. \item{a}{a three-dimensional array containing the 3D discrete wavelet coefficients. The coefficients are stored in a pyramid structure for efficiency.} \item{nlevelsWT}{The number of levels in the pyramidal decomposition that produces the coefficients. If you raise 2 to the power of nlevels you get the number of data points used in each dimension of the decomposition.} \item{filter.number}{the number of the wavelet family that did the DWT.} \item{family}{the family of wavelets that did the DWT.} \item{date}{the date that the transform was computed.} } \section{generation}{ This class of objects is returned from the wd3D function to represent a three-dimensional DWT of a 3D array. Other functions return an object of class wd3D. } \section{methods}{ The wd3D class of objects has methods for the following generic functions: \code{\link{accessD}}, \code{\link{print}}, \code{\link{putD}}, \code{\link{summary}}, \code{\link{threshold}}. } \section{release}{Version 3.9.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{wd3D}}, \code{\link{accessD.wd3D}}, \code{\link{print.wd3D}}, \code{\link{putD.wd3D}}, \code{\link{putDwd3Dcheck}}, \code{\link{summary.wd3D}}, \code{\link{threshold.wd3D}}, \code{\link{wr3D}}. } \keyword{classes} \author{G P Nason} wavethresh/man/convert.wst.rd0000644000176200001440000000633114211622540016030 0ustar liggesusers\name{convert.wst} \alias{convert.wst} \title{Convert a non-decimated wst object into a wd object. } \description{ Convert a packet-ordered non-decimated wavelet transform object into a time-ordered non-decimated wavelet transform object.} \usage{ \method{convert}{wst}(wst, \dots) } \arguments{ \item{wst}{The \code{\link{wst}} class object that you wish to convert.} \item{\dots}{any other arguments} } \details{ In WaveThresh3 a non-decimated wavelet transform can be ordered in two different ways: as a time-ordered or packet-ordered representation. The coefficients in the two objects are \emph{exactly the same}; it is just their internal representation and ordering which is different. The two different representations are useful in different situations. The packet-ordering is useful for curve estimation applications and the time-ordering is useful for time series applications. See Nason, Sapatinas and Sawczenko, 1998 for further details on ordering and weaving. Note that the input object must be of the non-decimated type. In other words the type component of the input object must be "\code{station}". Once the input object has been converted the output can be used with any of the functions suitable for the \code{\link{wd.object}}. The actual weaving permutation for shuffling coefficients from one representation to another is achieved by the \code{\link{getarrvec}} function. } \value{ An object of class \code{\link{wd}} containing exactly the same information as the input object but ordered differently as a time-ordered object. } \section{RELEASE}{Version 3.6 Copyright Guy Nason 1997 } \seealso{ \code{\link{convert}}, \code{\link{getarrvec}}, \code{\link{levarr}}, \code{\link{wd}}, \code{\link{wd.object}}, \code{\link{wst}}, \code{\link{wst.object}}.
} \examples{ # # Generate a sequence of 32 random normals (say) and take their # \code{packet-ordered non-decimated wavelet transform} # myrand <- wst(rnorm(32)) # # Print out the result (to verify the class and type of the object) # #myrand #Class 'wst' : Stationary Wavelet Transform Object: # ~~~ : List with 8 components with names # wp Carray nlevelsWT filter date # #$WP and $Carray are the coefficient matrices # #Created on : Tue Sep 29 12:29:45 1998 # #summary(.): #---------- #Levels: 5 #Length of original: 32 #Filter was: Daub cmpct on least asymm N=10 #Boundary handling: periodic #Date: Tue Sep 29 12:29:45 1998 # # Yep, the myrand object is of class: \code{\link{wst.object}}. # # Now let's convert it to class \code{\link{wd}}. The object # gets returned and, as usual in S, is printed. # convert(myrand) #Class 'wd' : Discrete Wavelet Transform Object: # ~~ : List with 8 components with names # C D nlevelsWT fl.dbase filter type bc date # #$ C and $ D are LONG coefficient vectors ! # #Created on : Tue Sep 29 12:29:45 1998 #Type of decomposition: station # #summary(.): #---------- #Levels: 5 #Length of original: 32 #Filter was: Daub cmpct on least asymm N=10 #Boundary handling: periodic #Transform type: station #Date: Tue Sep 29 12:29:45 1998 # # The returned object is of class \code{\link{wd}} with a # type of "station". # I.e. it has been converted successfully. } \keyword{manip} \author{G P Nason} wavethresh/man/wd.int.rd0000644000176200001440000000411514211622634014741 0ustar liggesusers\name{wd.int} \alias{wd.int} \title{Computes "wavelets on the interval" transform} \description{ This function actually computes the "wavelets on the interval" transform. \bold{NOTE:} It is not recommended that the casual user call this function. The "wavelets on the interval" transform is best called in \code{WaveThresh} via the \code{\link{wd}} function with the \code{bc} argument set to \code{"interval"}. } \usage{ wd.int(data, preferred.filter.number, min.scale, precond) } \arguments{ \item{data}{The data that you wish to apply the "wavelets on the interval" transform to.} \item{preferred.filter.number}{Which wavelet to use to do the transform. This is an integer ranging from 1 to 8. See the Cohen, Daubechies and Vial (1993) paper. Wavelets that do not "overlap" a boundary are just like the ordinary Daubechies' wavelets.} \item{min.scale}{At which resolution level to transform to.} \item{precond}{If true, performs preconditioning of the input vector to try and ensure that simple polynomial sequences (of order less than that of the wavelet used) map to zero elements.} } \details{ (The \code{WaveThresh} implementation of the ``wavelets on the interval transform'' was coded by Piotr Fryzlewicz, Department of Mathematics, Wroclaw University of Technology, Poland; this code was largely based on code written by Markus Monnerjahn, RHRK, Universitat Kaiserslautern; integration into WaveThresh by GPN). See the help on the "wavelets on the interval code" in the \code{\link{wd}} help page. } \value{ A list containing the wavelet transform of the \code{data}. We again emphasize that this list is not intended for human consumption; use the \code{\link{wd}} function with the correct \code{bc="interval"} argument. } \section{RELEASE}{Version 3.9.6 (Although Copyright Piotr Fryzlewicz and Markus Monnerjahn 1995-9). } \seealso{ \code{\link{wd}}, \code{\link{wr}}, \code{\link{wr.int}}.
} \examples{ # # The user is expected to call the wr # for inverting a "wavelets on the interval transform" and not to use # this function explicitly # } \keyword{smooth} \keyword{nonlinear} \author{Piotr Fryzlewicz} wavethresh/man/draw.imwdc.rd0000644000176200001440000000414514211622540015574 0ustar liggesusers\name{draw.imwdc} \alias{draw.imwdc} \title{Draw mother wavelet associated with an imwdc object. } \description{ This function draws the mother wavelet associated with an \code{\link{imwdc.object}} --- a compressed two-dimensional wavelet decomposition. } \usage{ \method{draw}{imwdc}(wd, resolution=128, \dots) } \arguments{ \item{wd}{The \code{\link{imwd}} class object whose associated wavelet you wish to draw. (I know its called wd, sorry).} \item{resolution}{The resolution at which the computation is done to compute the wavelet picture. Generally the resolution should be lower for two-dimensional wavelets since the number of computations is proportional to the square of the resolution (the DWT is still O(n) though).} \item{\dots}{Additional arguments to pass to the \code{\link{draw.default}} function which does the drawing.} } \details{ This function extracts the \code{filter} component from the \code{\link{imwd}} object (which is constructed using the \code{\link{filter.select}} function) to decide which wavelet to draw. Once decided the \code{\link{draw.default}} function is used to actually do the drawing. } \value{ If the \code{plot.it} argument is set to \code{TRUE} then nothing is returned. Otherwise, as with \code{\link{draw.default}}, the coordinates of what would have been plotted are returned. } \note{ If the \code{plot.it} argument is \code{TRUE} (which it is by default) a plot of the mother wavelet or scaling function is plotted on the active graphics device. } \section{RELEASE}{Version 2 Copyright Guy Nason 1993 } \seealso{ \code{\link{filter.select}}, \code{\link{imwdc.object}}, \code{\link{draw.default}}. } \examples{ # # Let's use the lennon test image # data(lennon) \dontrun{image(lennon)} # # Now let's do the 2D discrete wavelet transform using Daubechies' # least-asymmetric wavelet N=6 # lwd <- imwd(lennon, filter.number=6) # # Now let's threshold the 2D DWT # The resultant class of object is imwdc object. # lwdT <- threshold(lwd) # # And now draw the wavelet that did this transform # \dontrun{draw(lwdT)} # # A nice little two-dimensional wavelet! # } \keyword{hplot} \author{G P Nason} wavethresh/man/compare.filters.rd0000644000176200001440000000243214211622540016627 0ustar liggesusers\name{compare.filters} \alias{compare.filters} \title{Compares two filters. } \description{Compares two filters (such as those returned from \code{\link{filter.select}}). This function returns TRUE is they are the same otherwise returns FALSE. } \usage{compare.filters(f1,f2) } \arguments{ \item{f1}{Filter, such as that returned by \code{\link{filter.select}} } \item{f2}{Filter, such as that returned by \code{\link{filter.select}} } } \details{ A very simple function. It only needs to check that the \code{family} and \code{filter.number} components of the filter are the same. } \value{ If \code{f1} and \code{f2} are the same the function returns TRUE, otherwise it returns FALSE. } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{filter.select}}. } \examples{ # # Create three filters! 
# filt1 <- filter.select(4, family="DaubExPhase") filt2 <- filter.select(3, family="DaubExPhase") filt3 <- filter.select(4, family="DaubLeAsymm") # # Now let us see if they are the same... # compare.filters(filt1, filt2) # [1] FALSE compare.filters(filt1, filt3) # [1] FALSE compare.filters(filt2, filt3) # [1] FALSE # # Nope, (what a surprise) they weren't. How about # compare.filters(filt1, filt1) # [1] TRUE # # Yes, they were the same! } \keyword{manip} \author{G P Nason} wavethresh/man/drawwp.default.rd0000644000176200001440000000212114211622540016454 0ustar liggesusers\name{drawwp.default} \alias{drawwp.default} \title{Subsidiary routine that actually computes wavelet packet values} \usage{ drawwp.default(level, index, filter.number = 10, family = "DaubLeAsymm", resolution = 64 * 2^level) } \arguments{ \item{level}{The resolution level of the packet you want} \item{index}{The packet index of the packet you want} \item{filter.number}{The type of wavelet you want, see \code{\link{filter.select}}} \item{family}{The family of wavelet you want, see \code{\link{filter.select}}} \item{resolution}{The number of ordinates at which you want the wavelet packet} } \description{ Function computes the values of a given wavelet packet on a discrete grid. } \details{ Function works by computing a wavelet packet transform of a zero vector. Then inserting a single one somewhere in the desired packet, and then inverts the transform. } \value{ A vector containing the "y" values of the required wavelet packet. } \seealso{\code{\link{draw.wp}},\code{\link{InvBasis}}, \code{\link{nlevelsWT}}, \code{\link{putpacket}}, \code{\link{wp}}} \author{G P Nason} \keyword{dplot} wavethresh/man/putC.mwd.rd0000644000176200001440000000774214211622540015244 0ustar liggesusers\name{putC.mwd} \alias{putC.mwd} \title{Put smoothed data into wavelet structure } \description{ The smoothed and original data from a multiple wavelet decomposition structure, \code{\link{mwd.object}}, (e.g. returned from \code{\link{mwd}}) are packed into a single matrix in that structure. This function copies the \code{\link{mwd.object}}, replaces some smoothed data in the copy, and then returns the copy. } \usage{ \method{putC}{mwd}(mwd, level, M, boundary = FALSE, index = FALSE, \dots) } \arguments{ \item{mwd}{Multiple wavelet decomposition structure whose coefficients you wish to replace.} \item{level}{The level that you wish to replace.} \item{M}{Matrix of replacement coefficients.} \item{boundary}{If \code{boundary} is \code{FALSE} then only the "real" data is replaced (and it is easy to predict the required length of \code{M}). If boundary is TRUE then you can replace the boundary values at a particular level as well (but it is hard to predict the required length of \code{M}, and the information has to be obtained from the \code{mfirst.last} database component of \code{mwd}).} \item{index}{If index is \code{TRUE} then the index numbers into the \code{mwd$C} array where the matrix \code{M} would be stored is returned. Otherwise, (default) the modified \code{\link{mwd.object}} is returned.} \item{\dots}{any other arguments} } \details{ The \code{\link{mwd}} function produces a wavelet decomposition structure. The need for this function is a consequence of the pyramidal structure of Mallat's algorithm and the memory efficiency gain achieved by storing the pyramid as a linear matrix of coefficients. 
PutC obtains information about where the smoothed data appears from the fl.dbase component of mwd, in particular the array \code{fl.dbase$first.last.c} which gives a complete specification of index numbers and offsets for \code{mwd$C}. Note also that this function only \emph{puts} information into \code{\link{mwd}} class objects. To \emph{extract} coefficients from \code{\link{mwd}} structures you have to use the \code{\link{accessC.mwd}} function. See Downie and Silverman, 1998. } \value{ An object of class \code{\link{mwd.object}} if \code{index} is \code{FALSE}, otherwise the index numbers indicating where the \code{M} matrix would have been inserted into the \code{mwd$C} object are returned. } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1995-6). } \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ # # Generate an mwd object # tmp <- mwd(rnorm(32)) # # Now let's examine the finest resolution smooth... # accessC(tmp, level=3) # [,1] [,2] [,3] [,4] [,5] [,6] #[1,] -0.4669103 -1.3150580 -0.7094966 -0.1979214 0.32079986 0.5052254 #[2,] -0.7645379 -0.8680941 0.1004062 0.6633268 -0.05860848 0.5757286 # [,7] [,8] #[1,] 0.5187380 0.6533843 #[2,] 0.2864293 -0.4433788 # # A matrix. There are two rows one for each father wavelet in this # two-ple multiple wavelet transform and at level 3 there are 2^3 columns. # # Let's set the coefficients of the first father wavelet all equal to zero # for this examples # newcmat <- accessC(tmp, level=3) newcmat[1,] <- 0 # # Ok, let's insert it back at level 3 # tmp2 <- putC(tmp, level=3, M=newcmat) # # And check it # accessC(tmp2, level=3) # [,1] [,2] [,3] [,4] [,5] [,6] [,7] #[1,] 0.0000000 0.0000000 0.0000000 0.0000000 0.00000000 0.0000000 0.0000000 #[2,] -0.7645379 -0.8680941 0.1004062 0.6633268 -0.05860848 0.5757286 0.2864293 # [,8] #[1,] 0.0000000 #[2,] -0.4433788 # # Yep, all the first father wavelet coefficients at level 3 are now zero. } \keyword{manip} \author{G P Nason} wavethresh/man/compress.imwd.rd0000644000176200001440000000301314211622540016320 0ustar liggesusers\name{compress.imwd} \alias{compress.imwd} \title{Compress a (thresholded) imwd class object by removing zeroes. } \description{ Compress a (thresholded) \code{imwd} class object by removing zeroes. } \usage{ \method{compress}{imwd}(x, verbose=FALSE, \dots) } \arguments{ \item{x}{Object to compress. Compression only does anything on \code{thresholded} \code{\link{imwd.object}}. } \item{verbose}{If this is true then report on compression activity.} \item{\dots}{any other arguments} } \details{ Thresholded \code{\link{imwd}} objects are usually very large and contain many zero elements. This function compresses these objects into smaller \code{\link{imwd}} objects by using the \code{\link{compress.default}} function which removing the zeroes. This function is a method for the generic function \code{\link{compress}} for class \code{\link{imwd}} objects. It can be invoked by calling \code{\link{compress}} for an object of the appropriate class, or directly by calling \code{\link{compress.imwd}} regardless of the class of the object } \value{ An object of type "\code{imwdc}" representing the compressed imwd object. 
} \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{compress}}, \code{\link{compress.default}}, \code{\link{imwd}}, \code{\link{imwd.object}}, \code{\link{imwdc.object}}, \code{\link{threshold.imwd}}. } \examples{ # # The user shouldn't need to use this function directly as the # \code{\link{threshold.imwd}} function calls it # automatically. # } \keyword{manip} \author{G P Nason} wavethresh/man/irregwd.rd0000644000176200001440000001006514211622540015176 0ustar liggesusers\name{irregwd} \alias{irregwd} \title{Irregular wavelet transform (decomposition).} \description{ This function performs the irregular wavelet transform as described in the paper by Kovac and Silverman. } \usage{irregwd(gd, filter.number=2, family="DaubExPhase", bc="periodic", verbose=FALSE)} \arguments{ \item{gd}{A grid structure which is the output of the \code{\link{makegrid}} function.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 2, the Daubechies extremal phase orthonormal compactly supported wavelet with 2 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. Two popular options are "DaubExPhase" and "DaubLeAsymm" but see the help for \code{\link{filter.select}} for more possibilities.} \item{bc}{specifies the boundary handling. If \code{bc="periodic"}, the default, then the function you decompose is assumed to be periodic on its interval of definition; if \code{bc="symmetric"} then the function beyond its boundaries is assumed to be a symmetric reflection of the function in the boundary. The symmetric option was the implicit default in releases prior to 2.2.} \item{verbose}{Controls the printing of "informative" messages whilst the computations progress. Such messages are generally annoying so it is turned off by default.} } \details{ If one has irregularly spaced one-dimensional regression data (t,y), say, then the function \code{\link{makegrid}} interpolates this to a regular grid and then the standard wavelet transform is used to transform the interpolated data. However, unlike the standard wavelet denoising set-up, the interpolated data values, y, are correlated. Hence the wavelet coefficients of the interpolated data will be correlated (even after using an orthogonal transform). Hence, in particular, the variance of each wavelet coefficient may well be different and so this routine also computes those variances using a fast algorithm (related to the two-dimensional wavelet transform). When thresholding with \code{\link{threshold.irregwd}} the threshold function makes use of the information about the variance of each coefficient to modify the variance locally on a coefficient by coefficient basis. } \value{ An object of class \code{\link{irregwd}} which is a list with the following components. \item{C}{Vector of sets of successively smoothed versions of the interpolated data (see description of equivalent component of \code{\link{wd.object}} for further information.)} \item{D}{Vector of sets of wavelet coefficients of the interpolated data at different resolution levels. (see description of equivalent component of \code{\link{wd.object}} for further information.)} \item{c}{Vector that aids in calculation of variances of wavelet coefficients (used by \code{\link{threshold.irregwd}}).} \item{nlevelsWT}{The number of resolution levels. This depends on the length of the data vector. If \code{length(data)=2^m}, then there will be m resolution levels.
This means there will be m levels of wavelet coefficients (indexed 0,1,2,...,(m-1)), and m+1 levels of smoothed data (indexed 0,1,2,...,m).} \item{fl.dbase}{There is more information stored in the C and D than is described above. In the decomposition ``extra'' coefficients are generated that help take care of the boundary effects, this database lists where these start and finish, so the "true" data can be extracted.} \item{filter}{A list containing information about the filter type: Contains the string "wavelet" or "station" depending on which type of transform was performed.} \item{bc}{How the boundaries were handled.} \item{date}{The date the transform was performed.} } \section{RELESASE}{ 3.9.4 Code Copyright Arne Kovac 1997 } \seealso{ \code{\link{makegrid}}, \code{\link{wd}}, \code{\link{wr.wd}}, \code{\link{accessC}}, \code{\link{accessc}}, \code{\link{accessD}}, \code{\link{putD}}, \code{\link{putC}}, \code{\link{filter.select}}, \code{\link{plot.irregwd}}, \code{\link{threshold.irregwd}}. } \examples{ # # See full examples at the end of the help for makegrid. # } \keyword{smooth} \author{Arne Kovac} wavethresh/man/denwd.rd0000644000176200001440000000170714211622540014637 0ustar liggesusers\name{denwd} \alias{denwd} \title{Wavelet decomposition of empirical scaling function coefficients of a p.d.f. } \usage{ denwd(coef) } \arguments{ \item{coef}{Output from \code{\link{denproj}}} } \description{ Performs wavelet decomposition on the empirical scaling function coefficients of the probability density function. } \details{ The empirical scaling function coefficients are decomposed using the DWT with zero boundary conditions. } \value{ An object of class \code{\link{wd.object}} } \seealso{\code{\link{denproj}},\code{\link{plotdenwd}},\code{\link{wd}}, \code{\link{denwr}}} \examples{ # Simulate data from the claw density, find the empirical # scaling function coefficients, decompose them and plot # the resulting wavelet coefficients data <- rclaw(100) datahr <- denproj(data, J=8, filter.number=2,family="DaubExPhase") data.wd <- denwd(datahr) \dontrun{plotdenwd(data.wd, top.level=(datahr$res$J-1))} } \author{David Herrick} \keyword{smooth} wavethresh/man/ssq.rd0000644000176200001440000000074114211622634014345 0ustar liggesusers\name{ssq} \alias{ssq} \title{Compute sum of squares difference between two vectors} \usage{ ssq(u,v) } \arguments{ \item{u}{One of the vectors} \item{v}{The other of the vectors} } \description{ Given two vectors, u and v, of length n, this function computes \eqn{\sum_{i=1}^n (u_i - v_i)^2}{sum((u-v)^2)}. } \details{ Description says all } \value{ The sum of squares difference between the two vectors } \examples{ ssq(c(1,2), c(3,4)) #[1] 8 } \author{G P Nason} \keyword{math} wavethresh/man/support.rd0000644000176200001440000000440714211622634015256 0ustar liggesusers\name{support} \alias{support} \title{Returns support of compactly supported wavelets.} \usage{ support(filter.number=10, family="DaubLeAsymm", m=0, n=0) } \arguments{ \item{filter.number}{The member index of a wavelet within the family. For Daubechies' compactly supported wavelet this is the number of vanishing moments which is related to the smoothness. See \code{\link{filter.select}} for more information on the wavelets.} \item{family}{The family of wavelets. 
See \code{\link{filter.select}} for more information on the wavelets.} \item{m}{Optional scale value (in usual wavelet terminology this is j)} \item{n}{Optional translation value (in usual wavelet terminology, this is k)} } \description{ Returns the support for compactly supported wavelets. This information is useful for drawing wavelets for annotating axes. } \details{ It is useful to know the support of a wavelet when drawing it to annotate labels. Other functions, such as wavelet density estimation (\code{\link{CWavDE}}), also use this information. } \value{ A list with the following components (each one is a single numeric value) \item{lh}{Left hand support of the wavelet with scale m and translation n. These values change as m and n (although when m=0 the function confusingly returns the next coarser wavelet where you might expect it to return the mother. The mother is indexed by m=-1)} \item{rh}{As lh but returns the rh end.} \item{psi.lh}{left hand end of the support interval for the mother wavelet (remains unchanged no matter what m or n are)} \item{psi.rh}{right hand end of the support interval for the mother wavelet (remains unchanged no matter what m or n are)} \item{phi.lh}{left hand end of the support interval for the father wavelet (remains unchanged no matter what m or n are)} \item{phi.rh}{right hand end of the support interval for the father wavelet (remains unchanged no matter what m or n are)} } \seealso{ \code{\link{CWavDE}}, \code{\link{draw.default}}, \code{\link{filter.select}}} \examples{ # # What is the support of a Haar wavelet? # support(filter.number=1, family="DaubExPhase", m=0, n=0) #$lh #[1] 0 # #$rh #[1] 2 # #$psi.lh #[1] 0 # #$psi.rh #[1] 1 # #$phi.lh #[1] 0 # #$phi.rh #[1] 1 # # So the mother and father wavelet have support [0,1] # } \author{G P Nason} \keyword{math} wavethresh/man/putC.rd0000644000176200001440000000207014211622540014443 0ustar liggesusers\name{putC} \alias{putC} \title{Put smoothed data (father wavelet) coefficients into wavelet structure} \description{ This generic function inserts smooths into various types of wavelet objects. This function is generic. Particular methods exist. For objects of class: \describe{ \item{wd}{use the \code{\link{putC.wd}} method.} \item{wp}{use the \code{\link{putC.wp}} method.} \item{wst}{use the \code{\link{putC.wst}} method.} } See individual method help pages for operation and examples. See \code{\link{accessC}} if you wish to \emph{extract} father wavelet coefficients. See \code{\link{putD}} if you wish to insert \emph{mother} wavelet coefficients } \usage{ putC(...) } \arguments{ \item{\dots}{See individual help pages for details.} } \value{ A wavelet object of the same class as x with the new father wavelet coefficients inserted. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{putC.wd}}, \code{\link{putC.wp}}, \code{\link{putC.wst}}, \code{\link{accessC}}, \code{\link{putD}}. } \keyword{manip} \author{G P Nason} wavethresh/man/cns.rd0000644000176200001440000000300114211622540014306 0ustar liggesusers\name{cns} \alias{cns} \title{Create new zeroed spectrum.} \description{ Part of a two-stage function suite designed to simulate locally stationary wavelet processes in conjunction with the LSWsim function. } \usage{ cns(n, filter.number=1, family="DaubExPhase") } \arguments{ \item{n}{The length of the simulated process that you want to produce. 
Must be a power of two (for this software).} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments. } \item{family}{specifies the family of wavelets that you want to use. The options are "DaubExPhase" and "DaubLeAsymm".} } \details{ This simple routine merely computes the time-ordered non-decimated wavelet transform of a zero vector of the same length as the eventual simulated series that you wish to produce. If you look at this routine you will see that it is extremely simple. First, it checks to see whether the n that you supplied is a power of two. If it is then it creates a zero vector of that length. This is then non-decimated wavelet transformed with the appropriate wavelet. The output can then be processed and then finally supplied to LSWsim for process simulation. } \value{ An object of class: \code{\link{wd}}, and, in fact, of the non-decimated variety. All wavelet coefficients of this are zero. } \seealso{ \code{\link{LSWsim}}, \code{\link{ewspec}} } \keyword{manip} \author{G P Nason} wavethresh/man/getpacket.rd0000644000176200001440000000202714211622540015501 0ustar liggesusers\name{getpacket} \alias{getpacket} \title{Get a packet of coefficients from a wavelet object} \description{ This generic function extracts packets of coefficients from various types of wavelet objects. This function is generic. Particular methods exist. For objects of class: \describe{ \item{wp}{use the \code{\link{getpacket.wp}} method.} \item{wst}{use the \code{\link{getpacket.wst}} method.} \item{wpst}{use the \code{\link{getpacket.wpst}} method.} } See individual method help pages for operation and examples. Use the \code{\link{accessC}} and \code{\link{accessD}} function to extract whole resolution levels of coefficients simultaneously. } \usage{ getpacket(...) } \arguments{ \item{\dots}{See individual help pages for details.} } \value{ The packet of coefficients requested. } \section{RELEASE}{Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{getpacket.wp}}, \code{\link{getpacket.wst}}, \code{\link{getpacket.wpst}}, \code{\link{accessD}}, \code{\link{accessC}}. } \keyword{manip} \author{G P Nason} wavethresh/man/wd3D.rd0000644000176200001440000000507214211622634014342 0ustar liggesusers\name{wd3D} \alias{wd3D} \title{Three-dimensional discrete wavelet transform} \description{ This function performs the 3D version of Mallat's discrete wavelet transform (see Mallat, 1989, although this paper does not describe in detail the 3D version the extension is trivial). The function assumes \emph{periodic} boundary conditions. } \usage{ wd3D(a, filter.number=10, family="DaubLeAsymm") } \arguments{ \item{a}{A three-dimensional array constructed using the S-Plus \code{array()} function. Each dimension of the array should be equal to the same power of two.} \item{filter.number}{This selects the smoothness of wavelet that you want to use in the decomposition. By default this is 10, the Daubechies least-asymmetric orthonormal compactly supported wavelet with 10 vanishing moments.} \item{family}{specifies the family of wavelets that you want to use. Two popular options are "DaubExPhase" and "DaubLeAsymm" but see the help for \code{\link{filter.select}} for more possibilities.} } \details{ This function implements a straightforward extension of Mallat's, (1989) one- and two-dimensional DWT. 
The algorithm recursively applies all possible combinations of the G and H detail and smoothing filters to each of the dimensions thus forming 8 different sub-blocks which we label HHH, GHH, HGH, GGH, HHG, GHG, HGG, and GGG. The algorithm recurses on the HHH component of each level (these are the father wavelet coefficients). Making an analogy to the 2D transform where HH, HG, HG and GG is produced at each resolution level: the HG and GH correspond to "horizontal" and "vertical" detail and GG corresponds to "diagonal detail". The GGG corresponds to the 3D "diagonal" version, HGG corresponds to smoothing in dimension 1 and "diagonal" detail in dimensions 2 and 3, and so on. I don't think there are words in the English language which adequately describe "diagonal" in 3D --- maybe cross detail? } \value{ An object of class \code{\link{wd3D}}. } \section{RELEASE}{Version 3.9.6 Copyright Guy Nason 1997} \seealso{ \code{\link{wd}}, \code{\link{imwd}}, \code{\link{accessD.wd3D}}, \code{\link{print.wd3D}}, \code{\link{putD.wd3D}}, \code{\link{putDwd3Dcheck}}, \code{\link{summary.wd3D}}, \code{\link{threshold.wd3D}}, \code{\link{wd3D.object}}, \code{\link{wr3D}}. } \examples{ # # Generate some test data: 512 standard normal observations in an 8x8x8 # array. # test.data.3D <- array(rnorm(8*8*8), dim=c(8,8,8)) # # Now do the 3D wavelet transform # tdwd3D <- wd3D(test.data.3D) # # See examples explaining the 3D wavelet transform. # } \keyword{smooth} \keyword{nonlinear} \author{G P Nason} wavethresh/man/modernise.wd.rd0000644000176200001440000000075014211622540016131 0ustar liggesusers\name{modernise.wd} \alias{modernise.wd} \title{Modernise a wd class object} \usage{ \method{modernise}{wd}(wd, ...) } \arguments{ \item{wd}{The wd object you wish to modernise} \item{...}{Other arguments} } \description{ Upgrade a version 2 \code{\link{wd.object}} to version 4. The function \code{\link{IsEarly}} can tell if the object comes from an earlier version of WaveThresh. } \details{ Description says all. } \value{ The modernised object. } \author{G P Nason} \keyword{manip} wavethresh/man/makewpstDO.rd0000644000176200001440000001366514211622540015622 0ustar liggesusers\name{makewpstDO} \alias{makewpstDO} \title{Help page for a function} \description{Takes two time series: one a real-valued discrete-time time series, timeseries, the other, groups, a time series containing factor levels. This function performs a discriminant analysis of groups on a subset of the best-correlating nondecimated wavelet packets of timeseries } \usage{ makewpstDO(timeseries, groups, filter.number=10, family="DaubExPhase", mincor=0.69999999999999996) } \arguments{ \item{timeseries}{The time series which is the `dependent variable', ie discrimination will be performed on the variables extracted from the non-decimated wavelet packet transform of this time series} \item{groups}{The factor levels as a time series} \item{filter.number}{The smoothness of the wavelet involved in the nondecimated wavelet packet transform. See \code{\link{filter.select}}} \item{family}{The wavelet family, see \code{\link{filter.select}}} \item{mincor}{Variables from the nondecimated wavelet packet transform with correlations less than this argument will be discarded in the first pass, and not considered as possible useful discriminants} } \details{ This function implements the `discrimination' version of the "Wavelet packet transfer function modelling of nonstationary series" by Guy Nason and Theofanis Sapatinas, \emph{Statistics and Computing}, /bold{12}, 45-56. 
The function first takes the non-decimated wavelet packet transform of \code{timeseries} using the \code{\link{wpst}} function. Then the set of nondecimated wavelet packets is put into matrix form using the \code{\link{wpst2discr}} function. The \code{\link{Best1DCols}} function selects those variables from the matrix whose correlation with the \code{groups} time series is greater than \code{mincor}. The selected variables are put into a reduced matrix. The next step, \code{\link{BMdiscr}}, performs a linear discriminant analysis of the \code{groups} values onto the reduced matrix. In principle, one could have carried out a discriminant analysis using the full matrix of all the packets, but the problem is neither well-conditioned nor computationally efficient. The strategy adopted by Nason and Sapatinas is to do a "first pass" to select a large number of "likely" variables that might contribute something to discrimination, and then carry out a "second pass" which performs a more detailed analysis to jointly determine which variables are the key ones for discrimination. Note, using the discriminant model developed here, it is possible to use future values of \code{timeseries} and the model to predict future values of \code{groups}. See example below. } \value{ An object of class \code{wpstDO}. This is a list containing the following components. \item{BPd}{Object returned from the \code{\link{BMdiscr}} function. Contains the reduced matrix and the discriminant object} \item{BP}{Object returned from the \code{\link{Best1DCols}} function, essentially the reduced matrix and the groups variable.} \item{filter}{The details of the wavelet filter used. This is stored because, if the other components are used to perform discrimination on new data, one needs to know what wavelet was used to perform the original nondecimated wavelet packet transform.} } \seealso{ \code{\link{basisplot.BP}}, \code{\link{Best1DCols}}, \code{\link{BMdiscr}}, \code{\link{wpst}}, \code{\link{wpst2discr}}, \code{\link{wpstCLASS}} } \examples{ # # Use BabySS and BabyECG data for this example. # # Want to predict future values of BabySS from future values of BabyECG # # Build model on first 256 values of both # data(BabyECG) data(BabySS) BabyModel <- makewpstDO(timeseries=BabyECG[1:256], groups=BabySS[1:256], mincor=0.5) # # The results (ie print out answer) #BabyModel #Stationary wavelet packet discrimination object #Composite object containing components:[1] "BPd" "BP" "filter" #Fisher's discrimination: done #BP component has the following information #BP class object. Contains "best basis" information #Components of object:[1] "nlevelsWT" "BasisMatrix" "level" "pkt" "basiscoef" #[6] "groups" #Number of levels 8 #List of "best" packets #Level id Packet id Basis coef #[1,] 4 0 0.7340580 #[2,] 5 0 0.6811251 #[3,] 6 0 0.6443167 #[4,] 3 0 0.6193434 #[5,] 7 0 0.5967620 #[6,] 0 3 0.5473777 #[7,] 1 53 0.5082849 # # You can plot the selected basis graphically using # \dontrun{basisplot(BabyModel$BP)} # # An interesting thing is the final "best" packets: these form the # "reduced" matrix, and the final discrimination is done on this # In this case 7 wavelet packets were identified as being good for # univariate high correlation.
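#
# As a hedged aside (not part of the original analysis): the top-ranked
# packet listed above (level 4, packet 0) can be pulled straight out of a
# nondecimated wavelet packet transform of the same stretch of data, using
# makewpstDO's default wavelet, e.g.
#
BabyECGwpst <- wpst(BabyECG[1:256], filter.number=10, family="DaubExPhase")
bestpkt <- getpacket(BabyECGwpst, level=4, index=0)
#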
# # In the second pass lda analysis, using the reduced matrix, the following # turns up as the best linear discriminant vectors # # The discriminant variables can be obtained by typing #BabyModel$BPd$dm$scaling #LD1 LD2 #[1,] 5.17130434 1.8961807 #[2,] 1.56487144 -3.5025251 #[3,] 1.69328553 1.1585477 #[4,] 3.63362324 8.4543247 #[5,] 0.15202947 -0.4530523 #[6,] 0.35659009 -0.3850318 #[7,] 0.09429836 -0.1281240 # # # Now, suppose we get some new data for the BabyECG time series. # For the purposes of this example, this is just the continuing example # ie BabyECG[257:512]. We can use our new discriminant model to predict # new values of BabySS # BabySSpred <- wpstCLASS(newTS=BabyECG[257:512], BabyModel) # # Let's look at the first 10 (eg) values of this prediction # #BabySSpred$class[1:10] #[1] 4 4 4 4 4 4 4 4 4 4 #Good. Now let's look at what the "truth" was: #BabySS[257:267] #[1] 4 4 4 4 4 4 4 4 4 4 #Good. However, the don't agree everywhere, let's do a cross classification #between the prediction and the truth. # #> table(tmp2$class, BabySS[257:512]) # # 1 2 3 4 # 1 4 1 1 0 # 2 116 0 23 3 # 4 2 12 0 94 # #So class 3 and 4 agree pretty much, but class 1 has been mispredicted at class #2 a lot. } \author{G P Nason} \keyword{multivariate} \keyword{ts} wavethresh/man/LocalSpec.wd.rd0000644000176200001440000002350014211622540016007 0ustar liggesusers\name{LocalSpec.wd} \alias{LocalSpec.wd} \title{Compute Nason and Silverman raw or smoothed wavelet periodogram. } \description{ \emph{This smoothing in this function is now obsolete}. You should now use the function \code{\link{ewspec}}. This function computes the Nason and Silverman raw or smoothed wavelet periodogram as described by Nason and Silverman (1995). } \usage{ \method{LocalSpec}{wd}(wdS, lsmooth="none", nlsmooth=FALSE, prefilter=TRUE, verbose=FALSE, lw.number=wdS$filter$filter.number, lw.family=wdS$filter$family, nlw.number=wdS$filter$filter.number, nlw.family=wdS$filter$family, nlw.policy="LSuniversal", nlw.levels=0:(nlevelsWT(wdS) - 1), nlw.type="hard", nlw.by.level=FALSE, nlw.value=0, nlw.dev=var, nlw.boundary=FALSE, nlw.verbose=FALSE, nlw.cvtol=0.01, nlw.Q=0.05, nlw.alpha=0.05, nlw.transform=I, nlw.inverse=I, debug.spectrum=FALSE, \dots) } \arguments{ Note that all options beginning "nlw" are only used if nlsmooth=T, i.e. iff NONLINEAR wavelet smoothing is used. \item{wdS}{The stationary wavelet transform object that you want to smooth or square.} \item{lsmooth}{Controls the LINEAR smoothing. There are three options: "none", "Fourier" and "wavelet". They are described below. Note that Fourier begins with a capital "F".} \item{nlsmooth}{A switch to turn on (or off) the NONLINEAR wavelet shrinkage of (possibly LINEAR smoothed) local power coefficients. This option is either TRUE (to turn on the smoothing) or FALSE (to turn it off).} \item{prefilter}{If TRUE then apply a prefilter to the actual stationary wavelet coefficients at each level. This is a low-pass filter that cuts off all frequencies above the highest frequency allowed by the (Littlewood-Paley) wavelet that bandpassed the current level coefficients. If FALSE then no prefilter is applied.} \item{verbose}{If TRUE then the function chats about what it is doing. 
Otherwise it is silent.} \item{lw.number}{If wavelet LINEAR smoothing is used then this option controls the \code{filter number} of the wavelet within the family used to perform the LINEAR wavelet smoothing.} \item{lw.family}{If wavelet LINEAR smoothing is used then this option controls the \code{\link{family}} of the wavelet used to perform the LINEAR wavelet smoothing.} \item{nlw.number}{If NONLINEAR wavelet smoothing is also used then this option controls the \code{filter number} of the wavelet used to perform the wavelet shrinkage.} \item{nlw.family}{If NONLINEAR wavelet smoothing is also used then this option controls the \code{\link{family}} of the wavelet used to perform the wavelet shrinkage.} \item{nlw.policy}{If NONLINEAR wavelet smoothing is also used then this option controls the levels to use when performing wavelet shrinkage (see \code{\link{threshold.wd}} for different policy choices).} \item{nlw.levels}{If NONLINEAR wavelet smoothing is also used then this option controls the levels to use when performing wavelet shrinkage (see \code{\link{threshold.wd}} for a detailed description of how levels can be chosen).} \item{nlw.type}{If NONLINEAR wavelet smoothing is also used then this option controls the type of thresholding used in the wavelet shrinkage (either "hard" or "soft", but see \code{\link{threshold.wd}} for a list). } \item{nlw.by.level}{If NONLINEAR wavelet smoothing is also used then this option controls whether level-by-level thresholding is used or if one threshold is chosen for all levels (see \code{\link{threshold.wd}}).} \item{nlw.value}{If NONLINEAR wavelet smoothing is also used then this option controls if a manual (or similar) policy is supplied to \code{nlw.policy} then the nlw.value option carries the manual threshold value (see \code{\link{threshold.wd}}). } \item{nlw.dev}{If NONLINEAR wavelet smoothing is also used then this option controls the type of variance estimator that is used in wavelet shrinkages (see \code{\link{threshold.wd}}). One possibility is the Splus var() function, another is the WaveThresh function \code{\link{madmad}}().} \item{nlw.boundary}{If NONLINEAR wavelet smoothing is also used then this option controls whether boundary coefficients are also thresholded (see \code{\link{threshold.wd}}).} \item{nlw.verbose}{If NONLINEAR wavelet smoothing is also used then this option controls whether the threshold function prints out messages as it thresholds levels (see \code{\link{threshold.wd}}).} \item{nlw.cvtol}{If NONLINEAR wavelet smoothing is also used then this option controls the optimization tolerance is cross-validation wavelet shrinkage is used (see \code{\link{threshold.wd}})} \item{nlw.Q}{If NONLINEAR wavelet smoothing is also used then this option controls the Q value for wavelet shrinkage (see \code{\link{threshold.wd}}).} \item{nlw.alpha}{If NONLINEAR wavelet smoothing is also used then this option controls the alpha value for wavelet shrinkage (see \code{\link{threshold.wd}}).} \item{nlw.transform}{If NONLINEAR wavelet smoothing is also used then this option controls a transformation that is applied to the squared (and possibly linear smoothed) stationary wavelet coefficients before shrinkage. So, for examples, you might want to set \code{nlw.transform=log} to perform wavelet shrinkage on the logs of the squared (and possibly linear smoothed) stationary wavelet coefficients. 
} \item{nlw.inverse}{If NONLINEAR wavelet smoothing is also used then this option controls the inverse transformation that is applied to the wavelet shrunk coefficients before they are put back into the stationary wavelet transform structure. So, for examples, if the \code{nlw.transform} is \code{log()} you should set the inverse to \code{nlw.inverse=exp}.} \item{debug.spectrum}{If this option is \code{T} then spectrum plots are produced at each stage of the squaring/smoothing. Therefore if you put in the non-decimated wavelet transform of white noise you can get a fair idea of how the coefficients are filtered at each stage.} \item{\dots}{any other arguments} } \details{ \emph{This smoothing in this function is now obsolete.} Use the function \code{\link{ewspec}} instead. However, this function is still useful for computing the raw periodogram. This function attempts to produce a picture of local time-scale power of a signal. There are two main components to this function: linear smoothing of squared coefficients and non-linear smoothing of these. Neither, either or both of these components may be used to process the data. The function expects a non-decimated wavelet transform object (of class wd, type="station") such as that produced by the \code{\link{wd}}() function with the type option set to "\code{station}". The following paragraphs describe the various methods of smoothing. \bold{LINEAR SMOOTHING}. There are three varieties of linear smoothing. None simply squares the coefficients. Fourier and wavelet apply linear smoothing methods in accordance to the prescription given in Nason and Silverman (1995). Each level in the SWT corresponds to a band-pass filtering to a frequency range [sl, sh]. After squaring we obtain power in the range [0, 2sl] and [2sl, 2sh]. The linear smoothing gets rid of the power in [2sl, 2sh]. The Fourier method simply applies a discrete Fourier transform (rfft) and cuts off frequencies above 2sl. The wavelet method is a bit more suble. The DISCRETE wavelet transform is taken of a level (i) and all levels within the DWT, j, where j>i are set to zero and then the inverse is taken. Approximately this performs the same operation as the Fourier method only faster. By default the same wavelets are used to perform the linear smoothing as were used to compute the stationary wavelet transform in the first place. This can be changed by altering \code{lw.number} and \code{lw.family}. \bold{NONLINEAR SMOOTHING}. After either of the linear smoothing options above it is possible to use wavelet shrinkage upon each level in the squared (and possibly Fourier or wavelet linear smoothed) to denoise the coefficients. This process is akin to smoothing the ordinary periodogram. All the usual wavelet shrinkage options are available as \code{nlw}.* where * is one of the usual \code{\link{threshold.wd}} options. By default the same wavelets are used to perform the wavelet shrinkage as were used to compute the non-decimated wavelet transform. These wavelets can be replaced by altering \code{nlw.number} and \code{nlw.family}. Also, it is possible to transform the squared (and possibly smoothed coefficients) before applying wavelet shrinkage. The transformation is effected by supplying an appropriate transformation function (AND ITS INVERSE) to \code{nlw.transform} and \code{nlw.inverse}. (For examples, \code{nlw.transform=log} and\code{ nlw.inverse=exp} might be a good idea). } \value{ An object of class \code{\link{wd}} a time-ordered non-decimated wavelet transform. 
Each level of the returned object contains a smoothed wavelet periodogram. Note that this is \bold{not} the \emph{corrected} smoothed wavelet periodogram, or the \emph{evolutionary wavelet spectrum}. Use the function \code{\link{ewspec}} to compute the evolutionary wavelet spectrum. } \references{Nason and Silverman, (1995). } \section{RELEASE}{Version 3.9 Copyright Guy Nason 1998 } \seealso{ \code{\link{ewspec}}, } \examples{ # # This function is obsolete. See ewspec() # # Compute the raw periodogram of the BabyECG # data using the Daubechies least-asymmetric wavelet $N=10$. # data(BabyECG) babywdS <- wd(BabyECG, filter.number=10, family="DaubLeAsymm", type="station") babyWP <- LocalSpec(babywdS, lsmooth = "none", nlsmooth = FALSE) \dontrun{plot(babyWP, main="Raw Wavelet Periodogram of Baby ECG")} # # Note that the lower levels of this plot are too large. This is partly because # there are "too many" coefficients at the lower levels. For a better # picture of the local spectral properties of this time series see # the examples section of ewspec # # Other results of this function can be seen in the paper by # Nason and Silverman (1995) above. # } \keyword{smooth} \author{G P Nason} wavethresh/man/wpstCLASS.rd0000644000176200001440000000367114211622634015327 0ustar liggesusers\name{wpstCLASS} \alias{wpstCLASS} \title{ Predict values using new time series values via a non-decimated wavelet packet discrimination object. } \description{Given a timeseries (\code{timeseries}) and another time series of categorical values (\code{groups}) the \code{\link{makewpstDO}} produces a model that permits discrimination of the \code{groups} series using a discriminant analysis based on a restricted set of non-decimated wavelet packet coefficients of \code{timeseries}. The current function enables new \code{timeseries} data, to be used in conjunction with the model to generate new, predicted, values of the \code{groups} time series. } \usage{ wpstCLASS(newTS, wpstDO) } \arguments{ \item{newTS}{A new segment of time series values, of the same time series that was used as the dependent variable used to construct the wpstDO object} \item{wpstDO}{An object that uses values of a dependent time series to build a discriminatory model of a groups time series. Output from the \code{\link{makewpstDO}} function} } \details{ This function performs the same nondecimated wavelet packet (NDWPT) transform of the \code{newTS} data that was used to analyse the original \code{timeseries} and the details of this transform are stored within the \code{wpstDO} object. Then, using information that was recorded in \code{wpstDO} the packets with the same level/index are extracted from the new NDWPT and formed into a matrix. Then the linear discriminant variables, again stored in \code{wpstDO} are used to form predictors of the original \code{groups} time series, ie new values of \code{groups} that correspond to the new values of \code{timeseries}. } \value{ The prediction using the usual R \code{predict.lda} function. The predicted values are stored in the \code{class} component of that list. 
} \seealso{\code{\link{makewpstDO}}} \examples{ # # See example at the end of help page for makewpstDO # } \author{G P Nason} \keyword{ts} \keyword{multivariate} wavethresh/man/find.parameters.rd0000644000176200001440000000571414211622540016622 0ustar liggesusers\name{find.parameters} \alias{find.parameters} \title{Find estimates of prior parameters} \description{ Estimate the prior parameters for the complex empirical Bayes shrinkage procedure. } \usage{ find.parameters(data.wd, dwwt, j0, code, tol, Sigma) } \arguments{ \item{data.wd}{Wavelet decomposition of the data being analysed.} \item{dwwt}{The diagonal elements of the matrix Wt(W). See \code{\link{make.dwwt}} for details.} \item{j0}{Primary resolution level, as discussed in the help for threshold.wd} \item{code}{Tells the function whether to use NAG code for the search (code="NAG"), R/S-plus for the search with C code to evaluate the likelihood (code="C"), or R/S-plus code for all calculations (code="R" or code="S"). Setting code="NAG" is strongly recommended.} \item{tol}{A tolerance parameter which bounds the mixing weight away from zero and one and the correlation between real and imaginary parts of the prior away from plus or minus one.} \item{Sigma}{The covariance matrix of the wavelet coefficients of white noise.} } \details{ The complex empirical Bayes (CEB) shrinkage procedure described by Barber & Nason (2004) places independent mixture priors on each complex-valued wavelet coefficient. This routine finds marginal maximum likelihood estimates of the prior parameters. If the NAG library is available, routine E04JYF is used otherwise the search is done using optimize (in R) or nlminb (in S-plus). In the latter case, the likelihood values should be computed externally using the C code supplied as part of the CThresh package - although a pure R / S-plus version is available, it is very slow. This function will not usually be called directly by the user, but is called from within cthresh. } \value{ A list with the following components: \item{pars}{Estimates of the prior parameters. Each row of this matrix contains the following parameter estimates for one level of the transform: mixing weight; variance of the real part of the wavelet coefficients; covariance between the real and imaginary parts; variance of the imaginary part of the wavelet coefficients. Note that for levels below the primary resolution, this search is not done and the matrix is full of zeros.} \item{Sigma}{The covariance matrix as supplied to the function.} } \note{ There may be warning messages from the NAG routine E04JYF. If the indicator variable IFAIL is equal to 5, 6, 7, or 8, then a solution has been found but there is doubt over the convergence. For IFAIL = 5, it is likely that the correct solution has been found, while IFAIL = 8 means that you should have little confidence in the parameter estimates. For more details, see the NAG software documentation available online at \code{http://www.nag.co.uk/numeric/fl/manual19/pdf/E04/e04jyf_fl19.pdf} } \section{RELEASE}{ Part of the CThresh addon to WaveThresh. Copyright Stuart Barber and Guy Nason 2004. 
} \seealso{ \code{\link{cthresh}} } \author{Stuart Barber} \keyword{manip} wavethresh/man/mfilter.select.rd0000644000176200001440000000647214211622540016462 0ustar liggesusers\name{mfilter.select} \alias{mfilter.select} \title{Provide filter coefficients for multiple wavelets.} \description{ This function returns the filter coefficients necessary for doing a discrete multiple wavelet transform (and its inverse). } \usage{ mfilter.select(type = "Geronimo") } \arguments{ \item{type}{The name for the multiple wavelet basis. The two possible types are "Geronimo" and "Donovan3"}. } \details{ This function supplies the multiple wavelet filter coefficients required by the \code{\link{mwd}} function. A multiple wavelet filter is somewhat different from a single wavelet filter. Firstly the filters are made up of matrices not single coefficients. Secondly there is no simple expression for the high pass coefficients G in terms of the low pass coefficients H, so both sets of coefficients must be specified. Note also that the transpose of the filter coefficients are used in the inverse transform, an unnecessary detail with scalar coefficients. There are two filters available at the moment. Geronimo is the default, and is recommended as it has been checked thoroughly. Donovan3 uses three orthogonal wavelets described in Donovan et al. but this coding has had little testing. See Donovan, Geronimo and Hardin, 1996 and Geronimo, Hardin and Massopust, 1994. This function fulfils the same purpose as the \code{\link{filter.select}} function does for the standard DWT \code{\link{wd}}. } \value{ A list is returned with the following eight components which describe the filter: \item{type}{The multiple wavelet basis type string.} \item{H}{A vector containing the low pass filter coefficients.} \item{G}{A vector containing the high pass pass filter coefficients.} \item{name}{A character string containing the full name of the filter.} \item{nphi}{The number of scaling functions in the multiple wavelet basis.} \item{npsi}{The number of wavelet functions in the multiple wavelet basis.} \item{NH}{The number of matrix coefficients in the filter. This is different from length(H).} \item{ndecim}{The decimation factor. I.e. the scale ratio between two successive resolution levels.} } \section{RELEASE}{Version 3.9.6 (Although Copyright Tim Downie 1995-6)} \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mwd.object}}, \code{\link{mwd}}, \code{\link{mwr}}, \code{\link{plot.mwd}}, \code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \examples{ #This function is currently used by `mwr' and `mwd' in decomposing and #reconstructing, however you can view the coefficients. 
# # look at the filter coefficients for Geronimo multiwavelet # mfilter.select() #$type: #[1] "Geronimo" # #$name: #[1] "Geronimo Multiwavelets" # #$nphi: #[1] 2 # #$npsi: #[1] 2 # #$NH: #[1] 4 # #$ndecim: #[1] 2 #$H: # [1] 0.4242641 0.8000000 -0.0500000 -0.2121320 0.4242641 0.0000000 # [7] 0.4500000 0.7071068 0.0000000 0.0000000 0.4500000 -0.2121320 #[13] 0.0000000 0.0000000 -0.0500000 0.0000000 # #$G: # [1] -0.05000000 -0.21213203 0.07071068 0.30000000 0.45000000 -0.70710678 # # [7] -0.63639610 0.00000000 0.45000000 -0.21213203 0.63639610 -0.30000000 #[13] -0.05000000 0.00000000 -0.07071068 0.00000000 } \keyword{manip} \author{Tim Downie} wavethresh/man/cthresh.rd0000644000176200001440000001164214211622540015175 0ustar liggesusers\name{cthresh} \alias{cthresh} \title{Estimate real signal using complex-valued wavelets } \description{ Implements the multiwavelet style and empirical Bayes shrinkage procedures described in Barber & Nason (2004) } \usage{ cthresh(data, j0 = 3, dwwt, dev = madmad, rule = "hard", filter.number = 3.1, family = "LinaMayrand", plotfn = FALSE, TI = FALSE, details = FALSE, policy = "mws", code = "NAG", tol = 0.01) } \arguments{ \item{data}{The data to be analysed. This should be real-valued and of length a power of two.} \item{j0}{Primary resolution level; no thresholding is done below this level.} \item{dwwt}{description to come} \item{dev}{A function to be used to estimate the noise level of the data. The function supplied must return a value of spread on the variance scale (i.e. not standard deviation) such as the var() function. A popular, useful and robust alternative is the madmad function.} \item{rule}{The type of thresholding done. If policy = "mws", available rules are "hard" or "soft"; if policy = "ebayes", then rule can be "hard", "soft" or "mean".} \item{filter.number, family}{These parameters specify the wavelet used. See \code{\link{filter.select}} for details. Also, if filter.number = 5, estimation is done with all the complex-valued wavelets with 5 vanishing moments and the results averaged. If filter.number = 0, then he averaging is over all available complex-valued wavelets.} \item{plotfn}{If \code{plotfn = true}, then a plot of the noisy data and estimated signal are produced.} \item{TI}{If TI = T, then the non-decimated transform is used. See the help pages for wd and wst for more on the non-decimated transform.} \item{details}{If \code{details = FALSE} (the default), only the estimate of the underlying signal is returned. If \code{details = TRUE}, many other details are also returned.} \item{policy}{Controls the type of thresholding done. Available policies are multiwavelet style (policy = "mws") and empirical Bayes (policy = "ebayes").} \item{code}{Tells cthresh whether external C or NAG code is available to help with the calculations.} \item{tol}{A tolerance parameter used in searching for prior parameters if the empirical Bayes policy is used.} } \details{ If a real-valued signal is decomposed using a complex-valued wavelet (like the Lina-Mayrand wavelets supplied by filter.select), then the wavelet coefficients are also complex-valued. Wavelet shrinkage can still be used to estimate the signal, by asking the question "which coefficients are small (and represent noise) and which are large (and represent signal)?" Two methods of determining which coefficients are small and which are large are proposed by Barber & Nason (2004). 
One is "multiwavelet style" thresholding (similar to that in Downie & Silverman (1998) where the coefficients are treated like the coefficients of a multiwavelet). Here, the "size" of the wavelet coefficient is determined as modulus of a standardised version of the coefficient. The standardisation is by the square root of the covariance matrix of the coefficient. A Bayesian method is to place a mixture prior on each coefficient. The prior has two components: a bivariate normal and a point mass at (0,0). The parameters are determined by an empirical Bayes argument and then the prior is updated by the data. } \value{ Either a vector containing the estimated signal (if details = FALSE), or a list with the following components: \item{data}{The original data as supplied to cthresh.} \item{data.wd}{The wavelet decomposition of the data.} \item{thr.wd}{The thresholded version of data.wd.} \item{estimate}{The estimate of the underlying signal.} \item{Sigma}{The covariance matrices induced by the wavelet transform. See \code{make.dwwt} for more details.} \item{sigsq}{The estimate of the variance of the noise which corrupted the data.} \item{rule}{Which thresholding rule was used} \item{EBpars}{The empirical Bayes parameters found by the function find.parameters. Only present if the "ebayes" policy was used.} \item{wavelet}{A list with components filter.number and family which, when supplied to \code{link{filter.select}}, determine the wavelet used to decompose the data.}} \note{ The estimates returned by cthresh have an imaginary component. In practice, this component is usually negligible. } \section{RELEASE}{ Part of the CThresh addon to WaveThresh. Copyright Stuart Barber and Guy Nason 2004.} \seealso{ \code{\link{filter.select}}, \code{\link{find.parameters}}, \code{\link{make.dwwt}}, \code{\link{test.dataCT}}, and the undocumented functions in CThresh. } \examples{ # # Make up some noisy data # y <- example.1()$y ynoise <- y + rnorm(512, sd=0.1) # # Do complex-valued wavelet shrinkage with decimated wavelets # est1 <- cthresh(ynoise, TI=FALSE) # # Do complex-valued wavelet shrinkage with nondecimated wavelets # est2 <- cthresh(ynoise, TI=TRUE) # # # plot(1:512, y, lty=2, type="l") lines(1:512, est1, col=2) lines(1:512, est2, col=3) } \author{Stuart Barber} \keyword{manip} wavethresh/man/plot.wd.rd0000644000176200001440000001673214211622540015131 0ustar liggesusers\name{plot.wd} \alias{plot.wd} \title{Plot wavelet transform coefficients.} \description{ This function plots discrete wavelet transform coefficients arising from a \code{\link{wd}} object. } \usage{ \method{plot}{wd}(x,xlabvals, xlabchars, ylabchars, first.level = 0, main = "Wavelet Decomposition Coefficients", scaling = "global", rhlab = FALSE, sub, NotPlotVal = 0.005, xlab = "Translate", ylab = "Resolution Level", aspect = "Identity", \dots) } \arguments{ \item{x}{The wd class object you wish to plot} \item{xlabvals}{A vector containing the "true" x-axis numbers that went with the vector that was transformed to produce the \code{\link{wd}} object supplied as the first argument to this function. If this argument is missing then the function tries to make up a sensible set of x-axis labels.} \item{xlabchars}{Tickmark labels for the x axis} \item{ylabchars}{Tickmark labels for the y axis} \item{first.level}{The first resolution level to begin plotting at. 
This argument can be quite useful when you want to suppress some of the coarser levels in the diagram.} \item{main}{The main title of the plot.} \item{scaling}{How you want the coefficients to be scaled. The options are: \code{global} - one scale factor is chosen for the whole plot. The scale factor depends on the coefficient with the largest absolute value among those included on the plot. The \code{global} option is useful when comparing coefficients that might appear anywhere in the plot; \code{by.level} - a scale factor is chosen for each resolution level in the plot. The scale factor for a level depends on the coefficient in that level that has the largest absolute value. The \code{by.level} option is useful when you wish to compare coefficients within a resolution level. The two other options are \code{compensated} and \code{super}, which are the same as \code{global} except that finer scales' coefficients are scaled up by a factor of \code{sqrt(2)} for \code{compensated} and 2 for \code{super}. These latter two options are sometimes useful (more useful for non-decimated \code{\link{wd}} objects, where they act as a sort of \code{\link{ipndacw}} matrix operator). } \item{rhlab}{If \code{TRUE} then a set of labels is produced on the right hand axis. The axis labels in this case refer to the scale factors used to scale each level and correspond to the value of the largest coefficient (in absolute value) in each scale (when \code{scaling="by.level"}) or overall (when \code{scaling="global"}). If the \code{rhlab} argument is \code{FALSE} then no right hand axis labels are produced.} \item{sub}{A subtitle for the plot.} \item{NotPlotVal}{This argument ensures that if all (scaled) coefficients in a resolution level are below \code{NotPlotVal} in absolute value then the whole resolution level is not plotted. This can be useful when plotting a \code{\link{wd}} object that is sparse (or has been thresholded, so that many coefficients might well be zero), as it speeds up the plot because whole levels do not have to be plotted (the function that does the plotting [\code{segments()}] is quite a slow function). Note that the value of \code{NotPlotVal} refers to \emph{scaled} coefficients, those that have been scaled by this function (on any resolution level all coefficients are scaled to lie between -0.5 and 0.5).} \item{xlab}{A title for the x-axis.} \item{ylab}{A title for the y-axis.} \item{aspect}{This argument describes the name (as a character string) of a function to be applied to the coefficients before plotting. By default the argument is "\code{Identity}", i.e. the coefficients are plotted \emph{as is}. This argument is most useful when complex-valued wavelets are plotted; you could use "\code{Mod}" to plot the modulus of the coefficients, "\code{Re}" to plot the real parts of the coefficients, or "\code{Arg}" to plot the argument of the coefficients. Also, the \code{aspect} argument can be useful for the ordinary wavelet transforms as well if you are interested in a particular transform of the coefficients. } \item{\dots}{fine tuning} } \details{ Produces a plot similar to the ones in Donoho and Johnstone (1994). A wavelet decomposition of a signal consists of discrete wavelet coefficients at different scales (resolution levels) and locations. This function plots the coefficients as a pyramid (derived from Mallat's pyramid algorithm). See the examples below.
The resolution levels are stacked one above the other: coarse scale coefficients are always towards the top of the plot, fine scale coefficients are always located toward the bottom of the plot. The location of coefficients increases from left to right across the plot in synchrony with the input signal to the \code{\link{wd}} object. In other words, the position of a coefficient along a line is indicative of the associated wavelet basis function's translate number. The actual coefficients are plotted using the \code{segments()} function. This plots each coefficient as a vertical line with positive coefficients being plotted above an imaginary centre line and negative coefficients being plotted below. The resolution levels are labelled on the left-hand side axis, and if \code{rhlab=TRUE} the maximum values of the absolute values of the coefficients, for the particular level, are plotted on the right-hand axis. The coefficients in the plot may be scaled in four ways. If you are interested in comparing coefficients in different levels then the default scaling option \code{scaling="global"} is what you need. This works by finding the coefficient with the largest absolute value amongst all coefficients to be plotted and then scaling all the other coefficients by the largest so that all coefficients lie in the range -1/2 to 1/2. The scaled coefficients are then plotted. If you are not interested in comparing relative resolution levels and want to see all that goes on within a particular scale then you should use the scaling option \code{scaling="by.level"}, which picks out the largest coefficient (in absolute value) from each level and scales each level separately. The \code{compensated} and \code{super} options are like the \code{global} option except that finer levels are scaled up (as discussed in the arguments list above): this can be useful when plotting non-decimated wavelet transform coefficients as it emphasizes the higher frequencies. } \value{ If \code{rhlab=TRUE} then the scaling factors applied to each scale level are returned. Otherwise \code{NULL} is returned. } \note{A plot of the coefficients contained within the \code{\link{wd}} object is produced. } \section{RELEASE}{ Version 3.5.3 Copyright Guy Nason 1994 } \seealso{ \code{\link{wd}} } \examples{ # # Generate some test data # test.data <- example.1()$y \dontrun{ts.plot(test.data)} # # Decompose test.data and plot the wavelet coefficients # wds <- wd(test.data) \dontrun{plot(wds)} # # Now do the time-ordered non-decimated wavelet transform of the same thing # \dontrun{wdS <- wd(test.data, type="station")} \dontrun{plot(wdS)} # # Next examples # ------------ # The chirp signal is another good example to use. # # Generate some test data # test.chirp <- simchirp()$y \dontrun{ts.plot(test.chirp, main="Simulated chirp signal")} # # Now let's do the time-ordered non-decimated wavelet transform. # For a change let's use the Daubechies least-asymmetric phase wavelet with 8 # vanishing moments (a totally arbitrary choice, please don't read # anything into it).
# chirpwdS <- wd(test.chirp, filter.number=8, family="DaubLeAsymm", type="station") \dontrun{plot(chirpwdS, main="TOND WT of Chirp signal")} } \keyword{hplot} \keyword{smooth} \author{G P Nason} wavethresh/man/Chires6.rd0000644000176200001440000000260614211622540015040 0ustar liggesusers\name{Chires6} \alias{Chires6} \title{Subsidiary routine for denproj (calculates scaling function coefficients with covariances)} \usage{ Chires6(x, tau=1, J, filter.number=10, family="DaubLeAsymm", nT=20) } \arguments{ \item{x}{The data (random sample for density estimation)} \item{tau}{Fine tuning parameter} \item{J}{Resolution level} \item{filter.number}{The smoothness of the wavelet, see \code{\link{filter.select}}} \item{family}{The family of the wavelet, see \code{\link{filter.select}}} \item{nT}{The number of iterations in the Daubechies-Lagarias algorithm} } \description{Function is essentially the same as \code{\link{Chires5}} but also returns covariances between coefficients. A subsidiary routine for \code{\link{denproj}}. Not intended for direct user use. } \details{ As description } \value{ A list with the following components: \item{coef}{The scaling function coefficients} \item{covar}{The coefficients' covariance matrix} \item{klim}{The integer translates of the scaling functions used} \item{p}{The primary resolution, calculated in code as \code{tau*2^J}} \item{filter}{The usual filter information, see \code{\link{filter.select}}} \item{n}{The length of the data \code{x}} \item{res}{A list containing components: \code{p}, as above, \code{tau} as input and \code{J} as above. This summarizes the resolution information} } \seealso{\code{\link{Chires5}},\code{\link{denproj}}} \author{David Herrick} \keyword{smooth} wavethresh/man/mprefilter.rd0000644000176200001440000000277414211622540015714 0ustar liggesusers\name{mprefilter} \alias{mprefilter} \title{Multiwavelet prefilter} \usage{ mprefilter(data, prefilter.type, filter.type, nlevels, nvecs.c, nphi, npsi, ndecim, verbose = FALSE) } \arguments{ \item{data}{The univariate sequence that you wish to turn into a multivariate one} \item{prefilter.type}{Controls the type of prefilter (see Tim Downie's PhD thesis, or references therein). Types include \code{Minimal}, \code{Identity}, \code{Repeat}, \code{Interp}, \code{default}, \code{Xia}, \code{Roach1}, \code{Roach3}, \code{Donovan3} or \code{Linear}.} \item{filter.type}{The type of multiwavelet: can be \code{Geronimo} or \code{Donovan3}} \item{nlevels}{The number of levels in the multiwavelet transform} \item{nvecs.c}{Parameter obtained from the \code{\link{mfirst.last}} function related to the particular filters} \item{nphi}{The number of father wavelets in the system} \item{npsi}{The number of mother wavelets in the system} \item{ndecim}{The decimation factor (apparently not used here)} \item{verbose}{If \code{TRUE} then informative messages are printed as the function progresses} } \description{ A multiwavelet prefilter turns a univariate sequence into a bivariate (in this case) sequence suitable for processing by a multiwavelet transform, such as \code{\link{mwd}}. As such, the prefilter is used on the forward transform. Not intended for direct user use. } \details{ Description says all } \value{ The appropriate prefiltered data.
} \seealso{\code{\link{mpostfilter}},\code{\link{mwd}}} \author{Tim Downie} \keyword{math} wavethresh/man/rm.det.rd0000644000176200001440000000165614211622634014736 0ustar liggesusers\name{rm.det} \alias{rm.det} \title{Set coarse levels of a "wavelets on the interval" transform object to zero} \usage{ rm.det(wd.int.obj) } \arguments{ \item{wd.int.obj}{The object whose coarse levels you wish to set to zero} } \description{ Set the wavelet coefficients of certain coarse levels for a "wavelets on the interval" object equal to zero. The operation of this function is somewhat similar to the \code{\link{nullevels}} function, but for objects associated with the "wavelets on the interval" code. } \details{ The "wavelets on the interval" code is contained within the \code{\link{wd}} function. All levels coarser than (but not including) \code{wd.int.obj$current.scale} are set to zero. } \value{ A \code{\link{wd.object}} of \code{type="interval"} containing the modified input object with certain coarse levels set to zero. } \seealso{\code{\link{nullevels}}, \code{\link{wd}}} \author{Piotr Fryzlewicz} \keyword{manip} wavethresh/man/mwd.object.rd0000644000176200001440000000674314211622540015577 0ustar liggesusers\name{mwd.object} \alias{mwd.object} \title{Multiple wavelet decomposition object (1D) } \description{ These are objects of class \code{mwd}. They represent a decomposition of a function with respect to a multiple wavelet basis. } \details{ To retain your sanity, the C and D coefficients should be extracted by the \code{\link{accessC}} and \code{\link{accessD}} functions and put using the \code{\link{putC}} and \code{\link{putD}} functions, rather than by the \code{$} operator. } \value{ The following components must be included in a legitimate `mwd' object. \item{C}{A matrix containing each level's smoothed data, each column corresponding to one coefficient vector. The wavelet transform works by applying both a smoothing filter and a bandpass filter to the previous level's smoothed data. The top level contains data at the highest resolution level. Each of these levels is stored one after the other in this matrix. The matrix `\code{fl.dbase$first.last.c}' determines exactly which columns in the matrix store each level.} \item{D}{Wavelet coefficient matrix. If you were to write down the discrete wavelet transform of a function then the columns of D would be the vector coefficients of the wavelet basis functions. Like the C, they are also formed in a pyramidal manner, but stored in a linear matrix. The storage details are to be found in `\code{fl.dbase$first.last.d}'.} \item{nlevelsWT}{The number of levels in the pyramidal decomposition that produces the coefficients. The precise number of levels depends on the number of different wavelet functions used and the preprocessing method used, as well as the number of data points used.} \item{fl.dbase}{The first/last database associated with this decomposition. This is a list consisting of 2 integers, and 2 matrices. The matrices detail how the coefficients are stored in the C and D components of the `mwd.object'. See the help on \code{\link{mfirst.last}} for more information.} \item{filter}{A list containing the details of the filter that did the decomposition. See \code{\link{mfilter.select}}.} \item{type}{Either \code{"wavelet"}, indicating that the ordinary multiple wavelet transform was performed, or \code{"station"}, indicating that the non-decimated multiple wavelet transform was done.} \item{prefilter}{Type of preprocessing or prefilter used.
This will be specific to the type of multiple wavelet used.} \item{date}{The date that the transform was performed or the mwd object was last modified.} \item{bc}{How the boundaries were handled.} } \section{GENERATION}{ This class of objects is returned from the \code{\link{mwd}} function to represent a multiple wavelet decomposition of a function. Many other functions return an object of class \code{mwd}. } \section{METHODS}{ The mwd class of objects has methods for the following generic functions: \code{\link{accessC}}, \code{\link{accessD}}, \code{\link{draw}}, \code{\link{plot}}, \code{\link{print}}, \code{\link{putC}}, \code{\link{putD}}, \code{\link{summary}}, \code{\link{threshold}}, \code{\link{wr.mwd}}. } \section{RELEASE}{ Version 3.9.6 (Although Copyright Tim Downie, 1995-6). } \seealso{ \code{\link{accessC.mwd}}, \code{\link{accessD.mwd}}, \code{\link{draw.mwd}}, \code{\link{mfirst.last}}, \code{\link{mfilter.select}}, \code{\link{mwd.object}}, \code{\link{mwr}}, \code{\link{plot.mwd}},\code{\link{print.mwd}}, \code{\link{putC.mwd}}, \code{\link{putD.mwd}}, \code{\link{summary.mwd}}, \code{\link{threshold.mwd}}, \code{\link{wd}}, \code{\link{wr.mwd}}. } \keyword{classes} \author{Tim Downie} wavethresh/man/Shannon.entropy.rd0000644000176200001440000000320414211622634016637 0ustar liggesusers\name{Shannon.entropy} \alias{Shannon.entropy} \title{Compute Shannon entropy} \description{ Computes the Shannon entropy of the squares of a set of coefficients. } \usage{ Shannon.entropy(v, zilchtol=1e-300) } \arguments{ \item{v}{A vector of coefficients (e.g. wavelet coefficients).} \item{zilchtol}{A small number. Any number smaller than this is considered to be zero for the purposes of this function.} } \details{ This function computes the Shannon entropy of the squares of a set of coefficients. The squares are used because we are only interested in the entropy of the energy of the coefficients, not their actual sign. The entropy of the squares of \code{v} is given by \code{-sum(v^2 * log(v^2))}. In this implementation any zero coefficients (determined by being less than \code{zilchtol}) have a zero contribution to the entropy. The Shannon entropy measures how "evenly spread" a set of numbers is. If the size of the entries in a vector is approximately evenly spread then the Shannon entropy is large. If the vector is sparsely populated or the entries are very different then the Shannon entropy is near zero. Note that the input vectors to this function usually have their norm normalized so that diversity of coefficients corresponds to sparsity. } \value{ A number representing the Shannon entropy of the input vector. } \section{RELEASE}{Version 3.7.2 Copyright Guy Nason 1996 } \seealso{ \code{\link{MaNoVe.wst}}, \code{\link{wst}} } \examples{ # # Generate some test data # # # A sparse set # Shannon.entropy(c(1,0,0,0)) #0 # # An evenly spread set # Shannon.entropy( rep( 1/ sqrt(4), 4 )) #1.386294 } \keyword{manip} \author{G P Nason} wavethresh/man/dof.rd0000644000176200001440000000225314211622540014303 0ustar liggesusers\name{dof} \alias{dof} \title{Compute number of non-zero coefficients in wd object} \description{ Compute the number of non-zero coefficients in a \code{\link{wd}} object. } \usage{ dof(wd) } \arguments{ \item{wd}{A \code{wavelet decomposition} object (such as that returned by the \code{\link{wd}} function).} } \details{ Very simple function that counts the number of non-zero coefficients in a \code{\link{wd}} class object.
} \value{ An integer that represents the number of non-zero coefficients in the input \code{\link{wd}} object. } \section{RELEASE}{Version 3.0 Copyright Guy Nason 1994 } \seealso{ \code{\link{wd}}, \code{\link{wd.object}}, \code{\link{threshold}}, \code{\link{threshold.wd}}. } \examples{ # # Let's generate some purely random numbers!! # myrandom <- rnorm(512) # # Take the discrete wavelet transform # myrandomWD <- wd(myrandom) # # How many coefficients are non-zero? # dof(myrandomWD) # [1] 512 # # All of them were nonzero! # # Threshold it # myrandomWDT <- threshold(myrandomWD, policy="universal") # # Now lets see how many are nonzero # dof(myrandomWDT) # [1] 8 # # Wow so 504 of the coefficients were set to zero! Spooky! # } \keyword{models} \author{G P Nason} wavethresh/DESCRIPTION0000644000176200001440000000255714335177642014165 0ustar liggesusersPackage: wavethresh Type: Package Title: Wavelets Statistics and Transforms Version: 4.7.2 Date: 2022-11-14 Authors@R: c(person("Guy", "Nason", role=c("aut", "cre"), email="g.nason@imperial.ac.uk"), person("Stuart", "Barber", role="ctb", email="s.barber@leeds.ac.uk"), person("Tim", "Downie", role="ctb", email="tim.downie@bht-berlin.de"), person("Piotr", "Frylewicz", role="ctb", email="p.fryzlewicz@lse.ac.uk"), person("Arne", "Kovac", role="ctb", email="A.Kovac@bristol.ac.uk"), person("Todd", "Ogden", role="ctb", email="todd.ogden@columbia.edu"), person("Bernard", "Silverman", role="ctb")) Depends: R (>= 2.10), MASS Description: Performs 1, 2 and 3D real and complex-valued wavelet transforms, nondecimated transforms, wavelet packet transforms, nondecimated wavelet packet transforms, multiple wavelet transforms, complex-valued wavelet transforms, wavelet shrinkage for various kinds of data, locally stationary wavelet time series, nonstationary multiscale transfer function modeling, density estimation. 
License: GPL (>= 2) NeedsCompilation: yes Packaged: 2022-11-14 11:59:38 UTC; guynason Author: Guy Nason [aut, cre], Stuart Barber [ctb], Tim Downie [ctb], Piotr Frylewicz [ctb], Arne Kovac [ctb], Todd Ogden [ctb], Bernard Silverman [ctb] Maintainer: Guy Nason Repository: CRAN Date/Publication: 2022-11-16 15:20:02 UTC wavethresh/build/0000755000176200001440000000000014334426652013542 5ustar liggesuserswavethresh/build/partial.rdb0000644000176200001440000002702114334426652015671 0ustar liggesusers}Y{ǵ Z&qܐ6 lK9Z8 m:1OtnW7<`}y>ԩ޻uj UթSgOML\oĤh=t9s5?qa~ЎJ7|Oo_&}u~[WķږG'\Xu/}6w7*s ״<~c~@{7?m]xɰ_;l{[#Z8/h{Wۮm2Wv5; b{v٫TA{1ڛFof{ so߫&S-r0s 9+.TD]X?t.R3=<ϜTF].͌s4p\YvGƯSzƑvd(g9Hd*}&-k{N)4{Z{ÑrAڷ' о9yM=&\9`hjRo'Wk E% Fq}gdx7rh}f/<-};rsVP.3}߿_h*V`[ S3?zM~x\k<+AkuMbVQ7 7S >h`e:&.Gd`f}f ĤR?Ǡus(|XzSq8@9j{L"^?7Ujg(ln$ܣ\ɱhyq)da,J]'Q[^nϳً=jI A< fbxϥ0󂤭L:c2 L dɂNǽ} ;w値qKbCf}<'n4v jCr:>[ `HA*0Ip\b;?KLV0āÍI&Vݞġ */Ϳ7fzN.\ sa{E JUV#;WԪs5Ur 5]klnl>{:V5eRڏcVS'5βy>"I@FAA?WdNɭk3{z3#ur絒B'F $zl+g)ګdk$יl*0H3ЙpLn_٪ JI.%w*_TQR(48SC}tq~qqnaanJrr?_jɨh19éY2wmИ>?25sdC竁zQ~8թ8XԮPɈArI[Gd2Vd3fȐوٱzI8+$zS_ju9m9$)frl`?U|cc2-s2'w,Wͥ w츦 wbO_cHhCH o{2lyH*KĆ@sqUZN leR+˃SG(3ҧ^z$wOpɝ#oA#&I#4r>_:b#Cfv w>Kߤ(P$WRL5]f{$p=>CJajH&vG -d_Cjܮe"'b6_,yu( !-Gڐ '@~˷L G Q>g#]jmVw(&QXqN $!p*XjB&t,/`5@YJL3,lԞ9bsK7n)n2@kQ q02)Z E^|[Hu(P@Ir${gzW>{rӥ3Ϲ7cyrǵܬԺ`gEffbqdZLO$XQ>v0\(vh7%@D }_[rĞ[ȊPU؋Tڒ:j?N]@澎bn8X`yݑS<ԸU.8 \XA/Y jZ^tDUD׿~["J[5#uq:~ Tb1Pr x$e}oJ虑a ؐ(kFN$Cj80H7ApΜ2 L@6C䆝%~8pQ[/ ԥu4wwrPܞ2v0+B]:u<joPʅ ty|S16sɓe"6C(8P 46ͣMABnb9HQ`gJCeepؔ yp9 ݠCeb1̵ßS@|!ն DֲMr{X>鈡ozl @XzjYHFV&Kl DTA8`^Q䯘Mv"uIjH 6Cbcv= +\Ӌ֡36s)hJ{6]8y@%J- HOPh?p sf+KJd=^DzyQ!>5+s0N c"T,aDJmQ| mFKHpX^bI^WLr M!nb;iym9bs~K1WphuԵg]nP 9QT [-duRLAcA5-Po*0Ұ1Y<Ո:E*Bħw{ J^E/t{L .+QΑP5gsˇ;,`Z3|8@.PkW8.dcǢ){3Y l/E$Q[ꓨ,(rDGpEn$5༡]_r*)2s)Zf?v2,y!Y8z6h$ˊ^|H]A͘bdAEhO@ȌȲLDELjxYDv)w[C8 ‡B pE2QdRVxX|żC5B!wA{^3 @ -'9!K8 MmJ>Q2df4E Ik h-}٪R o׋q 'xY9a'lu s70k\E>vٽ}R gN-K/?G媰Cd }gfNGBw]-ߍ J N3Vb[ٓXX(! .c(z㱐^LNS'kU^,R8PӤgZXz0]b F,p:8 %ue̬*b.#b04d68PM6!xBKp?S>s:bM(g,[G4ad43<JOLJǦ%Vc 0̦jBC+_-8D<6|٫6 9g ime ]HYtPmrJ>zICq . ET^/]qhMvx(ŌmT2tU hՖu5w΋F t<|KQq^GoC)B< uIҿDM.B.?Dc*_?&ZokS2YC+zlFB@u.?%~xõ>8ދ8_^Q,?+ݘP2?Xs4#y#LZ9@SX҉2k{Aဣb%!v,f3q{DL͆rӂ8XvClN^[ݳ̤`G qX;gYO%W8M|Z<?hkuICDF?MuB0d~x_[$sՃM .q*ZaZr<2m_Ls)E{L_4tGĭ uyh*]ܗwQI@n p *DJ綸k! G͆GVOt>|]pZtiI&Bq_"_;9h֦3^X&u wE*!wY%,S<2Q$pU`zMP̩7,vVHYD|tP -wΒyFW=+]Y"Ȥ kw\ j(lyISrdq( Duل" 4tݴ"*EhjQɒ8UeJ%-A2l/:@dx(c?0o` ;U\@DkeSbxpuQ^:YRzA)"1h0R'8 ݰ-]62q  Oh. βvh$E̜z^tg^?eOh蓘N8ܰJVeg&-8, S僋˭T~#M$D5@Q8UGMj\(W 5ȴڒu `ɞh&` V oN]ӎܵEGvwwp p5frVlOv~!J[|ٞT:Ly= ^=\xAQ<@ c9[N31a>E5^i#p)cQ&hhS 3KEnoTP$WTD:03}(3t$^80jhj$Y'13(<غ5e qWqPWlQ@?Мzzt/xv} 4΄'4XJ.Jر$ LN[Yۊ6(-}q`}j HjL_6$;Ut{()qEr#ňy )af6dl _ 2^8#m/eX}wĖU` =ӄidD]E ;I%s|CU{hڗN*8 T/Zq6\iA#T\]ch:_&Ek~ӞȺv,w 9 B< !MNUfL9>,J QP|("+!"<}q1PV#/2>o: $a.+UKb*=ntC\8v^~qaV~/[pࢭ=Pvq >NT|y񵳘^,wAʴ$Ia9h0AѶ>Bxg?/6 Mȝƍh &cSjn 3n0yPˉUR7TkJ^nD“F“S]tݣ8e :vC,,Gm,hUSux @ѕBlYd#'La {{)+,/ ZXE3G)wvbA#&N{a*}1aҧupbM" Q$)VoQ9P1:c=1~ Xf]{:<Ϣk?,~Yz5)qYDV-6i+8p0uABd%}.K\Ć80.6q@qi?wEDa0B{h!cч.zVC#>pBz_]=Ϟ:(ˊ>wV}HY13 $D=EZd=/"z50b!iVZNd<˱z׍դڀtp=tURRsXVSQ:DdVT]e+:@~$T]< jT$u'_FIf?/ރSh%qahb^)UvZY^2LGnBxɔ4wtjfx`[ *iTҖ@ԩ5*%PBr KXR ts KBWs;+O sF[&т1A2~*"Jɢu9ll}SU}gfN'&:2[Mϕ^ &"}|.0k t bHS .F$p]Y;I~Yl>Jt]&G@p%vYj7r^htF至Ƃ@y-;#+bvKo<'P1uIBSU{c¦7FhZkp#OtSԇ5QP!%A:4XMWg1 ŒX B Kr"TLxF]!DX4EC(cUUyf!PJH aV%!JǢtIJR/;Zi4f҃e)ﰙ(>BH1Eo{(7T9(B|"P*ȂDQdg!zTazvFM*zM]]oL=abpls|*"h PA\=g{ Y,6]m1:9ӌhqOGQ`_G2 ~2Zā-`S97KR$>./?`MN'L? 
@- >(< PSl, =]RtN4ƹPAkGB^ɳRjnY\sbI@  R?Y`zmF'BD6(KzEG{b?_| Yv~Rn3FZ^ĩlOp8uU!k2y6=*"eH~;n3TGjPoMB.5Ō\+}S]94=ڷ[Ϥt&tCע}jW(viZ_䫊P7UG{2ԯ\.!䑧hϧZqTM7[QG6{*ٷ!7g@LR[}6L' [T7.qXŤ0~ь&cGJ/aO'c-D%XF @i OG=eJ9t)|8cլrL+[ox\pUEzSR<̒VL/ߝ_{b$Lߟ g نgS6 v#+WP~22+ *(_]1^6#Յ{_ܝ}/>:l4_N 5Q)br6N_7WHdgYzs:Wv*}9~: TczS ÖpdJ =D̋=kDMho2ͮ9\^fyOhi}4qȀ|x"ʹ ;}:֢ҳ#"zu#KN6#id;~0ڑTi/PxfX=1 #ox>ݻGpBtʷ9Gۗ`]ndQMϦS(r#ĝ.:ݐk#w[f}ql[>gnSXq@pE;\-tp`_l Y[No'(}z^Hwך a,I};/Uכ~3K/6ͶS"'x8@&q=:^o:l`ur^@}XJݕfx%[MfY[L,ET[%n|J. !üPd?,Ĭܣ9zѡ[9BRۢw%1du v(܄~T_iw3]44G>YZYzinIN_zf4/:) {K.JsU,[7?5iڐ>wR,Joݖ=K+M@q" k@5cg"^XBDśQiR}f@[;;sb!L觧ˡNd)3/3A]+R*r_d}_l9S=. V1?\hft2h.6Xo͏4~795ѣ>lj[vNZ{Kpȉvoжu;ťwY٥¦띵Wx[?_}o #G+[϶75HABjVS>A.>J;`BW.0ᆲ}1WOӪǭ=8VWP7jkpw>3}nëWݩ6b07 ɝ5-޾WM;"J[u)z[DIA3BjLZ(A"fơ e1aߦ}tD毗P%O/ae~exQ p4cSPR4>"/o-4*Ԡ}fn5z~.͗wv1'(bSU3CKX'$%(G~B>QOƂЇd!x?'IV(LWǑג͑NhaW}!-cE]t8fDMǴ> -BV|=f]DZݮ56v6=-FPR0:(SO  ! dO)mXËbogC(,az, 3hұy\0|QКf0+S-=7%fq 0@n'Wv??/5OVMwavethresh/src/0000755000176200001440000000000014334426652013232 5ustar liggesuserswavethresh/src/wavethresh_init.c0000644000176200001440000003002414334424107016571 0ustar liggesusers#include // for NULL #include /* .C calls */ /* 1-10 */ extern void accessDwpst(double *coefvec, int *lansvec, int *nlev, int *avixstart, int *primaryindex, int *nwppkt, int *pklength, int *level, double *weave, int *lweave, int *error); extern void av_basisWRAP(double *wst, double *wstC, int *LengthData, int *level, double *H, int *LengthH, double *answer, int *error); extern void c2to4(int *l, int *a); extern void Ccthrcalcodds(long *pnd, double *dr, double *di, double *VVec, double *SigVec, double *pp, double *ans, double *odds); extern void Ccthrnegloglik(double *parvec, double *SigVec, double *di, double *dr, long *pnd, double *pans); extern void Cmnv(double *wst, double *wstC, int *LengthData, int *nlevels, int *upperctrl, double *upperl, int *firstl, int *verbose, int *error); extern void comAB_WRAP(double *wstR, double *wstI, double *wstCR, double *wstCI, int *LengthData, int *level, double *HR, double *HI, double *GR, double *GI, int *LengthH, double *answerR, double *answerI, int *error); extern void computec(int *n,double *c,int *gridn,double *Gmatrix,int *Gindex, double *H, int *LengthH, int *bc, int *error); extern void comwd(double *CR, double *CI, int *LengthC, double *DR, double *DI, int *LengthD, double *HR, double *HI, double *GR, double *GI, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); extern void comwr(double *CR, double *CI, int *LengthC, double *DR, double *DI, int *LengthD, double *HR, double *HI, double *GR, double *GI, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); /* 11-20 */ extern void comwst(double *CaR, double *CaI, double *DataR, double *DataI, int *LengthData, int *levels, double *HR, double *HI, double *GR, double *GI, int *LengthH, int *error); extern void conbarL(double *c_in, int *LengthCin, int *firstCin, double *d_in, int *LengthDin, int *firstDin, double *H, int *LengthH, double *c_out, int *LengthCout, int *firstCout, int *lastCout, int *type, int *bc); extern void Cpostmean(long *pnd, double *dr, double *pi, double *VVec, double *SigVec, double *w, double *ansr, double *ansi); extern void Crsswav(double *noisy, int *nnoisy, double *value, double *C, double *D, int *LengthD, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int 
*ntt, int *ll, int *bc, double *ssq, int *error); extern void CScalFn(double *v, double *ans, int *res, double *H, int *lengthH); extern void Cthreshold(double *D, int *LengthD, int *firstD, int *lastD, int *offsetD, int *Dlevels, int *ntt, double *value, int *levels, int *qlevels, int *bc, int *error); extern void CWavDE(double *x, int *n, double *minx, double *maxx, int *Jmax, double *threshold, double *xout, double *fout, int *nout, double *PrimRes, double *SFx, double *SFy, int *lengthSF, double *WVx, double *WVy, int *lengthWV, int *kmin, int *kmax, int *kminW, int *kmaxW, double *xminW, double *xmaxW, double *phiLH, double *phiRH, double *psiLH, double *psiRH, int *verbose, int *error); extern void CWaveletCV(double *noisy, int *nnoisy, double *UniversalThresh, double *C, double *D, int *LengthD, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *ntt, int *ll, int *bc, double *tol, int *maxits, double *xvthresh, int *interptype, int *error); extern void dec(double* data, int* size, int* filternumber, int* minscale, int* precond, int* filterhistory); extern void FullWaveletCV(double *noisy, int *nnoisy, double *UniversalThresh, double *H, int *LengthH, int *ntt, int *ll, double *tol, double *xvthresh, int *error); /* 21-30 */ extern void getARRel(double *Carray, int *size, int *level, double *GHH, double *HGH, double *GGH, double *HHG, double *GHG, double *HGG, double *GGG); extern void getpacketwst2D(double *am, int *D1, int *D12, int *maxlevel, int *level, int *index, int *type, double *out, int *sl); extern void ixtoco(int *level, int *maxlevel, int *index, int *x, int *y); extern void makegrid(double *x,double *y,int *n,double *gridx,double *gridy, int *gridn, double *G, int *Gindex); extern void multiwd(double *C, int *lengthc, double *D, int *lengthd, int *nlevels, int *nphi, int *npsi, int *ndecim, double *H, double *G, int *NH, int *lowerc, int *upperc, int *offsetc, int *upperd, int *lowerd, int *offsetd, int *nbc); extern void multiwr(double *C, int *lengthc, double *D, int *lengthd, int *nlevels, int *nphi, int *npsi, int *ndecim, double *H, double *G, int *NH, int *lowerc, int *upperc, int *offsetc, int *upperd, int *lowerd, int *offsetd, int *nbc, int *startlevel); extern void PLDE2(double *C, double *p, double *filter, int *nf, int *prec, int *kmin, int *kmax, double *gx, double *gy, int *ng, double *philh, double *phirh, int *error); extern void PsiJ(int *J, double *H, int *LengthH, double *tol, double *wout, int *lwout, int *rlvec, int *error); extern void putarr(double *Carray, int *truesize, int *level, int *Iarrayix, double *Iarray); extern void putpacketwst2D(double *am, int *D1, int *D12, int *maxlevel, int *level, int *index, int *type, double *in, int *sl); /* 31-40 */ extern void rainmatPARENT(int *J, double *H, int *LengthH, double *fmat, double *tol, int *error); extern void rainmatPARTIAL(int *J, int *donej, double *H, int *LengthH, double *fmat, double *tol, int *error); extern void rec(double* data, int* size, int* filterhistory, int* currentscale, int* precond); extern void SAvBasis(double *am, int *D1, int *D12, double *TheSmooth, int *levj, double *H, int *LengthH, int *error); extern void SFDE5(double *x, int *nx, double *p, double *filter, int *nf, int *prec, double *chat, int *kmin, int *kmax, double *philh, double *phirh, int *error); extern void SFDE6(double *x, int *nx, double *p, double *filter, int *nf, int *prec, double *chat, double *covar, int *kmin, int *kmax, double 
*philh, double *phirh, int *error); extern void StoDCDS(double *C, int *Csize, int *firstCin, double *H, int *LengthH, int *LengthCout, int *firstCout, int *lastCout, int *LengthDout, int *firstDout, int *lastDout, double *ImCC, double *ImDD, int *bc, int *type, int *error); extern void StoIDS(double *C, int *Csize, int *firstCin, double *H, int *LengthH, int *LengthCout, int *firstCout, int *lastCout, int *LengthDout, int *firstDout, int *lastDout, double *ImCC, double *ImCD, double *ImDC, double *ImDD, int *bc, int *type, int *error); extern void StoIRS(double *ImCC, double *ImCD, double *ImDC, double *ImDD, int *LengthCin, int *firstCin, int *LengthDin, int *firstDin, double *H, int *LengthH, int *LengthCout, int *firstCout, int *lastCout, double *ImOut, int *bc, int *error); extern void SWT2Dall(double *m, int *nm, double *am, int *J, double *H, int *LengthH, int *error); /* 41-50 */ extern void tpwd(double *image, int *nrow, int *ncol, int *levr, int *levc, int *firstCr, int *lastCr, int *offsetCr, int *firstDr, int *lastDr, int *offsetDr, int *firstCc, int *lastCc, int *offsetCc, int *firstDc, int *lastDc, int *offsetDc, int *type, int *bc, double *H, int *LengthH, int *error); extern void tpwr(double *image, int *nrow, int *ncol, int *levr, int *levc, int *firstCr, int *lastCr, int *offsetCr, int *firstDr, int *lastDr, int *offsetDr, int *firstCc, int *lastCc, int *offsetCc, int *firstDc, int *lastDc, int *offsetDc, int *type, int *bc, double *H, int *LengthH, int *error); extern void wavedecomp(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); extern void wavedecomp_dh(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); extern void wavepackde(double *Data, int *LengthData, int *levels, double *H, int *LengthH); extern void wavepackrecon(double *rdata, int *ldata, int *nrsteps, int *rvector, double *H, int *LengthH, int *error); extern void wavepackst(double *Carray, double *Data, int *LengthData, int *levels, double *H, int *LengthH, int *error); extern void waverecons(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); extern void waverecons_dh(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); extern void wd3D(double *Carray, int *size, double *H, int *LengthH, int *error); /* 51-53 */ extern void wpCmnv(double *wp, int *LengthData, int *nlevels, int *upperctrl, double *upperl, int *firstl, int *verbose, int *error); extern void wpst(double *ansvec, int *lansvec, int *nlev, int *finish_level, int *avixstart, double *H, int *LengthH, int *error); extern void wr3D(double *Carray, int *truesize, double *H, int *LengthH, int *error); static const R_CMethodDef CEntries[] = { {"accessDwpst", (DL_FUNC) &accessDwpst, 11}, {"av_basisWRAP", (DL_FUNC) &av_basisWRAP, 8}, {"c2to4", (DL_FUNC) &c2to4, 2}, {"Ccthrcalcodds", (DL_FUNC) &Ccthrcalcodds, 8}, {"Ccthrnegloglik", (DL_FUNC) &Ccthrnegloglik, 6}, {"Cmnv", (DL_FUNC) &Cmnv, 9}, {"comAB_WRAP", (DL_FUNC) &comAB_WRAP, 14}, {"computec", (DL_FUNC) &computec, 8}, {"comwd", (DL_FUNC) &comwd, 21}, {"comwr", (DL_FUNC) &comwr, 21}, {"comwst", (DL_FUNC) &comwst, 12}, {"conbarL", 
(DL_FUNC) &conbarL, 14}, {"Cpostmean", (DL_FUNC) &Cpostmean, 8}, {"Crsswav", (DL_FUNC) &Crsswav, 20}, {"CScalFn", (DL_FUNC) &CScalFn, 5}, {"Cthreshold", (DL_FUNC) &Cthreshold, 12}, {"CWavDE", (DL_FUNC) &CWavDE, 28}, {"CWaveletCV", (DL_FUNC) &CWaveletCV, 23}, {"dec", (DL_FUNC) &dec, 6}, {"FullWaveletCV", (DL_FUNC) &FullWaveletCV, 10}, {"getARRel", (DL_FUNC) &getARRel, 10}, {"getpacketwst2D", (DL_FUNC) &getpacketwst2D, 9}, {"ixtoco", (DL_FUNC) &ixtoco, 5}, {"makegrid", (DL_FUNC) &makegrid, 8}, {"multiwd", (DL_FUNC) &multiwd, 18}, {"multiwr", (DL_FUNC) &multiwr, 19}, {"PLDE2", (DL_FUNC) &PLDE2, 13}, {"PsiJ", (DL_FUNC) &PsiJ, 8}, {"putarr", (DL_FUNC) &putarr, 5}, {"putpacketwst2D", (DL_FUNC) &putpacketwst2D, 9}, {"rainmatPARENT", (DL_FUNC) &rainmatPARENT, 6}, {"rainmatPARTIAL", (DL_FUNC) &rainmatPARTIAL, 7}, {"rec", (DL_FUNC) &rec, 5}, {"SAvBasis", (DL_FUNC) &SAvBasis, 8}, {"SFDE5", (DL_FUNC) &SFDE5, 12}, {"SFDE6", (DL_FUNC) &SFDE6, 13}, {"StoDCDS", (DL_FUNC) &StoDCDS, 16}, {"StoIDS", (DL_FUNC) &StoIDS, 18}, {"StoIRS", (DL_FUNC) &StoIRS, 16}, {"SWT2Dall", (DL_FUNC) &SWT2Dall, 7}, {"tpwd", (DL_FUNC) &tpwd, 22}, {"tpwr", (DL_FUNC) &tpwr, 22}, {"wavedecomp", (DL_FUNC) &wavedecomp, 14}, {"wavedecomp_dh", (DL_FUNC) &wavedecomp_dh, 14}, {"wavepackde", (DL_FUNC) &wavepackde, 5}, {"wavepackrecon", (DL_FUNC) &wavepackrecon, 7}, {"wavepackst", (DL_FUNC) &wavepackst, 7}, {"waverecons", (DL_FUNC) &waverecons, 14}, {"waverecons_dh", (DL_FUNC) &waverecons_dh, 14}, {"wd3D", (DL_FUNC) &wd3D, 5}, {"wpCmnv", (DL_FUNC) &wpCmnv, 8}, {"wpst", (DL_FUNC) &wpst, 8}, {"wr3D", (DL_FUNC) &wr3D, 5}, {NULL, NULL, 0} }; void R_init_wavethresh(DllInfo *dll) { R_registerRoutines(dll, CEntries, NULL, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } wavethresh/src/WAVDE.c0000644000176200001440000013055514332225551014246 0ustar liggesusers/* This file contains the additional code needed to perform wavelet * density estimation in SPlus. The WaveThresh package must be installed. * The locations of the functions are: * * SFDE5 line 0053 * SFDE6 line 0096 * PLDE2 line 0148 * phi line 0194 * diad line 0246 * T line 0263 * StoDCDS line 0281 * DensityCovarianceDecomposeStep line 0334 * AXSDCV line 0444 * StoIDS_dh line 0470 * ImageDecomposeStep_dh line 0535 * wavedecomp_dh line 0756 * convolveC_dh line 0874 * convolveD_dh line 0945 * reflect_dh line 1035 * access0 line 1122 * waverecons_dh line 1137 * conbar_dh line 1238 */ #include #include #include #include /* Error condition */ #define OK (0) /* For boundary condition handling */ #define PERIODIC 1 #define SYMMETRIC 2 #define ZERO 3 /* For the type of wavelet decomposition */ #define WAVELET 1 /* The standard decomposition */ #define STATION 2 /* The stationary decomposition */ #define ACCESSC_DH(c, firstC, lengthC, ix, bc) *(c+reflect_dh(((ix)-(firstC)),(lengthC),(bc))) #define AXSMAT(a, nrow, i, j) (a + (i) + (nrow)*(j)) #define ACCESS(image, size, i, j) *(image + (i)*(size) + (j)) #define max(A, B) ((A) > (B) ? (A) : (B)) #define min(A, B) ((A) < (B) ? 
(A) : (B)) /* SFDE5 calculates empirical scaling function coefficients from data, * using the Daubechies-Lagarias algorithm */ void SFDE5(double *x, int *nx, double *p, double *filter, int *nf, int *prec, double *chat, int *kmin, int *kmax, double *philh, double *phirh, int *error) /* double *x; The data */ /* int *nx; Number of data points */ /* double *p; The primary resolution */ /* double *filter; Vector of filter coefficients */ /* int *nf; Number of filter coefficients - 1 */ /* int *prec; Precision used in evaluating phi */ /* double *chat; Vector to put coefficient estimates in */ /* int *kmin; minimum value of k */ /* int *kmax; maximum value of k */ /* double *philh; Left hand end of scaling function support */ /* double *phirh; Right hand end of scaling function support */ /* int *error; Error code - mostly out of memory */ { void phi(double y, double *filt, double *out, int *pre, int *n, int *error); register int i, j, k; register int min, max; register double z; double *phix; phix = (double *) calloc(*nf+1, sizeof(double)); if (phix == NULL) { *error = 1; return; } /* calculate coefficient estimates */ for(i=0; i < *nx; i++) { for(j=0; j < *nf; j++) { *(phix+j) = 0.0; } z = *p * *(x+i); min = ceil(z-*phirh); max = floor(z-*philh); phi(z, filter, phix, prec, nf, error); if (*error != 0) return; for (k=min; k <= max; k++) *(chat+(k-*kmin)) += sqrt(*p) * *(phix + k - min) / *nx; } free((void *)phix); } /* As SFDE5, but also calculates covariances of the coefficients */ void SFDE6(double *x, int *nx, double *p, double *filter, int *nf, int *prec, double *chat, double *covar, int *kmin, int *kmax, double *philh, double *phirh, int *error) /* double *x; The data */ /* int *nx; Number of data points */ /* double *p; The primary resolution */ /* double *filter; Vector of filter coefficients */ /* int *nf; Number of filter coefficients - 1 */ /* int *prec; Precision used in evaluating phi */ /* double *chat; Vector to put coefficient estimates in */ /* double *covar; Matrix to put covariance estimates in */ /* int *kmin; minimum value of k */ /* int *kmax; maximum value of k */ /* double *philh; Left hand end of scaling function support */ /* double *phirh; Right hand end of scaling function support */ /* int *error; Error code -- mostly out of memory */ { void phi(double y, double *filt, double *out, int *pre, int *n, int *error); register int i, j, k, l; register int min, max; register double z, phijk, phijl; double *phix; *error = 0; phix = (double *) calloc(*nf+1, sizeof(double)); if (phix == NULL) { *error = 1; return; } /* calculate coefficient estimates */ for(i=0; i < *nx; i++) { for(j=0; j < *nf; j++) { *(phix+j) = 0.0; } z = *p * *(x+i); min = ceil(z-*phirh); max = floor(z-*philh); phi(z, filter, phix, prec, nf, error); if (*error != 0) return; for (k=min; k <= max; k++) { phijk = sqrt(*p) * *(phix + k - min); *(chat+(k-*kmin)) += phijk / *nx; for (l=k; (l < (k+*nf)) && (l <= max); l++) { phijl = sqrt(*p) * *(phix + l - min); *AXSMAT(covar, (*kmax-*kmin+1), (k-*kmin), (l-k)) += phijk * phijl / (*nx * *nx); } } } free((void *)phix); } /* Function to get plotting information for density estimate from high * level scaling function coefficients */ void PLDE2(double *C, double *p, double *filter, int *nf, int *prec, int *kmin, int *kmax, double *gx, double *gy, int *ng, double *philh, double *phirh, int *error) /* double *C; High resolution scaling function coefficients */ /* double *p; The primary resolution */ /* double *filter; Vector of filter coefficients */ /* int *nf; 
Number of filter coefficients - 1 */ /* int *prec; Precision used in evaluating phi */ /* int *kmin; minimum value of k */ /* int *kmax; maximum value of k */ /* double *gx; grid for drawing density estimate */ /* double *gy; Vector to put density values in */ /* int *ng; Length of above grids */ /* double *philh; Left hand end of scaling function support */ /* double *phirh; Right hand end of scaling function support */ /* int *error; Error Code */ { void phi(double y, double *filt, double *out, int *pre, int *n, int *error); register int i, j, k; register int min, max; register double z; double *phix; *error = 0; phix = (double *) calloc(*nf+1, sizeof(double)); if (phix == NULL) { *error = 1; return; } /* Evaluate density estimate over the grid provided */ for (i=0; i < *ng; i++) { for(j=0; j < *nf; j++) { *(phix+j) = 0.0; } z = *p * *(gx+i); min = ceil(z-*phirh); max = floor(z-*philh); if(min<*kmin) min = *kmin; phi(z, filter, phix, prec, nf, error); if (*error != 0) return; { double a, b; for (k=min; k<=max && k<=*kmax; k++) { a = *(C+(k-*kmin)); b = *(phix + k - min); *(gy+i) += a * sqrt(*p) * b; } } } free((void *)phix); return; } /* Function to evaluate phi_Jk(x) for all k for which it is non-zero */ void phi(double y, double *filt, double *out, int *pre, int *n, int *error) { double T(int index, double *filter, int *n, int j, int k); void diad(double x, int *prec, int *out); int i, j, k, l, *dix; double z, *ans, *tmp; dix = (int *) calloc(*pre, sizeof(int)); if (dix == NULL) { *error = 2; return; } ans = (double *) calloc((*n * *n), sizeof(double)); if (ans == NULL) { free((void *)dix); *error = 3; return; } tmp = (double *) calloc((*n * *n), sizeof(double)); if (tmp == NULL) { free((void *)dix); free((void *)ans); *error = 4; return; } for(i=0; i < *n; i++) { for(j = 0; j < *n; j++) { if(i==j) *AXSMAT(ans, *n, i, j) = 1.0; else *AXSMAT(ans, *n, i, j) = 0.0; } } z = y - floor(y); diad(z, pre, dix); for(i=0; i<*pre; i++) { for(j=0; j < *n; j++) { for(k=0; k < *n; k++) { *AXSMAT(tmp, *n, j, k) = 0.0; for(l=0; l < *n; l++) { *AXSMAT(tmp, *n, j, k) += *AXSMAT(ans, *n, j, l) * T(*(dix+i), filt, n, (l+1), (k+1)); } } } for(j=0; j < *n; j++) { for(k=0; k < *n; k++) { *AXSMAT(ans, *n, j, k) = *AXSMAT(tmp, *n, j, k); } } } for(i=0; i< *n; i++) { for(j=0; j < *n; j++) { *(out + *n - 1 - i) += *AXSMAT(ans, *n, i, j) / *n; } } free((void *)dix); free((void *)ans); free((void *)tmp); } /* Function to find diadic representation of a number in (0,1) */ void diad(double x, int *prec, int *out) { double nu; int i; nu = x; for(i=0; i<*prec; i++) { nu = 2*nu; *(out + i) = floor(nu); nu = nu - floor(nu); } } /* Function to find T_i(j,k) from filter */ double T(int index, double *filter, int *n, int jj, int kk) { int ind; ind=-1; /* MAN: added as a initialization, but *should* be set to true value assuming that index can only be either zero or one */ if(index==0) ind = 2*jj-kk-1; else if(index==1) ind = 2*jj-kk; if(ind < 0 || ind > *n) return(0.0); else return sqrt(2) * *(filter + ind); } /* Function for calling DensityCovarianceDecomposeStep from Splus */ void StoDCDS(double *C, int *Csize, int *firstCin, double *H, int *LengthH, int *LengthCout, int *firstCout, int *lastCout, int *LengthDout, int *firstDout, int *lastDout, double *ImCC, double *ImDD, int *bc, int *type, int *error) { register int i,j; double *cc_out, *dd_out; void DensityCovarianceDecomposeStep(double *C, int Crow, int firstCin, double *H, int LengthH, int LengthCout, int firstCout, int lastCout, int LengthDout, int firstDout, int 
lastDout, double **cc_out, double **dd_out, int bc, int type, int *error); double AXSDCV(double *a, int nr, int nc, int i, int j); DensityCovarianceDecomposeStep(C, *Csize, *firstCin, H, *LengthH, *LengthCout, *firstCout, *lastCout, *LengthDout, *firstDout, *lastDout, &cc_out, &dd_out, *bc, *type, error); /* Copy images */ for(i=0; i<(int)*LengthDout; ++i) { for(j=0; j < (*LengthH-1); ++j) *AXSMAT(ImDD, (int)*LengthDout, i, j) = *AXSMAT(dd_out, *LengthDout, i, j); } for(i=0; i<(int)*LengthCout; ++i) { for(j=0; j < (*LengthH-1); ++j) *AXSMAT(ImCC, (int)*LengthCout, i, j) = *AXSMAT(cc_out, *LengthCout, i, j); } Free(cc_out); Free(dd_out); } /* Function for decomposing the covariance matrix of scaling function coefs */ void DensityCovarianceDecomposeStep(double *C, int Crow, int firstCin, double *H, int LengthH, int LengthCout, int firstCout, int lastCout, int LengthDout, int firstDout, int lastDout, double **cc_out, double **dd_out, int bc, int type, int *error) /* double *C; Input data image */ /* int Crow; Number of rows of covariance matrix */ /* int firstCin; Index number of first element in input "C" image */ /* double *H; Filter coefficients */ /* int LengthH; Length of filter */ /* Details about output image */ /* int LengthCout; Length of C part of output image */ /* int firstCout; Index number of first element in output "C" image */ /* int lastCout; Index number of last element */ /* int LengthDout; Length of D part of output image */ /* int firstDout; Index number of first element in output "D" image */ /* int lastDout; Index number of last element */ /* double **cc_out; Smoothed output image */ /* double **dd_out; Diagonal detail */ /* int bc; Method of boundary correction */ /* int type; Type of transform, wavelet or stationary */ /* int *error; Error code */ { register int k,l,kmin,kmax,lmin,lmax,row,col; double *afterCC,*afterDD; /* Results */ /* int step_factor; NOT USED This should always be 1 for the WAVELET trans*/ double AXSDCV(double *a, int nr, int nc, int i, int j); *error = 0l; if ((afterCC = (double *)Calloc( LengthCout*(LengthH-1), double))==NULL) { *error = 6l; return; } for (row=0; row < LengthCout; row++) { for(col=0; col < (LengthH-1); col++) { *AXSMAT(afterCC, LengthCout, row, col) = 0.0; } } if ((afterDD = (double *)Calloc( LengthDout*(LengthH-1),double))==NULL){ *error = 9l; return; } for (row=0; row < LengthCout; row++) { for(col=0; col < (LengthH-1); col++) { *AXSMAT(afterDD, LengthDout, row, col) = 0.0; } } /* Link this memory to the returning pointers */ *cc_out = afterCC; *dd_out = afterDD; for(row=firstCin; row < (firstCin+Crow); row++) { for(col=max(row-LengthH+2, firstCin); col < min(row+LengthH-1, firstCin+Crow); col++) { kmin = (int)ceil((0.5 * (double)(row+1-LengthH))); kmax = (int)floor((0.5 * (double)(row))); lmin = (int)ceil((0.5 * (double)(col+1-LengthH))); lmax = (int)floor((0.5 * (double)(col))); for(k=kmin; k <= kmax; k++) { for(l=max(lmin, k); l<=min(lmax, k+LengthH-1); l++) { *AXSMAT(afterCC, LengthCout, (k-firstCout), (l-k)) += *(H+row-2*k) * *(H+col-2*l) * AXSDCV(C, Crow, LengthH-1, row-firstCin, col-firstCin); } } } } for(row=firstCin; row < (firstCin+Crow); row++) { for(col=max(row-LengthH+2, firstCin); col < min(row+LengthH-1, firstCin+Crow); col++) { kmin = (int)ceil((0.5 * (double)(row-1))); kmax = (int)floor((0.5 * (double)(row-2+LengthH))); lmin = (int)ceil((0.5 * (double)(col-1))); lmax = (int)floor((0.5 * (double)(col-2+LengthH))); for(k=kmin; k <= kmax; k++) { for(l=max(lmin, k); l<=min(lmax, k+LengthH-1); l++) { 
*AXSMAT(afterDD, LengthDout, (k-firstDout), (l-k)) += (int)pow(-1, row+col) * *(H+2*k+1-row) * *(H+2*l+1-col) * AXSDCV(C, Crow, LengthH-1, row-firstCin, col-firstCin); } } } } /* That should be it ! */ return; } /* Function for accessing elements of the covariance matrix */ double AXSDCV(double *a, int nr, int nc, int i, int j) /* double *a; Pointer to covariance object */ /* int nr; Number of rows of a */ /* int nc; Number of columns of a */ /* int i; First index */ /* int j; Second index */ { int ti; if(i > j) { ti = i; i = j; j = ti; } if((j-i) < nc) return *(a + i + nr*(j-i)); else return 0.0; } /* As WaveThresh StoIDS, but allows for zero boundary conditions */ void StoIDS_dh(double *C, int *Csize, int *firstCin, double *H, int *LengthH, int *LengthCout, int *firstCout, int *lastCout, int *LengthDout, int *firstDout, int *lastDout, double *ImCC, double *ImCD, double *ImDC, double *ImDD, int *bc, int *type, int *error) { register int i,j; double *cc_out, *cd_out, *dc_out, *dd_out; void ImageDecomposeStep_dh(double *C, int Csize, int firstCin, double *H, int LengthH, int LengthCout, int firstCout, int lastCout, int LengthDout, int firstDout, int lastDout, double **cc_out, double **cd_out, double **dc_out, double **dd_out, int bc, int type, int *error); ImageDecomposeStep_dh(C, *Csize, *firstCin, H, *LengthH, *LengthCout, *firstCout, *lastCout, *LengthDout, *firstDout, *lastDout, &cc_out, &cd_out, &dc_out, &dd_out, *bc, *type, error); /* Copy images */ for(i=0; i<(int)*LengthDout; ++i) { for(j=0; j<(int)*LengthDout; ++j) *AXSMAT(ImDD, (int)*LengthDout, i, j) = *AXSMAT(dd_out, *LengthDout, i, j); for(j=0; j<(int)*LengthCout; ++j) *AXSMAT(ImDC, (int)*LengthDout, j, i) = *AXSMAT(dc_out, *LengthDout, j, i); } for(i=0; i<(int)*LengthCout; ++i) { for(j=0; j<(int)*LengthDout; ++j) *AXSMAT(ImCD, (int)*LengthCout, j, i) = *AXSMAT(cd_out, *LengthCout, j, i); for(j=0; j<(int)*LengthCout; ++j) *AXSMAT(ImCC, (int)*LengthCout, j, i) = *AXSMAT(cc_out, *LengthCout, j, i); } Free(cc_out); Free(cd_out); Free(dc_out); Free(dd_out); } /* As WaveThresh ImageDecomposeStep, but allows for zero boundary conditions */ void ImageDecomposeStep_dh(double *C, int Csize, int firstCin, double *H, int LengthH, int LengthCout, int firstCout, int lastCout, int LengthDout, int firstDout, int lastDout, double **cc_out, double **cd_out, double **dc_out, double **dd_out, int bc, int type, int *error) /* double *C; Input data image */ /* int Csize; Size of image (side length) */ /* int firstCin; Index number of first element in input "C" image */ /* double *H; Filter coefficients */ /* int LengthH; Length of filter */ /* Details about output image */ /* int LengthCout; Length of C part of output image */ /* int firstCout; Index number of first element in output "C" image */ /* int lastCout; Index number of last element */ /* int LengthDout; Length of D part of output image */ /* int firstDout; Index number of first element in output "D" image */ /* int lastDout; Index number of last element */ /* double **cc_out; Smoothed output image */ /* double **cd_out; Horizontal detail */ /* double **dc_out; Vertical detail */ /* double **dd_out; Diagonal detail */ /* int bc; Method of boundary correction */ /* int type; Type of transform, wavelet or stationary */ /* int *error; Error code */ { register int j,row,col; double *ccopy; /* Used to copy input data to convolution routines */ double *ccopy_out;/* Used to copy output data to afterC after conv. */ double *dcopy_out;/* Used to copy output data to afterD after conv. 
*/ double *afterC; /* Temporary store for image data after C convolution */ double *afterD; /* Temporary store for image data after D convolution */ double *afterCC,*afterCD,*afterDC,*afterDD; /* Results */ int step_factor; /* This should always be 1 for the WAVELET trans*/ void convolveC_dh(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *c_out, int firstCout, int lastCout, int type, int step_factor, int bc); void convolveD_dh(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *d_out, int firstDout, int lastDout, int type, int step_factor, int bc); *error = 0l; step_factor = 1; /* Get memory for afterC */ if ((afterC = (double *)Calloc(Csize*LengthCout,double))==NULL){ *error = 1l; return; } /* Get memory for afterD */ if ((afterD = (double *)Calloc(Csize*LengthDout,double))==NULL){ *error = 2l; return; } /* Get memory for row of image to pass to convolution routines */ if ((ccopy = (double *)Calloc(Csize,double)) == NULL) { *error = 3l; return; } /* Get memory for output row after C convolution */ if ((ccopy_out = (double *)Calloc(LengthCout,double))==NULL) { *error = 4l; return; } /* Get memory for output row after D convolution */ if ((dcopy_out = (double *)Calloc(LengthDout,double))==NULL) { *error = 5l; return; } /* Do convolutions on rows of C */ for(row=0; row < (int)Csize; ++row) { /* Copy row of C into ccopy */ for(j=0; j= 0; --next_level) { if (verbose) Rprintf("%d ", next_level); at_level = next_level + 1; /* For stationary wavelets we need to define a step factor. * This widens the span of the filter. At the top level (*levels->*levels-1) * it is one, as usual. Then for the next step it becomes 2, then 4 etc. */ convolveC_dh( (C+*(offsetC+at_level)), (int)(*(lastC+ at_level) - *(firstC+at_level)+1), (int)(*(firstC+at_level)), H, (int)*LengthH, (C+*(offsetC+next_level)), (int)(*(firstC+next_level)), (int)(*(lastC+next_level)) , (int)*type, step_factor, (int)*bc); convolveD_dh( (C+*(offsetC+at_level)), (int)(*(lastC+ at_level) - *(firstC+at_level)+1), (int)(*(firstC+at_level)), H, (int)*LengthH, (D+*(offsetD+next_level)), (int)(*(firstD+next_level)), (int)(*(lastD+next_level)), (int)*type, step_factor, (int)*bc ); if (*type == STATION) step_factor *= 2; /* Any half decent compiler should * know what to do here ! 
*/ } if (verbose) Rprintf("\n"); return; } /* As WaveThresh convolveC, but allows for zero boundary conditions */ void convolveC_dh(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *c_out, int firstCout, int lastCout, int type, int step_factor, int bc) /* double *c_in; Input data */ /* int LengthCin; Length of this array */ /* int firstCin; The first C value */ /* double *H; Filter */ /* int LengthH; Length of filter */ /* double *c_out; Output data */ /* int firstCout; First index of C array */ /* int lastCout; Last index of C array */ /* int type; Type of wavelet decomposition */ /* int step_factor; For stationary wavelets only */ /* int bc; Method of boundary correction PERIODIC, SYMMETRIC */ { double sum; register int k; register int count_out; register int m; register int cfactor; /* This determines what sort of dilation we do */ /* and depends on the type argument */ int reflect_dh(int n, int lengthC, int bc); double access0(double *c, int lengthC, int n); count_out = 0; switch(type) { case WAVELET: /* Ordinary wavelets */ cfactor = 2; /* Pick every other coefficient */ break; case STATION: /* Stationary wavelets */ cfactor = 1; /* Pick every coefficient */ break; default: /* This is an error, one of the above must have */ /* been picked */ /* However, this must be tested in a previous */ /* routine. */ cfactor=0; /* MAN: added for total cover: shouldn't happen */ break; } if (bc==ZERO) { for(k=firstCout; k<=lastCout; ++k) { sum = 0.0; for(m=0; m= 0) && (n < lengthC)) return(n); else if (n<0) { if (bc==PERIODIC) { /* n = lengthC+n; */ n = n%lengthC + lengthC*((n%lengthC)!=0); if (n < 0) { REprintf("reflect: access error (%d,%d)\n", n,lengthC); REprintf("reflect: left info from right\n"); error("This should not happen: stopping.\n"); } else return(n); } else if (bc==SYMMETRIC) { n = -1-n; if (n >= lengthC) { REprintf("reflect: access error (%d,%d)\n", n,lengthC); error("This should not happen: stopping.\n"); } else return(n); } else { REprintf("reflect: Unknown boundary correction"); REprintf(" value of %d\n", bc); error("This should not happen: stopping.\n"); } } else { if (bc==PERIODIC) { /* printf("periodic extension, was %d (%d) now ",n,lengthC); n = n - lengthC; */ n %= lengthC; /* printf("%d\n", n); */ if (n >= lengthC) { REprintf("reflect: access error (%d,%d)\n", n,lengthC); REprintf("reflect: right info from left\n"); error("This should not happen: stopping.\n"); } else return(n); } else if (bc==SYMMETRIC) { n = 2*lengthC - n - 1; if (n<0) { REprintf("reflect: access error (%d,%d)\n", n,lengthC); error("This should not happen: stopping.\n"); } else return(n); } else { REprintf("reflect: Unknown boundary correction\n"); error("This should not happen: stopping.\n"); } } /* Safety */ REprintf("reflect: SHOULD NOT HAVE REACHED THIS POINT\n"); error("This should not happen: stopping.\n"); return(0); /* for lint only */ } /* Returns c(n), or 0 if n outside given range */ double access0(double *c, int lengthC, int n) /* double *c; data vector */ /* int lengthC; length of vector */ /* int n; index wanted */ { if ((n>=0) && (n0) ? 
( ((i)+1)/2):((i)/2) ) void conbar_dh(double *c_in, int LengthCin, int firstCin, double *d_in, int LengthDin, int firstDin, double *H, int LengthH, double *c_out, int LengthCout, int firstCout, int lastCout, int type, int bc) /* int firstCout; This determines summation over n */ /* int lastCout; and this does too */ /* int type; The type of wavelet reconstruction */ { register int n,k; register int cfactor; double sumC, sumD; int reflect_dh(int n, int lengthC, int bc); double access0(double *c, int lengthC, int n); switch(type) { case WAVELET: /* Standard wavelets */ cfactor = 2; break; case STATION: /* Stationary wavelets */ cfactor = 1; break; default: /* This should never happen */ cfactor=0; /* MAN: added for total cover: shouldn't happen */ break; } /* Compute each of the output C for ZERO bcs */ if(bc==ZERO) { for(n=firstCout; n<=lastCout; ++n) { /* We want n+1-LengthH <= 2*k to start off (n-2k<=LengthH-1) */ k = CEIL(n+1-LengthH); sumC = 0.0; while( cfactor*k <= n ) { sumC += *(H+n-cfactor*k)*access0(c_in, LengthCin, k-firstCin); ++k; } /* Now do D part */ k = CEIL(n-1); sumD = 0.0; while( cfactor*k <= (LengthH +n -2) ) { sumD += *(H+1+cfactor*k-n) * access0(d_in, LengthDin, k-firstDin); ++k; } if (n & 1) /* n odd */ sumC -= sumD; else sumC += sumD; *(c_out+(n-firstCout)) = sumC; } } /* Now for other bcs */ else { for(n=firstCout; n<=lastCout; ++n) { /* We want n+1-LengthH <= 2*k to start off (n-2k<=LengthH-1) */ k = CEIL(n+1-LengthH); sumC = 0.0; while( cfactor*k <= n ) { sumC += *(H+n-cfactor*k)*ACCESSC_DH(c_in, firstCin, LengthCin, k, bc); ++k; } /* Now do D part */ k = CEIL(n-1); sumD = 0.0; while( cfactor*k <= (LengthH +n -2) ) { sumD += *(H+1+cfactor*k-n) * ACCESSC_DH(d_in, firstDin, LengthDin, k, bc); ++k; } if (n & 1) /* n odd */ sumC -= sumD; else sumC += sumD; ACCESSC_DH(c_out, firstCout, LengthCout, n, bc) = sumC; } } } wavethresh/src/functions.c0000644000176200001440000121210414334424104015375 0ustar liggesusers#include #include #include #include #include #include /* For boundary condition handling */ #define PERIODIC 1 #define SYMMETRIC 2 /* For the type of wavelet decomposition */ #define WAVELET 1 /* The standard decomposition */ #define STATION 2 /* The stationary decomposition */ /* Threshold types */ #define HARD 1 #define SOFT 2 /* * ACCESSC handles negative accesses, as well as those that exceed the number * of elements */ #define ACCESS(image, size, i, j) *(image + (i)*(size) + (j)) #define ACCESSC(c, firstC, lengthC, ix, bc) *(c+reflect(((ix)-(firstC)),(lengthC),(bc))) #define ACCESSD(l, i) *(Data + (*LengthData*(l)) + (i)) #define POINTD(l,i) (Data + (*LengthData*(l)) + (i)) #define POINTC(l,i) (Carray +(*LengthData*(l)) + (i)) /* * The next three are exclusively for the stationary wavelet packet algorithm * WPST */ #define NPKTS(level, nlev) (1 << (2*(nlev-level))) #define PKTLENGTH(level) (1 << level) #define ACCWPST(a, level, avixstart, pkix, i) *((a) + *(avixstart+(level))+(pkix)*PKTLENGTH(level)+i) /* Optimiser parameters */ #define R 0.61803399 /* The golden ratio for bisection searches */ #define Cons (1.0-R) /* For bisection searches */ /* These next 3 are for the ipndacw code */ #define ACCESSW(w,j,k) *(*(w+j)+k) #define max(a,b) ((a) > (b) ? (a) : (b)) #define min(a,b) ((a) > (b) ? (b) : (a)) /* * The next 5 are for the swt2d code */ #define ACCESS3D(ar, d1, d12, ix1, ix2, ix3) *(ar + (ix3)*(d12)+ (ix2)*(d1)+(ix1)) #define TYPES 0 #define TYPEH 1 #define TYPEV 2 #define TYPED 3 /* * End of the swt2d macro code */ /* The code starts here !! 
*/ /* * Do wavelet cross-validation in C */ void CWaveletCV(double *noisy, int *nnoisy, double *UniversalThresh, double *C, double *D, int *LengthD, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *ntt, int *ll, int *bc, double *tol, int *maxits, double *xvthresh, int *interptype, int *error) /* double *noisy; The noisy data */ /* int *nnoisy; Length of noisy data */ /* double *UniversalThresh; The universal threshold */ /* double *C; Workspace for scaling coefficients */ /* double *D; Workspace for wavelet coefficients */ /* int *LengthD; Length of D workspace */ /* double *H; The wavelets to use */ /* int *LengthH; The length of the filter */ /* int *levels; Number of levels */ /* int *firstC, *lastC, *offsetC; array indexing info for C vector */ /* int *firstD, *lastD, *offsetD; array indexing info for D vector */ /* int *ntt; The threshold type */ /* int *ll; lowest level to threshold; all above too */ /* int *bc; The boundary conditions */ /* double *tol; Tol that causes algorithm termination */ /* int *maxits; Max no. of its permitted in optimization */ /* double *xvthresh; Returned cross-validatory threshold */ /* int *interptype; 1=noise interpolate, 2=std interpolate */ /* int *error; There was an error! */ { register int verbose=0; register int iterations=0; double ax, bx,cx; double x0, x1, x2, x3; /* NOT NEEDED double fa,fb,fc * END */ double f1,f2; double ssq, tmp; void Call_Crsswav(double *noisy, int *nnoisy, double *value, double *C, double *D, int *LengthD, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *ntt, int *ll, int *bc, double *ssq, int *interptype, int *error); ax = 0.0; bx = *UniversalThresh/2.0; cx = *UniversalThresh; x0 = ax; x3 = cx; if (*error != 0) { verbose=1; *error = 0; } else verbose=0; if (verbose) { Rprintf("Entered WaveletCV\n"); } if (fabs(cx - bx) > fabs(bx - ax)) { x1 = bx; x2 = bx + Cons*(cx-bx); } else { x2 = bx; x1 = bx - Cons*(bx-ax); } /* NOT NEEDED Call_Crsswav(noisy, nnoisy, &ax, C, D, LengthD, H, LengthH, levels, firstC, lastC, offsetC, firstD, lastD, offsetD, ntt, ll, bc, &ssq, interptype, error); fa = ssq; if (*error != 0) { *error += 1000; return; } Call_Crsswav(noisy, nnoisy, &bx, C, D, LengthD, H, LengthH, levels, firstC, lastC, offsetC, firstD, lastD, offsetD, ntt, ll, bc, &ssq, interptype, error); fb = ssq; if (*error != 0) { *error += 1100; return; } Call_Crsswav(noisy, nnoisy, &cx, C, D, LengthD, H, LengthH, levels, firstC, lastC, offsetC, firstD, lastD, offsetD, ntt, ll, bc, &ssq, interptype, error); fc = ssq; if (*error != 0) { *error += 1200; return; } * END OF NOT NEEDED */ Call_Crsswav(noisy, nnoisy, &x1, C, D, LengthD, H, LengthH, levels, firstC, lastC, offsetC, firstD, lastD, offsetD, ntt, ll, bc, &ssq, interptype, error); f1 = ssq; if (*error != 0) { *error += 1300; return; } Call_Crsswav(noisy, nnoisy, &x2, C, D, LengthD, H, LengthH, levels, firstC, lastC, offsetC, firstD, lastD, offsetD, ntt, ll, bc, &ssq, interptype, error); f2 = ssq; if (*error != 0) { *error += 1400; return; } /* * Next is the MAIN iterative loop. * As well as checking to see if the solution converges, we need to keep * an eye on the maximum number of iterations. 
*/ while((fabs(x3-x0) > *tol*(fabs(x1) + fabs(x2))) && iterations++ < *maxits) { if (verbose) { Rprintf("x0=%lf, x1=%lf, x2=%lf, x3=%lf\n", x0,x1,x2,x3); Rprintf("f1=%lf, f2=%lf\n", f1,f2); /* fflush(stdout); */ } if (f2 < f1) { x0 = x1; x1 = x2; x2 = R*x1 + Cons*x3; f1 = f2; Call_Crsswav(noisy, nnoisy, &x2, C, D, LengthD, H, LengthH, levels, firstC, lastC, offsetC, firstD, lastD, offsetD, ntt, ll, bc, &ssq, interptype, error); f2 = ssq; if (*error != 0) { *error += 1500; return; } } else { x3 = x2; x2 = x1; x1 = R*x2 + Cons*x0; f2 = f1; Call_Crsswav(noisy, nnoisy, &x1, C, D, LengthD, H, LengthH, levels, firstC, lastC, offsetC, firstD, lastD, offsetD, ntt, ll, bc, &ssq, interptype, error); f1 = ssq; if (*error != 0) { *error += 1600; return; } } } /* * Check to see if we've exceeded maximum iterations and return error, if so * * Also, return a value in tol that indicates how close to the tolerance * we are. */ if (iterations >= *maxits) { *error = 1700; *tol = fabs(x3-x0)/(fabs(x1)+fabs(x2)); return; } if (f1 < f2) tmp = x1; else tmp = x2; x1 = tmp/sqrt(1 - log(2.0)/log((double)*nnoisy)); *xvthresh = x1; return; } /* * Wrapper to call Crsswav or Crsswav2 depending on the value of interptype * This allows one to easily change which type of interpolation one does. * */ void Call_Crsswav(double *noisy, int *nnoisy, double *value, double *C, double *D, int *LengthD, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *ntt, int *ll, int *bc, double *ssq, int *interptype, int *error) /*--------------------- * Argument description double *noisy:: The noisy data - power of 2 int *nnoisy:: The number of noisy data elements, must be power of 2 double *value:: The threshold value at which to estimate CV Score double *C:: double *D:: int *LengthD:: double *H:: The wavelets to use int *LengthH:: The length of the filter int *levels:: int *firstC, *lastC, *offsetC:: int *firstD, *lastD, *offsetD:: int *ntt:: The threshold type int *ll:: The lowest level to threshold; all levels above too int *bc:: The boundary conditions double *ssq:: The answer! int *interptype:: int *error:: There was an error! ---------------------*/ { void Crsswav(double *noisy, int *nnoisy, double *value, double *C, double *D, int *LengthD, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *ntt, int *ll, int *bc, double *ssq, int *error); void Crsswav2(double *noisy, int *nnoisy, double *value, double *C, double *D, int *LengthD, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *ntt, int *ll, int *bc, double *ssq, int *error); switch(*interptype) { case 1: Crsswav(noisy, nnoisy, value, C, D, LengthD, H, LengthH, levels, firstC, lastC, offsetC, firstD, lastD, offsetD, ntt, ll, bc, ssq, error); break; case 2: Crsswav2(noisy, nnoisy, value, C, D, LengthD, H, LengthH, levels, firstC, lastC, offsetC, firstD, lastD, offsetD, ntt, ll, bc, ssq, error); break; default: *error += 3000; break; } return; } /* * Do rsswav in C * * This version interpolates the noisy data and compares it to the * function values at the index points. (Crsswav2.c does it the other way * round - i.e. interpolates the reconstructed function and compares these * to the noisy values). Maybe this function is not as good as Crsswav2.c * because we smooth the noise before comparison. 
*/ void Crsswav(double *noisy, int *nnoisy, double *value, double *C, double *D, int *LengthD, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *ntt, int *ll, int *bc, double *ssq, int *error) /*--------------------- * Argument description *--------------------- double *noisy:: The noisy data - power of 2 int *nnoisy:: The number of noisy data elements, must be power of 2 double *value:: The threshold value at which to estimate CV Score double *C:: double *D:: int *LengthD:: double *H:: The wavelets to use int *LengthH:: The length of the filter int *levels:: int *firstC, *lastC, *offsetC:: int *firstD, *lastD, *offsetD:: int *ntt:: The threshold type int *ll:: The lowest level to threshold:: all levels above too int *bc:: The boundary conditions double *ssq:: The answer! int *error:: There was an error! ---------------------*/ { register int nodd,i; int type; int Dlevels; int *levs_to_do; int qlevels; int local_levels; double *interps; double ssq1=0.0; double tmp; void wavedecomp(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); void waverecons(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); void Cthreshold(double *D, int *LengthD, int *firstD, int *lastD, int *offsetD, int *Dlevels, int *ntt, double *value, int *levels, int *qlevels, int *bc, int *error); /* Rprintf("Crsswav\n"); Rprintf("LengthH is %ld\n", *LengthH); Rprintf("levels is %ld\n", *levels); Rprintf("ll is %ld\n", *ll); fflush(stdout); */ /* Get memory for levels to do array */ local_levels = *levels-1; qlevels = local_levels - *ll; if ((levs_to_do = (int *)malloc((unsigned)qlevels*sizeof(int)))==NULL){ *error = 1; return; } else for(i=0; i < qlevels; ++i) *(levs_to_do+i) = *ll+i; /* Get memory for interps */ nodd = *nnoisy/2; if ((interps = (double *)malloc((unsigned)nodd*sizeof(double)))==NULL) { *error = 2; return; } type = (int)WAVELET; /* Only for wavelet transform */ /* Do the ODD analysis and reconstruction */ /* * Now copy odd elements to C array */ for(i=0; i (threshold) ? 
(coef):(0.0)) void Cthreshold(double *D, int *LengthD, int *firstD, int *lastD, int *offsetD, int *Dlevels, int *ntt, double *value, int *levels, int *qlevels, int *bc, int *error) { register int i,j, local_level; double cough; double *din; int reflect(int n, int lengthC, int bc); double SoftThreshold(double cough, double threshold); /* Rprintf("Cthreshold\n"); Rprintf("LengthD is %ld\n", *LengthD); Rprintf("ntt is %ld\n", *ntt); Rprintf("value is %lf\n", *value); Rprintf("qlevels is %ld\n", *qlevels); */ *error = 0; /* * Check that threshold value is positive or zero */ if (*value < 0.0) { *error = 3; return; } /* * Check to see that the levels we are asked to look at are legal */ for(i=0; i<*qlevels; ++i) { if (*(levels+i) > *Dlevels) { *error = 1; return; } } /* * Now do the thresholding */ if (*ntt == HARD) { for(i=0; i<*qlevels; ++i) { local_level = *(levels+i); /* * Make din point to correct place in D array */ din = D+*(offsetD+local_level); /* * Now go through this array doing the thresholding */ for(j= *(firstD+local_level); j<= *(lastD+local_level); ++j){ cough = ACCESSC(din, (int)*firstD, (int)*LengthD, j, (int)*bc); cough = HardThreshold(cough, *value); ACCESSC(din, (int)*firstD, (int)*LengthD, j, (int)*bc) = cough; } } } else if (*ntt == SOFT) { for(i=0; i<*qlevels; ++i) { local_level = *(levels+i); /* * Make din point to correct place in D array */ din = D+*(offsetD+local_level); /* * Now go through this array doing the thresholding */ for(j= *(firstD+local_level); j<= *(lastD+local_level); ++j){ cough = ACCESSC(din, (int)*firstD, (int)*LengthD, j, (int)*bc); cough = SoftThreshold(cough, *value); ACCESSC(din, (int)*firstD, (int)*LengthD, j, (int)*bc) = cough; } } } else { *error = 2; return; } } double SoftThreshold(double cough, double threshold) { register double s=1.0; if (cough < 0.0) s = -1.0; if (fabs(cough) > threshold) { return(s*(fabs(cough) - threshold)); } else return(0.0); } /* * Function that estimates function with removed observation */ void EstWitRem(double *ynoise, int *Lynoise, int *removed, double *thresh, double *H, int *LengthH, int *ntt, int *ll, double *answer, int *error) /*--------------------- * Argument description *--------------------- double *ynoise:: The data int *Lynoise:: The length of the data int *removed:: The index to remove from the data double *thresh:: double *H:: The wavelets to use int *LengthH:: The length of the filter int *ntt:: The threshold type int *ll:: The lowest level to threshold; all levels above too double *answer:: int *error:: Possible errors ---------------------*/ { register int i; /* Register int? 
*/ int nleft, nright; /* The number of data points to the left & right */ int nleftExtend; /* The length of the leftEx vector */ int nrightExtend; /* The length of the rightEx vector */ double *leftEx; /* Array that contains left and it's extension */ double *rightEx; /* Array that contains right and it's extension */ int Dlevels; int *levs_to_do; int qlevels; int local_levels; int bc; int type; double *C, *D; int LengthC, LengthD; int levels; int *firstC, *lastC, *offsetC; int *firstD, *lastD, *offsetD; void simpleWT(double *TheData, int *ndata, double *H, int *LengthH, double **C, int *LengthC, double **D, int *LengthD, int *levels, int **firstC, int **lastC, int **offsetC, int **firstD, int **lastD, int **offsetD, int *type, int *bc, int *error); void Cthreshold(double *D, int *LengthD, int *firstD, int *lastD, int *offsetD, int *Dlevels, int *ntt, double *value, int *levels, int *qlevels, int *bc, int *error); void waverecons(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); int LargerPowerOfTwo(int n); /* A function that returns next larger */ /* power of two than it's argument */ /* No errors yet */ *error = 0; /* * Compute number of elements in left and right sections */ --(*removed); nleft = *removed; /* To the left there will be "removed" elements */ nright = *Lynoise - *removed - 1; /* To the right there is this */ /* * Now to do the wavelet transform we have to make two vectors that are * a power of two in length and that are just inter than twice nleft and nright */ nleftExtend = LargerPowerOfTwo(2*nleft); nrightExtend= LargerPowerOfTwo(2*nright); /* Now check that we can do the thresholding using the ll number of levels */ /* * Get memory for these extensions */ if ((leftEx = (double *)malloc((size_t)nleftExtend*sizeof(double)))==NULL){ *error = 2003; return; } if ((rightEx = (double *)malloc((size_t)nrightExtend*sizeof(double)))==NULL){ *error = 2004; return; } /* * Now fill these extensions up */ for(i=0; i0) { if ((levs_to_do = (int *)malloc((size_t)qlevels*sizeof(int)))==NULL){ *error = 2005; return; } else for(i=0; i < qlevels; ++i) *(levs_to_do+i) = *ll+i; /* Threshold */ Dlevels = local_levels - 1; Cthreshold(D, &LengthD, firstD, lastD, offsetD, &Dlevels, ntt, thresh, levs_to_do, &qlevels, &bc, error); if (*error != 0) { return; } free((void *)levs_to_do); } else if (qlevels <0) { *error = 2002; /* ll, the depth of thresholding exceeded the number * of levels that were available for this smaller * wavelet transform */ return; } waverecons(C, D, H, LengthH, &local_levels, firstC, lastC, offsetC, firstD, lastD, offsetD, &type, &bc, error); if (*error != 0) { return; } /* Now transfer them back to leftEx */ for(i=0; i0) { if ((levs_to_do = (int *)malloc((size_t)qlevels*sizeof(int)))==NULL){ *error = 2005; return; } else for(i=0; i < qlevels; ++i) *(levs_to_do+i) = *ll+i; /* Threshold */ Dlevels = local_levels - 1; Cthreshold(D, &LengthD, firstD, lastD, offsetD, &Dlevels, ntt, thresh, levs_to_do, &qlevels, &bc, error); if (*error != 0) { return; } free((void *)levs_to_do); } else if (qlevels<0) { *error = 2001; return; /* ll was too large for this smaller transform */ } waverecons(C, D, H, LengthH, &local_levels, firstC, lastC, offsetC, firstD, lastD, offsetD, &type, &bc, error); if (*error != 0) { return; } /* Now transfer them back to rightEx */ for(i=0; i>=1) ++cnt; n = 1; ++cnt; while(cnt--) n<<=1; return(n); } /* * Do wavelet cross-validation in C */ 
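/*
 * Illustrative sketch (not part of the original wavethresh sources):
 * CWaveletCV above and FullWaveletCV below both minimise a
 * cross-validation score over thresholds in [0, UniversalThresh] using a
 * golden-section search built on the R and Cons optimiser constants
 * defined near the top of this file.  The stand-alone fragment below
 * shows that search pattern on a made-up objective; toy_rss() and
 * golden_min() are hypothetical names used only for this illustration
 * (Rg/Cg are local copies of the golden-ratio constants, renamed to
 * avoid the R/Cons macros).
 */
#include <math.h>	/* repeated here only so the fragment stands alone */

/* Hypothetical stand-in for the cross-validation RSS; minimised at t = 1.5 */
static double toy_rss(double t)
{
	return (t - 1.5) * (t - 1.5);
}

/* Golden-section minimisation over [a, b], mirroring the bracket updates
 * x2 = R*x1 + Cons*x3 and x1 = R*x2 + Cons*x0 used in the CV drivers */
static double golden_min(double a, double b, double tol)
{
	double Rg = 0.61803399;		/* golden ratio */
	double Cg = 1.0 - 0.61803399;
	double x0 = a, x3 = b;
	double x1 = a + Cg * (b - a);	/* interior points placed at the */
	double x2 = b - Cg * (b - a);	/* golden-ratio positions        */
	double f1 = toy_rss(x1);
	double f2 = toy_rss(x2);

	while (fabs(x3 - x0) > tol * (fabs(x1) + fabs(x2))) {
		if (f2 < f1) {		/* minimum bracketed by (x1, x3) */
			x0 = x1; x1 = x2; f1 = f2;
			x2 = Rg * x1 + Cg * x3;
			f2 = toy_rss(x2);
		} else {		/* minimum bracketed by (x0, x2) */
			x3 = x2; x2 = x1; f2 = f1;
			x1 = Rg * x2 + Cg * x0;
			f1 = toy_rss(x1);
		}
	}
	return (f1 < f2) ? x1 : x2;
}
/*
 * In the package itself the objective evaluated at each trial threshold is
 * the cross-validation RSS computed by Call_Crsswav (for CWaveletCV) or
 * GetRSS (for FullWaveletCV); CWaveletCV additionally caps the number of
 * iterations at *maxits and rescales the final threshold before returning
 * it in *xvthresh.
 */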
void FullWaveletCV(double *noisy, int *nnoisy, double *UniversalThresh, double *H, int *LengthH, int *ntt, int *ll, double *tol, double *xvthresh, int *error) /*--------------------- * Argument description *--------------------- double *noisy:: int *nnoisy:: double *UniversalThresh:: double *H:: The wavelets to use int *LengthH:: The length of the filter int *ntt:: The threshold type int *ll:: The lowest level to threshold; all levels above too double *tol:: double *xvthresh:: int *error:: There was an error! ---------------------*/ { int verbose=0; double ax, bx,cx; double x0, x1, x2, x3; /* NOT NEEDED double fa,fb,fc; */ double f1,f2; double ssq; int mRi; /* This is required as an argument to GetRSS, but we don't * make use of it here */ void GetRSS(double *ynoise, int *Lynoise, double *thresh, double *H, int *LengthH, int *ntt, int *ll, double *rss, int *smallestRSSindex, int *verbose, int *error); ax = 0.0; bx = *UniversalThresh/2.0; cx = *UniversalThresh; x0 = ax; x3 = cx; if (*error != 0) { verbose=1; *error = 0; } else verbose=0; if (verbose) { Rprintf("Entered FullWaveletCV\n"); } if (fabs(cx - bx) > fabs(bx - ax)) { x1 = bx; x2 = bx + Cons*(cx-bx); } else { x2 = bx; x1 = bx - Cons*(bx-ax); } if (verbose) { Rprintf("About to enter GetRSS for the first time\n"); } /* NOT NEEDED GetRSS(noisy, nnoisy, &ax, H, LengthH, ntt, ll, &ssq, &mRi, &verbose, error); if (verbose) { Rprintf("Just left GetRSS for the first time\n"); } fa = ssq; if (*error != 0) { *error += 1000; return; } GetRSS(noisy, nnoisy, &bx, H, LengthH, ntt, ll, &ssq, &mRi, &verbose, error); fb = ssq; if (*error != 0) { *error += 1100; return; } GetRSS(noisy, nnoisy, &cx, H, LengthH, ntt, ll, &ssq, &mRi, &verbose, error); fc = ssq; if (*error != 0) { *error += 1200; return; } * END OF NOT NEEDED */ GetRSS(noisy, nnoisy, &x1, H, LengthH, ntt, ll, &ssq, &mRi, &verbose, error); f1 = ssq; if (*error != 0) { *error += 1300; return; } GetRSS(noisy, nnoisy, &x2, H, LengthH, ntt, ll, &ssq, &mRi, &verbose, error); f2 = ssq; if (*error != 0) { *error += 1400; return; } while(fabs(x3-x0) > *tol*(fabs(x1) + fabs(x2))) { if (verbose) { Rprintf("x0=%lf, x1=%lf, x2=%lf, x3=%lf\n", x0,x1,x2,x3); Rprintf("f1=%lf, f2=%lf\n", f1,f2); /* fflush(stdout); */ } if (f2 < f1) { x0 = x1; x1 = x2; x2 = R*x1 + Cons*x3; f1 = f2; GetRSS(noisy, nnoisy, &x2, H, LengthH, ntt, ll, &ssq, &mRi, &verbose, error); f2 = ssq; if (*error != 0) { *error += 1500; return; } } else { x3 = x2; x2 = x1; x1 = R*x2 + Cons*x0; f2 = f1; GetRSS(noisy, nnoisy, &x1, H, LengthH, ntt, ll, &ssq, &mRi, &verbose, error); f1 = ssq; if (*error != 0) { *error += 1600; return; } } } if (f1 < f2) *xvthresh = x1; else *xvthresh = x2; return; } #define GRSTART 4 /* The first index to remove for GetRSS */ void GetRSS(double *ynoise, int *Lynoise, double *thresh, double *H, int *LengthH, int *ntt, int *ll, double *rss, int *smallestRSSindex, int *verbose, int *error) { int removed, local_removed; int minRSSix; double TheMinRSS; double answer; void EstWitRem(double *ynoise, int *Lynoise, int *removed, double *thresh, double *H, int *LengthH, int *ntt, int *ll, double *answer, int *error); /* No error yet!! 
*/ *error = 0; *rss = 0.0; minRSSix = 0; TheMinRSS = 0.0; for(removed=GRSTART; removed<= *Lynoise-GRSTART+1; ++removed) { local_removed = removed; EstWitRem(ynoise, Lynoise, &local_removed, thresh, H, LengthH, ntt, ll, &answer, error); if (*error != 0) { return; } answer -= *(ynoise+removed-1); *rss += answer*answer; if (removed==GRSTART) { minRSSix = removed; TheMinRSS = answer* answer; } else if (TheMinRSS > answer*answer) { minRSSix = removed; TheMinRSS = answer*answer; } if (*verbose>1) Rprintf("GetRSS: Removed is %d, ynoise[%d] is %lf RSS is %lf\n", /*MAN: changed %ld to %d since declared as int (L1329) */ removed, removed, *(ynoise+removed-1), *rss); } *rss /= (*Lynoise - 4); *smallestRSSindex = minRSSix; return; } /* * ImageDecomposeStep - Take an image and do a one level decomp * * Error Codes * * 0 - Ok. * * 1 - Memory error for (afterC) temporary image * * 2 - Memory error for (afterD) temporary image * * 3 - Memory error for (ccopy) temporary row store * * 4 - Memory error for (ccopy_out) temporary row store * * 5 - Memory error for (dcopy_out) temporary row store * * 6-9 - Memory errors for (afterCC,afterCD,afterDC,afterDD) * store for the answers */ void ImageDecomposeStep(double *C, int Csize, int firstCin, double *H, int LengthH, int LengthCout, int firstCout, int lastCout, int LengthDout, int firstDout, int lastDout, double **cc_out, double **cd_out, double **dc_out, double **dd_out, int bc, int type, int *error) /*--------------------- * Argument description *--------------------- double *C:: Input data image int Csize:: Size of image (side length) int firstCin:: Index number of first element in input "C" image double *H:: Filter coefficients int LengthH:: Length of filter Details about output image int LengthCout:: Length of C part of output image int firstCout:: Index number of first element in output "C" image int lastCout:: Index number of last element int LengthDout:: Length of D part of output image int firstDout:: Index number of first element in output "D" image int lastDout:: Index number of last element double **cc_out:: Smoothed output image double **cd_out:: Horizontal detail double **dc_out:: Vertical detail double **dd_out:: Diagonal detail int bc:: Method of boundary correction int type:: Type of transform, wavelet or stationary int *error:: Error code *---------------------*/ { register int j,row,col; double *ccopy; /* Used to copy input data to convolution routines */ double *ccopy_out;/* Used to copy output data to afterC after conv. */ double *dcopy_out;/* Used to copy output data to afterD after conv. 
*/ double *afterC; /* Temporary store for image data after C convolution */ double *afterD; /* Temporary store for image data after D convolution */ double *afterCC,*afterCD,*afterDC,*afterDD; /* Results */ int step_factor; /* This should always be 1 for the WAVELET trans*/ void convolveC(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *c_out, int firstCout, int lastCout, int type, int step_factor, int bc); void convolveD(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *d_out, int firstDout, int lastDout, int type, int step_factor, int bc); *error = 0; step_factor = 1; /* Get memory for afterC */ if ((afterC = (double *)malloc((unsigned)(Csize*LengthCout*sizeof(double))))==NULL){ *error = 1; return; } /* Get memory for afterD */ if ((afterD = (double *)malloc((unsigned)(Csize*LengthDout*sizeof(double))))==NULL){ *error = 2; return; } /* Get memory for row of image to pass to convolution routines */ if ((ccopy = (double *)malloc((unsigned)(Csize*sizeof(double)))) == NULL) { *error = 3; return; } /* Get memory for output row after C convolution */ if ((ccopy_out = (double *)malloc((unsigned)(LengthCout*sizeof(double))))==NULL) { *error = 4; return; } /* Get memory for output row after D convolution */ if ((dcopy_out = (double *)malloc((unsigned)(LengthDout*sizeof(double))))==NULL) { *error = 5; return; } /* Do convolutions on rows of C */ for(row=0; row < (int)Csize; ++row) { /* Copy row of C into ccopy */ for(j=0; j0) ? ( ((i)+1)/2):((i)/2) ) void conbar(double *c_in, int LengthCin, int firstCin, double *d_in, int LengthDin, int firstDin, double *H, int LengthH, double *c_out, int LengthCout, int firstCout, int lastCout, int type, int bc) { register int n,k; register int cfactor; double sumC, sumD; int reflect(int n, int lengthC, int bc); switch(type) { case WAVELET: /* Standard wavelets */ cfactor = 2; break; case STATION: /* Stationary wavelets */ cfactor = 1; break; default: /* This should never happen */ cfactor=0; /* MAN: added for total cover: shouldn't happen */ break; } /* Compute each of the output C */ for(n=firstCout; n<=lastCout; ++n) { /* We want n+1-LengthH <= 2*k to start off */ k = CEIL(n+1-LengthH); sumC = 0.0; while( cfactor*k <= n ) { sumC += *(H + n - cfactor*k)*ACCESSC(c_in, firstCin, LengthCin, k, bc); ++k; } /* Now do D part */ k = CEIL(n-1); sumD = 0.0; while( cfactor*k <= (LengthH +n -2) ) { sumD += *(H+1+cfactor*k-n) * ACCESSC(d_in, firstDin, LengthDin, k, bc); ++k; } if (n & 1) /* n odd */ sumC -= sumD; else sumC += sumD; ACCESSC(c_out, firstCout, LengthCout, n, bc) = sumC; } } /* * CONBARL: Wrapper called by SPlus conbar() to call C conbar. 
*/ void conbarL(double *c_in, int *LengthCin, int *firstCin, double *d_in, int *LengthDin, int *firstDin, double *H, int *LengthH, double *c_out, int *LengthCout, int *firstCout, int *lastCout, int *type, int *bc) { int LLengthCin; int LfirstCin; int LLengthDin; int LfirstDin; int LLengthH; int LLengthCout; int LfirstCout; int LlastCout; int Ltype; int Lbc; void conbar(double *c_in, int LengthCin, int firstCin, double *d_in, int LengthDin, int firstDin, double *H, int LengthH, double *c_out, int LengthCout, int firstCout, int lastCout, int type, int bc); LLengthCin = (int)*LengthCin; LfirstCin = (int)*firstCin; LLengthDin = (int)*LengthDin; LfirstDin = (int)*firstDin; LLengthH = (int)*LengthH; LLengthCout = (int)*LengthCout; LfirstCout = (int)*firstCout; LlastCout = (int)*lastCout; Ltype = (int)*type; Lbc = (int)*bc; conbar(c_in, LLengthCin, LfirstCin, d_in, LLengthDin, LfirstDin, H, LLengthH, c_out, LLengthCout, LfirstCout, LlastCout, Ltype, Lbc); } /* * CONVOLVE - Do filter H filter convolution with boundary */ void convolveC(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *c_out, int firstCout, int lastCout, int type, int step_factor, int bc) /*--------------------- * Argument description *--------------------- double *c_in:: Input data int LengthCin:: Length of this array int firstCin:: The first C value double *H:: Filter int LengthH:: Length of filter double *c_out:: Output data int firstCout:: First index of C array int lastCout:: Last index of C array int type:: Type of wavelet decomposition int step_factor:: For stationary wavelets only int bc:: Method of boundary correction PERIODIC, SYMMETRIC *---------------------*/ { double sum; register int k; register int count_out; register int m; register int cfactor; /* This determines what sort of dilation we do */ /* and depends on the type argument */ int reflect(int n, int lengthC, int bc); count_out = 0; switch(type) { case WAVELET: /* Ordinary wavelets */ cfactor = 2; /* Pick every other coefficient */ break; case STATION: /* Stationary wavelets */ cfactor = 1; /* Pick every coefficient */ break; default: /* This is an error, one of the above must have */ /* been picked */ /* However, this must be tested in a previous */ /* routine. */ cfactor=0; /* MAN: added for total cover: shouldn't happen */ break; } for(k=firstCout; k<=lastCout; ++k) { sum = 0.0; for(m=0; m= 0) && (n < lengthC)) return(n); else if (n<0) { if (bc==PERIODIC) { /* n = lengthC+n; */ n = n%lengthC + lengthC*((n%lengthC)!=0); if (n < 0) { REprintf("reflect: access error (%d,%d)\n", n,lengthC); REprintf("reflect: left info from right\n"); error("This should not happen. Stopping.\n"); } else return(n); } else if (bc==SYMMETRIC) { n = -1-n; if (n >= lengthC) { REprintf("reflect: access error (%d,%d)\n", n,lengthC); error("This should not happen. Stopping.\n"); } else return(n); } else { REprintf("reflect: Unknown boundary correction"); REprintf("value of %d\n", bc); error("This should not happen. Stopping.\n"); } } else { if (bc==PERIODIC) { /* Rprintf("periodic extension, was %d (%d) now ",n,lengthC); n = n - lengthC; */ n %= lengthC; /* Rprintf("%d\n", n); */ if (n >= lengthC) { REprintf("reflect: access error (%d,%d)\n", n,lengthC); REprintf("reflect: right info from left\n"); error("This should not happen. Stopping.\n"); } else return(n); } else if (bc==SYMMETRIC) { n = 2*lengthC - n - 1; if (n<0) { REprintf("reflect: access error (%d,%d)\n", n,lengthC); error("This should not happen. 
Stopping.\n"); } else return(n); } else { REprintf("reflect: Unknown boundary correction\n"); error("This should not happen. Stopping.\n"); } } /* Safety */ REprintf("reflect: SHOULD NOT HAVE REACHED THIS POINT\n"); error("This should not happen. Stopping.\n"); return(0); /* for lint only */ } /* Rotate a vector */ /* Vector: a_1, a_2, a_3, ..., a_{n-1}, a_n becomes a_2, a_3, a_4, ..., a_n, a_1 rotateback() does the opposite */ void rotater(double *book, int length) { register int i; double tmp; tmp = *book; for(i=0; i0; --i) *(book+i) = *(book+i-1); *book = tmp; } /* * Does a simple wavelet transform * * This is just like the ordinary periodic wavelet transform * * The purpose of this function is simplicity. All you need supply is the * data and some pointers for the arguments * * This function uses Calloc to create the arrays: * * firstC,lastC,offsetC,firstD,lastD,offsetD,C,D * * When you have used their contents it is a good idea for you to destroy * the memory associated with these arrays. To do this call * * free((char *)C); - This frees the memory associated with the pointer * * [We recommend you declare these arrays in the calling program like * * double *C, *D; * int *firstC, *lastC, *offsetC, *firstD, *lastD, *offsetD; * * Then PASS the ADDRESS of these to this function, e.g. * &C, &D etc.] */ void simpleWT(double *TheData, int *ndata, double *H, int *LengthH, double **C, int *LengthC, double **D, int *LengthD, int *levels, int **firstC, int **lastC, int **offsetC, int **firstD, int **lastD, int **offsetD, int *type, int *bc, int *error) /*--------------------- * Argument description *--------------------- double *TheData:: The data to transform; must be a power of two els int *ndata:: The length of the data double *H:: The wavelet filter that you want to use int *LengthH:: The length of the wavelet filter The following arguments are the answer/output double **C:: A pointer to the array of C answers is returned int *LengthC:: The length of the C array is returned double **D:: A pointer to the array of D answers is returned int *LengthD:: The length of the D array is returned int *levels:: The number of levels of the transform is returned int **firstC,**lastC,**offsetC:: These are computed and returned int **firstD,**lastD,**offsetD:: These are computed and returned int *type:: This is filled in with type WAVELET int *bc:: This is filled in with PERIODIC int *error:: Returns any error condition *---------------------*/ { int *lfC,*llC,*loC; /* Local versions of firstC,lastC,offsetC */ int *lfD,*llD,*loD; /* Local versions of firstD,lastD,offsetD */ double *lC, *lD; /* Local versions of C and D */ int cnt,i; void wavedecomp(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); int IsPowerOfTwo(int n); /* No errors yet */ *error = 0; /* Fill in type of transform and type of boundary handling conditions */ *type = (int)WAVELET; *bc = (int)PERIODIC; /* Now work out the size of the arrays needed for the transform */ *levels = (int)IsPowerOfTwo(*ndata); /* Now create memory for first/last and offset */ /* Now create memory for first/last and offset */ if ((lfC = (int *)malloc((size_t)(*levels+1)*sizeof(int)))==NULL) { *error = 3001; return; } if ((llC = (int *)malloc((size_t)(*levels+1)*sizeof(int)))==NULL) { *error = 3002; return; } if ((loC = (int *)malloc((size_t)(*levels+1)*sizeof(int)))==NULL) { *error = 3003; return; } if ((lfD = (int 
*)malloc((size_t)(*levels)*sizeof(int)))==NULL) { *error = 3004; return; } if ((llD = (int *)malloc((size_t)(*levels)*sizeof(int)))==NULL) { *error = 3005; return; } if ((loD = (int *)malloc((size_t)(*levels)*sizeof(int)))==NULL) { *error = 3006; return; } /* Now fill up these arrays */ *lfC = *llC = 0; cnt = 1; for(i=1; i<*levels+1; ++i) { *(lfC+i) = 0; *(llC+i) = *(llC+i-1) + cnt; cnt<<=1; } *(loC+*levels+1-1) = 0; for(i=*levels+1-2; i>=0; --i) { *(loC+i) = *(loC+i+1) + *(llC+i+1)+1; } *lfD = *llD = 0; cnt = 1; for(i=1; i<*levels; ++i) { *(lfD+i) = 0; *(llD+i) = *(llD+i-1) + cnt; cnt<<=1; } *(loD+*levels-1) = 0; for(i=*levels-2; i>=0; --i) { *(loD+i) = *(loD+i+1) + *(llD+i+1)+1; } /* Now we have to create the C and D arrays */ *LengthC = *loC + 1; *LengthD = *loD + 1; if ((lC = (double *)calloc((size_t)*LengthC,(size_t)sizeof(double)))==NULL) { *error = 3007; return; } if ((lD = (double *)calloc((size_t)*LengthD,(size_t)sizeof(double)))==NULL) { *error = 3008; return; } /* Calloc should already zero these arrays */ for(i=0; i<*ndata; ++i) *(lC+i) = *(TheData+i); /* Sorted, now do the wavelet transform */ wavedecomp(lC, lD, H, LengthH, levels, lfC, llC, loC, lfD, llD, loD, type, bc, error); if (*error != 0) { *error = 3009; return; } /* Now we can return all the answers. To do this we have to link the information * in the l* arrays to the real ones */ *C = lC; *D = lD; *firstC = lfC; *lastC = llC; *offsetC = loC; *firstD = lfD; *lastD = llD; *offsetD = loD; /* That's it, time to go home */ return; } void wavedecomp(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error) /*--------------------- * Argument description *--------------------- double *C:: Input data, and the subsequent smoothed data double *D:: The wavelet coefficients double *H:: The smoothing filter H int *LengthH:: Length of smoothing filter int *levels:: The number of levels in this decomposition int *firstC:: The first possible C coef at a given level int *lastC:: The last possible C coef at a given level int *offsetC:: Offset from C[0] for certain level's coeffs int *firstD:: The first possible D coef at a given level int *lastD:: The last possible D coef at a given level int *offsetD:: Offset from D[0] for certain level's coeffs int *type:: The type of wavelet decomposition int *bc:: Method of boundary correction int *error:: Error code *---------------------*/ { register int next_level,at_level; register int step_factor; /* Controls width of filter for station */ register int verbose; /* Controls message printing, passed in error var*/ void convolveC(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *c_out, int firstCout, int lastCout, int type, int step_factor, int bc); void convolveD(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *d_out, int firstDout, int lastDout, int type, int step_factor, int bc); if (*error == 1l) /* Error switches on verbosity */ verbose = 1; else verbose = 0; switch(*bc) { case PERIODIC: /* Periodic boundary conditions */ if (verbose) Rprintf("Periodic boundary method\n"); break; case SYMMETRIC: /* Symmetric boundary conditions */ if (verbose) Rprintf("Symmetric boundary method\n"); break; default: /* The bc must be one of the above */ Rprintf("Unknown boundary correction method\n"); *error = 1; return; } switch(*type) { case WAVELET: /* Standard wavelets */ if (verbose) Rprintf("Standard wavelet decomposition\n"); break; case 
STATION: /* Stationary wavelets */ if (verbose) Rprintf("Stationary wavelet decomposition\n"); break; default: /* The type must be of one the above */ if (verbose) Rprintf("Unknown decomposition type\n"); *error = 2; return; } if (verbose) Rprintf("Decomposing into level: "); *error = 0; step_factor = 1; /* This variable should *always* be 1 for standard * wavelets. It should start at 1 for stationary * wavelets and multiply itself by 2 each stage */ for(next_level = *levels - 1; next_level >= 0; --next_level) { if (verbose) Rprintf("%d ", next_level); at_level = next_level + 1; /* For stationary wavelets we need to define a step factor. * This widens the span of the filter. At the top level (*levels->*levels-1) * it is one, as usual. Then for the next step it becomes 2, then 4 etc. */ convolveC( (C+*(offsetC+at_level)), (int)(*(lastC+ at_level) - *(firstC+at_level)+1), (int)(*(firstC+at_level)), H, (int)*LengthH, (C+*(offsetC+next_level)), (int)(*(firstC+next_level)), (int)(*(lastC+next_level)) , (int)*type, step_factor, (int)*bc); convolveD( (C+*(offsetC+at_level)), (int)(*(lastC+ at_level) - *(firstC+at_level)+1), (int)(*(firstC+at_level)), H, (int)*LengthH, (D+*(offsetD+next_level)), (int)(*(firstD+next_level)), (int)(*(lastD+next_level)), (int)*type, step_factor, (int)*bc ); if (*type == STATION) step_factor *= 2; /* Any half decent compiler should * know what to do here ! */ } if (verbose) Rprintf("\n"); return; } void accessDwp(double *Data, int *LengthData, int *nlevels, int *level, double *answer, int *error) /*--------------------- * Argument description *--------------------- double *Data:: This is a 2D array. Top level contains data int *LengthData:: Length of Data, this is power of 2 int *nlevels:: The number of levels in this decomposition int *level:: Which level you want to extract double *answer:: The level of coefficients int *error:: Error code *---------------------*/ { register int i; *error = 0; /* * Check variable integrity */ if (*level < 0) { *error =4000; return; } else if (*level > *nlevels) { *error = 4001; return; } for(i=0; i< *LengthData; ++i) *(answer+i) = ACCESSD(*level, i); } void wavepackde(double *Data, int *LengthData, int *levels, double *H, int *LengthH) /*--------------------- * Argument description *--------------------- double *Data:: This is a 2D array. 
Top level contains data int *LengthData:: Length of Data, this is power of 2 int *levels:: The number of levels, 2^(levels+1)=LengthData double *H:: The filter to use int *LengthH:: Length of filter *---------------------*/ { int startin, outstart1, outstart2; /* int i,j; */ void wvpkr(double *Data, int startin, int lengthin, int outstart1, int outstart2, int level, double *H, int LengthH, int *LengthData); /* Rprintf("This routine is wavepackde\n"); Rprintf("Length of data is %ld\n", *LengthData); Rprintf("Number of levels is %ld\n", *levels); Rprintf("Data array is:\n"); for(i= (int)*levels; i>=0; --i) for(j=0; j< *LengthData; ++j) { Rprintf("Level %d, Item %d is %lf\n", i,j, ACCESSD(i,j)); } */ startin = 0; outstart1 = 0; outstart2 = ((int)*LengthData)/2; wvpkr(Data, startin, (int)*LengthData, outstart1, outstart2, (int)*levels, H, (int)*LengthH, LengthData); } void wvpkr(double *Data, int startin, int lengthin, int outstart1, int outstart2, int level, double *H, int LengthH, int *LengthData) /* int level; The level where we're at */ { int lengthout; void convolveC(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *c_out, int firstCout, int lastCout, int type, int step_factor, int bc); void convolveD(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *d_out, int firstDout, int lastDout, int type, int step_factor, int bc); lengthout = lengthin/2; convolveC( POINTD(level, startin), lengthin, 0, H, LengthH, POINTD(level-1, outstart1), 0, lengthout-1, WAVELET, 1, PERIODIC); convolveD( POINTD(level, startin), lengthin, 0, H, LengthH, POINTD(level-1, outstart2), 0, lengthout-1, WAVELET, 1, PERIODIC); if (lengthout==1) return; else { /* * Now apply both filters to the LOW pass filtered data */ wvpkr(Data, outstart1, lengthout, outstart1, outstart1+lengthout/2, level-1, H, LengthH, LengthData); /* * Now apply both filters to the HIGH pass filtered data */ wvpkr(Data, outstart2, lengthout, outstart2, outstart2+lengthout/2, level-1, H, LengthH, LengthData); } } /* WAVEPACKRECON - inverse swt */ /* * Error codes * * 1 - As the reconstruction is built up the vector ldata should contain a doubling sequence (apart from the first two numbers which should be the same). This error is returned if this is not the case. * 2 - memory error on creating c_in * 3 - memory error on creating c_out */ void wavepackrecon(double *rdata, int *ldata, int *nrsteps, int *rvector, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *rdata:: The transformed data, packets are packed together int *ldata:: Array of lengths of packets in rdata int *nrsteps:: The number of reconstruction steps (also length of ldata array) int *rvector:: Integer whose binary decomp reveals rotate/not instruction double *H:: Filter int *LengthH:: Length of filter int *error:: Error code *---------------------*/ { register int i,j; register int msb; register int ldctr; int LengthCin; int LengthCout; int LengthDin; double *c_in; double *c_out; void conbar(double *c_in, int LengthCin, int firstCin, double *d_in, int LengthDin, int firstDin, double *H, int LengthH, double *c_out, int LengthCout, int firstCout, int lastCout, int type, int bc); void rotateback(double *book, int length); /* Set error code to zero as no error has occured yet! */ *error = 0; /* We can use conbar to do all our hard work for us */ /* This is the reconstruction step in the ordinary DWT. 
The only */ /* modification that we have to make is to rotate the data at each */ /* step if we need to. This information is stored in "rvector" (rotate */ /* vector). This is a single integer whose information is stored in */ /* binary form. Each bit refers to a rotate/non rotate operation and */ /* should be applied by the following method: */ /* * * a. do conbar * b. check next most sig bit of rvector and rotate if 1 * * And start with the most significant bit. */ /* Rprintf("Rvector is %d\n", (int)*rvector); */ /* First let's generate the MSB */ msb = 0x01 << ((int)*nrsteps-1); /* Get initial C data stored */ LengthCin = (int)*(ldata+0); ldctr = LengthCin; /* ldctr measures how far aint rdata we have gone */ if ((c_in = (double *)malloc((unsigned)LengthCin*sizeof(double)))==NULL) { *error = 2; return; } for(j=0; j< LengthCin; ++j) *(c_in+j) = *(rdata+j); LengthCout = LengthCin; c_out=calloc(LengthCout,sizeof(double)); /* MAN: added initialization. Hopefully shouldn't have any bad consequences... */ for(i=0; i< (int)*nrsteps; ++i) { LengthCout *= 2; if (i != 0) free((void *)c_out); if ((c_out=(double *)malloc((unsigned)LengthCout*sizeof(double)))==NULL) { *error = 3; return; } /* Now store D data at this level */ LengthDin = (int)*(ldata+(i+1)); /* Don't need to store cos we can put rdata+ldctr straight in for d_in * for(j=0; j < LengthDin; ++j) * *(d_in+j) = *(rdata+ldctr+j); */ conbar(c_in, LengthCin, 0, rdata+ldctr, LengthDin, 0, H, (int)*LengthH, c_out, LengthCout, 0, LengthCout-1, WAVELET, PERIODIC); ldctr += LengthDin; /* update cos we've moved aint rdata */ /* O.k. chaps, c_out must now become c_in, and we should check that the lengths match */ /* Rprintf("LengthCout is %d\n", LengthCout); Rprintf("i is %d\n", i); Rprintf("nrsteps is %d\n", (int)*nrsteps); */ /*Rprintf("ldata+i+2 is %d\n", (int)*(ldata+i+2));*/ if (i+1 != (int)*nrsteps && LengthCout != (int)*(ldata+i+2)) { *error = 1; return; } /* Do we rotate back ? */ if (msb & (int)*rvector) { /* Rprintf("Rotating\n"); */ rotateback(c_out, LengthCout); } /* Rprintf("msb is: %d\n", msb); */ msb >>= 1; /* Now c_in <- c_out */ free((void *)c_in); if ((c_in = (double *)malloc((unsigned)LengthCout*sizeof(double)))==NULL) { *error = 2; return; } for(j=0; j=0; --i) for(j=0; j< *LengthData; ++j) { Rprintf("Level %d, Item %d is %lf\n", i,j, ACCESSD(i,j)); } */ /* Create a bookeeping vector. 
That contains the C,C' level smooths thoughout the algorithm */ if ((book = (double *)malloc((unsigned)*LengthData*sizeof(double)))==NULL){ *error = 1; return; } /* Copy original data to book keeping vector */ for(i=0; i< *LengthData; ++i) *(book+i) = *POINTD(*levels, i); startin = 0; outstart1 = 0; outstart2 = ((int)*LengthData)/2; wvpkstr(Carray, Data, startin, (int)*LengthData, outstart1, outstart2, (int)*levels, H, (int)*LengthH, LengthData, book, error); if (*error != 0) return; else free((void *)book); } void wvpkstr(double *Carray, double *Data, int startin, int lengthin, int outstart1, int outstart2, int level, double *H, int LengthH, int *LengthData, double *book, int *error) /* int level; The level where we're at */ { register int i; int lengthout; double *book1, *book2; void convolveC(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *c_out, int firstCout, int lastCout, int type, int step_factor, int bc); void convolveD(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *d_out, int firstDout, int lastDout, int type, int step_factor, int bc); void rotater(double *book, int length); /* Rprintf("wvpkstr entry\n"); Rprintf("lengthout is %d\n", lengthout); */ lengthout = lengthin/2; if ((book1 = (double *)malloc((unsigned)lengthout*sizeof(double)))==NULL) { *error = 1; return; } else if ((book2 = (double *)malloc((unsigned)lengthout*sizeof(double)))==NULL){ *error = 1; return; } convolveC(book, lengthin, 0, H, LengthH, book1, 0, lengthout-1, WAVELET, 1, PERIODIC); for(i=0; i < lengthout; ++i) * POINTC(level-1, (outstart1+i)) = *(book1+i); /* Rprintf("book1 coefficients \n"); for(i=0; i0; --i) * *(book+i) = *(book+i-1); * book = tmp; */ /* COMMENT OUT (replaced by rotater function) tmp = *book; * for(i=0; i= 0; --next_level) { if (verbose) Rprintf("%d ", next_level); at_level = next_level + 1; /* For stationary wavelets we need to define a step factor. * This widens the span of the filter. At the top level (*levels->*levels-1) * it is one, as usual. Then for the next step it becomes 2, then 4 etc. */ comconC( (CR+*(offsetC+at_level)), (CI+*(offsetC+at_level)), (int)(*(lastC+ at_level) - *(firstC+at_level)+1), (int)(*(firstC+at_level)), HR, HI, (int)*LengthH, (CR+*(offsetC+next_level)), (CI+*(offsetC+next_level)), (int)(*(lastC+next_level) - *(firstC+next_level)+1), (int)(*(firstC+next_level)), (int)(*(lastC+next_level)) , (int)*type, step_factor, (int)*bc); comconD( (CR+*(offsetC+at_level)), (CI+*(offsetC+at_level)), (int)(*(lastC+ at_level) - *(firstC+at_level)+1), (int)(*(firstC+at_level)), GR, GI, (int)*LengthH, (DR+*(offsetD+next_level)), (DI+*(offsetD+next_level)), (int)(*(lastD+next_level) - *(lastD+next_level)+1), (int)(*(firstD+next_level)), (int)(*(lastD+next_level)), (int)*type, step_factor, (int)*bc ); if (*type == STATION) step_factor *= 2; /* Any half decent compiler should * know what to do here ! 
*/ } if (verbose) Rprintf("\n"); return; } /* * comwr: Do 1D complex wavelet reconstruction */ void comwr(double *CR, double *CI, int *LengthC, double *DR, double *DI, int *LengthD, double *HR, double *HI, double *GR, double *GI, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error) /*--------------------- * Argument description *--------------------- double *CR:: Input data, and the subsequent smoothed data double *CI:: Input data, and the subsequent smoothed data int *LengthC:: Length of C array double *DR:: The wavelet coefficients double *DI:: The wavelet coefficients int *LengthD:: Length of D array double *HR:: The smoothing filter H double *HI:: The smoothing filter H double *GR:: The highpass filter H double *GI:: The highpass filter H int *LengthH:: Length of smoothing filter int *levels:: The number of levels in this decomposition int *firstC:: The first possible C coef at a given level int *lastC:: The last possible C coef at a given level int *offsetC:: Offset from C[0] for certain level's coeffs int *firstD:: The first possible D coef at a given level int *lastD:: The last possible D coef at a given level int *offsetD:: Offset from D[0] for certain level's coeffs int *type:: The type of wavelet decomposition int *bc:: Method of boundary correction int *error:: Error code *---------------------*/ { register int next_level, at_level; register int verbose; /* Printing messages, passed in error */ if (*error == 1) verbose = 1; else verbose = 0; switch(*bc) { case PERIODIC: /* Periodic boundary conditions */ if (verbose) Rprintf("Periodic boundary method\n"); break; case SYMMETRIC: /* Symmetric boundary conditions */ if (verbose) Rprintf("Symmetric boundary method\n"); break; default: /* The bc must be one of the above */ Rprintf("Unknown boundary correction method\n"); *error = 1; return; break; } switch(*type) { case WAVELET: /* Standard wavelets */ if (verbose) Rprintf("Standard wavelet decomposition\n"); break; case STATION: /* Stationary wavelets */ if (verbose) Rprintf("Stationary wavelet decomposition\n"); break; default: /* The type must be of one the above */ if (verbose) Rprintf("Unknown decomposition type\n"); *error = 2; return; break; } if (verbose) Rprintf("Building level: "); *error = 0; for(next_level = 1; next_level <= *levels; ++next_level) { if (verbose) Rprintf("%d ", next_level); at_level = next_level - 1; comcbr( (CR+*(offsetC+at_level)), (CI+*(offsetC+at_level)), (int)(*(lastC+at_level) - *(firstC+at_level) + 1), (int)(*(firstC+at_level)), (int)(*(lastC+at_level)), (DR+*(offsetD+at_level)), (DI+*(offsetD+at_level)), (int)(*(lastD+at_level) - *(firstD+at_level) + 1), (int)(*(firstD+at_level)), (int)(*(lastD+at_level)), HR, HI, GR, GI, (int)*LengthH, (CR+*(offsetC+next_level)), (CI+*(offsetC+next_level)), (int)(*(lastC+next_level) - *(firstC+next_level)+1), (int)(*(firstC+next_level)), (int)(*(lastC+next_level)), (int)(*type), (int)(*bc) ); } if (verbose) Rprintf("\n"); return; } /* * Emulate the WavDE function in C (but not plotting information) * and don't return the wavelet coefficients. */ #define HARDTHRESH(w,t) ( fabs((w)) > (t) ? 
(w) : (0.0)) void CWavDE(double *x, int *n, double *minx, double *maxx, int *Jmax, double *threshold, double *xout, double *fout, int *nout, double *PrimRes, double *SFx, double *SFy, int *lengthSF, double *WVx, double *WVy, int *lengthWV, int *kmin, int *kmax, int *kminW, int *kmaxW, double *xminW, double *xmaxW, double *phiLH, double *phiRH, double *psiLH, double *psiRH, int *verbose, int *error) /*--------------------- * Argument description *--------------------- double *x:: The data int *n:: The length of the data double *minx:: The min of the data double *maxx:: The max of the data int *Jmax:: The number of levels in the expansion double *threshold:: Threshold value for thresholding the wv coefs vvv Output Variables vvv double *xout:: The grid on which the density estimate is defined double *fout:: The density estimate defined on the above grid int *nout:: The length of the grid vvv Input variables again vvv double *PrimRes:: The primary resolution double *SFx:: The grid on which the scaling function is defined double *SFy:: The scaling function int *lengthSF:: The length of the grid double *WVx:: The grid on which the wavelet is defined double *WVy:: The wavelet function int *lengthWV:: The length of the grid int *kmin:: minimum k for scaling function coefficient comp. int *kmax:: maximum k for scaling function coefficient comp. int *kminW:: as above but for each wavelet level (1:Jmax) int *kmaxW:: as above but for each wavelet level (1:Jmax) double *xminW:: minimum x value for each level for wavelet double *xmaxW:: maximum x value for each level for wavelet double *phiLH:: left hand end of support of Phi double *phiRH:: right hand end of support of Phi double *psiLH:: left hand end of support of psi double *psiRH:: right hand end of support of psi int *verbose:: Print messages or not? int *error:: Error codes *---------------------*/ /* Error codes 0 - O.k. 1 - Memory error */ { register int i,k,l,j,twopowjp1; register int la; double atmp; double *a; double sum; double divisor; double widthSF,widthWV; double evalF(double *Fx, double *Fy, int *lengthF, double widthF, double x); double xmin, xmax; /* Note these are not the same as maxx and minx */ double SFYscale, WVYscale; /* I forgot to multiply by p^{1/2} etc. */ if (*verbose > 1) Rprintf("Entered CWavDE function\n"); *kmin = (int)floor(*minx - *phiRH/ *PrimRes); *kmax = (int)ceil(*maxx - *phiLH/ *PrimRes); if (*verbose > 1) Rprintf("kmin is %d, kmax is %d\n", *kmin, *kmax); /*MAN: changed %ld to %d since declared as int (L1329) */ la = (int)(*kmax - *kmin) + 1; if ((a = (double *)malloc((unsigned)(sizeof(double)*la)))==NULL) { *error = 1; return; } /* Now compute the widths of the wavelet/scaling function supports */ widthSF = *(SFx+(int)*lengthSF-1) - *SFx; widthWV = *(WVx+(int)*lengthWV-1) - *WVx; /* * Now work out all of the scaling function coefficients */ k = (int)*kmin; /* I forgot to multiply by p^{1/2} ! 
*/ SFYscale = sqrt(*PrimRes); for (i=0; i xmax) xmax = *(xmaxW + j); } divisor = (xmax-xmin)/(double)(*nout-1); for(i=0; i< (int)*nout; ++i) { *(fout+i) = 0.0; *(xout+i) = xmin + (double)i*divisor; } k = *kmin; for (i=0; i 0) Rprintf("Wavelet step: level %d\n", j); twopowjp1 = 1 << (j+1); /* MAN: added parentheses for bit shift */ divisor = *PrimRes*(double)twopowjp1; WVYscale = sqrt(divisor); la = (int)(*(kmaxW+j) - *(kminW+j)) + 1; if ((a = (double *)malloc((unsigned)(sizeof(double)*la)))==NULL) { *error = 1; return; } /* Now compute the coefficients for this level j */ k = *(kminW+j); for(i=0; i *(Fx + (int)*lengthF - 1)) return(0.0); /* * From VALGRIND check, changed the next line from this a = (double)((int)*lengthF - 1) * (x - *Fx)/widthF; * * To this next one immediately after this comment. * This is because if x was equal to the RH end then the ratio would be * one, and then il would be the last element of the array and ir could be * OUTSIDE the array. */ a = (double)((int)*lengthF - 2) * (x - *Fx)/widthF; /* Now a should always be >= 0, since we've already rejected any possible negatives, so we don't have to use floor & ceil here Just (int) will do. */ il = (int)a; ir = il+1; fp = a - (double)il; return( ((1.0-fp)* *(Fy+il)) + (fp* *(Fy+ir)) ); } #define MAX(a,b) ( (a) < (b) ? (b) : (a)) #define MIN(a,b) ( (a) < (b) ? (a) : (b)) void CScalFn(double *v, double *ans, int *res, double *H, int *lengthH) { register int k,n; double sum; int b,e; for(n=0; n< (int)*res; ++n) { sum = 0.0; b = MAX(0, (int )ceil( ((double)(n+1- *lengthH))/2.0)); e = MIN(*res, (int )floor(((double) n)/2.0)); for(k=b; k<= e; ++k) { sum += *(H+n-2*k) * *(v+k); } *(ans+n) = sum; } } /* Perform tensor product wavelet transform */ void tpwd(double *image, int *nrow, int *ncol, int *levr, int *levc, int *firstCr, int *lastCr, int *offsetCr, int *firstDr, int *lastDr, int *offsetDr, int *firstCc, int *lastCc, int *offsetCc, int *firstDc, int *lastDc, int *offsetDc, int *type, int *bc, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *image:: The image to decompose int *nrow:: The number of rows in the image int *ncol:: The number of cols in the image int *levr:: The number of levels as rows in the image int *levc:: The number of levels as cols in the image int *firstCr:: The first possible C coef at a given level int *lastCr:: The last possible C coef at a given level int *offsetCr:: Offset from C[0] for certain level's coeffs int *firstDr:: The first possible D coef at a given level int *lastDr:: The last possible D coef at a given level int *offsetDr:: Offset from D[0] for certain level's coeffs int *firstCc:: The first possible C coef at a given level int *lastCc:: The last possible C coef at a given level int *offsetCc:: Offset from C[0] for certain level's coeffs int *firstDc:: The first possible D coef at a given level int *lastDc:: The last possible D coef at a given level int *offsetDc:: Offset from D[0] for certain level's coeffs int *type:: The type of wavelet decomposition int *bc:: Method of boundary correction double *H:: The wavelet filter int *LengthH:: The length of the wavelet filter int *error:: 0=no error, various errors possible *---------------------*/ { register int i,j; double *C; /* temporary store for input/output data */ double *D; /* temporary store for wavelet coefficients */ void wavedecomp(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int 
*offsetD, int *type, int *bc, int *error); *error = 0; if ((C = (double *)malloc(2*(unsigned)*ncol *sizeof(double)))==NULL) { *error = 1; return; } if ((D = (double *)malloc((unsigned)*ncol *sizeof(double)))==NULL) { *error = 2; return; } /* First do the wavelet transform across all rows in the image for each row */ for(i=0; i< *nrow; ++i) { /* Copy the row across - sorry there is probably a more efficient way to do this in-place, but what the hell. */ for(j=0; j< *ncol; ++j) { *(D+j) = 0.0; *(C+j) = ACCESS(image, *ncol, i, j); } /* Now do the jolly old wavelet transform */ wavedecomp(C, D, H, LengthH, levc, firstCc, lastCc, offsetCc, firstDc, lastDc, offsetDc, type, bc, error); if (*error != 0) return; /* And put the answers back in the image array */ ACCESS(image, *ncol, i, 0) = *(C+ (*ncol*2)-2); for(j=1; j< *ncol; ++j) { ACCESS(image, *ncol, i, j) = *(D+j-1); } } free(C); free(D); /* Now do it the other way around */ if ((C = (double *)malloc(2*(unsigned)*nrow *sizeof(double)))==NULL) { *error = 1; return; } if ((D = (double *)malloc((unsigned)*nrow *sizeof(double)))==NULL) { *error = 2; return; } /* Second do the wavelet transform across all cols in the image for each col */ for(j=0; j< *ncol; ++j) { /* Copy the row across - sorry there is probably a more efficient way to do this in-place, but what the hell. */ for(i=0; i< *nrow; ++i) { *(D+i) = 0.0; *(C+i) = ACCESS(image, *ncol, i, j); } /* Now do the jolly old wavelet transform */ wavedecomp(C, D, H, LengthH, levr, firstCr, lastCr, offsetCr, firstDr, lastDr, offsetDr, type, bc, error); if (*error != 0) return; /* And put the answers back in the image array */ ACCESS(image, *ncol, 0, j) = *(C+ (*nrow*2)-2); for(i=1; i< *nrow; ++i) { ACCESS(image, *ncol, i, j) = *(D+i-1); } } free(C); free(D); } /* Inverse tensor product wavelet transform */ void tpwr(double *image, int *nrow, int *ncol, int *levr, int *levc, int *firstCr, int *lastCr, int *offsetCr, int *firstDr, int *lastDr, int *offsetDr, int *firstCc, int *lastCc, int *offsetCc, int *firstDc, int *lastDc, int *offsetDc, int *type, int *bc, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *image:: The tpwd coefficients to reconstruct int *nrow:: The number of rows in the image int *ncol:: The number of cols in the image int *levr:: The number of levels as rows in the image int *levc:: The number of levels as cols in the image int *firstCr:: The first possible C coef at a given level int *lastCr:: The last possible C coef at a given level int *offsetCr:: Offset from C[0] for certain level's coeffs int *firstDr:: The first possible D coef at a given level int *lastDr:: The last possible D coef at a given level int *offsetDr:: Offset from D[0] for certain level's coeffs int *firstCc:: The first possible C coef at a given level int *lastCc:: The last possible C coef at a given level int *offsetCc:: Offset from C[0] for certain level's coeffs int *firstDc:: The first possible D coef at a given level int *lastDc:: The last possible D coef at a given level int *offsetDc:: Offset from D[0] for certain level's coeffs int *type:: The type of wavelet decomposition int *bc:: Method of boundary correction double *H:: The wavelet filter int *LengthH:: The length of the wavelet filter int *error:: 0=no error, various errors possible *---------------------*/ { register int i,j; double *C; /* temporary store for input data */ double *D; /* temporary store for wavelet coefficients */ /* The 1D wavelet reconstruction function */ void 
waverecons(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); /* Basically just do tpwd backwards! */ *error = 0; if ((C = (double *)malloc(2*(unsigned)*nrow *sizeof(double)))==NULL) { *error = 1; return; } if ((D = (double *)malloc((unsigned)*nrow *sizeof(double)))==NULL) { *error = 2; return; } /* * First do the wavelet reconstruction over all cols in the image for each col */ for(j=0; j< *ncol; ++j) { /* Copy the row across - sorry there is probably a more efficient way to do this in-place, but what the hell. */ *(C+ (*nrow*2)-2) = ACCESS(image, *ncol, 0, j); for(i=1; i< *nrow; ++i) { *(D+i-1) = ACCESS(image, *ncol, i, j); } /* Now do the jolly old wavelet RECONSTRUCTION */ waverecons(C, D, H, LengthH, levc, firstCc, lastCc, offsetCc, firstDc, lastDc, offsetDc, type, bc, error); if (*error != 0) return; /* And put the answers back in the image array */ for(i=0; i< *nrow; ++i) ACCESS(image, *ncol, i, j) = *(C+i); } free(C); free(D); if ((C = (double *)malloc(2*(unsigned)*ncol *sizeof(double)))==NULL) { *error = 1; return; } if ((D = (double *)malloc((unsigned)*ncol *sizeof(double)))==NULL) { *error = 2; return; } /* * Second do the wavelet reconstruction over all rows in the image for each row */ for(i=0; i< *nrow; ++i) { /* Copy the row across - sorry there is probably a more efficient way to do this in-place, but what the hell. */ *(C+ (*ncol*2)-2) = ACCESS(image, *ncol, i, 0); for(j=1; j< *ncol; ++j) { *(D+j-1) = ACCESS(image, *ncol, i, j); } /* Now do the jolly old wavelet reconstruction */ waverecons(C, D, H, LengthH, levr, firstCr, lastCr, offsetCr, firstDr, lastDr, offsetDr, type, bc, error); if (*error != 0) return; /* And put the answers back in the image array */ for(j=0; j< *ncol; ++j) ACCESS(image, *ncol, i, j) = *(C+j); } free(C); free(D); } #define ZILCHTOL 1.0E-300 /* Zero tolerance for Shannon entropy */ #define STOP 1 /* Code for stopping */ #define LEFT 2 /* Code for going left */ #define RIGHT 3 /* Code for going right */ /* Compute Shannon-Weaver entropy substitute - the l^2 log (l^2) "norm" */ void ShannonEntropy(double *v, int *lengthv, double *zilchtol, double *answer, int *error) { register int i; double *vsq; double sum=0.0; double SW=0.0; /* Make private copy of squared coefficients */ *error = 0; if ((vsq = (double *)malloc((unsigned)*lengthv*sizeof(double)))==NULL) { *error = 15000; return; } for(i=0; i < *lengthv; ++i) { *(vsq + i) = *(v+i) * *(v+i); sum += *(vsq+i); if ( *(vsq+i) == 0.0) *(vsq+i) = 1.0; SW += *(vsq+i) * log(*(vsq+i)); } if (sum < *zilchtol) *answer = 0.0; else *answer = -SW; free(vsq); return; } #define ACCESSU(uvec, fv, lev, j) *(uvec + *(fv+lev) + j) void Cmnv(double *wst, double *wstC, int *LengthData, int *nlevels, int *upperctrl, double *upperl, int *firstl, int *verbose, int *error) /*--------------------- * Argument description *--------------------- double *wst:: Table of wavelet packet coefficients double *wstC:: Table of scaling function coefficients int *LengthData:: Length of original data set int *nlevels:: Number of levels in the decomposition int *upperctrl:: Vector to record "control" decisions double *upperl:: Vector to record minimum entropies int *firstl:: Index vector into previous two vectors int *verbose:: Print out verbose messages (1=yes, 0=no) int *error:: Error condition *---------------------*/ { register int i,j,k; register int nll, nul; /* Number of packets in lower and upper levels */ register 
int kl, kr; /* Daughter packet indices, left and right */ int PacketLength; /* Generic packet lengths */ double *pkt, *pktl, *pktr; /* Generic packets */ double *cpkt; /* Combined packet for level zero computations */ double mpE, dlE, drE; /* Entropies for mother and left & right daughters */ double zilchtol; /* A zero tolerance */ double *getpacket(double *wst, int nlevels, int level, int index, int *error); void ShannonEntropy(double *v, int *lengthv, double *zilchtol, double *answer, int *error); *error = 0; zilchtol = ZILCHTOL; if (*verbose == 1) Rprintf("Cmnv: function entered\n"); nll = (int)*LengthData; nul = nll >> 1; /* Go through each level. i refers to the lower level */ for(i=0; i <= *nlevels-1; ++i) { if (*verbose==1) Rprintf("Cmnv: Packets. Lower: %d Upper %d\n", nll, nul); for(j=0; j>= 1; nll >>= 1; } } /* * Wavelet packet node vector computations * * (from ~guy/projects/WAVELETS/PACKET/wpCmnv.c) */ #define TOP 1 #define BOTTOM 2 void wpCmnv(double *wp, int *LengthData, int *nlevels, int *upperctrl, double *upperl, int *firstl, int *verbose, int *error) /*--------------------- * Argument description *--------------------- double *wp:: Table of wavelet packet coefficients int *LengthData:: Length of original data set int *nlevels:: Number of levels in the decomposition int *upperctrl:: Vector to record "control" decisions double *upperl:: Vector to record minimum entropies int *firstl:: Index vector into previous two vectors int *verbose:: Print out verbose messages (1=yes, 0=no) int *error:: Error condition *---------------------*/ { register int i,j; register int nll, nul; /* Number of packets in lower and upper levels */ register int kl, kr; /* Daughter packet indices, left and right */ int PacketLength; /* Generic packet lengths */ double *pkt, *pktl, *pktr; /* Generic packets */ double mpE, dE; /* Entropies for mother and daughters */ double zilchtol; /* A zero tolerance */ double tmp; /* Temporary holder */ double *getpacket(double *wst, int nlevels, int level, int index, int *error); void ShannonEntropy(double *v, int *lengthv, double *zilchtol, double *answer, int *error); *error = 0; zilchtol = ZILCHTOL; if (*verbose == 1) Rprintf("wpCmnv: function entered\n"); nll = (int)*LengthData; nul = nll >> 1; /* Go through each level. i refers to the lower level */ for(i=0; i <= *nlevels-1; ++i) { if (*verbose==1) Rprintf("wpCmnv: Packets. Lower: %d Upper %d\n", nll, nul); for(j=0; j>= 1; nll >>= 1; } } /* * WPST - Stationary wavelet packet algorithm (i.e "The nightmare") */ void wpst(double *ansvec, int *lansvec, int *nlev, int *finish_level, int *avixstart, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *ansvec:: Vector of length *lansvec that contains the original data and will contain the stationary wavelet packet coefficients on exit int *lansvec:: Length of the ansvec vector int *nlev:: The number of levels in this transform int *finish_level:: The last level to decompose to int *avixstart:: A vector of length (*nlev+1). The index ranging from 0 to *nlev. The entries in this vector are indices into the ansvec vector indicating the start index for packets for a given level. e.g. *(avixstart + 0) = 0 (always). So that the first index for level 0 packets in ansvec is 0. e.g. *(avixstart+1)=256 (for *nlev=4). So that the first index for level 1 packets in ansvec is 256 etc. 
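(Ed. note on the main loop below: i indexes the child level and plev = i+1 its parent; each of the pnpkts parent packets, of length ppktlength, is copied into c_in and handed to wpsub, which returns the four half-length child packets c_out, d_out, c_outR and d_outR, the R versions apparently being the odd-phase, i.e. rotated, analogues that make the packet transform non-decimated; their destination is the level i region of ansvec, which starts at *(avixstart + i) as described above.)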
double *H:: Filter smoothing coefficients as with all other algs int *LengthH:: The number of filter smoothing coefficients int *error:: Error code. 0=o.k. 1 - memory error in creating c_in 2-5 - memory error for c_out, d_out, c_outR, d_outR *---------------------*/ { register int i,j,k, plev; int pnpkts, ppktlength; double *c_in, *c_out, *d_out, *c_outR, *d_outR; void wpsub(double *c_in, int lc_in, double *c_out, double *d_out, double *c_outR, double *d_outR, double *H, int *LengthH); /* * i represents the child level. Go through each child level, filling in * coefficients as you go */ for(i=(int)*nlev-1; i>=(int)*finish_level; --i) { plev = i+1; /* Parent level */ pnpkts = NPKTS((int)plev, *nlev); /* Number of pkts at p lev */ ppktlength = PKTLENGTH((int)plev); /* Length at parent level */ /* Create input and output packets */ if ((c_in = (double *)malloc((unsigned)(ppktlength*sizeof(double))))==NULL) { *error = 1; return; } if ((c_out = (double *)malloc((unsigned)(sizeof(double)*ppktlength/2)))==NULL) { *error = 2; return; } if ((d_out = (double *)malloc((unsigned)(sizeof(double)*ppktlength/2)))==NULL) { *error = 3; return; } if ((c_outR = (double *)malloc((unsigned)(sizeof(double)*ppktlength/2)))==NULL) { *error = 4; return; } if ((d_outR = (double *)malloc((unsigned)(sizeof(double)*ppktlength/2)))==NULL) { *error = 5; return; } for(j=0; j< pnpkts; ++j) { /* Go thru each parent pkt */ /* Copy parent packet to c_in */ for(k=0; k>i); mask <<= 1; multiplier *= 4; } } /* Next code is from Arne Kovac */ /* Increasing this value would remove some nearly empty diagonals */ double thr=0.0; /* The doubledouble structure is used by the makegrid function for sorting the data with respect to the x-component. */ struct doubledouble { double x; double y; } Doubledouble; /* The ddcomp function is used by the makegrid function for sorting the data as an argument for qsort. */ int ddcomp(const void *a, const void *b) /* MAN: changed to work in qsort below */ { struct doubledouble *q1=(struct doubledouble *)a; struct doubledouble *q2=(struct doubledouble *)b; int t; if(q1->x>q2->x) /* return 1; */ t=1; else if(q1->xx) /* return -1; */ t=-1; else /* return 0; */ t=0; return t; } void makegrid(double *x, double *y, int *n, double *gridx, double *gridy, int *gridn, double *G, int *Gindex) /* This function computes from observations in x und y new data on a grid of length gridn as well as a description of the matrix that maps the original data (or better the ordered original data, so that x[i]<=x[j] when i<=j) to the new grid data. input: x, y vector of observations of length n gridn output: gridx, gridy the constructed grid of length gridn */ { struct doubledouble *q; int i,li=0,ind; /* First, sort the data with respect to x. */ q=(void *)malloc(*n*sizeof(Doubledouble)); for(i=0;i<*n;i++) { q[i].x=x[i]; q[i].y=y[i]; } qsort(q,(size_t)*n,sizeof(struct doubledouble),ddcomp); /* MAN: used to be (int *)ddcomp */ /* Now create the new grid data. */ for(i=0;i<*gridn;i++) { gridx[i]=(i+0.5)/(*gridn); /* Determine the index of the nearest observation left to the grid time point. */ while((li<*n-1)&&(q[li+1].x=gridx[i]) /* We are at the left end */ { gridy[i]=q[0].y; ind=0; G[i]=1; } else { gridy[i]=q[li].y+(gridx[i]-q[li].x)*(q[li+1].y-q[li].y) /(q[li+1].x-q[li].x); ind=li; G[i]=1-(gridx[i]-q[li].x)/(q[li+1].x-q[li].x); } Gindex[i]=ind; } free(q); } /* sigmastruct describes a covariance matrix of size n. diag is an n-vector of pointers to double vectors that correspond to the diagonals of the matrix. 
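Entry (i,j) is stored as diag[d][(i+j-d)/2] with d = |i-j|; since (i+j-d)/2 equals min(i,j), each diagonal is simply indexed by its row position (see putSigma below; ed. note).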
If diag[i]==NULL, then the i-th diagonal is empty. This representation is useful for covariance matrices with a band structure. */ struct sigmastruct { int n; double **diag; }; /* createSigma allocates memory for a new covariance matrix of size n. */ int createSigma(struct sigmastruct *Sigma, int n) { int i; Sigma->n=n; if((Sigma->diag=malloc(n*sizeof(double *)))==NULL) { return(-1); } for(i=0;idiag)[i]=NULL; return(0); } /* freeSigma releases the memory used by a sigmastruct element. */ void freeSigma(struct sigmastruct *Sigma) { int i; for(i=0;in;i++) if(Sigma->diag[i]!=NULL) free((Sigma->diag)[i]); free(Sigma->diag); } /* CleanupSigma removes diagonals that contain only elements < thr. */ void cleanupSigma(struct sigmastruct *Sigma) { int i,j; for(i=0;in;i++) if((Sigma->diag)[i]!=NULL) { j=0; while((jn-i)&&(fabs((Sigma->diag)[i][j])=Sigma->n-i) { free(Sigma->diag[i]); Sigma->diag[i]=NULL; } } } /* putSigma changes the entry in the i-th row and j-th column to s and allocates memory for the diagonal if necessary. */ int putSigma(struct sigmastruct *Sigma, int i, int j, double s) { int d=abs(i-j); if(fabs(s)>0.0000001){ /* MAN: added brace to avoid ambiguity */ if((i>=Sigma->n)||(j>=Sigma->n)) { return(-1); /* puts("Error: This element does not exist."); */ } else { if((Sigma->diag)[d]==NULL) if((Sigma->diag[d]=calloc(Sigma->n-d,sizeof(double)))==NULL) return(-2); (Sigma->diag)[d][(i+j-d)/2]=s; } } /* MAN: added */ return(0); /* MAN: added, hopefully won't have bad consequences. */ } /* allocateSigma allocates memory for diagonals of a covariance matrix, specified by the boolean vector d */ int allocateSigma(struct sigmastruct *Sigma, int *d) { int i; for(i=0;in;i++) if(d[i]==TRUE) if((Sigma->diag[i]=calloc(Sigma->n-i,sizeof(double)))==NULL) { *d = (Sigma->n - i)*sizeof(double); return(-1); } return(0); } /* computec is the function that computes the factors for the variances of the wavelet coefficients. Gmatrix, Gindex describe the matrix that maps the original data to the grid data as received by makegrid (s.o.) n, gridn are the numbers of original and grid observations H, LengthH describe the used wavelet filter. The filter coefficients are stored in the vector H, their number in LengthH. 
bc is either PERIODIC or SYMMETRIC, the boundary correction to be used c contains afterwards the coefficients c_{jk}, such that Var(w_{jk})=c_{jk}\cdot\sigma^2 */ void computec(int *n,double *c,int *gridn,double *Gmatrix,int *Gindex, double *H, int *LengthH, int *bc, int *error) { int virtgn,i,j,k,l,d,zaehler=0,gn=*gridn,laststart=0; int ii,dd,jj,o1,o2,iiG,iiH,jjG,jjH,gn2,LengthH2=*LengthH/2,dH,dG; int *NEEDIT,offset,offset2,band,band1,band2; double cellC,cellD,sig,G[20]; double ProductG[20][20],ProductH[20][20]; struct sigmastruct Sigma,Sigma2,Sigma3; int rc; int createSigma(struct sigmastruct *Sigma, int n); int putSigma(struct sigmastruct *Sigma, int i, int j, double s); int allocateSigma(struct sigmastruct *Sigma, int *d); if(*LengthH>20) { REprintf("Sorry, you can not use this procedure with more than 20 filter coefficients!"); *error = 1; return; } if((NEEDIT=malloc(*gridn*sizeof(int)))==NULL) { *error = 2; *n = *gridn * sizeof(int); /* Contains number of bytes requested */ return; } /* First step: Compute Filter G from Filter H */ sig=-1.0; for(k=0;k<*LengthH;k++) { G[*LengthH-k-1]=sig*H[k]; sig=-sig; } for(k=0;k<*LengthH;k++) for(l=0;l<*LengthH;l++) { ProductG[k][l]=G[k]*G[l]; ProductH[k][l]=H[k]*H[l]; } /* Second step: Compute the variance/covariance-matrix of the grid data */ if (createSigma(&Sigma,gn) < 0) { *error = 3; *n = (int)gn * sizeof(double); return; } for(i=0;i<*gridn;i++) { j=laststart; while(Gindex[i]-Gindex[j]>=2) j++; laststart=j; for(;j<=i;j++) { switch(Gindex[i]-Gindex[j]) { case 1: rc = putSigma(&Sigma,i,j,Gmatrix[i]*(1.0-Gmatrix[j])); if (rc < 0) { if (rc == -1) { *error = 4; return; } if (rc == -2) { *error = 5; *n = (Sigma.n - abs(i-j))*sizeof(double); return; } } break; case 0: rc= putSigma(&Sigma,i,j,Gmatrix[i]*Gmatrix[j]+(1.0-Gmatrix[i])*(1.0-Gmatrix[j])); if (rc < 0) { if (rc == -1) { *error = 4; return; } if (rc == -2) { *error = 5; *n = (Sigma.n - abs(i-j))*sizeof(double); return; } } break; } } } /* And now the difficult part... */ if(*bc==PERIODIC) { while(gn>=2) /* Apply the wavelet filters to the covariance matrix Sigma. */ { gn2=gn/2; /* gn and gn2 are the sizes of Sigma and Sigma2 (or Sigma3). */ /* Store the result of the high pass filter in sigma2... */ if (createSigma(&Sigma2,gn2)<0){ *error = 3l; *n = gn2 * sizeof(double); return; } /* ... and the result of the low pass filter in sigma3. */ if (createSigma(&Sigma3,gn2)<0) { *error = 3; *n = gn2 * sizeof(double); return; } cleanupSigma(&Sigma); /* First we need to know which diagonals in sigma2 and sigma3 will not be empty. */ band1=gn/2; band2=gn/2+1; while((band1>=0)&&(Sigma.diag[band1]==NULL)) band1--; /* GPN Bugfix 22 Apr 2022. This was band1- which seems wrong. band1 needs to decrease itself */ while((band2<=gn-1)&&(Sigma.diag[band2]==NULL)) band2++; if(band1<=gn-band2) band=gn-band2; else band=band1; if(band+*LengthH>gn) for(d=0;dthr) { o1=j%2; iiH=j/2; iiG=(iiH+LengthH2-1)%(gn2); for(k=0;kthr) { o1=(d+j)%2; iiH=((d+j+*gridn)/2)%(gn2); iiG=(iiH+LengthH2-1)%(gn2); for(k=0;k=2) { /* First of all, we want to know how many diagonals and extra coeffs Sigma2 and Sigma3 have. */ if(offset%2==0) gn2=(gn+1)/2+LengthH2-1; else gn2=(gn+2)/2+LengthH2-1; offset2=(offset+*LengthH-1)/2; /* Now allocate memory for them. */ if (createSigma(&Sigma2,gn2) <0) { *error = 3; *n = gn2 * sizeof(double); return; } if (createSigma(&Sigma3,gn2) <0) { *error = 3; *n = gn2*sizeof(double); return; } cleanupSigma(&Sigma); /* Again, we need to know which diagonals in sigma2 and sigma3 will not be empty. 
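(Ed. note: band is walked in from the outermost diagonal of Sigma until a non-empty one is found; since one filtering pass can only spread a coefficient's influence by roughly the filter length, band plus a multiple of *LengthH bounds which diagonals of Sigma2 and Sigma3 can become non-zero, and only those need to be allocated.)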
*/ band=gn-1; while((band>=0)&&(Sigma.diag[band]==NULL)) band--; if(band+2*(*LengthH)>gn) for(d=0;dthr) { o1=(j+offset)%2; iiH=(j+offset%2)/2+LengthH2-1; iiG=iiH; for(k=0;kthr) { o1=(d+j+offset)%2; iiH=(d+j+offset%2)/2+LengthH2-1; iiG=iiH; for(k=0;k=gn) { ii=2*gn-ii-1; } if(jj>=gn) { jj=2*gn-jj-1; } dd=abs(ii-jj); if(Sigma.diag[dd]!=NULL) { sig=(Sigma.diag)[dd][(ii+jj-dd)/2]; cellC+=sig*ProductH[k][l]; cellD+=sig*ProductG[k][l]; } } } Sigma2.diag[d][j]=cellD; Sigma3.diag[d][j]=cellC; } for(j=gn2-d-1;(j>=0)&&(j>=gn2-d-LengthH2+1-offset%2);j--) { cellC=0; cellD=0; i=d+j; iiG=2*i-(*LengthH-2)-offset%2; for(k=0;k<*LengthH;k++,iiG++) { jjG=2*j-(*LengthH-2)-offset%2; for(l=0;l<*LengthH;l++,jjG++) { ii=iiG; jj=jjG; if(ii<0) ii=-ii-1; if(jj<0) jj=-jj-1; if(ii>=gn) { ii=2*gn-ii-1; } if(jj>=gn) { jj=2*gn-jj-1; } dd=abs(ii-jj); if(Sigma.diag[dd]!=NULL) { sig=(Sigma.diag)[dd][(ii+jj-dd)/2]; cellC+=sig*ProductH[k][l]; cellD+=sig*ProductG[k][l]; } } } Sigma2.diag[d][j]=cellD; Sigma3.diag[d][j]=cellC; } } /* This looks now pretty the same as in the periodic case. */ for(j=0;j= *donej) { ll = *(lvec+l); sum = 0.0; for(k=max(1-ll, 1-lj); k <= min(lj-1, ll-1); ++k) { sum += ACCESSW(w, j, k-1+lj) * ACCESSW(w, l, (-k)-1+ll); } *(fmat+*J*l+j) = *(fmat+*J*j+l) = sum; } } } /* Now free the w */ for(j=0; j<*J; ++j) { free((void *)*(w+j)); } free((void *)w); } void wlpart(int *J, int *BigJ, double *H, int *LengthH, int *error) { register int KeepGoing; register int somefull; register int allnonzero; register int i; register int j; double *TheData; int ndata; double *C, *D; int *firstC, *lastC, *offsetC, *firstD, *lastD, *offsetD; int LengthC, LengthD, levels; int type,bc; int *ixvec; void simpleWT(double *TheData, int *ndata, double *H, int *LengthH, double **C, int *LengthC, double **D, int *LengthD, int *levels, int **firstC, int **lastC, int **offsetC, int **firstD, int **lastD, int **offsetD, int *type, int *bc, int *error); void waverecons(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); *error=0; *BigJ = *J + 1; KeepGoing = TRUE; while(KeepGoing) { /* Rprintf("Entered loop BigJ is %ld\n", *BigJ); */ ndata = (int)0x01 << *BigJ; /* Rprintf("ndata is %ld\n", ndata);*/ /* * Basically a dummy wavelet transform to set up first/last stuff */ if ((TheData = (double *)malloc((unsigned)ndata*sizeof(double)))==NULL) { *error = 110; return; } for(i=0; i=0; --i) if (*(v+i) == 0.0) break; return(i); } /* * Cyclically rotate a vector n places to the left * (a C replacement for guyrot) */ void rotateleft(double *v, int *nv, int *n, int *error) { register int i; double *tmp; /* Storage for the ones the fall off the left */ *error = 0; *n = *n % *nv; if (*n == 0) /* No rotation required */ return; if ((tmp = (double *)malloc((unsigned)(*n)*sizeof(double))) == NULL) { *error = 120; return; } for(i=0; i< *n; ++i) *(tmp+i) = *(v+i); for(i=0; i< *nv - *n; ++i) *(v+i) = *(v+i+*n); for(i=0; i< *n; ++i) *(v+ i + *nv - *n) = *(tmp+i); free((void *)tmp); } void rainmatPARENT(int *J, double *H, int *LengthH, double *fmat, double *tol, int *error) /*--------------------- * Argument description *--------------------- int *J:: The dimension of the problem double *H:: The wavelet filter coefficients int *LengthH:: The number of wavelet filter coefficients double *fmat:: The answer double *tol:: Elements smaller than this will be deleted int *error:: Error code. 
Nonzero is an error *---------------------*/ { register int i; int BigJ; /* The level we must go to to be able to compute * coefficients without error */ int donej; /* Only for partial */ double **coefvec; /* These are the \Psi_j (\tau) */ int *lvec; /* Vector of length *J contains the length * of each vector in coefvec */ void wlpart(int *J, int *BigJ, double *H, int *LengthH, int *error); void mkcoef(int *J, int BigJ, double *H, int *LengthH, double ***coefvec, int *lvec, double *tol, int *error); void rainmat(int *J, int *donej, double **coefvec, int *lvec, double *fmat, int *error); void haarmat(int *J, int *donej, double *fmat, int *error); donej = 0; if (*LengthH == 2) /* Haar - can compute exactly */ { haarmat(J, &donej, fmat, error); return; } /* whichlevel */ wlpart(J, &BigJ, H, LengthH, error); if (*error != 0) return; /* mkcoef */ if ((lvec = (int *)malloc((unsigned)*J*sizeof(int)))==NULL) { *error = 130; return; } for(i=0; i<*J; ++i) *(lvec+i) = 0; mkcoef(J, BigJ, H, LengthH, &coefvec, lvec, tol, error); if (*error != 0) return; /* rainmat */ donej = 0; rainmat(J, &donej, coefvec, lvec, fmat, error); if (*error != 0) return; free((void *)lvec); for(i=0; i<*J; ++i) free((void *)*(coefvec+i)); free((void *)coefvec); } /* Make \Psi_j(\tau) components */ void mkcoef(int *J, int BigJ, double *H, int *LengthH, double ***coefvec, int *lvec, double *tol, int *error) /*--------------------- * Argument description *--------------------- int *J:: Dimension of the problem int BigJ:: The maximum depth that we have to go to double *H:: Wavelet filter coefficients int *LengthH:: Number of wavelet filter coefficients double ***coefvec:: Coefficients of \Psi_j(\tau) int *lvec:: Vector of length *J that will contain length of each component of coefvec double *tol:: Elements smaller than this will be deleted int *error:: Error code *---------------------*/ { register int i,j; register int large_ones; int ndata; int *ixvec; /* Index vector for inserting 1s into blank WT */ double **lcoefvec; /* Local version of coefvec */ double *tmpcfvec; /* Temporary vector */ /* Things needed for the simpleWT */ double *TheData; double *C, *D; int *firstC, *lastC, *offsetC, *firstD, *lastD, *offsetD; int LengthC, LengthD, levels; int type,bc; int n_to_rotate; void simpleWT(double *TheData, int *ndata, double *H, int *LengthH, double **C, int *LengthC, double **D, int *LengthD, int *levels, int **firstC, int **lastC, int **offsetC, int **firstD, int **lastD, int **offsetD, int *type, int *bc, int *error); int idlastzero(double *v, int *nv); void rotateleft(double *v, int *nv, int *n, int *error); void waverecons(double *C, double *D, double *H, int *LengthH, int *levels, int *firstC, int *lastC, int *offsetC, int *firstD, int *lastD, int *offsetD, int *type, int *bc, int *error); ndata = (int)0x01 << BigJ; /* * Create ixvec */ if ((ixvec = (int *)malloc((unsigned)BigJ*sizeof(int)))==NULL){ *error = 140; return; } for(i=0; i< BigJ; ++i) *(ixvec+i) =(0x01 << (BigJ -1 - i)); for(i=1; i< BigJ; ++i) *(ixvec+i) = *(ixvec+i-1) + *(ixvec+i); for(i=0; i< BigJ; ++i) --*(ixvec+i); /* * Basically a dummy wavelet transform to set up first/last stuff */ if ((TheData = (double *)malloc((unsigned)ndata*sizeof(double)))==NULL) { *error = 141; return; } for(i=0; i *tol) ++large_ones; /* Now get memory for the large ones */ if ((tmpcfvec = (double *)malloc((unsigned)large_ones*sizeof(double)))==NULL) { *error = 143; return; } large_ones = 0; for(j=0; j *tol) *(tmpcfvec+large_ones++) = *(TheData+j); /* Install this vector into the 
array */ *(lcoefvec+i-1) = tmpcfvec; *(lvec+i-1) = (int)large_ones; } /* Install the lcoefvec into the coefvec */ *coefvec = lcoefvec; free((void *)ixvec); free((void *)TheData); } void rainmatOLD(int *J, double *coefvec, int *ixvec, int *lvec, double *fmat, int *error) /*--------------------- * Argument description *--------------------- int *J:: The desired maximum level (positive) double *coefvec:: The \psi_{jk} stacked into one vector int *ixvec:: int *lvec:: A vector of lengths of each \psi_j vector in coefvec. The jth element is the length of the jth \psi_j in coefvec double *fmat:: This vector will contain the answer. This is the lower triangular portion of the J*J matrix, and therefore is of length J(J-1)/2 int *error:: Error code 1- Generating **w 2+j Memory error on 2+j th one *---------------------*/ { /* First we compute the w. One for each j */ double **w; register int j,k,m,l,cnt; double sum; int lj,ll; if ((w = (double **)malloc((unsigned)*J*sizeof(double *)))==NULL) { *error = 1; return; } /* Now populate each of the *w */ for(j=0; j<*J; ++j) { if ((*(w+j) = (double *)malloc((unsigned)(*(lvec+j)*2-1)*sizeof(double)))==NULL) { *error = (int)(2+j); return; } } /* Now compute each of the wjk */ for(j=0; j< *J; ++j) { lj = *(lvec+j); for(k = 1-lj; k <= lj-1; ++k) { sum = 0.0; for(m = max(0, k); m <= min(lj-1, lj-1+k); ++m) { sum += *(coefvec+*(ixvec+j)+m) * *(coefvec+*(ixvec+j)+m-k); } ACCESSW(w, j, k-1+lj) = sum; } } /* Now compute the F */ cnt = 0; for(j=0; j<*J; ++j) { lj = *(lvec+j); for(l=j; l<*J; ++l) { ll = *(lvec+l); sum = 0.0; for(k=max(1-ll, 1-lj); k <= min(lj-1, ll-1); ++k) { sum += ACCESSW(w, j, k-1+lj) * ACCESSW(w, l, (-k)-1+ll); } *(fmat+*J*l+j) = *(fmat+*J*j+l) = sum; ++cnt; } } /* Now free the w */ for(j=0; j<*J; ++j) { free((void *)*(w+j)); } free((void *)w); } /* rainmatPARTIAL - partial matrix filling */ void rainmatPARTIAL(int *J, int *donej, double *H, int *LengthH, double *fmat, double *tol, int *error) /*--------------------- * Argument description *--------------------- int *J:: The dimension of the problem int *donej:: The first j dimensions are already filled double *H:: The wavelet filter coefficients int *LengthH:: The number of wavelet filter coefficients double *fmat:: The answer double *tol:: Elements smaller than this will be deleted int *error:: Error code. 
Nonzero is an error *---------------------*/ { register int i; int BigJ; /* The level we must go to to be able to compute * coefficients without error */ double **coefvec; /* These are the \Psi_j (\tau) */ int *lvec; /* Vector of length *J contains the length * of each vector in coefvec */ void wlpart(int *J, int *BigJ, double *H, int *LengthH, int *error); void mkcoef(int *J, int BigJ, double *H, int *LengthH, double ***coefvec, int *lvec, double *tol, int *error); void rainmat(int *J, int *donej, double **coefvec, int *lvec, double *fmat, int *error); void haarmat(int *J, int *donej, double *fmat, int *error); if (*LengthH == 2) /* Haar - can compute exactly */ { haarmat(J, donej, fmat, error); return; } /* whichlevel */ wlpart(J, &BigJ, H, LengthH, error); if (*error != 0) return; /* mkcoef */ if ((lvec = (int *)malloc((unsigned)*J*sizeof(int)))==NULL) { *error = 150; return; } for(i=0; i<*J; ++i) *(lvec+i) = 0; mkcoef(J, BigJ, H, LengthH, &coefvec, lvec, tol, error); if (*error != 0) return; /* rainmat */ rainmat(J, donej, coefvec, lvec, fmat, error); if (*error != 0) return; free((void *)lvec); for(i=0; i<*J; ++i) free((void *)*(coefvec+i)); free((void *)coefvec); } void PsiJ(int *J, double *H, int *LengthH, double *tol, double *wout, int *lwout, int *rlvec, int *error) /*--------------------- * Argument description *--------------------- int *J:: The dimension of the problem double *H:: The wavelet filter coefficients int *LengthH:: The number of wavelet filter coefficients double *tol:: Elements smaller than this will be deleted double *wout:: Answers for \Psi_j(\tau) int *lwout:: Length of previous array int *rlvec:: Vector of length J contains lengths of \psi_j int *error:: Error code. Nonzero is an error *---------------------*/ { register int i; int BigJ; /* The level we must go to to be able to compute * coefficients without error */ double **coefvec; /* These are the \psi_j (\tau) */ int *lvec; /* Vector of length *J contains the length * of each vector in coefvec */ void wlpart(int *J, int *BigJ, double *H, int *LengthH, int *error); void mkcoef(int *J, int BigJ, double *H, int *LengthH, double ***coefvec, int *lvec, double *tol, int *error); void PsiJonly(int *J, double **coefvec, int *lvec, double *wout, int *lwout, int *error); /* whichlevel */ wlpart(J, &BigJ, H, LengthH, error); if (*error != 0) return; /* mkcoef */ if ((lvec = (int *)malloc((unsigned)*J*sizeof(int)))==NULL) { *error = 130; return; } for(i=0; i<*J; ++i) *(lvec+i) = 0; mkcoef(J, BigJ, H, LengthH, &coefvec, lvec, tol, error); if (*error != 0) return; PsiJonly(J, coefvec, lvec, wout, lwout, error); if (*error != 0) return; for(i=0; i<*J; ++i) *(rlvec + i) = *(lvec+i); free((void *)lvec); for(i=0; i<*J; ++i) free((void *)*(coefvec+i)); free((void *)coefvec); } void PsiJonly(int *J, double **coefvec, int *lvec, double *wout, int *lwout, int *error) /*--------------------- * Argument description *--------------------- int *J:: The desired maximum level (positive) double **coefvec:: The \psi_{jk} stacked into one vector int *lvec:: A vector of lengths of each \psi_j vector in coefvec. The jth element is the length of the jth \psi_j in coefvec double *wout:: Output contains the \Psi_j(\tau) int *lwout:: Length of this vector. If it is not long enough an error code is returned int *error:: Error code *---------------------*/ { /* First we compute the w. 
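(Ed. note: each w here is the autocorrelation w_j(k) = sum_m psi_j(m) psi_j(m - k) of the discrete wavelet psi_j held in coefvec; these are exactly the Psi_j(tau) values that are copied into wout at the end of the routine.)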
One for each j */ double **w; register int j,k,m; double sum; int totall; int lj,cnt; /* Check output vector is long enough to store answer */ totall = 0; for(j=0; j < *J; ++j) totall += *(lvec+j)*2l - 1l; if (totall > *lwout) { *error = 160; *lwout = totall; return; } if ((w = (double **)malloc((unsigned)*J*sizeof(double *)))==NULL) { *error = 161; return; } /* Now populate each of the *w */ for(j=0; j<*J; ++j) { if ((*(w+j) = (double *)malloc((unsigned)(*(lvec+j)*2-1)*sizeof(double)))==NULL) { *error = 162; *J = (int)j; return; } } /* Now compute each of the wjk */ for(j=0; j< *J; ++j) { lj = *(lvec+j); for(k = 1-lj; k <= lj-1; ++k) { sum = 0.0; for(m = max(0, k); m <= min(lj-1, lj-1+k); ++m) { sum += *((*(coefvec+j))+m) * *((*(coefvec+j))+m-k); } ACCESSW(w, j, k-1+lj) = sum; } } /* Store the w */ cnt = 0; for(j=0; j < *J; ++j) { lj = *(lvec+j); for(k = 1-lj; k <= lj-1; ++k) { *(wout+cnt) = ACCESSW(w, j, k-1+lj); ++cnt; } } /* Now free the w */ for(j=0; j<*J; ++j) { free((void *)*(w+j)); } free((void *)w); } /* haarmat - Computes matrix exactly using formula */ void haarmat(int *J, int *donej, double *fmat, int *error) /*--------------------- * Argument description *--------------------- int *J:: The desired maximum level (positive) int *donej:: The first j columns already filled double *fmat:: This vector will contain the answer. This is the lower triangular portion of the J*J matrix, and therefore is of length J(J-1)/2 int *error:: Error code *---------------------*/ { register int j,l; double a; double twoj, twol, two2j, two2jmo; for(j=0; j<*J; ++j) { for(l=j; l<*J; ++l) { if (l >= *donej) { if (l==j) { twoj = pow(2.0, ((double)j+1)); two2j = twoj*twoj; a = (two2j + 5.0)/(3.0*twoj); } else { two2jmo = pow(2.0, (double)(2*j+1)); twol = pow(2.0, ((double)l+1)); a = (two2jmo + 1.0)/twol; } *(fmat+*J*l+j) = *(fmat+*J*j+l) = a; } } } } /* * Now follows the code from swt2d.c */ /* * Perform whole of SWT2D after initialising */ void SWT2Dall(double *m, int *nm, double *am, int *J, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *m:: The input data int *nm:: The dimension of the square matrix m double *am:: The *big* answer 3D array int *J:: The level at which to store the initial information double *H:: The smoothing filter int *LengthH:: The length of the smoothing filter int *error:: Error code 0=ok, anything else is memory error *---------------------*/ { int D1, D12; /* Dimensions of am array */ int nm2, nm4; /* nm divided by 2 then 4 */ void initSWT2D(double *m, int *nm, double *am, int *J, double *H, int *LengthH, int *error); void SWT2Drec(double *am, int D1, int D12, int x, int y, int TWOsl, int sl, int J, double *H, int *LengthH, int *error); *error = 0; initSWT2D(m, nm, am, J, H, LengthH, error); if (*error != 0) return; /* * Now for each level use the previous level as the coefficients to * do a 2D wavelet transform for the next level * * Produce level J-2 from J-1 (which was done in initSWT2D) * Produce level J-3 from J-2 ... * ... * Produce level 0 from 1 * Go home! 
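* Ed. note on storage: am is addressed as a 3D array whose first dimension is D1 = *J and whose second is 2*(*nm), so D12 = (*J)*(*nm * 2) is the first-times-second dimension product used by the ACCESS3D macro; within each level's plane, SmallStore lays the smooth packet and the horizontal, vertical and diagonal detail packets into four quadrants offset by the packet side length.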
*/ D12 = (*J)*(*nm * 2); D1 = (*J); nm2 = *nm/2; nm4 = nm2/2; SWT2Drec(am, D1, D12, 0, 0, nm2, nm4, *J-1, H, LengthH, error); if (*error != 0) return; SWT2Drec(am, D1, D12, *nm, 0, nm2, nm4, *J-1, H, LengthH, error); if (*error != 0) return; SWT2Drec(am, D1, D12, 0, *nm, nm2, nm4, *J-1, H, LengthH, error); if (*error != 0) return; SWT2Drec(am, D1, D12, *nm, *nm, nm2, nm4, *J-1, H, LengthH, error); if (*error != 0) return; } void SmallStore(double *am, int D1, int D12, int J, int sl, int x, int y, int ix, int jy, double *hhout, double *hgout, double *ghout, double *ggout, int nm) /*--------------------- * Argument description *--------------------- double *am:: The *big* matrix to store everything in int D1:: First dimension of am int D12:: First and second dimensions of am multiplied int J:: The level to fill int sl:: Side length of small packets int x:: The origin x coordinate int y:: The origin y coordinate int ix:: The smaller matrix i offset int jy:: The smaller matrix j offset double *hhout:: The new smoothed matrix double *hgout:: The new horizontal detail matrix double *ghout:: The new vertical detail matrix double *ggout:: The new diagonal detail matrix int nm:: Size of the hhout, hgout, ghout, ggout *---------------------*/ { register int i,j; for(i=0; i< sl; ++i) for(j=0; j< sl; ++j) { ACCESS3D(am, D1, D12, J, x+i, y+j) = ACCESS(hhout, nm, ix+i, jy+j); ACCESS3D(am, D1, D12, J, x+i, sl+y+j) = ACCESS(hgout, nm, ix+i, jy+j); ACCESS3D(am, D1, D12, J, sl+x+i, y+j) = ACCESS(ghout, nm, ix+i, jy+j); ACCESS3D(am, D1, D12, J, sl+x+i, sl+y+j) = ACCESS(ggout, nm, ix+i, jy+j); } } /* initialise the answer matrix */ void initSWT2D(double *m, int *nm, double *am, int *J, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *m:: The input data int *nm:: The dimension of the square matrix m double *am:: The *big* answer 3D array int *J:: The level at which to store the initial information double *H:: The smoothing filter int *LengthH:: The length of the smoothing filter int *error:: Error code 0=ok, anything else is memory error *---------------------*/ { int mlength; /* Length of vector representing matrix */ int D1, D12; /* 1st and Second dimension of answer matrix */ double *hhout, *hgout, *ghout, *ggout; /* Intermediate stores */ int nm2; /* Half of *nm */ /* Carries out a step of the SWT2D algorithm */ void SWT2D(double *m, int *nm, double *hhout, double *hgout, double *ghout, double *ggout, double *H, int *LengthH, int *error); *error = 0; mlength = *nm * *nm; /* First create some space for hhout, hgout, ghout and ggout. 
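Each is a full *nm by *nm matrix; the single non-decimated step in SWT2D produces the smooth surface hhout together with the horizontal (hgout), vertical (ghout) and diagonal (ggout) detail at the same resolution as the input, and the SmallStore calls below then copy quadrants of these into the level *J-1 plane of am (ed. note).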
*/ if ((hhout = (double *)malloc((size_t)(mlength* sizeof(double))))==NULL){ *error = 7; return; } if ((hgout = (double *)malloc((size_t)(mlength* sizeof(double))))==NULL){ *error = 8; return; } if ((ghout = (double *)malloc((size_t)(mlength* sizeof(double))))==NULL){ *error = 9; return; } if ((ggout = (double *)malloc((size_t)(mlength* sizeof(double))))==NULL){ *error = 10; return; } /* Apply the 2D SWT to the initial data and store the HH, GH, HG, GG * matrices in their appropriate place in the big matrix */ SWT2D(m, nm, hhout, hgout, ghout, ggout, H, LengthH, error); #ifdef PRINTON Rprintf("First hhout matrix\n"); { int i,j; for(i=0; i<*nm; ++i) { Rprintf("[%d, ] ", i); for(j=0; j<*nm; ++j) Rprintf("%lf ", ACCESS(hhout, *nm, i,j)); Rprintf("\n"); } } #endif if (*error != 0) return; /* * Now copy each of the results hhout, hgout, ghout and ggout to the answer * matrix am */ D12 = (*J)*(*nm * 2); D1 = (*J); nm2 = *nm / 2; SmallStore(am, D1, D12, *J-1, nm2, 0l, 0l, 0l, 0l, hhout, hgout, ghout, ggout, *nm); SmallStore(am, D1, D12, *J-1, nm2, *nm, 0l, nm2, 0l, hhout, hgout, ghout, ggout, *nm); SmallStore(am, D1, D12, *J-1, nm2, 0l, *nm, 0l, nm2, hhout, hgout, ghout, ggout, *nm); SmallStore(am, D1, D12, *J-1, nm2, *nm, *nm, nm2, nm2, hhout, hgout, ghout, ggout, *nm); free((void *)hhout); free((void *)hgout); free((void *)ghout); free((void *)ggout); } void SWT2Drec(double *am, int D1, int D12, int x, int y, int TWOsl, int sl, int J, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *am:: The big storage array int D1:: First dimension of am int D12:: First and second dimensions of am multiplied int x:: X origin coordinate of smoothed data int y:: Y origin coordinate of smoothed data int TWOsl:: Side length of smoothed data int sl:: Side length of result packets (2*sl = TWOsl) int J:: Level we accessing from (and putting into j-1) double *H:: The smoothing filter int *LengthH:: The length of the smoothing filter int *error:: Error code *---------------------*/ { register int i,j; double *m; /* Somewhere to put the smoothed data */ int mlength; /* The length of this matrix */ double *hhout, *hgout, *ghout, *ggout; /* Smoothed, hori, verti & diag */ int sl2; /* sl divided by 2 */ void SmallStore(double *am, int D1, int D12, int J, int sl, int x, int y, int ix, int jy, double *hhout, double *hgout, double *ghout, double *ggout, int nm); void SWT2D(double *m, int *nm, double *hhout, double *hgout, double *ghout, double *ggout, double *H, int *LengthH, int *error); void SWT2Drec(double *am, int D1, int D12, int x, int y, int TWOsl, int sl, int J, double *H, int *LengthH, int *error); *error = 0; #ifdef PRINTON Rprintf("SWT2Drec: x=%ld, y=%ld, TWOsl=%ld, sl=%ld, J=%ld\n",x,y,TWOsl,sl,J); #endif mlength = TWOsl * TWOsl; /* Create space for TWOsl * TWOsl matrix m*/ if ((m = (double *)malloc((size_t)mlength*sizeof(double)))==NULL){ *error = 11; return; } /* Fill matrix from am from x,y at origin at level j*/ for(i=0; i0 then we have to recursively get hold of the smooth at the j-1 * level * */ if (levj == 0) ACCESS(hhout, sl, 0, 0) = ACCESS3D(am, D1, D12, levj, x, y); else { SWTGetSmooth(am, D1, D12, hhout, levj, x, y, sl, H, LengthH, error); if (*error != 0) return; } /* * Use S,H,V, and D to reconstruct at level levj, x, y * and put it into out. 
*/ #ifdef PRINTON Rprintf("This is ggout\n"); for(i=0; i= 3) { int length, step, i, j; length = F.Length / 2; step = (int) pow(2.0, Scale); templ = (double*) malloc(length * sizeof(double)); tempr = (double*) malloc(length * sizeof(double)); for (i = 0; i < length; i++) { templ[i] = tempr[i] = 0; switch (Direction) { case NORMAL: for (j = 0; j < length; j++) { templ[i] += Vect[j] * F.PreLeft[i][j]; tempr[i] += Vect[step - length + j] * F.PreRight[i][j]; } break; case INVERSE: for (j = 0; j < length; j++) { templ[i] += Vect[j] * F.PreInvLeft[i][j]; tempr[i] += Vect[step - length + j] * F.PreInvRight[i][j]; } } } for (i = 0; i < length; i++) { Vect[i] = templ[i]; Vect[step - length + i] = tempr[i]; } free(templ); free(tempr); } } void TransStep(int Scale, Filter F, double* Vect) { double* temp; int length, halflength, N, pos, i, j, p; length = (int) pow(2.0, Scale); halflength = length / 2; N = F.Length / 2; temp = (double*) malloc(length * sizeof(double)); if (N > 1) { pos = 0; for (i = 0; i < N; i++) { p = 2 * i; temp[pos] = temp[pos + halflength] = 0; for (j = 0; j <= N + p; j++) { temp[pos] += Vect[j] * F.HLeft[i][j]; temp[pos + halflength] += Vect[j] * F.GLeft[i][j]; } pos++; } for (i = N; i < halflength - N; i++) { p = 2 * i - N + 1; temp[pos] = temp[pos + halflength] = 0; for (j = 0; j < 2 * N; j++) { temp[pos] += Vect[p + j] * F.H[j]; temp[pos + halflength] += Vect[p + j] * F.G[j]; } pos++; } for (i = N - 1; i >= 0; i--) { p = 2 * i; temp[pos] = temp[pos + halflength] = 0; for (j = 0; j <= N + p; j++) { temp[pos] += Vect[length - j - 1] * F.HRight[i][j]; temp[pos + halflength] += Vect[length - j - 1] * F.GRight[i][j]; } pos++; } } else for (i = 0; i < halflength; i++) { p = 2 * i; temp[i] = temp[i + halflength] = 0; for (j = 0; j < 2 * N; j++) { temp[i] += Vect[p + j] * F.H[j]; temp[i + halflength] += Vect[p + j] * F.G[j]; } } for(i = 0; i < length; i++) Vect[i] = temp[i]; free(temp); } void InvTransStep(int Scale, Filter F, double* Vect) { double* temp; int length, doublelength, N, pos, i, j, p; length = (int) pow(2.0, Scale); doublelength = 2 * length; N = F.Length / 2; temp = (double*) malloc(doublelength * sizeof(double)); for (i = 0; i < doublelength; i++) temp[i] = 0; if (N > 1) { pos = 0; for (i = 0; i < N; i++) { p = 2 * i; for (j = 0; j <= N + p; j++) { temp[j] += Vect[pos] * F.HLeft[i][j]; temp[j] += Vect[pos + length] * F.GLeft[i][j]; } pos++; } for (i = N; i < length - N; i++) { p = 2 * i - N + 1; for (j = 0; j < 2 * N; j++) { temp[p + j] += Vect[pos] * F.H[j]; temp[p + j] += Vect[pos + length] * F.G[j]; } pos++; } for (i = N - 1; i >= 0; i--) { p = 2 * i; for (j = 0; j <= N + p; j++) { temp[doublelength - j - 1] += Vect[pos] * F.HRight[i][j]; temp[doublelength - j - 1] += Vect[pos + length] * F.GRight[i][j]; } pos++; } } else for (i = 0; i < length; i++) { p = 2 * i; for (j = 0; j < 2; j++) { temp[p + j] += Vect[i] * F.H[j]; temp[p + j] += Vect[i + length] * F.G[j]; } } for (i = 0; i < doublelength; i++) Vect[i] = temp[i]; free(temp); } void Trans(int MinScale, int Direction, int FilterNumber, double* Vect, int Size, int Precond, int* FilterHistory) { int scale, maxscale, N, NPrev, NNext; Filter temp, temp1; maxscale = (int)(log(Size)/log(2)); if (MinScale >= maxscale) { Rprintf("MinScale must be less than log2(Size).\nNo transformation performed.\n"); return; } if (FilterNumber < 1 || FilterNumber > Nmax) { Rprintf("Filter no %d not implemented.\nNo transformation performed.\n", FilterNumber); return; } N = FilterNumber; if (Direction == NORMAL) for (scale = maxscale; 
scale > MinScale; scale--) { NPrev = N; while (((int)pow(2.0, scale)) < 8*N && N != 1) N--; FilterHistory[maxscale - scale] = N; temp = GetFilt(N); if (Precond){ /* MAN: added brace for unambiguity */ if (scale == maxscale) Precondition(scale, NORMAL, temp, Vect); else if (N != NPrev) { temp1 = GetFilt(NPrev); Precondition(scale, INVERSE, temp1, Vect); Precondition(scale, NORMAL, temp, Vect); } } /* MAN: added brace for unambiguity */ TransStep(scale, temp, Vect); } else { while (((int)pow(2.0, MinScale+1)) < 8*N && N != 1) N--; for (scale = MinScale; scale < maxscale; scale++) { N = FilterHistory[maxscale - scale - 1]; if (scale < maxscale - 1) NNext = FilterHistory[maxscale - scale - 2]; else NNext = N; temp = GetFilt(N); InvTransStep(scale, temp, Vect); if (Precond){ /* MAN: added for unambiguity */ if (scale + 1 == maxscale) Precondition(maxscale, INVERSE, temp, Vect); else if (N != NNext) { temp1 = GetFilt(NNext); Precondition(scale+1, INVERSE, temp, Vect); Precondition(scale+1, NORMAL, temp1, Vect); } } /* MAN: added for unambiguity */ } } } /* The following is in Fryzlewicz's WavIntC.c */ void dec(double* data, int* size, int* filternumber, int* minscale, int* precond, /* MAN: added missing void fn type */ int* filterhistory) { Trans(*minscale, NORMAL, *filternumber, data, *size, *precond, filterhistory); } void rec(double* data, int* size, int* filterhistory, int* currentscale, int* precond) { /* MAN: added missing void fn type */ Trans(*currentscale, INVERSE, filterhistory[0], data, *size, *precond, filterhistory); } /* The following in Fryzlewicz's ``Filters.c'' */ Filter GetFilt(int N) { Filter temp; int i, j, len, offset, offset1; double NormH, NormHR, NormHL, NormGL, NormGR; temp.Length = 0; for (i = 0; i < 2 * Nmax; i++) temp.H[i] = temp.G[i] = 0; for (i = 0; i < Nmax; i++) for (j = 0; j < 3 * Nmax -1; j++) temp.HLeft[i][j] = temp.GLeft[i][j] = temp.HRight[i][j] = temp.GRight[i][j] = 0; for (i = 0; i < Nmax; i++) for (j = 0; j < Nmax; j++) temp.PreLeft[i][j] = temp.PreInvLeft[i][j] = temp.PreRight[i][j] = temp.PreInvRight[i][j] = 0; if (N < 1 || N > Nmax) { Rprintf("Filter no %d not implemented.", N); return temp; } temp.Length = 2 * N; /* Interior */ offset = 0; len = 2 * N; for (i = 1; i < N; i++) offset += 2 * i; for (i = 0; i < len; i++) temp.H[i] = Interior[i + offset]; NormH = Sum(temp.H, len); for (i = 0; i < len; i++) temp.H[i] = temp.H[i] / NormH * sqrt(2.0); for (i = 0; i < len; i++) temp.G[i] = (-2 * (i % 2) + 1) * temp.H[len - i - 1]; /* Left and Right */ offset = offset1 = 0; for (i = 1; i < N; i++) offset += 4 * i * i; for (i = 0; i < N; i++) { len = N + 2 * i + 1; NormHL = 0.0; NormGL = 0.0; NormHR = 0.0; NormGR = 0.0; for (j = 0; j < len; j++) { temp.HLeft[i][j] = Left[offset + offset1 + 2 * j]; NormHL += pow(temp.HLeft[i][j], 2.0); temp.GLeft[i][j] = Left[offset + offset1 + 2 * j + 1]; NormGL += pow(temp.GLeft[i][j], 2.0); temp.HRight[i][j] = Right[offset + offset1 + 2 * j]; NormHR += pow(temp.HRight[i][j], 2.0); temp.GRight[i][j] = Right[offset + offset1 + 2 * j + 1]; NormGR += pow(temp.GRight[i][j], 2.0); } for (j = 0; j < len; j++) { temp.HLeft[i][j] /= sqrt(NormHL); temp.GLeft[i][j] /= sqrt(NormGL); temp.HRight[i][j] /= sqrt(NormHR); temp.GRight[i][j] /= sqrt(NormGR); } offset1 += 2 * len; } /* Preconditioning Matrices: Left and Right */ if (N > 1) { offset = 0; for (i = 2; i < N; i++) offset += 2 * i * i; for (i = 0; i < N; i++) for (j = 0; j < N; j++) { offset1 = 2 * N * i + 2 * j; temp.PreLeft[i][j] = LeftPre[offset + offset1]; temp.PreInvLeft[i][j] = 
LeftPre[offset + offset1 + 1]; temp.PreRight[i][j] = RightPre[offset + offset1]; temp.PreInvRight[i][j] = RightPre[offset + offset1 + 1]; } } return temp; } double Sum(double* vect, int length) { double ssum; int i; ssum = 0.0; for (i = 0; i < length; i++) ssum += vect[i]; return ssum; } /* * ThreeD wavelets suite: three-dimensional DWT and inverse * * A generic 3D array has nr rows, nc cols and ns pixels in the sides. * We use the letters r,c and s to index each type. */ /* Macro to access 3D array */ #define ACCESSW3D(array, nr, nc, r, c, s) *(array + (nr)*((c) + (s)*(nc))+(r)) /* * CreateArray3D: Create a 3D array of doubles * * Arguments. * * nr: number of rows (integer) * nc: number of columns (integer) * ns: number of sides (integer) * error: error code. 0 is o.k., 3001 is memory error. * * Returns: NULL on error or pointer to requested array. * */ double *CreateArray3D(int nr, int nc, int ns, int *error) { double *array; *error = 0; if ((array = (double *)malloc((unsigned)(nr*nc*ns)*sizeof(double)))==NULL){ *error = 3001; return(NULL); } else return(array); } /* * DestroyArray3D: Release memory associated with an array * * Arguments. * * array: pointer to 3D array * error: error code. 0=O.k. 3002 means NULL pointer was passed. * * Returns: NULL on error or pointer to requested array. * */ void DestroyArray3D(double *array, int *error) { *error = 0; if (array == NULL) { *error = 3002; return; } else free((void *)array); } /* * wd3Dstep - the guts of the 3D DWT algorithm. This algorithm * could be made more efficient by less memory allocation * but I ain't got time to do it */ void wd3Dstep(double *Carray, int *truesize, int *size, double *H, int *LengthH, int *error) /*--------------------- * Argument description double *Carray:: Input 3D array. All dimensions are size int *truesize:: The true dimensions of the Carray int *size:: Number of rows, columns and sides (power of 2) For this invocation of the routine only double *H:: Wavelet filter coefficients int *LengthH:: Number of wavelet filter coefficients int *error:: Error code. 0=O.k. 
Memory errors 3003 to 3017 *---------------------*/ { register int r,c,s; /* Counters for rows, cols and sides */ double *Ha,*Ga; /* Will be storage for first application of filters */ double *HH, *GH, *HG, *GG; /* Storage for second application */ double *HHH,*GHH,*HGH,*GGH,*HHG,*GHG,*HGG,*GGG; /* Third application */ int ndata; /* Length of TheData */ int halfsize; double *c_in, *c_out, *d_out; /* Creates a 3D array */ double *CreateArray3D(int nr, int nc, int ns, int *error); void convolveC(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *c_out, int firstCout, int lastCout, int type, int step_factor, int bc); void convolveD(double *c_in, int LengthCin, int firstCin, double *H, int LengthH, double *d_out, int firstDout, int lastDout, int type, int step_factor, int bc); *error = 0; halfsize = *size/2; /* * Get memory for first application */ if ((Ha = CreateArray3D((int)halfsize, (int)*size, (int)*size, error))==NULL){ return; } if ((Ga = CreateArray3D((int)halfsize, (int)*size, (int)*size, error))==NULL){ return; } /* Get some storage for c_in, c_out, d_out */ ndata = *size; if ((c_in = (double *)malloc((unsigned)ndata*sizeof(double)))==NULL){ *error = 3003; return; } if ((c_out = (double *)malloc((unsigned)(int)halfsize*sizeof(double)))==NULL){ *error = 3004; return; } if ((d_out = (double *)malloc((unsigned)(int)halfsize*sizeof(double)))==NULL){ *error = 3005; return; } /* * Now perform wavelet transform across rows for each column and side */ for(c=0; c< *size; ++c) for(s=0; s < *size; ++s) { /* Load up c_in array */ for(r=0; r < *size; ++r) { *(c_in+r) = ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c, s); /* Rprintf("Carray[%d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c, s)); */ } /* Now do convolveC and convolveD on c_in */ convolveC(c_in, (int)*size, 0, H, (int)*LengthH, c_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); convolveD(c_in, (int)*size, 0, H, (int)*LengthH, d_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); /* Now store C in Ha and D in Ga */ for(r=0; r < (int)halfsize; ++r) { ACCESSW3D(Ha, (int)halfsize, (int)*size, r, c, s) = *(c_out+r); /* Rprintf("Ha[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(Ha, (int)halfsize, (int)*size, r,c,s)); */ } for(r=0; r < (int)halfsize; ++r) { ACCESSW3D(Ga, (int)halfsize, (int)*size, r, c, s) = *(d_out+r); /* Rprintf("Ga[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(Ga, (int)halfsize, (int)*size, r,c,s)); */ } /* Go round and do it again */ } /* Now create memory for second application */ if ((HH = CreateArray3D((int)halfsize, (int)halfsize, (int)*size, error))==NULL){ *error = 3006; return; } if ((GH = CreateArray3D((int)halfsize, (int)halfsize, (int)*size, error))==NULL){ *error = 3007; return; } if ((HG = CreateArray3D((int)halfsize, (int)halfsize, (int)*size, error))==NULL){ *error = 3008; return; } if ((GG = CreateArray3D((int)halfsize, (int)halfsize, (int)*size, error))==NULL){ *error = 3009; return; } /* Ha to HH and GH */ /* * Now perform convolution steps over cols on H for each row and side. 
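* Ed. note on the naming: each letter records the filter applied in one direction, H for the low-pass (smoothing) filter and G for the high-pass (detail) filter, so the first pass over rows gave Ha and Ga, the passes over columns split them into HH, GH, HG and GG, and the final passes over sides yield the eight octants HHH through GGG, each of side halfsize.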
*/ for(r=0; r < halfsize; ++r) for(s=0; s < *size; ++s) { /* Load up c_in array */ for(c=0; c < *size; ++c) *(c_in+c) = ACCESSW3D(Ha, (int)halfsize, (int)*size, r, c, s); /* Now do convolveC and convolveD on c_in */ convolveC(c_in, (int)*size, 0, H, (int)*LengthH, c_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); convolveD(c_in, (int)*size, 0, H, (int)*LengthH, d_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); /* Now store C in HH and D in GH */ for(c=0; c < (int)halfsize; ++c) { ACCESSW3D(HH, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+c); /* Rprintf("HH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(HH, (int)halfsize, (int)halfsize, r,c,s)); */ } for(c=0; c < (int)halfsize; ++c) { ACCESSW3D(GH, (int)halfsize, (int)halfsize, r, c, s) = *(d_out+c); /* Rprintf("GH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(GH, (int)halfsize, (int)halfsize, r,c,s)); */ } /* Go round and do it again */ } /* Ga to HG and GG */ /* * Now perform convolution over cols on G for each row and side. */ for(r=0; r < halfsize; ++r) for(s=0; s < *size; ++s) { /* Load up c_in array */ for(c=0; c < *size; ++c) *(c_in+c) = ACCESSW3D(Ga, (int)halfsize, (int)*size, r, c, s); /* Now do convolveC and convolveD on c_in */ convolveC(c_in, (int)*size, 0, H, (int)*LengthH, c_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); convolveD(c_in, (int)*size, 0, H, (int)*LengthH, d_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); /* Now store C in HG and D in GG */ for(c=0; c < (int)halfsize; ++c) { ACCESSW3D(HG, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+c); /* Rprintf("HG[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(HG, (int)halfsize, (int)halfsize, r,c,s)); */ } for(c=0; c < (int)halfsize; ++c) { ACCESSW3D(GG, (int)halfsize, (int)halfsize, r, c, s) = *(d_out+c); /* Rprintf("GG[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(HG, (int)halfsize, (int)halfsize, r,c,s)); */ } /* Go round and do it again */ } /* Now we've used Ha and Ga and so we can free them */ free((void *)Ha); free((void *)Ga); /* THIRD LEVEL APPLICATION */ /* Now create memory for third application */ if ((HHH = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3010; return; } if ((GHH = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3011; return; } if ((HGH = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3012; return; } if ((GGH = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3013; return; } if ((HHG = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3014; return; } if ((GHG = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3015; return; } if ((HGG = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3016; return; } if ((GGG = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3017; return; } /* HH to HHH and GHH */ /* * Now perform wavelet transform over sides on HH for each row and col. 
*/ for(r=0; r < halfsize; ++r) for(c=0; c < halfsize; ++c) { /* Load up c_in array */ for(s=0; s < *size; ++s) *(c_in+s) = ACCESSW3D(HH, (int)halfsize, (int)halfsize, r, c, s); /* Now do convolveC and convolveD on c_in */ convolveC(c_in, (int)*size, 0, H, (int)*LengthH, c_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); convolveD(c_in, (int)*size, 0, H, (int)*LengthH, d_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); /* Now store C in HHH and D in GHH */ for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(HHH, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+s); /* Rprintf("HHH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(HHH, (int)halfsize, (int)halfsize, r,c,s)); */ } for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(GHH, (int)halfsize, (int)halfsize, r, c, s) = *(d_out+s); } /* Go round and do it again */ } /* GH to HGH and GGH */ /* * Now perform wavelet transform over sides on GH for each row and col. */ for(r=0; r < halfsize; ++r) for(c=0; c < halfsize; ++c) { /* Load up c_in array */ for(s=0; s < *size; ++s) *(c_in+s) = ACCESSW3D(GH, (int)halfsize, (int)halfsize, r, c, s); /* Now do convolveC and convolveD on c_in */ convolveC(c_in, (int)*size, 0, H, (int)*LengthH, c_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); convolveD(c_in, (int)*size, 0, H, (int)*LengthH, d_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); /* Now store C in HGH and D in GGH */ for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(HGH, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+s); /* Rprintf("HGH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(HGH, (int)halfsize, (int)halfsize, r,c,s)); */ } for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(GGH, (int)halfsize, (int)halfsize, r, c, s) = *(d_out+s); /* Rprintf("GGH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(GGH, (int)halfsize, (int)halfsize, r,c,s)); */ } /* Go round and do it again */ } /* HG to HHG and GHG */ /* * Now perform wavelet transform over sides on HG for each row and col. */ for(r=0; r < halfsize; ++r) for(c=0; c < halfsize; ++c) { /* Load up c_in array */ for(s=0; s < *size; ++s) *(c_in+s) = ACCESSW3D(HG, (int)halfsize, (int)halfsize, r, c, s); /* Now do convolveC and convolveD on c_in */ convolveC(c_in, (int)*size, 0, H, (int)*LengthH, c_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); convolveD(c_in, (int)*size, 0, H, (int)*LengthH, d_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); /* Now store C in HHG and D in GHG */ for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(HHG, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+s); } for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(GHG, (int)halfsize, (int)halfsize, r, c, s) = *(d_out+s); } /* Go round and do it again */ } /* GG to HGG and GGG */ /* * Now perform wavelet transform over sides on GG for each row and col. 
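 * Once GG has been split, all eight octant arrays are complete. The
 * second-level arrays HH, GH, HG and GG are freed, and the octants are then
 * written back into Carray below, with HHH in the (0,0,0) corner and each G
 * in a name offsetting the corresponding coordinate by halfsize (GHH at
 * row + halfsize, HGH at column + halfsize, HHG at side + halfsize, and so
 * on for the remaining combinations).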
*/ for(r=0; r < halfsize; ++r) for(c=0; c < halfsize; ++c) { /* Load up c_in array */ for(s=0; s < *size; ++s) *(c_in+s) = ACCESSW3D(GG, (int)halfsize, (int)halfsize, r, c, s); /* Now do convolveC and convolveD on c_in */ convolveC(c_in, (int)*size, 0, H, (int)*LengthH, c_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); convolveD(c_in, (int)*size, 0, H, (int)*LengthH, d_out, 0, (int)halfsize-1, WAVELET, 1, PERIODIC); /* Now store C in HGG and D in GGG */ for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(HGG, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+s); } for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(GGG, (int)halfsize, (int)halfsize, r, c, s) = *(d_out+s); } /* Go round and do it again */ } /* Now we can get rid of the second level memory */ free((void *)HH); free((void *)GH); free((void *)HG); free((void *)GG); /* Now store the answers in the C array */ /* HHH */ for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { /* HHH */ /* { double tmpf; tmpf= ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c, s) = ACCESSW3D(HHH, (int)halfsize, (int)halfsize, r, c, s); */ ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c, s) = ACCESSW3D(HHH, (int)halfsize, (int)halfsize, r, c, s); /* Rprintf("Carray[%d %d %d] = %lf (from HHH)\n", r,c,s,tmpf); */ /* } */ /* GHH */ ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c, s) = ACCESSW3D(GHH, (int)halfsize, (int)halfsize, r, c, s); /* HGH */ ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c+(int)halfsize, s) = ACCESSW3D(HGH, (int)halfsize, (int)halfsize, r, c, s); /* Rprintf("HGH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(HGH, (int)halfsize, (int)halfsize, r,c,s)); */ /* GGH */ ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c+(int)halfsize, s) = ACCESSW3D(GGH, (int)halfsize, (int)halfsize, r, c, s); /* Rprintf("GGH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(GGH, (int)halfsize, (int)halfsize, r,c,s)); */ /* HHG */ ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c, s+(int)halfsize) = ACCESSW3D(HHG, (int)halfsize, (int)halfsize, r, c, s); /* GHG */ ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c, s+(int)halfsize) = ACCESSW3D(GHG, (int)halfsize, (int)halfsize, r, c, s); /* HGG */ ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c+(int)halfsize, s+(int)halfsize) = ACCESSW3D(HGG, (int)halfsize, (int)halfsize, r, c, s); /* GGG */ ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c+(int)halfsize, s+(int)halfsize) = ACCESSW3D(GGG, (int)halfsize, (int)halfsize, r, c, s); } /* Free the third level memory */ free((void *)HHH); free((void *)GHH); free((void *)HGH); free((void *)GGH); free((void *)HHG); free((void *)GHG); free((void *)HGG); free((void *)GGG); /* Free c_in, c_out, d_out */ free((void *)c_in); free((void *)c_out); free((void *)d_out); } void wd3D(double *Carray, int *size, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *Carray:: Input and output coefficients int *size:: Dimension of this array double *H:: The wavelet coefficients int *LengthH:: Number of wavelet coefficients int *error:: Error code 0=o.k. 
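
   Note: the body below repeatedly calls wd3Dstep, first on the full
   *size cube and then on the *size/2, *size/4, ..., 2 smooth block left
   in the corner of Carray, so the complete 3D decomposition is built up
   in place within Carray.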
*---------------------*/ { int insize; void wd3Dstep(double *Carray, int *truesize, int *size, double *H, int *LengthH, int *error); *error = 0; insize = *size; while(insize >= 2) { /* Rprintf("Outsize is %ld\n", insize*2); */ wd3Dstep(Carray, size, &insize, H, LengthH, error); if (*error != 0) return; insize /= 2; } } /* * Reconstruct 3D wavelet object in Carray */ void wr3D(double *Carray, int *truesize, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *Carray:: Contains array of wavelet coefficients int *truesize:: Dimension of 3D array Carray double *H:: The wavelet filter coefficients int *LengthH:: Number of wavelet filter coefficients int *error:: Error code (0=o.k.) Memory errors from wr3Dstep 3035l the dimension of Carray is 1, therefore cannot do any further reconstruction *---------------------*/ { int sizeout; void wr3Dstep(double *Carray, int *truesize, int *sizeout, double *H, int *LengthH, int *error); *error = 0; sizeout = 2; if (*truesize < sizeout) { *error = 3035; return; } while(sizeout <= *truesize) { /* Rprintf("Outsize is %ld\n", sizeout); */ wr3Dstep(Carray, truesize, &sizeout, H, LengthH, error); if (*error != 0) return; sizeout *= 2; } } /* * wr3Dstep: Perform 3D wavelet reconstruction step */ void wr3Dstep(double *Carray, int *truesize, int *sizeout, double *H, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *Carray:: Array of wavelet coefficients and previous Cs to replace int *truesize:: True size of Carray int *sizeout:: Size of answer array double *H:: The wavelet coefficients int *LengthH:: Number of wavelet coefficients int *error:: Error code. 0=o.k. Memory errors 3018 to 3034 *---------------------*/ { register int r,c,s; double *Ha,*Ga; /* Will be storage for third application of filters */ double *HH, *GH, *HG, *GG; /* Storage for second application */ double *HHH,*GHH,*HGH,*GGH,*HHG,*GHG,*HGG,*GGG; /* Third application */ double *c_in, *d_in, *c_out; int halfsize; int type,bc; void conbar(double *c_in, int LengthCin, int firstCin, double *d_in, int LengthDin, int firstDin, double *H, int LengthH, double *c_out, int LengthCout, int firstCout, int lastCout, int type, int bc); *error = 0; type = WAVELET; bc = PERIODIC; /* * Take the coefficients from the C array and store them in cubes * half the sizeout * * This is just the inverse of the last part of wd3Dstep */ halfsize = *sizeout/2; /* Now create memory for first application */ if ((HHH = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3018; return; } if ((GHH = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3019; return; } if ((HGH = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3020; return; } if ((GGH = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3021; return; } if ((HHG = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3022; return; } if ((GHG = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3023; return; } if ((HGG = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3024; return; } if ((GGG = CreateArray3D((int)halfsize, (int)halfsize, (int)halfsize, error))==NULL){ *error = 3025; return; } /* HHH */ for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { /* HHH */ ACCESSW3D(HHH, 
(int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c, s); /* GHH */ ACCESSW3D(GHH, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c, s); /* HGH */ ACCESSW3D(HGH, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c+(int)halfsize, s); /* Rprintf("HGH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(HGH, (int)halfsize, (int)halfsize, r,c,s)); */ /* GGH */ ACCESSW3D(GGH, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c+(int)halfsize, s); /* Rprintf("GGH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(GGH, (int)halfsize, (int)halfsize, r,c,s)); */ /* HHG */ ACCESSW3D(HHG, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c, s+(int)halfsize); /* GHG */ ACCESSW3D(GHG, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c, s+(int)halfsize); /* HGG */ ACCESSW3D(HGG, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c+(int)halfsize, s+(int)halfsize); /* GGG */ ACCESSW3D(GGG, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c+(int)halfsize, s+(int)halfsize); } /* Now create memory for HH, GH, HG and GG */ if ((HH = CreateArray3D((int)halfsize, (int)halfsize, (int)*sizeout, error)) ==NULL){ *error = 3026; return; } if ((GH = CreateArray3D((int)halfsize, (int)halfsize, (int)*sizeout, error)) ==NULL){ *error = 3027; return; } if ((HG = CreateArray3D((int)halfsize, (int)halfsize, (int)*sizeout, error)) ==NULL){ *error = 3028; return; } if ((GG = CreateArray3D((int)halfsize, (int)halfsize, (int)*sizeout, error)) ==NULL){ *error = 3029; return; } /* We now have to reconstruct HH, GH, HG and GG */ /* Create c_in, d_in and c_out */ if ((c_in = (double *)malloc((unsigned)(int)halfsize*sizeof(double)))==NULL){ *error = 3030; return; } if ((d_in = (double *)malloc((unsigned)(int)halfsize*sizeof(double)))==NULL){ *error = 3031; return; } if ((c_out = (double *)malloc((unsigned)(int)*sizeout*sizeof(double)))==NULL){ *error = 3032; return; } /* Fill up HH by wavelet reconstruction of HHH and GHH */ for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) { /* Fill up c_in and d_in */ for(s=0; s < (int)halfsize; ++s) { *(c_in+s) = ACCESSW3D(HHH, (int)halfsize, (int)halfsize, r, c, s); *(d_in+s) = ACCESSW3D(GHH, (int)halfsize, (int)halfsize, r, c, s); } /* Do the wavelet reconstruction step */ conbar(c_in, (int)halfsize, 0, d_in, (int)halfsize, 0, H, (int)*LengthH, c_out, (int)*sizeout, 0, (int)*sizeout - 1, type, bc); /* Now fill up HH */ for(s=0; s < (int)*sizeout; ++s) { ACCESSW3D(HH, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+s); /* Rprintf("HH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(HH, (int)halfsize, (int)halfsize, r,c,s)); */ } /* Next row and column combination */ } /* Fill up GH by wavelet reconstruction of HGH and GGH */ for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) { /* Fill up c_in and d_in */ for(s=0; s < (int)halfsize; ++s) { *(c_in+s) = ACCESSW3D(HGH, (int)halfsize, (int)halfsize, r, c, s); *(d_in+s) = ACCESSW3D(GGH, (int)halfsize, (int)halfsize, r, c, s); } /* Do the wavelet reconstruction step */ conbar(c_in, (int)halfsize, 0, d_in, (int)halfsize, 0, H, (int)*LengthH, c_out, (int)*sizeout, 0, (int)*sizeout - 1, type, bc); /* Now fill up GH */ for(s=0; s < (int)*sizeout; ++s) { 
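/* copy the reconstructed line (length *sizeout) for this (r,c) combination into GH */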
ACCESSW3D(GH, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+s); /* Rprintf("GH[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(GH, (int)halfsize, (int)halfsize, r,c,s)); */ } /* Next row and column combination */ } /* Fill up HG by wavelet reconstruction of HHG and GHG */ for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) { /* Fill up c_in and d_in */ for(s=0; s < (int)halfsize; ++s) { *(c_in+s) = ACCESSW3D(HHG, (int)halfsize, (int)halfsize, r, c, s); *(d_in+s) = ACCESSW3D(GHG, (int)halfsize, (int)halfsize, r, c, s); } /* Do the wavelet reconstruction step */ conbar(c_in, (int)halfsize, 0, d_in, (int)halfsize, 0, H, (int)*LengthH, c_out, (int)*sizeout, 0, (int)*sizeout - 1, type, bc); /* Now fill up HG */ for(s=0; s < (int)*sizeout; ++s) ACCESSW3D(HG, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+s); /* Next row and column combination */ } /* Fill up GG by wavelet reconstruction of HGG and GGG */ for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) { /* Fill up c_in and d_in */ for(s=0; s < (int)halfsize; ++s) { *(c_in+s) = ACCESSW3D(HGG, (int)halfsize, (int)halfsize, r, c, s); *(d_in+s) = ACCESSW3D(GGG, (int)halfsize, (int)halfsize, r, c, s); } /* Do the wavelet reconstruction step */ conbar(c_in, (int)halfsize, 0, d_in, (int)halfsize, 0, H, (int)*LengthH, c_out, (int)*sizeout, 0, (int)*sizeout - 1, type, bc); /* Now fill up GG */ for(s=0; s < (int)*sizeout; ++s) ACCESSW3D(GG, (int)halfsize, (int)halfsize, r, c, s) = *(c_out+s); /* Next row and column combination */ } /* Now we can rid ourselves of HHH, GHH, HGH, GGH, HHG, GHG, HGG, and GGG */ free((void *)HHH); free((void *)GHH); free((void *)HGH); free((void *)GGH); free((void *)HHG); free((void *)GHG); free((void *)HGG); free((void *)GGG); /* Now create memory for Ha and Ga */ if ((Ha = CreateArray3D((int)halfsize, (int)*sizeout, (int)*sizeout, error)) ==NULL){ *error = 3033; return; } if ((Ga = CreateArray3D((int)halfsize, (int)*sizeout, (int)*sizeout, error)) ==NULL){ *error = 3034; return; } /* Fill up Ha by wavelet reconstruction of HH and GH */ for(r=0; r < (int)halfsize; ++r) for(s=0; s < (int)*sizeout; ++s) { /* Fill up c_in and d_in */ for(c=0; c < (int)halfsize; ++c) { *(c_in+c) = ACCESSW3D(HH, (int)halfsize, (int)halfsize, r, c, s); *(d_in+c) = ACCESSW3D(GH, (int)halfsize, (int)halfsize, r, c, s); } /* Do the wavelet reconstruction step */ conbar(c_in, (int)halfsize, 0, d_in, (int)halfsize, 0, H, (int)*LengthH, c_out, (int)*sizeout, 0, (int)*sizeout - 1, type, bc); /* Now fill up Ha */ for(c=0; c < (int)*sizeout; ++c) { ACCESSW3D(Ha, (int)halfsize, (int)*sizeout, r, c, s) = *(c_out+c); /* Rprintf("Ha[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(Ha, (int)halfsize, (int)*sizeout, r,c,s)); */ } /* Next row and side combination */ } /* Fill up Ga by wavelet reconstruction of HG and GG */ for(r=0; r < (int)halfsize; ++r) for(s=0; s < (int)*sizeout; ++s) { /* Fill up c_in and d_in */ for(c=0; c < (int)halfsize; ++c) { *(c_in+c) = ACCESSW3D(HG, (int)halfsize, (int)halfsize, r, c, s); *(d_in+c) = ACCESSW3D(GG, (int)halfsize, (int)halfsize, r, c, s); } /* Do the wavelet reconstruction step */ conbar(c_in, (int)halfsize, 0, d_in, (int)halfsize, 0, H, (int)*LengthH, c_out, (int)*sizeout, 0, (int)*sizeout - 1, type, bc); /* Now fill up Ga */ for(c=0; c < (int)*sizeout; ++c) { ACCESSW3D(Ga, (int)halfsize, (int)*sizeout, r, c, s) = *(c_out+c); /* Rprintf("Ga[ %d, %d, %d] = %lf\n", r,c,s, ACCESSW3D(Ga, (int)halfsize, (int)*sizeout, r,c,s)); */ } /* Next row and side combination */ } /* Now rid outselves of the 2nd level 
memory */ free((void *)HH); free((void *)GH); free((void *)HG); free((void *)GG); /* Now store the result of combining Ha,Ga back in the Carray */ for(c=0; c < (int)*sizeout; ++c) for(s=0; s < (int)*sizeout; ++s) { /* Fill up c_in and d_in */ for(r=0; r < (int)halfsize; ++r) { *(c_in+r) = ACCESSW3D(Ha, (int)halfsize, (int)*sizeout, r, c, s); *(d_in+r) = ACCESSW3D(Ga, (int)halfsize, (int)*sizeout, r, c, s); } /* Do the wavelet reconstruction step */ conbar(c_in, (int)halfsize, 0, d_in, (int)halfsize, 0, H, (int)*LengthH, c_out, (int)*sizeout, 0, (int)*sizeout - 1, type, bc); /* Now fill up Carray */ for(r=0; r < (int)*sizeout; ++r) ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c, s) = *(c_out+r); /* Next column and side combination */ } /* Free the first level memory */ free((void *)Ha); free((void *)Ga); /* Free c_in, c_out, d_out */ free((void *)c_in); free((void *)c_out); free((void *)d_in); } void getARRel(double *Carray, int *size, int *level, double *GHH, double *HGH, double *GGH, double *HHG, double *GHG, double *HGG, double *GGG) { register int r,c,s; int halfsize; halfsize = 1 << *level; /* Rprintf("Halfsize is %ld\n", halfsize); */ for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { /* GHH */ ACCESSW3D(GHH, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*size, (int)*size, r+(int)halfsize, c, s); /* HGH */ ACCESSW3D(HGH, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*size, (int)*size, r, c+(int)halfsize, s); /* GGH */ ACCESSW3D(GGH, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*size, (int)*size, r+(int)halfsize, c+(int)halfsize, s); /* HHG */ ACCESSW3D(HHG, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*size, (int)*size, r, c, s+(int)halfsize); /* GHG */ ACCESSW3D(GHG, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*size, (int)*size, r+(int)halfsize, c, s+(int)halfsize); /* HGG */ ACCESSW3D(HGG, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*size, (int)*size, r, c+(int)halfsize, s+(int)halfsize); /* GGG */ ACCESSW3D(GGG, (int)halfsize, (int)halfsize, r, c, s) = ACCESSW3D(Carray, (int)*size, (int)*size, r+(int)halfsize, c+(int)halfsize, s+(int)halfsize); } } #define IX_HHH 0 #define IX_GHH 1 #define IX_HGH 2 #define IX_GGH 3 #define IX_HHG 4 #define IX_GHG 5 #define IX_HGG 6 #define IX_GGG 7 void putarr(double *Carray, int *truesize, int *level, int *Iarrayix, double *Iarray) { register int r,c,s; int halfsize; halfsize = 1 << *level; switch(*Iarrayix) { case IX_HHH: Rprintf("Inserting HHH\n"); ACCESSW3D(Carray, (int)*truesize, (int)*truesize, 0, 0, 0) = ACCESSW3D(Iarray, (int)halfsize, (int)halfsize, 0, 0, 0); break; case IX_GHH: Rprintf("Inserting GHH\n"); for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c, s)= ACCESSW3D(Iarray, (int)halfsize, (int)halfsize, r, c, s); } break; case IX_HGH: Rprintf("Inserting HGH\n"); for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c+(int)halfsize, s)= ACCESSW3D(Iarray, (int)halfsize, (int)halfsize, r, c, s); } break; case IX_GGH: Rprintf("Inserting GGH\n"); for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c+(int)halfsize, s)= ACCESSW3D(Iarray, 
(int)halfsize, (int)halfsize, r, c, s); } break; case IX_HHG: Rprintf("Inserting HHG\n"); for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c, s+(int)halfsize)= ACCESSW3D(Iarray, (int)halfsize, (int)halfsize, r, c, s); } break; case IX_GHG: Rprintf("Inserting GHG\n"); for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c, s+(int)halfsize)= ACCESSW3D(Iarray, (int)halfsize, (int)halfsize, r, c, s); } break; case IX_HGG: Rprintf("Inserting HGG\n"); for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r, c+(int)halfsize, s+(int)halfsize)= ACCESSW3D(Iarray, (int)halfsize, (int)halfsize, r, c, s); } break; case IX_GGG: Rprintf("Inserting GGG\n"); for(r=0; r < (int)halfsize; ++r) for(c=0; c < (int)halfsize; ++c) for(s=0; s < (int)halfsize; ++s) { ACCESSW3D(Carray, (int)*truesize, (int)*truesize, r+(int)halfsize, c+(int)halfsize, s+(int)halfsize)= ACCESSW3D(Iarray, (int)halfsize, (int)halfsize, r, c, s); } break; default: Rprintf("Unknown insertion type\n"); break; } } /* * WaveThresh3 - Beginning of TRD's multiwavelet code. */ /*Multiple wavelet decomposition */ /*TRD November 1994 */ /*last updated May 1995 */ void multiwd(double *C, int *lengthc, double *D, int *lengthd, int *nlevels, int *nphi, int *npsi, int *ndecim, double *H, double *G, int *NH, int *lowerc, int *upperc, int *offsetc, int *lowerd, int *upperd, int *offsetd, int *nbc) /*--------------------- * Argument description *--------------------- double *C:: C coefficients matrix int *lengthc:: number of coefficients in C double *D:: D coefficients matrix int *lengthd:: number of coefficients in D int *nlevels:: number of levels in decomposition int *nphi:: number of scaling functions int *npsi:: number of wavelet functions int *ndecim:: amount of decimation at each level double *H:: Band pass filter double *G:: High pass filter int *NH:: number of coeff matrices in the filter int *lowerc:: for each level the lowest C coefficient int *upperc:: for each level the highest C coefficient int *offsetc:: amount to offset to access each level int *upperd:: for each level the lowest C coefficient int *lowerd:: for each level the highest C coefficient int *offsetd:: amount to offset to access each level int *nbc:: boundary conds 1=period 2=symm. *---------------------*/ { int level,prevlvl,prevoffsetc,index,base,k,l,m,n; void TRDerror(char *s); int trd_reflect(int a, int b); /* ... 
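	trd_module() and trd_reflect(), defined towards the end of this file,
	map an out-of-range coefficient index back into the valid range:
	trd_module() gives periodic (modulo) wrapping, used when *nbc == 1,
	and trd_reflect() gives symmetric reflection, used when *nbc == 2;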
see L10209,102010 */ int trd_module(int a, int b); for(level=*nlevels-1;level >=0;level--) { /*some frequently used values computed here */ prevlvl=level+1; /* previous level */ prevoffsetc=*(offsetc+prevlvl); /*offset of C for previous level*/ /* prevoffsetd=*(offsetd+prevlvl); NOT USED offset of D for previous level*/ for(k=*(lowerc+level);k<=*(upperc+level);k++){ /*k index of new vector */ for(l=0; l<*nphi;l++){ /*l index of new elmt */ C[(*(offsetc+level)+k-*(lowerc+level))*(*nphi)+l]=0.0; for(m=*ndecim*k;m<*ndecim*k+*NH;m++){ /*index of already known vectors */ /*using periodic boundary conditions index = m mod #coeffs */ index = m-*(lowerc+prevlvl); base=1+*(upperc+prevlvl)-*(lowerc+prevlvl); if(index >= base || index < 0){ if(*nbc==1) index = trd_module(index,base); else if(*nbc==2) index = trd_reflect(index,base); else TRDerror("bad boundary conditions\n"); } for(n=0;n<*nphi;n++){ /* index of already known elemnt */ C[(*(offsetc+level)+k-*(lowerc+level))*(*nphi)+l]+= H[((m-*ndecim*k)*(*nphi)+l)*(*nphi)+n]*C[*nphi*(prevoffsetc+index)+n]; } } } } for(k=*(lowerd+level);k<=*(upperd+level);k++){ /* repeat for D */ for(l=0;l<*npsi;l++){ D[(*(offsetd+level)+k-*(lowerd+level))**npsi+l]=0.0; for(m=*ndecim*k;m<*ndecim*k+*NH;m++){ index = m-*(lowerc+prevlvl); base = 1+*(upperc+prevlvl)-*(lowerc+prevlvl); if(index >= base || index < 0){ if(*nbc==1) index = trd_module(index,base); else if(*nbc==2) index = trd_reflect(index,base); else TRDerror("bad boundary conditions\n"); } for(n=0;n<*nphi;n++){ D[(*(offsetd+level)+k-*(lowerd+level))* *npsi+l]+= G[((m-*ndecim*k)* *npsi+l)**nphi+n]*C[(prevoffsetc+index)* *nphi+n]; } } } } } } /*Double wavelet reconstruction */ /*By T Downie November 1994 */ /*updated Jan 95 */ void multiwr(double *C, int *lengthc, double *D, int *lengthd, int *nlevels, int *nphi, int *npsi, int *ndecim, double *H, double *G, int *NH, int *lowerc, int *upperc, int *offsetc, int *lowerd, int *upperd, int *offsetd, int *nbc, int *startlevel) /*--------------------- * Argument description *--------------------- double *C:: C coefficients matrix int *lengthc:: number of coefficients in C double *D:: D coefficients matrix int *lengthd:: number of coefficients in D int *nlevels:: number of levels in decomposition int *nphi:: number of scaling functions int *npsi:: number of wavelet functions int *ndecim:: amount of decimation at each level double *H:: Band pass filter double *G:: High pass filter int *NH:: number of coeff matrices in the filter int *lowerc:: for each level the lowest C coefficient int *upperc:: for each level the highest C coefficient int *offsetc:: amount to offset to access each level int *upperd:: for each level the lowest C coefficient int *lowerd:: for each level the highest C coefficient int *offsetd:: amount to offset to access each level int *nbc:: boundary conds 1=period 2=symm. 
int *startlevel; level at which to start the wavelet reconstruction *---------------------*/ { int level,offslvlc,offslvld,index,base,newck,newcl,oldck,oldcl,olddl,lim; int trd_module(int a, int b); int trd_reflect(int a, int b); for(level=*startlevel; level<*nlevels; level++){ /*level=level of convolution*/ offslvlc=*(offsetc+level); /*ammount to offset C for this level */ offslvld=*(offsetd+level); /*ammount to offste D for this level */ for(newck=*(lowerc+level+1); newck<=*(upperc+level+1);newck++){ /*newck=position of the new c coeff*/ for(newcl=0; newcl< *nphi;newcl++){ /*newcl=element of the new c coeff vector*/ lim= newck+1-*NH; while(lim % *ndecim != 0) lim++; for(oldck=lim/ *ndecim; oldck<= ((float) newck) / *ndecim;oldck++){ /*oldck=position of the c/d coeff in conv. */ for(oldcl=0;oldcl< *nphi;oldcl++){ /*oldcl=element of the c coeff in conv.*/ index=oldck- *(lowerc+level); base= 1+*(upperc+level)-*(lowerc+level); if(index < 0 || index >= base){ if(*nbc == 1) index=trd_module(index,base); else index=trd_reflect(index,base); } C[(*(offsetc+level+1)+newck)* *nphi + newcl] += H[((newck-*ndecim*oldck) * *nphi+oldcl) * *nphi + newcl]* C[(offslvlc+index)* *nphi + oldcl]; } for(olddl=0;olddl< *npsi;olddl++){ /*olddl=element of the d coeff in conv.*/ index=oldck- *(lowerd+level); base= 1+*(upperd+level)-*(lowerd+level); if(index < 0 || index >= base){ if(*nbc == 1) index=trd_module(index,base); else index=trd_reflect(index,base); } C[(*(offsetc+level+1)+newck)* *nphi + newcl] += G[((newck-*ndecim*oldck) * *nphi+olddl) * *npsi + newcl]* D[(offslvld+index)* *npsi + olddl]; } } } } } } int trd_reflect(int a, int b) { int trd_module(int a, int b); if(b <= 0) return (-1); else { if (a < -b || a > 2*b) a=trd_module(a,2*b); if (a < 0) a=-1*a-1; if (a > b) a=2*b-a-1; } return(a); } int trd_module(int a, int b) { /* robust modulus function */ /* returns a (mod b) for b >0 and any integer a */ /* returns -1 if b <= 0 */ if (b <= 0) return(-1); else if(a > 0) while(a >= b) a -= b; else if(a < 0) while(a < 0) a +=b ; return(a); } /* * WaveThresh3 - End of TRD's multiwavelet code */ /* * * IsPowerOfTwo(n) * * Returns log to the base 2 of n * * e.g. if n = 2^J then IsPowerOfTwo(n) is J * * If n is not a power of two or is not positive then -1 is returned. * * Author: GPN * */ int IsPowerOfTwo(int n) { int cnt = 0; if (n<=0) return((-1)); while (!(0x01 & n)) { ++cnt; n >>= 1; } if (n > 1) return((-1)); else return(cnt); } void TRDerror(char *s) { REprintf("Module TRDerror in WaveThresh\n"); REprintf("%s", s); error("This should not happen. Stopping.\n"); } /* Following functions are to do Complex-valued non-decimated * wavelet transform PACKET version (i.e. though wst/AvBasis */ #define POINTDR(l,i) (DataR + (*LengthData*(l)) + (i)) #define POINTDI(l,i) (DataI + (*LengthData*(l)) + (i)) #define POINTCR(l,i) (CaR + (*LengthData*(l)) + (i)) #define POINTCI(l,i) (CaI + (*LengthData*(l)) + (i)) /* * COMWST: Complex-valued packet-ordered non-decimated transform */ void comwst(double *CaR, double *CaI, double *DataR, double *DataI, int *LengthData, int *levels, double *HR, double *HI, double *GR, double *GI, int *LengthH, int *error) /*--------------------- * Argument description *--------------------- double *CaR:: Will contain bottom most Cs (real) double *CaI:: Will contain bottom most Cs (imaginary) double *DataR:: This is a 2D array. Zeroeth level contains data double *DataI:: This is a 2D array. 
Zeroeth level contains data int *LengthData:: Length of Data, this is power of 2 int *levels:: The number of levels, 2^(*levels)=LengthData double *HR:: Smoothing filter (real) double *HI:: Smoothing filter (imag) double *GR:: Detail filter (real) double *GI:: Detail filter (imag) int *LengthH:: Length of filter int *error:: Error code, if non-zero then it's a mem error *---------------------*/ { int startin, outstart1, outstart2; register int i; double *bookR, *bookI; /* Bookkeeping vectors, one for R and I */ void comwvpkstr(double *CaR, double *CaI, double *DataR, double *DataI, int startin, int lengthin, int outstart1, int outstart2, int level, double *HR, double *HI, double *GR, double *GI, int LengthH, int *LengthData, double *bookR, double *bookI, int *error); *error = 0; /* Rprintf("This routine is wavepackst\n"); Rprintf("Length of data is %ld\n", *LengthData); Rprintf("Number of levels is %ld\n", *levels); Rprintf("Data array is:\n"); for(i= (int)*levels; i>=0; --i) for(j=0; j< *LengthData; ++j) { Rprintf("Level %d, Item %d is %lf\n", i,j, ACCESSD(i,j)); } */ /* Create a bookeeping vector. That contains the C,C' level smooths thoughout the algorithm. One for imag as well */ if ((bookR = (double *)malloc((unsigned)*LengthData*sizeof(double)))==NULL){ *error = 1; return; } if ((bookI = (double *)malloc((unsigned)*LengthData*sizeof(double)))==NULL){ *error = 2; return; } /* Copy original data to book keeping vector */ for(i=0; i< *LengthData; ++i) { *(bookR+i) = *POINTDR(*levels, i); *(bookI+i) = *POINTDI(*levels, i); } startin = 0; outstart1 = 0; outstart2 = ((int)*LengthData)/2; comwvpkstr(CaR, CaI, DataR, DataI, startin, (int)*LengthData, outstart1, outstart2, (int)*levels, HR, HI, GR, GI, (int)*LengthH, LengthData, bookR, bookI, error); if (*error != 0) return; else { free((void *)bookR); free((void *)bookI); } } void comwvpkstr(double *CaR, double *CaI, double *DataR, double *DataI, int startin, int lengthin, int outstart1, int outstart2, int level, double *HR, double *HI, double *GR, double *GI, int LengthH, int *LengthData, double *bookR, double *bookI, int *error) { register int i; int lengthout; double *book1R, *book1I, *book2R, *book2I; void comconC(double *c_inR, double *c_inI, int LengthCin, int firstCin, double *HR, double *HI, int LengthH, double *c_outR, double *c_outI, int LengthCout, int firstCout, int lastCout, int type, int step_factor, int bc); void comconD(double *c_inR, double *c_inI, int LengthCin, int firstCin, double *GR, double *GI, int LengthH, double *d_outR, double *d_outI, int LengthDout, int firstDout, int lastDout, int type, int step_factor, int bc); void comrotater(double *bookR, double *bookI, int length); void comwvpkstr(double *CaR, double *CaI, double *DataR, double *DataI, int startin, int lengthin, int outstart1, int outstart2, int level, double *HR, double *HI, double *GR, double *GI, int LengthH, int *LengthData, double *bookR, double *bookI, int *error); /* Rprintf("wvpkstr entry\n"); Rprintf("lengthout is %d\n", lengthout); */ lengthout = lengthin/2; if ((book1R = (double *)malloc((unsigned)lengthout*sizeof(double)))==NULL) { *error = 3; return; } else if ((book1I = (double *)malloc((unsigned)lengthout*sizeof(double)))==NULL) { *error = 4; return; } else if ((book2R = (double *)malloc((unsigned)lengthout*sizeof(double)))==NULL){ *error = 5; return; } else if ((book2I = (double *)malloc((unsigned)lengthout*sizeof(double)))==NULL){ *error = 6; return; } comconC(bookR, bookI, lengthin, 0, HR, HI, LengthH, book1R, book1I, lengthout, 0, 
lengthout-1, WAVELET, 1, PERIODIC); for(i=0; i < lengthout; ++i) { *POINTCR(level-1, (outstart1+i)) = *(book1R+i); *POINTCI(level-1, (outstart1+i)) = *(book1I+i); } /* Rprintf("book1 coefficients \n"); for(i=0; i0; --i) * *(book+i) = *(book+i-1); * book = tmp; */ /* COMMENT OUT (replaced by rotater function) tmp = *book; * for(i=0; irealval+i); *(answerI+i) = *(acopy->imagval+i); } destroycomplex(acopy); } void destroycomplex(struct complex *a) { free((void *)a->realval); free((void *)a->imagval); free((void *)a); } /* comAB Do the basis averaging for complex WST*/ /* * Error codes * * 1,2 - Memory error in creating clR, clI * 3,4 - Memory error in creating crR, crI * 3 - Memory error in creating packet (getpacket) */ struct complex *comAB(double *wstR, double *wstI, double *wstCR, double *wstCI, int nlevels, int level, int ix1, int ix2, double *HR, double *HI, double *GR, double *GI, int LengthH, int *error) /*--------------------- * Argument description *--------------------- double *wstR:: Wavelet coefficients, non-dec, real double *wstI:: Wavelet coefficients, non-dec, imag double *wstCR:: Father wav. coeffs, non-dec, real double *wstCI:: Father wav. coeffs, non-dec, imag int nlevels:: The original length of the data int level:: The level to reconstruct int ix1:: The "left" packet index int ix2:: The "right" packet index double *HR,*HI:: Smoothing filter double *GR,*GI:: Detail filter int LengthH:: The length of the filter int *error:: Error code *---------------------*/ { register int i; double *clR, *clI; double *crR, *crI; struct complex *genericC; struct complex *answer; double *genCR, *genCI; /* Generic Cs for when we need real and imag */ double *genDR, *genDI; /* Generic Cs for when we need real and imag */ int LengthC; int LengthCin; void comcbr(double *c_inR, double *c_inI, int LengthCin, int firstCin, int lastCin, double *d_inR, double *d_inI, int LengthDin, int firstDin, int lastDin, double *HR, double *HI, double *GR, double *GI, int LengthH, double *c_outR, double *c_outI, int LengthCout, int firstCout, int lastCout, int type, int bc); double *getpacket(double *wst, int nlevels, int level, int index, int *error); struct complex *comAB(double *wstR, double *wstI, double *wstCR, double *wstCI, int nlevels, int level, int ix1, int ix2, double *HR, double *HI, double *GR, double *GI, int LengthH, int *error); void rotateback(double *book, int length); void destroycomplex(struct complex *a); *error = 0; /* * Now we must create cl and cr. These will contain the reconstructions * from the left and right packets respectively. The length of these * vectors depends upon the level we're at. */ LengthC = 1 << (level+1); LengthCin = 1 << level; /* * Create cl and cr: real and imaginary */ if ((clR = (double *)malloc((unsigned)LengthC*sizeof(double)))==NULL) { *error = 1; return(NULL); } if ((clI = (double *)malloc((unsigned)LengthC*sizeof(double)))==NULL) { *error = 2; return(NULL); } if ((crR = (double *)malloc((unsigned)LengthC*sizeof(double)))==NULL) { *error = 3; return(NULL); } if ((crI = (double *)malloc((unsigned)LengthC*sizeof(double)))==NULL) { *error = 4; return(NULL); } /* * What we do next depends on the level. * * If level is zero then we've recursed all the way down to the bottom of * the tree. And we can reconstruct the 2-vectors one-up-the-tree by using * good old conbar(). * * If the level is not zero then we construct at that stage using conbar() * but to obtain the Cs we recurse. */ if (level != 0) { /* Get C's at this level by asking the next level down. 
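   The recursive call returns the reconstructed father coefficients for the
   left child packet; the corresponding detail packet is read straight from
   the wst matrix with getpacket() and the two are combined by comcbr().
   The same is done for the right child packet, whose reconstruction is
   cyclically shifted back with rotateback() before the left- and right-hand
   reconstructions are combined (averaged) to form this packet's
   contribution.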
*/ genericC = comAB(wstR, wstI, wstCR, wstCI, nlevels, level-1, 2*ix1, 2*ix1+1, HR, HI, GR, GI, LengthH, error); if (*error != 0) return(NULL); /* Get D's straight from the wst matrix */ genDR = getpacket(wstR, nlevels, level, ix1, error); genDI = getpacket(wstI, nlevels, level, ix1, error); if (*error != 0) return(NULL); /* Do the reconstruction */ comcbr(genericC->realval, genericC->imagval, LengthCin, 0, LengthCin-1, genDR, genDI, LengthCin, 0, LengthCin-1, HR, HI, GR, GI, LengthH, clR, clI, LengthC, 0, LengthC-1, WAVELET, PERIODIC); destroycomplex(genericC); free((void *)genDR); free((void *)genDI); /* Now do the RHS */ genericC = comAB(wstR, wstI, wstCR, wstCI, nlevels, level-1, 2*ix2, 2*ix2+1, HR, HI, GR, GI, LengthH, error); if (*error != 0) return(NULL); /* Get D's straight from the wst matrix */ genDR = getpacket(wstR, nlevels, level, ix2, error); genDI = getpacket(wstI, nlevels, level, ix2, error); if (*error != 0) return(NULL); /* Do the reconstruction */ comcbr(genericC->realval, genericC->imagval, LengthCin, 0, LengthCin-1, genDR, genDI, LengthCin, 0, LengthCin-1, HR, HI, GR, GI, LengthH, crR, crI, LengthC, 0, LengthC-1, WAVELET, PERIODIC); /* Rotate the RHS back */ rotateback(crR, LengthC); rotateback(crI, LengthC); /* Can get rid of generics now */ destroycomplex(genericC); free((void *)genDR); free((void *)genDI); } else { /* Have to really do it! */ genCR = getpacket(wstCR, nlevels, level, ix1, error); genCI = getpacket(wstCI, nlevels, level, ix1, error); if (*error != 0) return(NULL); genDR = getpacket(wstR, nlevels, level, ix1, error); genDI = getpacket(wstI, nlevels, level, ix1, error); if (*error != 0) return(NULL); /* Do the reconstruction */ comcbr(genCR, genCI, LengthCin, 0, LengthCin-1, genDR, genDI, LengthCin, 0, LengthCin-1, HR, HI, GR, GI, LengthH, clR, clI, LengthC, 0, LengthC-1, WAVELET, PERIODIC); free((void *)genCR); free((void *)genCI); free((void *)genDR); free((void *)genDI); genCR = getpacket(wstCR, nlevels, level, ix2, error); genCI = getpacket(wstCI, nlevels, level, ix2, error); if (*error != 0) return(NULL); genDR = getpacket(wstR, nlevels, level, ix2, error); genDI = getpacket(wstI, nlevels, level, ix2, error); if (*error != 0) return(NULL); /* Do the reconstruction */ comcbr(genCR, genCI, LengthCin, 0, LengthCin-1, genDR, genDI, LengthCin, 0, LengthCin-1, HR, HI, GR, GI, LengthH, crR, crI, LengthC, 0, LengthC-1, WAVELET, PERIODIC); /* Rotate the RHS back */ rotateback(crR, LengthC); rotateback(crI, LengthC); free((void *)genCR); free((void *)genCI); free((void *)genDR); free((void *)genDI); } for(i=0; irealval = clR; answer->imagval = clI; return(answer); } wavethresh/src/cthreb.c0000644000176200001440000001022114332233734014634 0ustar liggesusers#include #include #define PI 3.141592653589793 void Ccthrnegloglik(double *parvec, double *SigVec, double *di, double *dr, long *pnd, double *pans) { double p, Sig11, Sig12, Sig22, VpS11, VpS12, VpS22; double sum=0.0, detVpS, twopirtdetVpS, detSig, twopirtdetSig; double SigInv11, SigInv12, SigInv22, VpSInv11, VpSInv12, VpSInv22; double den1, den2, V11, V12, V22; int i; /* * Evaluate the -ve log likelihood assuming a two-component mixture * prior with mixing parameter p and Normal component variance matrix V. * * Data consists of a vector of length nd; di and dr contain the * imaginary and real parts of the data respectively. 
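 *
 * With Sigma the noise covariance (packed as SigVec) and V the prior
 * covariance assembled from parvec, the value returned in *pans is
 *
 *   -sum_i log( p N2(d_i; 0, V + Sigma) + (1 - p) N2(d_i; 0, Sigma) ),
 *
 * where N2( . ; 0, A) denotes the bivariate normal density with mean zero
 * and covariance A, evaluated below directly from the 2x2 determinant and
 * inverse of each matrix.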
*/ p = parvec[0]; Sig11 = SigVec[0]; Sig12 = SigVec[1]; Sig22 = SigVec[2]; V11 = parvec[1]; V22 = parvec[3]; V12 = parvec[2] * sqrt(V11*V22); VpS11 = V11 + Sig11; VpS12 = V12 + Sig12; VpS22 = V22 + Sig22; detVpS = VpS11 * VpS22 - pow(VpS12, 2.0); twopirtdetVpS = 2.0 * PI * sqrt(detVpS); detSig = Sig11 * Sig22 - pow(Sig12, 2.0); twopirtdetSig = 2.0 * PI * sqrt(detSig); SigInv11 = Sig22/detSig; SigInv12 = -Sig12/detSig; SigInv22 = Sig11/detSig; VpSInv11 = VpS22/detVpS; VpSInv12 = -VpS12/detVpS; VpSInv22 = VpS11/detVpS; for(i = 0; i < (*pnd); i++){ den1 = VpSInv11 * pow(dr[i], 2.0) + 2.0 *VpSInv12 * dr[i] * di[i] + VpSInv22 * pow(di[i], 2.0); den1 = exp(-0.5 * den1) / twopirtdetVpS; den2 = SigInv11 * pow(dr[i], 2.0) + 2.0 * SigInv12 * dr[i] * di[i] + SigInv22 * pow(di[i], 2.0); den2 = exp(-0.5 * den2) / twopirtdetSig; sum += log(p * den1 + (1.0 - p) * den2); } /* End for(i) */ (*pans) = -sum; } void Ccthrcalcodds(long *pnd, double *dr, double *di, double *VVec, double *SigVec, double *pp, double *ans, double *odds) { int k; double mult, detS, detVpS, tmp, V11, V12, V22; double Sig11, Sig12, Sig22, m11, m12, m22; /* * Compute posterior weights of non-zero components given: * * nd coefficients whose real and imaginary parts are in * dr and di respectively; * * prior and noise covariance matrices in VVec and SigVec; * * and prior weight in pp. * * Return answers in ans */ Sig11 = SigVec[0]; Sig12 = SigVec[1]; Sig22 = SigVec[2]; V11 = VVec[0]; V12 = VVec[1]; V22 = VVec[2]; detS = Sig11 * Sig22 - pow(Sig12, 2.0); detVpS = (V11 + Sig11) * (V22 + Sig22) - pow((V12 + Sig12), 2.0); m11 = Sig22/detS - (V22 + Sig22)/detVpS; m12 = -Sig12/detS + (V12 + Sig12)/detVpS; m22 = Sig11/detS - (V11 + Sig11)/detVpS; mult = (*pp)/(1.0 - (*pp)) * sqrt(detS/detVpS); for(k = 0; k < (*pnd); k++){ tmp = m11*pow(dr[k], 2.0) + 2.0 * m12 * dr[k] * di[k] + m22 * pow(di[k], 2.0); if(tmp > 1400.0) tmp = 1400.0; odds[k] = mult * exp(tmp/2.0); ans[k] = odds[k] / (1 + odds[k]); } } void Cpostmean(long *pnd, double *dr, double *di, double *VVec, double *SigVec, double *w, double *ansr, double *ansi) { int k; double detS, detV, tmp, V11, V12, V22, m11, m12, m22; double Sig11, Sig12, Sig22, SigI11, SigI12, SigI22, mi11, mi12, mi22; /* * Compute posterior means of wavelet coefficients given: * * nd coefficients whose real and imaginary parts are in * dr and di respectively; * * prior and noise covariance matrices in VVec and SigVec; * * posterior mixing weights in w. * * Return answers in ansr and ansi (re and im respectively). */ Sig11 = SigVec[0]; Sig12 = SigVec[1]; Sig22 = SigVec[2]; V11 = VVec[0]; V12 = VVec[1]; V22 = VVec[2]; detS = Sig11 * Sig22 - pow(Sig12, 2.0); detV = V11 * V22 - pow(V12, 2.0); /* Invert Sigma */ SigI11 = Sig22/detS; SigI12 = -Sig12/detS; SigI22 = Sig11/detS; /* Add Sigma^{-1} to V^{-1} */ m11 = SigI11 + V22/detV; m12 = SigI12 - V12/detV; m22 = SigI22 + V11/detV; /* Now invert that sum */ tmp = m11 * m22 - pow(m12, 2.0); mi11 = m22 / tmp; mi12 = -m12 / tmp; mi22 = m11 / tmp; for(k = 0; k < (*pnd); k++){ ansr[k] = w[k] * (dr[k] * (mi11 * SigI11 + mi12 * SigI12) + di[k] * (mi11 * SigI12 + mi12 * SigI22)); ansi[k] = w[k] * (dr[k] * (mi12 * SigI11 + mi22 * SigI12) + di[k] * (mi12 * SigI12 + mi22 * SigI22)); } } wavethresh/R/0000755000176200001440000000000014334426610012636 5ustar liggesuserswavethresh/R/function.r0000644000176200001440000131524314334426610014657 0ustar liggesusers".onAttach"<- function(...) 
{ wvrelease() } # # Create environment for some WaveThresh functions (PsiJ, ipndacw) to store # results for reuse. Let efficient than previous versions of WaveThresh # but plays more nicely with the R people # if (!exists("WTEnv", mode="environment")) { WTEnv <- new.env() } "LinaMayrand3" <- structure(list(S = structure(c(-0.0662912607362388-0.0855811337270078i, -0.0662912607362388+0.0855811337270078i, 0.0352266456251514+0i, 0.332671113131273+0i, 0.110485434560398-0.0855811337270078i, 0.110485434560398+0.0855811337270078i, -0.0854411265843329+0i, 0.806890861720468+0i, 0.662912607362388+0.171163681667578i, 0.662912607362388-0.171163681667578i, -0.135010726159072+0i, 0.45987820885317+0i, 0.662912607362388+0.171163681667578i, 0.662912607362388-0.171163681667578i, 0.45987820885317+0i, -0.135010726159072+0i, 0.110485434560398-0.0855811337270078i, 0.110485434560398+0.0855811337270078i, 0.806890861720468+0i, -0.0854411265843329+0i, -0.0662912607362388-0.0855811337270078i, -0.0662912607362388+0.0855811337270078i, 0.332671113131273+0i, 0.0352266456251514+0i), .Dim = as.integer(c(4, 6))), W = structure(c(-0.0662912607362388+0.0855811337270078i, -0.0662912607362388-0.0855811337270078i, 0.332671113131273+0i, 0.0352266456251514+0i, -0.110485434560398-0.0855811337270078i, -0.110485434560398+0.0855811337270078i, -0.806890861720468+0i, 0.0854411265843329+0i, 0.662912607362388-0.171163681667578i, 0.662912607362388+0.171163681667578i, 0.45987820885317+0i, -0.135010726159072+0i, -0.662912607362388+0.171163681667578i, -0.662912607362388-0.171163681667578i, 0.135010726159072+0i, -0.45987820885317+0i, 0.110485434560398+0.0855811337270078i, 0.110485434560398-0.0855811337270078i, -0.0854411265843329+0i, 0.806890861720468+0i, 0.0662912607362388-0.0855811337270078i, 0.0662912607362388+0.0855811337270078i, -0.0352266456251514+0i, -0.332671113131273+0i), .Dim = as.integer(c(4, 6)))), .Names = c("S", "W")) "LinaMayrand4" <- structure(list(S = structure(c(-0.0177682977370364-0.0843076215447475i, 0.102008915752387-0.140888496674900i, 0.512949613906065+0.139761114430506i, 0.682186908447622+0.309503739778537i, 0.261320230715269-0.0265993641984858i, -0.0829326081014193-0.196341989489948i, -0.0493947656694662-0.0288541287014151i, 0.00584356522937926+0.0277267464287373i), .Dim = as.integer(c(1, 8))), W = structure(c(-0.00584356522937926+0.0277267464287373i, -0.0493947656694662+0.0288541287014151i, 0.0829326081014193-0.196341989489948i, 0.261320230715269+0.0265993641984858i, -0.682186908447622+0.309503739778537i, 0.512949613906065-0.139761114430506i, -0.102008915752387-0.140888496674900i, -0.0177682977370364+0.0843076215447475i), .Dim = as.integer(c(1, 8)))), .Names = c("S", "W")) "LinaMayrand5" <- structure(list(S = structure(c(0.0104924505144049+0.0205904370844365i, -0.0131549130788862+0.0190001547113654i, -0.0480171707489855-0.0286805385686857i, 0.00443868969370267-0.0660029379744943i, -0.0171289081256946+0.00872852869497756i, -0.0407762717133288-0.0282317864304761i, -0.0457735601342806-0.0701496826501424i, 0.109045176430938-0.153497807951817i, -0.080639704153759-0.117947473548549i, 0.0139497502179911-0.217696442313413i, 0.342248869674118+0.0140988497709936i, 0.423036269003173+0.0594750872271794i, 0.151379708479645-0.0942236567554891i, 0.245969162830182-0.123232560001445i, 0.772484323772727+0.144605393302011i, 0.642829163846022+0.350360717350611i, 0.643003234585088+0.182852164538766i, 0.501119052917861+0.350160634132963i, 0.479618312994977+0.059046616665079i, 0.375016379640746+0.0994046669755474i, 
0.643003234585088+0.182852164538766i, 0.501119052917861+0.350160634132963i, -0.0564771558731019-0.0836581495806555i, -0.0349735956831048-0.248283003884364i, 0.151379708479645-0.0942236567554891i, 0.245969162830182-0.123232560001445i, -0.0809927427988999-0.0456676283259696i, -0.106064370637416-0.113222843833651i, -0.080639704153759-0.117947473548549i, 0.0139497502179911-0.217696442313413i, 0.0450707806910314+0.0140988497709936i, -0.0103356606306847+0.0594750872271794i, -0.0171289081256946+0.00872852869497756i, -0.0407762717133288-0.0282317864304761i, 0.0142495119522009+0.00120270047413905i, 0.0106798133845187+0.0203460275629919i, 0.0104924505144049+0.0205904370844365i, -0.0131549130788862+0.0190001547113654i, -0.00819760743953431-0.00489641086342034i, 0.000541697299744814-0.00805499281231948i), .Dim = as.integer(c(4, 10))), W = structure(c(0.0104924505144049-0.0205904370844365i, -0.0131549130788862-0.0190001547113654i, -0.00819760743953431+0.00489641086342034i, 0.000541697299744814+0.00805499281231948i, 0.0171289081256946+0.00872852869497756i, 0.0407762717133288-0.0282317864304761i, -0.0142495119522009+0.00120270047413905i, -0.0106798133845187+0.0203460275629919i, -0.080639704153759+0.117947473548549i, 0.0139497502179911+0.217696442313413i, 0.0450707806910314-0.0140988497709936i, -0.0103356606306847-0.0594750872271794i, -0.151379708479645-0.0942236567554891i, -0.245969162830182-0.123232560001445i, 0.0809927427988999-0.0456676283259696i, 0.106064370637416-0.113222843833651i, 0.643003234585088-0.182852164538766i, 0.501119052917861-0.350160634132963i, -0.0564771558731019+0.0836581495806555i, -0.0349735956831048+0.248283003884364i, -0.643003234585088+0.182852164538766i, -0.501119052917861+0.350160634132963i, -0.479618312994977+0.059046616665079i, -0.375016379640746+0.0994046669755474i, 0.151379708479645+0.0942236567554891i, 0.245969162830182+0.123232560001445i, 0.772484323772727-0.144605393302011i, 0.642829163846022-0.350360717350611i, 0.080639704153759-0.117947473548549i, -0.0139497502179911-0.217696442313413i, -0.342248869674118+0.0140988497709936i, -0.423036269003173+0.0594750872271794i, -0.0171289081256946-0.00872852869497756i, -0.0407762717133288+0.0282317864304761i, -0.0457735601342806+0.0701496826501424i, 0.109045176430938+0.153497807951817i, -0.0104924505144049+0.0205904370844365i, 0.0131549130788862+0.0190001547113654i, 0.0480171707489855-0.0286805385686857i, -0.00443868969370267-0.0660029379744943i), .Dim = as.integer(c(4, 10)))), .Names = c("S", "W")) "comp.theta" <- function(djk, Sigma.inv) { # # Takes in the complex wavelet coefficient d_{j,k} and the inverse # of the covariance matrix Sigma. Returns the scalar statistic # theta_{j,k}; this is \chi^2_2 if the coefficient contains # only noise. # if(!is.complex(djk)) stop( "comp.theta should only be used on complex wavelet coefficients." ) tmp <- cbind(Re(djk), Im(djk)) tmp <- diag(tmp %*% Sigma.inv %*% t(tmp)) return(tmp) } "cthr.negloglik" <- function(parvec, dstarvec, Sigma, Sigma.inv, twopirtdetS, code) { # # Compute -log likelihood of sample dstar from # mixture of bivariate normal distributions. # # Each row of dstarvec should contain one coefficient. 
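#
# When code == "C" the negative log-likelihood is evaluated by the compiled
# routine Ccthrnegloglik (see cthreb.c); otherwise the same mixture density
# is computed in R by applying cthreb.mixden to each row of dstarvec.  In
# both cases parvec is packed as (p, V11, rho, V22), so that the off-diagonal
# prior covariance is V12 = rho * sqrt(V11 * V22), as unpacked below.
#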
# if(code == "C") { SigVec <- c(Sigma[1, 1], Sigma[1, 2], Sigma[2, 2]) di <- dstarvec[, 2] dr <- dstarvec[, 1] pnd <- length(di) pans <- 0 Cout <- .C("Ccthrnegloglik", parvec = as.double(parvec), SigVec = as.double(SigVec), di = as.double(di), dr = as.double(dr), pnd = as.integer(pnd), pans = as.double(pans), PACKAGE = "wavethresh") return(Cout$pans) } else { p <- parvec[1] tmp <- parvec[3] * sqrt(parvec[2] * parvec[4]) V <- matrix(c(parvec[2], tmp, tmp, parvec[4]), byrow = TRUE, ncol = 2) VpS <- V + Sigma detVpS <- VpS[1, 1] * VpS[2, 2] - VpS[1, 2] * VpS[2, 1] VpS.inv <- matrix(c(VpS[2, 2], - VpS[1, 2], - VpS[2, 1], VpS[1, 1]), ncol = 2, byrow = TRUE)/detVpS twopirtdetVpS <- 2 * pi * sqrt(detVpS) tmp <- apply(dstarvec, 1, cthreb.mixden, p = p, twopirtdetS = twopirtdetS, twopirtdetVpS = twopirtdetVpS, Sigma.inv = Sigma.inv, VpS.inv = VpS.inv) return( - sum(log(tmp))) } } "cthreb.mixden" <- function(dstar, p, twopirtdetS, twopirtdetVpS, Sigma.inv, VpS.inv) { # # Compute density fn. of dstar from normal mixture # den1 <- exp(-0.5 * t(dstar) %*% VpS.inv %*% dstar)/twopirtdetVpS den2 <- exp(-0.5 * t(dstar) %*% Sigma.inv %*% dstar)/twopirtdetS return(p * den1 + (1 - p) * den2) } "cthreb.odds" <- function(coefs, p, V, Sig, code = "NAG") { # # Takes in coefs from a given level with EB-chosen prior parameters # p and V and DWT covariance matrix Sig. # # Returns posterior weights of coefficients being non-zero. # if(code == "C" || code == "NAG") { dr <- coefs[, 1] di <- coefs[, 2] nd <- length(dr) SigVec <- c(Sig[1, 1], Sig[1, 2], Sig[2, 2]) VVec <- c(V[1, 1], V[1, 2], V[2, 2]) pp <- p ans <- rep(0, nd) odds <- rep(0, nd) Cout <- .C("Ccthrcalcodds", pnd = as.integer(nd), dr = as.double(dr), di = as.double(di), VVec = as.double(VVec), SigVec = as.double(SigVec), pp = as.double(p), ans = as.double(ans), odds = as.double(odds),PACKAGE = "wavethresh") ptilde <- Cout$ans } else { VpS <- V + Sig detS <- Sig[1, 1] * Sig[2, 2] - Sig[1, 2]^2 detVpS <- VpS[1, 1] * VpS[2, 2] - VpS[1, 2]^2 mat <- solve(Sig) - solve(V + Sig) odds <- apply(coefs, 1, odds.matrix.mult, mat = mat) # Take care of excessively huge odds giving NAs in exp(odds/2) odds[odds > 1400] <- 1400 odds <- p/(1 - p) * sqrt(detS/detVpS) * exp(odds/2) ptilde <- odds/(1 + odds) } if(any(is.na(ptilde))) { print("NAs in ptilde; setting those values to one") ptilde[is.na(ptilde)] <- 1 } return(ptilde) } "cthreb.thresh" <- function(coefs, ptilde, V, Sig, rule, code) { # # Takes in coefs from a given level with EB-chosen # prior covariance matrix V, posterior weights ptilde # and DWT covariance matrix Sig. # # Returns thresholded coefficients; how the thresholding is # done depends on rule: # rule == "hard": ptilde < 1/2 -> zero, otherwise # keep unchanged (kill or keep). # rule == "soft": ptilde < 1/2 -> zero, otherwise # use posterior mean (kill or shrink). # rule == "mean": use posterior mean (no zeros). 
# if(rule == "hard") { coefs[ptilde <= 0.5, ] <- 0 return(coefs) } else if(code == "C" || code == "NAG") { nd <- length(coefs[, 1]) dr <- coefs[, 1] di <- coefs[, 2] ansr <- rep(0, nd) ansi <- rep(0, nd) VVec <- c(V[1, 1], V[1, 2], V[2, 2]) SigVec <- c(Sig[1, 1], Sig[1, 2], Sig[2, 2]) Cout <- .C("Cpostmean", pnd = as.integer(nd), dr = as.double(dr), di = as.double(di), VVec = as.double(VVec), SigVec = as.double(SigVec), ptilde = as.double(ptilde), ansr = as.double(ansr), ansi = as.double(ansi),PACKAGE = "wavethresh") coefs <- cbind(Cout$ansr, Cout$ansi) } else { stop("Unknown code or rule") } if(rule == "mean") return(coefs) coefs[ptilde <= 0.5, ] <- 0 return(coefs) } "cthresh" <- function(data, j0 = 3, dwwt = NULL, dev = madmad, rule = "hard", filter.number = 3.1, family = "LinaMayrand", plotfn = FALSE, TI = FALSE, details = FALSE, policy = "mws", code = "NAG", tol = 0.01) { # # Limited parameter checking # n <- length(data) nlevels <- IsPowerOfTwo(n) if(is.na(nlevels)) stop("Data should be of length a power of two.") if((rule != "hard") & (rule != "soft") & (rule != "mean")) { warning(paste("Unknown rule", rule, "so hard thresholding used" )) rule <- "hard" } if((policy != "mws") & (policy != "ebayes")) { warning(paste("Unknown policy", policy, "so using multiwavelet style thresholding")) policy <- "mws" } # # If 5 vanishing moments is called for, average over all # Lina-Mayrand wavelets with 5 vanishing moments by recursively # calling cthresh; if filter.number=0 use all LimaMayrand wavelets # if(filter.number == 3 & ((family == "LinaMayrand") || (family = "Lawton"))) { filter.number <- 3.1 family <- "LinaMayrand" } else if(filter.number == 4 & family == "LinaMayrand") filter.number <- 4.1 else if((filter.number == 5) & (family == "LinaMayrand")) { est1 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 5.1, TI = TI, policy = policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) est2 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 5.2, TI = TI, policy = policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) est3 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 5.3, TI = TI, policy = policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) est4 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 5.4, TI = TI, policy = policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) estimate <- (est1 + est2 + est3 + est4)/4 if(plotfn) { x <- (1:n)/n plot(x, data, ylim = range(data, Re(estimate))) lines(x, Re(estimate), lwd = 2, col = 2) } return(estimate) } else if((filter.number == 0) & (family == "LinaMayrand")) { est1 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 3.1, TI = TI, policy = policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) est2 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 4.1, TI = TI, policy = policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) est3 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 5.1, TI = TI, policy = policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) est4 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 5.2, TI = TI, policy = policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) est5 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 5.3, TI = TI, policy = policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) est6 <- cthresh(data, j0 = j0, dev = dev, rule = rule, filter.number = 5.4, TI = TI, policy = 
policy, details = FALSE, plotfn = FALSE, code = code, tol = tol ) estimate <- (est1 + est2 + est3 + est4 + est5 + est6)/6 if(plotfn) { x <- (1:n)/n plot(x, data, ylim = range(data, Re(estimate))) lines(x, Re(estimate), lwd = 2, col = 2) } return(estimate) } # # Take required type of wavelet transform. # if(TI==TRUE) data.wd <- wst(data, filter.number = filter.number, family = family) else data.wd <- wd(data, filter.number = filter.number, family = family) # # Generate covariance matrices # if(is.null(dwwt)) dwwt <- make.dwwt(nlevels = nlevels, filter.number = filter.number, family = family) sigsq <- dev(Re(accessD(data.wd, level = nlevels - 1))) + dev(Im( accessD(data.wd, level = nlevels - 1))) Sigma <- array(0, c(nlevels, 2, 2)) Sigma[, 1:2, 1:2] <- (sigsq * Im(dwwt))/2 Sigma[, 1, 1] <- (sigsq * (1 + Re(dwwt)))/2 Sigma[, 2, 2] <- (sigsq * (1 - Re(dwwt)))/2 thr.wd <- data.wd if(policy == "mws") { # # Do multiwavelet style universal thresholding # if(rule == "mean") { warning("Can't use posterior mean with multiwavelet style thresholding. Using soft thresholding instead" ) rule <- "soft" } lambda <- 2 * log(n) for(j in j0:(nlevels - 1)) { coefs <- accessD(data.wd, level = j) Sigma.inv <- solve(Sigma[j + 1, , ]) thetaj <- comp.theta(coefs, Sigma.inv) if(rule == "hard") coefs[abs(thetaj) < lambda] <- 0 else { k <- Re(coefs)/Im(coefs) thetahat <- pmax(0, thetaj - lambda) varr <- Sigma[j + 1, 1, 1] vari <- Sigma[j + 1, 2, 2] covar <- Sigma[j + 1, 1, 2] bhatsq <- (varr * vari - covar^2) * thetahat bhatsq <- bhatsq/(vari * k^2 - 2 * covar * k + varr) coefs <- complex(modulus = sqrt(bhatsq * (k^2 + 1)), argument = Arg(coefs)) } thr.wd <- putD(thr.wd, level = j, v = coefs) } } else { # # Do empirical Bayes shrinkage/thresholding. # Start by finding parameters: # EBpars <- find.parameters(data.wd = data.wd, dwwt = dwwt, j0 = j0, code = code, tol = tol, Sigma = Sigma) p <- c(EBpars$pars[, 1]) Sigma <- EBpars$Sigma V <- array(0, dim = c(nlevels - 1, 2, 2)) for(i in j0:(nlevels - 1)) V[i, , ] <- matrix(EBpars$pars[i, c(2, 3, 3, 4)], ncol = 2) # # Do thresholding. # for(j in j0:(nlevels - 1)) { coefs <- accessD(data.wd, level = j) coefs <- cbind(Re(coefs), Im(coefs)) ptilde <- cthreb.odds(coefs, p = p[j], V = V[j, , ], Sig = Sigma[j + 1, , ], code = code) coefs.thr <- cthreb.thresh(coefs, ptilde = ptilde, V = V[j, , ], Sig = Sigma[j, , ], rule = rule, code = code) thr.wd <- putD(thr.wd, level = j, v = complex(real = coefs.thr[, 1], imaginary = coefs.thr[, 2])) } } # # Reconstruct # if(TI) data.rec <- AvBasis(thr.wd) else data.rec <- wr(thr.wd) # # Plot data and estimate # if(plotfn) { x <- (1:n)/n plot(x, data, ylim = range(data, Re(data.rec))) lines(x, Re(data.rec), lwd = 2, col = 2) } # # Return either just the estimate or an unweildy list. # if(details == FALSE) invisible(data.rec) else if(policy == "ebayes") invisible(list(data = data, data.wd = data.wd, thr.wd = thr.wd, estimate = data.rec, Sigma = Sigma, sigsq = sigsq, rule = rule, EBpars = EBpars$pars, wavelet = list( filter.number, family))) else invisible(list(data = data, data.wd = data.wd, thr.wd = thr.wd, estimate = data.rec, Sigma = Sigma, sigsq = sigsq, rule = rule, wavelet = list(filter.number, family))) } "filter.select" <- function(filter.number, family = "DaubLeAsymm", constant = 1) { G <- NULL if(family == "DaubExPhase") { family <- "DaubExPhase" # # # The following wavelet coefficients are taken from # Daubechies, I (1988) Orthonormal Bases of Wavelets # Communications on Pure and Applied Mathematics. 
Page 980 # or Ten Lectures on Wavelets, Daubechies, I, 1992 # CBMS-NSF Regional Conference Series, page 195, Table 6.1 # # Comment from that table reads: # "The filter coefficients for the compactly supported wavelets # with extremal phase and highest number of vanishing moments # compatible with their support width". # if(filter.number == 1) { # # # This is for the Haar basis. (not in Daubechies). # H <- rep(0, 2) H[1] <- 1/sqrt(2) H[2] <- H[1] filter.name <- c("Haar wavelet") } else if(filter.number == 2) { H <- rep(0, 4) H[1] <- 0.482962913145 H[2] <- 0.836516303738 H[3] <- 0.224143868042 H[4] <- -0.129409522551 filter.name <- c("Daub cmpct on ext. phase N=2") } else if(filter.number == 3) { H <- rep(0, 6) H[1] <- 0.33267055295 H[2] <- 0.806891509311 H[3] <- 0.459877502118 H[4] <- -0.13501102001 H[5] <- -0.085441273882 H[6] <- 0.035226291882 filter.name <- c("Daub cmpct on ext. phase N=3") } else if(filter.number == 4) { H <- rep(0, 8) H[1] <- 0.230377813309 H[2] <- 0.714846570553 H[3] <- 0.63088076793 H[4] <- -0.027983769417 H[5] <- -0.187034811719 H[6] <- 0.030841381836 H[7] <- 0.032883011667 H[8] <- -0.010597401785 filter.name <- c("Daub cmpct on ext. phase N=4") } else if(filter.number == 5) { H <- rep(0, 10) H[1] <- 0.160102397974 H[2] <- 0.603829269797 H[3] <- 0.724308528438 H[4] <- 0.138428145901 H[5] <- -0.242294887066 H[6] <- -0.032244869585 H[7] <- 0.07757149384 H[8] <- -0.006241490213 H[9] <- -0.012580752 H[10] <- 0.003335725285 filter.name <- c("Daub cmpct on ext. phase N=5") } else if(filter.number == 6) { H <- rep(0, 12) H[1] <- 0.11154074335 H[2] <- 0.494623890398 H[3] <- 0.751133908021 H[4] <- 0.315250351709 H[5] <- -0.226264693965 H[6] <- -0.129766867567 H[7] <- 0.097501605587 H[8] <- 0.02752286553 H[9] <- -0.031582039318 H[10] <- 0.000553842201 H[11] <- 0.004777257511 H[12] <- -0.001077301085 filter.name <- c("Daub cmpct on ext. phase N=6") } else if(filter.number == 7) { H <- rep(0, 14) H[1] <- 0.077852054085 H[2] <- 0.396539319482 H[3] <- 0.729132090846 H[4] <- 0.469782287405 H[5] <- -0.143906003929 H[6] <- -0.224036184994 H[7] <- 0.071309219267 H[8] <- 0.080612609151 H[9] <- -0.038029936935 H[10] <- -0.016574541631 H[11] <- 0.012550998556 H[12] <- 0.000429577973 H[13] <- -0.001801640704 H[14] <- 0.0003537138 filter.name <- c("Daub cmpct on ext. phase N=7") } else if(filter.number == 8) { H <- rep(0, 16) H[1] <- 0.054415842243 H[2] <- 0.312871590914 H[3] <- 0.675630736297 H[4] <- 0.585354683654 H[5] <- -0.015829105256 H[6] <- -0.284015542962 H[7] <- 0.000472484574 H[8] <- 0.12874742662 H[9] <- -0.017369301002 H[10] <- -0.044088253931 H[11] <- 0.013981027917 H[12] <- 0.008746094047 H[13] <- -0.004870352993 H[14] <- -0.000391740373 H[15] <- 0.000675449406 H[16] <- -0.000117476784 filter.name <- c("Daub cmpct on ext. phase N=8") } else if(filter.number == 9) { H <- rep(0, 18) H[1] <- 0.038077947364 H[2] <- 0.243834674613 H[3] <- 0.60482312369 H[4] <- 0.657288078051 H[5] <- 0.133197385825 H[6] <- -0.293273783279 H[7] <- -0.096840783223 H[8] <- 0.148540749338 H[9] <- 0.030725681479 H[10] <- -0.067632829061 H[11] <- 0.000250947115 H[12] <- 0.022361662124 H[13] <- -0.004723204758 H[14] <- -0.004281503682 H[15] <- 0.001847646883 H[16] <- 0.000230385764 H[17] <- -0.000251963189 H[18] <- 3.934732e-05 filter.name <- c("Daub cmpct on ext. 
phase N=9") } else if(filter.number == 10) { H <- rep(0, 20) H[1] <- 0.026670057901 H[2] <- 0.188176800078 H[3] <- 0.527201188932 H[4] <- 0.688459039454 H[5] <- 0.281172343661 H[6] <- -0.249846424327 H[7] <- -0.195946274377 H[8] <- 0.127369340336 H[9] <- 0.093057364604 H[10] <- -0.071394147166 H[11] <- -0.029457536822 H[12] <- 0.033212674059 H[13] <- 0.003606553567 H[14] <- -0.010733175483 H[15] <- 0.001395351747 H[16] <- 0.001992405295 H[17] <- -0.000685856695 H[18] <- -0.000116466855 H[19] <- 9.358867e-05 H[20] <- -1.3264203e-05 filter.name <- c("Daub cmpct on ext. phase N=10") } else { stop("Unknown filter number for Daubechies wavelets with extremal phase and highest number of vanishing moments..." ) } } else if(family == "DaubLeAsymm") { family <- "DaubLeAsymm" # # # The following wavelet coefficients are taken from # Ten Lectures on Wavelets, Daubechies, I, 1992 # CBMS-NSF Regional Conference Series, page 198, Table 6.3 # # Comment from that table reads: # "The low pass filter coefficients for the "least-asymmetric" # compactly supported wavelets with maximum number of # vanishing moments, for N = 4 to 10 # if(filter.number == 4) { H <- rep(0, 8) H[1] <- -0.107148901418 H[2] <- -0.041910965125 H[3] <- 0.703739068656 H[4] <- 1.136658243408 H[5] <- 0.421234534204 H[6] <- -0.140317624179 H[7] <- -0.017824701442 H[8] <- 0.045570345896 filter.name <- c("Daub cmpct on least asymm N=4") H <- H/sqrt(2) } else if(filter.number == 5) { H <- rep(0, 10) H[1] <- 0.038654795955 H[2] <- 0.041746864422 H[3] <- -0.055344186117 H[4] <- 0.281990696854 H[5] <- 1.023052966894 H[6] <- 0.89658164838 H[7] <- 0.023478923136 H[8] <- -0.247951362613 H[9] <- -0.029842499869 H[10] <- 0.027632152958 filter.name <- c("Daub cmpct on least asymm N=5") H <- H/sqrt(2) } else if(filter.number == 6) { H <- rep(0, 12) H[1] <- 0.021784700327 H[2] <- 0.004936612372 H[3] <- -0.166863215412 H[4] <- -0.068323121587 H[5] <- 0.694457972958 H[6] <- 1.113892783926 H[7] <- 0.477904371333 H[8] <- -0.102724969862 H[9] <- -0.029783751299 H[10] <- 0.06325056266 H[11] <- 0.002499922093 H[12] <- -0.011031867509 filter.name <- c("Daub cmpct on least asymm N=6") H <- H/sqrt(2) } else if(filter.number == 7) { H <- rep(0, 14) H[1] <- 0.003792658534 H[2] <- -0.001481225915 H[3] <- -0.017870431651 H[4] <- 0.043155452582 H[5] <- 0.096014767936 H[6] <- -0.070078291222 H[7] <- 0.024665659489 H[8] <- 0.758162601964 H[9] <- 1.085782709814 H[10] <- 0.408183939725 H[11] <- -0.198056706807 H[12] <- -0.152463871896 H[13] <- 0.005671342686 H[14] <- 0.014521394762 filter.name <- c("Daub cmpct on least asymm N=7") H <- H/sqrt(2) } else if(filter.number == 8) { H <- rep(0, 16) H[1] <- 0.002672793393 H[2] <- -0.0004283943 H[3] <- -0.021145686528 H[4] <- 0.005386388754 H[5] <- 0.069490465911 H[6] <- -0.038493521263 H[7] <- -0.073462508761 H[8] <- 0.515398670374 H[9] <- 1.099106630537 H[10] <- 0.68074534719 H[11] <- -0.086653615406 H[12] <- -0.202648655286 H[13] <- 0.010758611751 H[14] <- 0.044823623042 H[15] <- -0.000766690896 H[16] <- -0.004783458512 filter.name <- c("Daub cmpct on least asymm N=8") H <- H/sqrt(2) } else if(filter.number == 9) { H <- rep(0, 18) H[1] <- 0.001512487309 H[2] <- -0.000669141509 H[3] <- -0.014515578553 H[4] <- 0.012528896242 H[5] <- 0.087791251554 H[6] <- -0.02578644593 H[7] <- -0.270893783503 H[8] <- 0.049882830959 H[9] <- 0.873048407349 H[10] <- 1.015259790832 H[11] <- 0.337658923602 H[12] <- -0.077172161097 H[13] <- 0.000825140929 H[14] <- 0.042744433602 H[15] <- -0.016303351226 H[16] <- -0.018769396836 H[17] <- 
0.000876502539 H[18] <- 0.001981193736 filter.name <- c("Daub cmpct on least asymm N=9") H <- H/sqrt(2) } else if(filter.number == 10) { H <- rep(0, 20) H[1] <- 0.001089170447 H[2] <- 0.000135245020 H[3] <- -0.01222064263 H[4] <- -0.002072363923 H[5] <- 0.064950924579 H[6] <- 0.016418869426 H[7] <- -0.225558972234 H[8] <- -0.100240215031 H[9] <- 0.667071338154 H[10] <- 1.0882515305 H[11] <- 0.542813011213 H[12] <- -0.050256540092 H[13] <- -0.045240772218 H[14] <- 0.07070356755 H[15] <- 0.008152816799 H[16] <- -0.028786231926 H[17] <- -0.001137535314 H[18] <- 0.006495728375 H[19] <- 8.0661204e-05 H[20] <- -0.000649589896 filter.name <- c("Daub cmpct on least asymm N=10") H <- H/sqrt(2) } else { stop("Unknown filter number for Daubechies wavelets with\n least asymmetry and highest number of vanishing moments..." ) } } else if (family == "Coiflets") { family <- "Coiflets" if (filter.number == 1) { H <- rep(0, 6) H[1] <- -0.051429728471 H[2] <- 0.238929728471 H[3] <- 0.602859456942 H[4] <- 0.272140543058 H[5] <- -0.051429972847 H[6] <- -0.011070271529 filter.name <- c("Coiflets N=1") H <- H * sqrt(2) } else if (filter.number == 2) { H <- rep(0, 12) H[1] <- 0.0115876 H[2] <- -0.02932014 H[3] <- -0.04763959 H[4] <- 0.273021 H[5] <- 0.5746824 H[6] <- 0.2948672 H[7] <- -0.05408561 H[8] <- -0.04202648 H[9] <- 0.01674441 H[10] <- 0.003967884 H[11] <- -0.001289203 H[12] <- -0.0005095054 filter.name <- c("Coiflets N=2") H <- H * sqrt(2) } else if (filter.number == 3) { H <- rep(0, 18) H[1] <- -0.002682419 H[2] <- 0.005503127 H[3] <- 0.01658356 H[4] <- -0.04650776 H[5] <- -0.04322076 H[6] <- 0.2865033 H[7] <- 0.5612853 H[8] <- 0.3029836 H[9] <- -0.05077014 H[10] <- -0.05819625 H[11] <- 0.02443409 H[12] <- 0.01122924 H[13] <- -0.006369601 H[14] <- -0.001820459 H[15] <- 0.0007902051 H[16] <- 0.0003296652 H[17] <- -5.019277e-05 H[18] <- -2.446573e-05 filter.name <- c("Coiflets N=3") H <- H * sqrt(2) } else if (filter.number == 4) { H <- rep(0, 24) H[1] <- 0.000630961 H[2] <- -0.001152225 H[3] <- -0.005194524 H[4] <- 0.01136246 H[5] <- 0.01886724 H[6] <- -0.05746423 H[7] <- -0.03965265 H[8] <- 0.2936674 H[9] <- 0.5531265 H[10] <- 0.3071573 H[11] <- -0.04711274 H[12] <- -0.06803813 H[13] <- 0.02781364 H[14] <- 0.01773584 H[15] <- -0.01075632 H[16] <- -0.004001013 H[17] <- 0.002652666 H[18] <- 0.0008955945 H[19] <- -0.0004165006 H[20] <- -0.0001838298 H[21] <- 4.408035e-05 H[22] <- 2.208286e-05 H[23] <- -2.304942e-06 H[24] <- -1.262175e-06 filter.name <- c("Coiflets N=4") H <- H * sqrt(2) } else if (filter.number == 5) { H <- rep(0, 30) H[1] <- -0.0001499638 H[2] <- 0.0002535612 H[3] <- 0.001540246 H[4] <- -0.002941111 H[5] <- -0.007163782 H[6] <- 0.01655207 H[7] <- 0.0199178 H[8] <- -0.06499726 H[9] <- -0.03680007 H[10] <- 0.2980923 H[11] <- 0.5475054 H[12] <- 0.3097068 H[13] <- -0.04386605 H[14] <- -0.07465224 H[15] <- 0.02919588 H[16] <- 0.02311078 H[17] <- -0.01397369 H[18] <- -0.00648009 H[19] <- 0.004783001 H[20] <- 0.001720655 H[21] <- -0.001175822 H[22] <- -0.000451227 H[23] <- 0.0002137298 H[24] <- 9.93776e-05 H[25] <- -2.92321e-05 H[26] <- -1.5072e-05 H[27] <- 2.6408e-06 H[28] <- 1.4593e-06 H[29] <- -1.184e-07 H[30] <- -6.73e-08 filter.name <- c("Coiflets N=5") H <- H * sqrt(2) } else { stop("Unknown filter number for Coiflet wavelets with\n least asymmetry and highest number of vanishing moments...") } } else if(family == "MagKing") { family <- "MagKing" if(filter.number == 4) { H <- c(1-1i, 4-1i, 4+1i, 1+1i)/10 G <- c(-1-2i, 5+2i, -5+2i, 1-2i)/14 filter.name <- c("MagareyKingsbury Wavelet 4-tap") 
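#
# A minimal usage sketch for filter.select(): the returned list carries the
# lowpass filter H (and, for the complex-valued families, a highpass G),
# together with a descriptive name.  For the real-valued orthonormal
# families tabulated above the coefficients sum to sqrt(2) and have unit
# sum of squares, which gives a quick sanity check on the tables.
#
flt <- filter.select(filter.number = 2, family = "DaubExPhase")
flt$name      # "Daub cmpct on ext. phase N=2"
sum(flt$H)    # approximately sqrt(2)
sum(flt$H^2)  # approximately 1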
} else stop("Only have 4-tap filter at present") } else if(family == "Nason") { family <- "Nason" if(filter.number == 3) { H <- c(-0.066291+0.085581i, 0.110485+0.085558i, 0.662912-0.171163i, 0.662912-0.171163i, 0.110485+0.085558i, -0.066291+0.085581i) G <- c(-0.066291+0.085581i, -0.110485-0.085558i, 0.662912-0.171163i, -0.662912+0.171163i , 0.110485+0.085558i, 0.066291-0.085581i) filter.name <- c("Nason Complex Wavelet 6-tap") } else stop("Only have 6-tap filter at present") } else if(family == "Lawton") { family <- "Lawton" if(filter.number == 3) { H <- c(-0.066291+0.085581i, 0.110485+0.085558i, 0.662912-0.171163i, 0.662912-0.171163i, 0.110485+0.085558, -0.066291+0.085581i) G <- c(-0.066291-0.085581i, -0.110485+0.085558i, 0.662912+0.171163i, -0.662912-0.171163i , 0.110485-0.085558i, 0.066291+0.085581i) filter.name <- c("Lawton Complex Wavelet 6-tap") } else stop("Only have 6-tap filter at present") } else if(family == "LittlewoodPaley") { family <- "LittlewoodPaley" # # # Define the function that computes the coefficients # hn <- function(n) { if(n == 0) return(1) else { pin2 <- (pi * 1:n)/2 pin2 <- (sin(pin2)/pin2) return(c(rev(pin2), 1, pin2)) } } # Next line changed in 4.6.4: added division by sqrt(2) H <- hn(filter.number)/sqrt(2) filter.name <- paste("Littlewood-Paley, N=", filter.number) } else if(family == "Yates") { if(filter.number != 1) stop("Only filter number 1 exists for Yates wavelet") family <- "Yates" H <- c(-1, 1)/sqrt(2) filter.name <- "Yates" } else if(family == "LinaMayrand") { origfn <- filter.number nsolution <- as.character(filter.number) dotpos <- regexpr("\\.", nsolution) leftint <- substring(nsolution, first = 1, last = dotpos - 1) rightint <- substring(nsolution, first = dotpos + 1, last = nchar(nsolution)) if(nchar(nsolution) == 0) nsolution <- 1 else nsolution <- as.numeric(rightint) filter.number <- as.numeric(leftint) matname <- paste(family, filter.number, sep = "") if(!exists(matname)) { stop(paste("Filter matrix \"", matname, "\" does not exist", sep = "")) } else { fm <- get(matname) if(nsolution > nrow(fm$S)) stop(paste("Solution number ", nsolution, " is too big. Filter matrix ", matname, " only has ", nrow(fm$S), " solutions") ) H <- fm$S[nsolution, ] G <- fm$W[nsolution, ] filter.name <- paste("Lina Mayrand, J=", filter.number, " (nsolution=", nsolution, ")", sep = "") } filter.number <- origfn } else { stop("Unknown family") } H <- H/constant return(list(H = H, G = G, name = filter.name, family = family, filter.number = filter.number)) } "find.parameters" <- function(data.wd, dwwt, j0, code, tol, Sigma) { # # Preliminaries # nlevels <- nlevelsWT(data.wd) pars <- matrix(0, ncol = 4, nrow = nlevels - 1) dimnames(pars) <- list(paste("level", 1:(nlevels - 1)), c("p", "var(re)", "covar(re,im)", "var(im)")) lower <- c(tol, tol, tol - 1, tol) upper <- c(1 - tol, 1000, 1 - tol, 1000) # # Calculate the covariance matrix of white noise put # through the DWT: # detSigma <- rep(0, nlevels) Sigma.inv <- array(0, c(nlevels, 2, 2)) for(i in 1:nlevels) { detSigma[i] <- Sigma[i, 1, 1] * Sigma[i, 2, 2] - Sigma[i, 1, 2]^2 Sigma.inv[i, , ] <- solve(Sigma[i, , ]) } # # Now search at each level in turn. 
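#
# find.parameters() is called from cthresh() under the "ebayes" policy.
# For orientation, a minimal end-to-end cthresh() sketch might look like
# the following; code = "C" avoids the external NAG optimiser that the
# default code = "NAG" shells out to, and the returned estimate is
# complex-valued (cthresh() itself plots only its real part when
# plotfn = TRUE).  test.dataCT() is defined further down this file.
#
td  <- test.dataCT(type = "doppler", n = 512, rsnr = 5)
est <- cthresh(td$ynoise, j0 = 3, policy = "ebayes", code = "C")
plot(td$x, td$ynoise, col = "grey")
lines(td$x, Re(est), lwd = 2)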
# for(j in j0:(nlevels - 1)) { # # Get a starting point for the # search over p_j and V_j # coefs <- accessD(data.wd, level = j) re <- Re(coefs) im <- Im(coefs) start <- c(min(1 - 10 * tol, 0.5^(j - j0)), var(re), cor(re, im), var(im)) # # Find the MML parameter values # coefs <- accessD(data.wd, level = j) dstarvec <- cbind(Re(coefs), Im(coefs)) if(code == "NAG") { write(c(Sigma[j + 1, 1, 1], Sigma[j + 1, 1, 2], Sigma[ j + 1, 2, 2]), file = "cthresh.maxloglik.data") write(length(re), file = "cthresh.maxloglik.data", append = TRUE) write(t(cbind(re, im)), file = "cthresh.maxloglik.data", append = TRUE, ncolumns = 2) write(start, file = "cthresh.maxloglik.start") write(t(cbind(lower, upper)), file = "cthresh.maxloglik.start", append = TRUE) system("./cthresh.maxloglik") tmp <- scan(file = "cthresh.maxloglik.out", multi.line = TRUE, quiet = TRUE) pars[j, ] <- tmp[1:4] pars[j, 3] <- pars[j, 3] * sqrt(pars[j, 2] * pars[ j, 4]) ifail <- tmp[6] if(ifail > 0) warning(paste("At level", j, "NAG routine e04jyf returned ifail", ifail)) system("rm cthresh.maxloglik.out cthresh.maxloglik.data cthresh.maxloglik.start" ) } else { if(exists("optim")) tmp <- optim(start, cthr.negloglik, method = "L-BFGS-B", lower = lower, upper = upper, dstarvec = dstarvec, Sigma = Sigma[j + 1, , ], Sigma.inv = Sigma.inv[ j + 1, , ], twopirtdetS = 2 * pi * sqrt( detSigma[j + 1]), code = code)$par else tmp <- nlminb(start, cthr.negloglik, lower = lower, upper = upper, dstarvec = dstarvec, Sigma = Sigma[j + 1, , ], Sigma.inv = Sigma.inv[ j + 1, , ], twopirtdetS = 2 * pi * sqrt( detSigma[j + 1]), code = code)$parameters pars[j, ] <- tmp pars[j, 3] <- pars[j, 3] * sqrt(pars[j, 2] * pars[ j, 4]) } } invisible(list(pars = pars, Sigma = Sigma)) } "make.dwwt" <- function(nlevels, filter.number = 3.1, family = "LinaMayrand") { # # Given a choice of wavelet and number of # resolution levels, compute the distinct # elements of diag(WW^T). # zero.wd <- wd(rep(0, 2^nlevels), filter.number = filter.number, family = family) dwwt <- rep(0, nlevels) tmp.wd <- putD(zero.wd, v = 1, level = 0) tmp <- Conj(wr(tmp.wd)) # # tmp contains the row of W which gives the mother wavelet # coefficient. Need Conj() as the inverse DWT corresponds to # Conj(W^T). Now get the corresponding element of diag(WW^T) # by summing the squared elements of tmp. # # Then repeat for each resolution level. # dwwt[1] <- sum(tmp * tmp) for(lev in 1:(nlevels - 1)) { tmp.wd <- putD(zero.wd, v = c(1, rep(0, 2^lev - 1)), level = lev) tmp <- Conj(wr(tmp.wd)) dwwt[lev + 1] <- sum(tmp * tmp) } return(dwwt) } "odds.matrix.mult" <- function(coef, mat) { return(t(coef) %*% mat %*% coef) } "test.dataCT" <- function(type = "ppoly", n = 512, signal = 1, rsnr = 7, plotfn = FALSE) { x <- seq(0., 1., length = n + 1)[1:n] if(type == "ppoly") { y <- rep(0., n) xsv <- (x <= 0.5) y[xsv] <- -16. * x[xsv]^3. + 12. * x[xsv]^2. xsv <- (x > 0.5) & (x <= 0.75) y[xsv] <- (x[xsv] * (16. * x[xsv]^2. - 40. * x[xsv] + 28.))/ 3. - 1.5 xsv <- x > 0.75 y[xsv] <- (x[xsv] * (16. * x[xsv]^2. - 32. * x[xsv] + 16.))/ 3. } else if(type == "blocks") { t <- c(0.1, 0.13, 0.15, 0.23, 0.25, 0.4, 0.44, 0.65, 0.76, 0.78, 0.81) h <- c(4., -5., 3., -4., 5., -4.2, 2.1, 4.3, -3.1, 2.1, -4.2) y <- rep(0., n) for(i in seq(1., length(h))) { y <- y + (h[i] * (1. + sign(x - t[i])))/2. 
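#
# Because make.dwwt() above depends only on the wavelet and the number of
# resolution levels, its output can be computed once and reused across
# repeated cthresh() calls via the dwwt argument; a small sketch:
#
dwwt9 <- make.dwwt(nlevels = 9, filter.number = 3.1, family = "LinaMayrand")
length(dwwt9)                       # one entry per resolution level, here 9
y     <- test.dataCT(type = "bumps", n = 512)$ynoise
est   <- cthresh(y, dwwt = dwwt9)   # reuses the precomputed diag(WW^T) values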
} } else if(type == "bumps") { t <- c(0.1, 0.13, 0.15, 0.23, 0.25, 0.4, 0.44, 0.65, 0.76, 0.78, 0.81) h <- c(4., 5., 3., 4., 5., 4.2, 2.1, 4.3, 3.1, 5.1, 4.2) w <- c(0.005, 0.005, 0.006, 0.01, 0.01, 0.03, 0.01, 0.01, 0.005, 0.008, 0.005) y <- rep(0, n) for(j in 1:length(t)) { y <- y + h[j]/(1. + abs((x - t[j])/w[j]))^4. } } else if(type == "heavi") y <- 4. * sin(4. * pi * x) - sign(x - 0.3) - sign(0.72 - x) else if(type == "doppler") { eps <- 0.05 y <- sqrt(x * (1. - x)) * sin((2. * pi * (1. + eps))/(x + eps)) } else { cat(c("test.dataCT: unknown test function type", type, "\n")) cat(c("Terminating\n")) return("NoType") } y <- y/sqrt(var(y)) * signal ynoise <- y + rnorm(n, 0, signal/rsnr) if(plotfn == TRUE) { if(type == "ppoly") mlab <- "Piecewise polynomial" if(type == "blocks") mlab <- "Blocks" if(type == "bumps") mlab <- "Bumps" if(type == "heavi") mlab <- "HeaviSine" if(type == "doppler") mlab <- "Doppler" plot(x, y, type = "l", lwd = 2, main = mlab, ylim = range( c(y, ynoise))) lines(x, ynoise, col = 2) lines(x, y) } return(list(x = x, y = y, ynoise = ynoise, type = type, rsnr = rsnr)) } "wd"<- function(data, filter.number = 10, family = "DaubLeAsymm", type = "wavelet", bc = "periodic", verbose = FALSE, min.scale = 0, precond = TRUE) { if(verbose == TRUE) cat("wd: Argument checking...") if(!is.atomic(data)) stop("Data is not atomic") DataLength <- length(data) # # # Check that we have a power of 2 data elements # nlevels <- nlevelsWT(data) if(is.na(nlevels)) stop("Data length is not power of two") # # # Check for correct type # if(type != "wavelet" && type != "station") stop("Unknown type of wavelet decomposition") if(type == "station" && bc != "periodic") stop( "Can only do periodic boundary conditions with station" ) # # # Select the appropriate filter # if(verbose == TRUE) cat("...done\nFilter...") if(bc != "interval") filter <- filter.select(filter.number = filter.number, family = family) # # # Build the first/last database # if(verbose == TRUE) cat("...selected\nFirst/last database...") fl.dbase <- first.last(LengthH = length(filter$H), DataLength = DataLength, type = type, bc = bc) # # # # Check if we are doing "wavelets on the interval". If so, do it! 
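#
# A minimal round-trip sketch for the decimated transform computed here:
# analyse with wd(), inspect one level of detail coefficients with
# accessD(), and confirm perfect reconstruction with wr() (defined below).
#
x   <- rnorm(64)
xwd <- wd(x, filter.number = 2, family = "DaubExPhase")
accessD(xwd, level = 5)          # the 32 finest-scale wavelet coefficients
max(abs(wr(xwd) - x))            # effectively zero: perfect reconstruction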
# if(bc == "interval") { ans <- wd.int(data = data, preferred.filter.number = filter.number, min.scale = min.scale, precond = precond ) fl.dbase <- first.last(LengthH = length(filter$H), DataLength = DataLength, type = type, bc = bc, current.scale = min.scale) # filter <- list(name = paste("CDV", filter.number, sep = ""), family = "CDV", filter.number = filter.number) l <- list(transformed.vector = ans$transformed.vector, current.scale = ans$current.scale, filters.used = ans$ filters.used, preconditioned = ans$preconditioned, date = ans$date, nlevels = IsPowerOfTwo(length(ans$ transformed.vector)), fl.dbase = fl.dbase, type = type, bc = bc, filter = filter) class(l) <- "wd" return(l) } # # Put in the data # C <- rep(0, fl.dbase$ntotal) C[1:DataLength] <- data # if(verbose == TRUE) error <- 1 else error <- 0 if(verbose == TRUE) cat("built\n") # # # Compute the decomposition # if(verbose == TRUE) cat("Decomposing...\n") nbc <- switch(bc, periodic = 1, symmetric = 2) if(is.null(nbc)) stop("Unknown boundary condition") ntype <- switch(type, wavelet = 1, station = 2) if(is.null(filter$G)) { wavelet.decomposition <- .C("wavedecomp", C = as.double(C), D = as.double(rep(0, fl.dbase$ntotal.d)), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), nlevels = as.integer(nlevels), firstC = as.integer(fl.dbase$first.last.c[, 1]), lastC = as.integer(fl.dbase$first.last.c[, 2]), offsetC = as.integer(fl.dbase$first.last.c[, 3]), firstD = as.integer(fl.dbase$first.last.d[, 1]), lastD = as.integer(fl.dbase$first.last.d[, 2]), offsetD = as.integer(fl.dbase$first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") } else { wavelet.decomposition <- .C("comwd", CR = as.double(Re(C)), CI = as.double(Im(C)), LengthC = as.integer(fl.dbase$ntotal), DR = as.double(rep(0, fl.dbase$ntotal.d)), DI = as.double(rep(0, fl.dbase$ntotal.d)), LengthD = as.integer(fl.dbase$ntotal.d), HR = as.double(Re(filter$H)), HI = as.double( - Im(filter$H)), GR = as.double(Re(filter$G)), GI = as.double( - Im(filter$G)), LengthH = as.integer(length(filter$H)), nlevels = as.integer(nlevels), firstC = as.integer(fl.dbase$first.last.c[, 1]), lastC = as.integer(fl.dbase$first.last.c[, 2]), offsetC = as.integer(fl.dbase$first.last.c[, 3]), firstD = as.integer(fl.dbase$first.last.d[, 1]), lastD = as.integer(fl.dbase$first.last.d[, 2]), offsetD = as.integer(fl.dbase$first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") } if(verbose == TRUE) cat("done\n") error <- wavelet.decomposition$error if(error != 0) { cat("Error ", error, " occured in wavedecomp\n") stop("Error") } if(is.null(filter$G)) { l <- list(C = wavelet.decomposition$C, D = wavelet.decomposition$D, nlevels = nlevelsWT(wavelet.decomposition), fl.dbase = fl.dbase, filter = filter, type = type, bc = bc, date = date()) } else { l <- list(C = complex(real = wavelet.decomposition$CR, imaginary = wavelet.decomposition$CI), D = complex(real = wavelet.decomposition$DR, imaginary = wavelet.decomposition$DI ), nlevels = nlevelsWT(wavelet.decomposition), fl.dbase = fl.dbase, filter = filter, type = type, bc = bc, date = date()) } class(l) <- "wd" return(l) } "wr.wd"<- function(wd, start.level = 0, verbose = FALSE, bc = wd$bc, return.object = FALSE, filter.number = wd$filter$filter.number, family = wd$filter$family, ...) 
{ if(IsEarly(wd)) { ConvertMessage() stop() } if(verbose == TRUE) cat("Argument checking...") # # # Check class of wd # if(verbose == TRUE) cat("Argument checking\n") ctmp <- class(wd) if(is.null(ctmp)) stop("wd has no class") else if(ctmp != "wd") stop("wd is not of class wd") if(start.level < 0) stop("start.level must be nonnegative") if(start.level >= nlevelsWT(wd)) stop("start.level must be less than the number of levels") if(is.null(wd$filter$filter.number)) stop("NULL filter.number for wd") if(bc != wd$bc) warning("Boundary handling is different to original") if(wd$type == "station") stop("Use convert to generate wst object and then AvBasis or InvBasis" ) if(wd$bc == "interval") { warning("All optional arguments ignored for \"wavelets on the interval\" transform" ) return(wr.int(wd)) } type <- wd$type filter <- filter.select(filter.number = filter.number, family = family) LengthH <- length(filter$H) # # # Build the reconstruction first/last database # if(verbose == TRUE) cat("...done\nFirst/last database...") r.first.last.c <- wd$fl.dbase$first.last.c[(start.level + 1):(wd$ nlevels + 1), ] # r.first.last.d <- matrix(wd$fl.dbase$first.last.d[(start.level + 1):(wd$ nlevels), ], ncol = 3) ntotal <- r.first.last.c[1, 3] + r.first.last.c[1, 2] - r.first.last.c[ 1, 1] + 1 names(ntotal) <- NULL C <- accessC(wd, level = start.level, boundary = TRUE) C <- c(rep(0, length = (ntotal - length(C))), C) Nlevels <- nlevelsWT(wd)- start.level error <- 0 # # # Load object code # if(verbose == TRUE) cat("...built\n") if(verbose == TRUE) { cat("Reconstruction...") error <- 1 } ntype <- switch(type, wavelet = 1, station = 2) if(is.null(ntype)) stop("Unknown type of decomposition") nbc <- switch(bc, periodic = 1, symmetric = 2) if(is.null(nbc)) stop("Unknown boundary handling") if(!is.complex(wd$D)) { wavelet.reconstruction <- .C("waverecons", C = as.double(C), D = as.double(wd$D), H = as.double(filter$H), LengthH = as.integer(LengthH), nlevels = as.integer(Nlevels), firstC = as.integer(r.first.last.c[, 1]), lastC = as.integer(r.first.last.c[, 2]), offsetC = as.integer(r.first.last.c[, 3]), firstD = as.integer(r.first.last.d[, 1]), lastD = as.integer(r.first.last.d[, 2]), offsetD = as.integer(r.first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") } else { wavelet.reconstruction <- .C("comwr", CR = as.double(Re(C)), CI = as.double(Im(C)), LengthC = as.integer(length(C)), DR = as.double(Re(wd$D)), DI = as.double(Im(wd$D)), LengthD = as.integer(length(wd$D)), HR = as.double(Re(filter$H)), HI = as.double(Im(filter$H)), GR = as.double(Re(filter$G)), GI = as.double(Im(filter$G)), LengthH = as.integer(LengthH), nlevels = as.integer(Nlevels), firstC = as.integer(r.first.last.c[, 1]), lastC = as.integer(r.first.last.c[, 2]), offsetC = as.integer(r.first.last.c[, 3]), firstD = as.integer(r.first.last.d[, 1]), lastD = as.integer(r.first.last.d[, 2]), offsetD = as.integer(r.first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") } if(verbose == TRUE) cat("done\n") error <- wavelet.reconstruction$error if(error != 0) { cat("Error code returned from waverecons: ", error, "\n") stop("waverecons returned error") } fl.dbase <- wd$fl.dbase if(!is.complex(wd$D)) { l <- list(C = wavelet.reconstruction$C, D = wavelet.reconstruction$D, fl.dbase = fl.dbase, nlevels = nlevelsWT(wd), filter = filter, type = type, bc = bc, date = date()) } else { l <- list(C = complex(real = 
wavelet.reconstruction$CR, imaginary = wavelet.reconstruction$CI), D = complex(real = wavelet.reconstruction$DR, imaginary = wavelet.reconstruction$ DI), fl.dbase = fl.dbase, nlevels = nlevelsWT(wd), filter = filter, type = type, bc = bc, date = date()) } class(l) <- "wd" if(return.object == TRUE) return(l) else return(accessC(l)) stop("Shouldn't get here\n") } "wst"<- function(data, filter.number = 10, family = "DaubLeAsymm", verbose = FALSE) { if(verbose == TRUE) cat("Argument checking...") DataLength <- length(data) # # # Check that we have a power of 2 data elements # nlevels <- log(DataLength)/log(2) if(round(nlevels) != nlevels) stop("The length of data is not a power of 2") # if(verbose == TRUE) { cat("There are ", nlevels, " levels\n") } # # Select the appropriate filter # if(verbose == TRUE) cat("...done\nFilter...") filter <- filter.select(filter.number = filter.number, family = family) # # # Compute the decomposition # if(verbose == TRUE) cat("Decomposing...\n") newdata <- c(rep(0, DataLength * nlevels), data) Carray <- newdata error <- 0 # # # See whether we are using complex wavelets # if(is.null(filter$G)) { wavelet.station <- .C("wavepackst", Carray = as.double(Carray), newdata = as.double(newdata), DataLength = as.integer(DataLength), levels = as.integer(nlevels), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), error = as.integer(error), PACKAGE = "wavethresh") } else { wavelet.station <- .C("comwst", CaR = as.double(Re(Carray)), CaI = as.double(Im(Carray)), newdataR = as.double(Re(newdata)), newdataI = as.double(Im(newdata)), DataLength = as.integer(DataLength), levels = as.integer(nlevels), HR = as.double(Re(filter$H)), HI = as.double( - Im(filter$H)), GR = as.double(Re(filter$G)), GI = as.double( - Im(filter$G)), LengthH = as.integer(length(filter$H)), error = as.integer(error), PACKAGE = "wavethresh") } if(wavelet.station$error != 0) stop(paste("Memory error in wavepackst (or comwst). Code ", wavelet.station)) if(is.null(filter$G)) { wpm <- matrix(wavelet.station$newdata, ncol = DataLength, byrow = TRUE) Carray <- matrix(wavelet.station$Carray, ncol = DataLength, byrow = TRUE) } else { newdata <- complex(real = wavelet.station$newdataR, imaginary = wavelet.station$newdataI) Carray <- complex(real = wavelet.station$CaR, imaginary = wavelet.station$CaI) wpm <- matrix(newdata, ncol = DataLength, byrow = TRUE) Carray <- matrix(Carray, ncol = DataLength, byrow = TRUE) } wp <- list(wp = wpm, Carray = Carray, nlevels = nlevels, filter = filter, date = date()) class(wp) <- "wst" wp } "AutoBasis"<- function(wp, verbose = FALSE, zilchtol = 1e-08,entropy = Shannon.entropy) { if (!inherits(wp, "wp")) { stop("Can only operate on wavelet packet objects") } if(IsEarly(wp)) { ConvertMessage() stop() } # # # Including the original data set there are nlevels levels. Labelled # 0,...,nlevels-1. Level nlevels-1 is the original data set. 
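#
# wst() above computes the packet-ordered non-decimated transform.  A
# typical denoising cycle thresholds the wst object and inverts it by
# basis averaging with AvBasis() (defined just below); threshold() for
# wst objects lives elsewhere in the package, and the universal policy is
# assumed here, as used for wd objects elsewhere in this file.  DJ.EX()
# is defined further down.
#
y    <- DJ.EX(n = 256, noisy = TRUE, rsnr = 4)$heavi
ywst <- wst(y, filter.number = 10, family = "DaubLeAsymm")
ythr <- threshold(ywst, policy = "universal", type = "soft")
yest <- AvBasis(ythr)            # average-basis inverse of the thresholded object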
# nlevels <- nlevelsWT(wp) for(i in 1:(nlevels - 1)) { NPBaseLev <- 2^(nlevels - i) PKLength <- 2^i if(verbose == TRUE) { cat("Base level is ", i) cat(" Number of packets is ", NPBaseLev, "\n") cat(" Packet Length is ", PKLength, "\n") } scan() for(j in 0:(NPBaseLev - 1)) { p1 <- getpacket(wp, level = (i - 1), index = 2 * j) p2 <- getpacket(wp, level = (i - 1), index = 2 * j + 1) p <- getpacket(wp, level = i, index = j) if(verbose == TRUE) { cat("Comparing: (", i, ",", j, ") with ") cat("(", (i - 1), ",", 2 * j, ") + (", (i - 1), ",", 2 * j + 1, ")\n") } if(is.na(p1[1]) || is.na(p2[1])) { if(verbose == TRUE) { cat("Upper Level is not eligible for") cat(" incorporation. Moving on...\n") } wp <- putpacket(wp, lev = i, index = j, packet = rep(NA, length = length(p))) } else { e1 <- entropy(p1, zilchtol) e2 <- entropy(p2, zilchtol) e <- entropy(p, zilchtol) if(verbose == TRUE) { cat("Entropy:", signif(e, 3), "?", signif(e1, 3), "+", signif(e2, 3), "=", signif(e1 + e2, 3)) } if(e < e1 + e2 || (is.infinite(e) && is.infinite(e1) && is.infinite(e2))) { wp <- putpacket(wp, level = (i - 1), index = 2 * j, packet = rep(NA, length = PKLength/2 )) wp <- putpacket(wp, level = (i - 1), index = 2 * j + 1, packet = rep(NA, length = PKLength/2)) } else { wp <- putpacket(wp, level = i, index = j, packet = rep(NA, length = PKLength)) } if(e < e1 + e2 || (is.infinite(e) && is.infinite(e1) && is.infinite(e2))) cat(" REPLACE\n") else cat(" KEEP\n") } } } wp } "AvBasis"<- function(...) UseMethod("AvBasis") "AvBasis.wst"<- function(wst, Ccode = TRUE, ...) { nlevels <- nlevelsWT(wst) if(is.null(wst$filter$G)) { if(Ccode == FALSE) { answer <- av.basis(wst, level = nlevels - 1, ix1 = 0, ix2 = 1, filter = wst$filter) } else { error <- 0 answer <- rep(0, 2^nlevels) H <- wst$filter$H aobj <- .C("av_basisWRAP", wstR = as.double(wst$wp), wstC = as.double(wst$Carray), LengthData = as.integer(length(answer)), level = as.integer(nlevels - 1), H = as.double(H), LengthH = as.integer(length(H)), answer = as.double(answer), error = as.integer(error), PACKAGE = "wavethresh") if(aobj$error != 0) stop(paste("av_basisWRAP returned error code", aobj$error)) answer <- aobj$answer } } else { error <- 0 answerR <- answerI <- rep(0, 2^nlevels) H <- wst$filter$H G <- wst$filter$G aobj <- .C("comAB_WRAP", wstR = as.double(Re(wst$wp)), wstI = as.double(Im(wst$wp)), wstCR = as.double(Re(wst$Carray)), wstCI = as.double(Im(wst$Carray)), LengthData = as.integer(length(answerR)), level = as.integer(nlevels - 1), HR = as.double(Re(H)), HI = as.double(Im(H)), GR = as.double(Re(G)), GI = as.double(Im(G)), LengthH = as.integer(length(H)), answerR = as.double(answerR), answerI = as.double(answerI), error = as.integer(error), PACKAGE = "wavethresh") if(aobj$error != 0) stop(paste("av_basisWRAP returned error code", aobj$ error)) answer <- complex(real = aobj$answerR, imaginary = aobj$answerI) } answer } "AvBasis.wst2D"<- function(wst2D, ...) 
{ filter <- wst2D$filter amdim <- dim(wst2D$wst2D) im <- matrix(0, nrow = amdim[2]/2, ncol = amdim[2]/2) ans <- .C("SAvBasis", am = as.double(wst2D$wst2D), d1 = as.integer(amdim[1]), d12 = as.integer(amdim[1] * amdim[2]), TheSmooth = as.double(im), levj = as.integer(amdim[1]), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), error = as.integer(0), PACKAGE = "wavethresh") if(ans$error != 0) stop(paste("Error code was ", ans$error)) matrix(ans$TheSmooth, nrow = amdim[2]/2) } "BAYES.THR"<- function(data, alpha = 0.5, beta = 1, filter.number = 8, family = "DaubLeAsymm", bc = "periodic", dev = var, j0 = 5, plotfn = FALSE) { # #------------Estimation of C1 and C2 via universal threshodling----------------- # ywd <- wd(data, filter.number = filter.number, family = family, bc = bc ) sigma <- sqrt(dev(accessD(ywd, level = (nlevelsWT(ywd) - 1)))) uvt <- threshold(ywd, policy = "universal", type = "soft", dev = dev, by.level = FALSE, levels = (nlevelsWT(ywd) - 1), return.threshold = TRUE) universal <- threshold(ywd, policy = "manual", value = uvt, type = "soft", dev = dev, levels = j0:(nlevelsWT(ywd) - 1)) nsignal <- rep(0, nlevelsWT(ywd)) sum2 <- rep(0, nlevelsWT(ywd)) for(j in 0:(nlevelsWT(ywd) - 1)) { coefthr <- accessD(universal, level = j) nsignal[j + 1] <- sum(abs(coefthr) > 0) if(nsignal[j + 1] > 0) sum2[j + 1] <- sum(coefthr[abs(coefthr) > 0]^2) } C <- seq(1000, 15000, 50) l <- rep(0, length(C)) lev <- seq(0, nlevelsWT(ywd) - 1) v <- 2^( - alpha * lev) for(i in 1:length(C)) { l[i] <- 0.5 * sum(- nsignal * (log(sigma^2 + C[i] * v) + 2 * log(pnorm(( - sigma * sqrt(2 * log(2^nlevelsWT(ywd))))/ sqrt(sigma^2 + C[i] * v)))) - sum2/2/(sigma^2 + C[i] * v)) } C1 <- C[l == max(l)] tau2 <- C1 * v p <- 2 * pnorm(( - sigma * sqrt(2 * log(2^nlevelsWT(ywd))))/sqrt(sigma^2 + tau2)) if(beta == 1) C2 <- sum(nsignal/p)/nlevelsWT(ywd) else C2 <- (1 - 2^(1 - beta))/(1 - 2^((1 - beta) * nlevelsWT(ywd))) * sum( nsignal/p) pr <- pmin(1, C2 * 2^( - beta * lev)) rat <- tau2/(sigma^2 + tau2) # # #----------------------Bayesian Thresholding------------------------------------ # bayesian <- ywd for(j in 0:(nlevelsWT(ywd)- 1)) { coef <- accessD(ywd, level = j) w <- (1 - pr[j + 1])/pr[j + 1]/sqrt((sigma^2 * rat[j + 1])/tau2[ j + 1]) * exp(( - rat[j + 1] * coef^2)/2/sigma^2) z <- 0.5 * (1 + pmin(w, 1)) median <- sign(coef) * pmax(0, rat[j + 1] * abs(coef) - sigma * sqrt(rat[j + 1]) * qnorm(z)) bayesian <- putD(bayesian, level = j, v = median) } bayesrec <- wr(bayesian) # #---------------Resulting plots-------------------------------------------- # if(plotfn == TRUE) { x <- seq(1, length(data))/length(data) par(mfrow = c(1, 2)) plot(x, data, type = "l", ylab = "(a) Data") plot(x, bayesrec, type = "l", ylab = "(b) BayesThresh", ylim = c(min(data), max(data))) } return(bayesrec) } "BMdiscr"<- function(BP) { dm <- lda(x = BP$BasisMatrix, grouping = BP$groups) # BMd <- list(BP = BP, dm = dm) } "Best1DCols"<- function(w2d, mincor = 0.7) { m <- w2d$m level <- w2d$level pktix <- w2d$pktix nbasis <- length(level) corvec <- rep(0, nbasis) # # Note: we don't calculate the first one, since the # first basis function is a constant, and so we know # the correlation will be zero # for(i in 2:nbasis) { corvec[i] <- cor(m[, i], w2d$groups) } corvec <- abs(corvec) sv <- corvec > mincor if (sum(sv) < 2) stop("Not enough variables. 
Decrease mincor") m <- m[, sv] level <- level[sv] pktix <- pktix[sv] corvec <- corvec[sv] sl <- rev(sort.list(corvec)) l <- list(nlevels = nlevelsWT(w2d), BasisMatrix = m[, sl], level = level[ sl], pkt = pktix[sl], basiscoef = corvec[sl], groups = w2d$groups) class(l) <- "BP" l } "CWCV"<- function(ynoise, ll, x = 1:length(ynoise), filter.number = 10, family = "DaubLeAsymm", thresh.type = "soft", tol = 0.01, maxits=500, verbose = 0, plot.it = TRUE, interptype = "noise") { # # Switch on verbosity for function calls if necessary # if(verbose == 2) CallsVerbose <- TRUE else CallsVerbose <- FALSE if(verbose == 1) cat("WaveletCV: Wavelet model building\nThinking ") n <- length(ynoise) ywd <- wd(ynoise, filter.number = filter.number, family = family, verbose = CallsVerbose) univ.threshold <- threshold(ywd, type = thresh.type, return.threshold = TRUE, lev = ll:(nlevelsWT(ywd)- 1), verbose = CallsVerbose, policy = "universal")[1] if(verbose == 1) { cat("Universal threshold: ", univ.threshold, "\n") cat("Now doing universal threshold reconstruction...") } yuvtwd <- threshold(ywd, type = thresh.type, lev = ll:(nlevelsWT(ywd)- 1), verbose = CallsVerbose, policy = "universal") if(verbose == 1) cat("done\nNow reconstructing...") yuvtwr <- wr(yuvtwd, verbose = CallsVerbose) if(verbose == 1) cat("done\nNow plotting universal thresholded\n") if(plot.it == TRUE) { oldpar <- par(mfrow = c(2, 2)) matplot(x, cbind(ynoise, yuvtwr), type = "l", main = "Universal Threshold Reconstruction", xlab = "x", col = c(3, 2), lty = c(3, 2)) } filter <- filter.select(filter.number = filter.number, family = family) N <- length(ynoise) nlevels <- log(N)/log(2) ssq <- 0 if(verbose > 0) error <- 1 else error <- 0 if(round(nlevels) != nlevels) stop("Datalength not power of 2") fl.dbase <- first.last(length(filter$H), N/2) C <- rep(0, fl.dbase$ntotal) D <- rep(0, fl.dbase$ntotal.d) ntt <- switch(thresh.type, hard = 1, soft = 2) if(is.null(ntt)) stop("Unknown threshold type") interptype <- switch(interptype, noise = 1, normal = 2) if(is.null(interptype)) stop("Unknown interptype") bc <- "periodic" nbc <- switch(bc, periodic = 1, symmetric = 2) if(is.null(nbc)) stop("Unknown boundary conditions") xvthresh <- 0 if(verbose == 1) cat("Now optimising cross-validated error estimate\n") ans <- .C("CWaveletCV", noisy = as.double(ynoise), nnoisy = as.integer(N), univ.threshold = as.double(univ.threshold), C = as.double(C), D = as.double(D), LengthD = as.integer(length(D)), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), levels = as.integer(nlevels), firstC = as.integer(fl.dbase$first.last.c[, 1]), lastC = as.integer(fl.dbase$first.last.c[, 2]), offsetC = as.integer(fl.dbase$first.last.c[, 3]), firstD = as.integer(fl.dbase$first.last.d[, 1]), lastD = as.integer(fl.dbase$first.last.d[, 2]), offsetD = as.integer(fl.dbase$first.last.d[, 3]), ntt = as.integer(ntt), ll = as.integer(ll), nbc = as.integer(nbc), tol = as.double(tol), maxits = as.integer(maxits), xvthresh = as.double(xvthresh), interptype = as.integer(interptype), error = as.integer(error), PACKAGE = "wavethresh") if (ans$error == 1700) { message("Algorithm not converging (yet).") message("Maybe increase number of maximum iterations (maxits or cvmaxits)?") message("Or increase tolerance (tol or cvtol) a bit?") message("Wanted to achieve tolerance of ", tol, " but have actually achieved: ", ans$tol) message("Check levels you are thresholding, especially if length of data set is small. E.g. 
if n<=16 then default levels argument probably should be changed.") stop(paste("Maximum number of iterations", maxits, " exceeded.")) } else if(ans$error != 0) { cat("Error code ", ans$error, "\n") stop("There was an error") } # # # Now do the reconstuction using xvthresh # xvwd <- threshold(ywd, policy = "manual", value = ans$xvthresh, type = thresh.type, lev = ll:(nlevelsWT(ywd)- 1)) xvwddof <- dof(xvwd) xvwr <- wr(xvwd) if(plot.it == TRUE) matplot(x, cbind(ynoise, yuvtwr, xvwr), type = "l", main = "XV Threshold Reconstruction", xlab = "x", col = c(3, 2, 1)) fkeep <- NULL xkeep <- NULL list(x = x, ynoise = ynoise, xvwr = xvwr, yuvtwr = yuvtwr, xvthresh = ans$xvthresh, uvthresh = univ.threshold, xvdof = xvwddof, uvdof = dof(yuvtwd), xkeep = xkeep, fkeep = fkeep) } "CWavDE"<- function(x, Jmax, threshold = 0, nout = 100, primary.resolution = 1, filter.number = 10, family = "DaubLeAsymm", verbose = 0, SF = NULL, WV = NULL) { rx <- range(x) xout <- rep(0, nout) fout <- rep(0, nout) kmin <- 0 kmax <- 0 kminW <- rep(0, Jmax) kmaxW <- rep(0, Jmax) xminW <- rep(0, Jmax) xmaxW <- rep(0, Jmax) # # Generate the scaling function and the wavelet if they're not supplied # if(is.null(SF)) { if(verbose > 0) cat("Computing scaling function\n") SF <- draw.default(filter.number = filter.number, family = family, plot.it = FALSE, scaling.function = TRUE, enhance = FALSE) } if(is.null(WV)) { if(verbose > 0) cat("Computing wavelet function\n") WV <- draw.default(filter.number = filter.number, family = family, plot.it = FALSE, enhance = FALSE) } swv <- support(filter.number = filter.number, family = family) # error <- 0 ans <- .C("CWavDE", x = as.double(x), n = as.integer(length(x)), minx = as.double(rx[1]), maxx = as.double(rx[2]), Jmax = as.integer(Jmax), threshold = as.double(threshold), xout = as.double(xout), fout = as.double(fout), nout = as.integer(nout), primary.resolution = as.double(primary.resolution), SFx = as.double(SF$x), SFy = as.double(SF$y), lengthSF = as.integer(length(SF$x)), WVx = as.double(WV$x), WVy = as.double(WV$y), lengthWV = as.integer(length(WV$x)), kmin = as.integer(kmin), kmax = as.integer(kmax), kminW = as.integer(kminW), kmaxW = as.integer(kmaxW), xminW = as.double(xminW), xmaxW = as.double(xmaxW), phiLH = as.double(swv$phi.lh), phiRH = as.double(swv$phi.rh), psiLH = as.double(swv$psi.lh), psiRH = as.double(swv$psi.rh), verbose = as.integer(verbose), error = as.integer(error), PACKAGE = "wavethresh") if(ans$error != 0) stop(paste("CWavDE returned error code", ans$error)) l <- list(x = ans$xout, y = ans$fout, sfix = ans$kmin:ans$kmax, wvixmin = ans$kminW, wvixmax = ans$kmaxW) l } "CanUseMoreThanOneColor"<- function() { # # In the S version of this code it was possible to interrogate certain # graphics devices to see how many colors they display. # Most users these days will be using X11, or quartz or pdf which can # so this routine is fixed now to return true. return(TRUE) } "ConvertMessage"<- function() { cat("Your wavelet object is from an old release of wavethresh.\n") cat("Please apply the function convert() to your object.\n") cat("This will update it to the most up to date release.\n") cat("e.g. 
if the name of your wavelet object is \"fred\" then type:\n") cat("fred <- convert(fred)\n") } "Crsswav"<- function(noisy, value = 1, filter.number = 10, family = "DaubLeAsymm", thresh.type = "hard", ll = 3) { filter <- filter.select(filter.number = filter.number, family = family) N <- length(noisy) nlevels <- log(N)/log(2) ssq <- 0 error <- 0 if(round(nlevels) != nlevels) stop("Datalength not power of 2") fl.dbase <- first.last(length(filter$H), N/2) C <- rep(0, fl.dbase$ntotal) D <- rep(0, fl.dbase$ntotal.d) ntt <- switch(thresh.type, hard = 1, soft = 2) if(is.null(ntt)) stop("Unknown threshold type") bc <- "periodic" nbc <- switch(bc, periodic = 1, symmetric = 2) if(is.null(nbc)) stop("Unknown boundary conditions") ans <- .C("Crsswav", noisy = as.double(noisy), nnoisy = as.integer(N), value = as.double(value), C = as.double(C), D = as.double(D), LengthD = as.integer(length(D)), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), levels = as.integer(nlevels), firstC = as.integer(fl.dbase$first.last.c[, 1]), lastC = as.integer(fl.dbase$first.last.c[, 2]), offsetC = as.integer(fl.dbase$first.last.c[, 3]), firstD = as.integer(fl.dbase$first.last.d[, 1]), lastD = as.integer(fl.dbase$first.last.d[, 2]), offsetD = as.integer(fl.dbase$first.last.d[, 3]), ntt = as.integer(ntt), ll = as.integer(ll), nbc = as.integer(nbc), ssq = as.double(ssq), error = as.integer(error), PACKAGE = "wavethresh") if(ans$error != 0) { cat("Error code ", ans$error, "\n") stop("There was an error") } cat("The answer was ", ans$ssq, "\n") return(list(ssq = ans$ssq, value = value, type = thresh.type, lev = ll:( nlevels - 1))) } "Cthreshold"<- function(wd, thresh.type = "soft", value = 0, levels = 3:(nlevelsWT(wd)- 1)) { D <- wd$D Dlevels <- nlevelsWT(wd)- 1 error <- 0 ntt <- switch(thresh.type, hard = 1, soft = 2) if(is.null(ntt)) stop("Unknown thresh.type") nbc <- switch(wd$bc, periodic = 1, symmetric = 2) if(is.null(nbc)) stop("Unknown boundary conditions") ans <- .C("Cthreshold", D = as.double(D), LengthD = as.integer(wd$fl.dbase$ntotal.d), firstD = as.integer(wd$fl.dbase$first.last.d[, 1]), lastD = as.integer(wd$fl.dbase$first.last.d[, 2]), offsetD = as.integer(wd$fl.dbase$first.last.d[, 3]), Dlevels = as.integer(Dlevels), ntt = as.integer(ntt), value = as.double(value), levels = as.integer(levels), qlevels = as.integer(length(levels)), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") if(ans$error != 0) { stop("Error occurred") cat("Error code was ", ans$error, "\n") } wd$D <- ans$D wd } "DJ.EX"<- function(n = 1024, signal = 7, rsnr = 7, noisy = FALSE, plotfn = FALSE) { x <- seq(1, n)/n #--------------------Blocks--------------------------------------------------- t <- c(0.10000000000000001, 0.13, 0.14999999999999999, 0.23000000000000001, 0.25, 0.40000000000000002, 0.44, 0.65000000000000002, 0.76000000000000001, 0.78000000000000003, 0.81000000000000005) h1 <- c(4, -5, 3, -4, 5, -4.2000000000000002, 2.1000000000000001, 4.2999999999999998, -3.1000000000000001, 2.1000000000000001, -4.2000000000000002) blocks <- rep(0, n) for(i in seq(1, length(h1))) { blocks <- blocks + (h1[i] * (1 + sign(x - t[i])))/2 } #--------------------Bumps---------------------------------------------------- h2 <- c(4, 5, 3, 4, 5, 4.2000000000000002, 2.1000000000000001, 4.2999999999999998, 3.1000000000000001, 5.0999999999999996, 4.2000000000000002) w <- c(0.0050000000000000001, 0.0050000000000000001, 0.0060000000000000001, 0.01, 0.01, 0.029999999999999999, 0.01, 0.01, 0.0050000000000000001, 
0.0080000000000000002, 0.0050000000000000001) bumps <- rep(0, n) for(i in seq(1, length(h2))) { bumps <- bumps + h2[i] * pmax(0, (1 - abs((x - t[i])/w[i])))^4 } #-------------------HeaviSine------------------------------------------------- heavi <- 4 * sin(4 * pi * x) - sign(x - 0.29999999999999999) - sign( 0.71999999999999997 - x) #--------------------Doppler-------------------------------------------------- eps <- 0.050000000000000003 doppler <- sqrt(x * (1 - x)) * sin((2 * pi * (1 - eps))/(x + eps)) #------------------------Normalization---------------------------------------- blocks <- blocks/sqrt(var(blocks)) * signal bumps <- bumps/sqrt(var(bumps)) * signal heavi <- heavi/sqrt(var(heavi)) * signal doppler <- doppler/sqrt(var(doppler)) * signal if(noisy == TRUE) { values <- list(blocks = blocks + rnorm(n, 0, signal/rsnr), bumps = bumps + rnorm(n, 0, signal/rsnr), heavi = heavi + rnorm(n, 0, signal/rsnr), doppler = doppler + rnorm(n, 0, signal/rsnr)) } else { values <- list(blocks = blocks, bumps = bumps, heavi = heavi, doppler = doppler) } if(plotfn == TRUE) { par(mfrow = c(3, 2)) plot(x, values$blocks, type = "l", ylab = "(a) Blocks") plot(x, values$bumps, type = "l", ylab = "(b) Bumps") plot(x, values$heavi, type = "l", ylab = "(c) HeaviSine") plot(x, values$doppler, type = "l", ylab = "(d) Doppler") } return(values) } "FullWaveletCV"<- function(noisy, ll = 3, type = "soft", filter.number = 10, family = "DaubLeAsymm", tol = 0.01, verbose = 0) { noisywd <- wd(noisy, filter.number = filter.number, family = family) softuv <- threshold(noisywd, levels = ll:(nlevelsWT(noisywd)- 1), type = "soft", policy = "universal", dev = madmad, return.thresh = TRUE) H <- filter.select(filter.number = filter.number, family = family)$H ntt <- switch(type, hard = 1, soft = 2) error <- verbose xvthresh <- 0 ans <- .C("FullWaveletCV", noisy = as.double(noisy), nnoisy = as.integer(length(noisy)), UniversalThresh = as.double(softuv), H = as.double(H), LengthH = as.integer(length(H)), ntt = as.integer(ntt), ll = as.integer(ll), tol = as.double(tol), xvthresh = as.double(xvthresh), error = as.integer(error), PACKAGE = "wavethresh") if(ans$error != 0) { cat("Error code returned was ", ans$error, "\n") stop("Error detected from C routine") } ans$xvthresh } "GenW"<- function(n = 8, filter.number = 10, family = "DaubLeAsymm", bc = "periodic") { z <- rep(0, n) if(bc == "periodic") { w <- matrix(0, nrow = n, ncol = n) for(i in 1:n) { v <- z v[i] <- 1 wobj <- wd(v, filter.number = filter.number, family = family, bc = bc) w[i, 1] <- accessC(wobj, lev = 0) w[i, 2:n] <- wobj$D } } else { w <- NULL for(i in 1:n) { v <- z v[i] <- 1 wobj <- wd(v, filter.number = filter.number, family = family, bc = bc) wrow <- c(accessC(wobj, lev = 0, boundary = TRUE), wobj$D) w <- rbind(w, wrow) } } w } "GetRSSWST"<- function(ndata, threshold, levels, family = "DaubLeAsymm", filter.number = 10, type = "soft", norm = l2norm, verbose = 0, InverseType = "average") { thverb <- FALSE if(verbose > 1) thverb <- TRUE if(InverseType != "average" && InverseType != "minent") stop(paste( "Unknown InverseType: ", InverseType)) # # Get odds and evens # oddsv <- seq(from = 1, to = length(ndata), by = 2) evensv <- seq(from = 2, to = length(ndata), by = 2) odata <- ndata[oddsv] edata <- ndata[evensv] # # # Build odd thresholded estimate, then, threshold and rebuild # odataWST <- wst(odata, filter.number = filter.number, family = family) odataWSTt <- threshold.wst(odataWST, levels = levels, policy = "manual", value = threshold, verbose = thverb) 
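#
# GenW() above assembles the finite transform matrix row by row from wd()
# applied to each coordinate vector; for periodic boundary conditions the
# resulting matrix is orthogonal, which is easy to check numerically:
#
W <- GenW(n = 8, filter.number = 2, family = "DaubExPhase")
max(abs(crossprod(W) - diag(8)))   # numerically zero: W is orthogonal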
if(InverseType == "average") odataWSTr <- AvBasis.wst(odataWSTt) # else if(InverseType == "minent") { odataNV <- MaNoVe(odataWSTt) cat("ODD Node Vector\n") cat("---------------\n") print(odataNV) odataWSTr <- InvBasis.wst(odataWSTt, nv = odataNV) } else stop(paste("Unknown InverseType: ", InverseType)) ip <- (odataWSTr[1:(length(odataWSTr) - 1)] + odataWSTr[2:length( odataWSTr)])/2 ip <- c(ip, (odataWSTr[length(odataWSTr)] + odataWSTr[1])/2) # # # Now compute prediction error # pe <- norm(ip, edata) # # # Now repeat all the above the other way around. # # # Build even thresholded estimate, then, threshold and rebuild # edataWST <- wst(edata, filter.number = filter.number, family = family) edataWSTt <- threshold.wst(edataWST, levels = levels, policy = "manual", value = threshold, verbose = thverb) if(InverseType == "average") edataWSTr <- AvBasis.wst(edataWSTt) # else if(InverseType == "minent") { edataNV <- MaNoVe(edataWSTt) cat("EVEN Node Vector\n") cat("---------------\n") print(edataNV) edataWSTr <- InvBasis.wst(edataWSTt, nv = edataNV) } else stop(paste("Unknown InverseType: ", InverseType)) ip <- (edataWSTr[1:(length(edataWSTr) - 1)] + edataWSTr[2:length( edataWSTr)])/2 ip <- c(ip, (edataWSTr[length(edataWSTr)] + edataWSTr[1])/2) # # # Now compute prediction error # pe <- (pe + norm(ip, odata))/2 if(verbose != 0) { cat("For threshold value\n") print(threshold) cat("The pe estimate is ", pe, "\n") } pe } "HaarConcat"<- function() { x1 <- HaarMA(n = 128, order = 1) x2 <- HaarMA(n = 128, order = 2) x3 <- HaarMA(n = 128, order = 3) x4 <- HaarMA(n = 128, order = 4) c(x1, x2, x3, x4) } "HaarMA"<- function(n, sd = 1, order = 5) { # # Generate Haar MA realization # # n - number of observations; sd=variance of increments; order=MA order # z <- rnorm(n = n + (2^order) - 1, mean = 0, sd = sd) J <- order x <- rep(0, n) for(i in (2^J):(2^(J - 1) + 1)) x <- x + z[i:(n + i - 1)] for(i in (2^(J - 1)):1) x <- x - z[i:(n + i - 1)] x <- x * 2^( - J/2) return(x) } "InvBasis"<- function(...) UseMethod("InvBasis") "InvBasis.wp"<- function(wp, nvwp, pktlist, verbose = FALSE, ...) { nlev <- nlevelsWT(wp) if(missing(pktlist)) { pktlist <- print.nvwp(nvwp, printing = FALSE) if(nlev != nlevelsWT(nvwp)) { stop("The node vector you supplied cannot have arisen from the wavelet packet object you supplied as they have different numbers of levels" ) } } lpkts <- length(pktlist$level) ndata <- 2^nlev cfvc <- rep(0, ndata) ixvc <- cfvc counter <- 0 for(i in 1:lpkts) { lev <- pktlist$level[i] pkt <- pktlist$pkt[i] coefs <- getpacket(wp, level = lev, index = pkt) pklength <- 2^lev pkleftix <- pkt * pklength + 1 pkrightix <- pkleftix + pklength - 1 cfvc[pkleftix:pkrightix] <- coefs ixvc[pkleftix:pkrightix] <- counter if(verbose == TRUE) { cat("Level: ", lev, "\n") cat("Packet: ", pkt, "\n") cat("coefs: ") print(coefs) cat("---\n") cat("Packet length: ", pklength, "\n") cat("Packet left ix: ", pkleftix, "\n") cat("Packet right ix: ", pkrightix, "\n") cat("ixvc: ") print(ixvc) cat("---\n") cat("cfvc: ") print(cfvc) cat("---\n") } counter <- counter + 1 } if(verbose == TRUE) { cat("SWEEPER Stage\n") } sweeper <- rle(ixvc)$lengths mx <- min(sweeper) while(mx < ndata) { ix <- ((1:length(sweeper))[sweeper == mx])[1] csweeper <- cumsum(c(1, sweeper))[1:length(sweeper)] lix <- sweeper[ix] rix <- sweeper[ix + 1] if(lix != rix) stop(paste( "wavethresh error: lix and rix are not the same. 
lix is ", lix, " rix is ", rix)) if(verbose == TRUE) { cat("Sweeper: ") print(sweeper) cat("Cumsum Sweeper: ") print(csweeper) cat("At sweeper index position ", ix, "\n") cat("Left ix is ", lix, "\n") cat("Right ix is ", rix, "\n") cat("Corresponds to ", csweeper[ix], csweeper[ix + 1], "\n") } cfixl <- csweeper[ix] cfixr <- csweeper[ix + 1] pklength <- lix c.in <- cfvc[cfixl:(cfixl + pklength - 1)] d.in <- cfvc[cfixr:(cfixr + pklength - 1)] c.out <- conbar(c.in, d.in, wp$filter) cfvc[cfixl:(cfixr + pklength - 1)] <- c.out sweeper <- sweeper[ - ix] sweeper[ix] <- rix + lix mx <- min(sweeper) } cfvc } "InvBasis.wst"<- function(wst, nv, ...) { # # # Perform an inverse on wst given specification in nv # # indexlist is a list of packet indices for access into appropriate levels of # wst, nrsteps will be the number of reconstruction steps # pnv <- print.nv(nv, printing = FALSE) indexlist <- rev(pnv$indexlist) rvector <- pnv$rvector nrsteps <- length(indexlist) # # # blevel is the bottom level in the decomposition # blevel <- nlevelsWT(nv) - nrsteps # # # Now extract the data and put it all in a vector # rdata <- getpacket(wst, level = blevel, index = indexlist[1], type = "C") ldata <- length(rdata) D <- getpacket(wst, level = blevel, index = indexlist[1]) rdata <- c(rdata, D) ldata <- c(ldata, length(D)) for(i in 2:nrsteps) { D <- getpacket(wst, level = (blevel + i - 1), index = indexlist[ i]) rdata <- c(rdata, D) ldata <- c(ldata, length(D)) } error <- 0 invswr <- .C("wavepackrecon", rdata = as.double(rdata), ldata = as.integer(ldata), nrsteps = as.integer(nrsteps), rvector = as.integer(rvector), H = as.double(wst$filter$H), LengthH = as.integer(length(wst$filter$H)), error = as.integer(error), PACKAGE = "wavethresh") if(invswr$error != 0) stop(paste("Error code was ", invswr$error, " from wavepackrecon")) return(invswr$rdata) } "IsEarly"<- function(x) UseMethod("IsEarly") "IsEarly.default"<- function(x) { return(FALSE) } "IsEarly.wd"<- function(x) { if(is.null(x$date)) return(TRUE) else return(FALSE) } "IsPowerOfTwo"<- function(n) { tvec <- (n == trunc(n)) r <- log(n)/log(2) tvec <- tvec & (r == trunc(r)) r[tvec == FALSE] <- NA r } "LocalSpec"<- function(...) UseMethod("LocalSpec") "LocalSpec.wd"<- function(wdS, lsmooth = "none", nlsmooth = FALSE, prefilter = TRUE, verbose = FALSE, lw.number = wdS$filter$filter.number, lw.family = wdS$filter$family, nlw.number = wdS$filter$filter.number, nlw.family = wdS$filter$family, nlw.policy = "LSuniversal", nlw.levels = 0:(nlevelsWT(wdS) - 1), nlw.type = "hard", nlw.by.level = FALSE, nlw.value = 0, nlw.dev = var, nlw.boundary = FALSE, nlw.verbose = FALSE, nlw.cvtol = 0.01, nlw.Q = 0.050000000000000003, nlw.alpha = 0.050000000000000003, nlw.transform = I, nlw.inverse = I, debug.spectrum = FALSE, ...) { # # # Check the class of the object # cwd <- class(wdS) if(is.null(cwd) || cwd != "wd") stop("Object must be of class wd to perform energy computation" ) else if(wdS$type != "station") stop("swd type should be station (nondecimated)") lnlevels <- nlevelsWT(wdS) N <- 2^lnlevels if(verbose == TRUE) cat("Original data length was:", N, "\n") # # # Decide whether to do no smoothing, Fourier smoothing or wavelet # linear smoothing. 
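#
# LocalSpec() requires a nondecimated ("station") wd object; the lsmooth
# argument chooses between no smoothing, Fourier smoothing and linear
# wavelet smoothing of the squared coefficients.  A brief sketch using the
# Haar MA simulator defined above:
#
x    <- HaarMA(n = 256, order = 3)                 # simulated Haar MA process
xwd  <- wd(x, filter.number = 1, family = "DaubExPhase", type = "station")
spec <- LocalSpec(xwd, lsmooth = "Fourier")        # squared, Fourier-smoothed
accessD(spec, level = 4)                           # smoothed level-4 "spectrum"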
# if(lsmooth == "none") { # # # Just square the coefficients in the wdS object # if(verbose == TRUE) cat("Squaring coefficients on level: ") for(i in (lnlevels - 1):0) { if(verbose == TRUE) cat(i, " ") v <- accessD(wdS, level = i) if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) v <- v^2 if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) wdS <- putD(wdS, level = i, v = v) } if(verbose == TRUE) cat("\n") } else if(lsmooth == "Fourier") { # # Perform smoothing using Fourier methods. # For each level take the real cts Fourier transform and smooth # by removing a proportion of the coefficients and inverting the # transform. # # The amount of smoothing is controlled by the fracsmooth variable # Initially this is set to 1/2 as the frequencies we want to remove # are 1/2 to 1. When we move a level up the frequencies we want to # remove are above 1/4 and so on. Note that smoothing starts at # level J-2 (not J-1 as these are the frequencies between 1 and 2 # and I'm not sure what to do with these yet). # # if(verbose == TRUE) { cat("Performing Fourier linear smoothing\n") cat("Processing level: ") } fracsmooth <- 1/2 for(i in (lnlevels - 2):0) { v <- accessD(wdS, level = i) if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) if(verbose == TRUE) cat(i, " ") # # # Do prefiltering if necessary. This low-passes the actual coefficients # to that the cut-off is at the highest frequency of the current # (Littlewood-Paley) wavelet. # if(prefilter == TRUE) { if(verbose == TRUE) cat("prefilter\n") vfft <- rfft(v) n <- length(vfft) start <- 1 + n * fracsmooth if(start <= n) vfft[max(1, start):n] <- 0 v <- rfftinv(vfft) if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) } # # # Square the coefficients! # v <- v^2 if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7) ) # # # Now carry out the Fourier smoothing. # vfft <- rfft(v) n <- length(vfft) start <- 1 + n * fracsmooth # # # Maybe use something like this to adapt to # the shape of the wavelet? # start <- start * 0.77 # if(start <= n) vfft[max(1, start):n] <- 0 v <- rfftinv(vfft) fracsmooth <- fracsmooth/2 if(debug.spectrum == TRUE && i != 0) spectrum(v, spans = c(11, 9, 7)) wdS <- putD(wdS, level = i, v = v) } if(verbose == TRUE) cat("\nSquaring top level only\n") v <- accessD(wdS, level = lnlevels - 1) if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) v <- v^2 if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) wdS <- putD(wdS, level = lnlevels - 1, v) } else if(lsmooth == "wavelet") { # # # Do LINEAR wavelet smoothing # if(verbose == TRUE) { cat("Performing LINEAR wavelet smoothing\n") cat("Processing level ") } fracsmooth <- 1/2 for(i in 0:(lnlevels - 2)) { if(verbose == TRUE) cat(i, " ") v <- accessD(wdS, level = i) # # # Do prefiltering if necessary. This low-passes the actual coefficients # to that the cut-off is at the highest frequency of the current # (Littlewood-Paley) wavelet. # if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) if(prefilter == TRUE) { if(verbose == TRUE) cat("prefilter\n") vfft <- rfft(v) n <- length(vfft) start <- 1 + n * fracsmooth if(start <= n) vfft[max(1, start):n] <- 0 v <- rfftinv(vfft) if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) } # # # Square the coefficients # v <- v^2 # # # Now do the linear wavelet smoothing. This takes each level (i), applies # the standard discrete wavelet transform and nulls levels higher than # the one we are at (j>i). The inverse transform is then applied and # the coefficients restored in the wdS object. 
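#
# The linear wavelet smoothing step described above amounts to the
# following, sketched for a single level with stand-in data (nullevels()
# and nlevelsWT() are defined elsewhere in the package): transform the
# squared coefficients, null all scales finer than the current level,
# and invert.
#
v   <- rnorm(256)^2                                # stand-in for squared coefficients at level i = 4
vwd <- wd(v, filter.number = 10, family = "DaubLeAsymm")
vwd <- nullevels(vwd, levels = 5:(nlevelsWT(vwd) - 1))
vsm <- wr(vwd)                                     # only scales coarser than level 4 survive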
# if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) realwd <- wd(v, filter.number = lw.number, family = lw.family) realwd <- nullevels(realwd, levels = (i + 1):(nlevelsWT( realwd) - 1)) v <- wr(realwd) if(debug.spectrum == TRUE && i != 0) spectrum(v, spans = c(11, 9, 7)) wdS <- putD(wdS, level = i, v = v) } if(verbose == TRUE) cat("\nSquaring top level only\n") v <- accessD(wdS, level = lnlevels - 1) if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) v <- v^2 if(debug.spectrum == TRUE) spectrum(v, spans = c(11, 9, 7)) wdS <- putD(wdS, level = lnlevels - 1, v) } else stop(paste("Unknown lsmooth:", lsmooth)) # if(nlsmooth == TRUE) { if(verbose == TRUE) { cat("Performing non-linear wavelet smoothing\n") cat("Processing level: ") } for(i in ((lnlevels - 1):0)) { if(verbose == TRUE) cat(i, " ") v <- accessD(wdS, level = i) v <- nlw.transform(v) vwd <- wd(v, filter.number = nlw.number, family = nlw.family) vwdt <- threshold(vwd, levels = nlw.levels, type = nlw.type, policy = nlw.policy, by.level = nlw.by.level, value = nlw.value, dev = nlw.dev, boundary = nlw.boundary, verbose = nlw.verbose, cvtol = nlw.cvtol, Q = nlw.Q, alpha = nlw.alpha ) v <- wr(vwdt) v <- nlw.inverse(v) wdS <- putD(wdS, level = i, v = v) } if(verbose == TRUE) cat("\n") } wdS } "LocalSpec.wst"<- function(wst, ...) { LocalSpec.wd(convert.wst(wst), ...) } "MaNoVe"<- function(...) UseMethod("MaNoVe") "MaNoVe.wp"<- function(wp, verbose = FALSE, ...) { nlevels <- nlevelsWT(wp) LengthData <- dim(wp$wp)[[2]] upperctrl <- rep(0, LengthData - 1) upperl <- upperctrl firstl <- rev(c(0, cumsum(2^(0:(nlevels - 2))))) if(verbose == TRUE) verbose <- 1 error <- 0 tmp <- .C("wpCmnv", wp = as.double(wp$wp), LengthData = as.integer(LengthData), nlevels = as.integer(nlevels), upperctrl = as.integer(upperctrl), upperl = as.double(upperl), firstl = as.integer(firstl), verbose = as.integer(verbose), error = as.integer(error), PACKAGE = "wavethresh") if(tmp$error != 0) stop(paste("Error condition ", tmp$error, " reported from wpCmnv")) # node.list <- vector("list", nlevels) matchcodes <- c("T", "B") vlength <- 2^(nlevels - 1) # # # Convert C to S # firstl <- firstl + 1 for(i in 1:nlevels) { first <- firstl[i] sv <- first:(first + vlength - 1) node.list[[i]]$upperctrl <- matchcodes[tmp$upperctrl[sv]] node.list[[i]]$upperl <- tmp$upperl[sv] vlength <- vlength/2 } node.vector <- list(node.list = node.list, nlevels = nlevels) class(node.vector) <- "nvwp" node.vector } "MaNoVe.wst"<- function(wst, entropy = Shannon.entropy, verbose = FALSE, stopper = FALSE, alg = "C", ...) { # # Make a node vector. 
Use C code rather than the slow S code # if(alg == "C") { if(verbose == TRUE) cat("Using C code version\n") nlevels <- nlevelsWT(wst) # node.vector <- vector("list", nlevels) # matchcodes <- c("S", "L", "R") LengthData <- dim(wst$wp)[[2]] upperctrl <- rep(0, LengthData - 1) upperl <- upperctrl firstl <- rev(c(0, cumsum(2^(0:(nlevels - 2))))) if(verbose == TRUE) verbose <- 1 error <- 0 tmp <- .C("Cmnv", wst = as.double(wst$wp), wstC = as.double(wst$Carray), LengthData = as.integer(LengthData), nlevels = as.integer(nlevels), upperctrl = as.integer(upperctrl), upperl = as.double(upperl), firstl = as.integer(firstl), verbose = as.integer(verbose), error = as.integer(error), PACKAGE = "wavethresh") if(tmp$error != 0) stop(paste("Error condition ", tmp$error, " reported from Cmnv")) # node.list <- vector("list", nlevels) matchcodes <- c("S", "L", "R") vlength <- 2^(nlevels - 1) # # # Convert C to S # firstl <- firstl + 1 for(i in 1:nlevels) { first <- firstl[i] sv <- first:(first + vlength - 1) node.list[[i]]$upperctrl <- matchcodes[tmp$upperctrl[sv ]] node.list[[i]]$upperl <- tmp$upperl[sv] vlength <- vlength/2 } node.vector <- list(node.list = node.list, nlevels = nlevels) } else { if(verbose == TRUE) cat("Using S code version\n") nlevels <- nlevelsWT(wst) node.vector <- vector("list", nlevels) matchcodes <- c("S", "L", "R") for(i in 0:(nlevels - 1)) { if(verbose == TRUE) cat("Lower level: ", i, "\n") nll <- 2^(nlevels - i) lowerl <- rep(0, nll) nul <- nll/2 upperl <- rep(0, nul) upperctrl <- rep("", nul) if(verbose == TRUE) cat("Packets. Lower: ", nll, " Upper ", nul, "\n") for(j in 0:(nul - 1)) { if(verbose == TRUE) cat("Upper level index: ", j, "\n") kl <- 2 * j kr <- 2 * j + 1 mother.entropy <- entropy(getpacket(wst, level = i + 1, index = j, type = "C")) if(i == 0) { daughter.left.entropy <- entropy(c(getpacket( wst, level = i, index = kl), getpacket(wst, level = i, index = kl, type = "C"))) daughter.right.entropy <- entropy(c(getpacket( wst, level = i, index = kr), getpacket(wst, level = i, index = kr, type = "C"))) } else { if(verbose == TRUE) cat("Left Ent C contrib ", node.vector[[i]]$ upperl[kl + 1], "\n") daughter.left.entropy <- entropy(getpacket( wst, level = i, index = kl)) + node.vector[[ i]]$upperl[kl + 1] if(verbose == TRUE) cat("Right Ent C contrib ", node.vector[[i ]]$upperl[kr + 1], "\n") daughter.right.entropy <- entropy(getpacket( wst, level = i, index = kr)) + node.vector[[ i]]$upperl[kr + 1] } if(verbose == TRUE) { cat("\tMother ent.: ", mother.entropy, "\n") cat("\tDaug. l .ent: ", daughter.left.entropy, "\n") cat("\tDaug. r .ent: ", daughter.right.entropy, "\n") } ents <- c(mother.entropy, daughter.left.entropy, daughter.right.entropy) pos <- match(min(ents), ents) upperctrl[j + 1] <- matchcodes[pos] upperl[j + 1] <- min(ents) if(verbose == TRUE) cat("\tSelected ", upperctrl[j + 1], upperl[j + 1], "\n") if(stopper == TRUE) scan() } node.vector[[i + 1]] <- list(upperctrl = upperctrl, upperl = upperl) if(verbose == TRUE) print(node.vector) } node.vector <- list(node.list = node.vector, nlevels = nlevels) } class(node.vector) <- "nv" node.vector } "PsiJ"<- function(J, filter.number = 10, family = "DaubLeAsymm", tol = 1e-100, OPLENGTH = 10^7, verbose=FALSE) { if (verbose==TRUE) cat("Computing PsiJ\n") now <- proc.time()[1:2] if(J >= 0) stop("J must be negative integer") if(J - round(J) != 0) stop("J must be an integer") Psiorig <- Psiname(J = J, filter.number = filter.number, family = family) # # # See if matrix already exists. 
If so, return it # if(exists(Psiorig, envir=WTEnv)) { if (verbose==TRUE) cat("Returning precomputed version\n") speed <- proc.time()[1:2] - now if (verbose==TRUE) cat("Took ", sum(speed), " seconds\n") return(get(Psiorig, envir=WTEnv)) } H <- filter.select(filter.number = filter.number, family = family)$H wout <- rep(0, OPLENGTH) rlvec <- rep(0, - J) error <- 0 answer <- .C("PsiJ", J = as.integer( - J), H = as.double(H), LengthH = as.integer(length(H)), tol = as.double(tol), wout = as.double(wout), lwout = as.integer(length(wout)), rlvec = as.integer(rlvec), error = as.integer(error), PACKAGE = "wavethresh") if(answer$error != 0) { if(answer$error == 160) cat("Increase ", OPLENGTH, " to be larger than ", answer$lwout, "\n") stop(paste("Error code was ", answer$error)) } speed <- proc.time()[1:2] - now if (verbose==TRUE) cat("Took ", sum(speed), " seconds\n") m <- vector("list", - J) lj <- c(0, cumsum(2 * answer$rlvec - 1)) for(j in 1:( - J)) m[[j]] <- answer$wout[(lj[j] + 1):lj[j + 1]] assign(Psiorig, m, envir=WTEnv) m } "PsiJmat"<- function(J, filter.number = 10, family = "DaubLeAsymm", OPLENGTH = 10^7) { J <- - J P <- PsiJ( - J, filter.number = filter.number, family = family, OPLENGTH = OPLENGTH) nc <- length(P[[J]]) nr <- J m <- matrix(0, nrow = nr, ncol = nc) m[J, ] <- P[[J]] for(j in 1:(J - 1)) { lj <- length(P[[j]]) nz <- (nc - lj)/2 z <- rep(0, nz) m[j, ] <- c(z, P[[j]], z) } m } "Psiname"<- function(J, filter.number, family) { if(J >= 0) stop("J must be a negative integer") return(paste("Psi.", - J, ".", filter.number, ".", family, sep = "")) } "ScalingFunction"<- function(filter.number = 10, family = "DaubLeAsymm", resolution = 4096, itlevels = 50) { if(is.na(IsPowerOfTwo(resolution))) stop("Resolution must be a power of two") res <- 4 * resolution # # # Select filter and work out some fixed constants # H <- filter.select(filter.number = filter.number, family = family)$H lengthH <- length(H) ll <- lengthH v <- rep(0, res) # # # Set initial coefficient to 1 in 2nd position on 1st level # v[2] <- 1 # # # Now iterate the successive filtering operations to build up the scaling # function. The actual filtering is carried out by the C routine CScalFn. # for(it in 1:itlevels) { ans <- rep(0, res) z <- .C("CScalFn", v = as.double(v), ans = as.double(ans), res = as.integer(res), H = as.double(H), lengthH = as.integer(lengthH), PACKAGE = "wavethresh") # # # We only ever take the first half of the result # v <- z$ans[1:(res/2)] # # # Set all other coefficients equal to zero. (This is because # rounding errors sometimes cause small values to appear). # v[ - ((2^it + 1):(2^it + ll))] <- 0 # plot(seq(from = 0, to = 2 * filter.number - 1, length = ll), v[( # 2^it + 1):(2^it + ll)], type = "l") v <- sqrt(2) * v llbef <- ll vbef <- v # # # Check to see if the next iteration would send the number # of coefficients over the resolution that we can have. # Exit the loop if it does. # if(2^(it + 1) + lengthH + ll * 2 - 2 > res/2) { cit <- it break } # # # ll is the number of coefficients that are nonzero in # any particular run. This formula updates ll for next time # round. # ll <- lengthH + ll * 2 - 2 # # # Add some zeroes to v to make it the right length. 
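#
# (The C routine CScalFn is always handed a vector of full length `res`,
#  so v is zero-padded back up to that length before the next pass of
#  the cascade.)
#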
# v <- c(v, rep(0, res - length(v))) } list(x = seq(from = 0, to = 2 * filter.number - 1, length = llbef), y = vbef[(2^cit + 1):(2^cit + llbef)]) } "Shannon.entropy"<- function(v, zilchtol = 1e-300) { vsq <- v^2 if(sum(vsq) < zilchtol) return(0) else { vsq[vsq == 0] <- 1 return( - sum(vsq * log(vsq))) } } "TOgetthrda1"<- function(dat, alpha) { datsq <- sort(dat^2) a <- TOonebyone1(datsq, alpha) if(length(a) == length(datsq)) if(1 - pchisq(datsq[1], 1) < alpha) ggg <- 0 else ggg <- sqrt(datsq[1]) else ggg <- sqrt(datsq[length(datsq) - length(a) + 1]) return(ggg) } "TOgetthrda2"<- function(dat, alpha) { a <- TOonebyone2(dat, alpha) if(length(a) == length(dat)) if(1 - pchisq(min(dat), 1) < alpha) ggg <- 0 else ggg <- sqrt(min(dat)) else ggg <- sqrt(max(dat[sort(order(dat)[1:(length(dat) - length(a) + 1 )])])) return(ggg) } "TOkolsmi.chi2"<- function(dat) { n <- length(dat) return(max(abs(cumsum(dat) - ((1:n) * sum(dat))/n))/sqrt(2 * n)) } "TOonebyone1"<- function(dat, alpha) { i <- length(dat) cc <- 1 - pchisq(dat[i], 1)^i while(cc[length(cc)] < alpha && i > 1) { i <- i - 1 cc <- c(cc, 1 - pchisq(dat[i], 1)^i) } return(cc) } "TOonebyone2"<- function(dat, alpha) { crit <- c(seq(0.28000000000000003, 1.49, by = 0.01), seq(1.5, 2.48, by = 0.02)) alph <- c(0.99999899999999997, 0.999996, 0.99999099999999996, 0.99997899999999995, 0.99995400000000001, 0.99990900000000005, 0.99982899999999997, 0.99969699999999995, 0.99948899999999996, 0.99917400000000001, 0.99871500000000002, 0.99807100000000004, 0.99719199999999997, 0.99602800000000002, 0.99452399999999996, 0.99262300000000003, 0.99026999999999998, 0.98741000000000001, 0.98399499999999995, 0.97997800000000002, 0.97531800000000002, 0.96998300000000004, 0.96394500000000005, 0.95718599999999998, 0.94969400000000004, 0.94146600000000003, 0.93250299999999997, 0.922817, 0.91242299999999998, 0.90134400000000003, 0.88960499999999998, 0.87724000000000002, 0.86428199999999999, 0.85077100000000005, 0.83677500000000005, 0.82224699999999995, 0.80732300000000001, 0.79201299999999997, 0.77636300000000003, 0.76041800000000004, 0.74421999999999999, 0.72781099999999999, 0.71123499999999995, 0.69452899999999995, 0.67773499999999998, 0.660887, 0.64401900000000001, 0.62716700000000003, 0.61036000000000001, 0.59362800000000004, 0.57699800000000001, 0.56049499999999997, 0.54414300000000004, 0.52795899999999996, 0.51197000000000004, 0.49619200000000002, 0.48063400000000001, 0.46531800000000001, 0.45025599999999999, 0.43545400000000001, 0.42093000000000003, 0.40668399999999999, 0.39273000000000002, 0.37907200000000002, 0.36571399999999998, 0.35266199999999998, 0.339918, 0.327484, 0.31536399999999998, 0.30355599999999999, 0.29205999999999999, 0.28087400000000001, 0.27000000000000002, 0.259434, 0.24917400000000001, 0.23921999999999999, 0.22956599999999999, 0.22020600000000001, 0.21113999999999999, 0.20236399999999999, 0.19387199999999999, 0.18565799999999999, 0.17771799999999999, 0.17005000000000001, 0.16264400000000001, 0.155498, 0.14860599999999999, 0.141962, 0.13555800000000001, 0.129388, 0.12345200000000001, 0.117742, 0.11225, 0.10697, 0.101896, 0.097028000000000003, 0.092352000000000004, 0.087868000000000002, 0.083568000000000003, 0.079444000000000001, 0.075495000000000007, 0.071711999999999998, 0.068092, 0.064630000000000007, 0.061317999999999998, 0.058152000000000002, 0.055128000000000003, 0.052243999999999999, 0.049487999999999997, 0.046857999999999997, 0.044350000000000001, 0.041959999999999997, 0.039682000000000002, 0.037513999999999999, 0.035448, 0.033484, 0.031618, 
0.029842, 0.028153999999999998, 0.026551999999999999, 0.02503, 0.023588000000000001, 0.022218000000000002, 0.019689999999999999, 0.017422, 0.015389999999999999, 0.013573999999999999, 0.011952000000000001, 0.010508, 0.0092230000000000003, 0.0080829999999999999, 0.0070720000000000002, 0.0061770000000000002, 0.0053880000000000004, 0.0046909999999999999, 0.004078, 0.0035400000000000002, 0.003068, 0.0026540000000000001, 0.0022929999999999999, 0.001977, 0.0017030000000000001, 0.001464, 0.001256, 0.0010759999999999999, 0.00092100000000000005, 0.00078700000000000005, 0.00067100000000000005, 0.00057200000000000003, 0.000484, 0.00041199999999999999, 0.00035, 0.00029500000000000001, 0.00025000000000000001, 0.00021000000000000001, 0.00017799999999999999, 0.00014799999999999999, 0.000126, 0.00010399999999999999, 8.7999999999999998e-05, 7.3999999999999996e-05, 6.0000000000000002e-05, 5.1e-05, 4.1999999999999998e-05, 3.4999999999999997e-05, 3.0000000000000001e-05, 2.4000000000000001e-05, 2.0000000000000002e-05, 1.5999999999999999e-05, 1.2999999999999999e-05, 1.1e-05, 9.0000000000000002e-06) if(alpha < min(alph) || alpha > max(alph)) stop("alpha =", alpha, "is out of range") ind <- match(TRUE, alpha > alph) critval <- crit[ind - 1] + ((alph[ind - 1] - alpha) * (crit[ind] - crit[ ind - 1]))/(alph[ind - 1] - alph[ind]) i <- length(dat) cc <- TOkolsmi.chi2(dat) while(cc[length(cc)] > critval && i > 1) { i <- i - 1 cc <- c(cc, TOkolsmi.chi2(dat[sort(order(dat)[1:i])])) } return(cc) } "TOshrinkit"<- function(coeffs, thresh) { sign(coeffs) * pmax(abs(coeffs) - thresh, 0) } "TOthreshda1"<- function(ywd, alpha = 0.050000000000000003, verbose = FALSE, return.threshold = FALSE) { if(verbose) cat("Argument checking\n") ctmp <- class(ywd) if(is.null(ctmp)) stop("ywd has no class") else if(ctmp != "wd") stop("ywd is not of class wd") if(alpha <= 0 || alpha >= 1) stop("alpha out of range") ans <- ywd n <- length(ywd$D) nlev <- log(n + 1, base = 2) - 1 i <- nlev iloc <- 1 while(i >= 0) { gg <- ywd$D[iloc:(iloc + 2^i - 1)] thresh <- TOgetthrda1(gg, alpha) if(verbose) { cat(paste("At level ", i, ", the threshold is ", thresh, "\n", sep = "")) } if(return.threshold) if(i == nlev) rt <- thresh else rt <- c(thresh, rt) else ans$D[iloc:(iloc + 2^i - 1)] <- TOshrinkit(ywd$D[iloc:( iloc + 2^i - 1)], thresh) iloc <- iloc + 2^i i <- i - 1 } if(return.threshold) return(rt) else return(ans) } "TOthreshda2"<- function(ywd, alpha = 0.050000000000000003, verbose = FALSE, return.threshold = FALSE) { if(verbose) cat("Argument checking\n") ctmp <- class(ywd) if(is.null(ctmp)) stop("ywd has no class") else if(ctmp != "wd") stop("ywd is not of class wd") if(alpha <= 9.0000000000000002e-06 || alpha >= 0.99999899999999997) stop("alpha out of range") ans <- ywd n <- length(ywd$D) nlev <- log(n + 1, base = 2) - 1 i <- nlev iloc <- 1 while(i >= 0) { gg <- ywd$D[iloc:(iloc + 2^i - 1)] thresh <- TOgetthrda2(gg^2, alpha) if(verbose) { cat(paste("At level ", i, ", the threshold is ", thresh, "\n", sep = "")) } if(return.threshold) if(i == nlev) rt <- thresh else rt <- c(thresh, rt) else ans$D[iloc:(iloc + 2^i - 1)] <- TOshrinkit(ywd$D[iloc:( iloc + 2^i - 1)], thresh) iloc <- iloc + 2^i i <- i - 1 } if(return.threshold) return(rt) else return(ans) } "WaveletCV"<- function(ynoise, x = 1:length(ynoise), filter.number = 10, family = "DaubLeAsymm", thresh.type = "soft", tol = 0.01, verbose = 0, plot.it = TRUE, ll = 3) { # # Switch on verbosity for function calls if necessary # if(verbose == 2) CallsVerbose <- TRUE else CallsVerbose <- FALSE if(verbose 
== 1) cat("WaveletCV: Wavelet model building\nThinking ") n <- length(ynoise) ywd <- wd(ynoise, filter.number = filter.number, family = family, verbose = CallsVerbose) univ.threshold <- threshold(ywd, type = thresh.type, return.threshold = TRUE, lev = ll:(nlevelsWT(ywd) - 1), verbose = CallsVerbose, policy="universal")[1] if(verbose == 1) { cat("Universal threshold: ", univ.threshold, "\n") cat("Now doing universal threshold reconstruction...") } yuvtwd <- threshold(ywd, type = thresh.type, lev = ll:(nlevelsWT(ywd) - 1), verbose = CallsVerbose, policy="universal") if(verbose == 1) cat("done\nNow reconstructing...") yuvtwr <- wr(yuvtwd, verbose = CallsVerbose) if(verbose == 1) cat("done\nNow plotting universal thresholded\n") if(plot.it == TRUE) { oldpar <- par(mfrow = c(2, 2)) matplot(x, cbind(ynoise, yuvtwr), type = "l", main = "Universal Threshold Reconstruction", xlab = "x", col = c(3, 2), lty = c(3, 2)) } if(verbose == 1) cat("Now optimising cross-validated error estimate\n") R <- 0.61803399000000003 C <- 1 - R ax <- 0 bx <- univ.threshold/2 cx <- univ.threshold x0 <- ax x3 <- cx if(abs(cx - bx) > abs(bx - ax)) { x1 <- bx x2 <- bx + C * (cx - bx) } else { x2 <- bx x1 <- bx - C * (bx - ax) } fa <- rsswav(ynoise, value = ax, filter.number = filter.number, family = family, thresh.type = thresh.type, ll = ll)$ssq fb <- rsswav(ynoise, value = bx, filter.number = filter.number, family = family, thresh.type = thresh.type, ll = ll)$ssq fc <- rsswav(ynoise, value = cx, filter.number = filter.number, family = family, thresh.type = thresh.type, ll = ll)$ssq f1 <- rsswav(ynoise, value = x1, filter.number = filter.number, family = family, thresh.type = thresh.type, ll = ll)$ssq f2 <- rsswav(ynoise, value = x2, filter.number = filter.number, family = family, thresh.type = thresh.type, ll = ll)$ssq xkeep <- c(ax, cx, x1, x2) fkeep <- c(fa, fc, f1, f2) if(plot.it == TRUE) { plot(c(ax, bx, cx), c(fa, fb, fc)) text(c(x1, x2), c(f1, f2), lab = c("1", "2")) } cnt <- 3 while(abs(x3 - x0) > tol * (abs(x1) + abs(x2))) { cat("x0=", x0, "x1=", x1, "x2=", x2, "x3=", x3, "\n") cat("f1=", f1, "f2=", f2, "\n") if(f2 < f1) { x0 <- x1 x1 <- x2 x2 <- R * x1 + C * x3 f1 <- f2 f2 <- rsswav(ynoise, value = x2, filter.number = filter.number, family = family, thresh.type = thresh.type, ll = ll) if(verbose == 2) { cat("SSQ: ", signif(f2$ssq, 3), " DF: ", f2$df, "\n") } else if(verbose == 1) cat(".") f2 <- f2$ssq xkeep <- c(xkeep, x2) fkeep <- c(fkeep, f2) if(plot.it == TRUE) text(x2, f2, lab = as.character(cnt)) cnt <- cnt + 1 } else { x3 <- x2 x2 <- x1 x1 <- R * x2 + C * x0 f2 <- f1 f1 <- rsswav(ynoise, value = x1, filter.number = filter.number, family = family, thresh.type = thresh.type, ll = ll) if(verbose == 2) cat("SSQ: ", signif(f1$ssq, 3), " DF: ", f1$df, "\n") else if(verbose == 1) cat(".") f1 <- f1$ssq xkeep <- c(xkeep, x1) fkeep <- c(fkeep, f1) if(plot.it == TRUE) text(x1, f1, lab = as.character(cnt)) cnt <- cnt + 1 } } if(f1 < f2) tmp <- x1 else tmp <- x2 x1 <- tmp/sqrt(1 - log(2)/log(n)) if(verbose == 1) cat("Correcting to ", x1, "\n") else if(verbose == 1) cat("\n") xvwd <- threshold(ywd, policy = "manual", value = x1, type = thresh.type, lev = ll:(nlevelsWT(ywd)- 1)) xvwddof <- dof(xvwd) xvwr <- wr(xvwd) if(plot.it == TRUE) matplot(x, cbind(ynoise, yuvtwr, xvwr), type = "l", main = "XV Threshold Reconstruction", xlab = "x", col = c(3, 2, 1)) g <- sort.list(xkeep) xkeep <- xkeep[g] fkeep <- fkeep[g] list(x = x, ynoise = ynoise, xvwr = xvwr, yuvtwr = yuvtwr, xvthresh = x1, uvthresh = univ.threshold, xvdof = 
xvwddof, uvdof = dof( yuvtwd), xkeep = xkeep, fkeep = fkeep) } "Whistory"<- function(...) UseMethod("Whistory") "Whistory.wst"<- function(wst, all = FALSE, ...) { ntimes <- length(wst$date) if(ntimes == 1) cat("This object has not been modified\n") cat("This object has been modified ", ntimes - 1, " times\n") cat("The date of the last mod was ", wst$date[ntimes], "\n") cat("That modification was\n") cat(wst$history[ntimes - 1], "\n") if(all == TRUE) { cat("Complete history\n") cat("Modification dates\n") for(i in 1:ntimes) cat(wst$date[i], "\n") cat("Modification record\n") for(i in 1:ntimes) cat(wst$history[i - 1], "\n") } invisible() } "accessC"<- function(...) UseMethod("accessC") "accessC.mwd"<- function(mwd, level = nlevelsWT(mwd), ...) { # # Get smoothed data from multiple wavelet structure. # ctmp <- class(mwd) if(is.null(ctmp)) stop("mwd has no class") else if(ctmp != "mwd") stop("mwd is not of class mwd") if(level < 0) stop("Must have a positive level") else if(level > nlevelsWT(mwd)) stop("Cannot exceed maximum number of levels") level <- level + 1 first.last.c <- mwd$fl.dbase$first.last.c first.level <- first.last.c[level, 1] last.level <- first.last.c[level, 2] offset.level <- first.last.c[level, 3] n <- last.level + 1 - first.level coeffs <- mwd$C[, (offset.level + 1):(offset.level + n)] return(coeffs) } "accessC.wd"<- function(wd, level = nlevelsWT(wd), boundary = FALSE, aspect = "Identity", ...) { if(IsEarly(wd)) { ConvertMessage() stop() } ctmp <- class(wd) if(is.null(ctmp)) stop("wd has no class") else if(ctmp != "wd") stop("wd is not of class wd") if(level < 0) stop("Must have a positive level") else if(level > nlevelsWT(wd)) stop(paste("Cannot exceed maximum number of levels", nlevelsWT(wd) )) if(wd$bc == "interval") { if(level != wd$current.scale) stop(paste( "Requested wd object was decomposed to level ", wd$current.scale, " and so for \"wavelets on the interval\" objects I can only show this level for the scaling function coefficients\n" )) first.level <- wd$fl.dbase$first.last.c[1] last.level <- wd$fl.dbase$first.last.c[2] offset.level <- wd$fl.dbase$first.last.c[3] n <- last.level - first.level + 1 coefs <- wd$transformed.vector[(offset.level + 1 - first.level): (offset.level + n - first.level)] } else { level <- level + 1 first.last.c <- wd$fl.dbase$first.last.c first.level <- first.last.c[level, 1] last.level <- first.last.c[level, 2] offset.level <- first.last.c[level, 3] if(boundary == TRUE) { n <- last.level - first.level + 1 coefs <- wd$C[(offset.level + 1):(offset.level + n)] } else { type <- wd$type if(type == "wavelet") n <- 2^(level - 1) else if(type == "station") n <- 2^nlevelsWT(wd) else stop("Unknown type component") coefs <- wd$C[(offset.level + 1 - first.level):( offset.level + n - first.level)] } } if(aspect == "Identity") return(coefs) else { fn <- get(aspect) return(fn(coefs)) } } "accessC.wp"<- function(wp, ...) { stop("A wavelet packet object does not have ``levels'' of father wavelet coefficients. Use accessD to obtain levels of father and mother coefficients" ) } "accessC.wst"<- function(wst, level, aspect = "Identity", ...) { # # # Get all coefficients at a particular level # First work out how many packets there are at this level # nlevels <- nlevelsWT(wst) if(level < 0) stop("level must nonnegative") else if(level > nlevels) stop(paste("level must be smaller than ", nlevels - 1)) coefs <- wst$Carray[level + 1, ] if(aspect == "Identity") return(coefs) else { fn <- get(aspect) return(fn(coefs)) } } "accessD"<- function(...) 
UseMethod("accessD") "accessD.mwd"<- function(mwd, level, ...) { # # Get wavelet coefficients from multiple wavelet structure # ctmp <- class(mwd) if(is.null(ctmp)) stop("mwd has no class") else if(ctmp != "mwd") stop("mwd is not of class mwd") if(level < 0) stop("Must have a positive level") else if(level > (nlevelsWT(mwd) - 1)) stop("Cannot exceed maximum number of levels") level <- level + 1 first.last.d <- mwd$fl.dbase$first.last.d first.level <- first.last.d[level, 1] last.level <- first.last.d[level, 2] offset.level <- first.last.d[level, 3] n <- last.level + 1 - first.level coeffs <- mwd$D[, (offset.level + 1):(offset.level + n)] return(coeffs) } "accessD.wd"<- function(wd, level, boundary = FALSE, aspect = "Identity", ...) { if(IsEarly(wd)) { ConvertMessage() stop() } ctmp <- class(wd) if(is.null(ctmp)) stop("wd has no class") else if(ctmp != "wd") stop("wd is not of class wd") if(level < 0) stop("Must have a positive level") else if(level > (nlevelsWT(wd) - 1)) stop(paste("Cannot exceed maximum number of levels: ", wd$ nlevels - 1)) if(wd$bc == "interval") { level <- level - wd$current.scale objname <- deparse(substitute(wd)) if(level < 0) stop(paste("The wd object: ", objname, " was only decomposed down to level: ", wd$ current.scale, " Try a larger level")) if(boundary == TRUE) stop("There are no boundary elements in a wavelets on the interval transform!" ) } level <- level + 1 first.last.d <- wd$fl.dbase$first.last.d first.level <- first.last.d[level, 1] last.level <- first.last.d[level, 2] offset.level <- first.last.d[level, 3] if(boundary == TRUE) { n <- last.level - first.level + 1 coefs <- wd$D[(offset.level + 1):(offset.level + n)] } else { type <- wd$type if(type == "wavelet") { n <- 2^(level - 1) if(wd$bc == "interval") n <- last.level - first.level + 1 } else if(type == "station") n <- 2^nlevelsWT(wd) else stop("Unknown type component") if(wd$bc != "interval") coefs <- wd$D[(offset.level + 1 - first.level):( offset.level + n - first.level)] else coefs <- wd$transformed.vector[(offset.level + 1 - first.level):(offset.level + n - first.level)] } if(aspect == "Identity") return(coefs) else { fn <- get(aspect) return(fn(coefs)) } } "accessD.wd3D"<- function(obj, level = nlevelsWT(obj) - 1, block, ...) { if(level < 0) stop(paste("Level cannot be accessed. You tried to access level", level, ". The minimum is zero")) else if(level >= nlevelsWT(obj)) stop(paste("Level cannot be accessed. You tried to access level", level, ". 
The maximum level is", nlevelsWT(obj) - 1)) halfsize <- 2^level size <- dim(obj$a)[1] GHH <- HGH <- GGH <- HHG <- GHG <- HGG <- GGG <- array(0, dim = rep( halfsize, 3)) answer <- .C("getARRel", Carray = as.double(obj$a), size = as.integer(size), level = as.integer(level), GHH = as.double(GHH), HGH = as.double(HGH), GGH = as.double(GGH), HHG = as.double(HHG), GHG = as.double(GHG), HGG = as.double(HGG), GGG = as.double(GGG), PACKAGE = "wavethresh") thedim <- rep(halfsize, 3) # # # Return HHH if level = 0 # if(missing(block)) { if(level == 0) list(HHH = array(obj$a[1, 1, 1], dim = thedim), GHH = array(answer$GHH, dim = thedim), HGH = array( answer$HGH, dim = thedim), GGH = array(answer$ GGH, dim = thedim), HHG = array(answer$HHG, dim = thedim), GHG = array(answer$GHG, dim = thedim), HGG = array(answer$HGG, dim = thedim), GGG = array(answer$GGG, dim = thedim)) else list(GHH = array(answer$GHH, dim = thedim), HGH = array( answer$HGH, dim = thedim), GGH = array(answer$ GGH, dim = thedim), HHG = array(answer$HHG, dim = thedim), GHG = array(answer$GHG, dim = thedim), HGG = array(answer$HGG, dim = thedim), GGG = array(answer$GGG, dim = thedim)) } else { if(level != 0 && block == "HHH") stop("HHH only exists at level 0") else return(switch(block, HHH = array(obj$a[1, 1, 1], dim = thedim), GHH = array(answer$GHH, dim = thedim), HGH = array(answer$HGH, dim = thedim), GGH = array(answer$GGH, dim = thedim), HHG = array(answer$HHG, dim = thedim), GHG = array(answer$GHG, dim = thedim), HGG = array(answer$HGG, dim = thedim), GGG = array(answer$GGG, dim = thedim))) } } "accessD.wp"<- function(wp, level, ...) { # # # Get all coefficients at a particular level # First work out how many packets there are at this level # nlev <- nlevelsWT(wp) if(level < 0) stop("level must nonnegative") else if(level > nlev - 1) stop(paste("level must be smaller than ", nlev - 1)) npx <- 2^(nlev - level) return(wp$wp[level + 1, ]) } "accessD.wpst"<- function(wpst, level, index, ...) { nlev <- nlevelsWT(wpst) if(level < 0) stop("Level must be greater than or equal to 0") else if(level >= nlev) stop(paste("Level must be less than ", nlev)) nwppkt <- 2^(nlev - level) # # # Check that packet index "index" is in range # if(index < 0) stop("index must be greater than or equal to 0") else if(index >= nwppkt) stop(paste("index must be less than ", nwppkt)) primary.index <- c2to4(index) # # # Now compute extra multiples for lower levels # for(i in level:(nlev - 1)) { em <- 2^(2 * nlev - 2 * i - 1) primary.index <- c(primary.index, em + primary.index) } # # # Prepare some room for the answer # weave <- rep(0, 2^nlev) ans <- .C("accessDwpst", coefvec = as.double(wpst$wpst), lansvec = as.integer(length(wpst$wpst)), nlev = as.integer(nlev), avixstart = as.integer(wpst$avixstart), primary.index = as.integer(primary.index), nwppkt = as.integer(nwppkt), pklength = as.integer(2^level), level = as.integer(level), weave = as.double(weave), lweave = as.double(length(weave)), error = as.integer(0), PACKAGE = "wavethresh") ans$weave } "accessD.wst"<- function(wst, level, aspect = "Identity", ...) 
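# Illustrative usage (hypothetical vector x, length a power of two):
#   xwst <- wst(x)
#   d4 <- accessD(xwst, level = 4)  # all packet-ordered detail coefficients at level 4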
{ # # # Get all coefficients at a particular level # First work out how many packets there are at this level # nlevels <- nlevelsWT(wst) if(level < 0) stop("level must nonnegative") else if(level > nlevels - 1) stop(paste("level must be smaller than ", nlevels - 1)) npx <- 2^(nlevels - level) coefs <- wst$wp[level + 1, ] if(aspect == "Identity") return(coefs) else { fn <- get(aspect) return(fn(coefs)) } } "accessc"<- function(irregwd.structure, level, boundary = FALSE) { ctmp <- class(irregwd.structure) if(is.null(ctmp)) stop("irregwd.structure has no class") else if(ctmp != "irregwd") stop("irregwd.structure is not of class irregwd") if(level < 0) stop("Must have a positive level") else if(level > (nlevelsWT(irregwd.structure) - 1)) stop("Cannot exceed maximum number of levels") level <- level + 1 first.last.d <- irregwd.structure$fl.dbase$first.last.d first.level <- first.last.d[level, 1] last.level <- first.last.d[level, 2] offset.level <- first.last.d[level, 3] if(boundary == TRUE) { n <- last.level - first.level + 1 coefs <- irregwd.structure$c[(offset.level + 1):(offset.level + n)] } else { n <- 2^(level - 1) coefs <- irregwd.structure$c[(offset.level + 1 - first.level):( offset.level + n - first.level)] } return(coefs) } "addpkt"<- function(level, index, density, col, yvals) { if(density < 0 || density > 1) stop("Density should be between 0 and 1") density <- density * 40 y <- level level <- level - 1 pktlength <- 2^level x <- index * pktlength h <- 1 w <- pktlength if(missing(yvals)) drawbox(x, y, w, h, density = density, col = col) else { xco <- seq(from = x, to = x + w, length = length(yvals)) yco <- y + h/2 + (h * yvals)/(2 * max(abs(yvals))) lines(xco, yco) } } "av.basis"<- function(wst, level, ix1, ix2, filter) { if(level != 0) { cl <- conbar(av.basis(wst, level - 1, 2 * ix1, 2 * ix1 + 1, filter), getpacket(wst, level = level, index = ix1), filter = filter) cr <- rotateback(conbar(av.basis(wst, level - 1, 2 * ix2, 2 * ix2 + 1, filter), getpacket(wst, level = level, index = ix2), filter = filter)) } else { cl <- conbar(getpacket(wst, level = level, index = ix1, type = "C"), getpacket(wst, level = level, index = ix1), filter) cr <- rotateback(conbar(getpacket(wst, level = level, index = ix2, type = "C"), getpacket(wst, level = level, index = ix2), filter)) } return(0.5 * (cl + cr)) } "basisplot"<- function(x, ...) UseMethod("basisplot") "basisplot.BP"<- function(x, num = min(10, length(BP$level)), ...) { BP <- x plotpkt(nlevelsWT(BP)) dnsvec <- BP$basiscoef[1:num] dnsvec <- dnsvec/max(abs(dnsvec)) for(i in 1:num) addpkt(BP$level[i], BP$pkt[i], dnsvec[i], col = 1) } "basisplot.wp"<- function(x, draw.mode = FALSE, ...) { wp <- x J <- nlevelsWT(wp) oldl <- -1 zero <- rep(0, 2^J) rh <- 2^(J - 1) zwp <- wp(zero, filter.number = wp$filter$filter.number, family = wp$ filter$family) plotpkt(J) for(j in 0:(J - 1)) for(k in 0:(2^(J - j) - 1)) addpkt(j, k, 0, col = 1) znv <- MaNoVe(zwp) origznv <- znv cat("Select packets: Left: select. 
Right: exit\n") endit <- 0 while(endit == 0) { n <- locator(n = 1) if(length(n) == 0) endit <- 1 else { sellevel <- floor(n$y) if(sellevel < 1 || sellevel > (J - 1)) cat("Click on shaded boxes\n") else { npkts <- 2^(J - sellevel) if(n$x < 0 || n$x > rh) cat("Click on shaded boxes\n") else { pknumber <- floor((npkts * n$x)/rh) if(draw.mode == TRUE && oldl > -1) { addpkt(oldl, oldpn, 1, col = 3) } addpkt(sellevel, pknumber, 1, col = 2) znv$node.list[[sellevel]]$upperctrl[pknumber + 1] <- "T" if(draw.mode == TRUE) { oldl <- sellevel oldpn <- pknumber pktl <- 2^sellevel nhalf <- floor(pktl/2) pkt <- c(rep(0, nhalf), 1, rep(0, nhalf - 1 )) nzwp <- putpacket(zwp, level = sellevel, index = pknumber, packet = pkt) cat("Computing WAIT...") ans <- InvBasis(nzwp, nv = znv) cat("d o n e.\n") znv <- origznv dev.set() ts.plot(ans, xlab = "x", ylab = "Wavelet packet basis function") dev.set() } } } } } znv } "c2to4"<- function(index) { # # Represent index in base 2. Then use this representation and think of # it in base 4 to get the number # ans <- .C("c2to4", index = as.integer(index), answer = as.integer(0) ,PACKAGE = "wavethresh") ans$answer } "compare.filters"<- function(f1, f2) { if(f1$family != f2$family) return(FALSE) else if(f1$filter.number != f2$filter.number) return(FALSE) else return(TRUE) } "compress"<- function(...) UseMethod("compress") "compress.default"<- function(v, verbose = FALSE, ...) { n <- length(v) r <- sum(v != 0) if(n > 2 * r) { position <- (1:n)[v != 0] values <- v[position] answer <- list(position = position, values = values, original.length = n) class(answer) <- "compressed" if(verbose == TRUE) cat("Compressed ", n, " into ", 2 * r, "(", signif((100 * 2 * r)/n, 3), "%)\n") return(answer) } else { answer <- list(vector = v) class(answer) <- "uncompressed" if(verbose == TRUE) cat("No compression\n") return(answer) } } "compress.imwd"<- function(x, verbose = FALSE, ...) 
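# Illustrative usage (hypothetical square matrix m, sides a power of two):
#   cim <- compress(imwd(m))
# The saving is only worthwhile when many detail coefficients are exactly
# zero, e.g. after thresholding; otherwise the blocks are stored unchanged.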
{ if(verbose == TRUE) cat("Argument checking...") # # # Check class of imwd # if(verbose == TRUE) cat("Argument checking\n") ctmp <- class(x) if(is.null(ctmp)) stop("imwd has no class") else if(ctmp != "imwd") stop("imwd is not of class imwd") squished <- list(nlevels = nlevelsWT(x), fl.dbase = x$fl.dbase, filter = x$filter, w0Lconstant = x$w0Lconstant, type = x$type, bc = x$bc) # # # Go round loop compressing each set of coefficients # for(level in 0:(nlevelsWT(x) - 1)) { if(verbose == TRUE) cat("Level ", level, "\n\t") nm <- lt.to.name(level, "CD") if(verbose == TRUE) cat("CD\t") squished[[nm]] <- compress.default(x[[nm]], verbose = verbose) nm <- lt.to.name(level, "DC") if(verbose == TRUE) cat("\tDC\t") squished[[nm]] <- compress.default(x[[nm]], verbose = verbose) nm <- lt.to.name(level, "DD") if(verbose == TRUE) cat("\tDD\t") squished[[nm]] <- compress.default(x[[nm]], verbose = verbose) } class(squished) <- c("imwdc") if(verbose == TRUE) cat("Overall compression: Was: ", w <- object.size(x), " Now:", s <- object.size(squished), " (", signif((100 * s)/w, 3), "%)\n") squished } "conbar"<- function(c.in, d.in, filter) { # # S interface to C routine conbar # LengthCout <- 2 * length(c.in) c.out <- rep(0, LengthCout) answer <- .C("conbarL", c.in = as.double(c.in), LengthCin = as.integer(length(c.in)), firstCin = as.integer(0), d.in = as.double(d.in), LengthDin = as.integer(length(d.in)), firstDin = as.integer(0), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), c.out = as.double(c.out), LengthCout = as.integer(LengthCout), firstCout = as.integer(0), lastCout = as.integer(LengthCout - 1), type = as.integer(1), bc = as.integer(1), PACKAGE = "wavethresh") answer$c.out } "convert"<- function(...) UseMethod("convert") "convert.wd"<- function(wd, ...) { # # # Convert a wd station object into a wst object # # # First create object of same size and type of desired return object. # if(wd$type != "station") stop( "Object to convert must be of type \"station\" ") n <- 2^nlevelsWT(wd) dummy <- rep(0, n) tmpwst <- wst(dummy, filter.number = wd$filter$filter.number, family = wd$ filter$family) tmpwst$date <- wd$date # # # Now we've got the skeleton let's fill in all the details. # arrvec <- getarrvec(nlevelsWT(wd), sort = FALSE) for(lev in (nlevelsWT(wd) - 1):1) { ds <- accessD.wd(wd, level = lev) cs <- accessC.wd(wd, level = lev) ds <- ds[arrvec[, nlevelsWT(wd) - lev]] cs <- cs[arrvec[, nlevelsWT(wd) - lev]] tmpwst <- putD(tmpwst, level = lev, v = ds) tmpwst <- putC(tmpwst, level = lev, v = cs) } # # # And put final level in for Cs and Ds (for wst only) # tmpwst <- putC(tmpwst, level = nlevelsWT(wd), v = accessC(wd, level = wd$ nlevels)) # tmpwst <- putD(tmpwst, level = nlevelsWT(wd), v = accessC(wd, level = wd$ nlevels)) # # # And zeroth level # tmpwst <- putC(tmpwst, level = 0, v = accessC(wd, level = 0)) arrvec <- sort.list(levarr(1:n, levstodo = nlevelsWT(wd))) tmpwst <- putD(tmpwst, level = 0, v = accessD(wd, level = 0)[arrvec]) tmpwst } "convert.wst"<- function(wst, ...) { # # # Convert a wst object into a wd type station object # # # First create object of same size and type of desired return object. # n <- 2^nlevelsWT(wst) dummy <- rep(0, n) tmpwd <- wd(dummy, type = "station", filter.number = wst$filter$filter.number, family = wst$filter$family) tmpwd$date <- wst$date # # # Now we've got the skeleton let's fill in all the details. 
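#
# (Both representations hold the same coefficients; each level of the
#  packet-ordered wst is just a permutation of the corresponding level of
#  the time-ordered "station" wd, so filling in the skeleton amounts to
#  re-indexing via getarrvec()/levarr().)
#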
# arrvec <- getarrvec(nlevelsWT(wst)) for(lev in (nlevelsWT(wst) - 1):1) { ds <- accessD.wst(wst, level = lev) cs <- accessC.wst(wst, level = lev) ds <- ds[arrvec[, nlevelsWT(wst) - lev]] cs <- cs[arrvec[, nlevelsWT(wst) - lev]] ixs <- putD(tmpwd, level = lev, v = ds, index = TRUE) tmpwd$D[ixs$ix1:ixs$ix2] <- ds ixs <- putC(tmpwd, level = lev, v = cs, index = TRUE) tmpwd$C[ixs$ix1:ixs$ix2] <- cs } # # # And put final level in for Cs # tmpwd <- putC(tmpwd, level = nlevelsWT(wst), v = accessC(wst, level = wst$ nlevels)) # # # And zeroth level # tmpwd <- putC(tmpwd, level = 0, v = accessC(wst, level = 0)) arrvec <- levarr(1:n, levstodo = nlevelsWT(wst)) tmpwd <- putD(tmpwd, level = 0, v = accessD(wst, level = 0)[arrvec]) tmpwd } "dof"<- function(wd) { cwd <- class(wd) if(is.null(cwd)) { stop("Object has no class") } else if(cwd != "wd") stop("Object is not of class wd") else { # # Count number of non-zero coefficients # nlev <- nlevelsWT(wd) # # # nnonzero counts the number of nonzero coefficients # This is already 1, since the C contains first level constant # nnonzero <- 1 for(i in 0:(nlev - 1)) { nnonzero <- nnonzero + sum(accessD(wd, lev = i) != 0) } } nnonzero } "doppler"<- function(t) { sqrt(t * (1 - t)) * sin((2 * pi * 1.05)/(t + 0.050000000000000003)) } "draw"<- function(...) UseMethod("draw") "draw.default"<- function(filter.number = 10, family = "DaubLeAsymm", resolution = 8192, verbose = FALSE, plot.it = TRUE, main = "Wavelet Picture", sub = zwd$filter$name, xlab = "x", ylab = "psi", dimension = 1, twodplot = persp, enhance = TRUE, efactor = 0.050000000000000003, scaling.function = FALSE, type="l", ...) { if(is.na(IsPowerOfTwo(resolution))) stop("Resolution must be a power of two") if(scaling.function == FALSE) { resolution <- resolution/2 # # # First obtain support widths # sp <- support(filter.number = filter.number, family = family, m = 0, n = 0) lh <- c(sp$phi.lh, sp$phi.rh) lh <- lh[1] rh <- sp$phi.rh + 2 * resolution - 1 if(verbose == TRUE) cat("Support of highest resolution wavelets is [", lh, ", ", rh, "]\n") # pic.support <- support(filter.number = filter.number, family = family, m = 0, n = 0) pic.support <- c(pic.support$psi.lh, pic.support$psi.rh) # # # Now go through all levels and see what is the lowest resolution wavelet # that we can use to get the whole wavelet in the support range of the # highest resolution wavelets. # lowest.level <- log(resolution)/log(2) if(verbose == TRUE) cat("Lowest level is: ", lowest.level, "\n") selection <- NULL candidates <- NULL for(m in lowest.level:0) { if(verbose == TRUE) cat("Level ", m, " testing\n") # # # Go through each wavelet at this level and find out # it's support. Then check to see if it lies in the # union of the supports of the highest resolution # wavelets, and select it if it does. # # If fact we examine all the ones that will fit, and choose one that # is near the middle - to get a nice picture. 
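#
# (Presumably restricting attention to wavelets supported wholly inside
#  [lh, rh] keeps the plotted function clear of boundary effects.)
#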
# for(n in 0:(2^(lowest.level - m) - 1)) { lhs <- support(filter.number = filter.number, family = family, m = m, n = n) rhs <- lhs$rh lhs <- lhs$lh if(verbose == TRUE) cat("LHS: ", lhs, " RHS: ", rhs, "\n") if((lhs >= lh) && (rhs <= rh)) { candidates <- c(candidates, n) if(verbose == TRUE) cat("Level ", m, " Position: ", n, " selected\n") } } if(!is.null(candidates)) { if(verbose == TRUE) { cat("Candidates are \n") print(candidates) } n <- floor(median(candidates)) if(verbose == TRUE) cat("Choosing ", n, "\n") selection <- list(m = m, n = n) lhs <- support(filter.number = filter.number, family = family, m = m, n = n) rhs <- lhs$rh lhs <- lhs$lh break } if(!is.null(selection)) break } # # # If we haven't selected anything, then set the coefficient to # be one of the highest resolution coefficients. ALL of these # are guaranteed to be in the union of all their supports! # The picture will be crap though! # if(is.null(selection)) selection <- list(m = 0, n = 0) # # # Build a wd object structure consisting solely of zeroes. # zwd <- wd(rep(0, length = resolution * 2), filter.number = filter.number, family = family, bc = "symmetric") # # # Insert a vector containing a 1 where we want to put the coefficient # wd.lev <- lowest.level - selection$m if(verbose == TRUE) cat("Coefficient insertion at wd level: ", wd.lev, "\n" ) if(wd.lev == 0) pickout <- 1 else { pickout <- rep(0, 2^wd.lev) pickout[selection$n + 1] <- 1 } zwd <- putD(zwd, level = wd.lev, v = pickout) # # # Reconstruct # zwr <- wr(zwd) # # # Scales # if(verbose == TRUE) { cat("ps: ", pic.support[1], pic.support[2], "\n") cat("lh,rh: ", lh, rh, "\n") cat("lhs,rhs: ", lhs, rhs, "\n") } aymax <- ((pic.support[2] - pic.support[1]) * (rh - lh))/(rhs - lhs) ax <- pic.support[1] - (aymax * (lhs - lh))/(rh - lh) ay <- ax + aymax if(verbose == TRUE) cat("ax,ay ", ax, ay, "\n") # # # Scale up y values, because we're actually using a higher "resolution" # wavelet than psi(x) # zwr <- zwr * sqrt(2)^(selection$m + 1) # # # Plot it if required # x <- seq(from = ax, to = ay, length = resolution * 2) if(enhance == TRUE) { sv <- (abs(zwr) > efactor * range(abs(zwr))[2]) sv <- (1:length(sv))[sv] tr <- range(sv) sv <- tr[1]:tr[2] x <- x[sv] zwr <- zwr[sv] main <- paste(main, " (Enhanced)") } if(plot.it == TRUE) { if(dimension == 1) plot(x = x, y = zwr, main = main, sub = sub, xlab = xlab, ylab = ylab, type = type, ...) else if(dimension == 2) { twodplot(x = x, y = x, z = outer(zwr, zwr), xlab = xlab, ylab = xlab, zlab = ylab, ...) title(main = main, sub = sub) invisible() } else stop("Can only do 1 or 2 dimensional plots") } else { if(dimension == 1) return(list(x = x, y = zwr)) else if(dimension == 2) return(list(x = x, y = x, z = outer(zwr, zwr))) else stop("Can only do 1 or 2 dimensional plots") } } else { if(dimension != 1) stop("Can only generate one-dimensional scaling function" ) if(enhance == TRUE) { enhance <- FALSE warning("Cannot enhance picture of scaling function") } if(missing(main)) main <- "Scaling Function" if(missing(ylab)) ylab <- "phi" if(missing(sub)) sub <- filter.select(filter.number = filter.number, family = family)$name phi <- ScalingFunction(filter.number = filter.number, family = family, resolution = resolution) if(plot.it == TRUE) { plot(x = phi$x, y = phi$y, main = main, sub = sub, xlab = xlab, ylab = ylab, type = type, ...) } else return(list(x = phi$x, y = phi$y)) } } "draw.imwd"<- function(wd, resolution = 128, ...) 
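# Illustrative usage (hypothetical square matrix m, sides a power of two):
#   draw(imwd(m))   # the 2D wavelet, drawn via draw.default(dimension = 2)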
{ filter <- wd$filter draw.default(filter.number = filter$filter.number, family = filter$ family, dimension = 2, resolution = resolution, ...) } "draw.imwdc"<- function(wd, resolution = 128, ...) { filter <- wd$filter draw.default(filter.number = filter$filter.number, family = filter$ family, dimension = 2, resolution = resolution, ...) } "draw.mwd"<- function(mwd, phi = 0, psi = 0, return.funct = FALSE, ...) { #draw.mwd # # plots one of the scaling or # wavelet functions used to create mwd # #check phi and psi if(phi > 0 && psi > 0) stop("only one of phi and psi should be nonzero" ) if(phi == 0 && psi < 0) stop("bad psi argument") if(phi < 0 && psi == 0) stop("bad phi argument") if(phi == 0 && psi == 0) phi <- 1 if(phi > mwd$filter$nphi) stop("There aren't that many scaling functions") if(psi > mwd$filter$npsi) stop("There aren't that many wavelets") #for the specified case insert a single 1 and reconstruct. if(phi != 0) { main <- c("scaling function No.", phi) M <- matrix(rep(0, 2 * mwd$filter$nphi), nrow = mwd$filter$nphi ) M[phi, 1] <- 1 mwd$D <- matrix(rep(0, mwd$filter$npsi * mwd$fl.dbase$nvecs.d), nrow = mwd$filter$npsi) mwd <- putC.mwd(mwd, level = 1, M) } if(psi != 0) { M <- matrix(rep(0, 2 * mwd$filter$npsi), nrow = mwd$filter$npsi ) M[psi, 1] <- 1 mwd$C <- matrix(rep(0, mwd$filter$nphi * mwd$fl.dbase$nvecs.c), nrow = mwd$filter$nphi) mwd$D <- matrix(rep(0, mwd$filter$npsi * mwd$fl.dbase$nvecs.d), nrow = mwd$filter$npsi) mwd <- putD.mwd(mwd, level = 1, M) } fun <- mwr(mwd, start.level = 1) x <- (2 * (0:(length(fun) - 1)))/length(fun) # # #plotit plot(x, fun, type = "l", ...) if(return.funct == TRUE) return(fun) } "draw.wd"<- function(wd, ...) { if(IsEarly(wd)) { ConvertMessage() stop() } filter <- wd$filter draw.default(filter.number = filter$filter.number, family = filter$ family, type = "l", ...) } "draw.wp"<- function(wp, level, index, plot.it = TRUE, main = "Wavelet Packet", sub = paste(wp$ name, " Level=", level, "Index= ", index), xlab = "Position", ylab = "Wavelet Packet Value", ...) { tmp <- drawwp.default(level = level, index = index, filter.number = wp$ filter$filter.number, family = wp$filter$family, ...) if(plot.it == TRUE) { plot(1:length(tmp), y = tmp, main = main, sub = sub, xlab = xlab, ylab = ylab, type = "l", ...) } else return(list(x = 1:length(tmp), y = tmp)) } "draw.wst"<- function(wst, ...) { filter <- wst$filter draw.default(filter.number = filter$filter.number, family = filter$ family, type = "l", ...)
} "drawbox"<- function(x, y, w, h, density, col) { xc <- c(x, x + w, x + w, x) yc <- c(y, y, y + h, y + h) polygon(x = xc, y = yc, density = density, col = col) } "drawwp.default"<- function(level, index, filter.number = 10, family = "DaubLeAsymm", resolution = 64 * 2^level) { # # First construct a zeroed wp object # z <- rep(0, resolution) # # # Now take the wp transform # zwp <- wp(z, filter.number = filter.number, family = family) # # # # The packet to install # if(level == 0) { newpkt <- 1 } else { newpkt <- rep(0, 2^level) newpkt[(2^level)/2] <- 1 } zwp <- putpacket(zwp, level = level, index = index, packet = newpkt) # # # Now set up the packet list # nlev <- nlevelsWT(zwp) npkts <- 2^(nlev - level) levvec <- rep(level, npkts) pkt <- 0:(npkts - 1) basiscoef <- rep(0, npkts) pktlist <- list(nlevels = nlev, level = levvec, pkt = pkt) # # # Do the inverse # zwr <- InvBasis(zwp, pktlist = pktlist) zwr } "ewspec"<- function(x, filter.number = 10, family = "DaubLeAsymm", UseLocalSpec = TRUE, DoSWT = TRUE, WPsmooth = TRUE, verbose = FALSE, smooth.filter.number = 10, smooth.family = "DaubLeAsymm", smooth.levels = 3:(nlevelsWT(WPwst) - 1), smooth.dev = madmad, smooth.policy = "LSuniversal", smooth.value = 0, smooth.by.level = FALSE, smooth.type = "soft", smooth.verbose = FALSE, smooth.cvtol = 0.01, smooth.cvnorm = l2norm, smooth.transform = I, smooth.inverse = I) { # # # Coarser is an old parameter, not needed now # coarser <- 0 if(verbose) cat("Smoothing then inversion\n") # # # First compute the SWT # if(DoSWT == TRUE) { if(verbose) cat("Computing nondecimated wavelet transform of data\n") xwdS <- wd(x, filter.number = filter.number, family = family, type = "station") } else xwdS <- x if(UseLocalSpec == TRUE) { if(verbose) cat("Computing raw wavelet periodogram\n") xwdWP <- LocalSpec(xwdS, lsmooth = "none", nlsmooth = FALSE) } else xwdWP <- x J <- nlevelsWT(xwdWP) # # # Compute the vSNK matrix # if(verbose) cat("Computing A matrix\n") rm <- ipndacw( - J, filter.number = filter.number, family = family) # # Compute the inverse of the vSNK matrix # if(verbose) cat("Computing inverse of A\n") irm <- solve(rm) # # # Create a matrix to store the wavelet periodogram in # if(verbose) cat("Putting wavelet periodogram into a matrix\n") WavPer <- matrix(0, nrow = (J - coarser), ncol = 2^J) # # # Now create the Wavelet Periodogram matrix # # n.b. 
J is coarsest 0 in wavethresh notation # 1 is finest J-1 in wavethresh notation # # Conversion is j -> J-j # for(j in 1:(J - coarser)) { WavPer[j, ] <- accessD(xwdWP, lev = J - j) } # # # Smooth the wavelet periodogram # if(WPsmooth == TRUE) { if(verbose) { cat("Smoothing the wavelet periodogram\n") cat("Smoothing level: ") } for(j in 1:(J - coarser)) { if(verbose) cat(J - j) WP <- WavPer[j, ] WP <- smooth.transform(WP) WPwst <- wst(WP, filter.number = smooth.filter.number, family = smooth.family) if(verbose == TRUE) cat(".w") WPwstT <- threshold.wst(WPwst, levels = smooth.levels, dev = smooth.dev, policy = smooth.policy, value = smooth.value, by.level = smooth.by.level, type = smooth.type, verbose = smooth.verbose, cvtol = smooth.cvtol, cvnorm = smooth.cvnorm) if(verbose == TRUE) cat(".t") WPwsrR <- AvBasis(WPwstT) if(verbose == TRUE) cat(".i") WavPer[j, ] <- smooth.inverse(WPwsrR) } if(verbose == TRUE) cat("\n") } # # # Need a smaller inverse Rainer matrix if don't do all levels # irm <- irm[1:(J - coarser), 1:(J - coarser)] # # # Now multiply the inverse matrix into the WavPer # S <- irm %*% WavPer # # # Store these levels in the xwdS object # xwdS <- xwdWP for(j in 1:(J - coarser)) { xwdS <- putD(xwdS, lev = J - j, v = S[j, ]) } if(coarser > 0) for(j in (J - coarser + 1):J) xwdS <- putD(xwdS, lev = J - j, v = rep(0, 2^J)) list(S = xwdS, WavPer = xwdWP, rm = rm, irm = irm) } "example.1"<- function() { x <- seq(0, 1, length = 513) x <- x[1:512] y <- rep(0, length(x)) xsv <- (x <= 0.5) # Left hand end y[xsv] <- -16 * x[xsv]^3 + 12 * x[xsv]^2 xsv <- (x > 0.5) & (x <= 0.75) # Middle section y[xsv] <- (x[xsv] * (16 * x[xsv]^2 - 40 * x[xsv] + 28))/3 - 1.5 xsv <- x > 0.75 #Right hand end y[xsv] <- (x[xsv] * (16 * x[xsv]^2 - 32 * x[xsv] + 16))/3 list(x = x, y = y) } "first.last"<- function(LengthH, DataLength, type = "wavelet", bc = "periodic", current.scale = 0) { if(type == "station" && bc != "periodic") stop("Can only do periodic boundary conditions with station") if(type != "station" && type != "wavelet") stop("Type can only be wavelet or station") levels <- log(DataLength)/log(2) first.last.c <- matrix(0, nrow = levels + 1, ncol = 3, dimnames = list( NULL, c("First", "Last", "Offset"))) first.last.d <- matrix(0, nrow = levels - current.scale, ncol = 3, dimnames = list(NULL, c("First", "Last", "Offset"))) if(bc == "periodic") { # Periodic boundary correction if(type == "wavelet") { first.last.c[, 1] <- rep(0, levels + 1) first.last.c[, 2] <- 2^(0:levels) - 1 first.last.c[, 3] <- rev(c(0, cumsum(rev(1 + first.last.c[, 2]))[1:levels])) first.last.d[, 1] <- rep(0, levels) first.last.d[, 2] <- 2^(0:(levels - 1)) - 1 first.last.d[, 3] <- rev(c(0, cumsum(rev(1 + first.last.d[, 2]))[1:(levels - 1)])) ntotal <- 2 * DataLength - 1 ntotal.d <- DataLength - 1 } else if(type == "station") { first.last.c[, 1] <- rep(0, levels + 1) first.last.c[, 2] <- 2^levels - 1 first.last.c[, 3] <- rev(c(0, cumsum(rev(1 + first.last.c[, 2]))[1:levels])) first.last.d[, 1] <- rep(0, levels) first.last.d[, 2] <- 2^levels - 1 first.last.d[, 3] <- rev(c(0, cumsum(rev(1 + first.last.d[, 2]))[1:(levels - 1)])) ntotal <- (levels + 1) * 2^levels ntotal.d <- levels * 2^levels } } else if(bc == "symmetric") { # Symmetric boundary reflection first.last.c[levels + 1, 1] <- 0 first.last.c[levels + 1, 2] <- DataLength - 1 first.last.c[levels + 1, 3] <- 0 ntotal <- first.last.c[levels + 1, 2] - first.last.c[levels + 1, 1] + 1 ntotal.d <- 0 for(i in levels:1) { first.last.c[i, 1] <- trunc(0.5 * (1 - LengthH + first.last.c[i + 1, 1])) 
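# (These recursions track how the coefficient index range shrinks by
#  roughly half, widened by the filter length, at each coarser level; the
#  Offset column records where each level starts in the storage vector.)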
first.last.c[i, 2] <- trunc(0.5 * first.last.c[i + 1, 2 ]) first.last.c[i, 3] <- first.last.c[i + 1, 3] + first.last.c[i + 1, 2] - first.last.c[i + 1, 1] + 1 first.last.d[i, 1] <- trunc(0.5 * (first.last.c[i + 1, 1] - 1)) first.last.d[i, 2] <- trunc(0.5 * (first.last.c[i + 1, 2] + LengthH - 2)) if(i != levels) { first.last.d[i, 3] <- first.last.d[i + 1, 3] + first.last.d[i + 1, 2] - first.last.d[i + 1, 1] + 1 } ntotal <- ntotal + first.last.c[i, 2] - first.last.c[i, 1] + 1 ntotal.d <- ntotal.d + first.last.d[i, 2] - first.last.d[i, 1] + 1 } } else if(bc == "interval") { first.last.d[, 1] <- rep(0, levels - current.scale) first.last.d[, 3] <- 2^(current.scale:(levels - 1)) first.last.d[, 2] <- first.last.d[, 3] - 1 first.last.c <- c(0, 2^current.scale - 1, 0) return(list(first.last.c = first.last.c, first.last.d = first.last.d)) } else { stop("Unknown boundary correction method") } names(ntotal) <- NULL names(ntotal.d) <- NULL list(first.last.c = first.last.c, ntotal = ntotal, first.last.d = first.last.d, ntotal.d = ntotal.d) } "firstdot"<- function(s) { ls <- length(s) nc <- nchar(s) fd <- rep(0, ls) for(i in 1:ls) { for(j in 1:nc[i]) { ss <- substring(s[i], j, j) if(ss == ".") { fd[i] <- j break } } } fd } "getarrvec"<- function(nlevels, sort = TRUE) { n <- 2^nlevels v <- 1:n arrvec <- matrix(0, nrow = n, ncol = nlevels - 1) if(sort == TRUE) { for(i in 1:ncol(arrvec)) arrvec[, i] <- sort.list(levarr(v, i)) } else { for(i in 1:ncol(arrvec)) arrvec[, i] <- levarr(v, i) } arrvec } "getpacket"<- function(...) UseMethod("getpacket") "getpacket.wp"<- function(wp, level, index, ...) { if(!inherits(wp, "wp")) stop("wp object is not of class wp") if(level > nlevelsWT(wp)) stop("Not that many levels in wp object") unit <- 2^level LocalIndex <- unit * index + 1 if(index > 2^(nlevelsWT(wp) - level) - 1) { cat("Index was too high, maximum for this level is ", 2^(wp$ nlevels - level) - 1, "\n") stop("Error occured") } if(LocalIndex < 0) stop("Index must be non-negative") packet <- wp$wp[level + 1, (LocalIndex:(LocalIndex + unit - 1))] packet } "getpacket.wpst"<- function(wpst, level, index, ...) { nlev <- nlevelsWT(wpst) if(level < 0) stop("Level must be greater than or equal to 0") else if(level > nlev) stop(paste("Level must be less than or equal to ", nlev)) npkts <- 4^(nlev - level) if(index < 0) stop("Packet index must be greater than or equal to 0") else if(index > npkts - 1) stop(paste("Packet index must be less than or equal to ", npkts - 1)) pktlength <- 2^level lix <- 1 + wpst$avixstart[level + 1] + pktlength * index rix <- lix + pktlength - 1 wpst$wpst[lix:rix] } "getpacket.wst"<- function(wst, level, index, type = "D", aspect = "Identity", ...) { if(type != "D" && type != "C") stop("Type of access must be C or D") class(wst) <- "wp" if(type == "C") wst$wp <- wst$Carray coefs <- getpacket.wp(wst, level = level, index = index) if(aspect == "Identity") return(coefs) else { fn <- get(aspect) return(fn(coefs)) } } "getpacket.wst2D"<- function(wst2D, level, index, type = "S", Ccode = TRUE, ...) 
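# Illustrative usage (hypothetical wst2D object w, e.g. from wst2D() on a
# square matrix with power-of-two sides):
#   getpacket(w, level = nlevelsWT(w) - 1, index = "0", type = "H")
# `index` is a string of digits 0-3, one digit per level below the top.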
{ nlev <- nlevelsWT(wst2D) if(level > nlev - 1) stop(paste("Maximum level is ", nlev - 1, " you supplied ", level)) else if(level < 0) stop(paste("Minimum level is 0 you supplied ", level)) if(type != "S" && type != "H" && type != "V" && type != "D") stop("Type must be one of S, H, V or D") if(nchar(index) != nlev - level) stop(paste("Index must be ", nlev - level, " characters long for level ", level)) for(i in 1:nchar(index)) { s1 <- substring(index, i, i) if(s1 != "0" && s1 != "1" && s1 != "2" && s1 != "3") stop(paste("Character ", i, " in index is not a 0, 1, 2 or 3. It is ", s1)) } if(Ccode == TRUE) { ntype <- switch(type, S = 0, H = 1, V = 2, D = 3) amdim <- dim(wst2D$wst2D) sl <- 2^level out <- matrix(0, nrow = sl, ncol = sl) ans <- .C("getpacketwst2D", am = as.double(wst2D$wst2D), d1 = as.integer(amdim[1]), d12 = as.integer(amdim[1] * amdim[2]), maxlevel = as.integer(nlev - 1), level = as.integer(level), index = as.integer(index), ntype = as.integer(ntype), out = as.double(out), sl = as.integer(sl), PACKAGE = "wavethresh") return(matrix(ans$out, nrow = ans$sl)) } else { x <- y <- 0 ans <- .C("ixtoco", level = as.integer(level), maxlevel = as.integer(nlev - 1), index = as.integer(index), x = as.integer(x), y = as.integer(y), PACKAGE = "wavethresh") cellength <- 2^level tmpx <- switch(type, S = 0, H = 0, V = cellength, D = cellength) tmpy <- switch(type, S = 0, H = cellength, V = 0, D = cellength) x <- ans$x + tmpx + 1 y <- ans$y + tmpy + 1 cat("x ", x, "y: ", y, "x+cellength-1 ", x + cellength - 1, "y+cellength-1", y + cellength - 1, "\n") return(wst2D$wst2D[level + 1, x:(x + cellength - 1), y:(y + cellength - 1)]) } } "guyrot"<- function(v, n) { l <- length(v) n <- n %% l if(n == 0) return(v) tmp <- v[(l - n + 1):l] v[(n + 1):l] <- v[1:(l - n)] v[1:n] <- tmp v } "image.wd"<- function(x, strut = 10, type = "D", transform = I, ...) { if(x$type != "station") stop("You have not supplied a nondecimated wd object") nlev <- nlevelsWT(x) if(type == "D" ) { m <- matrix(0, nrow = nlev, ncol = 2^nlev) for(i in 0:(nlev - 1)) { m[i, ] <- accessD(x, lev = i) } } if(type == "C") { mC <- matrix(0, nrow = nlev + 1, ncol = 2^nlev) for(i in 0:nlev) { mC[i, ] <- accessC(x, lev = i) } } nr <- nlev mz <- matrix(0, nrow = nlev, ncol = 2^nlev) if(type == "D") { image(transform(m[rep(1:nr, rep(strut, nr)), ]), main="Wavelet coefficients") } else if(type == "C") image(transform(mC[rep(1:nr, rep(strut, nr)), ]), main = "Scaling function coefficients") } "image.wst"<- function(x, nv, strut = 10, type = "D", transform = I, ...) 
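# Illustrative usage (hypothetical vector x, length a power of two):
#   image(wst(x))              # detail coefficients by level
#   image(wst(x), type = "C")  # scaling function coefficients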
{ m <- x$wp mC <- x$Carray nr <- nrow(m) nlev <- nlevelsWT(x) mz <- matrix(0, nrow = nrow(mC), ncol = ncol(mC)) if(!missing(nv)) { pknums <- print.nv(nv, printing = FALSE)$indexlist mpk <- matrix(0, nrow = nrow(mC), ncol = ncol(mC)) for(i in seq(along = pknums)) { lev <- nlev - i + 1 pklength <- 2^(lev - 1) f <- pknums[i] * pklength + 1 l <- f + pklength - 1 mpk[lev, f:l] <- 1 } } if(type == "D") { image(transform(m[rep(1:nr, rep(strut, nr)), ]), main = "Wavelet coefficients") } else if(type == "C") image(transform(mC[rep(1:nr, rep(strut, nr)), ]), main = "Scaling function coefficients" ) } "imwd"<- function(image, filter.number = 10, family = "DaubLeAsymm", type = "wavelet", bc = "periodic", RetFather = TRUE, verbose = FALSE) { if(verbose == TRUE) cat("Argument checking...") if(nrow(image) != ncol(image)) stop("Number of rows and columns in image are not identical") if(verbose == TRUE) cat("...done\nFilter...") # # # Select wavelet filter # filter <- filter.select(filter.number = filter.number, family = family) Csize <- nrow(image) # # # Check that Csize is a power of 2 # nlev <- IsPowerOfTwo(Csize) if(is.na(nlev)) stop(paste("The image size (", Csize, ") is not a power of 2")) # # # Set-up first/last database # if(verbose == TRUE) cat("...selected\nFirst/last database...") fl.dbase <- first.last(LengthH = length(filter$H), DataLength = Csize, bc = bc, type = type) first.last.c <- fl.dbase$first.last.c first.last.d <- fl.dbase$first.last.d # # # Set up answer list # image.decomp <- list(nlevels = nlev, fl.dbase = fl.dbase, filter = filter, type = type, bc = bc, date = date()) # # # if(verbose == TRUE) cat("...built\n") # # # Ok, go round loop doing decompositions # nbc <- switch(bc, periodic = 1, symmetric = 2) if(is.null(nbc)) stop("Unknown boundary handling") if(type == "station" && bc == "symmetric") stop("Cannot do nondecimated transform with symmetric boundary conditions" ) ntype <- switch(type, wavelet = 1, station = 2) if(is.null(ntype)) stop("Unknown type of transform") # # # Load up original image # smoothed <- as.vector(image) if(verbose == TRUE) { cat(bc, " boundary handling\n") cat("Decomposing...") } for(level in seq(nrow(first.last.d), 1, -1)) { if(verbose == TRUE) cat(level - 1, "") LengthCin <- first.last.c[level + 1, 2] - first.last.c[level + 1, 1] + 1 LengthCout <- first.last.c[level, 2] - first.last.c[level, 1] + 1 LengthDout <- first.last.d[level, 2] - first.last.d[level, 1] + 1 ImCC <- rep(0, (LengthCout * LengthCout)) ImCD <- rep(0, (LengthCout * LengthDout)) ImDC <- rep(0, (LengthDout * LengthCout)) ImDD <- rep(0, (LengthDout * LengthDout)) error <- 0 z <- .C("StoIDS", C = as.double(smoothed), Csize = as.integer(LengthCin), firstCin = as.integer(first.last.c[level + 1, 1]), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), LengthCout = as.integer(LengthCout), firstCout = as.integer(first.last.c[level, 1]), lastCout = as.integer(first.last.c[level, 2]), LengthDout = as.integer(LengthDout), firstDout = as.integer(first.last.d[level, 1]), lastDout = as.integer(first.last.d[level, 2]), ImCC = as.double(ImCC), ImCD = as.double(ImCD), ImDC = as.double(ImDC), ImDD = as.double(ImDD), nbc = as.integer(nbc), ntype = as.integer(ntype), error = as.integer(error), PACKAGE = "wavethresh") error <- z$error if(error != 0) { cat("Error was ", error, "\n") stop("Error reported") } smoothed <- z$ImCC if(RetFather == TRUE) { nm <- lt.to.name(level - 1, "CC") image.decomp[[nm]] <- z$ImCC } nm <- lt.to.name(level - 1, "CD") image.decomp[[nm]] <- z$ImCD nm <- 
lt.to.name(level - 1, "DC") image.decomp[[nm]] <- z$ImDC nm <- lt.to.name(level - 1, "DD") image.decomp[[nm]] <- z$ImDD } if(verbose == TRUE) cat("\nReturning answer...\n") image.decomp$w0Lconstant <- smoothed image.decomp$bc <- bc image.decomp$date <- date() class(image.decomp) <- "imwd" image.decomp } "imwr"<- function(...) UseMethod("imwr") "imwr.imwd"<- function(imwd, bc = imwd$bc, verbose = FALSE, ...) { if(verbose == TRUE) cat("Argument checking...") # # # Check class of imwd # ctmp <- class(imwd) if(is.null(ctmp)) stop("imwd has no class") else if(ctmp != "imwd") stop("imwd is not of class imwd") if(imwd$type == "station") stop("Cannot invert nonodecimated wavelet transform using imwr") filter <- imwd$filter if(verbose == TRUE) cat("...done\nFirst/last database...") fl.dbase <- imwd$fl.dbase first.last.c <- fl.dbase$first.last.c first.last.d <- fl.dbase$first.last.d if(verbose == TRUE) cat("...extracted\n") ImCC <- imwd$w0Lconstant if(verbose == TRUE) cat("Reconstructing...") # # # Ok, go round loop doing reconstructions # for(level in seq(2, 1 + nlevelsWT(imwd))) { if(verbose == TRUE) cat(level - 1, " ") LengthCin <- first.last.c[level - 1, 2] - first.last.c[level - 1, 1] + 1 LengthCout <- first.last.c[level, 2] - first.last.c[level, 1] + 1 LengthDin <- first.last.d[level - 1, 2] - first.last.d[level - 1, 1] + 1 error <- 0 ImOut <- rep(0, LengthCout^2) nbc <- switch(bc, periodic = 1, symmetric = 2) if(is.null(nbc)) stop("Unknown boundary handling") z <- .C("StoIRS", ImCC = as.double(ImCC), ImCD = as.double(imwd[[lt.to.name(level - 2, "CD")]]), ImDC = as.double(imwd[[lt.to.name(level - 2, "DC")]]), ImDD = as.double(imwd[[lt.to.name(level - 2, "DD")]]), LengthCin = as.integer(LengthCin), firstCin = as.integer(first.last.c[level - 1, 1]), LengthDin = as.integer(LengthDin), firstDin = as.integer(first.last.d[level - 1, 1]), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), LengthCout = as.integer(LengthCout), firstCout = as.integer(first.last.c[level, 1]), lastCout = as.integer(first.last.c[level, 2]), ImOut = as.double(ImOut), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") error <- z$error if(error != 0) { cat("Error was ", error, "\n") stop("Error reported") } # Do something with ImOut ImCC <- z$ImOut } if(verbose == TRUE) cat("\nReturning image\n") # Return the image matrix(ImCC, nrow = 2^(nlevelsWT(imwd))) } "imwr.imwdc"<- function(imwd, verbose = FALSE, ...) { if(verbose == TRUE) cat("Uncompressing...\n") imwd2 <- uncompress(imwd, ver = verbose) if(verbose == TRUE) cat("Reconstructing...\n") imwr(imwd2, verbose = verbose, ...) } "ipndacw"<- function(J, filter.number = 10, family = "DaubLeAsymm", tol = 1e-100, verbose = FALSE, ...) { if(verbose == TRUE) cat("Computing ipndacw\n") now <- proc.time()[1:2] if(J >= 0) stop("J must be negative integer") if(J - round(J) != 0) stop("J must be an integer") # rmnorig <- rmname(J = J, filter.number = filter.number, family = family ) # # # See if matrix already exists. If so, return it # rm.there <- rmget(requestJ = - J, filter.number = filter.number, family = family) if(!is.null(rm.there)) { if(verbose == TRUE) cat("Returning precomputed version: using ", rm.there, "\n") speed <- proc.time()[1:2] - now if(verbose == TRUE) cat("Took ", sum(speed), " seconds\n") rmnexists <- rmname(J = - rm.there, filter.number = filter.number, family = family) tmp <- get(rmnexists, envir=WTEnv)[1:( - J), 1:( - J)] assign(rmnorig, tmp, envir=WTEnv) return(tmp) } # # # See if partially computed matrix exists. 
If so, use it. # if(J != -1) { for(j in (1 + J):(-1)) { rmn <- rmname(J = j, filter.number = filter.number, family = family) if(exists(rmn, envir=WTEnv)) { if(verbose == TRUE) { cat("Partial matrix: ", rmn, " exists (") cat(paste(round(100 - (100 * (j * j))/(J * J), digits = 1), "% left to do)\n", sep = "")) } fmat <- rep(0, J * J) H <- filter.select(filter.number = filter.number, family = family)$H error <- 0 answer <- .C("rainmatPARTIAL", J = as.integer( - J), j = as.integer( - j), H = as.double(H), LengthH = as.integer(length(H)), fmat = as.double(fmat), tol = as.double(tol), error = as.integer(error), PACKAGE = "wavethresh") if(answer$error != 0) stop(paste("Error code was ", answer$error)) m <- matrix(answer$fmat, nrow = - J) m[1:( - j), 1:( - j)] <- get(rmn, envir=WTEnv) nm <- as.character(-1:J) dimnames(m) <- list(nm, nm) speed <- proc.time()[1:2] - now if(verbose == TRUE) cat("Took ", sum(speed), " seconds\n") assign(rmnorig, m, envir=WTEnv) return(m) } } } # # # Otherwise have to compute whole matrix # fmat <- rep(0, J * J) H <- filter.select(filter.number = filter.number, family = family)$H error <- 0 answer <- .C("rainmatPARENT", J = as.integer( - J), H = as.double(H), LengthH = as.integer(length(H)), fmat = as.double(fmat), tol = as.double(tol), error = as.integer(error), PACKAGE = "wavethresh") if(answer$error != 0) stop(paste("Error code was ", answer$error)) speed <- proc.time()[1:2] - now if(verbose == TRUE) cat("Took ", sum(speed), " seconds\n") m <- matrix(answer$fmat, nrow = - J) nm <- as.character(-1:J) dimnames(m) <- list(nm, nm) assign(rmnorig, m, envir=WTEnv) m } "irregwd"<- function(gd, filter.number = 2, family = "DaubExPhase", bc = "periodic", verbose = FALSE) { type <- "wavelet" if(verbose == TRUE) cat("wd: Argument checking...") ctmp <- class(gd) if(is.null(ctmp)) stop("gd has no class") else if(ctmp != "griddata") stop("gd is not of class griddata") data <- gd$gridy if(!is.atomic(data)) stop("Data is not atomic") DataLength <- length(data) # # # Check that we have a power of 2 data elements # nlevels <- nlevelsWT(data) # if(is.na(nlevels)) stop("Data length is not power of two") # Check for correct type # if(type != "wavelet" && type != "station") stop("Unknown type of wavelet decomposition") if(type == "station" && bc != "periodic") stop( "Can only do periodic boundary conditions with station" ) # # Select the appropriate filter # if(verbose == TRUE) cat("...done\nFilter...") filter <- filter.select(filter.number = filter.number, family = family) # # # Build the first/last database # if(verbose == TRUE) cat("...selected\nFirst/last database...") fl.dbase <- first.last(LengthH = length(filter$H), DataLength = DataLength, type = type, bc = bc) # # # Put in the data # C <- rep(0, fl.dbase$ntotal) C[1:DataLength] <- data # if(verbose == TRUE) error <- 1 else error <- 0 if(verbose == TRUE) cat("built\n") # # # Compute the decomposition # if(verbose == TRUE) cat("Decomposing...\n") nbc <- switch(bc, periodic = 1, symmetric = 2) if(is.null(nbc)) stop("Unknown boundary condition") ntype <- switch(type, wavelet = 1, station = 2) if(is.null(filter$G)) { wavelet.decomposition <- .C("wavedecomp", C = as.double(C), D = as.double(rep(0, fl.dbase$ntotal.d)), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), nlevels = as.integer(nlevels), firstC = as.integer(fl.dbase$first.last.c[, 1]), lastC = as.integer(fl.dbase$first.last.c[, 2]), offsetC = as.integer(fl.dbase$first.last.c[, 3]), firstD = as.integer(fl.dbase$first.last.d[, 1]), lastD = 
as.integer(fl.dbase$first.last.d[, 2]), offsetD = as.integer(fl.dbase$first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") tmp <- .C("computec", n = as.integer(length(gd$Gleft)), c = as.double(rep(0, fl.dbase$ntotal.d)), gridn = as.integer(length(gd$G)), G = as.double(gd$G), Gindex = as.integer(gd$Gindex), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), nbc = as.integer(nbc), PACKAGE = "wavethresh") } else { wavelet.decomposition <- .C("comwd", CR = as.double(Re(C)), CI = as.double(Im(C)), LengthC = as.integer(fl.dbase$ntotal), DR = as.double(rep(0, fl.dbase$ntotal.d)), DI = as.double(rep(0, fl.dbase$ntotal.d)), LengthD = as.integer(fl.dbase$ntotal.d), HR = as.double(Re(filter$H)), HI = as.double( - Im(filter$H)), GR = as.double(Re(filter$G)), GI = as.double( - Im(filter$G)), LengthH = as.integer(length(filter$H)), nlevels = as.integer(nlevels), firstC = as.integer(fl.dbase$first.last.c[, 1]), lastC = as.integer(fl.dbase$first.last.c[, 2]), offsetC = as.integer(fl.dbase$first.last.c[, 3]), firstD = as.integer(fl.dbase$first.last.d[, 1]), lastD = as.integer(fl.dbase$first.last.d[, 2]), offsetD = as.integer(fl.dbase$first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") } if(verbose == TRUE) cat("done\n") error <- wavelet.decomposition$error if(error != 0) { cat("Error ", error, " occured in wavedecomp\n") stop("Error") } if(is.null(filter$G)) { l <- list(C = wavelet.decomposition$C, D = wavelet.decomposition$D, c = tmp$c * (tmp$c > 0), nlevels = nlevelsWT(wavelet.decomposition), fl.dbase = fl.dbase, filter = filter, type = type, bc = bc, date = date()) } else { l <- list(C = complex(real = wavelet.decomposition$CR, imaginary = wavelet.decomposition$CI), D = complex(real = wavelet.decomposition$DR, imaginary = wavelet.decomposition$DI ), nlevels = nlevelsWT(wavelet.decomposition), fl.dbase = fl.dbase, filter = filter, type = type, bc = bc, date = date()) } class(l) <- "irregwd" return(l) } "l2norm"<- function(u, v) sqrt(sum((u - v)^2)) "levarr"<- function(v, levstodo) { if(levstodo != 0) { sv <- seq(from = 1, to = length(v), by = 2) return(c(levarr(v[sv], levstodo - 1), levarr(v[ - sv], levstodo - 1))) } else return(v) } "linfnorm"<- function(u, v) { max(abs(u - v)) } "lt.to.name"<- function(level, type) { # # This function converts the level and type (horizontal, vertical, diagonal) # of wavelet coefficients to a character string "wnLx" which should be # interpreted as "nth Level, coefficients x", where x is 1, 2 or 3 in the # scheme of Mallat. (So 1 is horizontal, 2 is vertical and 3 is diagonal). # w is on the front to indicate that these are wavelet coefficients # return(paste("w", as.character(level), "L", switch(type, CD = "1", DC = "2", DD = "3", CC = "4"), sep = "")) } "madmad"<- function(x) mad(x)^2 "makegrid"<- function(t, y, gridn = 2^(floor(log(length(t) - 1, 2)) + 1)) { # # 30th October 2018. 
Enhancements to do some argument sanity checks # lt <- length(t) ly <- length(y) if (lt != ly) stop("Length of t and y vectors has to be the same") isp2gridn <- IsPowerOfTwo(gridn) if (is.na(isp2gridn)) stop("Length of gridn has to be a power of two") tmp <- .C("makegrid", x = as.double(t), y = as.double(y), n = length(t), gridt = as.double(rep(0, gridn)), gridy = as.double(rep(0, gridn)), gridn = as.integer(gridn), G = as.double(rep(0, gridn)), Gindex = as.integer(rep(0, gridn)), PACKAGE = "wavethresh") l <- list(gridt = tmp$gridt, gridy = tmp$gridy, G = tmp$G, Gindex = tmp$ Gindex) class(l) <- "griddata" l } "makewpstDO"<- function(timeseries, groups, filter.number = 10, family = "DaubExPhase", mincor = 0.69999999999999996) { # # # Using the data in timeseries (which should be a length a power of two) # and the group information (only two groups presently). Create an object # of class wpstDO (nondecimated wavelet packet Discrimination Object). # # Given this wpstDO and another timeseries a function exists to predict # the group membership of each timeseries element # # # First build nondecimated wavelet packet object # twpst <- wpst(timeseries, filter.number = filter.number, family = family) # # # Now convert this to a w2d object including the group information. # tw2d <- wpst2discr(wpstobj = twpst, groups = groups) # # # Now extract the best 1D classifying columns. # tBP <- Best1DCols(w2d = tw2d, mincor = mincor) # # # Do a discriminant analysis # tBPd <- BMdiscr(tBP) l <- list(BPd = tBPd, BP = tBP, filter = twpst$filter) class(l) <- "wpstDO" l } "mfilter.select"<- function(type = "Geronimo") { # # mfilter.select # returns the filter information for a specified # multiple wavelet basis # # Copyright Tim Downie 1995-6. # # if(type == "Geronimo") { name <- "Geronimo Multiwavelets" nphi <- 2 npsi <- 2 NH <- 4 ndecim <- 2 H <- rep(0, 16) G <- rep(0, 16) H[1] <- 0.42426406871193001 H[2] <- 0.80000000000000004 H[3] <- -0.050000000000000003 H[4] <- -0.21213203435596001 H[5] <- 0.42426406871193001 H[7] <- 0.45000000000000001 H[8] <- 0.70710678118655002 H[11] <- 0.45000000000000001 H[12] <- -0.21213203435596001 H[15] <- -0.050000000000000003 # # H6,9,10,13,14,16 are zero. # G[1] <- -0.050000000000000003 G[2] <- -0.21213203435596401 G[3] <- 0.070710678118654793 G[4] <- 0.29999999999999999 G[5] <- 0.45000000000000001 G[6] <- -0.70710678118654802 G[7] <- -0.63639610306789296 G[9] <- 0.45000000000000001 G[10] <- -0.21213203435596401 G[11] <- 0.63639610306789296 G[12] <- -0.29999999999999999 G[13] <- -0.050000000000000003 G[15] <- -0.070710678118654793 # # G8,14,16 are zero. 
# } else if(type == "Donovan3") { name <- "Donovan Multiwavelets, 3 functions" nphi <- 3 npsi <- 3 NH <- 4 ndecim <- 2 H <- rep(0, 36) G <- rep(0, 36) H[2] <- ( - sqrt(154) * (3 + 2 * sqrt(5)))/3696 H[3] <- (sqrt(14) * (2 + 5 * sqrt(5)))/1232 H[10] <- ( - sqrt(2) * (3 + 2 * sqrt(5)))/44 H[11] <- (sqrt(154) * (67 + 30 * sqrt(5)))/3696 H[12] <- (sqrt(14) * (-10 + sqrt(5)))/112 H[19] <- 1/sqrt(2) H[20] <- (sqrt(154) * (67 - 30 * sqrt(5)))/3696 H[21] <- (sqrt(14) * (10 + sqrt(5)))/112 H[23] <- (3 * sqrt(2))/8 H[24] <- (sqrt(22) * (-4 + sqrt(5)))/88 H[26] <- (sqrt(22) * (32 + 7 * sqrt(5)))/264 H[27] <- (sqrt(2) * (-5 + 4 * sqrt(5)))/88 H[28] <- (sqrt(2) * (-3 + 2 * sqrt(5)))/44 H[29] <- (sqrt(154) * (-3 + 2 * sqrt(5)))/3696 H[30] <- (sqrt(14) * (-2 + 5 * sqrt(5)))/1232 H[31] <- sqrt(154)/22 H[32] <- (3 * sqrt(2))/8 H[33] <- (sqrt(22) * (4 + sqrt(5)))/88 H[34] <- - sqrt(70)/22 H[35] <- (sqrt(22) * (-32 + 7 * sqrt(5)))/264 H[36] <- ( - sqrt(2) * (5 + 4 * sqrt(5)))/88 # # H1,4,5,6,7,8,9,13,14,15,16,17,18,22,25 are zero. # G[5] <- (sqrt(154) * (3 + 2 * sqrt(5)))/3696 G[6] <- ( - sqrt(14) * (2 + 5 * sqrt(5)))/1232 G[8] <- ( - sqrt(7) * (1 + sqrt(5)))/336 G[9] <- (sqrt(77) * (-1 + 3 * sqrt(5)))/1232 G[13] <- (sqrt(2) * (3 + 2 * sqrt(5)))/44 G[14] <- ( - sqrt(154) * (67 + 30 * sqrt(5)))/3696 G[15] <- (sqrt(14) * (10 - sqrt(5)))/112 G[16] <- ( - sqrt(11) * (1 + sqrt(5)))/44 G[17] <- (sqrt(7) * (29 + 13 * sqrt(5)))/336 G[18] <- (sqrt(77) * (-75 + 17 * sqrt(5)))/1232 G[20] <- (sqrt(77) * (-2 + sqrt(5)))/264 G[21] <- (sqrt(7) * (13 - 6 * sqrt(5)))/88 G[22] <- 1/sqrt(2) G[23] <- (sqrt(154) * (-67 + 30 * sqrt(5)))/3696 G[24] <- ( - sqrt(14) * (10 + sqrt(5)))/112 G[26] <- (sqrt(7) * (-29 + 13 * sqrt(5)))/336 G[27] <- ( - sqrt(77) * (75 + 17 * sqrt(5)))/1232 G[28] <- 13/22 G[29] <- ( - sqrt(77) * (2 + sqrt(5)))/264 G[30] <- ( - sqrt(7) * (13 + 6 * sqrt(5)))/88 G[31] <- (sqrt(2) * (3 - 2 * sqrt(5)))/44 G[32] <- (sqrt(154) * (3 - 2 * sqrt(5)))/3696 G[33] <- (sqrt(14) * (2 - 5 * sqrt(5)))/1232 G[34] <- (sqrt(11) * (1 - sqrt(5)))/44 G[35] <- (sqrt(7) * (1 - sqrt(5)))/336 G[36] <- ( - sqrt(77) * (1 + 3 * sqrt(5)))/1232 # # G1,2,3,4,7,10,11,12,19,25 are zero. # } else (stop("bad filter specified\n")) return(list(type = type, name = name, nphi = nphi, npsi = npsi, NH = NH, ndecim = ndecim, H = H, G = G)) } "mfirst.last"<- function(LengthH, nlevels, ndecim, type = "wavelet", bc = "periodic") { # # mfirst.last # Sets up a coefficient data base for a multiple wavelet object # The structure is analogous to that used in first.last # but returns more information required by mwd and mwr. 
# # Copyright Tim Downie 1995-1996 # # if(type != "wavelet") stop("Type can only be wavelet") first.last.c <- matrix(0, nrow = nlevels + 1, ncol = 3, dimnames = list( NULL, c("First", "Last", "Offset"))) first.last.d <- matrix(0, nrow = nlevels, ncol = 3, dimnames = list( NULL, c("First", "Last", "Offset"))) if(bc == "periodic") { # Periodic boundary correction if(type == "wavelet") { first.last.c[, 1] <- rep(0, nlevels + 1) first.last.c[, 2] <- ndecim^(0:nlevels) - 1 first.last.c[, 3] <- rev(c(0, cumsum(rev(1 + first.last.c[, 2]))[1:nlevels])) first.last.d[, 1] <- rep(0, nlevels) first.last.d[, 2] <- ndecim^(0:(nlevels - 1)) - 1 first.last.d[, 3] <- rev(c(0, cumsum(rev(1 + first.last.d[, 2]))[1:(nlevels - 1)])) nvecs.c <- first.last.c[1, 3] + 1 nvecs.d <- first.last.d[1, 3] + 1 } else if(type == "station") { # # # in case nondecimated Multiple wavelet transform is implemented # then this code might be of use (will need adapting) # first.last.c[, 1] <- rep(0, nlevels + 1) first.last.c[, 2] <- 2^nlevels - 1 first.last.c[, 3] <- rev(c(0, cumsum(rev(1 + first.last.c[, 2]))[1:nlevels])) first.last.d[, 1] <- rep(0, nlevels) first.last.d[, 2] <- 2^nlevels - 1 first.last.d[, 3] <- rev(c(0, cumsum(rev(1 + first.last.d[, 2]))[1:(nlevels - 1)])) ntotal <- (nlevels + 1) * 2^nlevels ntotal.d <- nlevels * 2^nlevels } } else if(bc == "symmetric") { # Symmetric boundary reflection first.last.c[nlevels + 1, 1] <- 0 first.last.c[nlevels + 1, 2] <- 2^nlevels - 1 first.last.c[nlevels + 1, 3] <- 0 nvecs.c <- first.last.c[nlevels + 1, 2] - first.last.c[nlevels + 1, 1] + 1 nvecs.d <- 0 for(i in nlevels:1) { first.last.c[i, 1] <- trunc(0.5 * (1 - LengthH + first.last.c[i + 1, 1])) first.last.c[i, 2] <- trunc(0.5 * first.last.c[i + 1, 2 ]) first.last.c[i, 3] <- first.last.c[i + 1, 3] + first.last.c[i + 1, 2] - first.last.c[i + 1, 1] + 1 first.last.d[i, 1] <- trunc(0.5 * (first.last.c[i + 1, 1] - 1)) first.last.d[i, 2] <- trunc(0.5 * (first.last.c[i + 1, 2] + LengthH - 2)) if(i != nlevels) { first.last.d[i, 3] <- first.last.d[i + 1, 3] + first.last.d[i + 1, 2] - first.last.d[i + 1, 1] + 1 } nvecs.c <- nvecs.c + first.last.c[i, 2] - first.last.c[ i, 1] + 1 nvecs.d <- nvecs.d + first.last.d[i, 2] - first.last.d[ i, 1] + 1 } } else { stop("Unknown boundary correction method") } names(nvecs.c) <- NULL names(nvecs.d) <- NULL list(first.last.c = first.last.c, nvecs.c = nvecs.c, first.last.d = first.last.d, nvecs.d = nvecs.d) } "modernise"<- function(...) UseMethod("modernise") "modernise.wd"<- function(wd, ...) 
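#
# Illustrative usage sketch for mfirst.last above (comments only, not executed
# here; the argument values are assumptions): the periodic coefficient database
# for a two-band multiwavelet with four filter matrices over three levels.
#   mfirst.last(LengthH = 4, nlevels = 3, ndecim = 2)
#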
{ if(IsEarly(wd)) { cat("Converting wavelet object to latest release\n") wd$type <- "wavelet" wd$date <- date() } else cat("Object is already up to date\n") wd } "mpostfilter"<- function(C, prefilter.type, filter.type, nphi, npsi, ndecim, nlevels, verbose = FALSE) { ndata <- ndecim^nlevels * nphi if(prefilter.type == "Repeat") ndata <- ndecim^(nlevels - 1) * nphi data <- rep(0, ndata) if(filter.type == "Geronimo") { if(prefilter.type == "Minimal") { if(verbose == TRUE) cat(" O.K.\nPostfilter (Minimal)\n") w <- 1 data[(1:(ndata/2)) * 2 - 1] <- 2/w * C[2, (1:(ndata/2)) ] data[(1:(ndata/2)) * 2] <- - sqrt(2)/w * C[1, (1:( ndata/2))] + 4/w * C[2, (1:(ndata/2))] } else if(prefilter.type == "Identity") { if(verbose == TRUE) cat(" O.K.\nPostfilter (identity)\n") data[(1:(ndata/2)) * 2 - 1] <- C[1, (1:(ndata/2))] data[(1:(ndata/2)) * 2] <- C[2, (1:(ndata/2))] } else if(prefilter.type == "Repeat") { if(verbose == TRUE) cat(" O.K.\nPostfilter (weighted average)\n") for(k in 1:ndata) data[k] <- (C[2, k] + C[1, k]/sqrt(2))/2 } else if(prefilter.type == "Interp" || prefilter.type == "default") { if(verbose == TRUE) cat(" O.K.\nPostfilter (interpolation)\n") t <- sqrt(96/25) u <- sqrt(3) data[2 * (1:(ndata/2))] <- u * C[2, (1:(ndata/2))] data[2 * (2:(ndata/2)) - 1] <- t * C[1, (2:(ndata/2))] - 0.29999999999999999 * (data[2 * (2:(ndata/2)) - 2] + data[2 * (2:(ndata/2))]) data[1] <- t * C[1, 1] - 0.29999999999999999 * (data[ ndata] + data[2]) } else if(prefilter.type == "Xia") { if(verbose == TRUE) cat(" O.K.\nPostfilter (Xia)\n") epsilon1 <- 0 epsilon2 <- 0.10000000000000001 root2 <- sqrt(2) x <- (2 * root2)/(5 * (root2 * epsilon2 - epsilon1)) a <- (x - epsilon1 + epsilon2 * 2 * root2)/2 b <- (x + epsilon1 - epsilon2 * 2 * root2)/2 c <- (x + 4 * epsilon1 - epsilon2 * 3 * root2)/(root2 * 2) d <- (x - 4 * epsilon1 + epsilon2 * 3 * root2)/(root2 * 2) data[2 * (1:(ndata/2))] <- d * C[1, 1:(ndata/2)] - b * C[2, 1:(ndata/2)] data[2 * (1:(ndata/2)) - 1] <- a * C[2, 1:(ndata/2)] - c * C[1, 1:(ndata/2)] } else if(prefilter.type == "Roach1") { q1 <- 0.32982054290000001 q2 <- 0.23184851840000001 q3 <- 0.8187567536 q4 <- -0.29459505809999997 q5 <- -0.1629787369 q6 <- 0.23184851840000001 q7 <- -0.23184851840000001 q8 <- -0.1629787369 q9 <- 0.29459505809999997 q10 <- 0.8187567536 q11 <- -0.23184851840000001 q12 <- 0.32982054290000001 nn <- (ndata - 2)/2 QB <- matrix(c(q2, q1, q8, q7), ncol = 2, byrow = TRUE) QA <- matrix(c(q4, q3, q10, q9), ncol = 2, byrow = TRUE) QZ <- matrix(c(q6, q5, q12, q11), ncol = 2, byrow = TRUE) partition <- matrix(data, nrow = 2, byrow = FALSE) partition[, (2:nn)] <- QB %*% C[, (2:nn) - 1] + QA %*% C[, (2:nn)] + QZ %*% C[, (2:nn) + 1] partition[, 1] <- QB %*% C[, nn + 1] + QA %*% C[, 1] + QZ %*% C[, 2] partition[, nn + 1] <- QB %*% C[, nn] + QA %*% C[, nn + 1] + QZ %*% C[, 1] data <- c(partition) } else if(prefilter.type == "Roach3") { q1 <- 0.084397403440000004 q2 <- -0.0036003129089999999 q3 <- 0.084858161210000005 q4 <- 0.99279918550000001 q5 <- -0.00015358592229999999 q6 <- -0.0036003129089999999 q7 <- -0.0036003129089999999 q8 <- 0.00015358592229999999 q9 <- 0.99279918550000001 q10 <- -0.084858161210000005 q11 <- -0.0036003129089999999 q12 <- -0.084397403440000004 nn <- (ndata - 2)/2 QZ <- matrix(c(q7, q8, q1, q2), ncol = 2, byrow = TRUE) QA <- matrix(c(q9, q10, q3, q4), ncol = 2, byrow = TRUE) QB <- matrix(c(q11, q12, q5, q6), ncol = 2, byrow = TRUE) partition <- matrix(data, nrow = 2, byrow = FALSE) partition[, (2:nn)] <- QB %*% C[, (2:nn) - 1] + QA %*% C[, (2:nn)] + QZ %*% C[, (2:nn) 
+ 1] partition[, 1] <- QB %*% C[, nn + 1] + QA %*% C[, 1] + QZ %*% C[, 2] partition[, nn + 1] <- QB %*% C[, nn] + QA %*% C[, nn + 1] + QZ %*% C[, 1] data <- c(partition) } else stop("Specified postfilter not available for given multiwavelet" ) } else if(filter.type == "Donovan3") { if(prefilter.type == "Identity") { if(verbose == TRUE) cat(" O.K.\nPostfilter (identity)\n") data[(1:(ndata/3)) * 3 - 2] <- C[1, (1:(ndata/3))] data[(1:(ndata/3)) * 3 - 1] <- C[2, (1:(ndata/3))] data[(1:(ndata/3)) * 3] <- C[3, (1:(ndata/3))] } else if(prefilter.type == "Linear") { cat(" O.K.\nPostfilter (Linear)\n") if(verbose == TRUE) data[(1:(ndata/3)) * 3 - 2] <- C[1, (1:(ndata/3 ))] * -4.914288 + 4.914288 * C[2, (1:(ndata/3 ))] data[(1:(ndata/3)) * 3 - 1] <- C[1, (1:(ndata/3))] * -2.778375 + 3.778375 * C[2, (1:(ndata/3))] data[(1:(ndata/3)) * 3] <- C[1, (1:(ndata/3))] * -2.298365 + 3.298365 * C[2, (1:(ndata/3))] + C[ 3, (1:(ndata/3))] } else if(prefilter.type == "Interp" || prefilter.type == "default") { if(verbose == TRUE) cat(" O.K.\nPostfilter (interpolation)\n") w <- sqrt(5) lc <- length(data)/3 data[3 * (0:(lc - 1)) + 1] <- C[1, 1:lc] * sqrt(11/7) data[2] <- ( - (2 + 6 * w) * C[1, lc] - (3 + 2 * w) * C[ 1, 1] + 6 * sqrt(77) * C[2, 1] + ((103 - 24 * w ) * sqrt(7))/(16 - 5 * w) * C[3, 1])/(9 * sqrt( 77)) data[3 * (1:(lc - 1)) + 2] <- ( - (2 + 6 * w) * C[1, 1:( lc - 1)] - (3 + 2 * w) * C[1, (2:lc)] + 6 * sqrt(77) * C[2, (2:lc)] + ((103 - 24 * w) * sqrt(7))/(16 - 5 * w) * C[3, (2:lc)])/(9 * sqrt( 77)) data[3] <- ((-3 + 2 * w)/(3 * sqrt(231)) * C[1, lc] + ( -2 + 6 * w)/(3 * sqrt(231)) * C[1, 1] + 2/sqrt(3) * C[2, 1] + (306 - 112 * w)/((16 - 5 * w) * 3 * sqrt(33)) * C[3, 1])/sqrt(3) data[3 * (2:lc)] <- ((-3 + 2 * w)/(3 * sqrt(231)) * C[1, (1:(lc - 1))] + (-2 + 6 * w)/(3 * sqrt(231)) * C[1, (2:lc)] + 2/sqrt(3) * C[2, (2:lc)] + (306 - 112 * w)/((16 - 5 * w) * 3 * sqrt(33)) * C[3, ( 2:lc)])/sqrt(3) } else stop("Specified postfilter not available for given multiwavelet" ) } else stop("No postfilters for type of multiwavelet") return(data) } "mprefilter"<- function(data, prefilter.type, filter.type, nlevels, nvecs.c, nphi, npsi, ndecim, verbose = FALSE) { #function that takes original data and computes the starting level #coefficients for the wavelet decompostion # ndata <- length(data) C <- matrix(rep(0, nvecs.c * nphi), nrow = nphi) # #jump to type of multiwavelet if(filter.type == "Geronimo") { if(prefilter.type == "Minimal") { if(verbose == TRUE) cat(" O.K.\nPrefilter (Minimal)...") w <- 1 C[1, 1:(ndata/2)] <- w * sqrt(2) * data[(1:(ndata/2)) * 2 - 1] - w/sqrt(2) * data[(1:(ndata/2)) * 2] C[2, 1:(ndata/2)] <- w * 0.5 * data[(1:(ndata/2)) * 2 - 1] } else if(prefilter.type == "Identity") { if(verbose == TRUE) cat(" O.K.\nPrefilter (Identity)...") for(l in 1:nphi) { C[l, 1:(ndata/nphi)] <- data[(0:((ndata/nphi) - 1)) * nphi + l] } } else if(prefilter.type == "Repeat") { if(verbose == TRUE) cat(" O.K.\nRepeating signal...") C[1, 1:(ndata)] <- data[1:ndata] * sqrt(2) C[2, 1:(ndata)] <- data[1:ndata] } else if(prefilter.type == "Interp" || prefilter.type == "default") { if(verbose == TRUE) cat(" O.K.\nPrefilter (interpolation)...") r <- sqrt(25/96) s <- sqrt(1/3) a <- -0.29999999999999999 C[2, (1:(ndata/2))] <- s * data[2 * (1:(ndata/2))] C[1, 1] <- r * (data[1] - a * (data[ndata] + data[2])) C[1, (2:(ndata/2))] <- r * (data[2 * (2:(ndata/2)) - 1] - a * (data[2 * (2:(ndata/2)) - 2] + data[2 * (2:( ndata/2))])) } else if(prefilter.type == "Xia") { if(verbose == TRUE) cat(" O.K.\nPrefilter (Xia) ...") epsilon1 <- 0 
epsilon2 <- 0.10000000000000001 root2 <- sqrt(2) x <- (2 * root2)/(5 * (root2 * epsilon2 - epsilon1)) a <- (x - epsilon1 + epsilon2 * 2 * root2)/2 b <- (x + epsilon1 - epsilon2 * 2 * root2)/2 c <- (x + 4 * epsilon1 - epsilon2 * 3 * root2)/(root2 * 2) d <- (x - 4 * epsilon1 + epsilon2 * 3 * root2)/(root2 * 2) C[1, (1:(ndata/2))] <- a * data[2 * (1:(ndata/2))] + b * data[2 * (1:(ndata/2)) - 1] C[2, (1:(ndata/2))] <- c * data[2 * (1:(ndata/2))] + d * data[2 * (1:(ndata/2)) - 1] } else if(prefilter.type == "Roach1") { q1 <- 0.32982054290000001 q2 <- 0.23184851840000001 q3 <- 0.8187567536 q4 <- -0.29459505809999997 q5 <- -0.1629787369 q6 <- 0.23184851840000001 q7 <- -0.23184851840000001 q8 <- -0.1629787369 q9 <- 0.29459505809999997 q10 <- 0.8187567536 q11 <- -0.23184851840000001 q12 <- 0.32982054290000001 QB <- matrix(c(q2, q1, q8, q7), ncol = 2, byrow = TRUE) QA <- matrix(c(q4, q3, q10, q9), ncol = 2, byrow = TRUE) QZ <- matrix(c(q6, q5, q12, q11), ncol = 2, byrow = TRUE) nn <- (ndata - 2)/2 partition <- matrix(data, nrow = 2, byrow = FALSE) C[, (2:nn)] <- QB %*% partition[, (2:nn) - 1] + QA %*% partition[, (2:nn)] + QZ %*% partition[, (2:nn) + 1] C[, 1] <- QB %*% partition[, nn + 1] + QA %*% partition[ , 1] + QZ %*% partition[, 2] C[, nn + 1] <- QB %*% partition[, nn] + QA %*% partition[, nn + 1] + QZ %*% partition[, 1] } else if(prefilter.type == "Roach3") { q1 <- 0.084397403440000004 q2 <- -0.0036003129089999999 q3 <- 0.084858161210000005 q4 <- 0.99279918550000001 q5 <- -0.00015358592229999999 q6 <- -0.0036003129089999999 q7 <- -0.0036003129089999999 q8 <- 0.00015358592229999999 q9 <- 0.99279918550000001 q10 <- -0.084858161210000005 q11 <- -0.0036003129089999999 q12 <- -0.084397403440000004 nn <- (ndata - 2)/2 QB <- matrix(c(q7, q8, q1, q2), ncol = 2, byrow = FALSE) QA <- matrix(c(q9, q10, q3, q4), ncol = 2, byrow = FALSE) QZ <- matrix(c(q11, q12, q5, q6), ncol = 2, byrow = FALSE) partition <- matrix(data, nrow = 2, byrow = FALSE) C[, (2:nn)] <- QB %*% partition[, (2:nn) - 1] + QA %*% partition[, (2:nn)] + QZ %*% partition[, (2:nn) + 1] C[, 1] <- QB %*% partition[, nn + 1] + QA %*% partition[ , 1] + QZ %*% partition[, 2] C[, nn + 1] <- QB %*% partition[, nn] + QA %*% partition[, nn + 1] + QZ %*% partition[, 1] } else stop("Bad prefilter for specified multiwavelet filter") } else if(filter.type == "Donovan3") { if(prefilter.type == "Identity") { if(verbose == TRUE) cat(" O.K.\nPrefilter (Identity)...") for(l in 1:nphi) { C[l, 1:(ndata/nphi)] <- data[(0:((ndata/nphi) - 1)) * nphi + l] } } else if(prefilter.type == "Linear") { if(verbose == TRUE) cat(" O.K.\nPrefilter (Linear)...") C[1, 1:(ndata/3)] <- data[3 * 0:((ndata/3) - 1) + 1] * -0.76885512 + data[3 * 0:((ndata/3) - 1) + 2] C[2, 1:(ndata/3)] <- data[3 * 0:((ndata/3) - 1) + 1] * -0.56536682999999999 + data[3 * 0:((ndata/3) - 1) + 2] C[3, 1:(ndata/3)] <- data[3 * 0:((ndata/3) - 1) + 1] * 0.097676540000000006 - data[3 * 0:((ndata/3) - 1) + 2] + data[3 * 1:(ndata/3)] } else if(prefilter.type == "Interp" || prefilter.type == "default") { if(verbose == TRUE) cat(" O.K.\nPrefilter (Interpolation)...") w <- sqrt(5) lc <- length(data)/3 C[1, 1:lc] <- data[3 * (0:(lc - 1)) + 1] * sqrt(7/11) C[3, 1] <- ((sqrt(3) * (data[2] - data[3]) + (C[1, lc] * ( -1 + 8 * w))/3/sqrt(231) + (C[1, 1] * (1 + 8 * w))/3/sqrt(231)) * 3 * sqrt(33) * (16 - 5 * w))/ (-203 + 88 * w) C[3, 2:lc] <- ((sqrt(3) * (data[3 * (1:(lc - 1)) + 2] - data[3 * (2:lc)]) + (C[1, 1:(lc - 1)] * (-1 + 8 * w))/3/sqrt(231) + (C[1, 2:lc] * (1 + 8 * w))/3/ sqrt(231)) * 3 * sqrt(33) * (16 - 5 * 
w))/(-203 + 88 * w) C[2, 1] <- ((sqrt(3) * data[2] + (C[1, lc] * (2 + 6 * w ))/3/sqrt(231) + (C[1, 1] * (3 + 2 * w))/3/sqrt( 231) - (C[3, 1] * (103 - 24 * w))/3/sqrt(33)/( 16 - 5 * w)) * sqrt(3))/2 C[2, 2:lc] <- ((sqrt(3) * data[3 * (1:(lc - 1)) + 2] + ( C[1, 1:(lc - 1)] * (2 + 6 * w))/3/sqrt(231) + ( C[1, 2:lc] * (3 + 2 * w))/3/sqrt(231) - (C[3, 2: lc] * (103 - 24 * w))/3/sqrt(33)/(16 - 5 * w)) * sqrt(3))/2 } else stop("Bad prefilter for specified multiwavelet filter") } else stop("No prefilter for the multiwavelet filter") return(C) } "mwd"<- function(data, prefilter.type = "default", filter.type = "Geronimo", bc = "periodic", verbose = FALSE) { # #applies the Discrete Multiple wavelet Transform to data #copyrigt Tim Downie 1995-1996 # if(verbose == TRUE) cat("Multiple wavelet decomposition\n") if(verbose == TRUE) cat("Checking Arguements...") if(bc != "periodic") stop("\nOnly periodic boundary conditions allowed at the moment" ) filter <- mfilter.select(type = filter.type) ndata <- length(data) # # # check ndata = filter$nphi * filter$ndecim ^ nlevels # # nlevels <- log(ndata/filter$nphi)/log(filter$ndecim) # # # repeated signal prefilter has one extra level # if(prefilter.type == "Repeat") nlevels <- nlevels + 1 if(nlevels != round(nlevels) || nlevels < 1) stop("\nbad number of data points for this filter\n") if(verbose == TRUE) cat(" O.K.\nBuilding first/last database ...") fl <- mfirst.last(LengthH = filter$NH, nlevels = nlevels, ndecim = filter$ndecim, type = "wavelet", bc = bc) # if(bc == "periodic") nbc <- 1 else if(bc == "symmetric") nbc <- 2 C <- mprefilter(data, prefilter.type, filter.type, nlevels, fl$nvecs.c, filter$nphi, filter$npsi, filter$ndecim, verbose) if(verbose == TRUE) cat(" O.K.\nRunning decomposition algorithm...") gwd <- .C("multiwd", C = as.double(C), lengthc = as.integer(fl$nvecs.c * filter$nphi), D = as.double(rep(0, fl$nvecs.d * filter$npsi)), lengthd = as.integer(fl$nvecs.d * filter$npsi), nlevels = as.integer(nlevels), nphi = as.integer(filter$nphi), npsi = as.integer(filter$npsi), ndecim = as.integer(filter$ndecim), H = as.double(filter$H), G = as.double(filter$G), NH = as.integer(filter$NH), lowerc = as.integer(fl$first.last.c[, 1]), upperc = as.integer(fl$first.last.c[, 2]), offsetc = as.integer(fl$first.last.c[, 3]), lowerd = as.integer(fl$first.last.d[, 1]), upperd = as.integer(fl$first.last.d[, 2]), offsetd = as.integer(fl$first.last.d[, 3]), nbc = as.integer(nbc), PACKAGE = "wavethresh") # # the C function returns the C and D coefficients as a vector # convert into a matrix with nphi rows. # gwd$C <- matrix(gwd$C, nrow = filter$nphi) gwd$D <- matrix(gwd$D, nrow = filter$npsi) outlist <- list(C = gwd$C, D = gwd$D, nlevels = nlevels, ndata = ndata, filter = filter, fl.dbase = fl, type = "wavelet", bc = bc, prefilter = prefilter.type, date = date()) class(outlist) <- "mwd" if(verbose == TRUE) cat(" O.K.\nReturning Multiple Wavelet Decomposition\n") return(outlist) } "mwr"<- function(mwd, prefilter.type = mwd$prefilter, verbose = FALSE, start.level = 0, returnC = FALSE) { #function to reconstruct the data from an object of class mwd #a multiwavelet decomposition #Tim Downie #last updated May 96 if(verbose == TRUE) cat("Multiple wavelet reconstruction\nArguement checking ..." 
) ctmp <- class(mwd) if(is.null(ctmp)) stop("Input must have class mwd") else if(ctmp != "mwd") stop("Input must have class mwd") if(mwd$prefilter != prefilter.type) warning("The pre/postfilters are inconsistent\n") if(start.level < 0 || start.level >= nlevelsWT(mwd)) stop( "Start.level out of range\n") # # keep the value of the Cs at level 0 reset all the others # if(verbose == TRUE) cat(" O.K.\nInitialising variables ...") C <- matrix(rep(0, length(mwd$C)), nrow = mwd$filter$nphi) c0low <- mwd$fl.dbase$first.last.c[start.level + 1, 3] + 1 c0high <- c0low + mwd$fl.dbase$first.last.c[start.level + 1, 2] - mwd$ fl.dbase$first.last.c[start.level + 1, 1] for(l in 1:mwd$filter$nphi) C[l, c0low:c0high] <- mwd$C[l, c0low:c0high] if(mwd$bc == "periodic") nbc <- 1 else if(mwd$bc == "symmetric") nbc <- 2 else stop("bad boundary conditions") if(verbose == TRUE) cat(" O.K.\nRunning Reconstruction algorithm...") reconstr <- .C("multiwr", C = as.double(C), lengthc = as.integer(mwd$fl.dbase$ntotal), D = as.double(mwd$D), lengthd = as.integer(mwd$fl.dbase$ntotal.d), nlevels = as.integer(nlevelsWT(mwd)), nphi = as.integer(mwd$filter$nphi), npsi = as.integer(mwd$filter$npsi), ndecim = as.integer(mwd$filter$ndecim), H = as.double(mwd$filter$H), G = as.double(mwd$filter$G), NH = as.integer(mwd$filter$NH), lowerc = as.integer(mwd$fl.dbase$first.last.c[, 1]), upperc = as.integer(mwd$fl.dbase$first.last.c[, 2]), offsetc = as.integer(mwd$fl.dbase$first.last.c[, 3]), lowerd = as.integer(mwd$fl.dbase$first.last.d[, 1]), upperd = as.integer(mwd$fl.dbase$first.last.d[, 2]), offsetd = as.integer(mwd$fl.dbase$first.last.d[, 3]), nbc = as.integer(nbc), startlevel = as.integer(start.level), PACKAGE = "wavethresh") ndata <- mwd$filter$ndecim^nlevelsWT(mwd)* mwd$filter$nphi reconstr$C <- matrix(reconstr$C, nrow = mwd$filter$nphi) if(returnC == TRUE) { if(verbose == TRUE) cat(" O.K.\nReturning starting coefficients\n") return(reconstr$C[, (1:(ndata/mwd$filter$nphi))]) } if(verbose == TRUE) cat(" O.K.\nApply post filter...") ndata <- mwd$filter$ndecim^nlevelsWT(mwd)* mwd$filter$nphi data <- mpostfilter(reconstr$C, prefilter.type, mwd$filter$type, mwd$ filter$nphi, mwd$filter$npsi, mwd$filter$ndecim, nlevelsWT(mwd), verbose) if(verbose == TRUE) cat(" O.K.\nReturning data\n") return(data) } "newsure"<- function(s, x) { x <- abs(x) d <- length(x) sl <- sort.list(x) y <- x[sl] sigma <- s[sl] cy <- cumsum(y^2) cy <- c(0, cy[1:(length(cy) - 1)]) csigma <- cumsum(sigma^2) csigma <- c(0, csigma[1:(length(csigma) - 1)]) ans <- d - 2 * csigma + cy + d:1 * y^2 m <- min(ans) index <- (1:length(ans))[m == ans] return(y[index]) } "nlevelsWT"<- function(...) UseMethod("nlevelsWT") #"nlevels.default"<- #function(object, ...) #{ # if(is.null(object$nlevels)) { # n <- length(object) # return(IsPowerOfTwo(n)) # } # else return(object$nlevels) #} #MAN: changed function below to cope with $nlevels deprecation (R-2.6.0 onwards). "nlevelsWT.default"<- function(object, ...) { if (is.list(object)){ if(!is.null(object$nlevels)){ # "normal" object */ return(object$nlevels) } else{ if(isa(object,"uncompressed")){ # 2 special cases return(IsPowerOfTwo(object$v)) } else if(isa(object, "griddata")){ return(IsPowerOfTwo(object$gridy)) } else{ # what to do? e.g. tpwd,wpstDO,compressed classes. print("I don't know what to do with this object!\n") stop("unknown nlevels") } } } else{ #data should be atomic (numeric)... return(IsPowerOfTwo(length(object))) } } "nullevels"<- function(...) 
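#
# Illustrative usage sketch for the multiple wavelet transform pair mwd/mwr
# defined above (comments only, not executed here; the data length is an
# assumption):
#   y <- rnorm(128)
#   ymwd <- mwd(y)                 # Geronimo multiwavelet decomposition
#   yrec <- mwr(ymwd)              # reconstruction; should be close to y
#   nlevelsWT(ymwd)                # number of resolution levels (here 6)
#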
UseMethod("nullevels") "nullevels.imwd"<- function(imwd, levelstonull, ...) { nlevels <- nlevelsWT(imwd) if(max(levelstonull) > nlevels - 1) stop(paste("Illegal level to null, maximum is ", nlevels - 1)) if(min(levelstonull) < 0) stop(paste("Illegal level to null, minimum is ", nlevels - 1)) for(lev in levelstonull) { n1 <- lt.to.name(lev, type = "CD") n2 <- lt.to.name(lev, type = "DC") n3 <- lt.to.name(lev, type = "DD") imwd[[n1]] <- rep(0, length(imwd[[n1]])) imwd[[n2]] <- rep(0, length(imwd[[n2]])) imwd[[n3]] <- rep(0, length(imwd[[n3]])) } imwd } "nullevels.wd"<- function(wd, levelstonull, ...) { nlevels <- nlevelsWT(wd) if(max(levelstonull) > nlevels - 1) stop(paste("Illegal level to null, maximum is ", nlevels - 1)) if(min(levelstonull) < 0) stop(paste("Illegal level to null, minimum is ", nlevels - 1)) for(lev in levelstonull) { d <- accessD(wd, level = lev) d <- rep(0, length(d)) wd <- putD(wd, level = lev, v = d) } wd } "nullevels.wst"<- function(wst, levelstonull, ...) { nullevels.wd(wst, levelstonull = levelstonull) } "numtonv"<- function(number, nlevels) { if(nlevels < 1) stop("nlevels cannot be less than 1") if(number < 0) stop("Number cannot be less than 0") else if(number > 2^nlevels - 1) stop(paste("Number cannot be more than", 2^nlevels - 1)) node.vector <- vector("list", nlevels) matchcodes <- c("L", "R") mask <- 2^(nlevels - 1) cmc <- NULL for(i in (nlevels - 1):0) { index <- floor(number/mask) if(index == 1) number <- number - mask mask <- mask/2 cmc <- c(cmc, index) } for(i in (nlevels - 1):0) { index <- cmc[i + 1] nul <- 2^(nlevels - i - 1) upperl <- rep(0, nul) upperctrl <- rep(matchcodes[index + 1], nul) node.vector[[i + 1]] <- list(upperctrl = upperctrl, upperl = upperl) } node.vector <- list(node.list = node.vector, nlevels = nlevels) class(node.vector) <- "nv" node.vector } "plot.imwd"<- function(x, scaling = "by.level", co.type = "abs", package = "R", plot.type = "mallat", arrangement = c(3, 3), transform = FALSE, tfunction = sqrt, ...) 
{ # # # Check class of imwd # if(package != "R" && package != "S") stop("Unknown package") ctmp <- class(x) if(is.null(ctmp)) stop("imwd has no class") else if(ctmp != "imwd") stop("imwd is not of class imwd") if(x$type == "station" && plot.type == "mallat") stop("Cannot do Mallat type plot on nondecimated wavelet object") Csize <- 2^(nlevelsWT(x)) m <- matrix(0, nrow = Csize, ncol = Csize) first.last.d <- x$fl.dbase$first.last.d first.last.c <- x$fl.dbase$first.last.c if(plot.type == "mallat") { for(level in (nlevelsWT(x)):1) { ndata <- 2^(level - 1) firstD <- first.last.d[level, 1] lastD <- first.last.d[level, 2] LengthD <- lastD - firstD + 1 sel <- seq(from = (1 - firstD), length = ndata) # # # Extract CD for this level # nm <- lt.to.name(level - 1, "CD") msub1 <- matrix(x[[nm]], nrow = LengthD, ncol = LengthD) # # # Extract DC for this level # nm <- lt.to.name(level - 1, "DC") msub2 <- matrix(x[[nm]], nrow = LengthD, ncol = LengthD) # # # Extract DD for this level # nm <- lt.to.name(level - 1, "DD") msub3 <- matrix(x[[nm]], nrow = LengthD, ncol = LengthD) # # # # Work out if we want to display the absolute values or the actual # values # if(co.type == "abs") { msub1 <- abs(msub1) msub2 <- abs(msub2) msub3 <- abs(msub3) } else if(co.type == "mabs") { msub1 <- - abs(msub1) msub2 <- - abs(msub2) msub3 <- - abs(msub3) } else if(co.type != "none") stop("Unknown co.type") if(transform == TRUE) { msub1 <- tfunction(msub1) msub2 <- tfunction(msub2) msub3 <- tfunction(msub3) } if(scaling == "by.level") { if(ndata == 1) { r.m1 <- range(c(as.vector(msub1), as.vector( msub2), as.vector(msub3))) r.m2 <- r.m1 r.m3 <- r.m1 } else { r.m1 <- range(msub1) r.m2 <- range(msub2) r.m3 <- range(msub3) } if(r.m1[2] - r.m1[1] == 0) { msub1[, ] <- 0 } else { mu1 <- 249/(r.m1[2] - r.m1[1]) msub1 <- mu1 * (msub1 - r.m1[1]) } if(r.m2[2] - r.m2[1] == 0) { msub2[, ] <- 0 } else { mu2 <- 249/(r.m2[2] - r.m2[1]) msub2 <- mu2 * (msub2 - r.m2[1]) } if(r.m3[2] - r.m3[1] == 0) { msub3[, ] <- 0 } else { mu3 <- 249/(r.m3[2] - r.m3[1]) msub3 <- mu3 * (msub3 - r.m3[1]) } } else { range.msub <- range(c(msub1, msub2, msub3)) multiplier <- 255/(range.msub[2] - range.msub[1 ]) msub1 <- multiplier * (msub1 - range.msub[1]) msub2 <- multiplier * (msub2 - range.msub[1]) msub3 <- multiplier * (msub3 - range.msub[1]) # } m[(ndata + 1):(2 * ndata), 1:ndata] <- msub1[sel, sel] m[1:ndata, (ndata + 1):(2 * ndata)] <- msub2[sel, sel] m[(ndata + 1):(2 * ndata), (ndata + 1):(2 * ndata)] <- msub3[sel, sel] } if(package == "R") { image(m, xaxt = "n", yaxt = "n",...) 
axis(1, at = c(0, 2^((nlevelsWT(x)- 3):(nlevelsWT(x))) )) axis(2, at = c(0, 2^((nlevelsWT(x)- 3):(nlevelsWT(x))) )) } else return(m) } else if(plot.type == "cols") { oldpar <- par(mfrow = arrangement, pty = "s") for(level in (nlevelsWT(x):1)) { ndata <- 2^(level - 1) firstD <- first.last.d[level, 1] lastD <- first.last.d[level, 2] LengthD <- lastD - firstD + 1 sel <- seq(from = (1 - firstD), length = ndata) # # # Extract CD for this level # nm <- lt.to.name(level - 1, "CD") msub1 <- matrix(x[[nm]], nrow = LengthD, ncol = LengthD) # # # Extract DC for this level # nm <- lt.to.name(level - 1, "DC") msub2 <- matrix(x[[nm]], nrow = LengthD, ncol = LengthD) # # # Extract DD for this level # nm <- lt.to.name(level - 1, "DD") msub3 <- matrix(x[[nm]], nrow = LengthD, ncol = LengthD) # # # # Work out if we want to display the absolute values or the actual # values # if(co.type == "abs") { msub1 <- abs(msub1) msub2 <- abs(msub2) msub3 <- abs(msub3) } else if(co.type == "mabs") { msub1 <- - abs(msub1) msub2 <- - abs(msub2) msub3 <- - abs(msub3) } else if(co.type != "none") stop("Unknown co.type") if(transform == TRUE) { msub1 <- tfunction(msub1) msub2 <- tfunction(msub2) msub3 <- tfunction(msub3) } if(package == "R") { xlabstr <- paste("Level", level - 1, "(horizonatal)") image(msub1, xlab = xlabstr) xlabstr <- paste("Level", level - 1, "(vertical)") image(msub2, xlab = xlabstr) xlabstr <- paste("Level", level - 1, "(diagonal)") image(msub3, xlab = xlabstr,...) } else { warning("Not using R") } } par(oldpar) } else stop("Unknown plot.type") } "plot.imwdc"<- function(x, verbose = FALSE, ...) { imwd <- uncompress(x, verbose = verbose) return(plot(imwd, ...)) } plot.irregwd <- function (x, xlabels, first.level = 1, main = "Wavelet Decomposition Coefficients", scaling = "by.level", rhlab = FALSE, sub, ...) { ctmp <- class(x) if (is.null(ctmp)) stop("irregwd has no class") else if (ctmp != "irregwd") stop("irregwd is not of class irregwd") iwd <- x wd <- x class(wd) <- "wd" levels <- nlevelsWT(wd) nlevels <- levels - first.level n <- 2^(levels - 1) if (missing(sub)) sub <- wd$filter$name plot(c(0, 0, n, n), c(0, nlevels + 1, nlevels + 1, 0), type = "n", xlab = "Translate", ylab = "Resolution Level", main = main, yaxt = "n", xaxt = "n", sub = sub, ...) axis(2, at = 1:(nlevels), labels = ((levels - 1):first.level)) if (missing(xlabels)) { axx <- c(0, 2^(nlevels - 2), 2^(nlevels - 1), 2^(nlevels - 1) + 2^(nlevels - 2), 2^nlevels) axis(1, at = axx) } else { axx <- pretty(1:n, n = 3) if (axx[length(axx)] > n) axx[length(axx)] <- n axx[axx == 0] <- 1 axl <- signif(xlabels[axx], digits = 3) axis(1, at = axx, labels = axl) } x <- 1:n height <- 1 first.last.d <- wd$fl.dbase$first.last.d axr <- NULL if (scaling == "global") { my <- 0 for (i in ((levels - 1):first.level)) { y <- accessc(iwd, i) my <- max(c(my, abs(y))) } } for (i in ((levels - 1):first.level)) { n <- 2^i y <- accessc(iwd, i) xplot <- x ly <- length(y) if (scaling == "by.level") my <- max(abs(y)) y <- (0.5 * y)/my axr <- c(axr, my) segments(xplot, height, xplot, height + y) if (i != first.level) { x1 <- x[seq(1, n - 1, 2)] x2 <- x[seq(2, n, 2)] x <- (x1 + x2)/2 height <- height + 1 } } if (rhlab == TRUE) axis(4, at = 1:length(axr), labels = signif(axr, 3)) axr } "plot.mwd"<- function(x, first.level = 1, main = "Wavelet Decomposition Coefficients", scaling = "compensated", rhlab = FALSE, sub = x$filter$name, NotPlotVal = 0.050000000000000003, xlab = "Translate", ylab = "Resolution level", return.scale = TRUE, colour = (2:(npsi + 1)), ...) 
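#
# Illustrative usage sketch for plot.imwd above (comments only, not executed
# here; the random test image is an assumption):
#   m <- matrix(rnorm(64 * 64), nrow = 64)
#   limwd <- imwd(m)
#   plot(limwd, scaling = "by.level", co.type = "abs")   # Mallat-style display
#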
{ #plot.mwd #plot a multiwavelet decompostion # #Tim Downie 1995-1996 # # # Check class of mwd # ctmp <- class(x) if(is.null(ctmp)) stop("mwd has no class") else if(ctmp == "wd") stop("object is of class wd use plot.wd or plot") else if(ctmp != "mwd") stop("object is not of class mwd") nlevels <- nlevelsWT(x)- first.level mx <- x$ndata xlabs <- seq(0, mx/2, length = 5) plot(c(0, 0, mx, mx), c(0, nlevels + 1, nlevels + 1, 0), type = "n", xlab = xlab, ylab = ylab, main = main, yaxt = "n", xaxt = "n", sub=sub, ...) axis(1, at = seq(0, mx, length = 5), labels = xlabs) axis(2, at = 1:(nlevels), labels = (nlevelsWT(x)- 1):first.level) delta <- 1 npsi <- x$filter$npsi ndecim <- x$filter$ndecim height <- 1 first.last.d <- x$fl.dbase$first.last.d axr <- NULL if(scaling == "global") { my <- 0 for(i in ((nlevelsWT(x)- 1):first.level)) { y <- c(accessD(x, i)) my <- max(c(my, abs(y))) } } if(scaling == "compensated") { my <- 0 for(i in ((nlevelsWT(x)- 1):first.level)) { y <- c(accessD(x, i)) * x$filter$ndecim^(i/2) my <- max(c(my, abs(y))) } } for(i in ((nlevelsWT(x)- 1):first.level)) { y <- c(accessD(x, i)) ly <- length(y) n <- ly/npsi if(scaling == "by.level") my <- max(abs(y)) if(scaling == "compensated") y <- y * ndecim^(i/2) if(my == 0) y <- rep(0, ly) else y <- (0.5 * y)/my axr <- c(axr, my) xplot <- rep(((1:n) * mx)/(n + 1), rep(npsi, ly/npsi)) + (0:( npsi - 1)) * delta segments(xplot, height, xplot, height + y, col = colour) height <- height + 1 } if(rhlab == TRUE) axis(4, at = 1:length(axr), labels = signif(axr, 3)) if(return.scale == TRUE) return(axr) else return(NULL) } "plot.nvwp"<- function(x, ...) { plotpkt(nlevelsWT(x)) pktlist <- print.nvwp(x, printing = FALSE) for(i in 1:length(pktlist$level)) addpkt(pktlist$level[i], pktlist$pkt[i], 1, col = 1) } "plot.wd"<- function(x, xlabvals, xlabchars, ylabchars, first.level = 0, main = "Wavelet Decomposition Coefficients", scaling = "global", rhlab = FALSE, sub, NotPlotVal = 0.0050000000000000001, xlab = "Translate", ylab = "Resolution Level", aspect = "Identity", ...) { if(IsEarly(x)) { ConvertMessage() stop() } if(is.complex(x$D) && aspect == "Identity") aspect <- "Mod" # # Check class of wd # ctmp <- class(x) if(is.null(ctmp)) stop("wd has no class") else if(ctmp != "wd") stop("wd is not of class wd") levels <- nlevelsWT(x) if(x$bc == "interval") { if(first.level < x$current.scale) warning(paste("plot.wd plotted from level", x$ current.scale, " because \"wavelets on the interval\" transform was only computed to this level\n" )) first.level <- x$current.scale } nlevels <- levels - first.level type <- x$type if(IsEarly(x)) { ConvertMessage() stop() } if(type == "wavelet") n <- 2^(levels - 1) else if(type == "station") n <- 2^levels else stop("Unknown type for wavelet object") if(missing(sub)) sub <- paste(switch(type, wavelet = "Standard transform", station = "Nondecimated transform"), x$filter$name) if(aspect != "Identity") sub <- paste(sub, "(", aspect, ")") plot(c(0, 0, n, n), c(0, nlevels + 1, nlevels + 1, 0), type = "n", xlab = xlab, ylab = ylab, main = main, yaxt = "n", xaxt = "n", sub = sub, ...) 
yll <- (levels - 1):first.level if(missing(ylabchars)) axis(2, at = 1:(nlevels), labels = yll) else if(length(ylabchars) != nlevels) stop(paste("Should have ", nlevels, " entries in ylabchars")) else axis(2, at = 1:(nlevels), labels = ylabchars) if(missing(xlabchars)) { if(missing(xlabvals)) { if(type == "wavelet") axx <- c(0, 2^(levels - 3), 2^(levels - 2), 2^( levels - 2) + 2^(levels - 3), 2^(levels - 1)) else axx <- c(0, 2^(levels - 2), 2^(levels - 1), 2^( levels - 1) + 2^(levels - 2), 2^levels) axis(1, at = axx) } else { lx <- pretty(xlabvals, n = 4) cat("lx is ", lx, "\n") if(lx[1] < min(xlabvals)) lx[1] <- min(xlabvals) if(lx[length(lx)] > max(xlabvals)) lx[length(lx)] <- max(xlabvals) cat("lx is ", lx, "\n") xix <- NULL for(i in 1:length(lx)) { u <- (xlabvals - lx[i])^2 xix <- c(xix, (1:length(u))[u == min(u)]) } axx <- xix if(type == "wavelet") axx <- xix/2 axl <- signif(lx, digits = 2) axis(1, at = axx, labels = axl) } } else axis(1, at = xlabvals, labels = xlabchars) myxx <- 1:n height <- 1 first.last.d <- x$fl.dbase$first.last.d axr <- NULL if(scaling == "global") { my <- 0 for(i in ((levels - 1):first.level)) { y <- accessD(x, i, aspect = aspect) my <- max(c(my, abs(y))) } } if(scaling == "compensated") { my <- 0 for(i in ((levels - 1):first.level)) { y <- accessD(x, i, aspect = aspect) * 2^(i/2) my <- max(c(my, abs(y))) } } if(scaling == "super") { my <- 0 for(i in ((levels - 1):first.level)) { y <- accessD(x, i, aspect = aspect) * 2^i my <- max(c(my, abs(y))) } } shift <- 1 for(i in ((levels - 1):first.level)) { y <- accessD(x, i, aspect = aspect) if(type == "wavelet") n <- 2^i else { y <- y[c((n - shift + 1):n, 1:(n - shift))] shift <- shift * 2 } xplot <- myxx ly <- length(y) if(scaling == "by.level") my <- max(abs(y)) if(scaling == "compensated") y <- y * 2^(i/2) if(scaling == "super") y <- y * 2^i if(my == 0) { y <- rep(0, length(y)) } else y <- (0.5 * y)/my axr <- c(axr, my) if(max(abs(y)) > NotPlotVal) segments(xplot, height, xplot, height + y) if(i != first.level) { if(type == "wavelet") { x1 <- myxx[seq(1, n - 1, 2)] x2 <- myxx[seq(2, n, 2)] myxx <- (x1 + x2)/2 } height <- height + 1 } } if(rhlab == TRUE) axis(4, at = 1:length(axr), labels = signif(axr, digits=3)) axr } "plot.wp"<- function(x, nvwp = NULL, main = "Wavelet Packet Decomposition", sub, first.level = 5, scaling = "compensated", dotted.turn.on = 5, color.force = FALSE, WaveletColor = 2, NodeVecColor = 3, fast = FALSE, SmoothedLines = TRUE, ...) { # # Check class of wp # ctmp <- class(x) if(is.null(ctmp)) stop("wp has no class") else if(ctmp != "wp") stop("wp is not of class wp") levels <- nlevelsWT(x) dotted.turn.on <- levels - dotted.turn.on N <- 2^levels # The number of original data points # # # Check validity of command line args # if(first.level < 0 || first.level > levels) stop("first.level must between zero and the number of levels") # if(dotted.turn.on < 0 || dotted.turn.on > levels) stop( "dotted.turn.on must between zero and number of levels" ) # # Do subtitling # if(missing(sub)) sub <- paste("Filter: ", x$filter$name) # # # Set plotting region and do axes of plot # oldpar <- par(mfrow = c(1, 1)) if(!is.null(nvwp)) sub <- paste(sub, "(selected packets in color 3)") plot(c(0, N + 1), c(-1, levels - first.level + 1), type = "n", main = main, xlab = "Packet Number", ylab = "Resolution Level", yaxt = "n", sub = sub, ...) 
axis(2, at = 0:(levels - first.level), labels = levels:first.level) # # # Check out how to do things in a different colour if we can # if(color.force == FALSE) { if(CanUseMoreThanOneColor() == FALSE) { if(WaveletColor > 1) { warning( "Can't (or can't find out how) display wavelets in color" ) WaveletColor <- 1 } if(NodeVecColor > 1) { warning( "Can't (or can't find out how) display node vector packets in color" ) NodeVecColor <- 1 } } } origdata <- getpacket(x, lev = levels, index = 0) # # # Scaling for the original data is always the same # sf <- max(abs(origdata)) if(sf == 0) { stop("Original data is the zero function\n") } scale.origdata <- (0.5 * origdata)/sf lines(1:N, scale.origdata) if(first.level == levels) return() # # # Draw the vertical seperators if necessary # for(i in 1:(levels - first.level)) { N <- N/2 if(i > dotted.turn.on) break else for(j in 1:(2^i - 1)) { segments(N * (j - 1) + N + 0.5, i - 0.5, N * (j - 1) + N + 0.5, i + 0.5, lty = 2) } } # # # Get all the coefficients # CoefMatrix <- x$wp # # # Remove the original data cos we've already plotted that # CoefMatrix <- CoefMatrix[ - (levels + 1), ] # # Compute Global Scale Factor if necessary # Sf <- 0 if(scaling == "global") Sf <- max(abs(CoefMatrix), na.rm = TRUE) else if(scaling == "compensated") { for(i in 1:(levels - first.level)) { Coefs <- CoefMatrix[levels - i + 1, ] * 2^((levels - i )/2) Sf <- max(c(Sf, abs(Coefs)), na.rm = TRUE) } } if(scaling == "global") sf <- Sf if(is.null(nvwp)) { # # If there is no associated node vector then plot the wavelet packet # table using the matrix of coefficients. This is faster than the # packet by packet method that is used when we have a node vector # (but probably not much) # # for(i in 1:(levels - first.level)) { PKLength <- 2^(levels - i) Coefs <- CoefMatrix[levels - i + 1, ] if(scaling == "by.level") sf <- max(abs(Coefs), na.rm = TRUE) else if(scaling == "compensated") sf <- Sf/2^((levels - i)/2) if(is.na(sf) || sf == 0) Coefs <- rep(0, length(Coefs)) else Coefs <- (0.5 * Coefs)/sf pkl <- 1:PKLength if(SmoothedLines == TRUE) lines(pkl, i + Coefs[pkl]) else segments(pkl, i, pkl, i + Coefs[pkl]) pkl <- PKLength + pkl segments(pkl, i, pkl, i + Coefs[pkl], col=WaveletColor) pkl <- (2 * PKLength + 1):length(Coefs) segments(pkl, i, pkl, i + Coefs[pkl]) } } else { pklist <- print.nvwp(nvwp, printing = FALSE) for(i in 1:(levels - first.level)) { # # Scaling issues # Coefs <- CoefMatrix[levels - i + 1, ] if(scaling == "by.level") sf <- max(abs(Coefs), na.rm = TRUE) else if(scaling == "compensated") sf <- Sf/2^((levels - i)/2) if(is.na(sf) || sf == 0) Coefs <- rep(0, length(Coefs)) else Coefs <- (0.5 * Coefs)/sf CoefMatrix[levels - i + 1, ] <- Coefs x$wp <- CoefMatrix the.lev <- levels - i PKLength <- 2^the.lev npkts <- 2^i pkl <- 1:PKLength for(j in 1:npkts) { pkt <- getpacket(x, level = the.lev, index = j - 1) lcol <- 1 if(any(pklist$level == the.lev)) { lpklist <- pklist$pkt[pklist$level == the.lev ] if(any(lpklist == (j - 1))) lcol <- NodeVecColor else if(j == 2) lcol <- WaveletColor } else if(j == 2) lcol <- WaveletColor if(j == 1) { if(SmoothedLines == TRUE) lines(pkl, i + pkt, col=lcol) else segments(pkl, i, pkl, i + pkt, col=lcol) } else segments(pkl, i, pkl, i + pkt, col=lcol) pkl <- pkl + PKLength } } } invisible() } "plot.wst"<- function(x, main = "Nondecimated Wavelet (Packet) Decomposition", sub, first.level = 5, scaling = "compensated", dotted.turn.on = 5, aspect = "Identity", ...) 
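#
# Illustrative usage sketch for plot.wd and plot.wp above (comments only,
# not executed here; the data lengths and level choices are assumptions):
#   ywd <- wd(rnorm(256))
#   plot(ywd, scaling = "global")
#   ywp <- wp(rnorm(256))
#   plot(ywp, first.level = 4)
#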
{ # # Check class of wst # ctmp <- class(x) if(is.null(ctmp)) stop("wst has no class") else if(ctmp != "wst") stop("wst is not of class wst") levels <- nlevelsWT(x) dotted.turn.on <- levels - dotted.turn.on if(is.complex(x$wp) && aspect == "Identity") aspect <- "Mod" N <- 2^levels # The number of original data points # # # Check validity of command line args # if(first.level < 0 || first.level > levels) stop("first.level must between zero and the number of levels") # if(dotted.turn.on < 0 || dotted.turn.on > levels) stop( "dotted.turn.on must between zero and number of levels" ) # # Do subtitling # if(missing(sub)) sub <- paste("Filter: ", x$filter$name) # # # Set plotting region and do axes of plot # if(aspect != "Identity") sub <- paste(sub, "(", aspect, ")") plot(c(0, N + 1), c(-1, levels - first.level + 1), type = "n", main = main, xlab = "Packet Number", ylab = "Resolution Level", yaxt = "n", sub = sub, ...) axis(2, at = 0:(levels - first.level), labels = levels:first.level) # origdata <- getpacket(x, lev = levels, index = 0, aspect = aspect) # # # Scaling for the original data is always the same # sf <- max(abs(origdata)) if(sf == 0) { scale.origdata <- rep(0, length(origdata)) } else scale.origdata <- (0.5 * origdata)/sf lines(1:N, scale.origdata) if(first.level == levels) return() # # # Draw the vertical seperators if necessary # for(i in 1:(levels - first.level)) { N <- N/2 if(i > dotted.turn.on) break else for(j in 1:(2^i - 1)) { segments(N * (j - 1) + N + 0.5, i - 0.5, N * (j - 1) + N + 0.5, i + 0.5, lty = 2) } } # # # Get all the coefficients # if(aspect == "Identity") CoefMatrix <- x$wp else { fn <- get(aspect) CoefMatrix <- fn(x$wp) } CoefMatrix <- CoefMatrix[ - (levels + 1), ] # # Compute Global Scale Factor if necessary # Sf <- 0 if(scaling == "global") Sf <- max(abs(CoefMatrix), na.rm = TRUE) else if(scaling == "compensated") { for(i in 1:(levels - first.level)) { Coefs <- CoefMatrix[levels - i + 1, ] * 2^((levels - i )/2) Sf <- max(c(Sf, abs(Coefs)), na.rm = TRUE) } } if(scaling == "global") sf <- Sf for(i in 1:(levels - first.level)) { PKLength <- 2^(levels - i) Coefs <- CoefMatrix[levels - i + 1, ] if(scaling == "by.level") sf <- max(abs(Coefs), na.rm = TRUE) else if(scaling == "compensated") sf <- Sf/2^((levels - i)/2) if(is.na(sf) || sf == 0) Coefs <- rep(0, length(Coefs)) else Coefs <- (0.5 * Coefs)/sf pkl <- 1:PKLength segments(pkl, i, pkl, i + Coefs[pkl]) pkl <- PKLength + pkl segments(pkl, i, pkl, i + Coefs[pkl]) pkl <- (2 * PKLength + 1):length(Coefs) segments(pkl, i, pkl, i + Coefs[pkl]) } } "plot.wst2D"<- function(x, plot.type = "level", main = "", ...) { nlev <- nlevelsWT(x) sz <- dim(x$wst2D)[2] if(plot.type == "level") { for(i in 0:(nlev - 1)) { image(matrix(x$wst2D[i + 1, , ], nrow = sz)) st <- paste("Level", i) title(main = main, sub = st) } } } "plotpkt"<- function(J) { x <- c(0, 2^(J - 1)) y <- c(0, J) plot(x, y, type = "n", xlab = "Packet indices", ylab = "Level", xaxt = "n") axis(1, at = seq(from = 0, to = 2^(J - 1), by = 0.5), labels = 0:2^J) } "print.BP"<- function(x, ...) { cat("BP class object. Contains \"best basis\" information\n") cat("Components of object:") print(names(x)) cat("Number of levels ", nlevelsWT(x), "\n") cat("List of \"best\" packets\n") m <- cbind(x$level, x$pkt, x$basiscoef) dimnames(m) <- list(NULL, c("Level id", "Packet id", "Basis coef")) print(m) } "print.imwd"<- function(x, ...) 
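#
# Illustrative usage sketch for plot.wst above (comments only, not executed
# here; the data length and first.level value are assumptions):
#   ywst <- wst(rnorm(128))
#   plot(ywst, first.level = 3)
#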
{ cat("Class 'imwd' : Discrete Image Wavelet Transform Object:\n") cat(" ~~~~ : List with", length(x), "components with names\n") cat(" ", names(x), "\n\n") cat("$ wNLx are LONG coefficient vectors !\n") cat("\nsummary(.):\n----------\n") summary.imwd(x) } "print.imwdc"<- function(x, ...) { cat("Class 'imwdc' : Compressed Discrete Image Wavelet Transform Object:\n" ) cat(" ~~~~~ : List with", length(x), "components with names\n") cat(" ", names(x), "\n\n") cat("$ wNLx are LONG coefficient vectors !\n") cat("\nsummary(.):\n----------\n") summary.imwdc(x) } "print.mwd"<- function(x, ...) { ctmp <- class(x) if(is.null(ctmp)) stop("Input must have class mwd") else if(ctmp != "mwd") stop("Input must have class mwd") cat("Class 'mwd' : Discrete Multiple Wavelet Transform Object:\n") cat(" ~~~ : List with", length(x), "components with names\n") cat(" ", names(x), "\n\n") cat("$ C and $ D are LONG coefficient vectors !\n") cat("\nCreated on :", x$date, "\n") cat("Type of decomposition: ", x$type, "\n") cat("\nsummary:\n----------\n") summary.mwd(x) } "print.nv"<- function(x, printing = TRUE, verbose = FALSE, ...) { if(verbose == TRUE & printing == TRUE) { cat("Printing node vector as a list\n") cat("------------------------------\n") print(as.list(x)) cat("Printing node vector as format\n") cat("------------------------------\n") } node.vector <- x$node.list acsel <- 0 acsellist <- NULL cntr <- 0 power <- 1 rvector <- 0 for(i in (nlevelsWT(x)- 1):0) { nl <- node.vector[[i + 1]] action <- nl$upperctrl[acsel + 1] actent <- nl$upperl[acsel + 1] cntr <- cntr + 1 if(action == "S") { if(printing == TRUE) cat("There are ", cntr, " reconstruction steps\n") return(invisible(list(indexlist = acsellist, rvector = rvector))) } else if(action == "L") acsel <- 2 * acsel else { acsel <- 2 * acsel + 1 rvector <- rvector + power } power <- power * 2 if(printing == TRUE) { cat("Level : ", i, " Action is ", action) cat(" (getpacket Index: ", acsel, ")\n") } acsellist <- c(acsellist, acsel) } if(printing == TRUE) cat("There are ", cntr, " reconstruction steps\n") invisible(list(indexlist = acsellist, rvector = rvector)) } "print.nvwp"<- function(x, printing = TRUE, ...) { nlev <- nlevelsWT(x) pkt <- NULL level <- NULL decompose <- x$node.list[[nlev]]$upperctrl if(decompose == "B") { parent.decompose <- 0 for(i in nlev:1) { child.lev <- i - 1 child.decompose <- sort(c(2 * parent.decompose, 2 * parent.decompose + 1)) if(child.lev == 0) ctrl <- rep("T", 2^nlev) else ctrl <- x$node.list[[child.lev]]$upperctrl for(j in 1:length(child.decompose)) { if(ctrl[child.decompose[j] + 1] == "T") { level <- c(level, child.lev) pkt <- c(pkt, child.decompose[j]) if(printing == TRUE) cat("Level: ", child.lev, " Packet: ", child.decompose[j], "\n") } } if(child.lev != 0) { ctrl <- ctrl[child.decompose + 1] sv <- ctrl == "B" parent.decompose <- child.decompose[sv] } if (length(parent.decompose)==0) break } } else { level <- nlev pkt <- 0 if(printing == TRUE) { cat("Original data is best packet!\n") } } invisible(list(level = level, pkt = pkt)) } "print.w2d"<- function(x, ...) { cat("w2d class object.\n") cat("A composite object containing the components\n") cat("\t") print(names(x)) cat("Number of levels: ", nlevelsWT(x), "\n") cat("Number of data points: ", nrow(x$m), "\n") cat("Number of bases: ", ncol(x$m), "\n") cat("Groups vector: ") print(x$k) } "print.wd"<- function(x, ...) 
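#
# Example (not run): print.nv and print.nvwp double as extractors -- with
# printing = FALSE they quietly return the packet choices that define the
# selected basis.  The wst object below is an illustrative assumption.
#
#   ywst <- wst(example.1()$y)
#   nv <- MaNoVe(ywst)                      # node vector of class "nv"
#   steps <- print(nv, printing = FALSE)    # no output, just the decomposition
#   steps$indexlist                         # one packet index per reconstruction step
#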
{ if(IsEarly(x)) { ConvertMessage() stop() } cat("Class 'wd' : Discrete Wavelet Transform Object:\n") cat(" ~~ : List with", length(x), "components with names\n") cat(" ", names(x), "\n\n") if(x$bc == "interval") cat("$transformed.vector is a LONG coefficient vector!\n") else cat("$C and $D are LONG coefficient vectors\n") cat("\nCreated on :", x$date, "\n") cat("Type of decomposition: ", x$type, "\n") cat("\nsummary(.):\n----------\n") summary.wd(x) } "print.wd3D"<- function(x, ...) { if(IsEarly(x)) { ConvertMessage() stop() } cat("Class 'wd3d' : 3D DWT Object:\n") cat(" ~~~~ : List with", length(x), "components with names\n") cat(" ", names(x), "\n\n") cat("$ a is the wavelet coefficient array\n") cat("Dimension of a is ") print(dim(x$a)) cat("\nCreated on :", x$date, "\n") cat("\nsummary(.):\n----------\n") summary.wd3D(x) } "print.wp"<- function(x, ...) { if(IsEarly(x)) { ConvertMessage() stop() } cat("Class 'wp' : Wavelet Packet Object:\n") cat(" ~~ : List with", length(x), "components with names\n") cat(" ", names(x), "\n\n") cat("$wp is the wavelet packet matrix\n") cat("\nCreated on :", x$date, "\n") cat("\nsummary(.):\n----------\n") summary.wp(x) } "print.wpst"<- function(x, ...) { if(IsEarly(x)) { ConvertMessage() stop() } cat("Class 'wpst' : Nondecimated Wavelet Packet Transform Object:\n") cat(" ~~~ : List with", length(x), "components with names\n") cat(" ", names(x), "\n\n") cat("$wpst is a coefficient vector\n") cat("\nCreated on :", x$date[1], "\n") cat("\nsummary(.):\n----------\n") summary.wpst(x) } "print.wpstCL"<- function(x, ...) { cat("wpstCL class object\n") cat("Results of applying discriminator to time series\n") cat("Components: ", names(x), "\n") } "print.wpstDO"<- function(x, ...) { cat("Nondecimated wavelet packet discrimination object\n") cat("Composite object containing components:") print(names(x)) cat("Fisher's discrimination: done\n") cat("BP component has the following information\n") print(x$BP) } "print.wst"<- function(x, ...) { if(IsEarly(x)) { ConvertMessage() stop() } cat("Class 'wst' : Packet-ordered Nondecimated Wavelet Transform Object:\n") cat(" ~~~ : List with", length(x), "components with names\n") cat(" ", names(x), "\n\n") cat("$wp and $Carray are the coefficient matrices\n") cat("\nCreated on :", x$date[1], "\n") cat("\nsummary(.):\n----------\n") summary.wst(x) } "print.wst2D"<- function(x, ...) { cat("Class 'wst2D' : 2D Packet-ordered Nondecimated Wavelet Transform Object:\n") cat(" ~~~~~ : List with", length(x), "components with names\n") cat(" ", names(x), "\n\n") cat("$wst2D is the coefficient array\n") cat("\nCreated on :", x$date[1], "\n") cat("\nsummary(.):\n----------\n") summary.wst2D(x) } "putC"<- function(...) UseMethod("putC") "putC.mwd"<- function(mwd, level, M, boundary = FALSE, index = FALSE, ...) { # #putC.mwd, changes the C coefficients at the given level. 
#Tim Downie #last update May 1996 # if(is.null(class(mwd))) stop("mwd is not class mwd object") if(!inherits(mwd, "mwd")) stop("mwd is not class mwd object") if(level < 0) stop("level too small") else if(level > nlevelsWT(mwd)) stop("level too big") flc <- mwd$fl.dbase$first.last.c[level + 1, ] if(boundary == FALSE) { if(mwd$type == "wavelet") n <- 2^level else n <- 2^nlevelsWT(mwd) i1 <- flc[3] + 1 - flc[1] i2 <- flc[3] + n - flc[1] } else { n <- flc[2] - flc[1] + 1 i1 <- flc[3] + 1 i2 <- flc[3] + n } if(index == FALSE) { if(length(M) != mwd$filter$npsi * n) stop("The length of M is wrong") mwd$C[, i1:i2] <- M return(mwd) } else return(list(ix1 = i1, ix2 = i2)) } "putC.wd"<- function(wd, level, v, boundary = FALSE, index = FALSE, ...) { if(IsEarly(wd)) { ConvertMessage() stop() } if(!inherits(wd, "wd")) stop("wd is not class wd object") if(level < 0) stop("level should be zero or larger") else if(level > nlevelsWT(wd)) stop(paste("Level should be less than or equal to ", nlevelsWT(wd ))) if(wd$bc == "interval") { if(level != wd$current.scale) stop(paste( "Requested wd object was decomposed to level ", wd$current.scale, " and so for \"wavelets on the interval\" object\ns I can only alter this level for the scaling function coefficients\n" )) first.level <- wd$fl.dbase$first.last.c[1] last.level <- wd$fl.dbase$first.last.c[2] offset.level <- wd$fl.dbase$first.last.c[3] n <- last.level - first.level + 1 if(length(v) != n) stop(paste( "I think the length of \"v\" is wrong. I think it should be of length ", n)) wd$transformed.vector[(offset.level + 1 - first.level):( offset.level + n - first.level)] <- v return(wd) } flc <- wd$fl.dbase$first.last.c[level + 1, ] if(boundary == FALSE) { if(wd$type == "wavelet") n <- 2^level else n <- 2^nlevelsWT(wd) i1 <- flc[3] + 1 - flc[1] i2 <- flc[3] + n - flc[1] } else { n <- flc[2] - flc[1] + 1 i1 <- flc[3] + 1 i2 <- flc[3] + n } if(length(v) != n) stop(paste("I think the length of \"v\" is wrong. I think it should be of length ", n)) wd$C[i1:i2] <- v if(index == FALSE) return(wd) else return(list(ix1 = i1, ix2 = i2)) } "putC.wp"<- function(wp, ...) { stop("A wavelet packet object does not have ``levels'' of father wavelet coefficients. Use putD to obtain levels of father and mother coefficients" ) } "putC.wst"<- function(wst, level, value, ...) { # # # Get all coefficients at a particular level # First work out how many packets there are at this level # nlevels <- nlevelsWT(wst) if(2^nlevels != length(value)) stop("Input data value of wrong length") wst$Carray[level + 1, ] <- value wst } "putD"<- function(...) UseMethod("putD") "putD.mwd"<- function(mwd, level, M, boundary = FALSE, index = FALSE, ...) { # #putD.mwd #replaces D coefficients at given level with M #Tim Downie #last update May 1996 # # if(is.null(class(mwd))) stop("mwd is not class mwd object") if(!inherits(mwd, "mwd")) stop("mwd is not class mwd object") if(level < 0) stop("level too small") else if(level >= nlevelsWT(mwd)) stop("level too big") fld <- mwd$fl.dbase$first.last.d[level + 1, ] if(boundary == FALSE) { if(mwd$type == "wavelet") n <- 2^level else n <- 2^nlevelsWT(mwd) i1 <- fld[3] + 1 - fld[1] i2 <- fld[3] + n - fld[1] } else { n <- fld[2] - fld[1] + 1 i1 <- fld[3] + 1 i2 <- fld[3] + n } if(index == FALSE) { if(length(M) != mwd$filter$npsi * n) stop("The length of M is wrong") mwd$D[, i1:i2] <- M return(mwd) } else return(list(ix1 = i1, ix2 = i2)) } "putD.wd"<- function(wd, level, v, boundary = FALSE, index = FALSE, ...) 
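#
# Example (not run): putC.wd replaces the father wavelet (smooth)
# coefficients at one level; with index = TRUE it returns the slot
# positions instead of the modified object.  Data and level below are
# assumed for illustration.
#
#   ywd <- wd(rnorm(64))                          # 64 = 2^6 data points
#   cc <- accessC(ywd, level = 3)                 # 8 smooth coefficients
#   ywd2 <- putC(ywd, level = 3, v = rep(0, length(cc)))
#   putC(ywd, level = 3, v = cc, index = TRUE)    # list(ix1, ix2) of slots in $C
#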
{ if(IsEarly(wd)) { ConvertMessage() stop() } if(!inherits(wd, "wd")) stop("wd is not class wd object") if(level < 0) stop("level too small") else if(level > nlevelsWT(wd)- 1) stop(paste("Level too big. Maximum level is ", nlevelsWT(wd)- 1)) if(wd$bc == "interval") { level <- level - wd$current.scale objname <- deparse(substitute(wd)) if(level < 0) stop(paste("The wd object: ", objname, " was only decomposed down to level: ", wd$ current.scale, " Try a larger level")) if(boundary == TRUE) stop("There are no boundary elements in a wavelets on th\ne interval transform!" ) } fld <- wd$fl.dbase$first.last.d[level + 1, ] if(boundary == FALSE) { if(wd$type == "wavelet") n <- 2^level else n <- 2^nlevelsWT(wd) if(wd$bc == "interval") n <- fld[2] - fld[1] + 1 i1 <- fld[3] + 1 - fld[1] i2 <- fld[3] + n - fld[1] } else { n <- fld[2] - fld[1] + 1 i1 <- fld[3] + 1 i2 <- fld[3] + n } if(length(v) != n) stop("I think that the length of v is wrong") if(wd$bc == "interval") wd$transformed.vector[i1:i2] <- v else wd$D[i1:i2] <- v if(index == FALSE) return(wd) else return(list(ix1 = i1, ix2 = i2)) } "putD.wd3D"<- function(x, v, ...) { truesize <- dim(x$a)[1] nlx <- nlevelsWT(x) vlev <- v$lev va <- v$a putDwd3Dcheck(lti = vlev, dima = dim(va), block = v$block, nlx = nlx) Iarrayix <- switch(v$block, HHH = 0, GHH = 1, HGH = 2, GGH = 3, HHG = 4, GHG = 5, HGG = 6, GGG = 7) if(Iarrayix == 0 && vlev != 0) stop("Can only insert HHH into level 0") if(is.null(Iarrayix)) stop(paste("Unknown block to insert: ", v$block)) tmp <- .C("putarr", Carray = as.double(x$a), truesize = as.integer(truesize), level = as.integer(vlev), Iarrayix = as.integer(Iarrayix), Iarray = as.double(va), PACKAGE = "wavethresh") x$a <- array(tmp$Carray, dim = dim(x$a)) x } "putD.wp"<- function(wp, level, value, ...) { # # Insert coefficients "value" into "wp" at resolution "level". # First work out how many packets there are at this level # nlev <- nlevelsWT(wp) if(2^nlev != length(value)) stop("Input data value of wrong length") wp$wp[level + 1, ] <- value wp } "putD.wst"<- function(wst, level, value, ...) { # # # Get all coefficients at a particular level # First work out how many packets there are at this level # nlevels <- nlevelsWT(wst) if(2^nlevels != length(value)) stop("Input data value of wrong length") wst$wp[level + 1, ] <- value wst } "putDwd3Dcheck"<- function(lti, dima, block, nlx) { if(lti < 0) stop(paste("Level cannot be negative for block:", block)) else if(lti > nlx - 1) stop(paste("Maximum level for block: ", block, " is ", nlx - 1) ) if(length(dima) != 3) stop(paste(block, "array is not three-dimensional")) if(any(dima != dima[1])) stop(paste(block, " dimensions are not all the same")) arrdimlev <- IsPowerOfTwo(dima[1]) if(is.na(arrdimlev)) stop(paste(block, " dimensions are not power of two")) if(arrdimlev != lti) stop(paste(block, "dimensions will not fit into cube at that level")) } "putpacket"<- function(...) UseMethod("putpacket") "putpacket.wp"<- function(wp, level, index, packet, ...) 
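#
# Example (not run): putD.wd overwrites the mother wavelet (detail)
# coefficients at one level, so zeroing a whole level is a hand-rolled
# "threshold".  The bumps signal and the chosen level are illustrative.
#
#   ywd <- wd(DJ.EX()$bumps)
#   d5 <- accessD(ywd, level = 5)
#   ywd.z <- putD(ywd, level = 5, v = rep(0, length(d5)))
#   ts.plot(wr(ywd.z))                 # reconstruction without level-5 detail
#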
{ # cat("PUTPACKET: Level:", level, " Index:", index, " Pack Length ", # length(packet), "\n") if(!inherits(wp, "wp")) stop("wp object is not of class wp") if(level > nlevelsWT(wp)) stop("Not that many levels in wp object") unit <- 2^level LocalIndex <- unit * index + 1 if(index > 2^(nlevelsWT(wp)- level) - 1) { cat("Index was too high, maximum for this level is ", 2^(wp$ nlevels - level) - 1, "\n") stop("Error occured") } if(LocalIndex < 0) stop("Index must be non-negative") if(length(packet) != unit) stop("Packet is not of correct length\n") wp$wp[level + 1, (LocalIndex:(LocalIndex + unit - 1))] <- packet wp } "putpacket.wst"<- function(wst, level, index, packet, ...) { class(wst) <- "wp" l <- putpacket.wp(wst, level = level, index = index, packet = packet) class(l) <- "wst" l } "putpacket.wst2D"<- function(wst2D, level, index, type = "S", packet, Ccode = TRUE, ...) { cellength <- 2^level nlev <- nlevelsWT(wst2D) if(!is.matrix(packet)) stop("packet should be a matrix") nr <- nrow(packet) nc <- ncol(packet) if(nr != nc) stop("packet should be a square matrix") else if(nr != cellength) stop(paste("packet matrix should be square of dimension ", cellength, " if you're inserting at level ", level, " not ", nr)) if(level > nlev - 1) stop(paste("Maximum level is ", nlev - 1, " you supplied ", level)) else if(level < 0) stop(paste("Minimum level is 0 you supplied ", level)) if(type != "S" && type != "H" && type != "V" && type != "D") stop("Type must be one of S, H, V or D") if(nchar(index) != nlev - level) stop(paste("Index must be ", nlev - level, " characters long for level ", level)) for(i in 1:nchar(index)) { s1 <- substring(index, i, i) if(s1 != "0" && s1 != "1" && s1 != "2" && s1 != "3") stop(paste("Character ", i, " in index is not a 0, 1, 2 or 3. It is ", s1)) } if(Ccode == TRUE) { ntype <- switch(type, S = 0, H = 1, V = 2, D = 3) amdim <- dim(wst2D$wst2D) ans <- .C("putpacketwst2D", am = as.double(wst2D$wst2D), d1 = as.integer(amdim[1]), d12 = as.integer(amdim[1] * amdim[2]), maxlevel = as.integer(nlev - 1), level = as.integer(level), index = as.integer(index), ntype = as.integer(ntype), packet = as.double(packet), sl = as.integer(nr), PACKAGE = "wavethresh") wst2D$wst2D <- array(ans$am, dim = amdim) } else { x <- y <- 0 ans <- .C("ixtoco", level = as.integer(level), maxlevel = as.integer(nlev - 1), index = as.integer(index), x = as.integer(x), y = as.integer(y), PACKAGE = "wavethresh") tmpx <- switch(type, S = 0, H = 0, V = cellength, D = cellength) tmpy <- switch(type, S = 0, H = cellength, V = 0, D = cellength) x <- ans$x + tmpx + 1 y <- ans$y + tmpy + 1 cat("x ", x, "y: ", y, "x+cellength-1 ", x + cellength - 1, "y+cellength-1", y + cellength - 1, "\n") wst2D$wst2D[level + 1, x:(x + cellength - 1), y:(y + cellength - 1)] <- packet } wst2D } "rcov"<- function(x) { # #rcov # #computes a robust correlation matrix of x # x must be a matrix with the columns as observations #which is the opposite to the S function var (don't get confused!) 
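#
# Example (not run): putpacket.wp is the inverse access operation to
# getpacket.wp; packets live contiguously within each row of $wp.  The
# transform and the (level, index) pair below are assumed.
#
#   ywp <- wp(rnorm(256))
#   pk <- getpacket(ywp, level = 4, index = 3)    # 2^4 = 16 coefficients
#   ywp <- putpacket(ywp, level = 4, index = 3, packet = rep(0, length(pk)))
#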
#Method comes from Huber's "Robust Statistics" # if(!is.matrix(x)) stop("x must be a matrix") m <- dim(x)[1] n <- dim(x)[2] b1 <- b2 <- b3 <- 0 a <- rep(0, m) sigma <- matrix(rep(0, m^2), nrow = m) for(i in 1:m) { a[i] <- 1/mad(x[i, ]) sigma[i, i] <- 1/a[i]^2 } if(m > 1) { for(i in 2:m) for(j in 1:(i - 1)) { b1 <- mad(a[i] * x[i, ] + a[j] * x[j, ])^2 b2 <- mad(a[i] * x[i, ] - a[j] * x[j, ])^2 b3 <- mad(a[j] * x[j, ] - a[i] * x[i, ])^2 sigma[i, j] <- (b1 - b2)/((b1 + b2) * a[i] * a[ j]) sigma[j, i] <- (b1 - b3)/((b1 + b3) * a[i] * a[ j]) } } return(sigma) } "rfft"<- function(x) { # given a vector x computes the real continuous fourier transform of # x; ie regards x as points on a periodic function on [0,1] starting at # 0 and finding the coefficients of the functions 1, sqrt(2)cos 2 pi t, # sqrt(2) sin 2 pi t, etc that give an expansion of the interpolant of # x The number of terms in the expansion is the length of x. # If x is of even length, the last # coefficient will be that of a cosine term with no matching sine. # nx <- length(x) z <- fft(x) z1 <- sqrt(2) * z[2:(1 + floor(nx/2))] rz <- c(Re(z)[1], as.vector(rbind(Re(z1), - Im(z1))))/nx return(rz[1:nx]) } "rfftinv"<- function(rz, n = length(rz)) { # Inverts the following transform---- # given a vector rz computes the inverse real continuous fourier transform of # rz; ie regards rz as the coefficients of the expansion of a # periodic function f in terms of the functions # 1, sqrt(2)cos 2 pi t, sqrt(2) sin 2 pi t, etc . # The output of the function is f evaluated # at a regular grid of n points, starting at 0. # If n is not specified it is taken to be the length of rz; # the results are unpredictable if n < length(rz). # nz <- length(rz) z <- complex(n) nz1 <- floor(nz/2) nz2 <- ceiling(nz/2) - 1 z[1] <- rz[1] + (0i) z[2:(nz1 + 1)] <- (1/sqrt(2)) * rz[seq(from = 2, by = 2, length = nz1)] z[2:(nz2 + 1)] <- z[2:(nz2 + 1)] - (1i) * (1/sqrt(2)) * rz[seq(from = 3, by = 2, length = nz2)] z[n:(n + 1 - nz1)] <- Conj(z[2:(nz1 + 1)]) x <- Re(fft(z, inverse = TRUE)) return(x) } "rfftwt"<- function(xrfft, wt) { # weight the real fourier series xrfft of even length # by a weight sequence wt # The first term of xrfft is left alone, and the weights are # then applied to pairs of terms in xrfft. # wt is of length half n . 
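#
# Example (not run): rfft and rfftinv invert one another on even-length
# vectors, and rcov expects one row per variable with observations in the
# columns.  The random inputs are illustrative.
#
#   x <- rnorm(16)
#   all.equal(rfftinv(rfft(x)), x)       # TRUE up to numerical error
#
#   m <- rbind(rnorm(200), rnorm(200, sd = 2))
#   rcov(m)                              # robust estimate of the 2 x 2 covariance
#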
xsrfft <- xrfft * c(1, rep(wt, c(rep(2, length(wt) - 1), 1))) return(xsrfft) } "rm.det"<- function(wd.int.obj) { len <- length(wd.int.obj$transformed.vector) n <- len maxscale <- log(len, 2) minscale <- wd.int.obj$current.scale for(i in c(maxscale:(minscale + 1))) n <- n/2 for(i in c((n + 1):len)) wd.int.obj$transformed.vector[i] <- 0 return(wd.int.obj) } "rmget"<- function(requestJ, filter.number, family) { ps <- paste("rm.*.", filter.number, ".", family, sep = "") cand <- objects(envir = WTEnv, pattern = ps) if(length(cand) == 0) return(NULL) cand <- substring(cand, first = 4) candfd <- firstdot(cand) cand <- as.numeric(substring(cand, first = 1, last = candfd - 1)) cand <- cand[cand >= requestJ] if(length(cand) == 0) return(NULL) else return(min(cand)) } "rmname"<- function(J, filter.number, family) { if(J >= 0) stop("J must be a negative integer") return(paste("rm.", - J, ".", filter.number, ".", family, sep = "")) } "rotateback"<- function(v) { lv <- length(v) v[c(lv, 1:(lv - 1))] } "rsswav"<- function(noisy, value = 1, filter.number = 10, family = "DaubLeAsymm", thresh.type = "hard", ll = 3) { lo <- length(noisy) oodd <- noisy[seq(from = 1, by = 2, length = lo/2)] oeven <- noisy[seq(from = 2, by = 2, length = lo/2)] # # # Do decomposition of odd # oddwd <- wd(oodd, filter.number = filter.number, family = family) oddwdt <- threshold(oddwd, policy = "manual", value = value, type = thresh.type, lev = ll:(nlevelsWT(oddwd)- 1)) oddwr <- wr(oddwdt) # # Interpolate evens # eint <- (c(oeven[1], oeven) + c(oeven, oeven[length(oeven)]))/2 eint <- eint[1:(length(eint) - 1)] ssq1 <- ssq(eint, oddwr) # # ts.plot(oddwr, main = paste("Odd plot, ssq=", ssq1)) # # Now do decomposition of even # evenwd <- wd(oeven, filter.number = filter.number, family = family) evenwdt <- threshold(evenwd, policy = "manual", value = value, type = thresh.type, lev = ll:(nlevelsWT(evenwd)- 1)) evenwr <- wr(evenwdt) # # # Inerpolate odds # oint <- (c(oodd[1], oodd) + c(oodd, oodd[length(oodd)]))/2 oint <- oint[1:(length(oint) - 1)] ssq2 <- ssq(oint, evenwr) # ts.plot(evenwr, main = paste("Even plot, ssq=", ssq2)) answd <- wd(noisy, filter.number = filter.number, family = family) ll <- list(ssq = (ssq1 + ssq2)/2, df = dof(threshold(answd, policy = "manual", value = value, type = thresh.type, lev = ll:(answd$ nlevels - 1)))) return(ll) } "simchirp"<- function(n = 1024) { x <- 1.0000000000000001e-05 + seq(from = -1, to = 1, length = n + 1)[1: n] y <- sin(pi/x) list(x = x, y = y) } "ssq"<- function(u, v) { sum((u - v)^2) } "summary.imwd"<- function(object, ...) { # # # Check class of imwd # ctmp <- class(object) if(is.null(ctmp)) stop("imwd has no class") else if(ctmp != "imwd") stop("imwd is not of class imwd") first.last.c <- object$fl.dbase$first.last.c pix <- first.last.c[nlevelsWT(object)+ 1, 2] - first.last.c[nlevelsWT(object)+ 1, 1] + 1 cat("UNcompressed image wavelet decomposition structure\n") cat("Levels: ", nlevelsWT(object), "\n") cat("Original image was", pix, "x", pix, " pixels.\n") cat("Filter was: ", object$filter$name, "\n") cat("Boundary handling: ", object$bc, "\n") } "summary.imwdc"<- function(object, ...) 
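#
# Example (not run): rsswav computes an even/odd cross-validation residual
# sum of squares for a single candidate threshold, which makes it useful
# for choosing `value' by grid search.  The chirp signal, noise level and
# candidate threshold are assumptions for illustration.
#
#   y <- simchirp(256)$y
#   ynoise <- y + rnorm(256, sd = 0.1)
#   rsswav(ynoise, value = 2)$ssq        # smaller is better across a grid of values
#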
{ # # # Check class of imwdc # ctmp <- class(object) if(is.null(ctmp)) stop("imwdc has no class") else if(ctmp != "imwdc") stop("imwdc is not of class imwdc") first.last.c <- object$fl.dbase$first.last.c pix <- first.last.c[nlevelsWT(object)+ 1, 2] - first.last.c[nlevelsWT(object)+ 1, 1] + 1 cat("Compressed image wavelet decomposition structure\n") cat("Levels: ", nlevelsWT(object), "\n") cat("Original image was", pix, "x", pix, " pixels.\n") cat("Filter was: ", object$filter$name, "\n") cat("Boundary handling: ", object$bc, "\n") } "summary.mwd"<- function(object, ...) { ctmp <- class(object, ...) if(is.null(ctmp)) stop("Input must have class mwd") else if(ctmp != "mwd") stop("Input must have class mwd") cat("Length of original: ", object$ndata, "\n") cat("Levels: ", nlevelsWT(object), "\n") cat("Filter was: ", object$filter$name, "\n") cat("Scaling fns: ", object$filter$nphi, "\n") cat("Wavelet fns: ", object$filter$npsi, "\n") cat("Prefilter: ", object$prefilter, "\n") cat("Scaling factor: ", object$filter$ndecim, "\n") cat("Boundary handling: ", object$bc, "\n") cat("Transform type: ", object$type, "\n") cat("Date: ", object$date, "\n") } "summary.wd"<- function(object, ...) { if(IsEarly(object)) { ConvertMessage() stop() } if(object$bc != "interval") pix <- length(accessC(object)) else pix <- 2^nlevelsWT(object) cat("Levels: ", nlevelsWT(object), "\n") cat("Length of original: ", pix, "\n") cat("Filter was: ", object$filter$name, "\n") cat("Boundary handling: ", object$bc, "\n") if(object$bc == "interval") if(object$preconditioned == TRUE) cat("Preconditioning is ON\n") else cat("Preconditioning is OFF\n") cat("Transform type: ", object$type, "\n") cat("Date: ", object$date, "\n") } "summary.wd3D"<- function(object, ...) { if(IsEarly(object)) { ConvertMessage() stop() } cat("Levels: ", nlevelsWT(object), "\n") cat("Filter number was: ", object$filter.number, "\n") cat("Filter family was: ", object$family, "\n") cat("Date: ", object$date, "\n") } "summary.wp"<- function(object, ...) { if(IsEarly(object)) { ConvertMessage() stop() } wpdim <- dim(object$wp) cat("Levels: ", nlevelsWT(object), "\n") cat("Length of original: ", wpdim[2], "\n") cat("Filter was: ", object$filter$name, "\n") } "summary.wpst"<- function(object, ...) { if(IsEarly(object)) { ConvertMessage() stop() } pix <- 2^nlevelsWT(object) cat("Levels: ", nlevelsWT(object), "\n") cat("Length of original: ", pix, "\n") cat("Filter was: ", object$filter$name, "\n") cat("Date: ", object$date[1], "\n") if(length(object$date) != 1) cat("This object has been modified. Use \"Whistory\" to find out what's happened\n" ) } "summary.wst"<- function(object, ...) { if(IsEarly(object)) { ConvertMessage() stop() } pix <- 2^nlevelsWT(object) cat("Levels: ", nlevelsWT(object), "\n") cat("Length of original: ", pix, "\n") cat("Filter was: ", object$filter$name, "\n") cat("Date: ", object$date[1], "\n") if(length(object$date) != 1) cat("This object has been modified. Use \"Whistory\" to find out what's happened\n" ) } "summary.wst2D"<- function(object, ...) { nlev <- nlevelsWT(object) cat("Levels: ", nlev, "\n") cat("Length of original: ", 2^nlev, "x", 2^nlev, "\n") cat("Filter was: ", object$filter$name, "\n") cat("Date: ", object$date[1], "\n") if(length(object$date) != 1) cat("This object has been modified. 
Use \"Whistory\" to find out what's happened\n" ) } "support"<- function(filter.number = 10, family = "DaubLeAsymm", m = 0, n = 0) { m <- m + 1 if(family == "DaubExPhase") { a <- - (filter.number - 1) b <- filter.number lh <- 2^( + m) * (a + n) rh <- 2^( + m) * (b + n) return(list(lh = lh, rh = rh, psi.lh = - (filter.number - 1), psi.rh = filter.number, phi.lh = 0, phi.rh = 2 * filter.number - 1)) } else if(family == "DaubLeAsymm") { a <- - (filter.number - 1) b <- filter.number lh <- 2^( + m) * (a + n) rh <- 2^( + m) * (b + n) return(list(lh = lh, rh = rh, psi.lh = - (filter.number - 1), psi.rh = filter.number, phi.lh = 0, phi.rh = 2 * filter.number - 1)) } else { stop(paste("Family: ", family, " not supported for support!\n") ) } } "sure"<- function(x) { # # The SURE function of Donoho and Johnstone # Finds the minimum # x <- abs(x) d <- length(x) y <- sort(x) # # # Form cumulative sum # cy <- cumsum(y^2) cy <- c(0, cy[1:(length(cy) - 1)]) # # # Now the answer # ans <- d - 2 * 1:d + cy + d:1 * y^2 # cat("ans is\n") # print(ans) m <- min(ans) index <- (1:length(ans))[m == ans] return(y[index]) } "threshold"<- function(...) UseMethod("threshold") "threshold.imwd"<- function(imwd, levels = 3:(nlevelsWT(imwd)- 1), type = "hard", policy = "universal", by.level = FALSE, value = 0, dev = var, verbose = FALSE, return.threshold = FALSE, compression = TRUE, Q = 0.050000000000000003, ...) { # # # Check class of imwd # if(verbose == TRUE) cat("Argument checking\n") ctmp <- class(imwd) if(is.null(ctmp)) stop("imwd has no class") else if(ctmp != "imwd") stop("imwd is not of class imwd") if(policy != "universal" && policy != "manual" && policy != "probability" && policy != "fdr") stop("Only policys are universal, manual, fdr and probability at present" ) if(type != "hard" && type != "soft") stop("Only hard or soft thresholding at present") r <- range(levels) if(r[1] < 0) stop("levels out of range, level too small") if(r[2] > nlevelsWT(imwd)- 1) stop("levels out of range, level too big") if(r[1] > nlevelsWT(imwd)- 1) { warning("no thresholding done") return(imwd) } if(r[2] < 0) { warning("no thresholding done") return(imwd) } nthresh <- length(levels) d <- NULL n <- 2^(2 * nlevelsWT(imwd)) # # Decide which policy to adopt # The next if-else construction should define a vector called # "thresh" that contains the threshold value for each level # in "levels". This may be the same threshold value # a global threshold. 
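#
# Example (not run): sure() returns the soft threshold minimising Stein's
# unbiased risk estimate for coefficients already rescaled to unit noise
# variance; the "sure" policies in the threshold methods below apply it to
# coefficients divided by an estimated noise level.  The sparse simulation
# here is an illustrative assumption.
#
#   theta <- c(rep(5, 10), rep(0, 246))
#   x <- theta + rnorm(256)
#   sure(x)                              # data-driven soft-threshold level
#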
# if(policy == "universal") { if(verbose == TRUE) cat("Universal policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) { d <- c(d, imwd[[lt.to.name(levels[i], "CD")]], imwd[[lt.to.name(levels[i], "DC")]], imwd[[ lt.to.name(levels[i], "DD")]]) } noise.level <- sqrt(dev(d)) thresh <- sqrt(2 * log(n)) * noise.level if(verbose == TRUE) cat("Global threshold is: ", thresh, "\n") thresh <- rep(thresh, length = nthresh) } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) for(i in 1:nthresh) { d <- c(imwd[[lt.to.name(levels[i], "CD")]], imwd[[lt.to.name(levels[i], "DC")]], imwd[[ lt.to.name(levels[i], "DD")]]) noise.level <- sqrt(dev(d)) thresh[i] <- sqrt(2 * log(n)) * noise.level if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } } else if(policy == "manual") { if(verbose == TRUE) cat("Manual policy...\n") thresh <- rep(value, length = nthresh) if(length(value) != 1 && length(value) != nthresh) warning("your threshold is not the same length as number of levels" ) } else if(policy == "fdr") { # # # Threshold chosen by FDR-procedure # if(verbose == TRUE) cat("FDR policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) { d <- c(d, imwd[[lt.to.name(levels[i], "CD")]], imwd[[lt.to.name(levels[i], "DC")]], imwd[[ lt.to.name(levels[i], "DD")]]) } if(length(value) != 1) stop("Length of value should be 1") noise.level <- sqrt(dev(c(imwd[[lt.to.name(levels[ nthresh], "CD")]], imwd[[lt.to.name(levels[ nthresh], "DC")]], imwd[[lt.to.name(levels[ nthresh], "DD")]]))) minit <- n dinit <- d thinit <- qnorm(1 - Q/2) * noise.level if(log(n, 2) > 15) ninit <- 4 else { if(log(n, 2) > 12) ninit <- 3 else { if(log(n, 2) > 10) ninit <- 2 else ninit <- 1 } } for(k in seq(1, ninit)) { dinit1 <- dinit[abs(dinit) >= thinit] minit <- length(dinit1) if(minit == 0) thresh <- max(abs(d)) * 1.0001 else { thinit <- qnorm(1 - (Q * minit)/(2 * n)) * noise.level minit1 <- length(dinit1[abs(dinit1) >= thinit ]) if(minit1 == minit || minit1 == 0) break dinit <- dinit1 } } if(noise.level > 0) { m <- length(d) minit <- length(dinit) p <- (2 - 2 * pnorm(abs(dinit)/noise.level)) index <- order(p) j <- seq(1, minit) m0 <- max(j[p[index] <= (Q * j)/m]) if(m0 != "NA" && m0 < minit) thresh <- abs(dinit[index[m0]]) else { if(m0 == "NA") thresh <- max(abs(dinit)) * 1.0001 else thresh <- 0 } } else thresh <- 0 thresh <- rep(thresh, length = nthresh) if(verbose == TRUE) cat("Global threshold is: ", thresh[1], "\n", "sigma is: ", noise.level, "\n") } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) for(i in 1:nthresh) { d <- c(imwd[[lt.to.name(levels[i], "CD")]], imwd[[lt.to.name(levels[i], "DC")]], imwd[[ lt.to.name(levels[i], "DD")]]) m <- length(d) noise.level <- sqrt(dev(d)) thinit <- qnorm(1 - Q/2) * noise.level dinit <- d[abs(d) >= thinit] minit <- length(dinit) if(minit == 0) thresh[i] <- max(abs(d)) * 1.0001 else { if(noise.level > 0) { p <- (2 - 2 * pnorm(abs(dinit)/noise.level) ) index <- order(p) j <- seq(1, minit) m0 <- max(j[p[index] <= (Q * j)/m]) if(m0 != "NA" && m0 < minit) thresh[i] <- abs(dinit[index[m0]]) else { if(m0 == "NA") thresh[i] <- max(abs(dinit)) * 1.0001 else thresh[i] <- 0 } } else thresh[i] <- 0 } if(verbose == TRUE) cat("Threshold for level: ", levels[i], "is", thresh[i], "\n") } } } else if(policy == "probability") { if(verbose == TRUE) cat("Probability policy...") if(by.level == FALSE) { if(verbose == TRUE) 
cat("All levels at once\n") for(i in 1:nthresh) { d <- c(d, imwd[[lt.to.name(levels[i], "CD")]], imwd[[lt.to.name(levels[i], "DC")]], imwd[[ lt.to.name(levels[i], "DD")]]) } if(length(value) != 1) stop("Length of value should be 1") thresh <- rep(quantile(abs(d), prob = value), length = nthresh) if(verbose == TRUE) cat("Global threshold is: ", thresh[1], "\n") } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) if(length(value) == 1) value <- rep(value, nthresh) if(length(value) != nthresh) stop("Wrong number of probability values") for(i in 1:nthresh) { d <- c(imwd[[lt.to.name(levels[i], "CD")]], imwd[[lt.to.name(levels[i], "DC")]], imwd[[ lt.to.name(levels[i], "DD")]]) thresh[i] <- quantile(abs(d), prob = value[i]) if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } } if(return.threshold == TRUE) return(thresh) for(i in 1:nthresh) { dCD <- imwd[[lt.to.name(levels[i], "CD")]] dDC <- imwd[[lt.to.name(levels[i], "DC")]] dDD <- imwd[[lt.to.name(levels[i], "DD")]] if(type == "hard") { dCD[abs(dCD) <= thresh[i]] <- 0 dDC[abs(dDC) <= thresh[i]] <- 0 dDD[abs(dDD) <= thresh[i]] <- 0 if(verbose == TRUE) { cat("Level: ", levels[i], " there are ", sum( dCD == 0), ":", sum(dDC == 0), ":", sum(dDD == 0), " zeroes and: ") cat(sum(dCD != 0), ":", sum(dDC != 0), ":", sum( dDD != 0), " nonzeroes\n") } } else if(type == "soft") { dCD <- sign(dCD) * (abs(dCD) - thresh[i]) * (abs(dCD) > thresh[i]) dDC <- sign(dDC) * (abs(dDC) - thresh[i]) * (abs(dDC) > thresh[i]) dDD <- sign(dDD) * (abs(dDD) - thresh[i]) * (abs(dDD) > thresh[i]) if(verbose == TRUE) { cat("Level: ", levels[i], " there are ", sum( dCD == 0), ":", sum(dDC == 0), ":", sum(dDD == 0), " zeroes and: ") cat(sum(dCD != 0), ":", sum(dDC != 0), ":", sum( dDD != 0), " nonzeroes\n") } } imwd[[lt.to.name(levels[i], "CD")]] <- dCD imwd[[lt.to.name(levels[i], "DC")]] <- dDC imwd[[lt.to.name(levels[i], "DD")]] <- dDD } if(compression == TRUE) return(compress(imwd, verbose = verbose)) else return(imwd) } "threshold.imwdc"<- function(imwdc, verbose = FALSE, ...) { warning("You are probably thresholding an already thresholded object") imwd <- uncompress(imwdc, verbose = verbose) return(threshold(imwd, verbose = TRUE, ...)) } "threshold.irregwd"<- function(irregwd, levels = 3:(nlevelsWT(wd)- 1), type = "hard", policy = "universal", by.level = FALSE, value = 0, dev = var, boundary = FALSE, verbose = FALSE, return.threshold = FALSE, force.sure = FALSE, cvtol = 0.01, Q = 0.050000000000000003, alpha = 0.050000000000000003, ...) 
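#
# Example (not run): image denoising with the threshold.imwd method above.
# The lennon image, the added noise standard deviation and the universal
# policy are illustrative choices; with compression = TRUE (the default)
# the result is a compressed imwdc object.
#
#   data(lennon)
#   lnoise <- lennon + rnorm(length(lennon), sd = 20)
#   lwd <- imwd(lnoise)
#   lwdT <- threshold(lwd, policy = "universal", type = "hard")
#   image(imwr(lwdT))
#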
{ if(verbose == TRUE) cat("threshold.irregwd:\n") if(IsEarly(wd)) { ConvertMessage() stop() } # # Check class of wd # if(verbose == TRUE) cat("Argument checking\n") ctmp <- class(irregwd) if(is.null(ctmp)) stop("irregwd has no class") else if(ctmp != "irregwd") stop("irregwd is not of class irregwd") wd <- irregwd class(wd) <- "wd" if(policy != "universal" && policy != "manual" && policy != "probability" && policy != "sure" && policy != "mannum" && policy != "cv" && policy != "fdr" && policy != "op1" && policy != "op2" && policy != "LSuniversal") stop("Only policys are universal, manual, mannum, sure, LSuniversal, cv, op1, op2 and probability at present" ) if(type != "hard" && type != "soft") stop("Only hard or soft thresholding at present") r <- range(levels) if(r[1] < 0) stop("levels out of range, level too small") if(r[2] > nlevelsWT(wd)- 1) stop("levels out of range, level too big") if(r[1] > nlevelsWT(wd)- 1) { warning("no thresholding done") return(wd) } if(r[2] < 0) { warning("no thresholding done") return(wd) } n <- 2^nlevelsWT(wd) nthresh <- length(levels) # # Estimate sigma if(by.level == FALSE) { d <- NULL ccc <- NULL for(i in 1:nthresh) { d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) ccc <- c(ccc, accessc(irregwd, level = levels[i], boundary = boundary)) } ind <- (1:length(d))[abs(ccc) > 1.0000000000000001e-05] sigma <- sqrt(dev(d[ind]/sqrt(ccc[ind]))) sigma <- rep(sigma, nthresh) } else { for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary ) ccc <- accessc(irregwd, level = levels[i], boundary = boundary) ind <- (1:length(d))[abs(ccc) > 1.0000000000000001e-05] sigma[i] <- sqrt(dev(d[ind]/sqrt(ccc[ind]))) } } if(verbose == TRUE) print(sigma) d <- NULL ccc <- NULL # # Check to see if we're thresholding a complex wavelet transform. # We can only do certain things in this case # if(is.complex(wd$D)) { stop("Complex transform not implemented") } # # # Decide which policy to adopt # The next if-else construction should define a vector called # "thresh" that contains the threshold value for each level # in "levels". This may be the same threshold value # a global threshold. 
# if(policy == "universal") { # # # Donoho and Johnstone's universal policy # if(verbose == TRUE) cat("Universal policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) nd <- length(d) thresh <- sqrt(2 * log(nd)) if(verbose == TRUE) cat("Global threshold is: ", thresh, "\n") thresh <- rep(thresh, length = nthresh) } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) nd <- length(d) thresh[i] <- sqrt(2 * log(nd)) if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } expo <- 1 } else if(policy == "LSuniversal") { # # # The universal policy modified for local spectral smoothing # This should only be used via the LocalSpec function # if(verbose == TRUE) cat("Local spectral universal policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) nd <- length(d) thresh <- log(nd) if(verbose == TRUE) cat("Global threshold is: ", thresh, "\n") thresh <- rep(thresh, length = nthresh) } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) nd <- length(d) thresh[i] <- log(nd) if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } expo <- 1 } else if(policy == "sure") { if(type == "hard") stop("Can only do soft thresholding with sure policy") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) { d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) ccc <- c(ccc, accessc(irregwd, level = levels[i ], boundary = boundary)) } ind <- (1:length(d))[abs(ccc) > 1.0000000000000001e-05] nd <- length(ind) neta.d <- (log(nd, base = 2)^(3/2)) sd2 <- (sum((d[ind]/(sigma[1] * ccc)[ind])^2 - 1)/nd) if(verbose == TRUE) { cat("neta.d is ", neta.d, "\nsd2 is ", sd2, "\n") cat("nd is ", nd, "\n") cat("noise.level ", noise.level, "\n") } if(force.sure == TRUE || sd2 > neta.d/sqrt(nd)) { if(verbose == TRUE) { cat("SURE: Using SURE\n") } thresh <- newsure(sqrt(ccc) * sigma[1], d) expo <- 0 } else { if(verbose == TRUE) cat("SURE: (sparse) using sqrt 2log n\n") thresh <- sqrt(2 * log(nd)) } thresh <- rep(thresh, length = nthresh) if(verbose == TRUE) cat("Global threshold is ", thresh, "\n") } else { # # # By level is true # print("Sure for level- and coefficient-dependenet thresholding is not adapted" ) if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) collect <- NULL for(i in 1:nthresh) collect <- c(collect, accessD(wd, level = levels[i], boundary = boundary)) noise.level <- sqrt(dev(collect)) for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) nd <- length(d) neta.d <- (log(nd, base = 2)^(3/2)) sd2 <- (sum((d/noise.level)^2 - 1)/nd) if(verbose == TRUE) { cat("neta.d is ", neta.d, "\nsd2 is ", sd2, "\n") cat("nd is ", nd, "\n") cat("noise.level ", noise.level, "\n") } if(force.sure == TRUE || sd2 > neta.d/sqrt(nd)) { if(verbose == TRUE) { cat("SURE: Using SURE\n") } thresh[i] <- sure(d/noise.level) } else { if(verbose == TRUE) cat("SURE: (sparse) using sqrt 2log n\n") thresh[i] <- sqrt(2 * log(nd)) } if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } } else if(policy == "manual") { # 
# # User supplied threshold policy # if(verbose == TRUE) cat("Manual policy\n") thresh <- rep(value, length = nthresh) expo <- 1 if(length(value) != 1 && length(value) != nthresh) warning("your threshold is not the same length as number of levels" ) } else if(policy == "mannum") { if(verbose == TRUE) { cat("Manual policy using ", value, " of the") cat(" largest coefficients\n") } if(value < 1) { stop("Have to select an integer larger than 1 for value" ) } else if(value > length(wd$D)) { stop(paste("There are only ", length(wd$D), " coefficients, you specified ", value)) } coefs <- wd$D scoefs <- sort(abs(coefs)) scoefs <- min(rev(scoefs)[1:value]) wd$D[abs(wd$D) < scoefs] <- 0 return(wd) } else if(policy == "probability") { # # # Threshold is quantile based # if(verbose == TRUE) cat("Probability policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) if(length(value) != 1) stop("Length of value should be 1") thresh <- rep(quantile(abs(d), prob = value), length = nthresh) if(verbose == TRUE) cat("Global threshold is: ", thresh[1], "\n") } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) if(length(value) == 1) value <- rep(value, nthresh) if(length(value) != nthresh) stop("Wrong number of probability values") for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) thresh[i] <- quantile(abs(d), prob = value[i]) if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } } if(return.threshold == TRUE) return(thresh) for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) ccc <- accessc(irregwd, level = levels[i], boundary = boundary) actthresh <- thresh[i] * (sigma[i] * sqrt(ccc))^expo # is vector if(type == "hard") { d[abs(d) <= actthresh] <- 0 if(verbose == TRUE) cat("Level: ", levels[i], " there are ", sum(d == 0), " zeroes\n") } else if(type == "soft") { d <- (d * (abs(d) - actthresh) * (abs(d) > actthresh))/ abs(d) d[is.na(d)] <- 0 } wd <- putD(wd, level = levels[i], v = d, boundary = boundary) } wd } "threshold.mwd"<- function(mwd, levels = 3:(nlevelsWT(mwd)- 1), type = "hard", policy = "universal", boundary = FALSE, verbose = FALSE, return.threshold = FALSE, threshold = 0, covtol = 1.0000000000000001e-09, robust = TRUE, return.chisq = FALSE, bivariate = TRUE, ...) 
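#
# Example (not run): thresholding coefficients from irregularly spaced
# data with the method above.  The sketch assumes the usual pipeline of
# makegrid() followed by irregwd(); the design, signal and noise level are
# illustrative.
#
#   tt <- sort(runif(100))
#   yy <- sin(4 * pi * tt) + rnorm(100, sd = 0.2)
#   gg <- makegrid(t = tt, y = yy)
#   iwd <- irregwd(gg)
#   iwdT <- threshold(iwd, policy = "universal", type = "hard")
#   ts.plot(wr(iwdT))
#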
{ #threshold.mwd #thresholds a multiple wavelet object #Tim Downie #last updated May 1996 # # # Check arguments # if(verbose == TRUE) cat("threshold.mwd:\n") if(verbose == TRUE) cat("Argument checking\n") ctmp <- class(mwd) if(is.null(ctmp)) stop("mwd has no class") else if(ctmp != "mwd") stop("mwd is not of class mwd") if(policy != "manual" && policy != "universal" && policy != "visushrink") stop("Only policies are universal manual and visushrink at present" ) if(type != "hard" && type != "soft") stop("Only hard or soft thresholding at present") nlevels <- nlevelsWT(mwd) npsi <- mwd$filter$npsi r <- range(levels) if(r[1] < 0) stop("levels out of range, level too small") if(r[2] > nlevelsWT(mwd)- 1) stop("levels out of range, level too big") if(r[1] > nlevelsWT(mwd)- 1) { warning("no thresholding done, returning input") return(mwd) } if(r[2] < 0) { warning("no thresholding done, returning input") return(mwd) } if(policy == "manual" && threshold <= 0) stop( "If you want manual thresholding, you must supply\na positive threshold" ) # # #Apply the a single wavelet policy to multiwavelets #so far only universal thresholding #visushrink visushrink can be done if using the single policy # if(bivariate == FALSE) { if(verbose == TRUE) cat("Thresholding multiple wavelets using single wavelet universal thresholding\n" ) noise.level <- rep(0, npsi) thresh <- rep(0, npsi) ninlev <- rep(0, length(levels)) if(robust == FALSE) dev <- var else dev <- mad D <- NULL for(i in levels) { index <- i + 1 - levels[1] ninlev[index] <- dim(accessD(mwd, level = i, boundary = boundary))[2] D <- matrix(c(D, accessD(mwd, level = i, boundary = boundary)), nrow = npsi) } nD <- dim(D)[2] for(i in 1:npsi) { noise.level[i] <- sqrt(dev(D[i, ])) if(policy == "visushrink") thresh[i] <- (sqrt(2 * log(nD)) * noise.level[i ])/sqrt(nD) else if(policy == "manual") thresh[i] <- threshold[i] else thresh[i] <- (sqrt(2 * log(nD)) * noise.level[i]) } if(verbose == TRUE) { cat("Threshold for each wavelet is: ", thresh, "\n") cat("noise levels are : ", noise.level, "\n") } for(i in 1:npsi) { d <- D[i, ] if(type == "hard") { d[abs(d) <= thresh[i]] <- 0 } else if(type == "soft") { d <- sign(d) * (abs(d) - thresh[i]) * (abs(d) > thresh[i]) } D[i, ] <- d } jump <- 1 for(i in levels) { index <- i + 1 - levels[1] mwd <- putD(mwd, level = i, M = D[, jump:(jump + ninlev[ index] - 1)], boundary = boundary) jump <- jump + ninlev[index] } if(return.threshold == TRUE) return(thresh) else return(mwd) } # # #If we get here then do Multivariate thresholding # if(policy == "universal" || policy == "manual") { n <- 0 nj <- rep(0, length(levels)) chisq <- NULL chisqkeep <- NULL chisqnewkeep <- NULL for(i in 1:length(levels)) { level <- levels[i] d <- accessD(mwd, level = level) nj[i] <- dim(d)[2] Y <- rep(0, nj[i]) # VHAT is the Var/Covar matrix of the data at each level # estinated using normal estimates or robust estimates # if(robust == FALSE) VHAT <- var(t(d)) if(robust == TRUE) VHAT <- rcov(d) # # If the smallest eigen value of VHAT is less than covtol # we may run into problems when inverting VHAT # so code chisq as -1 and return the same vector coeff as was input # if(min(abs(eigen(VHAT, only.values = TRUE)$values)) < covtol) { warning(paste( "singular variance structure in level ", level, "this level not thresholded")) Y <- rep(-1, nj[i]) } else { VINV <- solve(VHAT) for(s in 1:npsi) Y <- Y + d[s, ]^2 * VINV[s, s] for(s in 2:npsi) for(t in 1:(s - 1)) Y <- Y + 2 * d[s, ] * d[t, ] * VINV[s, t] n <- n + nj[i] # # The above line means that the 
threshold is caculated using only # the thresholdable coefficients. } chisq <- c(chisq, Y) } } if(policy != "manual") chithresh <- 2 * log(n) else chithresh <- threshold if(return.threshold == TRUE) { return(chithresh) } if(return.chisq == TRUE) return(chisq) lc <- length(chisq) dnew <- matrix(rep(0, 2 * lc), nrow = 2) d <- NULL for(i in 1:length(levels)) { d <- matrix(c(d, accessD(mwd, level = levels[i])), nrow = 2) } if(type == "hard") { for(i in 1:lc) { keep <- 1 * ((chisq[i] >= chithresh) || (chisq[i] == -1 )) dnew[, i] <- d[, i] * keep } } if(type == "soft") { for(i in 1:lc) { if(chisq[i] != -1) chisqnew <- max(chisq[i] - chithresh, 0) if(chisq[i] > 0) shrink <- (max(chisq[i] - chithresh, 0))/chisq[ i] else shrink <- 0 dnew[, i] <- d[, i] * shrink } } low <- 1 for(i in 1:length(levels)) { mwd <- putD(mwd, level = levels[i], M = dnew[, low:(low - 1 + nj[i])]) low <- low + nj[i] } if(verbose == TRUE) cat("returning wavelet decomposition\n") return(mwd) } "threshold.wd"<- function(wd, levels = 3:(nlevelsWT(wd)- 1), type = "soft", policy = "sure", by.level = FALSE, value = 0, dev = madmad, boundary = FALSE, verbose = FALSE, return.threshold = FALSE, force.sure = FALSE, cvtol = 0.01, cvmaxits=500, Q = 0.050000000000000003, OP1alpha = 0.050000000000000003, alpha = 0.5, beta = 1, C1 = NA, C2 = NA, C1.start = 100, al.check=TRUE, ...) { if(verbose == TRUE) cat("threshold.wd:\n") if(IsEarly(wd)) { ConvertMessage() stop() } # # Check class of wd # if(verbose == TRUE) cat("Argument checking\n") ctmp <- class(wd) if(is.null(ctmp)) stop("wd has no class") else if(ctmp != "wd") stop("wd is not of class wd") if(policy != "universal" && policy != "manual" && policy != "probability" && policy != "sure" && policy != "mannum" && policy != "cv" && policy != "fdr" && policy != "op1" && policy != "op2" && policy != "LSuniversal" && policy != "BayesThresh") stop("Only policys are universal, BayesThresh, manual, mannum, sure, LSuniversal, cv, op1, op2 and probability at present" ) if(type != "hard" && type != "soft") stop("Only hard or soft thresholding at present") r <- range(levels) if(r[1] < 0) stop("levels out of range, level too small. Minimum level is 0" ) if(r[2] > nlevelsWT(wd) - 1) stop(paste("levels out of range, level too big. Maximum level is", nlevelsWT(wd) - 1)) if(r[1] > nlevelsWT(wd)- 1) { warning("no thresholding done") return(wd) } if(r[2] < 0) { warning("no thresholding done") return(wd) } if (al.check==TRUE) if (all(sort(levels)==levels)==FALSE) warning("Entries in levels vector are not ascending. Please check this is what you intend. If so, you can turn this warning off with al.check argument") d <- NULL n <- 2^nlevelsWT(wd) nthresh <- length(levels) # # # Check to see if we're thresholding a complex wavelet transform. # We can only do certain things in this case # if(is.complex(wd$D)) { stop("Please use cthresh package for complex-valued wavelet shrinkage") } # # # Decide which policy to adopt # The next if-else construction should define a vector called # "thresh" that contains the threshold value for each level # in "levels". This may be the same threshold value # a global threshold. 
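#
# Example (not run): multiwavelet thresholding with the method completed
# above.  By default the bivariate chi-squared rule is applied across both
# wavelet components; the test signal and noise level are illustrative.
#
#   y <- example.1()$y
#   ynoise <- y + rnorm(512, sd = 0.1)
#   ymwd <- mwd(ynoise)
#   ymwdT <- threshold(ymwd)               # universal policy, hard, bivariate
#   ts.plot(wr(ymwdT))
#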
# if(policy == "universal") { # # # Donoho and Johnstone's universal policy # if(verbose == TRUE) cat("Universal policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) noise.level <- sqrt(dev(d)) nd <- length(d) thresh <- sqrt(2 * log(nd)) * noise.level if(verbose == TRUE) cat("Global threshold is: ", thresh, "\n") thresh <- rep(thresh, length = nthresh) } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) noise.level <- sqrt(dev(d)) nd <- length(d) thresh[i] <- sqrt(2 * log(nd)) * noise.level if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } } else if(policy == "LSuniversal") { # # # The universal policy modified for local spectral smoothing # This should only be used via the LocalSpec function # if(verbose == TRUE) cat("Local spectral universal policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) noise.level <- sqrt(dev(d)) nd <- length(d) thresh <- log(nd) * noise.level if(verbose == TRUE) cat("Global threshold is: ", thresh, "\n") thresh <- rep(thresh, length = nthresh) } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) noise.level <- sqrt(dev(d)) nd <- length(d) thresh[i] <- log(nd) * noise.level if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } } else if(policy == "sure") { if(type == "hard") stop("Can only do soft thresholding with sure policy") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) noise.level <- sqrt(dev(d)) nd <- length(d) neta.d <- (log(nd, base = 2)^(3/2)) sd2 <- (sum((d/noise.level)^2 - 1)/nd) if(verbose == TRUE) { cat("neta.d is ", neta.d, "\nsd2 is ", sd2, "\n") cat("nd is ", nd, "\n") cat("noise.level ", noise.level, "\n") } if(force.sure == TRUE || sd2 > neta.d/sqrt(nd)) { if(verbose == TRUE) { cat("SURE: Using SURE\n") } thresh <- sure(d/noise.level) } else { if(verbose == TRUE) cat("SURE: (sparse) using sqrt 2log n\n") thresh <- sqrt(2 * log(nd)) } thresh <- rep(thresh * noise.level, length = nthresh) if(verbose == TRUE) cat("Global threshold is ", thresh, "\n") } else { # # # By level is true # if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) collect <- NULL for(i in 1:nthresh) collect <- c(collect, accessD(wd, level = levels[i], boundary = boundary)) noise.level <- sqrt(dev(collect)) for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) nd <- length(d) neta.d <- (log(nd, base = 2)^(3/2)) sd2 <- (sum((d/noise.level)^2 - 1)/nd) if(verbose == TRUE) { cat("neta.d is ", neta.d, "\nsd2 is ", sd2, "\n") cat("nd is ", nd, "\n") cat("noise.level ", noise.level, "\n") } if(force.sure == TRUE || sd2 > neta.d/sqrt(nd)) { if(verbose == TRUE) { cat("SURE: Using SURE\n") } thresh[i] <- sure(d/noise.level) } else { if(verbose == TRUE) cat("SURE: (sparse) using sqrt 2log n\n") thresh[i] <- sqrt(2 * log(nd)) } if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } } else if(policy == "BayesThresh") { # # Check that all hyperparameters of the prior are 
non-negative # if(alpha < 0) stop("parameter alpha is negative") if(beta < 0) stop("parameter beta is negative") nthresh <- length(levels) nsignal <- rep(0, nthresh) noise.level <- sqrt(dev(accessD(wd, level = (nlevelsWT(wd)- 1)))) v <- 2^( - alpha * levels) if(is.na(C1)) { # # Estimation of C1 and C2 via universal threshodling # if(C1.start < 0) stop("C1.start is negative") universal <- threshold(wd, policy = "universal", type = "hard", dev = dev, by.level = FALSE, levels = levels) sum2 <- rep(0, nthresh) for(i in 1:nthresh) { dun <- accessD(universal, level = levels[i]) nsignal[i] <- sum(abs(dun) > 10^-10) if(nsignal[i] > 0) sum2[i] <- sum(dun[abs(dun) > 0]^2) } if(sum(nsignal) == 0) { wd <- nullevels(wd, levelstonu = levels) if(verbose == TRUE) cat( "hyperparameters of the prior are: alpha = ", alpha, "C1 = 0", "beta = ", beta, "C2 = 0\n") return(wd) } else { fntoopt <- function(C, nsignal, noise.level, wd, sum2, v) { ans<- nsignal * (log(noise.level^2 + C^2 * v) - 2 * log(pnorm(( - noise.level * sqrt(2 * log(2^nlevelsWT(wd))))/sqrt(noise.level^2 + C^2 * v)))) + sum2/(noise.level^2 + C^2 * v) sum(ans) } C1 <- optimize(f=fntoopt, interval=c(0, 50*sqrt(C1.start)), nsignal=nsignal, noise.level=noise.level, wd=wd, sum2=sum2, v=v)$minimum^2 } } if(C1 < 0) stop("parameter C1 is negative") tau2 <- C1 * v if(is.na(C2)) { p <- 2 * pnorm(( - noise.level * sqrt(2 * log(2^wd$ nlevels)))/sqrt(noise.level^2 + tau2)) if(beta == 1) C2 <- sum(nsignal/p)/nlevelsWT(wd) else C2 <- (1 - 2^(1 - beta))/(1 - 2^((1 - beta) * wd$ nlevels)) * sum(nsignal/p) } if(C2 < 0) stop("parameter C2 is negative") if(verbose == TRUE) cat("noise.level is: ", round(noise.level, 4), "\nhyperparameters of the prior are: alpha = ", alpha, "C1 = ", round(C1, 4), "beta = ", beta, "C2 = ", round(C2, 4), "\n") # # # Bayesian Thresholding # if(C1 == 0 | C2 == 0) wd <- nullevels(wd, levelstonu = levels) else { pr <- pmin(1, C2 * 2^( - beta * levels)) rat <- tau2/(noise.level^2 + tau2) # for(i in 1:nthresh) { d <- accessD(wd, level = levels[i]) w <- (1 - pr[i])/pr[i]/sqrt((noise.level^2 * rat[i])/tau2[i]) * exp(( - rat[i] * d^2)/2/ noise.level^2) z <- 0.5 * (1 + pmin(w, 1)) d <- sign(d) * pmax(0, rat[i] * abs(d) - noise.level * sqrt(rat[i]) * qnorm(z)) wd <- putD(wd, level = levels[i], v = d) } } return(wd) } else if(policy == "cv") { # # # Threshold chosen by cross-validation # if(verbose == TRUE) cat("Cross-validation policy\n") # if(by.level == TRUE) stop( "Cross-validation policy does not permit by.level\n\t\t\tthresholding (yet)" ) # # Reconstruct the function for CWCV (this should be quick) # ynoise <- wr(wd) thresh <- CWCV(ynoise = ynoise, x = 1:length(ynoise), filter.number = wd$filter$filter.number, family = wd$ filter$family, thresh.type = type, tol = cvtol, maxits=cvmaxits, verbose = 0, plot.it = FALSE, ll = min(levels))$xvthresh thresh <- rep(thresh, length = nthresh) } else if(policy == "fdr") { # # # Threshold chosen by FDR-procedure # if(verbose == TRUE) cat("FDR policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) { d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) } if(length(value) != 1) stop("Length of value should be 1") noise.level <- sqrt(dev(accessD(wd, level = (nlevelsWT(wd)- 1)))) minit <- length(d) dinit <- d thinit <- qnorm(1 - Q/2) * noise.level if(log(n, 2) > 12) ninit <- 3 else { if(log(n, 2) > 10) ninit <- 2 else ninit <- 1 } for(k in seq(1, ninit)) { dinit1 <- dinit[abs(dinit) >= thinit] minit <- length(dinit1) if(minit == 0) thresh <- 
max(abs(d)) * 1.0001 else { thinit <- qnorm(1 - (Q * minit)/(2 * n)) * noise.level minit1 <- length(dinit1[abs(dinit1) >= thinit ]) if(minit1 == minit || minit1 == 0) break dinit <- dinit1 } } if(noise.level > 0) { m <- length(d) minit <- length(dinit) p <- (2 - 2 * pnorm(abs(dinit)/noise.level)) index <- order(p) j <- seq(1, minit) m0 <- max(j[p[index] <= (Q * j)/m]) if(m0 != "NA" && m0 < minit) thresh <- abs(dinit[index[m0]]) else { if(m0 == "NA") thresh <- max(abs(dinit)) * 1.0001 else thresh <- 0 } } else thresh <- 0 thresh <- rep(thresh, length = nthresh) if(verbose == TRUE) cat("Global threshold is: ", thresh[1], "\n", "sigma is: ", noise.level, "\n") } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) m <- length(d) noise.level <- sqrt(dev(d)) thinit <- qnorm(1 - Q/2) * noise.level dinit <- d[abs(d) >= thinit] minit <- length(dinit) if(minit == 0) thresh[i] <- max(abs(d)) * 1.0001 else { if(noise.level > 0) { p <- (2 - 2 * pnorm(abs(dinit)/noise.level) ) index <- order(p) j <- seq(1, minit) m0 <- max(j[p[index] <= (Q * j)/m]) if(m0 != "NA" && m0 < minit) thresh[i] <- abs(dinit[index[m0]]) else { if(m0 == "NA") thresh[i] <- max(abs(dinit)) * 1.0001 else thresh[i] <- 0 } } else thresh[i] <- 0 } if(verbose == TRUE) cat("Threshold for level: ", levels[i], "is", thresh[i], "\n") } } } else if(policy == "op1") { # # # Ogden and Parzen's first policy # if(verbose == TRUE) cat("Ogden and Parzen's first policy\n") if(by.level == FALSE) stop("Ogden and Parzen's first policy only computes level-dependent policies" ) thresh <- TOthreshda1(ywd = wd, alpha = OP1alpha, verbose = verbose, return.threshold = return.threshold) return(thresh) } else if(policy == "op2") { # # # Ogden and Parzen's second policy # if(verbose == TRUE) cat("Ogden and Parzen's second policy\n") if(by.level == FALSE) stop("Ogden and Parzen's second policy only computes level-dependent policies" ) thresh <- TOthreshda2(ywd = wd, alpha = OP1alpha, verbose = verbose, return.threshold = return.threshold) return(thresh) } else if(policy == "manual") { # # # User supplied threshold policy # if(verbose == TRUE) cat("Manual policy\n") thresh <- rep(value, length = nthresh) if(length(value) != 1 && length(value) != nthresh) warning("your threshold is not the same length as number of levels" ) } else if(policy == "mannum") { if(verbose == TRUE) { cat("Manual policy using ", value, " of the") cat(" largest coefficients\n") } if(value < 1) { stop("Have to select an integer larger than 1 for value" ) } else if(value > length(wd$D)) { stop(paste("There are only ", length(wd$D), " coefficients, you specified ", value)) } coefs <- wd$D scoefs <- sort(abs(coefs)) scoefs <- min(rev(scoefs)[1:value]) wd$D[abs(wd$D) < scoefs] <- 0 return(wd) } else if(policy == "probability") { # # # Threshold is quantile based # if(verbose == TRUE) cat("Probability policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) d <- c(d, accessD(wd, level = levels[i], boundary = boundary)) if(length(value) != 1) stop("Length of value should be 1") thresh <- rep(quantile(abs(d), prob = value), length = nthresh) if(verbose == TRUE) cat("Global threshold is: ", thresh[1], "\n") } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) if(length(value) == 1) value <- rep(value, nthresh) if(length(value) != nthresh) stop("Wrong number of probability values") for(i in 1:nthresh) { d 
<- accessD(wd, level = levels[i], boundary = boundary) thresh[i] <- quantile(abs(d), prob = value[i]) if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } } if(return.threshold == TRUE) return(thresh) for(i in 1:nthresh) { d <- accessD(wd, level = levels[i], boundary = boundary) if(type == "hard") { d[abs(d) <= thresh[i]] <- 0 } else if(type == "soft") { d <- (d * (abs(d) - thresh[i]) * (abs(d) > thresh[i]))/ abs(d) d[is.na(d)] <- 0 } if(verbose == TRUE) cat("Level: ", levels[i], " there are ", sum(d == 0), " zeroes\n") wd <- putD(wd, level = levels[i], v = d, boundary = boundary) } wd } "threshold.wd3D"<- function(wd3D, levels = 3:(nlevelsWT(wd3D)- 1), type = "hard", policy = "universal", by.level = FALSE, value = 0, dev = var, verbose = FALSE, return.threshold = FALSE, ...) { if(verbose == TRUE) cat("threshold.wd3D:\n") # # # Check class of wd3D # if(verbose == TRUE) cat("Argument checking\n") ctmp <- class(wd3D) if(is.null(ctmp)) stop("wd3D has no class") else if(ctmp != "wd3D") stop("wd3D is not of class wd3D") if(policy != "universal" && policy != "manual") stop("Only policys are universal, manual") if(type != "hard" && type != "soft") stop("Only hard or soft thresholding at present") r <- range(levels) if(r[1] < 0) stop("levels out of range, level too small") if(r[2] > nlevelsWT(wd3D) - 1) stop(paste("levels out of range, level too big. Maximum level is ", nlevelsWT(wd3D) - 1)) if(r[1] > nlevelsWT(wd3D) - 1) { warning("no thresholding done") return(wd3D) } if(r[2] < 0) { warning("no thresholding done") return(wd3D) } d <- NULL n <- (2^nlevelsWT(wd3D))^3 nthresh <- length(levels) # # # # Decide which policy to adopt # The next if-else construction should define a vector called # "thresh" that contains the threshold value for each level # in "levels". This may be the same threshold value # a global threshold. 
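#
# Illustrative usage sketch for the thresholding policies implemented above
# (a hypothetical example on simulated data, not taken from the package
# documentation; the sinusoid-plus-noise signal and parameter choices are
# assumptions made purely for illustration):
#
#	x <- seq(0, 1, length = 512)
#	y <- sin(4 * pi * x) + rnorm(512, sd = 0.3)
#	ywd  <- wd(y, filter.number = 10, family = "DaubLeAsymm")
#	ywdT <- threshold(ywd, policy = "universal", type = "hard",
#		by.level = FALSE)
#	yhat <- wr(ywdT)
#
# Replacing policy = "universal" by "cv", "fdr", "op1", "op2", "manual",
# "mannum" or "probability" selects the alternative rules coded above;
# threshold() also dispatches on wd3D, wp and wst objects via the methods
# defined below.
#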
# if(policy == "universal") { # # # Donoho and Johnstone's universal policy # if(verbose == TRUE) cat("Universal policy...") if(by.level == FALSE) { if(verbose == TRUE) cat("All levels at once\n") for(i in 1:nthresh) { v <- accessD(wd3D, level = levels[i]) d <- c(v$GHH, v$HGH, v$GGH, v$HHG, v$GHG, v$HGG, v$GGG) if(levels[i] == 0) d <- c(d, v$HHH) } noise.level <- sqrt(dev(d)) nd <- length(d) thresh <- sqrt(2 * log(nd)) * noise.level if(verbose == TRUE) cat("Global threshold is: ", thresh, "\n") thresh <- rep(thresh, length = nthresh) } else { if(verbose == TRUE) cat("Level by level\n") thresh <- rep(0, length = nthresh) for(i in 1:nthresh) { v <- accessD(wd3D, level = levels[i]) d <- c(v$GHH, v$HGH, v$GGH, v$HHG, v$GHG, v$HGG, v$GGG) if(levels[i] == 0) d <- c(d, v$HHH) noise.level <- sqrt(dev(d)) nd <- length(d) thresh[i] <- sqrt(2 * log(nd)) * noise.level if(verbose == TRUE) cat("Threshold for level: ", levels[i], " is ", thresh[i], "\n") } } } else if(policy == "manual") { # # # User supplied threshold policy # if(verbose == TRUE) cat("Manual policy\n") thresh <- rep(value, length = nthresh) if(length(value) != 1 && length(value) != nthresh) warning("your threshold is not the same length as number of levels" ) } if(return.threshold == TRUE) return(thresh) blocktypes <- c("GHH", "HGH", "GGH", "HHG", "GHG", "HGG", "GGG") for(i in 1:nthresh) { if(levels[i] == 0) lblocks <- c("HHH", blocktypes) else lblocks <- blocktypes nblocks <- length(lblocks) thedim <- rep(2^(levels[i]), 3) for(j in 1:nblocks) { d <- as.vector(accessD(wd3D, level = levels[i], block = lblocks[j])) if(type == "hard") { d[abs(d) <= thresh[i]] <- 0 if(verbose == TRUE) cat("Level: ", levels[i], " there are ", sum( d == 0), " zeroes\n") } else if(type == "soft") { d <- (d * (abs(d) - thresh[i]) * (abs(d) > thresh[i]))/abs(d) d[is.na(d)] <- 0 } vinsert <- list(lev = levels[i], block = lblocks[j], a = array(d, dim = thedim)) wd3D <- putD(wd3D, v = vinsert) } } wd3D } "threshold.wp"<- function(wp, levels = 3:(nlevelsWT(wp) - 1), dev = madmad, policy = "universal", value = 0, by.level = FALSE, type = "soft", verbose = FALSE, return.threshold = FALSE, cvtol = 0.01, cvnorm = l2norm, add.history = TRUE, ...) { # # Do some arg checking # rn <- range(levels) if(rn[1] < 0) stop("all selected levels must be larger than zero") if(rn[2] > nlevelsWT(wp) - 1) stop(paste("all selected levels must be smaller than", nlevelsWT( wp) - 1)) nr <- nrow(wp$wp) nc <- ncol(wp$wp) # # # Figure out the threshold # if(policy == "manual") { if(length(value) == 1) { if(verbose == TRUE) cat("Univariate threshold\n") threshv <- rep(value, length(levels)) } else if(length(value) == length(levels)) { if(verbose == TRUE) cat("Multivariate threshold\n") threshv <- value } else stop("Manual policy. 
Your threshold vector is neither of length 1 or the length of the number of levels that you wish to threshold" ) } else if(policy == "universal") { if(verbose == TRUE) cat("Universal threshold\n") if(by.level == FALSE) { # # Global threshold # d <- NULL for(lev in 1:length(levels)) { d <- c(d, accessD(wp, level = levels[lev])) } sigma <- dev(d) threshv <- sqrt(2 * log(nc) * sigma) threshv <- rep(threshv, length(levels)) } else { # # # Level by level threshold # threshv <- rep(0, length(levels)) for(lev in 1:length(levels)) { d <- accessD(wp, level = levels[lev]) sigma <- dev(d) threshv[lev] <- sqrt(2 * log(nc) * sigma) } } } if(verbose == TRUE) { cat("Threshold is ") print(threshv) cat("\n") } # # # Now apply the threshold # if(return.threshold == TRUE) return(threshv) for(lev in 1:length(levels)) { if(verbose == TRUE) { cat("Applying threshold ", threshv[lev], " to level ", levels[lev], "\n") } d <- accessD(wp, level = levels[lev]) if(type == "hard") d[abs(d) <= threshv[lev]] <- 0 else if(type == "soft") d <- sign(d) * (abs(d) - threshv[lev]) * (abs(d) > threshv[lev]) wp <- putD(wp, level = levels[lev], v = d) } wp$date <- c(wp$date, date()) if(add.history == TRUE) wp$history <- c(wp$history, paste("Thresholded:", paste( as.character(threshv), collapse = "; "), "Levels: ", paste(as.character(levels), collapse = "; "), "Policy: ", policy, "Type: ", type)) wp } "threshold.wst"<- function(wst, levels = 3:(nlevelsWT(wst) - 1), dev = madmad, policy = "universal", value = 0, by.level = FALSE, type = "soft", verbose = FALSE, return.threshold = FALSE, cvtol = 0.01, cvnorm = l2norm, add.history = TRUE, ...) { # # Do some arg checking # call <- match.call() rn <- range(levels) if(rn[1] < 0) stop("all selected levels must be larger than zero") if(rn[2] > nlevelsWT(wst) - 1) stop(paste("all selected levels must be smaller than", nlevelsWT( wst) - 1)) nr <- nrow(wst$wp) nc <- ncol(wst$wp) # # # Figure out the threshold # if(policy == "manual") { if(length(value) == 1) { if(verbose == TRUE) cat("Univariate threshold\n") threshv <- rep(value, length(levels)) } else if(length(value) == length(levels)) { if(verbose == TRUE) cat("Multivariate threshold\n") threshv <- value } else stop("Manual policy. 
Your threshold vector is neither of length 1 or the length of the number of levels that you wish to threshold" ) } else if(policy == "universal") { if(verbose == TRUE) cat("Universal threshold\n") if(by.level == FALSE) { # # Global threshold # d <- NULL for(lev in 1:length(levels)) { d <- c(d, accessD(wst, level = levels[lev])) } sigma <- dev(d) threshv <- sqrt(2 * log(nc) * sigma) threshv <- rep(threshv, length(levels)) } else { # # # Level by level threshold # threshv <- rep(0, length(levels)) for(lev in 1:length(levels)) { d <- accessD(wst, level = levels[lev]) sigma <- dev(d) threshv[lev] <- sqrt(2 * log(nc) * sigma) } } } else if(policy == "LSuniversal") { if(verbose == TRUE) cat("Local Spec universal threshold\n") if(by.level == FALSE) { # # Global threshold # d <- NULL for(lev in 1:length(levels)) { d <- c(d, accessD(wst, level = levels[lev])) } sigma <- dev(d) threshv <- log(nc) * sqrt(sigma) threshv <- rep(threshv, length(levels)) } else { # # # Level by level threshold # threshv <- rep(0, length(levels)) for(lev in 1:length(levels)) { d <- accessD(wst, level = levels[lev]) sigma <- dev(d) threshv[lev] <- log(nc) * sqrt(sigma) } } } else if(policy == "sure") { if(verbose == TRUE) cat("SURE threshold\n") if(by.level == FALSE) { # # Global threshold # d <- NULL for(lev in 1:length(levels)) { d <- c(d, accessD(wst, level = levels[lev])) } sigma <- sqrt(dev(d)) threshv <- sigma * sure(d/sigma) threshv <- rep(threshv, length(levels)) } else { # # # Level by level threshold # threshv <- rep(0, length(levels)) for(lev in 1:length(levels)) { d <- accessD(wst, level = levels[lev]) sigma <- sqrt(dev(d)) threshv[lev] <- sigma * sure(d/sigma) } } } else if(policy == "cv") { if(verbose == TRUE) cat("Cross-validation threshold\n") ynoise <- AvBasis(wst) if(by.level == TRUE) { if(verbose == TRUE) cat("by-level\n") if(length(levels) != 1) warning( "Taking minimum level as first level for level-dependent cross-validation" ) levels <- min(levels):(nlevelsWT(wst) - 1) threshv <- wstCVl(ndata = ynoise, ll = min(levels), type = type, filter.number = wst$filter$ filter.number, family = wst$filter$family, tol = cvtol, verbose = 0, plot.it = FALSE, norm = cvnorm, InverseType = "average")$xvthresh if(verbose == TRUE) cat("Cross-validation threshold is ", threshv, "\n") } else { if(verbose == TRUE) cat("global\n") threshv <- wstCV(ndata = ynoise, ll = min(levels), type = type, filter.number = wst$filter$ filter.number, family = wst$filter$family, tol = cvtol, verbose = 0, plot.it = FALSE, norm = cvnorm, InverseType = "average")$xvthresh threshv <- rep(threshv, length(levels)) } } else { stop(paste("Unknown policy: ", policy)) } if(verbose == TRUE) { cat("Threshold is ") print(threshv) cat("\n") } # # # Now apply the threshold # if(return.threshold == TRUE) return(threshv) for(lev in 1:length(levels)) { if(verbose == TRUE) { cat("Applying threshold ", threshv[lev], " to level ", levels[lev], "(type is ", type, ")\n") } d <- accessD(wst, level = levels[lev]) if(type == "hard") d[abs(d) <= threshv[lev]] <- 0 else if(type == "soft") d <- sign(d) * (abs(d) - threshv[lev]) * (abs(d) > threshv[lev]) wst <- putD(wst, level = levels[lev], v = d) } wst$date <- c(wst$date, date()) if(add.history == TRUE) wst$history <- c(wst$history, paste("Thresholded:", paste( as.character(threshv), collapse = "; "), "Levels: ", paste(as.character(levels), collapse = "; "), "Policy: ", policy, "Type: ", type)) wst } "tpwd"<- function(image, filter.number = 10, family = "DaubLeAsymm", verbose = FALSE) { if(!is.matrix(image)) 
stop("image should be a matrix") nr <- nrow(image) lr <- IsPowerOfTwo(nr) if(is.na(lr)) stop(paste("Number of rows (", nr, ") should be a power of 2.") ) nc <- ncol(image) lc <- IsPowerOfTwo(nc) if(is.na(lc)) stop(paste("Number of cols (", nc, ") should be a power of 2.") ) bc <- "periodic" type <- "wavelet" nbc <- switch(bc, periodic = 1, symmetric = 2) if(is.null(nbc)) stop("Unknown boundary condition") ntype <- switch(type, wavelet = 1, station = 2) # # # Select the appropriate filter # if(verbose == TRUE) cat("...done\nFilter...") filter <- filter.select(filter.number = filter.number, family = family) # # # Build the first/last database # if(verbose == TRUE) cat("...selected\nFirst/last database...") fl.dbaseR <- first.last(LengthH = length(filter$H), DataLength = nr, type = type, bc = bc) # fl.dbaseC <- first.last(LengthH = length(filter$H), DataLength = nc, type = type, bc = bc) # error <- 0 answer <- .C("tpwd", image = as.double(image), nr = as.integer(nr), nc = as.integer(nc), lr = as.integer(lr), lc = as.integer(lc), firstCr = as.integer(fl.dbaseR$first.last.c[, 1]), lastCr = as.integer(fl.dbaseR$first.last.c[, 2]), offsetCr = as.integer(fl.dbaseR$first.last.c[, 3]), firstDr = as.integer(fl.dbaseR$first.last.d[, 1]), lastDr = as.integer(fl.dbaseR$first.last.d[, 2]), offsetDr = as.integer(fl.dbaseR$first.last.d[, 3]), firstCc = as.integer(fl.dbaseC$first.last.c[, 1]), lastCc = as.integer(fl.dbaseC$first.last.c[, 2]), offsetCc = as.integer(fl.dbaseC$first.last.c[, 3]), firstDc = as.integer(fl.dbaseC$first.last.d[, 1]), lastDc = as.integer(fl.dbaseC$first.last.d[, 2]), offsetDc = as.integer(fl.dbaseC$first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), error = as.integer(error), PACKAGE = "wavethresh") theanswer <- list(tpwd = matrix(answer$image, nrow = nr, ncol = nc), filter.number = filter.number, family = family, type = type, bc = bc, date = date()) class(theanswer) <- "tpwd" theanswer } "tpwr"<- function(tpwdobj, verbose = FALSE) { if(!inherits(tpwdobj, "tpwd")) stop("tpwdobj is not of class tpwd") nr <- nrow(tpwdobj$tpwd) lr <- IsPowerOfTwo(nr) nc <- ncol(tpwdobj$tpwd) lc <- IsPowerOfTwo(nc) bc <- tpwdobj$bc type <- tpwdobj$type nbc <- switch(bc, periodic = 1, symmetric = 2) ntype <- switch(type, wavelet = 1, station = 2) # # # Select the appropriate filter # if(verbose == TRUE) cat("...done\nFilter...") filter <- filter.select(filter.number = tpwdobj$filter.number, family = tpwdobj$family) # # # Build the first/last database # if(verbose == TRUE) cat("...selected\nFirst/last database...") fl.dbaseR <- first.last(LengthH = length(filter$H), DataLength = nr, type = type, bc = bc) # fl.dbaseC <- first.last(LengthH = length(filter$H), DataLength = nc, type = type, bc = bc) # error <- 0 answer <- .C("tpwr", image = as.double(tpwdobj$tpwd), nr = as.integer(nr), nc = as.integer(nc), lr = as.integer(lr), lc = as.integer(lc), firstCr = as.integer(fl.dbaseR$first.last.c[, 1]), lastCr = as.integer(fl.dbaseR$first.last.c[, 2]), offsetCr = as.integer(fl.dbaseR$first.last.c[, 3]), firstDr = as.integer(fl.dbaseR$first.last.d[, 1]), lastDr = as.integer(fl.dbaseR$first.last.d[, 2]), offsetDr = as.integer(fl.dbaseR$first.last.d[, 3]), firstCc = as.integer(fl.dbaseC$first.last.c[, 1]), lastCc = as.integer(fl.dbaseC$first.last.c[, 2]), offsetCc = as.integer(fl.dbaseC$first.last.c[, 3]), firstDc = as.integer(fl.dbaseC$first.last.d[, 1]), lastDc = as.integer(fl.dbaseC$first.last.d[, 2]), offsetDc = 
as.integer(fl.dbaseC$first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), error = as.integer(error), PACKAGE = "wavethresh") if(answer$error != 0) stop(paste("Error code was ", answer$error)) theanswer <- matrix(answer$image, nrow = nr, ncol = nc) theanswer } "uncompress"<- function(...) UseMethod("uncompress") "uncompress.default"<- function(v, verbose = FALSE, ...) { ctmp <- class(v) if(is.null(ctmp)) { stop("Object v has no class") } else if(ctmp == "uncompressed") { if(verbose == TRUE) cat("Not compressed\n") return(unclass(v$vector)) } else if(ctmp == "compressed") { answer <- rep(0, length = v$original.length) answer[v$position] <- v$values if(verbose == TRUE) cat("Uncompressed to length ", length(answer), "\n") return(answer) } else stop("v has unknown class") } "uncompress.imwdc"<- function(x, verbose = FALSE, ...) { if(verbose == TRUE) cat("Argument checking\n") ctmp <- class(x) if(is.null(ctmp)) stop("imwd has no class") else if(ctmp != c("imwdc")) stop("imwd is not of class imwdc") unsquished <- list(nlevels = nlevelsWT(x), fl.dbase = x$fl.dbase, filter = x$filter, w0Lconstant = x$w0Lconstant, bc = x$ bc, type = x$type) # # # Go round loop compressing each set of coefficients # for(level in 0:(nlevelsWT(x)- 1)) { if(verbose == TRUE) cat("Level ", level, "\n\t") nm <- lt.to.name(level, "CD") if(verbose == TRUE) cat("CD\t") unsquished[[nm]] <- uncompress.default(x[[nm]], verbose = verbose) nm <- lt.to.name(level, "DC") if(verbose == TRUE) cat("\tDC\t") unsquished[[nm]] <- uncompress.default(x[[nm]], verbose = verbose) nm <- lt.to.name(level, "DD") if(verbose == TRUE) cat("\tDD\t") unsquished[[nm]] <- uncompress.default(x[[nm]], verbose = verbose) } class(unsquished) <- "imwd" if(verbose == TRUE) cat("Overall inflation: Was: ", w <- object.size(x), " Now:", s <- object.size(unsquished), " (", signif((100 * s)/w, digits=3), "%)\n") unsquished } "wavegrow"<- function(n = 64, filter.number = 10, family = "DaubLeAsymm", type = "wavelet", random = TRUE, read.value = TRUE, restart = FALSE) { nlev <- IsPowerOfTwo(n) if(is.na(nlev)) stop("n is not a power of two") coords <- vector("list", nlev) if(type == "wavelet") { x <- 1:(n/2) coords[[nlev]] <- x nn <- n/2 for(i in (nlev - 1):1) { x1 <- x[seq(1, nn - 1, 2)] x2 <- x[seq(2, nn, 2)] x <- (x1 + x2)/2 nn <- nn/2 coords[[i]] <- x } } else for(i in 1:nlev) coords[[i]] <- 1:n if(is.null(dev.list())) stop("Please start 2 graphical devices before using me") if(length(dev.list()) < 2) stop("Please start another graphics device\n") ndev <- length(dev.list()) gd1 <- dev.list()[ndev - 1] gd2 <- dev.list()[ndev] v <- rnorm(n, sd = 1e-10) vwr <- v vwdS <- wd(v, filter.number = filter.number, family = family, type = type) toplev <- nlevelsWT(vwdS) - 1 ans <- "y" while(ans == "y" | ans == "yes" | ans == "Y") { dev.set(which = gd1) ts.plot(v) dev.set(which = gd2) plot(vwdS, NotPlotVal = 0) while(1) { co <- locator(1) if(is.null(co)) break lev <- 1 + toplev - round(co$y) cvec <- coords[[lev + 1]] ix <- (cvec - co$x)^2 nvec <- length(cvec) ix <- (1:nvec)[ix == min(ix)] if(type == "station") { ix <- ix - 2^(nlev - lev - 1) ix <- ((ix - 1) %% n) + 1 } cat("Level ", lev, " Coordinate ", ix, "\n") if(random == TRUE) new <- rnorm(1) else { if(read.value == TRUE) { cat("Type in coefficient value ") new <- scan(n = 1) } else new <- 1 } v <- accessD(vwdS, lev = lev) v[ix] <- new vwdS <- putD(vwdS, lev = lev, v = v) plot(vwdS, NotPlotVal = 0) dev.set(which = gd1) if(type == "station") 
{ vwdWST <- convert(vwdS) vwr <- AvBasis(vwdWST) } else vwr <- wr(vwdS) ts.plot(vwr) dev.set(which = gd2) if(restart == TRUE) { v <- rep(1, n) vwdS <- wd(v, filter.number = filter.number, family = family, type = type) } } cat("Do you want to continue? ") ans <- readline() if(ans == "y" | ans == "yes" | ans == "Y") { v <- rnorm(n, sd = 1e-10) vwdS <- wd(v, filter.number = filter.number, family = family, type = type) } } return(list(ts = vwr, wd = vwdS)) } "wd.int"<- function(data, preferred.filter.number, min.scale, precond) { storage.mode(data) <- "double" storage.mode(preferred.filter.number) <- "integer" storage.mode(min.scale) <- "integer" storage.mode(precond) <- "logical" size <- length(data) storage.mode(size) <- "integer" max.scale <- log(size, 2) filter.history <- integer(max.scale - min.scale) temp <- .C("dec", vect = data, size, preferred.filter.number, min.scale, precond, history = filter.history, PACKAGE = "wavethresh") wav.int.object <- list(transformed.vector = temp$vect, current.scale = min.scale, filters.used = temp$history, preconditioned = precond, date = date()) return(wav.int.object) } "wd3D"<- function(a, filter.number = 10, family = "DaubLeAsymm") { d <- dim(a) if(length(d) != 3) stop(paste("a is not a three-dimensional object")) for(i in 1:3) if(is.na(IsPowerOfTwo(d[i]))) stop(paste("Dimension ", i, " of a is not of dyadic length")) if(any(d != d[1])) stop("Number of elements in each dimension is not identical") error <- 0 nlevels <- IsPowerOfTwo(d[1]) H <- filter.select(filter.number = filter.number, family = family)$H ans <- .C("wd3D", Carray = as.double(a), size = as.integer(d[1]), H = as.double(H), LengthH = as.integer(length(H)), error = as.integer(error), PACKAGE = "wavethresh") if(ans$error != 0) stop(paste("Error code was ", ans$error)) l <- list(a = array(ans$Carray, dim = d), filter.number = filter.number, family = family, date = date(), nlevels = nlevels) class(l) <- "wd3D" l } "wp"<- function(data, filter.number = 10, family = "DaubLeAsymm", verbose = FALSE) { if(verbose == TRUE) cat("Argument checking...") DataLength <- length(data) # # # Check that we have a power of 2 data elements # nlevels <- log(DataLength)/log(2) if(round(nlevels) != nlevels) stop("The length of data is not a power of 2") # if(verbose == TRUE) { cat("There are ", nlevels, " levels\n") } # # Select the appropriate filter # if(verbose == TRUE) cat("...done\nFilter...") filter <- filter.select(filter.number = filter.number, family = family) # # # Compute the decomposition # if(verbose == TRUE) cat("Decomposing...\n") newdata <- c(rep(0, DataLength * nlevels), data) wavelet.packet <- .C("wavepackde", newdata = as.double(newdata), DataLength = as.integer(DataLength), levels = as.integer(nlevels), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), PACKAGE = "wavethresh") wpm <- matrix(wavelet.packet$newdata, ncol = DataLength, byrow = TRUE) wp <- list(wp = wpm, nlevels = nlevels, filter = filter, date = date()) class(wp) <- "wp" wp } "wpst"<- function(data, filter.number = 10, family = "DaubLeAsymm", FinishLevel = 0) { nlev <- nlevelsWT(data) n <- length(data) if(FinishLevel < 0) stop("FinishLevel must be larger than zero") else if(FinishLevel >= nlev) stop(paste("FinishLevel must be < ", nlev)) # lansvec <- n * (2 * n - 1) ansvec <- rep(0, lansvec) # # # Now create vector that keeps track of where levels start/stop # # Note that the vector avixstart stores index entry values in C # notation. 
If you use it in Splus you'll have to add on 1 # npkts <- function(level, nlev) 4^(nlev - level) pktlength <- function(level) 2^level avixstart <- rep(0, nlev + 1) for(i in 1:nlev) avixstart[i + 1] <- avixstart[i] + npkts(i - 1, nlev) * pktlength(i - 1) # # # Copy in original data # ansvec[(avixstart[nlev + 1] + 1):lansvec] <- data # # # Call the C routine # filter <- filter.select(filter.number = filter.number, family = family) ans <- .C("wpst", ansvec = as.double(ansvec), lansvec = as.integer(lansvec), nlev = as.integer(nlev), FinishLevel = as.integer(FinishLevel), avixstart = as.integer(avixstart), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), error = as.integer(0), PACKAGE = "wavethresh") rv <- list(wpst = ans$ansvec, nlevels = nlev, avixstart = avixstart, filter = filter, date = date()) class(rv) <- "wpst" rv } "wpst2discr"<- function(wpstobj, groups) { # # Function to convert wpst object and associated groups vector into # data matrix and k vector required as the input to the discr function. # # Input: wpstobj: a wpst object of a time-series # groups: a vector of length ncases containing the group # membership of each case. # # Returns: wpstm - a matrix. Number of rows is the number of cases # The rows are ordered according to the group # memberships of the cases. E.g. The first n1 rows # contain the group 1 cases, the second n2 rows # contain the group 2 cases, ... the ng rows # contain the group g cases. # # level - a vector of length npkts. Each entry refers to # the level that the col of wpstm comes from. # # pktix - a vector of length npkts. Each entry refers to # the packet index that the col of wpstm comes from. # # # k - a vector of length ng (the number of groups). # k[1] contains the number of members for group 1, # k[2] contains the number of members for group 2, ... # k[ng] contains the number of members for group ng. # # # J <- nlev <- nlevelsWT(wpstobj) grot <- compgrot(J, filter.number=2) nbasis <- 2 * (2^nlev - 1) ndata <- 2^nlev m <- matrix(0, nrow = ndata, ncol = nbasis) level <- rep(0, nbasis) pktix <- rep(0, nbasis) cnt <- 1 cat("Level: ") for(j in 0:(nlev - 1)) { cat(j, " ") lcnt <- 0 npkts <- 2^(nlev - j) prcnt <- as.integer(npkts/10) if (prcnt == 0) prcnt <- 1 for(i in 0:(npkts - 1)) { pkcoef <- guyrot(accessD(wpstobj, level = j, index = i), grot[J - j])/(sqrt(2)^(J - j)) m[, cnt] <- log(pkcoef^2) level[cnt] <- j pktix[cnt] <- i lcnt <- lcnt + 1 cnt <- cnt + 1 if(lcnt %% prcnt == 0) { lcnt <- 0 cat(".") } } cat("\n") } cat("\n") l <- list(m = m, groups = groups, level = level, pktix = pktix, nlevels = J) class(l) <- "w2d" l } "wpstCLASS"<- function(newTS, wpstDO) { # # # Apply wpst to new TS # newwpst <- wpst(newTS, filter.number = wpstDO$filter$filter.number, family = wpstDO$filter$family) # # # Extract the "best packets" # goodlevel <- wpstDO$BP$level goodpkt <- wpstDO$BP$pkt npkts <- length(goodpkt) ndata <- length(newTS) m <- matrix(0, nrow = ndata, ncol = npkts) J <- nlevelsWT(newwpst) grot <- compgrot(J, filter.number=2) for(i in 1:npkts) { j <- goodlevel[i] m[, i] <- guyrot(accessD(newwpst, level = j, index = goodpkt[i] ), grot[J - j])/(sqrt(2)^(J - j)) m[, i] <- log(m[, i]^2) } mTd <- predict(wpstDO$BPd$dm, m) l <- list(BasisMatrix=m, BasisMatrixDM=m%*%wpstDO$BPd$dm$scaling, wpstDO=wpstDO, PredictedOP=mTd, PredictedGroups=mTd$class) class(l) <- "wpstCL" l } "wr"<- function(...) UseMethod("wr") "wr.int"<- function(wav.int.object, ...) 
{ data <- wav.int.object$transformed.vector storage.mode(data) <- "double" size <- length(data) storage.mode(size) <- "integer" filter.history <- wav.int.object$filters.used storage.mode(filter.history) <- "integer" current.scale <- wav.int.object$current.scale storage.mode(current.scale) <- "integer" precond <- wav.int.object$preconditioned storage.mode(precond) <- "logical" temp <- .C("rec", vect = data, size, filter.history, current.scale, precond, PACKAGE = "wavethresh") return(temp$vect) } "wr.mwd"<- function(...) { #calling mwr directly would be better but #just in case... mwr(...) } "wr3D"<- function(obj) { ClassObj <- class(obj) if(is.null(ClassObj)) stop("obj has no class") if(ClassObj != "wd3D") stop("obj is not of class wd3D") Carray <- obj$a H <- filter.select(filter.number = obj$filter.number, family = obj$ family)$H answer <- .C("wr3D", Carray = as.double(Carray), truesize = as.integer(dim(Carray)[1]), H = as.double(H), LengthH = as.integer(length(H)), error = as.integer(0), PACKAGE = "wavethresh") array(answer$Carray, dim = dim(Carray)) } "wst2D"<- function(m, filter.number = 10, family = "DaubLeAsymm") { nr <- nrow(m) J <- IsPowerOfTwo(nr) dimv <- c(J, 2 * nr, 2 * nr) am <- array(0, dim = dimv) filter <- filter.select(filter.number = filter.number, family = family) error <- 0 ans <- .C("SWT2Dall", m = as.double(m), nm = as.integer(nr), am = as.double(am), J = as.integer(J), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), error = as.integer(error), PACKAGE = "wavethresh") if(ans$error != 0) stop(paste("Error code was ", ans$error)) l <- list(wst2D = array(ans$am, dim = dimv), nlevels = J, filter = filter, date = date()) class(l) <- "wst2D" l } "wstCV"<- function(ndata, ll = 3, type = "soft", filter.number = 10, family = "DaubLeAsymm", tol = 0.01, verbose = 0, plot.it = FALSE, norm = l2norm, InverseType = "average", uvdev = madmad) { nlev <- log(length(ndata))/log(2) levels <- ll:(nlev - 1) nwst <- wst(ndata, filter.number = filter.number, family = family) uv <- threshold(nwst, levels = levels, type = type, policy = "universal", dev = madmad, return.thresh = TRUE)[1] if(verbose == 1) cat("Now optimising cross-validated error estimate\n") levels <- ll:(nlev - 2) R <- 0.61803399000000003 C <- 1 - R ax <- 0 bx <- uv/2 cx <- uv x0 <- ax x3 <- cx if(abs(cx - bx) > abs(bx - ax)) { x1 <- bx x2 <- bx + C * (cx - bx) } else { x2 <- bx x1 <- bx - C * (bx - ax) } fa <- GetRSSWST(ndata, threshold = ax, levels = levels, type = type, filter.number = filter.number, family = family, norm = norm, verbose = verbose, InverseType = InverseType) cat("Done 1\n") fb <- GetRSSWST(ndata, threshold = bx, levels = levels, type = type, filter.number = filter.number, family = family, norm = norm, verbose = verbose, InverseType = InverseType) cat("Done 2\n") fc <- GetRSSWST(ndata, threshold = cx, levels = levels, type = type, filter.number = filter.number, family = family, norm = norm, verbose = verbose, InverseType = InverseType) cat("Done 3\n") f1 <- GetRSSWST(ndata, threshold = x1, levels = levels, type = type, filter.number = filter.number, family = family, norm = norm, verbose = verbose, InverseType = InverseType) cat("Done 4\n") f2 <- GetRSSWST(ndata, threshold = x2, levels = levels, type = type, filter.number = filter.number, family = family, norm = norm, verbose = verbose, InverseType = InverseType) cat("Done 5\n") xkeep <- c(ax, cx, x1, x2) fkeep <- c(fa, fc, f1, f2) if(plot.it == TRUE) { plot(c(ax, bx, cx), c(fa, fb, fc)) text(c(x1, x2), c(f1, f2), lab = c("1", "2")) } cnt <- 3 
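#
# Illustrative usage sketch for wstCV (a hypothetical example; the noisy
# Doppler test signal below is an assumption chosen only for illustration):
#
#	y <- DJ.EX()$doppler + rnorm(1024)
#	cvfit <- wstCV(y, ll = 3, type = "soft", filter.number = 10,
#		family = "DaubLeAsymm")
#	ts.plot(cvfit$xvwr)	# cross-validated reconstruction
#	cvfit$xvthresh		# the threshold chosen by cross-validation
#
# wstCVl (defined below) works in the same way but chooses a separate
# threshold for each resolution level.
#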
while(abs(x3 - x0) > tol * (abs(x1) + abs(x2))) { if(verbose > 0) { cat("x0=", x0, "x1=", x1, "x2=", x2, "x3=", x3, "\n") cat("f1=", f1, "f2=", f2, "\n") } if(f2 < f1) { x0 <- x1 x1 <- x2 x2 <- R * x1 + C * x3 f1 <- f2 f2 <- GetRSSWST(ndata, threshold = x2, levels = levels, type = type, filter.number = filter.number, family = family, norm = norm, verbose = verbose, InverseType = InverseType) if(verbose == 2) { cat("SSQ: ", signif(f2, digits=3), "\n") } else if(verbose == 1) cat(".") xkeep <- c(xkeep, x2) fkeep <- c(fkeep, f2) if(plot.it == TRUE) text(x2, f2, lab = as.character(cnt)) cnt <- cnt + 1 } else { x3 <- x2 x2 <- x1 x1 <- R * x2 + C * x0 f2 <- f1 f1 <- GetRSSWST(ndata, threshold = x1, levels = levels, type = type, filter.number = filter.number, family = family, norm = norm, verbose = verbose, InverseType = InverseType) if(verbose == 2) cat("SSQ: ", signif(f1, digits=3), "\n") else if(verbose == 1) cat(".") xkeep <- c(xkeep, x1) fkeep <- c(fkeep, f1) if(plot.it == TRUE) text(x1, f1, lab = as.character(cnt)) cnt <- cnt + 1 } } if(f1 < f2) tmp <- x1 else tmp <- x2 x1 <- tmp/sqrt(1 - log(2)/log(length(ndata))) if(verbose == 1) cat("Correcting to ", x1, "\n") else if(verbose == 1) cat("\n") g <- sort.list(xkeep) xkeep <- xkeep[g] fkeep <- fkeep[g] if(verbose >= 1) { cat("Reconstructing CV \n") } nwstT <- threshold(nwst, type = type, levels = levels, policy = "manual", value = x1) # # # Now threshold the top level using universal thresholding # nwstT <- threshold(nwstT, type = type, levels = nlevelsWT(nwstT) - 1, policy = "universal", dev = uvdev) xvwr <- AvBasis.wst(nwstT) list(ndata = ndata, xvwr = xvwr, xvwrWSTt = nwstT, uvt = uv, xvthresh = x1, xkeep = xkeep, fkeep = fkeep) } "wstCVl"<- function(ndata, ll = 3, type = "soft", filter.number = 10, family = "DaubLeAsymm", tol = 0.01, verbose = 0, plot.it = FALSE, norm = l2norm, InverseType = "average", uvdev = madmad) { nlev <- log(length(ndata))/log(2) levels <- ll:(nlev - 2) nwst <- wst(ndata, filter.number = filter.number, family = family) uv <- threshold(nwst, levels = levels, type = type, policy = "universal", dev = madmad, return.thresh = TRUE)[1] if(verbose == 1) cat("Now optimising cross-validated error estimate\n") upper <- rep(uv, length(levels)) lower <- rep(0, length(levels)) start <- (lower + upper)/2 answer <- nlminb(start = start, objective = wvcvlrss, lower = lower, upper = upper, ndata = ndata, levels = levels, type = type, filter.number = filter.number, family = family, norm = norm, verbose = verbose, InverseType = InverseType, control = list(rel.tol = tol)) x1 <- answer$par if(verbose >= 2) thverb <- TRUE else thverb <- FALSE xvwrWSTt <- threshold.wst(nwst, levels = levels, policy = "manual", value = x1, verbose = thverb) # # Now threshold the top level using universal thresholding # lastuvt <- threshold(xvwrWSTt, type = type, levels = nlevelsWT(xvwrWSTt) - 1, policy = "universal", dev = uvdev, return.thresh = TRUE) xvwrWSTt <- threshold(xvwrWSTt, type = type, levels = nlevelsWT(xvwrWSTt) - 1, policy = "manual", value = lastuvt) xvwr <- AvBasis.wst(xvwrWSTt) list(ndata = ndata, xvwr = xvwr, xvwrWSTt = xvwrWSTt, uvt = uv, xvthresh = c(x1, lastuvt), optres = answer) } "wvcvlrss"<- function(threshold, ndata, levels, type, filter.number, family, norm, verbose, InverseType) { answer <- GetRSSWST(ndata = ndata, threshold = threshold, levels = levels, family = family, filter.number = filter.number, type = type, norm = norm, verbose = verbose, InverseType = InverseType ) return(answer) } "wvmoments"<- function(filter.number = 
10, family = "DaubLeAsymm", moment = 0, scaling.function = FALSE) { WV <- draw.default(filter.number = filter.number, family = family, plot.it = FALSE, enhance = FALSE, resolution = 32768, scaling.function = scaling.function) intfn <- function(x, moment, xwv, ywv) { x^moment * approx(x = xwv, y = ywv, xout = x, rule = 2)$y } plot(WV$x, intfn(WV$x, moment = moment, WV$x, WV$y), type = "l") integrate(intfn, lower = -7, upper = 7, moment = moment, xwv = WV$x, ywv = WV$y, subdivisions = 1000, keep.xy = TRUE) } "wvrelease"<- function() { packageStartupMessage("WaveThresh: R wavelet software, release 4.7.2, installed\n") packageStartupMessage("Copyright Guy Nason and others 1993-2022\n") packageStartupMessage("Note: nlevels has been renamed to nlevelsWT\n") } wavethresh/R/LSWsim.r0000644000176200001440000000251414211622634014177 0ustar liggesusers"LSWsim"<- function(spec){ # # # First check that all spectral elements are non-zero # if (any(spec$D < 0)) stop("All spectral elements must be non-negative") # # # Now multiply by random element and factor of 2 (to undo AvBasis # averaging) # nlev <- nlevelsWT(spec) len <- 2^nlev for(i in (nlev-1):0) { v <- accessD(spec, level=i) v <- sqrt(v)*2^(nlev-i)*rnorm(len) spec <- putD(spec, level=i, v=v) } AvBasis(convert(spec)) } "cns"<- function(n, filter.number=1, family="DaubExPhase"){ if (is.na(IsPowerOfTwo(n))) stop("n must be a power of two") z <- rep(0, n) zwdS <- wd(z, filter.number=filter.number, family=family, type="station") zwdS } "checkmyews" <- function(spec, nsim=10){ ans <- cns(2^nlevelsWT(spec)) for(i in 1:nsim) { cat(".") LSWproc <- LSWsim(spec) ews <- ewspec(LSWproc, filter.number=1, family="DaubExPhase", WPsmooth=FALSE) ans$D <- ans$D + ews$S$D ans$C <- ans$C + ews$S$C } ans$D <- ans$D/nsim ans$C <- ans$C/nsim ans } wavethresh/R/NSextra.r0000644000176200001440000001451414211622634014410 0ustar liggesusers"makewpstRO" <- function(timeseries, response, filter.number = 10., family = "DaubExPhase", trans = logabs, percentage = 10.) { # # # Using the data in time series (which should be a length a power of two) # and the response information. Create an object # of class wpstRO (stationary wavelet packet regression Object). # # Given this wpstRO and another timeseries a function exists to predict # the group membership of each timeseries element # # # First build stationary wavelet packet object # # # Now convert this to a matrix # twpst <- # wpst(timeseries, filter.number = filter.number, family = family ) # # Now extract the ``best'' 1D variables. 
# tw2m <- # wpst2m(wpstobj = twpst, trans = trans) tbm <- # bestm(tw2m, y = response, percentage = percentage) # # Now build data frame from these variables # # print.w2m(tbm) nc <- ncol(tbm$m) nr <- nrow(tbm$m) tdf <- data.frame(response, tbm$m) dimnames(tdf) <- list(as.character(1.:nr), c("response", paste( "X", 1.:nc, sep = ""))) l <- list(df = tdf, ixvec = tbm$ixvec, level = tbm$level, pktix = tbm$pktix, nlevels = tbm$nlevels, cv = tbm$cv, filter = twpst$filter, trans = trans) oldClass(l) <- "wpstRO" l } "wpstREGR" <- function(newTS, wpstRO) { # # Extract the "best packets" # newwpst <- # wpst(newTS, filter.number = wpstRO$filter$filter.number, family = wpstRO$filter$family) goodlevel <- wpstRO$level goodpkt <- wpstRO$pkt npkts <- length(goodpkt) ndata <- length(newTS) m <- matrix(0., nrow = ndata, ncol = npkts) J <- nlevelsWT(newwpst) grot <- compgrot(J, filter.number=wpstRO$filter$filter.number, family=wpstRO$filter$family) for(i in 1.:npkts) { j <- goodlevel[i] m[, i] <- guyrot(accessD(newwpst, level = j, index = goodpkt[i]), grot[J - j])/(sqrt(2.)^(J - j)) m[, i] <- wpstRO$trans(m[, i]) } dimnames(m) <- list(NULL, paste("X", 1.:npkts, sep = "")) l <- data.frame(m) l } "wpst2m" <- function(wpstobj, trans = identity) { # # Function that converts a wpstobj into a matrix # # Input: # # wpstobj: the wpstobj to convert # # trans: the transform to apply to the # wpst coefficients as they come out # # an interesting alternative is # trans = log( . )^2 # (you'll have to write this function) # # # Returns: An object of class w2m # # This is a list with the following components: # # m - a matrix of order ndata x nbasis # # where ndata is the number of data points for # the time series that constituted wpstobj # # and nbasis is the number of bases in the wpstobj # # Each column corresponds to a basis function # # The row ordering is the same as the time series # that constituted wpstobj # # pktix - a vector of length nbasis which # describes the packet index of the # basis function in wpstm # # level - as pktix but for the level # # nlevels The number of levels # J <- nlev <- nlevelsWT(wpstobj) grot <- compgrot(J, filter.number = wpstobj$filter$filter.number, family = wpstobj$filter$family) nbasis <- 2. * (2.^nlev - 1.) ndata <- 2.^nlev m <- matrix(0., nrow = ndata, ncol = nbasis) level <- rep(0., nbasis) pktix <- rep(0., nbasis) cnt <- 1. cat("Level: ") for(j in 0.:(nlev - 1.)) { cat(j, " ") lcnt <- 0. npkts <- 2.^(nlev - j) prcnt <- as.integer(npkts/10.) for(i in 0.:(npkts - 1.)) { pkcoef <- guyrot(accessD(wpstobj, level = j, index = i), grot[J - j])/(sqrt(2.)^ (J - j)) m[, cnt] <- trans(pkcoef) level[cnt] <- j pktix[cnt] <- i lcnt <- lcnt + 1. cnt <- cnt + 1. if (prcnt > 0) { if(lcnt %% prcnt == 0.) { lcnt <- 0. cat(".") } } } cat("\n") } cat("\n") l <- list(m = m, level = level, pktix = pktix, nlevels = J) oldClass(l) <- "w2m" l } "compgrot" <- function(J, filter.number, family) { if(filter.number == 1. && family == "DaubExPhase") { grot <- (2.^(0.:(J - 1.)) - 1.) } else { grot <- (1.:J)^2. grot[1.] <- 2. grot <- cumsum(grot) } grot } "logabs" <- function(x) logb(x^2.) "bestm" <- function(w2mobj, y, percentage = 50.) { # # Compute desired number of bases # ndata <- # nrow(w2mobj$m) # # Actual number of bases # dbasis <- # as.integer((percentage * ndata)/100.) nbasis <- ncol(w2mobj$m) cv <- rep(0., nbasis) for(i in 1.:nbasis) { cv[i] <- cor(w2mobj$m[, i], y) } cv[is.na(cv)] <- 0. 
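#
# Illustrative usage sketch for the stationary wavelet packet regression
# machinery in this file (makewpstRO / wpstREGR).  This is a hypothetical
# example: the training series x, the response vector y (same length as x),
# the new series xnew and the use of lm() as the regression engine are all
# assumptions made for illustration; x and xnew are assumed to have dyadic
# length.
#
#	rwo   <- makewpstRO(timeseries = x, response = y, percentage = 25)
#	fit   <- lm(response ~ ., data = rwo$df)
#	newdf <- wpstREGR(newTS = xnew, wpstRO = rwo)
#	pred  <- predict(fit, newdata = newdf)
#
# bestm retains the given percentage of packets whose absolute correlation
# with the response is largest; these become the columns X1, X2, ... of the
# returned data frame.
#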
sv <- rev(sort.list(abs(cv)))[1.:dbasis] ixvec <- 1.:nbasis l <- list(m = w2mobj$m[, sv], ixvec = ixvec[sv], pktix = w2mobj$ pktix[sv], level = w2mobj$level[sv], nlevels = w2mobj$ nlevels, cv = cv[sv]) oldClass(l) <- "w2m" l } "print.w2m" <- function(x, maxbasis = 10., ...) { w2mobj <- x cat("Contains SWP coefficients\n") cat("Original time series length: ", nrow(w2mobj$m), "\n") cat("Number of bases: ", ncol(w2mobj$m), "\n") lbasis <- min(maxbasis, ncol(w2mobj$m)) if(is.null(w2mobj$ixvec)) { cat("Raw object\n") mtmp <- cbind(w2mobj$level[1.:lbasis], w2mobj$pktix[ 1.:lbasis]) dimnames(mtmp) <- list(NULL, c("Level", "Pkt Index")) } else { cat("Some basis selection performed\n") mtmp <- cbind(w2mobj$level[1.:lbasis], w2mobj$pktix[ 1.:lbasis], w2mobj$ixvec[1.:lbasis], w2mobj$ cv[1.:lbasis]) dimnames(mtmp) <- list(NULL, c("Level", "Pkt Index", "Orig Index", "Score")) } print(mtmp) if(ncol(w2mobj$m) > maxbasis) cat("etc. etc.\n") invisible() } "print.wpstRO" <- function(x, maxbasis = 10., ...) { wpstRO <- x cat("Stationary wavelet packet regression object\n") cat("Composite object containing components:") cat("Original time series length: ", nrow(wpstRO$df), "\n") cat("Number of bases: ", ncol(wpstRO$df) - 1., "\n") lbasis <- min(maxbasis, ncol(wpstRO$df) - 1.) if(is.null(wpstRO$ixvec)) { cat("Raw object\n") mtmp <- cbind(wpstRO$level[1.:lbasis], wpstRO$pktix[ 1.:lbasis]) dimnames(mtmp) <- list(NULL, c("Level", "Pkt Index")) } else { cat("Some basis selection performed\n") mtmp <- cbind(wpstRO$level[1.:lbasis], wpstRO$pktix[ 1.:lbasis], wpstRO$ixvec[1.:lbasis], wpstRO$ cv[1.:lbasis]) dimnames(mtmp) <- list(NULL, c("Level", "Pkt Index", "Orig Index", "Score")) } print(mtmp) if(ncol(wpstRO$df) > maxbasis) cat("etc. etc.\n") invisible() } wavethresh/R/wavde.r0000644000176200001440000006440114211622634014132 0ustar liggesusers"Chires5" <- function(x, tau=1, J, filter.number=10, family="DaubLeAsymm", nT=20) # data x # fine tuning parameter tau # resolution level J # family and filter.number specify the scaling function to be used # nT is the number of iterations performed in the Daubechies-Lagarias algorithm { # calculate support of father wavelet sup <- support(filter.number, family) sup <- c(sup$phi.lh, sup$phi.rh) # extract filter coefficients filcf <- filter.select(filter.number, family)$H # calculate primary resolution p <- tau * 2^J # calculate bounds on translation kmin <- ceiling(p*min(x)-sup[2]) kmax <- floor(p*max(x)-sup[1]) # create vector to put estimated coefficients in chat <- rep(0, kmax-kmin+1) # call C code! 
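#
# Illustrative usage sketch for Chires5/Chires6: these are normally reached
# through denproj() (defined later in this file), which projects a data
# sample onto the scaling functions at resolution 2^J.  A hypothetical
# example (the claw sample and the parameter choices are assumptions made
# only for illustration):
#
#	x  <- rclaw(200)
#	hp <- denproj(x, J = 6, filter.number = 4, family = "DaubExPhase",
#		covar = TRUE)
#	hp$coef		# empirical scaling-function coefficients
#	hp$covar	# their banded covariance estimate (from Chires6)
#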
error <- 0 ans <- .C("SFDE5", x = as.double(x), nx = as.integer(length(x)), p = as.double(p), filter = as.double(filcf), nf = as.integer(2*filter.number - 1), prec = as.integer(nT), chat = as.double(chat), kmin = as.integer(kmin), kmax = as.integer(kmax), philh = as.double(sup[1]), phirh = as.double(sup[2]), error = as.integer(error), PACKAGE = "wavethresh") if (ans$error != 0) stop(paste("PLDF2 function returned error code:", ans$error)) filter <- list(filter.number=filter.number, family=family) res <- list(p=p, tau=tau, J=J) list(coef=ans$chat, klim=c(ans$kmin, ans$kmax), p=ans$p, filter=filter, n=length(x), res=res) } "Chires6" <- function(x, tau=1, J, filter.number=10, family="DaubLeAsymm", nT=20) # data x # fine tuning parameter tau # resolution level J # family and filter.number specify the scaling function to be used # nT is the number of iterations performed in the Daubechies-Lagarias algorithm { # calculate support of father wavelet sup <- support(filter.number, family) sup <- c(sup$phi.lh, sup$phi.rh) # extract filter coefficients filcf <- filter.select(filter.number, family)$H # calculate primary resolution p <- tau * 2^J # calculate bounds on translation kmin <- ceiling(p*min(x)-sup[2]) kmax <- floor(p*max(x)-sup[1]) # create output vector/matrix ncoef <- kmax-kmin+1 chat <- rep(0, ncoef) covar <- matrix(0, nrow=ncoef, ncol=(2*filter.number-1)) # call C code! error <- 0 ans <- .C("SFDE6", x = as.double(x), nx = as.integer(length(x)), p = as.double(p), filter = as.double(filcf), nf = as.integer(2*filter.number - 1), prec = as.integer(nT), chat = as.double(chat), covar = as.double(covar), kmin = as.integer(kmin), kmax = as.integer(kmax), philh = as.double(sup[1]), phirh = as.double(sup[2]), error = as.integer(error), PACKAGE = "wavethresh") if (ans$error != 0) stop(paste("PLDF2 function returned error code:", ans$error)) filter <- list(filter.number=filter.number, family=family) res <- list(p=p, tau=tau, J=J) list(coef=ans$chat, covar=matrix(ans$covar, nrow=ncoef), klim=c(ans$kmin, ans$kmax), p=ans$p, filter=filter, n=length(x), res=res) } "dclaw" <- function(x) { den <- dnorm(x)/2 for(i in 0:4){ den <- den + dnorm(x, mean=(i/2-1), sd=1/10)/10 } den } "dencvwd" <- function(hrproj, filter.number=hrproj$filter$filter.number, family=hrproj$filter$family, type="wavelet", bc="zero", firstk=hrproj$klim, RetFather=TRUE, verbose=FALSE) { image <- hrproj$covar # Select wavelet filter filter <- filter.select(filter.number = filter.number, family = family) Csize <- nrow(image) # Set-up first/last database if(is.null(firstk)) firstk <- c(0, Csize-1) if(verbose == TRUE) cat("...selected\nFirst/last database...") fl.dbase <- first.last.dh(LengthH = length(filter$H), DataLength = Csize, bc = bc, type = type, firstk = firstk) first.last.c <- fl.dbase$first.last.c first.last.d <- fl.dbase$first.last.d nlev <- nrow(first.last.d) # Set up answer list image.decomp <- list(nlevels = nlev, fl.dbase = fl.dbase, filter = filter, type = type, bc = bc, date = date()) if(verbose == TRUE) cat("...built\n") # Ok, go round loop doing decompositions nbc <- switch(bc, periodic = 1, symmetric = 2, zero = 3) if(is.null(nbc)) stop("Unknown boundary handling") if(type == "station" && bc == "symmetric") stop("Cannot do stationary transform with symmetric boundary conditions" ) ntype <- switch(type, wavelet = 1, station = 2) if(is.null(ntype)) stop("Unknown type of transform") # Load up original image smoothed <- as.vector(image) if(verbose == TRUE) { cat(bc, " boundary handling\n") cat("Decomposing...") } for(level 
in seq(nrow(first.last.d), 1, -1)) { if(verbose == TRUE) cat(level - 1, "") LengthCin <- first.last.c[level+1, 2] - first.last.c[level+1, 1] + 1 LengthCout <- first.last.c[level, 2] - first.last.c[level, 1] + 1 LengthDout <- first.last.d[level, 2] - first.last.d[level, 1] + 1 ImCC <- rep(0, (LengthCout * (2*filter.number-1))) ImDD <- rep(0, (LengthDout * (2*filter.number-1))) error <- 0 z <- .C("StoDCDS", C = as.double(smoothed), Csize = as.integer(LengthCin), firstCin = as.integer(first.last.c[level + 1, 1]), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), LengthCout = as.integer(LengthCout), firstCout = as.integer(first.last.c[level, 1]), lastCout = as.integer(first.last.c[level, 2]), LengthDout = as.integer(LengthDout), firstDout = as.integer(first.last.d[level, 1]), lastDout = as.integer(first.last.d[level, 2]), ImCC = as.double(ImCC), ImDD = as.double(ImDD), nbc = as.integer(nbc), ntype = as.integer(ntype), error = as.integer(error), PACKAGE = "wavethresh") error <- z$error if(error != 0) { cat("Error was ", error, "\n") stop("Error reported") } smoothed <- z$ImCC if(RetFather == TRUE) { nm <- lt.to.name(level - 1, "CC") image.decomp[[nm]] <- matrix(z$ImCC, nrow=LengthCout) } nm <- lt.to.name(level - 1, "DD") image.decomp[[nm]] <- matrix(z$ImDD, nrow=LengthDout) } if(verbose == TRUE) cat("\nReturning answer...\n") image.decomp$w0Lconstant <- smoothed image.decomp$bc <- bc image.decomp$date <- date() class(image.decomp) <- "imwd" l <- list(C=NULL, D=rep(0, fl.dbase$ntotal.d), nlevels=nrow(fl.dbase$first.last.d), fl.dbase=fl.dbase, filter=filter, type=type, bc=bc, date=date()) class(l) <- "wd" for(level in 1:nlevelsWT(l)) { covar <- image.decomp[[lt.to.name(level - 1, "DD")]] l <- putD.wd(l, level-1, covar[,1], boundary=TRUE) } l } "denplot" <- function(wr, coef, nT=20, lims, n=50) # smoothed high level coefficients wr # coef is the output from denproj for this analysis # nT is the number of iterations performed in the Daubechies-Lagarias algorithm # estimate is evaluated at n points between lims { p <- coef$res$p filter <- coef$filter # calculate support of father wavelet sup <- support(filter$filter.number, filter$family) sup <- c(sup$phi.lh, sup$phi.rh) # extract filter coefficients filcf <- filter.select(filter$filter.number, filter$family)$H # create grid for drawing density estimate and vector to put values in gx <- seq(lims[1], lims[2], length=n) gy <- c(rep(0, length(gx))) # find range of high resolution coefficients kmin <- coef$klim[1] kmax <- coef$klim[2] # call C code! 
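#
# Illustrative sketch of the density-estimation pipeline that denplot
# completes (a hypothetical example; the claw sample, resolution level and
# plotting range are assumptions made only for illustration):
#
#	x   <- rclaw(500)
#	hp  <- denproj(x, J = 8, filter.number = 2, family = "DaubExPhase")
#	hwd <- denwd(hp)		# wavelet decomposition, zero boundaries
#	plotdenwd(hwd)			# inspect the detail coefficients
#	# (the coefficients in hwd would usually be smoothed or thresholded here)
#	hwr <- denwr(hwd)		# back to scaling-function coefficients
#	est <- denplot(hwr, hp, lims = c(-3, 3), n = 200)
#	plot(est$x, est$y, type = "l")	# the resulting density estimate
#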
error <- 0 ans <- .C("PLDE2", C = as.double(wr), p = as.double(p), filter = as.double(filcf), nf = as.integer(2*filter$filter.number - 1), prec = as.integer(nT), kmin = as.integer(kmin), kmax = as.integer(kmax), gx = as.double(gx), gy = as.double(gy), ng = as.integer(n), philh = as.double(sup[1]), phirh = as.double(sup[2]), error = as.integer(error), PACKAGE = "wavethresh") if (ans$error != 0) stop(paste("PLDF2 function returned error code:", ans$error)) list(x=ans$gx, y=ans$gy) } "denproj" <- function(x, tau=1, J, filter.number=10, family="DaubLeAsymm", covar=FALSE, nT=20) # data x # fine tuning parameter tau # resolution level J # family and filter.number specify the scaling function to be used # covar - logical variable indicating whether covariances should be calculated # nT is the number of iterations performed in the Daubechies-Lagarias algorithm { if(covar) ans <- Chires6(x, tau, J, filter.number, family, nT) else ans <- Chires5(x, tau, J, filter.number, family, nT) ans } "denwd" <- function(coef) { wd.dh(coef$coef, filter.number=coef$filter$filter.number, family=coef$filter$family, bc="zero", firstk=coef$klim) } "denwr" <- function(wd, start.level=0, verbose=FALSE, bc=wd$bc, return.object=FALSE, filter.number=wd$filter$filter.number, family=wd$filter$family) { if(IsEarly(wd)) { ConvertMessage() stop() } if(verbose == TRUE) cat("Argument checking...") # Check class of wd if(verbose == TRUE) cat("Argument checking\n") ctmp <- class(wd) if(is.null(ctmp)) stop("wd has no class") else if(ctmp != "wd") stop("wd is not of class wd") if(start.level < 0) stop("start.level must be nonnegative") if(start.level >= nlevelsWT(wd)) stop("start.level must be less than the number of levels") if(is.null(wd$filter$filter.number)) stop("NULL filter.number for wd") if(bc != wd$bc) warning("Boundary handling is different to original") if(wd$type == "station") stop("Use convert to generate wst object and then AvBasis or InvBasis" ) type <- wd$type filter <- filter.select(filter.number = filter.number, family = family) LengthH <- length(filter$H) # Build the reconstruction first/last database if(verbose == TRUE) cat("...done\nFirst/last database...") r.first.last.c <- wd$fl.dbase$first.last.c[(start.level+1):(nlevelsWT(wd)+1), ] ntotal <- r.first.last.c[1,3] + r.first.last.c[1,2] - r.first.last.c[1,1] + 1 names(ntotal) <- NULL C <- accessC(wd, level = start.level, boundary = TRUE) C <- c(rep(0, length = (ntotal - length(C))), C) nlevels <- nlevelsWT(wd) - start.level error <- 0 # Load object code if(verbose == TRUE) cat("...built\n") if(verbose == TRUE) { cat("Reconstruction...") error <- 1 } ntype <- switch(type, wavelet = 1, station = 2) if(is.null(ntype)) stop("Unknown type of decomposition") nbc <- switch(bc, periodic = 1, symmetric = 2, zero = 3) if(is.null(nbc)) stop("Unknown boundary handling") if(!is.complex(wd$D)) { wavelet.reconstruction <- .C("waverecons_dh", C = as.double(C), D = as.double(wd$D), H = as.double(filter$H), LengthH = as.integer(LengthH), nlevels = as.integer(nlevels), firstC = as.integer(r.first.last.c[, 1]), lastC = as.integer(r.first.last.c[, 2]), offsetC = as.integer(r.first.last.c[, 3]), firstD = as.integer(wd$fl.dbase$first.last.d[, 1]), lastD = as.integer(wd$fl.dbase$first.last.d[, 2]), offsetD = as.integer(wd$fl.dbase$first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") } if(verbose == TRUE) cat("done\n") error <- wavelet.reconstruction$error if(error != 0) { cat("Error code returned from waverecons: ", 
error, "\n") stop("waverecons returned error") } fl.dbase <- list(first.last.c=r.first.last.c, ntotal=wavelet.reconstruction$LengthC, first.last.d=wd$fl.dbase$ first.last.d, ntotal.d=wd$fl.dbase$ntotal.d) if(!is.complex(wd$D)) { l <- list(C=wavelet.reconstruction$C, D=wavelet.reconstruction$D, fl.dbase=fl.dbase, nlevels=nlevelsWT(wavelet.reconstruction), filter=filter, type=type, bc=bc, date=date()) } class(l) <- "wd" if(return.object == TRUE) return(l) else { if(bc == "zero") return(accessC(l, boundary = TRUE)) else return(accessC(l)) } stop("Shouldn't get here\n") } "first.last.dh" <- function(LengthH, DataLength, type = "wavelet", bc = "periodic", firstk=c(0, DataLength-1)) { if(type == "station" && bc != "periodic") stop("Can only do periodic boundary conditions with station") if(type != "station" && type != "wavelet") stop("Type can only be wavelet or station") if(bc=="periodic" || bc=="symmetric") { levels <- log(DataLength)/log(2) first.last.c <- matrix(0, nrow = levels + 1, ncol = 3, dimnames = list(NULL, c("First", "Last", "Offset"))) first.last.d <- matrix(0, nrow = levels, ncol = 3, dimnames = list(NULL, c("First", "Last", "Offset"))) } if(bc == "periodic") { # Periodic boundary correction if(type == "wavelet") { first.last.c[, 1] <- rep(0, levels + 1) first.last.c[, 2] <- 2^(0:levels) - 1 first.last.c[, 3] <- rev(c(0, cumsum(rev(1 + first.last.c[, 2]))[1:levels])) first.last.d[, 1] <- rep(0, levels) first.last.d[, 2] <- 2^(0:(levels - 1)) - 1 first.last.d[, 3] <- rev(c(0, cumsum(rev(1 + first.last.d[, 2]))[1:(levels - 1)])) ntotal <- 2 * DataLength - 1 ntotal.d <- DataLength - 1 } else if(type == "station") { first.last.c[, 1] <- rep(0, levels + 1) first.last.c[, 2] <- 2^levels - 1 first.last.c[, 3] <- rev(c(0, cumsum(rev(1 + first.last.c[, 2]))[1:levels])) first.last.d[, 1] <- rep(0, levels) first.last.d[, 2] <- 2^levels - 1 first.last.d[, 3] <- rev(c(0, cumsum(rev(1 + first.last.d[, 2]))[1:(levels - 1)])) ntotal <- (levels + 1) * 2^levels ntotal.d <- levels * 2^levels } } else if(bc == "symmetric") { # Symmetric boundary reflection first.last.c[levels + 1, 1] <- 0 first.last.c[levels + 1, 2] <- DataLength - 1 first.last.c[levels + 1, 3] <- 0 ntotal <- first.last.c[levels + 1, 2] - first.last.c[levels + 1,1] + 1 ntotal.d <- 0 for(i in levels:1) { first.last.c[i, 1] <- trunc(0.5 * (1 - LengthH + first.last.c[i + 1, 1])) first.last.c[i, 2] <- trunc(0.5 * first.last.c[i + 1, 2]) first.last.c[i, 3] <- first.last.c[i + 1, 3] + first.last.c[i + 1, 2] - first.last.c[i + 1, 1] + 1 first.last.d[i, 1] <- trunc(0.5 * (first.last.c[i + 1, 1] - 1)) first.last.d[i, 2] <- trunc(0.5 * (first.last.c[i + 1, 2] + LengthH - 2)) if(i != levels) { first.last.d[i, 3] <- first.last.d[i + 1, 3] + first.last.d[i + 1, 2] - first.last.d[i + 1, 1] + 1 } ntotal <- ntotal + first.last.c[i, 2] - first.last.c[i,1] + 1 ntotal.d <- ntotal.d + first.last.d[i, 2] - first.last.d[i, 1] + 1 } } else if(bc=="zero") { first.c <- firstk[1] last.c <- firstk[2] offset.c <- 0 first.d <- NULL last.d <- NULL offset.d <- 0 ntotal <- last.c - first.c + 1 ntotal.d <- 0 while( (first.c[1] > 2 - LengthH || first.c[1] < 1 - LengthH) || (last.c[1] > 0 || last.c[1] < -1) ) { first.c <- c(ceiling(0.5*(first.c[1] - LengthH + 1)), first.c) last.c <- c(floor(0.5*last.c[1]), last.c) offset.c <- c(offset.c[1] + last.c[2] - first.c[2] +1, offset.c) ntotal <- ntotal + last.c[1] - first.c[1] + 1 first.d <- c(ceiling(0.5*(first.c[2]-1)), first.d) last.d <- c(floor(0.5*(last.c[2] + LengthH - 2)), last.d) if(length(first.d) > 1) offset.d <- 
c(offset.d[1] + last.d[2] - first.d[2] + 1, offset.d) ntotal.d <- ntotal.d + last.d[1] - first.d[1] +1 } first.last.c <- matrix(c(first.c, last.c, offset.c), ncol=3, dimnames=list(NULL, c("First", "Last", "Offset"))) first.last.d <- matrix(c(first.d, last.d, offset.d), ncol=3, dimnames=list(NULL, c("First", "Last", "Offset"))) } else { stop("Unknown boundary correction method") } names(ntotal) <- NULL names(ntotal.d) <- NULL list(first.last.c = first.last.c, ntotal = ntotal, first.last.d = first.last.d, ntotal.d = ntotal.d) } "pclaw" <- function(q) { prob <- pnorm(q)/2 for(i in 0:4){ prob <- prob + pnorm(q, mean=(i/2-1), sd=1/10)/10 } prob } "plotdenwd" <- function(wd, xlabvals, xlabchars, ylabchars, first.level=0, top.level=nlevelsWT(wd) - 1, main="Wavelet Decomposition Coefficients", scaling="global", rhlab=FALSE, sub, NotPlotVal=0.005, xlab="Translate", ylab="Resolution Level", aspect="Identity", ...) { ctmp <- class(wd) if(is.null(ctmp)) stop("wd has no class") else if(ctmp != "wd") stop("wd is not of class wd") levels <- nlevelsWT(wd) nlevels <- levels - first.level cfac <- top.level - (levels-1) sfac <- rep(2, nlevels) ^ c((nlevels-1):0) first <- wd$fl.dbase$first.last.d[(first.level+1):levels,1] first <- first * sfac + (sfac-1)/2 last <- wd$fl.dbase$first.last.d[(first.level+1):levels,2] last <- last * sfac + (sfac-1)/2 xrange <- c(floor(min(first)), ceiling(max(last))) type <- wd$type if(type == "wavelet") n <- 2^(levels-2) if(missing(sub)) sub <- paste(switch(type, wavelet = "Standard transform", station = "Nondecimated transform"), wd$filter$name) if(aspect != "Identity") sub <- paste(sub, "(", aspect, ")") plot(c(xrange[1], xrange[1], xrange[2], xrange[2]), c(0, nlevels+1, nlevels+1, 0), type="n", xlab=xlab, ylab=ylab, main=main, yaxt="n", xaxt="n", sub=sub, ...) 
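#
# Illustrative sketch of the "claw" test-density utilities in this file
# (dclaw, pclaw, rclaw); a hypothetical example for illustration only:
#
#	xx <- seq(-3, 3, length = 301)
#	plot(xx, dclaw(xx), type = "l")			# the claw density
#	pclaw(0)					# its distribution function at 0
#	hist(rclaw(1000), breaks = 50, freq = FALSE)	# a sample from it
#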
yll <- top.level:(first.level+cfac) if(missing(ylabchars)) axis(2, at = 1:(nlevels), labels = yll) else if(length(ylabchars) != nlevels) stop(paste("Should have ", nlevels, " entries in ylabchars")) else axis(2, at = 1:(nlevels), labels = ylabchars) if(missing(xlabchars)) { if(missing(xlabvals)) { if(type == "wavelet") { if(wd$bc != "zero") { axx <- c(0, 2^(levels - 3), 2^(levels - 2), 2^(levels - 2) + 2^(levels - 3), 2^(levels - 1)) } else { jrange <- floor(logb(abs(xrange), 2)) xlabr <- sign(xrange) * 2^jrange xsp <- diff(xlabr) axx <- xlabr[1] + c(0, xsp/4, xsp/2, 3*xsp/4, xsp) if((xlabr[2]+xsp/4) <= xrange[2]) axx <- c(axx, xlabr[2]+xsp/4) if((xlabr[1]-xsp/4) >= xrange[1]) axx <- c(xlabr[1]-xsp/4, axx) } } else axx <- c(0, 2^(levels - 2), 2^(levels - 1), 2^(levels - 1) + 2^(levels - 2), 2^levels) axis(1, at = axx) } else { lx <- pretty(xlabvals, n = 4) cat("lx is ", lx, "\n") if(lx[1] < min(xlabvals)) lx[1] <- min(xlabvals) if(lx[length(lx)] > max(xlabvals)) lx[length(lx)] <- max(xlabvals) cat("lx is ", lx, "\n") xix <- NULL for(i in 1:length(lx)) { u <- (xlabvals - lx[i])^2 xix <- c(xix, (1:length(u))[u == min(u)]) } axx <- xix if(type == "wavelet") axx <- xix/2 axl <- signif(lx, digits = 2) axis(1, at = axx, labels = axl) } } else axis(1, at = xlabvals, labels = xlabchars) x <- 1:n height <- 1 first.last.d <- wd$fl.dbase$first.last.d axr <- NULL if(scaling == "global") { my <- 0 for(i in ((levels - 1):first.level)) { y <- accessD(wd, i, boundary=TRUE, aspect = aspect) my <- max(c(my, abs(y))) } } if(scaling == "compensated") { my <- 0 for(i in ((levels - 1):first.level)) { y <- accessD(wd, i, boundary=TRUE, aspect = aspect) * 2^(i/2) my <- max(c(my, abs(y))) } } if(scaling == "super") { my <- 0 for(i in ((levels - 1):first.level)) { y <- accessD(wd, i, boundary=TRUE, aspect = aspect) * 2^i my <- max(c(my, abs(y))) } } shift <- 1 for(i in ((levels - 1):first.level)) { y <- accessD(wd, i, boundary=TRUE, aspect = aspect) if(type == "wavelet") n <- first.last.d[i+1,2]-first.last.d[i+1,1]+1 else { y <- y[c((n - shift + 1):n, 1:(n - shift))] shift <- shift * 2 } xplot <- seq(from=first[i-first.level+1], to=last[i-first.level+1], by=2^(nlevels-(i-first.level)-1)) ly <- length(y) if(scaling == "by.level") my <- max(abs(y)) if(scaling == "compensated") y <- y * 2^(i/2) if(scaling == "super") y <- y * 2^i if(my == 0) { y <- rep(0, length(y)) } else y <- (0.5 * y)/my axr <- c(axr, my) if(max(abs(y)) > NotPlotVal) segments(xplot, height, xplot, height + y) if(i != first.level) { if(type == "wavelet") { # x1 <- x[seq(1, n - 1, 2)] # x2 <- x[seq(2, n, 2)] # x <- (x1 + x2)/2 # x <- 1:n } height <- height + 1 } } if(rhlab == TRUE) axis(4, at = 1:length(axr), labels = signif(axr, 3)) axr } "rclaw" <- function(n) { nx <- rnorm(n) p <- runif(n) oldx <- nx nx[p<=0.5] <- nx[p<=0.5]/10 + (trunc(p[p<=0.5] * 10)/2 -1) nx } "wd.dh" <- function(data, filter.number = 10, family = "DaubLeAsymm", type = "wavelet", bc = "periodic", firstk=NULL, verbose = FALSE) { if(verbose == TRUE) cat("wd: Argument checking...") if(!is.atomic(data)) stop("Data is not atomic") DataLength <- length(data) # Check that we have a power of 2 data elements if not using zero bcs if(bc=="periodic" || bc=="symmetric") { nlevels <- nlevelsWT(data) if(is.na(nlevels)) stop("Data length is not power of two") } # Check for correct type if(type != "wavelet" && type != "station") stop("Unknown type of wavelet decomposition") if(type == "station" && bc != "periodic") stop("Can only do periodic boundary conditions with station") # Select the 
appropriate filter if(verbose == TRUE) cat("...done\nFilter...") filter <- filter.select(filter.number = filter.number, family = family) # Build the first/last database if(verbose == TRUE) cat("...selected\nFirst/last database...") fl.dbase <- first.last.dh(LengthH = length(filter$H), DataLength = DataLength, type = type, bc = bc, firstk = firstk) # Find number of levels in zero bc case if(bc=="zero") nlevels <- nrow(fl.dbase$first.last.d) # Put in the data C <- rep(0, fl.dbase$ntotal) C[1:DataLength] <- data if(verbose == TRUE) error <- 1 else error <- 0 if(verbose == TRUE) cat("built\n") # Compute the decomposition if(verbose == TRUE) cat("Decomposing...\n") nbc <- switch(bc, periodic = 1, symmetric = 2, zero = 3) if(is.null(nbc)) stop("Unknown boundary condition") ntype <- switch(type, wavelet = 1, station = 2) if(is.null(filter$G)) { wavelet.decomposition <- .C("wavedecomp_dh", C = as.double(C), D = as.double(rep(0, fl.dbase$ntotal.d)), H = as.double(filter$H), LengthH = as.integer(length(filter$H)), nlevels = as.integer(nlevels), firstC = as.integer(fl.dbase$first.last.c[, 1]), lastC = as.integer(fl.dbase$first.last.c[, 2]), offsetC = as.integer(fl.dbase$first.last.c[, 3]), firstD = as.integer(fl.dbase$first.last.d[, 1]), lastD = as.integer(fl.dbase$first.last.d[, 2]), offsetD = as.integer(fl.dbase$first.last.d[, 3]), ntype = as.integer(ntype), nbc = as.integer(nbc), error = as.integer(error), PACKAGE = "wavethresh") } if(verbose == TRUE) cat("done\n") error <- wavelet.decomposition$error if(error != 0) { cat("Error ", error, " occured in wavedecomp\n") stop("Error") } if(is.null(filter$G)) { l <- list(C = wavelet.decomposition$C, D = wavelet.decomposition$D, nlevels = nlevelsWT(wavelet.decomposition), fl.dbase = fl.dbase, filter = filter, type = type, bc = bc, date = date()) } class(l) <- "wd" return(l) } wavethresh/MD50000644000176200001440000003404114335177642012760 0ustar liggesusers117a3fa19b4d86de217fd74bdd2bfb82 *DESCRIPTION af6b5704f05588794fb999142781f2dc *NAMESPACE d4322104e0c067fb69236236f23ce2fb *R/LSWsim.r 1dbbada142cde04b1190afd6f4735b0e *R/NSextra.r 2713562b6f36f46545109fb397833a9f *R/function.r a293e8e6493aa664ae235c5a0e2f6f62 *R/wavde.r 9a9039118440a136e67a2d28efd60bdb *build/partial.rdb 8c5fdba4e1e58b2ebd9ab4258b5b7617 *data/BabyECG.rda fe9024fed6007ec9831601f6cc90d363 *data/BabySS.rda cde124da9614b7c373609ddf03107a17 *data/datalist 2ae1e06329ec665260606abc26e2737e *data/ipd.rda ad576bc3fc90149d01caa06803f2250e *data/lennon.rda 23e7e30e9b534775a1f4c9140effb9f1 *data/teddy.rda de73b25b833ee2df2c2cf43f583ad04e *inst/CHANGES 7eec463ea698e64bcaae88ed1385950c *man/AutoBasis.rd c346526092e0db7e9af4e8010d26271f *man/AvBasis.rd 15e88b76d8824a00f8f46bbf645b9c52 *man/AvBasis.wst.rd eb430fab01b336cf4314b9c3a0e9d987 *man/AvBasis.wst2D.rd 45633989bbe4e35ab0adc60ceb3ae04a *man/BAYES.THR.rd cd8f70d1f28f6dc0d9fd9d6126f26e15 *man/BMdiscr.rd f86d60c34746c1397f9f29df347eb7f6 *man/BabyECG.rd 64932378fe28ce2ee6c318582c0ec97a *man/BabySS.rd f39288d8582bf05e330da7bca023d6fc *man/Best1DCols.rd 648401d331d609c35191551b7716e90e *man/CWCV.rd 2a07d2cb500bab730081995396e9057e *man/CWavDE.rd 2abd5955ca0d1f5fb3f8f5e9371c4f18 *man/CanUseMoreThanOneColor.rd 9252361fc9f6717792728ee55bfc7754 *man/Chires5.rd 394c09623e26138f475b952665d10667 *man/Chires6.rd e54f4040c1474b1ce39e44d297b7b026 *man/ConvertMessage.rd 30d7ecf764200e63d8823ed854817dab *man/Crsswav.rd 51c1ea1c1ec2765203e9d2c333e95839 *man/Cthreshold.rd ab47b95ed9d0d044b0cdde405b46fa72 *man/DJ.EX.rd 8297fbe6d75e65f7e1d6cacc0686d22d 
*man/FullWaveletCV.rd bfdffc5204a671f461ff1d675a5b4c25 *man/GenW.rd e01e26995b4ca36405c8eb839d1b1aa0 *man/GetRSSWST.rd 5dccb7955dada8bf06a1e5a5c3a2414a *man/HaarConcat.rd 527ecc12ce1fd494d7f912799359b8a0 *man/HaarMA.rd 691b27fb5e62003e4119b4fc44128dcb *man/InvBasis.rd aed2337d34b9721c7ace913c87334e26 *man/InvBasis.wp.rd 9979ef6df53750657486098d2fe1389a *man/InvBasis.wst.rd 93f5d86f25b5d94fd1a83f3ba49596b4 *man/IsEarly.default.rd 0905352eae478e6056824ba43ef0f6a8 *man/IsEarly.rd 0fb908295c75c05e8d94984ac391527c *man/IsEarly.wd.rd dcf9fd57c2238628be551ee0f2e29a0e *man/IsPowerOfTwo.rd c1628682ee7e8bb77c590d16584995a4 *man/LSWsim.rd f8f69b55ecb7905bb352372dd46295c5 *man/LocalSpec.rd cb50c352c4ba1eb15dcf0f75682eca5e *man/LocalSpec.wd.rd 4288b3bf90fb9424f870135346cee978 *man/LocalSpec.wst.rd 596c8dbe48458dd74e9745e4e405c699 *man/MaNoVe.rd ecc94b34fcc6c90f6f20e760fca33db9 *man/MaNoVe.wp.rd 95f76a6958b7a812788741d407f47279 *man/MaNoVe.wst.rd 3644eee4a69da429e8c34174e4f5e48c *man/PsiJ.rd 292d4225e20ce12d60de2deb8dc150af *man/PsiJmat.rd afbb55fae9733eab0db543ec746fe9b1 *man/Psiname.rd 5c2483272b11eabe3e602ff749dfe72e *man/ScalingFunction.rd 7c3c4504e647cb545b5dbf5dc8360883 *man/Shannon.entropy.rd c9019aaecb676a5bc22a442abdeb9135 *man/TOgetthrda1.rd 4e99ffb6aafc761a6605f64ffd53bf12 *man/TOthreshda1.rd 21717d077dab0d552cad9cd5a55ec424 *man/TOthreshda2.rd 39da2c72edf6d4ed227458e4a44fa015 *man/WTEnv.rd 95e81b90805034e267a393f3fe06ef20 *man/WaveletCV.rd 4243fad95cb02f468c8e2f1bdb022e72 *man/Whistory.rd fbd3860c7c2f74a98cccd91781c1be03 *man/Whistory.wst.rd e8c2eca2e301aab8c620c3bc7ff774fd *man/accessC.mwd.rd ce3d969207513c745c0d863dc6ddca77 *man/accessC.wd.rd 774e0e9aa2004aa3757319325b9326a2 *man/accessC.wp.rd 4cd9641a8638909ddc9d1fa4711ef2d9 *man/accessC.wst.rd a62a9b6da5fa1f2409b6006f09d75f83 *man/accessC2.rd 20c217ea7420caa6a8e9fd47979b59d7 *man/accessD.mwd.rd d2fe216b53dc12784cff1addeca4528e *man/accessD.rd 8a3c909596d9b063c994468a0f2c29b4 *man/accessD.wd.rd 3cdef4766e20a2c33cb34032adc2834c *man/accessD.wd3D.rd 7da8094fe85438098b4ffc97a53b2d92 *man/accessD.wp.rd b9d76f7b8f64a427eae014c500ee2aeb *man/accessD.wpst.rd 03f4902c682c908720c424d6417c5f66 *man/accessD.wst.rd 6243209a81cf7cd34c5f6a6ed7dcbb27 *man/accessc.rd 6713219dfbaeddeede6a241d2d824d65 *man/addpkt.rd e878b7e554b45f65a8426087dc8e8555 *man/av.basis.rd bdf06f6bf3cc677987d40009675fb767 *man/basisplot.BP.rd 73e4b3e209c1450426809d89b7f59ab9 *man/basisplot.rd 5d18f01f22505d872b8dc8ae63230894 *man/basisplot.wp.rd e86c5e076209ae668351608080de8d0b *man/bestm.rd 1377906fd0bb1ae620ba749f965bc18d *man/c2to4.rd 3fc0c968741defc8db4c229b2b59800e *man/checkmyews.rd 68fb096f54eb035c1ca82e454fe35836 *man/cns.rd 63d92e96e6d80dbf3ded40804a7ad1d8 *man/compare.filters.rd f6c3b1dc51b7e5421f31fca7d513bd62 *man/compgrot.rd b412050bc4d79a25094ac150a4adad9f *man/compress.default.rd 03f1318221372dd8f1bfb72f4fae09c9 *man/compress.imwd.rd 812c9ece5d5a2d7b167386f492bdf9e7 *man/compress.rd 033dc708c136bcda876ee3359b354606 *man/conbar.rd a877aee85061fe030c3079f4a35218cf *man/convert.rd c2b1f67c89a99c02e5bda2ddbcb59ca9 *man/convert.wd.rd 5b591341b1d3b2e50fbbd01b38e53977 *man/convert.wst.rd df633fa98c34714dd998a7a98016fa7c *man/cthresh.rd abd29604e2326b402b7d4d0198552abb *man/dclaw.rd 581c3b457157255755fed170ce852dab *man/dencvwd.rd 1bf4e49caa1cc14bed86982023e58112 *man/denplot.rd 055936b36ceefc606f38eaf39a8b7128 *man/denproj.rd 0b16b72c9834c997b3948c9b85b19220 *man/denwd.rd fb17059b80dc7679ec042a61e6d3b22d *man/denwr.rd b6bca98e90ce135269d607db52ba27a0 *man/dof.rd 
a673ef6c2e82e1474ab6f03311c46675 *man/doppler.rd f3dfc0a0583717caaec6e5c013eabfc1 *man/draw.default.rd e5564e5d85cd983ee53ede735a045d60 *man/draw.imwd.rd 53a1e3901135845d9d17096a9b5b4f9a *man/draw.imwdc.rd 04c42c2fa8c379675c1461ede2626515 *man/draw.mwd.rd b5b87c24aed86882fd84da7d62365adc *man/draw.rd c4293d5e57347e1f071cf379cefa573d *man/draw.wd.rd 20d91c6c174b77a776179333de22d470 *man/draw.wp.rd 7cafcd679d626391d743b0fe09407788 *man/draw.wst.rd 072d7f4ff674edf213039e902d1524da *man/drawbox.rd d75b70a30b362897c4436da405265b7f *man/drawwp.default.rd 7fbb649a852b96d14afa35466e4ece5a *man/ewspec.rd 70d19e53b77e20523c152c672df9e976 *man/example.1.rd ffb93b1f489000d89a691674a942de9a *man/filter.select.rd d226df064980aee16f540f2632db4081 *man/find.parameters.rd c728f3547310492f0ccf436b7657e75c *man/first.last.dh.rd 389c28feae36973ca3126d323fe1301b *man/first.last.rd cbf0483782aaf227244043cef359890b *man/firstdot.rd 376e3653b63eadba48a2f8f27a899221 *man/getarrvec.rd afb3a1a6330638264a7dc87e797161d0 *man/getpacket.rd dbd2ae7f40fbf593037668c9ae45ce9f *man/getpacket.wp.rd bacf3d9d4f882a62ab5d3a8a9aad37be *man/getpacket.wpst.rd 720d1db3aa85f00a6cc4efd698e8a40b *man/getpacket.wst.rd 463982818daf9c33aee5f774d7fa8330 *man/getpacket.wst2D.rd 89acbdc350e4467546e77bf70006d615 *man/griddata.rd ddd180902b177e6d4743f5867e751339 *man/guyrot.rd c81ae7423dfa0099aadba3de08337b83 *man/image.wd.rd 9dc147f301a54758e34a08b2a33c09b8 *man/image.wst.rd d526a2ff0de06dcf0ea54a087bc03439 *man/imwd.object.rd 2a46a32e688ba77c6762c33975f628fa *man/imwd.rd 8bd64298b43e59ee92426ad38e14bfb9 *man/imwdc.object.rd ee683908bd9a26d8a8dc3da1c0457b3e *man/imwr.imwd.rd 1a6d02d5747eac6abf9eb8db0ff16ff3 *man/imwr.imwdc.rd eea2219200f0e93148d9a0b053736547 *man/imwr.rd 92ea6ca6ad7ae86174f3eb256c10317a *man/ipd.rd 7f100d4b0906e58ad484ed414d2da37d *man/ipndacw.rd b722c9c51cd6740e3b93edd12b61d1ad *man/irregwd.objects.rd c73062f7f3065c28e74539b48724cf09 *man/irregwd.rd 9cc729637bf4f0a43ff91ffa58bac462 *man/l2norm.rd b05654fc89d79644700c772333a4158a *man/lennon.rd 44c89c5c7ce6a8e1b19b64fd546ce85b *man/levarr.rd ee9df06a416186741fe512de153f55b5 *man/linfnorm.rd 11da6aa2055ced9c1bcde32d3498cd96 *man/logabs.rd e8becbfae0db00f14eecfa057987f602 *man/lt.to.name.rd d7ac2f0d2fa4d96dcfb360864448422e *man/madmad.rd c131ca9f902f3dc9372c7eb58366c1ae *man/make.dwwt.rd d6f73101b19d6399aa538e09fc2d57d1 *man/makegrid.rd abec1b082bb6f102b2e0273a81d03cb8 *man/makewpstDO.rd d9f7720bf016a6a4460a5085bf23401e *man/makewpstRO.rd 2aba9fb2e9cece4d7c35ab75c48b5b38 *man/mfilter.select.rd c246a843f6a941615c6a8152fae84362 *man/mfirst.last.rd 5d56b8696e2d85c724649932f94a221c *man/modernise.rd 986c47a47c8b93c72a83874d015066ed *man/modernise.wd.rd 7cfbf9026ebb5283108efe27902d0427 *man/mpostfilter.rd 9574b02c3c3cbfb274b2228f3a0faefb *man/mprefilter.rd e8a65e4fcebdce69f380005b2ec6ce88 *man/mwd.object.rd e10dabe4c9b8c1a33881c26112cfdf30 *man/mwd.rd d9e63a2b967470b7c6b3890879157ee3 *man/mwr.rd 6eb18e25b4487ac9bbafa837a1dfe1b2 *man/newsure.rd 93a32ce44d9412e40b8638a76b2011f8 *man/nlevelsWT.default.rd b9239d2823414123e5ad15711bce02a3 *man/nlevelsWT.rd 3ced4f2ca8f089b1dd0d5b246c7bbcdd *man/nullevels.imwd.rd e2c6889c8b999f40223ceb0ee803028e *man/nullevels.rd 0f5217fe95cd7f37be369230bfba7ea8 *man/nullevels.wd.rd 9f4738885d6b7641c7ac8466d082a884 *man/nullevels.wst.rd f910919846ac906b79049a7cba49bc69 *man/numtonv.rd 33c38d740a01362745884597cf625331 *man/nv.object.rd 5f788af84a450cd860c4ce182b6bc246 *man/plot.imwd.rd 069668e363fd159deb85d2f1c31aa10e *man/plot.irregwd.rd 
fc6533efc0857909722da604180dc07d *man/plot.mwd.rd 48955dfa401214ea3a6b4d4aba8863bb *man/plot.nvwp.rd deea73bf8bd25385f6d5b1fa5db587c4 *man/plot.wd.rd 8628b2dcf901d710bcbb0551eac2d5de *man/plot.wp.rd 93de4cc22e9bfe448d77f499fdb99acf *man/plot.wst.rd 20555002191d426883c0ec28d72744c0 *man/plot.wst2D.rd 30e71754457d62d4e39bc296aff29f94 *man/plotdenwd.rd ff364de4483194b3bb179336df69fc6b *man/plotpkt.rd d813b8964a1bcadda85f8c158f659060 *man/print.BP.rd 26eb624ab2284c2a32206618789af152 *man/print.imwd.rd 2a84a315cad0880052d74ce7558b2db1 *man/print.imwdc.rd eec3d6dfd750069db82ba2d415d2e038 *man/print.mwd.rd d50c74c85b9cdd32a88a1fa8f33f563a *man/print.nv.rd ad77c9d68c33f233fb9b349fbdc6eb3a *man/print.nvwp.rd 71af87d7549a34bff2d18a18dd4c08af *man/print.w2d.rd e73e70236ce944b78c398cc7ed74653f *man/print.w2m.rd af072911347b90d19dca0eeb0590c30f *man/print.wd.rd 8d194509b22bb39d5761c7de670a7e4b *man/print.wd3D.rd 580ede0b33a0457468e50b78bf542f97 *man/print.wp.rd d88f3c8a36057d99628f3ae7ca027509 *man/print.wpst.rd 31b714a2b4f22001404edbb8b887be45 *man/print.wpstCL.rd 4e93bae7f15f6c34af32f469f72448f5 *man/print.wpstDO.rd 72a46b1a70ba766799f17e5bc0f64d87 *man/print.wpstRO.rd b274412db7a839381808e562fae8ba4f *man/print.wst.rd f2d69b8aab002aa93b43f77a9ead5c9b *man/print.wst2D.rd ba1a5a19baf1211a1e9a399714ce066a *man/putC.mwd.rd 04e19bab62cd7876a52c9fd43b4aa3e4 *man/putC.rd 787e00694b9ab4c0bedeac9fb098f7d4 *man/putC.wd.rd 1e19c24cf956e166938d5cb3dbd5bc2c *man/putC.wp.rd 76edd471c92f9a2d980ade0b83c18138 *man/putC.wst.rd c8d7f44b4b1dd202ebc219ec1886befe *man/putD.mwd.rd fb45b3d314a7b155b6499ed111c64f7d *man/putD.rd aa577417cf2f829671dee9394f142594 *man/putD.wd.rd 149da3eeb737ead068e497ed8a75ff4e *man/putD.wd3D.rd 04e1f6a57c98c0b3e5d2ad0654082e03 *man/putD.wp.rd 60d7a85a8b59917d85044d011295a83a *man/putD.wst.rd f823110934c54b091015319445283b01 *man/putDwd3Dcheck.rd 409f27ab64da06b3e70baae31ff634fb *man/putpacket.rd 6ae10d65cc6f4549d82ca7c2828c890a *man/putpacket.wp.rd db5e89a4bf3f5efdddd7299fa45f2af8 *man/putpacket.wst.rd 3653dd0085e0dc4f545948cf570b81d1 *man/putpacket.wst2D.rd b4a2102e9d79d6d407516665ff5ad09f *man/rcov.rd 43765afb4ae7c0e12797b28b1cc22b10 *man/rfft.rd 714fee9f4daf8e212892dd75442adf33 *man/rfftinv.rd ceaea95fcef81a691a0e77bc7ae72cee *man/rfftwt.rd a6d755730f07285c368a03c099601a3f *man/rm.det.rd e7d2ee467a678b6671682da2aa053a98 *man/rmget.rd ed289e2891f7782297eec86b23b86df9 *man/rmname.rd cff96e2876733053f066925fee7e2647 *man/rotateback.rd 1b2a04f6c60bdc00429ce0a38778d84b *man/rsswav.rd d0979e7b609eacf252d5797cbf8ea4b8 *man/simchirp.rd 49e74c1d815f0fad891f82a8ed33ef33 *man/ssq.rd edc1606b23a8108c2f77ef3054c6b8d0 *man/summary.imwd.rd d08dd10a8b6ba62c6083564ff3fc1871 *man/summary.imwdc.rd 97b41f8ed86d1ce913f64080d7ee2b4a *man/summary.mwd.rd 147e19b334ba4f0299362d2d3c30028e *man/summary.wd.rd 576a20518ec1a2eacd01ed5237b562e9 *man/summary.wd3D.rd a6c10d2bbd5fa425a73100a3bed24150 *man/summary.wp.rd 83618edf151efb3b6f49367228b72ec1 *man/summary.wpst.rd f1c9e02cc6a4650a041eb2cc5b5ad57c *man/summary.wst.rd 2d83088840cac45915e355dd0095981e *man/summary.wst2D.rd e70a9e8d5519ef5268f054c792480e79 *man/support.rd a26a530d2048623ee9b05ed035d3c06a *man/sure.rd d74e4f513be2038aa23f14741e7d60c7 *man/teddy.rd 13f62c72f8e0895cbbb65461bae1ff46 *man/test.dataCT.rd 4c90169a7bb30d4b55d9b9a46b3cc6e6 *man/threshold.imwd.rd dcdecc661067a4dd1c3c6666f55ef638 *man/threshold.imwdc.rd c18ba4cbe0dcb89acad157ad4f924fa1 *man/threshold.irregwd.rd 46749478ae2bd4becd589939500501e3 *man/threshold.mwd.rd 
a2c872595d138ffa1f0c806bf7e46f0a *man/threshold.rd 5b44e6605b07b0d0c6ae1ae4bb3b5e08 *man/threshold.wd.rd 48fe76aba8cfa16d9fa227e777a9658c *man/threshold.wd3D.rd aceda5408563cd853ee8761ff9d2a94f *man/threshold.wp.rd 8c1e668df2ac587aed71daf59162b5e6 *man/threshold.wst.rd 75ef6f53b32ced439c775b7b115b5243 *man/tpwd.rd 8fdbab0ec27e436c0b941287c31be4b0 *man/tpwr.rd 0496dc711f59916f18693c3b90e73a63 *man/uncompress.default.rd b41874533ae7b27e6c862a5f21af1b72 *man/uncompress.imwdc.rd 724d037aef76d70432dc29ffab1675a1 *man/uncompress.rd 4847e69ed6259d72f612799e6e7cfc74 *man/wavegrow.rd e5cb33f434860c44548915813c3ad578 *man/wavethresh-package.Rd 1a54c29a534251bce44f4b02a6f5ab36 *man/wd.dh.rd 38438d163d9cd7212e89f533b83589fa *man/wd.int.rd 09e8ff400d8cd271ca99acebe166e5a6 *man/wd.object.rd 36b6238c5d251a2d7e759f5af4fd3834 *man/wd.rd faf1e23994c4c9761fa4d507738f754a *man/wd3D.object.rd 6aac5e79086fab0d9931144c67704ddd *man/wd3D.rd c6522b965735c2c29ddbabef464eb68f *man/wp.object.rd f151a30bf6171570cd058fb7232e3bf6 *man/wp.rd 1de0a7b117129e7fb0690176f946d9a9 *man/wpst.rd 90a98a0a360df2acd57f49dd4f5446b5 *man/wpst2discr.rd 1713ed4621e69351987392ca755bf9d3 *man/wpst2m.rd 81d2c7bb948f88399c2315bf106096a1 *man/wpstCLASS.rd 8fb63fc7b809792be3d1e46bfd1bdd1c *man/wpstREGR.rd 02e99db64a7e913251c08f1eed608d2c *man/wr.int.rd 6922698db86d8b4f4c970241bea7a315 *man/wr.mwd.rd 7ae257375815fce4b9409cf1f1942f33 *man/wr.rd c206063c8089469e1041f8cb1e68f358 *man/wr.wd.rd ec41ee4856f172b8c5a286419ba63e44 *man/wr3D.rd 76b33540cffeff3fb17a619aeba78554 *man/wst.object.rd 30e1375f6897b023b5148059ed71821a *man/wst.rd f2d3b42365b61831c8b928b3ca2d6eee *man/wst2D.object.rd 82c0a1b61471ae5bddf79a0e01da6e33 *man/wst2D.rd 2a4febcd4ef95d2b3e73b578846606b4 *man/wstCV.rd d954d49fecfe366e6d4847a048e95c00 *man/wstCVl.rd 9e4fd5267d40a06aeaa7ae24e804ca48 *man/wvcvlrss.rd ab14464534eb79a1be36c3cfa3ce3314 *man/wvmoments.rd 91f6afc099dd0dda34b0c77e278c4884 *man/wvrelease.rd 3c737a736391895bd81c119099e235c8 *src/WAVDE.c db677cce4e7d5ad10366ba732b1703b3 *src/cthreb.c e5fdd9a88d8590e2c5103ae7e84ce19c *src/functions.c 4e23b8a7a57845c1591ecf789f63177e *src/wavethresh_init.c wavethresh/inst/0000755000176200001440000000000014334426560013416 5ustar liggesuserswavethresh/inst/CHANGES0000644000176200001440000001373414334426560014421 0ustar liggesusers4.5-1 Fixed memory leak in av_basis which is called by AvBasis.wst 4.6 Changed PsiJ. Added new option verbose, so that the information messages that previously were always printed out can now be suppressed. Also, changed default value of OPLENGTH to be 100000 Changed wvrelease. Changed the previous two uses of "cat" to now use "message", which can be suppressed. This helps the package knitr Changed draw.default. Added a type argument. Changed nlevels to nlevelsWT. This was because nlevels clashed with a function of the same name in another package to do with factor levels. 4.6.1 Merged the contents of the cthresh package into wavethresh. Keeping the two as separate packages was causing big problems. 4.6.2 Minor bugfixes. In CWCV... Changed the argument "interptype" in "CWCV" to "noise". Sometimes the "normal" argument causes the routine not to terminate. In WaveletCV... Two function calls to "threshold" needed the argument policy="universal" added. This is because they were using the "sure" policy by default and the routine required a universal threshold. Note, people would have probably been using CWCV so this error would not have been noticed much. 
The bug comes to the fore when one changes the threshold type from the default "soft" to "hard", as SURE thresholding can't be computed with the latter.

In rsswav... The function was returning a multi-argument, which is no longer allowed; it has been changed so that a list is returned.

In PsiJ and PsiJmat... The default value of the OPLENGTH argument has been increased to 10^7.

In PsiJ, ipndacw, rmget... Various matrices and objects now get stored in an environment (WTEnv). So, invocation of one of these functions *the first time* will result in the object being computed from scratch and stored. Repeat invocation will use the stored objects, saving computing time. This is different to previous versions of WaveThresh. In previous versions the stored objects got stored in your home R directory (ok .GlobalEnv) without your permission and without your knowledge (unless you looked) - but they were useful. The previous version, though, did have the advantage that the stored objects persisted across different sessions of R, or could be used if you attached another directory which contained the stored objects. I'd like to see how things go with the current version. If continued recomputing is a pain then let me know and I'll think up some different model.

4.6.3 Made some corrections after running valgrind. See comments in code for details - search on Valgrind. In function evalF, SFDE6, SFDE5, PLDE2 (these latter functions were also modified to catch and report on failures of memory allocation and also to properly free memory once it had been used).

4.6.4 The h_n quadrature mirror filter coefficients for the (little-used) Littlewood-Paley wavelet have been changed so that they sum to \sqrt(2) and their sum of squares is 1. This is to bring them in line with the Daubechies compactly supported wavelets.

4.6.5 Changed CWaveletCV to count the number of iterations in the main iterative process (using the counter iterations) and to check that it is always less than the supplied argument maxits. This is so that calls to this routine (e.g. from the much-used threshold.wd using the "cv" policy) do not run on without limit. The R code CWCV and threshold.wd have also been changed to take account of the new maxits and cvmaxits arguments, and the associated help pages have been changed.

4.6.6 Bugfix in plot.mwd found by Sebastian Muller. This was caused by a reference to nlevels(), which is not a WaveThresh function anymore (its name got changed to nlevelsWT() in version 4.6). This call was accidentally left over. Also took the opportunity to bring the DESCRIPTION file up to date.

4.6.7 Removed all references to the tsp function. This was a compatibility fix in R to relate to S, but now causes problems.

4.6.8 Added Coiflets to the filter.select function and updated the help file (thanks to Anestis Antoniadis; I could have sworn I put these in before, but apparently not). Fixed a bug. This bug only manifested itself in Windows code. In Best1DCols an operation occurs which compares all wavelet packets to the groups vector, to identify which packets are similar enough to the groups vector for further consideration. The first wavelet packet, though, is always a constant vector and hence the correlation is always zero (unless the groups vector was itself constant, but then this is not an interesting case). Now, we set this first value equal to zero.
Previously, the correlation was computed, but the denominator involves calculating the standard deviation of the constant vector, which is zero; this causes a division-by-zero warning and returns an NA which can't then be handled. Now, setting the correlation for this entry to be zero is correct and bypasses the problem (see the short R sketch appended at the end of this file).

4.6.9 Added sanity checks to the arguments of makegrid. The lengths of the vector arguments t and y should be the same. However, the function did not check this, and erroneous NA values appeared when the function was called on vectors of different lengths. The function now checks and stops if the vectors are of different lengths. Additionally, a check that the gridn number is dyadic is performed, stopping the function if it is not.

Changed the maintainer email address to g.nason@imperial.ac.uk.

Added routines to register native routines (src/wavethresh_init.c).

Bugfix on line 5494 of functions.c. Line band1- changed to band1--, as band1 needs to be successively decremented (as with band2++ later). This is in the computec C function, which gets called by irregwd.

4.7.0 Changed the few references from Sfloat to double and Sint to int. Changed lines in function.r that were comparing the output of class() to a string to use the inherits function instead.

4.7.1 Inserted C prototypes (in response to a package update request from the R team).

4.7.2 Corrected the typing of the wpCmnv function, which was wrong in wavethresh_init.c.
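The following short R sketch is not part of the package sources; it is a minimal illustration (with made-up object names) of the constant-packet correlation problem described under 4.6.8, together with the kind of guard that avoids it:

# A constant "wavelet packet" has zero standard deviation, so cor()
# issues a "standard deviation is zero" warning and returns NA.
pkt    <- rep(1, 8)                    # hypothetical first (constant) packet
groups <- c(1, 1, 2, 2, 1, 2, 1, 2)    # hypothetical groups vector
cor(pkt, groups)                       # NA, with a warning

# Guard in the spirit of the 4.6.8 fix: treat a constant packet as having
# zero association with the groups vector instead of propagating NA.
safe.cor <- function(x, y) if (sd(x) == 0 || sd(y) == 0) 0 else cor(x, y)
safe.cor(pkt, groups)                  # 0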