forecast/NAMESPACE

# Generated by roxygen2: do not edit by hand

S3method("[",msts)
S3method(accuracy,default)
S3method(accuracy,mforecast)
S3method(as.Date,timeDate)
S3method(as.character,Arima)
S3method(as.character,bats)
S3method(as.character,ets)
S3method(as.character,tbats)
S3method(as.data.frame,forecast)
S3method(as.data.frame,mforecast)
S3method(as.ts,forecast)
S3method(autolayer,forecast)
S3method(autolayer,mforecast)
S3method(autolayer,msts)
S3method(autolayer,mts)
S3method(autolayer,ts)
S3method(autoplot,Arima)
S3method(autoplot,StructTS)
S3method(autoplot,acf)
S3method(autoplot,ar)
S3method(autoplot,bats)
S3method(autoplot,decomposed.ts)
S3method(autoplot,ets)
S3method(autoplot,forecast)
S3method(autoplot,mforecast)
S3method(autoplot,mpacf)
S3method(autoplot,mstl)
S3method(autoplot,msts)
S3method(autoplot,mts)
S3method(autoplot,seas)
S3method(autoplot,splineforecast)
S3method(autoplot,stl)
S3method(autoplot,tbats)
S3method(autoplot,ts)
S3method(coef,ets)
S3method(fitted,ARFIMA)
S3method(fitted,Arima)
S3method(fitted,ar)
S3method(fitted,bats)
S3method(fitted,ets)
S3method(fitted,forecast_ARIMA)
S3method(fitted,lagwalk)
S3method(fitted,modelAR)
S3method(fitted,nnetar)
S3method(fitted,tbats)
S3method(fitted,tslm)
S3method(forecast,Arima)
S3method(forecast,HoltWinters)
S3method(forecast,StructTS)
S3method(forecast,ar)
S3method(forecast,baggedModel)
S3method(forecast,bats)
S3method(forecast,default)
S3method(forecast,ets)
S3method(forecast,forecast)
S3method(forecast,forecast_ARIMA)
S3method(forecast,fracdiff)
S3method(forecast,lagwalk)
S3method(forecast,lm)
S3method(forecast,mlm)
S3method(forecast,modelAR)
S3method(forecast,mstl)
S3method(forecast,mts)
S3method(forecast,nnetar)
S3method(forecast,stl)
S3method(forecast,stlm)
S3method(forecast,tbats)
S3method(forecast,ts)
S3method(forecast,varest)
S3method(fortify,ts)
S3method(getResponse,Arima)
S3method(getResponse,ar)
S3method(getResponse,baggedModel)
S3method(getResponse,bats)
S3method(getResponse,default)
S3method(getResponse,fracdiff)
S3method(getResponse,lm)
S3method(getResponse,mforecast)
S3method(getResponse,tbats)
S3method(head,ts)
S3method(logLik,ets)
S3method(nobs,ets)
S3method(plot,Arima)
S3method(plot,ar)
S3method(plot,bats)
S3method(plot,ets)
S3method(plot,forecast)
S3method(plot,mforecast)
S3method(plot,mpacf)
S3method(plot,splineforecast)
S3method(plot,tbats)
S3method(predict,default)
S3method(print,CVar)
S3method(print,OCSBtest)
S3method(print,baggedModel)
S3method(print,bats)
S3method(print,ets)
S3method(print,forecast)
S3method(print,forecast_ARIMA)
S3method(print,lagwalk)
S3method(print,mforecast)
S3method(print,modelAR)
S3method(print,msts)
S3method(print,nnetar)
S3method(print,nnetarmodels)
S3method(print,summary.Arima)
S3method(print,summary.ets)
S3method(print,summary.forecast)
S3method(print,summary.mforecast)
S3method(print,tbats)
S3method(residuals,ARFIMA)
S3method(residuals,Arima)
S3method(residuals,ar)
S3method(residuals,bats)
S3method(residuals,ets)
S3method(residuals,forecast)
S3method(residuals,forecast_ARIMA)
S3method(residuals,nnetar)
S3method(residuals,stlm)
S3method(residuals,tbats)
S3method(residuals,tslm)
S3method(seasadj,decomposed.ts)
S3method(seasadj,mstl)
S3method(seasadj,seas)
S3method(seasadj,stl)
S3method(seasadj,tbats)
S3method(simulate,Arima)
S3method(simulate,ar)
S3method(simulate,ets)
S3method(simulate,fracdiff)
S3method(simulate,lagwalk)
S3method(simulate,modelAR)
S3method(simulate,nnetar)
S3method(simulate,tbats)
S3method(subset,msts)
S3method(subset,ts)
S3method(summary,Arima)
S3method(summary,ets)
S3method(summary,forecast)
S3method(summary,mforecast)
S3method(summary,tslm)
S3method(tail,ts)
S3method(window,msts)
export("%>%")
export(Acf)
export(Arima)
export(BoxCox)
export(BoxCox.lambda)
export(CV)
export(CVar)
export(Ccf)
export(GeomForecast)
export(InvBoxCox)
export(Pacf)
export(StatForecast)
export(accuracy)
export(arfima)
export(arima.errors)
export(arimaorder)
export(auto.arima)
export(autolayer)
export(autoplot)
export(baggedETS)
export(baggedModel)
export(bats)
export(bizdays)
export(bld.mbb.bootstrap)
export(checkresiduals)
export(croston)
export(dm.test)
export(dshw)
export(easter)
export(ets)
export(findfrequency)
export(forecast)
export(forecast.ets)
export(fourier)
export(fourierf)
export(geom_forecast)
export(getResponse)
export(ggAcf)
export(ggCcf)
export(ggPacf)
export(gghistogram)
export(gglagchull)
export(gglagplot)
export(ggmonthplot)
export(ggseasonplot)
export(ggsubseriesplot)
export(ggtaperedacf)
export(ggtaperedpacf)
export(ggtsdisplay)
export(holt)
export(hw)
export(is.Arima)
export(is.acf)
export(is.baggedModel)
export(is.bats)
export(is.constant)
export(is.ets)
export(is.forecast)
export(is.mforecast)
export(is.modelAR)
export(is.nnetar)
export(is.nnetarmodels)
export(is.splineforecast)
export(is.stlm)
export(ma)
export(meanf)
export(modelAR)
export(monthdays)
export(mstl)
export(msts)
export(na.interp)
export(naive)
export(ndiffs)
export(nnetar)
export(nsdiffs)
export(ocsb.test)
export(remainder)
export(rwf)
export(seasadj)
export(seasonal)
export(seasonaldummy)
export(seasonaldummyf)
export(seasonplot)
export(ses)
export(sindexf)
export(snaive)
export(splinef)
export(stlf)
export(stlm)
export(taperedacf)
export(taperedpacf)
export(tbats)
export(tbats.components)
export(thetaf)
export(trendcycle)
export(tsCV)
export(tsclean)
export(tsdisplay)
export(tslm)
export(tsoutliers)
import(Rcpp)
import(parallel)
importFrom(colorspace,sequential_hcl)
importFrom(fracdiff,diffseries)
importFrom(fracdiff,fracdiff)
importFrom(fracdiff,fracdiff.sim)
importFrom(generics,accuracy)
importFrom(generics,forecast)
importFrom(ggplot2,autoplot)
importFrom(ggplot2,fortify)
importFrom(grDevices,gray)
importFrom(grDevices,heat.colors)
importFrom(grDevices,nclass.FD)
importFrom(grDevices,palette)
importFrom(graphics,abline)
importFrom(graphics,axis)
importFrom(graphics,grid)
importFrom(graphics,hist)
importFrom(graphics,layout)
importFrom(graphics,lines)
importFrom(graphics,mtext)
importFrom(graphics,par)
importFrom(graphics,plot)
importFrom(graphics,points)
importFrom(graphics,polygon)
importFrom(graphics,text)
importFrom(graphics,title)
importFrom(lmtest,bgtest)
importFrom(magrittr,"%>%")
importFrom(nnet,nnet)
importFrom(stats,"tsp<-")
importFrom(stats,AIC)
importFrom(stats,BIC)
importFrom(stats,Box.test)
importFrom(stats,acf)
importFrom(stats,aggregate)
importFrom(stats,approx)
importFrom(stats,ar)
importFrom(stats,arima)
importFrom(stats,arima.sim)
importFrom(stats,as.formula)
importFrom(stats,as.ts)
importFrom(stats,complete.cases)
importFrom(stats,cycle)
importFrom(stats,decompose)
importFrom(stats,diffinv)
importFrom(stats,end)
importFrom(stats,extractAIC)
importFrom(stats,filter)
importFrom(stats,fitted)
importFrom(stats,formula)
importFrom(stats,frequency)
importFrom(stats,hatvalues)
importFrom(stats,is.mts)
importFrom(stats,is.ts)
importFrom(stats,ksmooth)
importFrom(stats,lm)
importFrom(stats,loess)
importFrom(stats,logLik)
importFrom(stats,lsfit)
importFrom(stats,median)
importFrom(stats,model.frame)
importFrom(stats,na.contiguous)
importFrom(stats,na.exclude)
importFrom(stats,na.omit)
importFrom(stats,na.pass)
importFrom(stats,napredict)
importFrom(stats,nobs)
importFrom(stats,optim)
importFrom(stats,optimize)
importFrom(stats,pf)
importFrom(stats,plot.ts)
importFrom(stats,poly)
importFrom(stats,predict)
importFrom(stats,pt)
importFrom(stats,qnorm)
importFrom(stats,qt)
importFrom(stats,quantile)
importFrom(stats,reformulate)
importFrom(stats,residuals)
importFrom(stats,rnorm)
importFrom(stats,runif)
importFrom(stats,sd)
importFrom(stats,simulate)
importFrom(stats,smooth.spline)
importFrom(stats,spec.ar)
importFrom(stats,start)
importFrom(stats,stl)
importFrom(stats,supsmu)
importFrom(stats,terms)
importFrom(stats,time)
importFrom(stats,ts)
importFrom(stats,tsdiag)
importFrom(stats,tsp)
importFrom(stats,var)
importFrom(stats,window)
importFrom(timeDate,Easter)
importFrom(timeDate,as.Date.timeDate)
importFrom(timeDate,as.timeDate)
importFrom(timeDate,difftimeDate)
importFrom(timeDate,isBizday)
importFrom(tseries,adf.test)
importFrom(tseries,kpss.test)
importFrom(tseries,pp.test)
importFrom(urca,ur.df)
importFrom(urca,ur.kpss)
importFrom(urca,ur.pp)
importFrom(utils,head)
importFrom(utils,head.matrix)
importFrom(utils,methods)
importFrom(utils,packageVersion)
importFrom(utils,tail)
importFrom(utils,tail.matrix)
importFrom(zoo,as.Date)
importFrom(zoo,as.yearqtr)
importFrom(zoo,rollmean)
useDynLib(forecast, .registration = TRUE)

forecast/README.md

forecast
======================

[![R-CMD-check](https://github.com/robjhyndman/forecast/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/robjhyndman/forecast/actions/workflows/R-CMD-check.yaml)
[![CRAN_Status_Badge](https://www.r-pkg.org/badges/version/forecast)](https://cran.r-project.org/package=forecast)
[![Downloads](https://cranlogs.r-pkg.org/badges/forecast)](https://cran.r-project.org/package=forecast)
[![Licence](https://img.shields.io/badge/licence-GPL--3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0.en.html)

The R package *forecast* provides methods and tools for displaying and analysing univariate time series forecasts, including exponential smoothing via state space models and automatic ARIMA modelling.

A complementary forecasting package is the [fable](http://fable.tidyverts.org/) package, which implements many of the same models within a tidyverse framework.

## Installation

You can install the **stable** version from [CRAN](https://cran.r-project.org/package=forecast):
```s
install.packages('forecast', dependencies = TRUE)
```

You can install the **development** version from [Github](https://github.com/robjhyndman/forecast):

```s
# install.packages("remotes")
remotes::install_github("robjhyndman/forecast")
```

## Usage

```s
library(forecast)
library(ggplot2)

# ETS forecasts
USAccDeaths %>%
  ets() %>%
  forecast() %>%
  autoplot()

# Automatic ARIMA forecasts
WWWusage %>%
  auto.arima() %>%
  forecast(h=20) %>%
  autoplot()

# ARFIMA forecasts
library(fracdiff)
x <- fracdiff.sim( 100, ma=-.4, d=.3)$series
arfima(x) %>%
  forecast(h=30) %>%
  autoplot()

# Forecasting with STL
USAccDeaths %>%
  stlm(modelfunction=ar) %>%
  forecast(h=36) %>%
  autoplot()

AirPassengers %>%
  stlf(lambda=0) %>%
  autoplot()

USAccDeaths %>%
  stl(s.window='periodic') %>%
  forecast() %>%
  autoplot()

# TBATS forecasts
USAccDeaths %>%
  tbats() %>%
  forecast() %>%
  autoplot()

taylor %>%
  tbats() %>%
  forecast() %>%
  autoplot()
```

## For more information

* Get started in forecasting with the online textbook at http://OTexts.org/fpp2/
* Read the Hyndsight blog at https://robjhyndman.com/hyndsight/
* Ask forecasting questions on http://stats.stackexchange.com/tags/forecasting
* Ask R questions on http://stackoverflow.com/tags/forecasting+r
* Join the International Institute of Forecasters: http://forecasters.org/

## License

This package is free and open source software, licensed under GPL-3.

forecast/data/gold.rda
forecast/data/woolyrnq.rda
forecast/data/taylor.rda
forecast/data/gas.rda
forecast/data/wineind.rda
[binary .rda data files; contents omitted]
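The bundled datasets above can be inspected directly once the package is loaded. A minimal sketch (the comments on seasonal periods reflect the dataset documentation later in this archive):

```r
library(forecast)

# Each dataset ships as a ts object; frequency() reveals the seasonal period
frequency(gold)     # 1  (daily gold prices, treated as non-seasonal)
frequency(woolyrnq) # 4  (quarterly)
frequency(wineind)  # 12 (monthly)

# taylor is a half-hourly electricity demand series with multiple seasonal periods
plot(taylor)
```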
forecast/man/forecast.lm.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lm.R
\name{forecast.lm}
\alias{forecast.lm}
\title{Forecast a linear model with possible time series components}
\usage{
\method{forecast}{lm}(
  object,
  newdata,
  h = 10,
  level = c(80, 95),
  fan = FALSE,
  lambda = object$lambda,
  biasadj = NULL,
  ts = TRUE,
  ...
)
}
\arguments{
\item{object}{Object of class "lm", usually the result of a call to \code{\link[stats]{lm}} or \code{\link{tslm}}.}

\item{newdata}{An optional data frame in which to look for variables with which to predict. If omitted, it is assumed that the only variables are trend and season, and \code{h} forecasts are produced.}

\item{h}{Number of periods for forecasting. Ignored if \code{newdata} is present.}

\item{level}{Confidence level for prediction intervals.}

\item{fan}{If \code{TRUE}, level is set to seq(51,99,by=3). This is suitable for fan plots.}

\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.}

\item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.}

\item{ts}{If \code{TRUE}, the forecasts will be treated as time series provided the original data is a time series; the \code{newdata} will be interpreted as related to the subsequent time periods. If \code{FALSE}, any time series attributes of the original data will be ignored.}

\item{...}{Other arguments passed to \code{\link[stats]{predict.lm}()}.}
}
\value{
An object of class "\code{forecast}".

The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals.

The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{forecast.lm}.

An object of class \code{"forecast"} is a list containing at least the following elements:
\item{model}{A list containing information about the fitted model}
\item{method}{The name of the forecasting method as a character string}
\item{mean}{Point forecasts as a time series}
\item{lower}{Lower limits for prediction intervals}
\item{upper}{Upper limits for prediction intervals}
\item{level}{The confidence values associated with the prediction intervals}
\item{x}{The historical data for the response variable.}
\item{residuals}{Residuals from the fitted model. That is x minus fitted values.}
\item{fitted}{Fitted values}
}
\description{
\code{forecast.lm} is used to predict linear models, especially those involving trend and seasonality components.
}
\details{
\code{forecast.lm} is largely a wrapper for \code{\link[stats]{predict.lm}()} except that it allows variables "trend" and "season" which are created on the fly from the time series characteristics of the data. Also, the output is reformatted into a \code{forecast} object.
}
\examples{
y <- ts(rnorm(120,0,3) + 1:120 + 20*sin(2*pi*(1:120)/12), frequency=12)
fit <- tslm(y ~ trend + season)
plot(forecast(fit, h=20))
}
\seealso{
\code{\link{tslm}}, \code{\link[stats]{lm}}.
}
\author{
Rob J Hyndman
}
\keyword{stats}
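A minimal sketch of the \code{newdata} argument described above, using an external regressor rather than just trend and season. The variable names (`ad`, `sales`) and the data are illustrative assumptions, not part of the package sources:

```r
library(forecast)

# Illustrative data: sales driven by advertising spend (both invented here)
ad    <- rnorm(100, mean = 10)
sales <- ts(5 + 2 * ad + rnorm(100))
fit   <- tslm(sales ~ ad)

# Future values of the regressor must be supplied via newdata
fc <- forecast(fit, newdata = data.frame(ad = rep(10, 12)))
plot(fc)
```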
forecast/man/ggmonthplot.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R
\name{ggmonthplot}
\alias{ggmonthplot}
\alias{ggsubseriesplot}
\title{Create a seasonal subseries ggplot}
\usage{
ggmonthplot(x, labels = NULL, times = time(x), phase = cycle(x), ...)

ggsubseriesplot(x, labels = NULL, times = time(x), phase = cycle(x), ...)
}
\arguments{
\item{x}{a time series object (type \code{ts}).}

\item{labels}{A vector of labels to use for each 'season'}

\item{times}{A vector of times for each observation}

\item{phase}{A vector of seasonal components}

\item{\dots}{Not used (for consistency with monthplot)}
}
\value{
Returns an object of class \code{ggplot}.
}
\description{
Plots a subseries plot using ggplot. Each season is plotted as a separate mini time series. The blue lines represent the mean of the observations within each season.
}
\details{
The \code{ggmonthplot} function is simply a wrapper for \code{ggsubseriesplot} as a convenience for users familiar with \code{\link[stats]{monthplot}}.
}
\examples{
ggsubseriesplot(AirPassengers)
ggsubseriesplot(woolyrnq)
}
\seealso{
\code{\link[stats]{monthplot}}
}
\author{
Mitchell O'Hara-Wild
}

forecast/man/bizdays.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calendar.R
\name{bizdays}
\alias{bizdays}
\title{Number of trading days in each season}
\usage{
bizdays(x, FinCenter = c("New York", "London", "NERC", "Toronto", "Zurich"))
}
\arguments{
\item{x}{Monthly or quarterly time series}

\item{FinCenter}{Major financial center.}
}
\value{
Time series
}
\description{
Returns the number of trading days in each month or quarter of the observed time period in a major financial center.
}
\details{
Useful for trading-day length adjustments. For more on how "business days" are defined, please refer to \code{\link[timeDate]{isBizday}}.
}
\examples{
x <- ts(rnorm(30), start = c(2013, 2), frequency = 12)
bizdays(x, FinCenter = "New York")
}
\seealso{
\code{\link[forecast]{monthdays}}
}
\author{
Earo Wang
}
\keyword{ts}
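A hedged sketch of how \code{bizdays}, together with the \code{easter} regressor documented later in this archive, might enter a regression model as calendar-adjustment covariates. This combination is an assumption for illustration, not an example from the package sources:

```r
library(forecast)

# Trading-day and Easter effects as regressors in a linear time series model
td  <- bizdays(wineind, FinCenter = "New York")
eas <- easter(wineind, easter.mon = TRUE)
fit <- tslm(wineind ~ trend + season + td + eas)
summary(fit)
```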
forecast/man/forecast.bats.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecastBATS.R, R/forecastTBATS.R
\name{forecast.bats}
\alias{forecast.bats}
\alias{forecast.tbats}
\title{Forecasting using BATS and TBATS models}
\usage{
\method{forecast}{bats}(object, h, level = c(80, 95), fan = FALSE, biasadj = NULL, ...)

\method{forecast}{tbats}(object, h, level = c(80, 95), fan = FALSE, biasadj = NULL, ...)
}
\arguments{
\item{object}{An object of class "\code{bats}". Usually the result of a call to \code{\link{bats}}.}

\item{h}{Number of periods for forecasting. Default value is twice the largest seasonal period (for seasonal data) or ten (for non-seasonal data).}

\item{level}{Confidence level for prediction intervals.}

\item{fan}{If TRUE, level is set to \code{seq(51,99,by=3)}. This is suitable for fan plots.}

\item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If TRUE, point forecasts and fitted values are mean forecast. Otherwise, these points can be considered the median of the forecast densities.}

\item{...}{Other arguments, currently ignored.}
}
\value{
An object of class "\code{forecast}".

The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals.

The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{forecast.bats}.

An object of class \code{"forecast"} is a list containing at least the following elements:
\item{model}{A copy of the \code{bats} object}
\item{method}{The name of the forecasting method as a character string}
\item{mean}{Point forecasts as a time series}
\item{lower}{Lower limits for prediction intervals}
\item{upper}{Upper limits for prediction intervals}
\item{level}{The confidence values associated with the prediction intervals}
\item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).}
\item{residuals}{Residuals from the fitted model.}
\item{fitted}{Fitted values (one-step forecasts)}
}
\description{
Forecasts \code{h} steps ahead with a BATS model. Prediction intervals are also produced.
}
\examples{
\dontrun{
fit <- bats(USAccDeaths)
plot(forecast(fit))

taylor.fit <- bats(taylor)
plot(forecast(taylor.fit))
}
}
\references{
De Livera, A.M., Hyndman, R.J., & Snyder, R. D. (2011), Forecasting time series with complex seasonal patterns using exponential smoothing, \emph{Journal of the American Statistical Association}, \bold{106}(496), 1513-1527.
}
\seealso{
\code{\link{bats}}, \code{\link{tbats}}, \code{\link{forecast.ets}}.
}
\author{
Slava Razbash and Rob J Hyndman
}
\keyword{ts}
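A short sketch showing how the documented elements of the returned \code{forecast} object can be accessed directly:

```r
library(forecast)

fit <- bats(USAccDeaths)
fc  <- forecast(fit, h = 24, level = c(80, 95))

head(fc$mean)   # point forecasts, as a time series
head(fc$upper)  # matrix of upper interval limits, one column per level
accuracy(fc)    # in-sample accuracy measures for the fitted model
```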
forecast/man/forecast.HoltWinters.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecast2.R
\name{forecast.HoltWinters}
\alias{forecast.HoltWinters}
\title{Forecasting using Holt-Winters objects}
\usage{
\method{forecast}{HoltWinters}(
  object,
  h = ifelse(frequency(object$x) > 1, 2 * frequency(object$x), 10),
  level = c(80, 95),
  fan = FALSE,
  lambda = NULL,
  biasadj = NULL,
  ...
)
}
\arguments{
\item{object}{An object of class "\code{HoltWinters}". Usually the result of a call to \code{\link[stats]{HoltWinters}}.}

\item{h}{Number of periods for forecasting}

\item{level}{Confidence level for prediction intervals.}

\item{fan}{If TRUE, level is set to seq(51,99,by=3). This is suitable for fan plots.}

\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.}

\item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.}

\item{...}{Other arguments.}
}
\value{
An object of class "\code{forecast}".

The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals.

The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{forecast.HoltWinters}.

An object of class \code{"forecast"} is a list containing at least the following elements:
\item{model}{A list containing information about the fitted model}
\item{method}{The name of the forecasting method as a character string}
\item{mean}{Point forecasts as a time series}
\item{lower}{Lower limits for prediction intervals}
\item{upper}{Upper limits for prediction intervals}
\item{level}{The confidence values associated with the prediction intervals}
\item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).}
\item{residuals}{Residuals from the fitted model.}
\item{fitted}{Fitted values (one-step forecasts)}
}
\description{
Returns forecasts and other information for univariate Holt-Winters time series models.
}
\details{
This function calls \code{\link[stats]{predict.HoltWinters}} and constructs an object of class "\code{forecast}" from the results. It is included for completeness, but \code{\link{ets}} is recommended for use instead of \code{\link[stats]{HoltWinters}}.
}
\examples{
fit <- HoltWinters(WWWusage,gamma=FALSE)
plot(forecast(fit))
}
\seealso{
\code{\link[stats]{predict.HoltWinters}}, \code{\link[stats]{HoltWinters}}.
}
\author{
Rob J Hyndman
}
\keyword{ts}
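The details above recommend \code{ets} over \code{HoltWinters}; a minimal side-by-side sketch, assuming an additive-seasonal specification for both models:

```r
library(forecast)

fit1 <- HoltWinters(USAccDeaths)          # classical Holt-Winters
fit2 <- ets(USAccDeaths, model = "AAA")   # equivalent additive ETS state space model

plot(forecast(fit1, h = 24))
plot(forecast(fit2, h = 24))
```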
forecast/man/bats.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bats.R
\name{bats}
\alias{bats}
\alias{as.character.bats}
\alias{print.bats}
\title{BATS model (Exponential smoothing state space model with Box-Cox transformation, ARMA errors, Trend and Seasonal components)}
\usage{
bats(
  y,
  use.box.cox = NULL,
  use.trend = NULL,
  use.damped.trend = NULL,
  seasonal.periods = NULL,
  use.arma.errors = TRUE,
  use.parallel = length(y) > 1000,
  num.cores = 2,
  bc.lower = 0,
  bc.upper = 1,
  biasadj = FALSE,
  model = NULL,
  ...
)
}
\arguments{
\item{y}{The time series to be forecast. Can be \code{numeric}, \code{msts} or \code{ts}. Only univariate time series are supported.}

\item{use.box.cox}{\code{TRUE/FALSE} indicates whether to use the Box-Cox transformation or not. If \code{NULL} then both are tried and the best fit is selected by AIC.}

\item{use.trend}{\code{TRUE/FALSE} indicates whether to include a trend or not. If \code{NULL} then both are tried and the best fit is selected by AIC.}

\item{use.damped.trend}{\code{TRUE/FALSE} indicates whether to include a damping parameter in the trend or not. If \code{NULL} then both are tried and the best fit is selected by AIC.}

\item{seasonal.periods}{If \code{y} is numeric then seasonal periods can be specified with this parameter.}

\item{use.arma.errors}{\code{TRUE/FALSE} indicates whether to include ARMA errors or not. If \code{TRUE} the best fit is selected by AIC. If \code{FALSE} then the selection algorithm does not consider ARMA errors.}

\item{use.parallel}{\code{TRUE/FALSE} indicates whether or not to use parallel processing.}

\item{num.cores}{The number of parallel processes to be used if using parallel processing. If \code{NULL} then the number of logical cores is detected and all available cores are used.}

\item{bc.lower}{The lower limit (inclusive) for the Box-Cox transformation.}

\item{bc.upper}{The upper limit (inclusive) for the Box-Cox transformation.}

\item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If TRUE, point forecasts and fitted values are mean forecast. Otherwise, these points can be considered the median of the forecast densities.}

\item{model}{Output from a previous call to \code{bats}. If model is passed, this same model is fitted to \code{y} without re-estimating any parameters.}

\item{...}{Additional arguments to be passed to \code{auto.arima} when choosing an ARMA(p, q) model for the errors. (Note that xreg will be ignored, as will any arguments concerning seasonality and differencing, but arguments controlling the values of p and q will be used.)}
}
\value{
An object of class "\code{bats}". The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{bats} and associated functions.

The fitted model is designated BATS(omega, p,q, phi, m1,...,mJ), where omega is the Box-Cox parameter and phi is the damping parameter; the error is modelled as an ARMA(p,q) process and m1,...,mJ list the seasonal periods used in the model.
}
\description{
Fits a BATS model applied to \code{y}, as described in De Livera, Hyndman & Snyder (2011). Parallel processing is used by default to speed up the computations.
}
\examples{
\dontrun{
fit <- bats(USAccDeaths)
plot(forecast(fit))

taylor.fit <- bats(taylor)
plot(forecast(taylor.fit))
}
}
\references{
De Livera, A.M., Hyndman, R.J., & Snyder, R. D. (2011), Forecasting time series with complex seasonal patterns using exponential smoothing, \emph{Journal of the American Statistical Association}, \bold{106}(496), 1513-1527.
}
\author{
Slava Razbash and Rob J Hyndman
}
\keyword{ts}

forecast/man/easter.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calendar.R
\name{easter}
\alias{easter}
\title{Easter holidays in each season}
\usage{
easter(x, easter.mon = FALSE)
}
\arguments{
\item{x}{Monthly or quarterly time series}

\item{easter.mon}{If TRUE, the length of Easter holidays includes Easter Monday.}
}
\value{
Time series
}
\description{
Returns a vector of 0's and 1's, or fractional results if Easter spans March and April in the observed time period. Easter is defined as the days from Good Friday to Easter Sunday inclusively, plus optionally Easter Monday if \code{easter.mon=TRUE}.
}
\details{
Useful for adjusting calendar effects.
}
\examples{
easter(wineind, easter.mon = TRUE)
}
\author{
Earo Wang
}
\keyword{ts}

forecast/man/woolyrnq.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{woolyrnq}
\alias{woolyrnq}
\title{Quarterly production of woollen yarn in Australia}
\format{
Time series data
}
\source{
Time Series Data Library. \url{https://pkg.yangzhuoranyang.com/tsdl/}
}
\usage{
woolyrnq
}
\description{
Quarterly production of woollen yarn in Australia: tonnes. Mar 1965 -- Sep 1994.
}
\examples{
tsdisplay(woolyrnq)
}
\keyword{datasets}
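A sketch of the \code{seasonal.periods} argument described in the \code{bats} documentation above, fitting a numeric series with two explicitly supplied seasonal periods. The periods 48 and 336 are the half-hourly daily and weekly cycles of \code{taylor}; note this fit is slow in practice:

```r
library(forecast)

y <- as.numeric(taylor)  # strip the ts attributes to demonstrate the argument
fit <- bats(y, seasonal.periods = c(48, 336), use.parallel = FALSE)
plot(forecast(fit, h = 336))
```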
forecast/man/plot.bats.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bats.R, R/ggplot.R, R/tbats.R
\name{plot.bats}
\alias{plot.bats}
\alias{autoplot.tbats}
\alias{autoplot.bats}
\alias{plot.tbats}
\title{Plot components from BATS model}
\usage{
\method{plot}{bats}(x, main = "Decomposition by BATS model", ...)

\method{autoplot}{tbats}(object, range.bars = FALSE, ...)

\method{autoplot}{bats}(object, range.bars = FALSE, ...)

\method{plot}{tbats}(x, main = "Decomposition by TBATS model", ...)
}
\arguments{
\item{x}{Object of class \dQuote{bats/tbats}.}

\item{main}{Main title for plot.}

\item{...}{Other plotting parameters passed to \code{\link[graphics]{par}}.}

\item{object}{Object of class \dQuote{bats/tbats}.}

\item{range.bars}{Logical indicating if each plot should have a bar at its right side representing relative size. If NULL, automatic selection takes place.}
}
\value{
None. Function produces a plot.
}
\description{
Produces a plot of the level, slope and seasonal components from a BATS or TBATS model. The plotted components are Box-Cox transformed using the estimated transformation parameter.
}
\examples{
\dontrun{
fit <- tbats(USAccDeaths)
plot(fit)
autoplot(fit, range.bars = TRUE)}
}
\seealso{
\code{\link{bats}},\code{\link{tbats}}
}
\author{
Rob J Hyndman
}
\keyword{hplot}

forecast/man/na.interp.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean.R
\name{na.interp}
\alias{na.interp}
\title{Interpolate missing values in a time series}
\usage{
na.interp(
  x,
  lambda = NULL,
  linear = (frequency(x) <= 1 | sum(!is.na(x)) <= 2 * frequency(x))
)
}
\arguments{
\item{x}{time series}

\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.}

\item{linear}{Should a linear interpolation be used.}
}
\value{
Time series
}
\description{
By default, uses linear interpolation for non-seasonal series. For seasonal series, a robust STL decomposition is first computed. Then a linear interpolation is applied to the seasonally adjusted data, and the seasonal component is added back.
}
\details{
A more general and flexible approach is available using \code{na.approx} in the \code{zoo} package.
}
\examples{
data(gold)
plot(na.interp(gold))
}
\seealso{
\code{\link[forecast]{tsoutliers}}
}
\author{
Rob J Hyndman
}
\keyword{ts}

forecast/man/is.forecast.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecast.R, R/mforecast.R, R/spline.R
\name{is.forecast}
\alias{is.forecast}
\alias{is.mforecast}
\alias{is.splineforecast}
\title{Is an object a particular forecast type?}
\usage{
is.forecast(x)

is.mforecast(x)

is.splineforecast(x)
}
\arguments{
\item{x}{object to be tested}
}
\description{
Returns true if the forecast object is of a particular type.
}
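A minimal sketch of the seasonal interpolation path of \code{na.interp} described above, injecting missing values into a monthly series and comparing the result to the original:

```r
library(forecast)

x <- AirPassengers
x[c(20, 21, 50)] <- NA          # inject some missing observations

plot(na.interp(x))              # STL-based interpolation (series is seasonal)
lines(AirPassengers, col = "red", lty = 2)  # original values for comparison
```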
forecast/man/tsoutliers.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean.R
\name{tsoutliers}
\alias{tsoutliers}
\title{Identify and replace outliers in a time series}
\usage{
tsoutliers(x, iterate = 2, lambda = NULL)
}
\arguments{
\item{x}{time series}

\item{iterate}{the number of iterations required}

\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.}
}
\value{
\item{index}{Indicating the index of outlier(s)}
\item{replacement}{Suggested numeric values to replace identified outliers}
}
\description{
Uses supsmu for non-seasonal series, and a periodic STL decomposition for seasonal series, to identify outliers and estimate their replacements.
}
\examples{
data(gold)
tsoutliers(gold)
}
\references{
Hyndman (2021) "Detecting time series outliers" \url{https://robjhyndman.com/hyndsight/tsoutliers/}.
}
\seealso{
\code{\link[forecast]{na.interp}}, \code{\link[forecast]{tsclean}}
}
\author{
Rob J Hyndman
}
\keyword{ts}
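The \code{seealso} entry above points to \code{tsclean}, which applies the suggested replacements (and interpolates any missing values) in a single step. A minimal sketch:

```r
library(forecast)

# tsclean() combines tsoutliers() and na.interp() into one cleaning pass
plot(gold, col = "grey")
lines(tsclean(gold), col = "blue")
```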
forecast/man/simulate.ets.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.R, R/simulate_tbats.R
\name{simulate.ets}
\alias{simulate.ets}
\alias{simulate.Arima}
\alias{simulate.ar}
\alias{simulate.lagwalk}
\alias{simulate.fracdiff}
\alias{simulate.nnetar}
\alias{simulate.modelAR}
\alias{simulate.tbats}
\title{Simulation from a time series model}
\usage{
\method{simulate}{ets}(
  object,
  nsim = length(object$x),
  seed = NULL,
  future = TRUE,
  bootstrap = FALSE,
  innov = NULL,
  ...
)

\method{simulate}{Arima}(
  object,
  nsim = length(object$x),
  seed = NULL,
  xreg = NULL,
  future = TRUE,
  bootstrap = FALSE,
  innov = NULL,
  lambda = object$lambda,
  ...
)

\method{simulate}{ar}(
  object,
  nsim = object$n.used,
  seed = NULL,
  future = TRUE,
  bootstrap = FALSE,
  innov = NULL,
  ...
)

\method{simulate}{lagwalk}(
  object,
  nsim = length(object$x),
  seed = NULL,
  future = TRUE,
  bootstrap = FALSE,
  innov = NULL,
  lambda = object$lambda,
  ...
)

\method{simulate}{fracdiff}(
  object,
  nsim = object$n,
  seed = NULL,
  future = TRUE,
  bootstrap = FALSE,
  innov = NULL,
  ...
)

\method{simulate}{nnetar}(
  object,
  nsim = length(object$x),
  seed = NULL,
  xreg = NULL,
  future = TRUE,
  bootstrap = FALSE,
  innov = NULL,
  lambda = object$lambda,
  ...
)

\method{simulate}{modelAR}(
  object,
  nsim = length(object$x),
  seed = NULL,
  xreg = NULL,
  future = TRUE,
  bootstrap = FALSE,
  innov = NULL,
  lambda = object$lambda,
  ...
)

\method{simulate}{tbats}(
  object,
  nsim = length(object$y),
  seed = NULL,
  future = TRUE,
  bootstrap = FALSE,
  innov = NULL,
  ...
)
}
\arguments{
\item{object}{An object of class "\code{ets}", "\code{Arima}", "\code{ar}" or "\code{nnetar}".}

\item{nsim}{Number of periods for the simulated series. Ignored if either \code{xreg} or \code{innov} is not \code{NULL}. Otherwise the default is the length of the series used to train the model (or 100 if no data found).}

\item{seed}{Either \code{NULL} or an integer that will be used in a call to \code{\link[base]{set.seed}} before simulating the time series. The default, \code{NULL}, will not change the random generator state.}

\item{future}{Produce sample paths that are future to and conditional on the data in \code{object}. Otherwise simulate unconditionally.}

\item{bootstrap}{Do simulation using resampled errors rather than normally distributed errors or errors provided as \code{innov}.}

\item{innov}{A vector of innovations to use as the error series. Ignored if \code{bootstrap==TRUE}. If not \code{NULL}, the value of \code{nsim} is set to the length of \code{innov}.}

\item{...}{Other arguments, not currently used.}

\item{xreg}{New values of \code{xreg} to be used for forecasting. The value of \code{nsim} is set to the number of rows of \code{xreg} if it is not \code{NULL}.}

\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.}
}
\value{
An object of class "\code{ts}".
}
\description{
Returns a time series based on the model object \code{object}.
}
\details{
With \code{simulate.Arima()}, the \code{object} should be produced by \code{\link{Arima}} or \code{\link{auto.arima}}, rather than \code{\link[stats]{arima}}. By default, the error series is assumed normally distributed and generated using \code{\link[stats]{rnorm}}. If \code{innov} is present, it is used instead. If \code{bootstrap=TRUE} and \code{innov=NULL}, the residuals are resampled instead.

When \code{future=TRUE}, the sample paths are conditional on the data. When \code{future=FALSE} and the model is stationary, the sample paths do not depend on the data at all. When \code{future=FALSE} and the model is non-stationary, the location of the sample paths is arbitrary, so they all start at the value of the first observation.
}
\examples{
fit <- ets(USAccDeaths)
plot(USAccDeaths, xlim = c(1973, 1982))
lines(simulate(fit, 36), col = "red")
}
\seealso{
\code{\link{ets}}, \code{\link{Arima}}, \code{\link{auto.arima}}, \code{\link{ar}}, \code{\link{arfima}}, \code{\link{nnetar}}.
}
\author{
Rob J Hyndman
}
\keyword{ts}

forecast/man/wineind.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{wineind}
\alias{wineind}
\title{Australian total wine sales}
\format{
Time series data
}
\source{
Time Series Data Library. \url{https://pkg.yangzhuoranyang.com/tsdl/}
}
\usage{
wineind
}
\description{
Australian total wine sales by wine makers in bottles <= 1 litre. Jan 1980 -- Aug 1994.
}
\examples{
tsdisplay(wineind)
}
\keyword{datasets}
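A sketch extending the \code{simulate.ets} example above to several future sample paths, using the \code{bootstrap} argument so the innovations are resampled residuals rather than Gaussian draws:

```r
library(forecast)

fit <- ets(USAccDeaths)

# Five future sample paths, conditional on the data (future = TRUE)
plot(USAccDeaths, xlim = c(1973, 1982))
for (i in 1:5)
  lines(simulate(fit, nsim = 36, future = TRUE, bootstrap = TRUE), col = i + 1)
```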
forecast/man/autoplot.ts.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R
\name{autolayer.mts}
\alias{autolayer.mts}
\alias{autolayer.msts}
\alias{autolayer.ts}
\alias{autoplot.ts}
\alias{autoplot.mts}
\alias{autoplot.msts}
\alias{fortify.ts}
\title{Automatically create a ggplot for time series objects}
\usage{
\method{autolayer}{mts}(object, colour = TRUE, series = NULL, ...)

\method{autolayer}{msts}(object, series = NULL, ...)

\method{autolayer}{ts}(object, colour = TRUE, series = NULL, ...)

\method{autoplot}{ts}(
  object,
  series = NULL,
  xlab = "Time",
  ylab = deparse(substitute(object)),
  main = NULL,
  ...
)

\method{autoplot}{mts}(
  object,
  colour = TRUE,
  facets = FALSE,
  xlab = "Time",
  ylab = deparse(substitute(object)),
  main = NULL,
  ...
)

\method{autoplot}{msts}(object, ...)

\method{fortify}{ts}(model, data, ...)
}
\arguments{
\item{object}{Object of class \dQuote{\code{ts}} or \dQuote{\code{mts}}.}

\item{colour}{If TRUE, the time series will be assigned a colour aesthetic}

\item{series}{Identifies the time series with a colour, which integrates well with the functionality of \link{geom_forecast}.}

\item{...}{Other plotting parameters to affect the plot.}

\item{xlab}{X-axis label.}

\item{ylab}{Y-axis label.}

\item{main}{Main title.}

\item{facets}{If TRUE, multiple time series will be faceted (and unless specified, colour is set to FALSE). If FALSE, each series will be assigned a colour.}

\item{model}{Object of class \dQuote{\code{ts}} to be converted to \dQuote{\code{data.frame}}.}

\item{data}{Not used (required for \link{fortify} method)}
}
\value{
None. Function produces a ggplot graph.
}
\description{
\code{autoplot} takes an object of type \code{ts} or \code{mts} and creates a ggplot object suitable for usage with \code{stat_forecast}.
}
\details{
\code{fortify.ts} takes a \code{ts} object and converts it into a data frame (for usage with ggplot2).
}
\examples{
library(ggplot2)
autoplot(USAccDeaths)

lungDeaths <- cbind(mdeaths, fdeaths)
autoplot(lungDeaths)
autoplot(lungDeaths, facets=TRUE)
}
\seealso{
\code{\link[stats]{plot.ts}}, \code{\link[ggplot2]{fortify}}
}
\author{
Mitchell O'Hara-Wild
}

forecast/man/plot.mforecast.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R, R/mforecast.R
\name{autoplot.mforecast}
\alias{autoplot.mforecast}
\alias{autolayer.mforecast}
\alias{plot.mforecast}
\title{Multivariate forecast plot}
\usage{
\method{autoplot}{mforecast}(object, PI = TRUE, facets = TRUE, colour = FALSE, ...)

\method{autolayer}{mforecast}(object, series = NULL, PI = TRUE, ...)

\method{plot}{mforecast}(x, main = paste("Forecasts from", unique(x$method)), xlab = "time", ...)
}
\arguments{
\item{object}{Multivariate forecast object of class \code{mforecast}. Used for ggplot graphics (S3 method consistency).}

\item{PI}{If \code{FALSE}, confidence intervals will not be plotted, giving only the forecast line.}

\item{facets}{If TRUE, multiple time series will be faceted. If FALSE, each series will be assigned a colour.}

\item{colour}{If TRUE, the time series will be assigned a colour aesthetic}

\item{\dots}{additional arguments to each individual \code{plot}.}

\item{series}{Matches an unidentified forecast layer with a coloured object on the plot.}

\item{x}{Multivariate forecast object of class \code{mforecast}.}

\item{main}{Main title. Default is the forecast method. For autoplot, specify a vector of titles for each plot.}

\item{xlab}{X-axis label. For autoplot, specify a vector of labels for each plot.}
}
\description{
Plots historical data with multivariate forecasts and prediction intervals.
}
\details{
\code{autoplot} will produce an equivalent plot as a ggplot object.
}
\examples{
library(ggplot2)

lungDeaths <- cbind(mdeaths, fdeaths)
fit <- tslm(lungDeaths ~ trend + season)
fcast <- forecast(fit, h=10)
plot(fcast)
autoplot(fcast)

carPower <- as.matrix(mtcars[,c("qsec","hp")])
carmpg <- mtcars[,"mpg"]
fit <- lm(carPower ~ carmpg)
fcast <- forecast(fit, newdata=data.frame(carmpg=30))
plot(fcast, xlab="Year")
autoplot(fcast, xlab=rep("Year",2))
}
\references{
Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles and practice}, 2nd edition, OTexts: Melbourne, Australia. \url{https://otexts.com/fpp2/}
}
\seealso{
\code{\link[forecast]{plot.forecast}}, \code{\link[stats]{plot.ts}}
}
\author{
Mitchell O'Hara-Wild
}
\keyword{ts}
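A short sketch combining the \code{series} argument of \code{autoplot.ts} with an \code{autolayer} forecast layer, as described above. The series labels are illustrative:

```r
library(forecast)
library(ggplot2)

# Overlay a forecast layer on an existing autoplot using autolayer()
fc <- forecast(ets(USAccDeaths), h = 24)
autoplot(USAccDeaths, series = "Observed") +
  autolayer(fc, series = "ETS forecast")
```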
forecast/man/splinef.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spline.R
\name{splinef}
\alias{splinef}
\title{Cubic Spline Forecast}
\usage{
splinef(
  y,
  h = 10,
  level = c(80, 95),
  fan = FALSE,
  lambda = NULL,
  biasadj = FALSE,
  method = c("gcv", "mle"),
  x = y
)
}
\arguments{
\item{y}{a numeric vector or time series of class \code{ts}}

\item{h}{Number of periods for forecasting}

\item{level}{Confidence level for prediction intervals.}

\item{fan}{If TRUE, level is set to seq(51,99,by=3). This is suitable for fan plots.}

\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.}

\item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.}

\item{method}{Method for selecting the smoothing parameter. If \code{method="gcv"}, the generalized cross-validation method from \code{\link[stats]{smooth.spline}} is used. If \code{method="mle"}, the maximum likelihood method from Hyndman et al (2002) is used.}

\item{x}{Deprecated. Included for backwards compatibility.}
}
\value{
An object of class "\code{forecast}".

The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals.

The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{splinef}.

An object of class \code{"forecast"} containing the following elements:
\item{model}{A list containing information about the fitted model}
\item{method}{The name of the forecasting method as a character string}
\item{mean}{Point forecasts as a time series}
\item{lower}{Lower limits for prediction intervals}
\item{upper}{Upper limits for prediction intervals}
\item{level}{The confidence values associated with the prediction intervals}
\item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).}
\item{onestepf}{One-step forecasts from the fitted model.}
\item{fitted}{Smooth estimates of the fitted trend using all data.}
\item{residuals}{Residuals from the fitted model. That is x minus one-step forecasts.}
}
\description{
Returns local linear forecasts and prediction intervals using cubic smoothing splines.
}
\details{
The cubic smoothing spline model is equivalent to an ARIMA(0,2,2) model but with a restricted parameter space. The advantage of the spline model over the full ARIMA model is that it provides a smooth historical trend as well as a linear forecast function. Hyndman, King, Pitrun, and Billah (2002) show that the forecast performance of the method is hardly affected by the restricted parameter space.
}
\examples{
fcast <- splinef(uspop,h=5)
plot(fcast)
summary(fcast)
}
\references{
Hyndman, King, Pitrun and Billah (2005) Local linear forecasts using cubic smoothing splines. \emph{Australian and New Zealand Journal of Statistics}, \bold{47}(1), 87-99. \url{https://robjhyndman.com/publications/splinefcast/}.
}
\seealso{
\code{\link[stats]{smooth.spline}}, \code{\link[stats]{arima}}, \code{\link{holt}}.
}
\author{
Rob J Hyndman
}
\keyword{ts}
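The details above note that the spline forecast function is linear, like Holt's method, but with a smooth historical trend. A minimal comparison sketch:

```r
library(forecast)

# Both methods extrapolate linearly; splinef also smooths the fitted trend
plot(splinef(uspop, h = 5))
plot(holt(uspop, h = 5))
```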
forecast/man/forecast.StructTS.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecast2.R
\name{forecast.StructTS}
\alias{forecast.StructTS}
\title{Forecasting using Structural Time Series models}
\usage{
\method{forecast}{StructTS}(
  object,
  h = ifelse(object$coef["epsilon"] > 1e-10, 2 * object$xtsp[3], 10),
  level = c(80, 95),
  fan = FALSE,
  lambda = NULL,
  biasadj = NULL,
  ...
)
}
\arguments{
\item{object}{An object of class "\code{StructTS}". Usually the result of a call to \code{\link[stats]{StructTS}}.}

\item{h}{Number of periods for forecasting}

\item{level}{Confidence level for prediction intervals.}

\item{fan}{If TRUE, level is set to seq(51,99,by=3). This is suitable for fan plots.}

\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.}

\item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.}

\item{...}{Other arguments.}
}
\value{
An object of class "\code{forecast}".

The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals.

The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{forecast.StructTS}.

An object of class \code{"forecast"} is a list containing at least the following elements:
\item{model}{A list containing information about the fitted model}
\item{method}{The name of the forecasting method as a character string}
\item{mean}{Point forecasts as a time series}
\item{lower}{Lower limits for prediction intervals}
\item{upper}{Upper limits for prediction intervals}
\item{level}{The confidence values associated with the prediction intervals}
\item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).}
\item{residuals}{Residuals from the fitted model. That is x minus fitted values.}
\item{fitted}{Fitted values (one-step forecasts)}
}
\description{
Returns forecasts and other information for univariate structural time series models.
}
\details{
This function calls \code{predict.StructTS} and constructs an object of class "\code{forecast}" from the results.
}
\examples{
fit <- StructTS(WWWusage,"level")
plot(forecast(fit))
}
\seealso{
\code{\link[stats]{StructTS}}.
}
\author{
Rob J Hyndman
}
\keyword{ts}
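The example above fits a local-level model; a sketch of the same workflow with a basic structural model (level, trend and seasonal components) on a seasonal series:

```r
library(forecast)

fit <- StructTS(USAccDeaths, type = "BSM")  # basic structural model
plot(forecast(fit, h = 24))
```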
forecast/man/tsCV.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tscv.R
\name{tsCV}
\alias{tsCV}
\title{Time series cross-validation}
\usage{
tsCV(y, forecastfunction, h = 1, window = NULL, xreg = NULL, initial = 0, ...)
}
\arguments{
\item{y}{Univariate time series}

\item{forecastfunction}{Function to return an object of class \code{forecast}. Its first argument must be a univariate time series, and it must have an argument \code{h} for the forecast horizon. If exogenous predictors are used, then it must also have \code{xreg} and \code{newxreg} arguments corresponding to the training and test periods.}

\item{h}{Forecast horizon}

\item{window}{Length of the rolling window; if NULL, a rolling window will not be used.}

\item{xreg}{Exogenous predictor variables passed to the forecast function if required.}

\item{initial}{Initial period of the time series where no cross-validation is performed.}

\item{...}{Other arguments are passed to \code{forecastfunction}.}
}
\value{
Numerical time series object containing the forecast errors as a vector (if h=1) and a matrix otherwise. The time index corresponds to the last period of the training data. The columns correspond to the forecast horizons.
}
\description{
\code{tsCV} computes the forecast errors obtained by applying \code{forecastfunction} to subsets of the time series \code{y} using a rolling forecast origin.
}
\details{
Let \code{y} contain the time series \eqn{y_1,\dots,y_T}{y[1:T]}. Then \code{forecastfunction} is applied successively to the time series \eqn{y_1,\dots,y_t}{y[1:t]}, for \eqn{t=1,\dots,T-h}, making predictions \eqn{\hat{y}_{t+h|t}}{f[t+h]}. The errors are given by \eqn{e_{t+h} = y_{t+h}-\hat{y}_{t+h|t}}{e[t+h] = y[t+h]-f[t+h]}. If h=1, these are returned as a vector, \eqn{e_1,\dots,e_T}{e[1:T]}. For h>1, they are returned as a matrix with the hth column containing errors for forecast horizon h. The first few errors may be missing as it may not be possible to apply \code{forecastfunction} to very short time series.
}
\examples{
#Fit an AR(2) model to each rolling origin subset
far2 <- function(x, h){forecast(Arima(x, order=c(2,0,0)), h=h)}
e <- tsCV(lynx, far2, h=1)

#Fit the same model with a rolling window of length 30
e <- tsCV(lynx, far2, h=1, window=30)

#Example with exogenous predictors
far2_xreg <- function(x, h, xreg, newxreg) {
  forecast(Arima(x, order=c(2,0,0), xreg=xreg), xreg=newxreg)
}

y <- ts(rnorm(50))
xreg <- matrix(rnorm(100),ncol=2)
e <- tsCV(y, far2_xreg, h=3, xreg=xreg)
}
\seealso{
\link{CV}, \link{CVar}, \link{residuals.Arima}, \url{https://robjhyndman.com/hyndsight/tscv/}.
}
\author{
Rob J Hyndman
}
\keyword{ts}
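Since \code{tsCV} returns a matrix with one column per horizon when h>1, accuracy can be summarised by horizon directly. A minimal sketch building on the \code{far2} example above:

```r
library(forecast)

far2 <- function(x, h){forecast(Arima(x, order=c(2,0,0)), h=h)}
e <- tsCV(lynx, far2, h = 5)

# Column h of e holds the h-step-ahead errors; NA rows occur where no
# forecast could be made, so they are dropped from the averages
mse <- colMeans(e^2, na.rm = TRUE)
plot(1:5, mse, type = "b", xlab = "Horizon", ylab = "MSE")
```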
}
\examples{
library(ggplot2)

fit <- Arima(WWWusage, order = c(3, 1, 0))
plot(fit)
autoplot(fit)

fit <- Arima(woolyrnq, order = c(2, 0, 0), seasonal = c(2, 1, 1))
plot(fit)
autoplot(fit)

plot(ar.ols(gold[1:61]))
autoplot(ar.ols(gold[1:61]))
}
\seealso{
\code{\link{Arima}}, \code{\link[stats]{ar}}
}
\author{
Rob J Hyndman & Mitchell O'Hara-Wild
}
\keyword{hplot}
forecast/man/fourier.Rd0000644000176200001440000000442314150370574014606 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/season.R
\name{fourier}
\alias{fourier}
\alias{fourierf}
\title{Fourier terms for modelling seasonality}
\usage{
fourier(x, K, h = NULL)

fourierf(x, K, h)
}
\arguments{
\item{x}{Seasonal time series: a \code{ts} or an \code{msts} object}
\item{K}{Maximum order(s) of Fourier terms}
\item{h}{Number of periods ahead to forecast (optional)}
}
\value{
Numerical matrix.
}
\description{
\code{fourier} returns a matrix containing terms from a Fourier series, up to order \code{K}, suitable for use in \code{\link{Arima}}, \code{\link{auto.arima}}, or \code{\link{tslm}}.
}
\details{
\code{fourierf} is deprecated; instead, use the \code{h} argument in \code{fourier}.

The period of the Fourier terms is determined from the time series characteristics of \code{x}. When \code{h} is missing, the length of \code{x} also determines the number of rows for the matrix returned by \code{fourier}. Otherwise, the value of \code{h} determines the number of rows for the matrix returned by \code{fourier}, typically used for forecasting. The values within \code{x} are not used.

Typical use would omit \code{h} when generating Fourier terms for training a model and include \code{h} when generating Fourier terms for forecasting.

When \code{x} is a \code{ts} object, the value of \code{K} should be an integer and specifies the number of sine and cosine terms to return. Thus, the matrix returned has \code{2*K} columns.

When \code{x} is an \code{msts} object, then \code{K} should be a vector of integers specifying the number of sine and cosine terms for each of the seasonal periods. Then the matrix returned will have \code{2*sum(K)} columns.
}
\examples{
library(ggplot2)

# Using Fourier series for a "ts" object
# K is chosen to minimize the AICc
deaths.model <- auto.arima(USAccDeaths, xreg=fourier(USAccDeaths,K=5), seasonal=FALSE)
deaths.fcast <- forecast(deaths.model, xreg=fourier(USAccDeaths, K=5, h=36))
autoplot(deaths.fcast) + xlab("Year")

# Using Fourier series for a "msts" object
taylor.lm <- tslm(taylor ~ fourier(taylor, K = c(3, 3)))
taylor.fcast <- forecast(taylor.lm,
  data.frame(fourier(taylor, K = c(3, 3), h = 270)))
autoplot(taylor.fcast)
}
\seealso{
\code{\link{seasonaldummy}}
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/getResponse.Rd0000644000176200001440000000261014150370574015425 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getResponse.R
\name{getResponse}
\alias{getResponse}
\alias{getResponse.default}
\alias{getResponse.lm}
\alias{getResponse.Arima}
\alias{getResponse.fracdiff}
\alias{getResponse.ar}
\alias{getResponse.tbats}
\alias{getResponse.bats}
\alias{getResponse.mforecast}
\alias{getResponse.baggedModel}
\title{Get response variable from time series model.}
\usage{
getResponse(object, ...)

\method{getResponse}{default}(object, ...)

\method{getResponse}{lm}(object, ...)

\method{getResponse}{Arima}(object, ...)

\method{getResponse}{fracdiff}(object, ...)

\method{getResponse}{ar}(object, ...)
\method{getResponse}{tbats}(object, ...)

\method{getResponse}{bats}(object, ...)

\method{getResponse}{mforecast}(object, ...)

\method{getResponse}{baggedModel}(object, ...)
}
\arguments{
\item{object}{a time series model or forecast object.}
\item{...}{Additional arguments that are ignored.}
}
\value{
A numerical vector or a time series object of class \code{ts}.
}
\description{
\code{getResponse} is a generic function for extracting the historical data from a time series model (including \code{Arima}, \code{ets}, \code{ar}, \code{fracdiff}), a linear model of class \code{lm}, or a forecast object. The function invokes particular \emph{methods} which depend on the class of the first argument.
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/ocsb.test.Rd0000644000176200001440000000342514150370574015040 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unitRoot.R
\name{ocsb.test}
\alias{ocsb.test}
\alias{print.OCSBtest}
\title{Osborn, Chui, Smith, and Birchenhall Test for Seasonal Unit Roots}
\usage{
ocsb.test(x, lag.method = c("fixed", "AIC", "BIC", "AICc"), maxlag = 0)
}
\arguments{
\item{x}{a univariate seasonal time series.}
\item{lag.method}{a character specifying the lag order selection method.}
\item{maxlag}{the maximum lag order to be considered by \code{lag.method}.}
}
\value{
ocsb.test returns a list of class "OCSBtest" with the following components:
\item{statistics}{the value of the test statistics.}
\item{pvalues}{the p-values for each test statistic.}
\item{method}{a character string describing the type of test.}
\item{data.name}{a character string giving the name of the data.}
\item{fitted.model}{the fitted regression model.}
}
\description{
An implementation of the Osborn, Chui, Smith, and Birchenhall (OCSB) test.
}
\details{
The regression equation may include lags of the dependent variable. When lag.method = "fixed", the lag order is fixed to maxlag; otherwise, maxlag is the maximum number of lags considered in a lag selection procedure that minimises the lag.method criterion, which can be AIC or BIC or corrected AIC, AICc, obtained as \eqn{AIC + \frac{2k(k+1)}{n-k-1}}{AIC + (2k(k+1))/(n-k-1)}, where \eqn{k} is the number of parameters and \eqn{n} is the number of available observations in the model.

Critical values for the test are based on simulations, which have been smoothed to produce critical values for all seasonal periods.
}
\examples{
ocsb.test(AirPassengers)
}
\references{
Osborn DR, Chui APL, Smith J, and Birchenhall CR (1988) "Seasonality and the order of integration for consumption", \emph{Oxford Bulletin of Economics and Statistics} \bold{50}(4):361-377.
}
\seealso{
\code{\link{nsdiffs}}
}
forecast/man/dm.test.Rd0000644000176200001440000000625514456202551014514 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DM2.R
\name{dm.test}
\alias{dm.test}
\title{Diebold-Mariano test for predictive accuracy}
\usage{
dm.test(
  e1,
  e2,
  alternative = c("two.sided", "less", "greater"),
  h = 1,
  power = 2,
  varestimator = c("acf", "bartlett")
)
}
\arguments{
\item{e1}{Forecast errors from method 1.}
\item{e2}{Forecast errors from method 2.}
\item{alternative}{a character string specifying the alternative hypothesis, must be one of \code{"two.sided"} (default), \code{"greater"} or \code{"less"}. You can specify just the initial letter.}
\item{h}{The forecast horizon used in calculating \code{e1} and \code{e2}.}
\item{power}{The power used in the loss function.
Usually 1 or 2.}
\item{varestimator}{a character string specifying the long-run variance estimator. Options are \code{"acf"} (default) or \code{"bartlett"}.}
}
\value{
A list with class \code{"htest"} containing the following components:
\item{statistic}{the value of the DM-statistic.}
\item{parameter}{the forecast horizon and loss function power used in the test.}
\item{alternative}{a character string describing the alternative hypothesis.}
\item{varestimator}{a character string describing the long-run variance estimator.}
\item{p.value}{the p-value for the test.}
\item{method}{a character string with the value "Diebold-Mariano Test".}
\item{data.name}{a character vector giving the names of the two error series.}
}
\description{
The Diebold-Mariano test compares the forecast accuracy of two forecast methods.
}
\details{
This function implements the modified test proposed by Harvey, Leybourne and Newbold (1997). The null hypothesis is that the two methods have the same forecast accuracy. For \code{alternative="less"}, the alternative hypothesis is that method 2 is less accurate than method 1. For \code{alternative="greater"}, the alternative hypothesis is that method 2 is more accurate than method 1. For \code{alternative="two.sided"}, the alternative hypothesis is that method 1 and method 2 have different levels of accuracy. The long-run variance estimator can be either the auto-correlation estimator \code{varestimator = "acf"}, or the estimator based on Bartlett weights \code{varestimator = "bartlett"} which ensures a positive estimate. Both long-run variance estimators are proposed in Diebold and Mariano (1995).
}
\examples{
# Test on in-sample one-step forecasts
f1 <- ets(WWWusage)
f2 <- auto.arima(WWWusage)
accuracy(f1)
accuracy(f2)
dm.test(residuals(f1), residuals(f2), h = 1)

# Test on out-of-sample one-step forecasts
f1 <- ets(WWWusage[1:80])
f2 <- auto.arima(WWWusage[1:80])
f1.out <- ets(WWWusage[81:100], model = f1)
f2.out <- Arima(WWWusage[81:100], model = f2)
accuracy(f1.out)
accuracy(f2.out)
dm.test(residuals(f1.out), residuals(f2.out), h = 1)
}
\references{
Diebold, F.X. and Mariano, R.S. (1995) Comparing predictive accuracy. \emph{Journal of Business and Economic Statistics}, \bold{13}, 253-263.

Harvey, D., Leybourne, S., & Newbold, P. (1997). Testing the equality of prediction mean squared errors. \emph{International Journal of Forecasting}, \bold{13}(2), 281-291.
}
\author{
George Athanasopoulos and Kirill Kuroptev
}
\keyword{htest}
\keyword{ts}
forecast/man/ets.Rd0000644000176200001440000001346114254256650013731 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ets.R
\name{ets}
\alias{ets}
\alias{print.ets}
\alias{summary.ets}
\alias{as.character.ets}
\alias{coef.ets}
\alias{tsdiag.ets}
\title{Exponential smoothing state space model}
\usage{
ets(
  y,
  model = "ZZZ",
  damped = NULL,
  alpha = NULL,
  beta = NULL,
  gamma = NULL,
  phi = NULL,
  additive.only = FALSE,
  lambda = NULL,
  biasadj = FALSE,
  lower = c(rep(1e-04, 3), 0.8),
  upper = c(rep(0.9999, 3), 0.98),
  opt.crit = c("lik", "amse", "mse", "sigma", "mae"),
  nmse = 3,
  bounds = c("both", "usual", "admissible"),
  ic = c("aicc", "aic", "bic"),
  restrict = TRUE,
  allow.multiplicative.trend = FALSE,
  use.initial.values = FALSE,
  na.action = c("na.contiguous", "na.interp", "na.fail"),
  ...
)
}
\arguments{
\item{y}{a numeric vector or time series of class \code{ts}}
\item{model}{Usually a three-character string identifying the method using the framework terminology of Hyndman et al.
(2002) and Hyndman et al. (2008). The first letter denotes the error type ("A", "M" or "Z"); the second letter denotes the trend type ("N","A","M" or "Z"); and the third letter denotes the season type ("N","A","M" or "Z"). In all cases, "N"=none, "A"=additive, "M"=multiplicative and "Z"=automatically selected. So, for example, "ANN" is simple exponential smoothing with additive errors, "MAM" is multiplicative Holt-Winters' method with multiplicative errors, and so on. It is also possible for the model to be of class \code{"ets"}, and equal to the output from a previous call to \code{ets}. In this case, the same model is fitted to \code{y} without re-estimating any smoothing parameters. See also the \code{use.initial.values} argument.} \item{damped}{If TRUE, use a damped trend (either additive or multiplicative). If NULL, both damped and non-damped trends will be tried and the best model (according to the information criterion \code{ic}) returned.} \item{alpha}{Value of alpha. If NULL, it is estimated.} \item{beta}{Value of beta. If NULL, it is estimated.} \item{gamma}{Value of gamma. If NULL, it is estimated.} \item{phi}{Value of phi. If NULL, it is estimated.} \item{additive.only}{If TRUE, will only consider additive models. Default is FALSE.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated. When \code{lambda} is specified, \code{additive.only} is set to \code{TRUE}.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{lower}{Lower bounds for the parameters (alpha, beta, gamma, phi). Ignored if \code{bounds=="admissible"}.} \item{upper}{Upper bounds for the parameters (alpha, beta, gamma, phi). Ignored if \code{bounds=="admissible"}.} \item{opt.crit}{Optimization criterion. One of "mse" (Mean Square Error), "amse" (Average MSE over first \code{nmse} forecast horizons), "sigma" (Standard deviation of residuals), "mae" (Mean of absolute residuals), or "lik" (Log-likelihood, the default).} \item{nmse}{Number of steps for average multistep MSE (1<=\code{nmse}<=30).} \item{bounds}{Type of parameter space to impose: \code{"usual" } indicates all parameters must lie between specified lower and upper bounds; \code{"admissible"} indicates parameters must lie in the admissible space; \code{"both"} (default) takes the intersection of these regions.} \item{ic}{Information criterion to be used in model selection.} \item{restrict}{If \code{TRUE} (default), the models with infinite variance will not be allowed.} \item{allow.multiplicative.trend}{If \code{TRUE}, models with multiplicative trend are allowed when searching for a model. Otherwise, the model space excludes them. This argument is ignored if a multiplicative trend model is explicitly requested (e.g., using \code{model="MMN"}).} \item{use.initial.values}{If \code{TRUE} and \code{model} is of class \code{"ets"}, then the initial values in the model are also not re-estimated.} \item{na.action}{A function which indicates what should happen when the data contains NA values. 
By default, the largest contiguous portion of the time series will be used.}
\item{...}{Other undocumented arguments.}
}
\value{
An object of class "\code{ets}".

The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{ets} and associated functions.
}
\description{
Returns an ets model applied to \code{y}.
}
\details{
Based on the classification of methods as described in Hyndman et al. (2008).

The methodology is fully automatic. The only required argument for ets is the time series. The model is chosen automatically if not specified. This methodology performed extremely well on the M3-competition data. (See Hyndman et al., 2002, below.)
}
\examples{
fit <- ets(USAccDeaths)
plot(forecast(fit))
}
\references{
Hyndman, R.J., Koehler, A.B., Snyder, R.D., and Grose, S. (2002) "A state space framework for automatic forecasting using exponential smoothing methods", \emph{International J. Forecasting}, \bold{18}(3), 439--454.

Hyndman, R.J., Akram, Md., and Archibald, B. (2008) "The admissible parameter space for exponential smoothing models". \emph{Annals of the Institute of Statistical Mathematics}, \bold{60}(2), 407--426.

Hyndman, R.J., Koehler, A.B., Ord, J.K., and Snyder, R.D. (2008) \emph{Forecasting with exponential smoothing: the state space approach}, Springer-Verlag. \url{http://www.exponentialsmoothing.net}.
}
\seealso{
\code{\link[stats]{HoltWinters}}, \code{\link{rwf}}, \code{\link{Arima}}.
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/accuracy.default.Rd0000644000176200001440000000642614207263356016357 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/errors.R
\name{accuracy.default}
\alias{accuracy.default}
\title{Accuracy measures for a forecast model}
\usage{
\method{accuracy}{default}(object, x, test = NULL, d = NULL, D = NULL, f = NULL, ...)
}
\arguments{
\item{object}{An object of class \dQuote{\code{forecast}}, or a numerical vector containing forecasts. It will also work with \code{Arima}, \code{ets} and \code{lm} objects if \code{x} is omitted -- in which case training set accuracy measures are returned.}
\item{x}{An optional numerical vector containing actual values of the same length as object, or a time series overlapping with the times of \code{f}.}
\item{test}{Indicator of which elements of \code{x} and \code{f} to test. If \code{test} is \code{NULL}, all elements are used. Otherwise test is a numeric vector containing the indices of the elements to use in the test.}
\item{d}{An integer indicating the number of lag-1 differences to be used for the denominator in MASE calculation. Default value is 1 for non-seasonal series and 0 for seasonal series.}
\item{D}{An integer indicating the number of seasonal differences to be used for the denominator in MASE calculation. Default value is 0 for non-seasonal series and 1 for seasonal series.}
\item{f}{Deprecated. Please use `object` instead.}
\item{...}{Additional arguments depending on the specific method.}
}
\value{
Matrix giving forecast accuracy measures.
}
\description{
Returns a range of summary measures of the forecast accuracy. If \code{x} is provided, the function measures test set forecast accuracy based on \code{x-f}. If \code{x} is not provided, the function only produces training set accuracy measures of the forecasts based on \code{f["x"]-fitted(f)}. All measures are defined and discussed in Hyndman and Koehler (2006).
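As an illustration of the basic calculations underlying the first few measures (a minimal sketch, with hypothetical numeric vectors \code{x} of actuals and \code{f} of forecasts of equal length):
\preformatted{
err <- x - f
mean(err)            # ME
sqrt(mean(err^2))    # RMSE
mean(abs(err))       # MAE
mean(100 * err / x)  # MPE (in percent)
}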
}
\details{
The measures calculated are:
\itemize{
\item ME: Mean Error
\item RMSE: Root Mean Squared Error
\item MAE: Mean Absolute Error
\item MPE: Mean Percentage Error
\item MAPE: Mean Absolute Percentage Error
\item MASE: Mean Absolute Scaled Error
\item ACF1: Autocorrelation of errors at lag 1.
}

By default, the MASE calculation is scaled using MAE of training set naive forecasts for non-seasonal time series, training set seasonal naive forecasts for seasonal time series and training set mean forecasts for non-time series data. If \code{f} is a numerical vector rather than a \code{forecast} object, the MASE will not be returned as the training data will not be available.

See Hyndman and Koehler (2006) and Hyndman and Athanasopoulos (2018, Section 3.4) for further details.
}
\examples{
fit1 <- rwf(EuStockMarkets[1:200, 1], h = 100)
fit2 <- meanf(EuStockMarkets[1:200, 1], h = 100)
accuracy(fit1)
accuracy(fit2)
accuracy(fit1, EuStockMarkets[201:300, 1])
accuracy(fit2, EuStockMarkets[201:300, 1])
plot(fit1)
lines(EuStockMarkets[1:300, 1])
}
\references{
Hyndman, R.J. and Koehler, A.B. (2006) "Another look at measures of forecast accuracy". \emph{International Journal of Forecasting}, \bold{22}(4), 679-688.

Hyndman, R.J. and Athanasopoulos, G. (2018) "Forecasting: principles and practice", 2nd ed., OTexts, Melbourne, Australia. Section 3.4 "Evaluating forecast accuracy". \url{https://otexts.com/fpp2/accuracy.html}.
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/msts.Rd0000644000176200001440000000276414150370574014127 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/msts.R
\name{msts}
\alias{msts}
\alias{print.msts}
\alias{window.msts}
\alias{`[.msts`}
\title{Multi-Seasonal Time Series}
\usage{
msts(data, seasonal.periods, ts.frequency = floor(max(seasonal.periods)), ...)
}
\arguments{
\item{data}{A numeric vector, ts object, matrix or data frame. It is intended that the time series data is univariate; otherwise it is treated the same as ts().}
\item{seasonal.periods}{A vector of the seasonal periods of the msts.}
\item{ts.frequency}{The seasonal period that should be used as frequency of the underlying ts object. The default value is \code{max(seasonal.periods)}.}
\item{...}{Arguments to be passed to the underlying call to \code{ts()}. For example \code{start=c(1987,5)}.}
}
\value{
An object of class \code{c("msts", "ts")}. If there is only one seasonal period (i.e., \code{length(seasonal.periods)==1}), then the object is of class \code{"ts"}.
}
\description{
msts is an S3 class for multi-seasonal time series objects, intended to be used for models that support multiple seasonal periods. The msts class inherits from the ts class and has an additional "msts" attribute which contains the vector of seasonal periods. All methods that work on a ts class should also work on an msts class.
}
\examples{
x <- msts(taylor, seasonal.periods=c(2*24,2*24*7,2*24*365), start=2000+22/52)
y <- msts(USAccDeaths, seasonal.periods=12, start=1949)
}
\author{
Slava Razbash and Rob J Hyndman
}
\keyword{ts}
forecast/man/baggedModel.Rd0000644000176200001440000000445614150370574015333 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/baggedModel.R
\name{baggedModel}
\alias{baggedModel}
\alias{print.baggedModel}
\alias{baggedETS}
\title{Forecasting using a bagged model}
\usage{
baggedModel(y, bootstrapped_series = bld.mbb.bootstrap(y, 100), fn = ets, ...)

baggedETS(y, bootstrapped_series = bld.mbb.bootstrap(y, 100), ...)
}
\arguments{
\item{y}{A numeric vector or time series of class \code{ts}.}
\item{bootstrapped_series}{bootstrapped versions of y.}
\item{fn}{the forecast function to use. Default is \code{\link{ets}}.}
\item{\dots}{Other arguments passed to the forecast function.}
}
\value{
Returns an object of class "\code{baggedModel}".

The function \code{print} is used to obtain and print a summary of the results.
\item{models}{A list containing the fitted ensemble models.}
\item{method}{The function for producing a forecastable model.}
\item{y}{The original time series.}
\item{bootstrapped_series}{The bootstrapped series.}
\item{modelargs}{The arguments passed through to \code{fn}.}
\item{fitted}{Fitted values (one-step forecasts). The mean of the fitted values is calculated over the ensemble.}
\item{residuals}{Original values minus fitted values.}
}
\description{
The bagged model forecasting method.
}
\details{
This function implements the bagged model forecasting method described in Bergmeir et al. (2016). By default, the \code{\link{ets}} function is applied to all bootstrapped series. Base models other than \code{\link{ets}} can be given by the parameter \code{fn}. Using the default parameters, the function \code{\link{bld.mbb.bootstrap}} is used to calculate the bootstrapped series with the Box-Cox and Loess-based decomposition (BLD) bootstrap. The function \code{\link{forecast.baggedModel}} can then be used to calculate forecasts.

\code{baggedETS} is a wrapper for \code{baggedModel}, setting \code{fn} to "ets". This function is included for backwards compatibility only, and may be deprecated in the future.
}
\examples{
fit <- baggedModel(WWWusage)
fcast <- forecast(fit)
plot(fcast)
}
\references{
Bergmeir, C., R. J. Hyndman, and J. M. Benitez (2016). Bagging Exponential Smoothing Methods using STL Decomposition and Box-Cox Transformation. International Journal of Forecasting 32, 303-312.
}
\author{
Christoph Bergmeir, Fotios Petropoulos
}
\keyword{ts}
forecast/man/forecast.nnetar.Rd0000644000176200001440000001023414150370574016224 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nnetar.R
\name{forecast.nnetar}
\alias{forecast.nnetar}
\title{Forecasting using neural network models}
\usage{
\method{forecast}{nnetar}(
  object,
  h = ifelse(object$m > 1, 2 * object$m, 10),
  PI = FALSE,
  level = c(80, 95),
  fan = FALSE,
  xreg = NULL,
  lambda = object$lambda,
  bootstrap = FALSE,
  npaths = 1000,
  innov = NULL,
  ...
)
}
\arguments{
\item{object}{An object of class "\code{nnetar}" resulting from a call to \code{\link{nnetar}}.}
\item{h}{Number of periods for forecasting. If \code{xreg} is used, \code{h} is ignored and the number of forecast periods is set to the number of rows of \code{xreg}.}
\item{PI}{If TRUE, prediction intervals are produced, otherwise only point forecasts are calculated. If \code{PI} is FALSE, then \code{level}, \code{fan}, \code{bootstrap} and \code{npaths} are all ignored.}
\item{level}{Confidence level for prediction intervals.}
\item{fan}{If \code{TRUE}, level is set to \code{seq(51,99,by=3)}. This is suitable for fan plots.}
\item{xreg}{Future values of external regressor variables.}
\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL.
Otherwise, data transformed before model is estimated.}
\item{bootstrap}{If \code{TRUE}, then prediction intervals are computed using simulations with resampled residuals rather than normally distributed errors. Ignored if \code{innov} is not \code{NULL}.}
\item{npaths}{Number of sample paths used in computing simulated prediction intervals.}
\item{innov}{Values to use as innovations for prediction intervals. Must be a matrix with \code{h} rows and \code{npaths} columns (vectors are coerced into a matrix). If present, \code{bootstrap} is ignored.}
\item{...}{Additional arguments passed to \code{\link{simulate.nnetar}}.}
}
\value{
An object of class "\code{forecast}".

The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals.

The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{forecast.nnetar}.

An object of class "\code{forecast}" is a list containing at least the following elements:
\item{model}{A list containing information about the fitted model}
\item{method}{The name of the forecasting method as a character string}
\item{mean}{Point forecasts as a time series}
\item{lower}{Lower limits for prediction intervals}
\item{upper}{Upper limits for prediction intervals}
\item{level}{The confidence values associated with the prediction intervals}
\item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).}
\item{xreg}{The external regressors used in fitting (if given).}
\item{residuals}{Residuals from the fitted model. That is x minus fitted values.}
\item{fitted}{Fitted values (one-step forecasts)}
\item{...}{Other arguments}
}
\description{
Returns forecasts and other information for univariate neural network models.
}
\details{
Prediction intervals are calculated through simulations and can be slow. Note that if the network is too complex and overfits the data, the residuals can be arbitrarily small; if used for prediction interval calculations, they could lead to misleadingly small values. It is possible to use out-of-sample residuals to ameliorate this, see examples.
}
\examples{
## Fit & forecast model
fit <- nnetar(USAccDeaths, size=2)
fcast <- forecast(fit, h=20)
plot(fcast)

\dontrun{
## Include prediction intervals in forecast
fcast2 <- forecast(fit, h=20, PI=TRUE, npaths=100)
plot(fcast2)

## Set up out-of-sample innovations using cross-validation
fit_cv <- CVar(USAccDeaths, size=2)
res_sd <- sd(fit_cv$residuals, na.rm=TRUE)
myinnovs <- rnorm(20*100, mean=0, sd=res_sd)
## Forecast using new innovations
fcast3 <- forecast(fit, h=20, PI=TRUE, npaths=100, innov=myinnovs)
plot(fcast3)
}
}
\seealso{
\code{\link{nnetar}}.
}
\author{
Rob J Hyndman and Gabriel Caceres
}
\keyword{ts}
forecast/man/seasonplot.Rd0000644000176200001440000000411514150370574015320 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R, R/graph.R
\name{ggseasonplot}
\alias{ggseasonplot}
\alias{seasonplot}
\title{Seasonal plot}
\usage{
ggseasonplot(
  x,
  season.labels = NULL,
  year.labels = FALSE,
  year.labels.left = FALSE,
  type = NULL,
  col = NULL,
  continuous = FALSE,
  polar = FALSE,
  labelgap = 0.04,
  ...
)

seasonplot(
  x,
  s,
  season.labels = NULL,
  year.labels = FALSE,
  year.labels.left = FALSE,
  type = "o",
  main,
  xlab = NULL,
  ylab = "",
  col = 1,
  labelgap = 0.1,
  ...
)
}
\arguments{
\item{x}{a numeric vector or time series of class \code{ts}.}
\item{season.labels}{Labels for each season in the "year"}
\item{year.labels}{Logical flag indicating whether labels for each year of data should be plotted on the right.}
\item{year.labels.left}{Logical flag indicating whether labels for each year of data should be plotted on the left.}
\item{type}{plot type (as for \code{\link[graphics]{plot}}). Not yet supported for ggseasonplot.}
\item{col}{Colour}
\item{continuous}{Should the colour scheme for years be continuous or discrete?}
\item{polar}{Plot the graph on seasonal coordinates}
\item{labelgap}{Distance between year labels and plotted lines}
\item{\dots}{additional arguments to \code{\link[graphics]{plot}}.}
\item{s}{seasonal frequency of x}
\item{main}{Main title.}
\item{xlab}{X-axis label.}
\item{ylab}{Y-axis label.}
}
\value{
None.
}
\description{
Plots a seasonal plot as described in Hyndman and Athanasopoulos (2018, chapter 2). This is like a time plot except that the data are plotted against the seasons in separate years.
}
\examples{
ggseasonplot(AirPassengers, col=rainbow(12), year.labels=TRUE)
ggseasonplot(AirPassengers, year.labels=TRUE, continuous=TRUE)

seasonplot(AirPassengers, col=rainbow(12), year.labels=TRUE)
}
\references{
Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles and practice}, 2nd edition, OTexts: Melbourne, Australia. \url{https://otexts.com/fpp2/}
}
\seealso{
\code{\link[stats]{monthplot}}
}
\author{
Rob J Hyndman & Mitchell O'Hara-Wild
}
\keyword{ts}
forecast/man/ma.Rd0000644000176200001440000000262014150370574013525 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/season.R
\name{ma}
\alias{ma}
\title{Moving-average smoothing}
\usage{
ma(x, order, centre = TRUE)
}
\arguments{
\item{x}{Univariate time series}
\item{order}{Order of moving average smoother}
\item{centre}{If TRUE, then the moving average is centred for even orders.}
}
\value{
Numerical time series object containing the simple moving average smoothed values.
}
\description{
\code{ma} computes a simple moving average smoother of a given time series.
}
\details{
The moving average smoother averages the nearest \code{order} periods of each observation. As neighbouring observations of a time series are likely to be similar in value, averaging eliminates some of the randomness in the data, leaving a smooth trend-cycle component.
\deqn{\hat{T}_{t} = \frac{1}{m} \sum_{j=-k}^k y_{t+j}}{T[t]=1/m(y[t-k]+y[t-k+1]+\ldots+y[t]+\ldots+y[t+k-1]+y[t+k])}
where \eqn{k=\frac{m-1}{2}}{k=(m-1)/2}

When an even \code{order} is specified, the observations averaged will include one more observation from the future than the past (k is rounded up). If centre is TRUE, the values from two moving averages (where k is rounded up and down, respectively) are averaged, centring the moving average.
}
\examples{
plot(wineind)
sm <- ma(wineind,order=12)
lines(sm,col="red")
}
\seealso{
\code{\link[stats]{decompose}}
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/gglagplot.Rd0000644000176200001440000000370514150370574015115 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R
\name{gglagplot}
\alias{gglagplot}
\alias{gglagchull}
\title{Time series lag ggplots}
\usage{
gglagplot(
  x,
  lags = ifelse(frequency(x) > 9, 16, 9),
  set.lags = 1:lags,
  diag = TRUE,
  diag.col = "gray",
  do.lines = TRUE,
  colour = TRUE,
  continuous = frequency(x) > 12,
  labels = FALSE,
  seasonal = TRUE,
  ...
)

gglagchull(
  x,
  lags = ifelse(frequency(x) > 1, min(12, frequency(x)), 4),
  set.lags = 1:lags,
  diag = TRUE,
  diag.col = "gray",
  ...
)
}
\arguments{
\item{x}{a time series object (type \code{ts}).}
\item{lags}{number of lag plots desired, see arg set.lags.}
\item{set.lags}{vector of positive integers specifying which lags to use.}
\item{diag}{logical indicating if the x=y diagonal should be drawn.}
\item{diag.col}{color to be used for the diagonal if(diag).}
\item{do.lines}{if TRUE, lines will be drawn, otherwise points will be drawn.}
\item{colour}{logical indicating if lines should be coloured.}
\item{continuous}{Should the colour scheme for years be continuous or discrete?}
\item{labels}{logical indicating if labels should be used.}
\item{seasonal}{Should the line colour be based on seasonal characteristics (TRUE), or sequential (FALSE).}
\item{\dots}{Not used (for consistency with lag.plot)}
}
\value{
None.
}
\description{
Plots a lag plot using ggplot.
}
\details{
\dQuote{gglagplot} will plot time series against lagged versions of themselves. It helps to visualise 'auto-dependence' even when auto-correlations vanish.

\dQuote{gglagchull} will layer convex hulls of the lags, layered on a single plot. This helps visualise the change in 'auto-dependence' as lags increase.
}
\examples{
gglagplot(woolyrnq)
gglagplot(woolyrnq,seasonal=FALSE)

lungDeaths <- cbind(mdeaths, fdeaths)
gglagplot(lungDeaths, lags=2)
gglagchull(lungDeaths, lags=6)

gglagchull(woolyrnq)
}
\seealso{
\code{\link[stats]{lag.plot}}
}
\author{
Mitchell O'Hara-Wild
}
forecast/man/Acf.Rd0000644000176200001440000000747114150370574013626 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acf.R
\name{Acf}
\alias{Acf}
\alias{Pacf}
\alias{Ccf}
\alias{taperedacf}
\alias{taperedpacf}
\title{(Partial) Autocorrelation and Cross-Correlation Function Estimation}
\usage{
Acf(
  x,
  lag.max = NULL,
  type = c("correlation", "covariance", "partial"),
  plot = TRUE,
  na.action = na.contiguous,
  demean = TRUE,
  ...
)

Pacf(
  x,
  lag.max = NULL,
  plot = TRUE,
  na.action = na.contiguous,
  demean = TRUE,
  ...
)

Ccf(
  x,
  y,
  lag.max = NULL,
  type = c("correlation", "covariance"),
  plot = TRUE,
  na.action = na.contiguous,
  ...
)

taperedacf(
  x,
  lag.max = NULL,
  type = c("correlation", "partial"),
  plot = TRUE,
  calc.ci = TRUE,
  level = 95,
  nsim = 100,
  ...
)

taperedpacf(x, ...)
}
\arguments{
\item{x}{a univariate or multivariate (not Ccf) numeric time series object or a numeric vector or matrix.}
\item{lag.max}{maximum lag at which to calculate the acf. Default is \eqn{10\log_{10}(N/m)}{10*log10(N/m)} where \eqn{N} is the number of observations and \eqn{m} the number of series. Will be automatically limited to one less than the number of observations in the series.}
\item{type}{character string giving the type of acf to be computed. Allowed values are \dQuote{\code{correlation}} (the default), \dQuote{\code{covariance}} or \dQuote{\code{partial}}.}
\item{plot}{logical. If \code{TRUE} (the default) the resulting acf, pacf or ccf is plotted.}
\item{na.action}{function to handle missing values. Default is \code{\link[stats]{na.contiguous}}.
Useful alternatives are \code{\link[stats]{na.pass}} and \code{\link{na.interp}}.} \item{demean}{Should covariances be about the sample means?} \item{...}{Additional arguments passed to the plotting function.} \item{y}{a univariate numeric time series object or a numeric vector.} \item{calc.ci}{If \code{TRUE}, confidence intervals for the ACF/PACF estimates are calculated.} \item{level}{Percentage level used for the confidence intervals.} \item{nsim}{The number of bootstrap samples used in estimating the confidence intervals.} } \value{ The \code{Acf}, \code{Pacf} and \code{Ccf} functions return objects of class "acf" as described in \code{\link[stats]{acf}} from the stats package. The \code{taperedacf} and \code{taperedpacf} functions return objects of class "mpacf". } \description{ The function \code{Acf} computes (and by default plots) an estimate of the autocorrelation function of a (possibly multivariate) time series. Function \code{Pacf} computes (and by default plots) an estimate of the partial autocorrelation function of a (possibly multivariate) time series. Function \code{Ccf} computes the cross-correlation or cross-covariance of two univariate series. } \details{ The functions improve the \code{\link[stats]{acf}}, \code{\link[stats]{pacf}} and \code{\link[stats]{ccf}} functions. The main differences are that \code{Acf} does not plot a spike at lag 0 when \code{type=="correlation"} (which is redundant) and the horizontal axes show lags in time units rather than seasonal units. The tapered versions implement the ACF and PACF estimates and plots described in Hyndman (2015), based on the banded and tapered estimates of autocovariance proposed by McMurry and Politis (2010). } \examples{ Acf(wineind) Pacf(wineind) \dontrun{ taperedacf(wineind, nsim=50) taperedpacf(wineind, nsim=50) } } \references{ Hyndman, R.J. (2015). Discussion of ``High-dimensional autocovariance matrices and optimal linear prediction''. \emph{Electronic Journal of Statistics}, 9, 792-796. McMurry, T. L., & Politis, D. N. (2010). Banded and tapered estimates for autocovariance matrices and the linear process bootstrap. \emph{Journal of Time Series Analysis}, 31(6), 471-482. } \seealso{ \code{\link[stats]{acf}}, \code{\link[stats]{pacf}}, \code{\link[stats]{ccf}}, \code{\link{tsdisplay}} } \author{ Rob J Hyndman } \keyword{ts} forecast/man/seasonal.Rd0000644000176200001440000000205414150370574014736 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/components.R \name{seasonal} \alias{seasonal} \alias{trendcycle} \alias{remainder} \title{Extract components from a time series decomposition} \usage{ seasonal(object) trendcycle(object) remainder(object) } \arguments{ \item{object}{Object created by \code{\link[stats]{decompose}}, \code{\link[stats]{stl}} or \code{\link{tbats}}.} } \value{ Univariate time series. } \description{ Returns a univariate time series equal to either a seasonal component, trend-cycle component or remainder component from a time series decomposition. } \examples{ plot(USAccDeaths) fit <- stl(USAccDeaths, s.window="periodic") lines(trendcycle(fit),col="red") library(ggplot2) autoplot(cbind( Data=USAccDeaths, Seasonal=seasonal(fit), Trend=trendcycle(fit), Remainder=remainder(fit)), facets=TRUE) + ylab("") + xlab("Year") } \seealso{ \code{\link[stats]{stl}}, \code{\link[stats]{decompose}}, \code{\link{tbats}}, \code{\link{seasadj}}. 
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/CVar.Rd0000644000176200001440000000404214150370574013763 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tscv.R
\name{CVar}
\alias{CVar}
\alias{print.CVar}
\title{k-fold Cross-Validation applied to an autoregressive model}
\usage{
CVar(
  y,
  k = 10,
  FUN = nnetar,
  cvtrace = FALSE,
  blocked = FALSE,
  LBlags = 24,
  ...
)
}
\arguments{
\item{y}{Univariate time series}
\item{k}{Number of folds to use for cross-validation.}
\item{FUN}{Function to fit an autoregressive model. Currently, it only works with the \code{\link{nnetar}} function.}
\item{cvtrace}{Provide progress information.}
\item{blocked}{Choose folds randomly or as blocks?}
\item{LBlags}{Lags for the Ljung-Box test; defaults to 24. For yearly series it can be set to 20.}
\item{...}{Other arguments are passed to \code{FUN}.}
}
\value{
A list containing information about the model and accuracy for each fold, plus other summary information computed across folds.
}
\description{
\code{CVar} computes the errors obtained by applying an autoregressive modelling function to subsets of the time series \code{y} using k-fold cross-validation as described in Bergmeir, Hyndman and Koo (2018). It also applies a Ljung-Box test to the residuals. If this test is significant (see the returned p-value), there is serial correlation in the residuals and the model can be considered to be underfitting the data. In this case, the cross-validated errors can underestimate the generalization error and should not be used.
}
\examples{
modelcv <- CVar(lynx, k=5, lambda=0.15)
print(modelcv)
print(modelcv$fold1)

library(ggplot2)
autoplot(lynx, series="Data") +
  autolayer(modelcv$testfit, series="Fits") +
  autolayer(modelcv$residuals, series="Residuals")
ggAcf(modelcv$residuals)
}
\references{
Bergmeir, C., Hyndman, R.J., Koo, B. (2018) A note on the validity of cross-validation for evaluating time series prediction. \emph{Computational Statistics & Data Analysis}, \bold{120}, 70-83. \url{https://robjhyndman.com/publications/cv-time-series/}.
}
\seealso{
\link{CV}, \link{tsCV}.
}
\author{
Gabriel Caceres and Rob J Hyndman
}
\keyword{ts}
forecast/man/plot.ets.Rd0000644000176200001440000000207514150370574014704 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ets.R, R/ggplot.R
\name{plot.ets}
\alias{plot.ets}
\alias{autoplot.ets}
\title{Plot components from ETS model}
\usage{
\method{plot}{ets}(x, ...)

\method{autoplot}{ets}(object, range.bars = NULL, ...)
}
\arguments{
\item{x}{Object of class \dQuote{ets}.}
\item{...}{Other plotting parameters to affect the plot.}
\item{object}{Object of class \dQuote{ets}. Used for ggplot graphics (S3 method consistency).}
\item{range.bars}{Logical indicating if each plot should have a bar at its right side representing relative size. If NULL, automatic selection takes place.}
}
\value{
None. Function produces a plot.
}
\description{
Produces a plot of the level, slope and seasonal components from an ETS model.
}
\details{
\code{autoplot} will produce an equivalent plot as a ggplot object.
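The plotted components come from the model's state matrix; a minimal sketch of inspecting them directly, assuming the fitted object stores its states in the \code{states} element:
\preformatted{
fit <- ets(USAccDeaths)
colnames(fit$states)  # level, and possibly slope and seasonal states
head(fit$states)
}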
}
\examples{
fit <- ets(USAccDeaths)
plot(fit)
plot(fit,plot.type="single",ylab="",col=1:3)

library(ggplot2)
autoplot(fit)
}
\seealso{
\code{\link{ets}}
}
\author{
Rob J Hyndman & Mitchell O'Hara-Wild
}
\keyword{hplot}
forecast/man/arima.errors.Rd0000644000176200001440000000153114150370574015534 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arima.R
\name{arima.errors}
\alias{arima.errors}
\title{Errors from a regression model with ARIMA errors}
\usage{
arima.errors(object)
}
\arguments{
\item{object}{An object containing a time series model of class \code{Arima}.}
}
\value{
A \code{ts} object
}
\description{
Returns time series of the regression residuals from a fitted ARIMA model.
}
\details{
This is a deprecated function which is identical to \code{\link{residuals.Arima}(object, type="regression")}.

Regression residuals are equal to the original data minus the effect of any regression variables. If there are no regression variables, the errors will be identical to the original series (possibly adjusted to have zero mean).
}
\seealso{
\code{\link{residuals.Arima}}.
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/gghistogram.Rd0000644000176200001440000000171514150370574015447 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R
\name{gghistogram}
\alias{gghistogram}
\title{Histogram with optional normal and kernel density functions}
\usage{
gghistogram(
  x,
  add.normal = FALSE,
  add.kde = FALSE,
  add.rug = TRUE,
  bins,
  boundary = 0
)
}
\arguments{
\item{x}{a numerical vector.}
\item{add.normal}{Add a normal density function for comparison}
\item{add.kde}{Add a kernel density estimate for comparison}
\item{add.rug}{Add a rug plot on the horizontal axis}
\item{bins}{The number of bins to use for the histogram. Selected by default using the Friedman-Diaconis rule given by \code{\link[grDevices]{nclass.FD}}}
\item{boundary}{A boundary between two bins.}
}
\value{
None.
}
\description{
Plots a histogram and density estimates using ggplot.
}
\examples{
gghistogram(lynx, add.kde=TRUE)
}
\seealso{
\code{\link[graphics]{hist}}, \code{\link[ggplot2]{geom_histogram}}
}
\author{
Rob J Hyndman
}
forecast/man/arimaorder.Rd0000644000176200001440000000211114150370574015250 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arima.R
\name{arimaorder}
\alias{arimaorder}
\title{Return the order of an ARIMA or ARFIMA model}
\usage{
arimaorder(object)
}
\arguments{
\item{object}{An object of class \dQuote{\code{Arima}}, \dQuote{\code{ar}} or \dQuote{\code{fracdiff}}. Usually the result of a call to \code{\link[stats]{arima}}, \code{\link{Arima}}, \code{\link{auto.arima}}, \code{\link[stats]{ar}}, \code{\link{arfima}} or \code{\link[fracdiff]{fracdiff}}.}
}
\value{
A numerical vector giving the values \eqn{p}, \eqn{d} and \eqn{q} of the ARIMA or ARFIMA model. For a seasonal ARIMA model, the returned vector contains the values \eqn{p}, \eqn{d}, \eqn{q}, \eqn{P}, \eqn{D}, \eqn{Q} and \eqn{m}, where \eqn{m} is the period of seasonality.
}
\description{
Returns the order of a univariate ARIMA or ARFIMA model.
}
\examples{
WWWusage \%>\% auto.arima \%>\% arimaorder
}
\seealso{
\code{\link[stats]{ar}}, \code{\link{auto.arima}}, \code{\link{Arima}}, \code{\link[stats]{arima}}, \code{\link{arfima}}.
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/tsclean.Rd0000644000176200001440000000235214207263356014565 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean.R
\name{tsclean}
\alias{tsclean}
\title{Identify and replace outliers and missing values in a time series}
\usage{
tsclean(x, replace.missing = TRUE, iterate = 2, lambda = NULL)
}
\arguments{
\item{x}{time series}
\item{replace.missing}{If TRUE, it not only replaces outliers, but also interpolates missing values}
\item{iterate}{the number of iterations required}
\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.}
}
\value{
Time series
}
\description{
Uses supsmu for non-seasonal series and a robust STL decomposition for seasonal series. To estimate missing values and outlier replacements, linear interpolation is used on the (possibly seasonally adjusted) series.
}
\examples{
cleangold <- tsclean(gold)
}
\references{
Hyndman (2021) "Detecting time series outliers" \url{https://robjhyndman.com/hyndsight/tsoutliers/}.
}
\seealso{
\code{\link[forecast]{na.interp}}, \code{\link[forecast]{tsoutliers}}, \code{\link[stats]{supsmu}}
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/autolayer.Rd0000644000176200001440000000134614150370574015141 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R
\name{autolayer}
\alias{autolayer}
\title{Create a ggplot layer appropriate to a particular data type}
\usage{
autolayer(object, ...)
}
\arguments{
\item{object}{an object whose class will determine the behaviour of autolayer}
\item{...}{other arguments passed to specific methods}
}
\value{
a ggplot layer
}
\description{
\code{autolayer()} uses ggplot2 to draw a particular layer for an object of a particular class in a single command. This defines the S3 generic that other classes and packages can extend.
}
\seealso{
\code{\link[ggplot2:autoplot]{autoplot()}}, \code{\link[ggplot2:ggplot]{ggplot()}} and \code{\link[ggplot2:fortify]{fortify()}}
}
forecast/man/croston.Rd0000644000176200001440000000511114150370574014615 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecast2.R
\name{croston}
\alias{croston}
\title{Forecasts for intermittent demand using Croston's method}
\usage{
croston(y, h = 10, alpha = 0.1, x = y)
}
\arguments{
\item{y}{a numeric vector or time series of class \code{ts}}
\item{h}{Number of periods for forecasting.}
\item{alpha}{Value of alpha. Default value is 0.1.}
\item{x}{Deprecated. Included for backwards compatibility.}
}
\value{
An object of class \code{"forecast"} is a list containing at least the following elements:
\item{model}{A list containing information about the fitted model. The first element gives the model used for non-zero demands. The second element gives the model used for times between non-zero demands. Both elements are of class \code{forecast}.}
\item{method}{The name of the forecasting method as a character string}
\item{mean}{Point forecasts as a time series}
\item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).}
\item{residuals}{Residuals from the fitted model.
That is y minus fitted values.} \item{fitted}{Fitted values (one-step forecasts)} The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{croston} and associated functions. } \description{ Returns forecasts and other information for Croston's forecasts applied to y. } \details{ Based on Croston's (1972) method for intermittent demand forecasting, also described in Shenstone and Hyndman (2005). Croston's method involves using simple exponential smoothing (SES) on the non-zero elements of the time series and a separate application of SES to the times between non-zero elements of the time series. The smoothing parameters of the two applications of SES are assumed to be equal and are denoted by \code{alpha}. Note that prediction intervals are not computed as Croston's method has no underlying stochastic model. } \examples{ y <- rpois(20,lambda=.3) fcast <- croston(y) plot(fcast) } \references{ Croston, J. (1972) "Forecasting and stock control for intermittent demands", \emph{Operational Research Quarterly}, \bold{23}(3), 289-303. Shenstone, L., and Hyndman, R.J. (2005) "Stochastic models underlying Croston's method for intermittent demand forecasting". \emph{Journal of Forecasting}, \bold{24}, 389-402. } \seealso{ \code{\link{ses}}. } \author{ Rob J Hyndman } \keyword{ts} forecast/man/is.ets.Rd0000644000176200001440000000120614150370574014334 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/acf.R, R/arima.R, R/baggedModel.R, R/bats.R, % R/ets.R, R/modelAR.R, R/mstl.R, R/nnetar.R \name{is.acf} \alias{is.acf} \alias{is.Arima} \alias{is.baggedModel} \alias{is.bats} \alias{is.ets} \alias{is.modelAR} \alias{is.stlm} \alias{is.nnetar} \alias{is.nnetarmodels} \title{Is an object a particular model type?} \usage{ is.acf(x) is.Arima(x) is.baggedModel(x) is.bats(x) is.ets(x) is.modelAR(x) is.stlm(x) is.nnetar(x) is.nnetarmodels(x) } \arguments{ \item{x}{object to be tested} } \description{ Returns true if the model object is of a particular type } forecast/man/ndiffs.Rd0000644000176200001440000000523114150370574014402 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unitRoot.R \name{ndiffs} \alias{ndiffs} \title{Number of differences required for a stationary series} \usage{ ndiffs( x, alpha = 0.05, test = c("kpss", "adf", "pp"), type = c("level", "trend"), max.d = 2, ... ) } \arguments{ \item{x}{A univariate time series} \item{alpha}{Level of the test, possible values range from 0.01 to 0.1.} \item{test}{Type of unit root test to use} \item{type}{Specification of the deterministic component in the regression} \item{max.d}{Maximum number of non-seasonal differences allowed} \item{...}{Additional arguments to be passed on to the unit root test} } \value{ An integer indicating the number of differences required for stationarity. } \description{ Functions to estimate the number of differences required to make a given time series stationary. \code{ndiffs} estimates the number of first differences necessary. } \details{ \code{ndiffs} uses a unit root test to determine the number of differences required for time series \code{x} to be made stationary. If \code{test="kpss"}, the KPSS test is used with the null hypothesis that \code{x} has a stationary root against a unit-root alternative. 
Then the test returns the least number of differences required to pass the test at the level \code{alpha}. If \code{test="adf"}, the Augmented Dickey-Fuller test is used and if \code{test="pp"} the Phillips-Perron test is used. In both of these cases, the null hypothesis is that \code{x} has a unit root against a stationary root alternative. Then the test returns the least number of differences required to fail the test at the level \code{alpha}.
}
\examples{
ndiffs(WWWusage)
ndiffs(diff(log(AirPassengers), 12))
}
\references{
Dickey DA and Fuller WA (1979), "Distribution of the Estimators for Autoregressive Time Series with a Unit Root", \emph{Journal of the American Statistical Association} \bold{74}:427-431.

Kwiatkowski D, Phillips PCB, Schmidt P and Shin Y (1992) "Testing the Null Hypothesis of Stationarity against the Alternative of a Unit Root", \emph{Journal of Econometrics} \bold{54}:159-178.

Osborn, D.R. (1990) "A survey of seasonality in UK macroeconomic variables", \emph{International Journal of Forecasting}, \bold{6}:327-336.

Phillips, P.C.B. and Perron, P. (1988) "Testing for a unit root in time series regression", \emph{Biometrika}, \bold{75}(2), 335-346.

Said E and Dickey DA (1984), "Testing for Unit Roots in Autoregressive Moving Average Models of Unknown Order", \emph{Biometrika} \bold{71}:599-607.
}
\seealso{
\code{\link{auto.arima}} and \code{\link{nsdiffs}}
}
\author{
Rob J Hyndman, Slava Razbash & Mitchell O'Hara-Wild
}
\keyword{ts}
forecast/man/tsdisplay.Rd0000644000176200001440000000465114150370574015152 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R, R/graph.R
\name{ggtsdisplay}
\alias{ggtsdisplay}
\alias{tsdisplay}
\title{Time series display}
\usage{
ggtsdisplay(
  x,
  plot.type = c("partial", "histogram", "scatter", "spectrum"),
  points = TRUE,
  smooth = FALSE,
  lag.max,
  na.action = na.contiguous,
  theme = NULL,
  ...
)

tsdisplay(
  x,
  plot.type = c("partial", "histogram", "scatter", "spectrum"),
  points = TRUE,
  ci.type = c("white", "ma"),
  lag.max,
  na.action = na.contiguous,
  main = NULL,
  xlab = "",
  ylab = "",
  pch = 1,
  cex = 0.5,
  ...
)
}
\arguments{
\item{x}{a numeric vector or time series of class \code{ts}.}
\item{plot.type}{type of plot to include in lower right corner.}
\item{points}{logical flag indicating whether to show the individual points or not in the time plot.}
\item{smooth}{logical flag indicating whether to show a smooth loess curve superimposed on the time plot.}
\item{lag.max}{the maximum lag to plot for the acf and pacf. A suitable value is selected by default if the argument is missing.}
\item{na.action}{function to handle missing values in acf, pacf and spectrum calculations. The default is \code{\link[stats]{na.contiguous}}. Useful alternatives are \code{\link[stats]{na.pass}} and \code{\link{na.interp}}.}
\item{theme}{Adds a ggplot element to each plot, typically a theme.}
\item{\dots}{additional arguments to \code{\link[stats]{acf}}.}
\item{ci.type}{type of confidence limits for ACF that is passed to \code{\link[stats]{acf}}. Should the confidence limits assume a white noise input or for lag \eqn{k} an MA(\eqn{k-1}) input?}
\item{main}{Main title.}
\item{xlab}{X-axis label.}
\item{ylab}{Y-axis label.}
\item{pch}{Plotting character.}
\item{cex}{Character size.}
}
\value{
None.
}
\description{
Plots a time series along with its acf and either its pacf, lagged scatterplot or spectrum.
}
\details{
\code{ggtsdisplay} will produce the equivalent plot using ggplot graphics.
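The individual panels can also be reproduced separately; a minimal sketch using the package's own plotting functions:
\preformatted{
plot(USAccDeaths)  # time plot
Acf(USAccDeaths)   # autocorrelation function
Pacf(USAccDeaths)  # partial autocorrelations (the default lower-right panel)
}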
}
\examples{
library(ggplot2)
ggtsdisplay(USAccDeaths, plot.type="scatter", theme=theme_bw())
tsdisplay(diff(WWWusage))
ggtsdisplay(USAccDeaths, plot.type="scatter")
}
\references{
Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles and practice}, 2nd edition, OTexts: Melbourne, Australia. \url{https://otexts.com/fpp2/}
}
\seealso{
\code{\link[stats]{plot.ts}}, \code{\link{Acf}}, \code{\link[stats]{spec.ar}}
}
\author{
Rob J Hyndman
}
\keyword{ts}
forecast/man/ses.Rd0000644000176200001440000001120014150370574013716 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HoltWintersNew.R
\name{ses}
\alias{ses}
\alias{holt}
\alias{hw}
\title{Exponential smoothing forecasts}
\usage{
ses(
  y,
  h = 10,
  level = c(80, 95),
  fan = FALSE,
  initial = c("optimal", "simple"),
  alpha = NULL,
  lambda = NULL,
  biasadj = FALSE,
  x = y,
  ...
)

holt(
  y,
  h = 10,
  damped = FALSE,
  level = c(80, 95),
  fan = FALSE,
  initial = c("optimal", "simple"),
  exponential = FALSE,
  alpha = NULL,
  beta = NULL,
  phi = NULL,
  lambda = NULL,
  biasadj = FALSE,
  x = y,
  ...
)

hw(
  y,
  h = 2 * frequency(x),
  seasonal = c("additive", "multiplicative"),
  damped = FALSE,
  level = c(80, 95),
  fan = FALSE,
  initial = c("optimal", "simple"),
  exponential = FALSE,
  alpha = NULL,
  beta = NULL,
  gamma = NULL,
  phi = NULL,
  lambda = NULL,
  biasadj = FALSE,
  x = y,
  ...
)
}
\arguments{
\item{y}{a numeric vector or time series of class \code{ts}}
\item{h}{Number of periods for forecasting.}
\item{level}{Confidence level for prediction intervals.}
\item{fan}{If TRUE, level is set to seq(51,99,by=3). This is suitable for fan plots.}
\item{initial}{Method used for selecting initial state values. If \code{optimal}, the initial values are optimized along with the smoothing parameters using \code{\link{ets}}. If \code{simple}, the initial values are set to values obtained using simple calculations on the first few observations. See Hyndman & Athanasopoulos (2018) for details.}
\item{alpha}{Value of smoothing parameter for the level. If \code{NULL}, it will be estimated.}
\item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.}
\item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.}
\item{x}{Deprecated. Included for backwards compatibility.}
\item{...}{Other arguments passed to \code{forecast.ets}.}
\item{damped}{If TRUE, use a damped trend.}
\item{exponential}{If TRUE, an exponential trend is fitted. Otherwise, the trend is (locally) linear.}
\item{beta}{Value of smoothing parameter for the trend. If \code{NULL}, it will be estimated.}
\item{phi}{Value of damping parameter if \code{damped=TRUE}. If \code{NULL}, it will be estimated.}
\item{seasonal}{Type of seasonality in the \code{hw} model: "additive" or "multiplicative".}
\item{gamma}{Value of smoothing parameter for the seasonal component. If \code{NULL}, it will be estimated.}
}
\value{
An object of class "\code{forecast}".

The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals.
The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{ets} and associated functions. An object of class \code{"forecast"} is a list containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{residuals}{Residuals from the fitted model.} \item{fitted}{Fitted values (one-step forecasts)} } \description{ Returns forecasts and other information for exponential smoothing forecasts applied to \code{y}. } \details{ ses, holt and hw are simply convenient wrapper functions for \code{forecast(ets(...))}. } \examples{ fcast <- holt(airmiles) plot(fcast) deaths.fcast <- hw(USAccDeaths,h=48) plot(deaths.fcast) } \references{ Hyndman, R.J., Koehler, A.B., Ord, J.K., Snyder, R.D. (2008) \emph{Forecasting with exponential smoothing: the state space approach}, Springer-Verlag: New York. \url{http://www.exponentialsmoothing.net}. Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles and practice}, 2nd edition, OTexts: Melbourne, Australia. \url{https://otexts.com/fpp2/} } \seealso{ \code{\link{ets}}, \code{\link[stats]{HoltWinters}}, \code{\link{rwf}}, \code{\link[stats]{arima}}. } \author{ Rob J Hyndman } \keyword{ts} forecast/man/forecast.mts.Rd0000644000176200001440000000711314150370574015542 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mforecast.R \name{forecast.mts} \alias{forecast.mts} \alias{mforecast} \alias{print.mforecast} \alias{summary.mforecast} \alias{as.data.frame.mforecast} \title{Forecasting time series} \usage{ \method{forecast}{mts}( object, h = ifelse(frequency(object) > 1, 2 * frequency(object), 10), level = c(80, 95), fan = FALSE, robust = FALSE, lambda = NULL, biasadj = FALSE, find.frequency = FALSE, allow.multiplicative.trend = FALSE, ... ) } \arguments{ \item{object}{a multivariate time series or multivariate time series model for which forecasts are required} \item{h}{Number of periods for forecasting} \item{level}{Confidence level for prediction intervals.} \item{fan}{If TRUE, \code{level} is set to \code{seq(51,99,by=3)}. This is suitable for fan plots.} \item{robust}{If TRUE, the function is robust to missing values and outliers in \code{object}. This argument is only valid when \code{object} is of class \code{mts}.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{find.frequency}{If TRUE, the function determines the appropriate period, if the data is of unknown period.} \item{allow.multiplicative.trend}{If TRUE, then ETS models with multiplicative trends are allowed. 
Otherwise, only additive or no trend ETS models are permitted.} \item{...}{Additional arguments affecting the forecasts produced.} } \value{ An object of class "\code{mforecast}". The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the multivariate forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract various useful features of the value returned by \code{forecast$model}. An object of class \code{"mforecast"} is a list usually containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{residuals}{Residuals from the fitted model. For models with additive errors, the residuals will be x minus the fitted values.} \item{fitted}{Fitted values (one-step forecasts)} } \description{ \code{mforecast} is a class of objects for forecasting from multivariate time series or multivariate time series models. The function invokes particular \emph{methods} which depend on the class of the first argument. } \details{ For example, the function \code{\link{forecast.mlm}} makes multivariate forecasts based on the results produced by \code{\link{tslm}}. } \seealso{ Other functions which return objects of class \code{"mforecast"} are \code{\link{forecast.mlm}}, \code{forecast.varest}. } \author{ Rob J Hyndman & Mitchell O'Hara-Wild } forecast/man/modelAR.Rd0000644000176200001440000000733214150370574014460 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/modelAR.R \name{modelAR} \alias{modelAR} \alias{print.modelAR} \title{Time Series Forecasts with a user-defined model} \usage{ modelAR( y, p, P = 1, FUN, predict.FUN, xreg = NULL, lambda = NULL, model = NULL, subset = NULL, scale.inputs = FALSE, x = y, ... ) } \arguments{ \item{y}{A numeric vector or time series of class \code{ts}.} \item{p}{Embedding dimension for non-seasonal time series. Number of non-seasonal lags used as inputs. For non-seasonal time series, the default is the optimal number of lags (according to the AIC) for a linear AR(p) model. For seasonal time series, the same method is used but applied to seasonally adjusted data (from an stl decomposition).} \item{P}{Number of seasonal lags used as inputs.} \item{FUN}{Function used for model fitting. Must accept arguments \code{x} and \code{y} for the predictors and response, respectively (\code{formula} object not currently supported).} \item{predict.FUN}{Prediction function used to apply \code{FUN} to new data. Must accept an object of class \code{FUN} as its first argument, and a data frame or matrix of new data for its second argument. Additionally, it should return fitted values when new data is omitted.} \item{xreg}{Optionally, a vector or matrix of external regressors, which must have the same number of rows as \code{y}. Must be numeric.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL.
Otherwise, data transformed before model is estimated.} \item{model}{Output from a previous call to \code{modelAR}. If model is passed, this same model is fitted to \code{y} without re-estimating any parameters.} \item{subset}{Optional vector specifying a subset of observations to be used in the fit. Can be an integer index vector or a logical vector the same length as \code{y}. All observations are used by default.} \item{scale.inputs}{If TRUE, inputs are scaled by subtracting the column means and dividing by their respective standard deviations. If \code{lambda} is not \code{NULL}, scaling is applied after Box-Cox transformation.} \item{x}{Deprecated. Included for backwards compatibility.} \item{\dots}{Other arguments passed to \code{FUN} for \code{modelAR}.} } \value{ Returns an object of class "\code{modelAR}". The function \code{summary} is used to obtain and print a summary of the results. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{modelAR}. \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{x}{The original time series.} \item{xreg}{The external regressors used in fitting (if given).} \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} \item{fitted}{Fitted values (one-step forecasts)} \item{...}{Other arguments} } \description{ Experimental function to forecast univariate time series with a user-defined model } \details{ This is an experimental function and only recommended for advanced users. The selected model is fitted with lagged values of \code{y} as inputs. The inputs are for lags 1 to \code{p}, and lags \code{m} to \code{mP} where \code{m=frequency(y)}. If \code{xreg} is provided, its columns are also used as inputs. If there are missing values in \code{y} or \code{xreg}, the corresponding rows (and any others which depend on them as lags) are omitted from the fit. The model is trained for one-step forecasting. Multi-step forecasts are computed recursively. } \author{ Rob J Hyndman and Gabriel Caceres } \keyword{ts} forecast/man/forecast.ts.Rd0000644000176200001440000001144714207263356015374 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/forecast.R \name{forecast.ts} \alias{forecast.ts} \alias{print.forecast} \alias{summary.forecast} \alias{as.data.frame.forecast} \alias{as.ts.forecast} \alias{forecast.default} \title{Forecasting time series} \usage{ \method{forecast}{ts}( object, h = ifelse(frequency(object) > 1, 2 * frequency(object), 10), level = c(80, 95), fan = FALSE, robust = FALSE, lambda = NULL, biasadj = FALSE, find.frequency = FALSE, allow.multiplicative.trend = FALSE, model = NULL, ... ) \method{forecast}{default}(object, ...) \method{print}{forecast}(x, ...) } \arguments{ \item{object}{a time series or time series model for which forecasts are required} \item{h}{Number of periods for forecasting} \item{level}{Confidence level for prediction intervals.} \item{fan}{If TRUE, \code{level} is set to \code{seq(51,99,by=3)}. This is suitable for fan plots.} \item{robust}{If TRUE, the function is robust to missing values and outliers in \code{object}. This argument is only valid when \code{object} is of class \code{ts}.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL.
Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{find.frequency}{If TRUE, the function determines the appropriate period, if the data is of unknown period.} \item{allow.multiplicative.trend}{If TRUE, then ETS models with multiplicative trends are allowed. Otherwise, only additive or no trend ETS models are permitted.} \item{model}{An object describing a time series model; e.g., one of class \code{ets}, \code{Arima}, \code{bats}, \code{tbats}, or \code{nnetar}.} \item{...}{Additional arguments affecting the forecasts produced. If \code{model=NULL}, \code{forecast.ts} passes these to \code{\link{ets}} or \code{\link{stlf}} depending on the frequency of the time series. If \code{model} is not \code{NULL}, the arguments are passed to the relevant modelling function.} \item{x}{a numeric vector or time series of class \code{ts}.} } \value{ An object of class "\code{forecast}". The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract various useful features of the value returned by \code{forecast$model}. An object of class \code{"forecast"} is a list usually containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{residuals}{Residuals from the fitted model. For models with additive errors, the residuals will be x minus the fitted values.} \item{fitted}{Fitted values (one-step forecasts)} } \description{ \code{forecast} is a generic function for forecasting from time series or time series models. The function invokes particular \emph{methods} which depend on the class of the first argument. } \details{ For example, the function \code{\link{forecast.Arima}} makes forecasts based on the results produced by \code{\link[stats]{arima}}. If \code{model=NULL}, the function \code{\link{forecast.ts}} makes forecasts using \code{\link{ets}} models (if the data are non-seasonal or the seasonal period is 12 or less) or \code{\link{stlf}} (if the seasonal period is 13 or more). If \code{model} is not \code{NULL}, \code{forecast.ts} will apply the \code{model} to the \code{object} time series, and then generate forecasts accordingly. } \examples{ WWWusage \%>\% forecast \%>\% plot fit <- ets(window(WWWusage, end=60)) fc <- forecast(WWWusage, model=fit) } \seealso{ Other functions which return objects of class \code{"forecast"} are \code{\link{forecast.ets}}, \code{\link{forecast.Arima}}, \code{\link{forecast.HoltWinters}}, \code{\link{forecast.StructTS}}, \code{\link{meanf}}, \code{\link{rwf}}, \code{\link{splinef}}, \code{\link{thetaf}}, \code{\link{croston}}, \code{\link{ses}}, \code{\link{holt}}, \code{\link{hw}}.
} \author{ Rob J Hyndman } \keyword{ts} forecast/man/taylor.Rd0000644000176200001440000000125114150370574014441 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{taylor} \alias{taylor} \title{Half-hourly electricity demand} \format{ Time series data } \source{ James W Taylor } \usage{ taylor } \description{ Half-hourly electricity demand in England and Wales from Monday 5 June 2000 to Sunday 27 August 2000. Discussed in Taylor (2003), and kindly provided by James W Taylor. Units: Megawatts } \examples{ plot(taylor) } \references{ Taylor, J.W. (2003) Short-term electricity demand forecasting using double seasonal exponential smoothing. \emph{Journal of the Operational Research Society}, \bold{54}, 799-805. } \keyword{datasets} forecast/man/seasonaldummy.Rd0000644000176200001440000000313714150370574016015 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/season.R \name{seasonaldummy} \alias{seasonaldummy} \alias{seasonaldummyf} \title{Seasonal dummy variables} \usage{ seasonaldummy(x, h = NULL) seasonaldummyf(x, h) } \arguments{ \item{x}{Seasonal time series: a \code{ts} or a \code{msts} object} \item{h}{Number of periods ahead to forecast (optional)} } \value{ Numerical matrix. } \description{ \code{seasonaldummy} returns a matrix of dummy variables suitable for use in \code{\link{Arima}}, \code{\link{auto.arima}} or \code{\link{tslm}}. The last season is omitted and used as the control. } \details{ \code{seasonaldummyf} is deprecated; instead, use the \code{h} argument in \code{seasonaldummy}. The number of dummy variables is determined from the time series characteristics of \code{x}. When \code{h} is missing, the length of \code{x} determines the number of rows of the matrix returned by \code{seasonaldummy}; otherwise, the value of \code{h} determines the number of rows, which is typically used for forecasting. The values within \code{x} are not used. } \examples{ plot(ldeaths) # Using seasonal dummy variables month <- seasonaldummy(ldeaths) deaths.lm <- tslm(ldeaths ~ month) tsdisplay(residuals(deaths.lm)) ldeaths.fcast <- forecast(deaths.lm, data.frame(month=I(seasonaldummy(ldeaths,36)))) plot(ldeaths.fcast) # A simpler approach to seasonal dummy variables deaths.lm <- tslm(ldeaths ~ season) ldeaths.fcast <- forecast(deaths.lm, h=36) plot(ldeaths.fcast) } \seealso{ \code{\link{fourier}} } \author{ Rob J Hyndman } \keyword{ts} forecast/man/forecast.modelAR.Rd0000644000176200001440000000705214150370574016264 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/modelAR.R \name{forecast.modelAR} \alias{forecast.modelAR} \title{Forecasting using user-defined model} \usage{ \method{forecast}{modelAR}( object, h = ifelse(object$m > 1, 2 * object$m, 10), PI = FALSE, level = c(80, 95), fan = FALSE, xreg = NULL, lambda = object$lambda, bootstrap = FALSE, npaths = 1000, innov = NULL, ... ) } \arguments{ \item{object}{An object of class "\code{modelAR}" resulting from a call to \code{\link{modelAR}}.} \item{h}{Number of periods for forecasting. If \code{xreg} is used, \code{h} is ignored and the number of forecast periods is set to the number of rows of \code{xreg}.} \item{PI}{If TRUE, prediction intervals are produced, otherwise only point forecasts are calculated.
If \code{PI} is FALSE, then \code{level}, \code{fan}, \code{bootstrap} and \code{npaths} are all ignored.} \item{level}{Confidence level for prediction intervals.} \item{fan}{If \code{TRUE}, level is set to \code{seq(51,99,by=3)}. This is suitable for fan plots.} \item{xreg}{Future values of external regressor variables.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{bootstrap}{If \code{TRUE}, then prediction intervals are computed using simulations with resampled residuals rather than normally distributed errors. Ignored if \code{innov} is not \code{NULL}.} \item{npaths}{Number of sample paths used in computing simulated prediction intervals.} \item{innov}{Values to use as innovations for prediction intervals. Must be a matrix with \code{h} rows and \code{npaths} columns (vectors are coerced into a matrix). If present, \code{bootstrap} is ignored.} \item{...}{Additional arguments passed to \code{\link{simulate.nnetar}}} } \value{ An object of class "\code{forecast}". The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{forecast.modelAR}. An object of class "\code{forecast}" is a list containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{xreg}{The external regressors used in fitting (if given).} \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} \item{fitted}{Fitted values (one-step forecasts)} \item{...}{Other arguments} } \description{ Returns forecasts and other information for user-defined models. } \details{ Prediction intervals are calculated through simulations and can be slow. Note that if the model is too complex and overfits the data, the residuals can be arbitrarily small; if used for prediction interval calculations, they could lead to misleadingly small values. } \seealso{ \code{\link{modelAR}}.
} \author{ Rob J Hyndman and Gabriel Caceres } \keyword{ts} forecast/man/figures/0000755000176200001440000000000014150370574014305 5ustar liggesusersforecast/man/figures/logo.png0000644000176200001440000001365414150370574015764 0ustar liggesusers [binary PNG image data: package logo] forecast/man/forecast.ets.Rd0000644000176200001440000000652414150370574015537 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/etsforecast.R \name{forecast.ets} \alias{forecast.ets} \title{Forecasting using ETS models} \usage{ \method{forecast}{ets}( object, h = ifelse(object$m > 1, 2 * object$m, 10), level = c(80, 95), fan = FALSE, simulate = FALSE, bootstrap = FALSE, npaths = 5000, PI = TRUE, lambda = object$lambda, biasadj = NULL, ... ) } \arguments{ \item{object}{An object of class "\code{ets}". Usually the result of a call to \code{\link{ets}}.} \item{h}{Number of periods for forecasting} \item{level}{Confidence level for prediction intervals.} \item{fan}{If TRUE, level is set to seq(51,99,by=3).
This is suitable for fan plots.} \item{simulate}{If TRUE, prediction intervals are produced by simulation rather than using analytic formulae. Errors are assumed to be normally distributed.} \item{bootstrap}{If TRUE, then prediction intervals are produced by simulation using resampled errors (rather than normally distributed errors).} \item{npaths}{Number of sample paths used in computing simulated prediction intervals.} \item{PI}{If TRUE, prediction intervals are produced, otherwise only point forecasts are calculated. If \code{PI} is FALSE, then \code{level}, \code{fan}, \code{simulate}, \code{bootstrap} and \code{npaths} are all ignored.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{...}{Other arguments.} } \value{ An object of class "\code{forecast}". The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{forecast.ets}. An object of class \code{"forecast"} is a list containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{residuals}{Residuals from the fitted model. For models with additive errors, the residuals are x - fitted values. For models with multiplicative errors, the residuals are equal to x /(fitted values) - 1.} \item{fitted}{Fitted values (one-step forecasts)} } \description{ Returns forecasts and other information for univariate ETS models. } \examples{ fit <- ets(USAccDeaths) plot(forecast(fit,h=48)) } \seealso{ \code{\link{ets}}, \code{\link{ses}}, \code{\link{holt}}, \code{\link{hw}}. } \author{ Rob J Hyndman } \keyword{ts} forecast/man/CV.Rd0000644000176200001440000000135314150370574013442 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lm.R \name{CV} \alias{CV} \title{Cross-validation statistic} \usage{ CV(obj) } \arguments{ \item{obj}{output from \code{\link[stats]{lm}} or \code{\link{tslm}}} } \value{ Numerical vector containing CV, AIC, AICc, BIC and AdjR2 values. } \description{ Computes the leave-one-out cross-validation statistic (the mean of PRESS -- prediction residual sum of squares), AIC, corrected AIC, BIC and adjusted R^2 values for a linear model. 
} \examples{ y <- ts(rnorm(120,0,3) + 20*sin(2*pi*(1:120)/12), frequency=12) fit1 <- tslm(y ~ trend + season) fit2 <- tslm(y ~ season) CV(fit1) CV(fit2) } \seealso{ \code{\link[stats]{AIC}} } \author{ Rob J Hyndman } \keyword{models} forecast/man/forecast.baggedModel.Rd0000644000176200001440000000456614150370574017140 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/baggedModel.R \name{forecast.baggedModel} \alias{forecast.baggedModel} \title{Forecasting using a bagged model} \usage{ \method{forecast}{baggedModel}( object, h = ifelse(frequency(object$y) > 1, 2 * frequency(object$y), 10), ... ) } \arguments{ \item{object}{An object of class "\code{baggedModel}" resulting from a call to \code{\link{baggedModel}}.} \item{h}{Number of periods for forecasting.} \item{...}{Other arguments, passed on to the \code{\link{forecast}} function of the original method} } \value{ An object of class "\code{forecast}". The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals. An object of class "\code{forecast}" is a list containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{xreg}{The external regressors used in fitting (if given).} \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} \item{fitted}{Fitted values (one-step forecasts)} } \description{ Returns forecasts and other information for bagged models. } \details{ Intervals are calculated as min and max values over the point forecasts from the models in the ensemble. That is, the intervals are not prediction intervals, but give an indication of how much the forecasts within the ensemble differ. } \examples{ fit <- baggedModel(WWWusage) fcast <- forecast(fit) plot(fcast) \dontrun{ fit2 <- baggedModel(WWWusage, fn="auto.arima") fcast2 <- forecast(fit2) plot(fcast2) accuracy(fcast2)} } \references{ Bergmeir, C., R. J. Hyndman, and J. M. Benitez (2016). Bagging Exponential Smoothing Methods using STL Decomposition and Box-Cox Transformation. International Journal of Forecasting 32, 303-312. } \seealso{ \code{\link{baggedModel}}. } \author{ Christoph Bergmeir, Fotios Petropoulos } \keyword{ts}
\describe{ \item{generics}{\code{\link[generics]{accuracy}}, \code{\link[generics]{forecast}}} \item{ggplot2}{\code{\link[ggplot2]{autoplot}}} \item{magrittr}{\code{\link[magrittr:pipe]{\%>\%}}} }} forecast/man/findfrequency.Rd0000644000176200001440000000225214207263356015775 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/findfrequency.R \name{findfrequency} \alias{findfrequency} \title{Find dominant frequency of a time series} \usage{ findfrequency(x) } \arguments{ \item{x}{a numeric vector or time series of class \code{ts}} } \value{ an integer value } \description{ \code{findfrequency} returns the period of the dominant frequency of a time series. For seasonal data, it will return the seasonal period. For cyclic data, it will return the average cycle length. } \details{ The dominant frequency is determined from a spectral analysis of the time series. First, a linear trend is removed, then the spectral density function is estimated from the best fitting autoregressive model (based on the AIC). If there is a large (possibly local) maximum in the spectral density function at frequency \eqn{f}, then the function will return the period \eqn{1/f} (rounded to the nearest integer). If no such dominant frequency can be found, the function will return 1. } \examples{ findfrequency(USAccDeaths) # Monthly data findfrequency(taylor) # Half-hourly data findfrequency(lynx) # Annual data } \author{ Rob J Hyndman } \keyword{ts} forecast/man/nnetar.Rd0000644000176200001440000001116114456202551014415 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nnetar.R \name{nnetar} \alias{nnetar} \alias{print.nnetar} \alias{print.nnetarmodels} \title{Neural Network Time Series Forecasts} \usage{ nnetar( y, p, P = 1, size, repeats = 20, xreg = NULL, lambda = NULL, model = NULL, subset = NULL, scale.inputs = TRUE, x = y, ... ) } \arguments{ \item{y}{A numeric vector or time series of class \code{ts}.} \item{p}{Embedding dimension for non-seasonal time series. Number of non-seasonal lags used as inputs. For non-seasonal time series, the default is the optimal number of lags (according to the AIC) for a linear AR(p) model. For seasonal time series, the same method is used but applied to seasonally adjusted data (from an stl decomposition). If set to zero to indicate that no non-seasonal lags should be included, then P must be at least 1 and a model with only seasonal lags will be fit.} \item{P}{Number of seasonal lags used as inputs.} \item{size}{Number of nodes in the hidden layer. Default is half of the number of input nodes (including external regressors, if given) plus 1.} \item{repeats}{Number of networks to fit with different random starting weights. These are then averaged when producing forecasts.} \item{xreg}{Optionally, a vector or matrix of external regressors, which must have the same number of rows as \code{y}. Must be numeric.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{model}{Output from a previous call to \code{nnetar}. If model is passed, this same model is fitted to \code{y} without re-estimating any parameters.} \item{subset}{Optional vector specifying a subset of observations to be used in the fit. Can be an integer index vector or a logical vector the same length as \code{y}. 
All observations are used by default.} \item{scale.inputs}{If TRUE, inputs are scaled by subtracting the column means and dividing by their respective standard deviations. If \code{lambda} is not \code{NULL}, scaling is applied after Box-Cox transformation.} \item{x}{Deprecated. Included for backwards compatibility.} \item{\dots}{Other arguments passed to \code{\link[nnet]{nnet}} for \code{nnetar}.} } \value{ Returns an object of class "\code{nnetar}". The function \code{summary} is used to obtain and print a summary of the results. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{nnetar}. \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{x}{The original time series.} \item{xreg}{The external regressors used in fitting (if given).} \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} \item{fitted}{Fitted values (one-step forecasts)} \item{...}{Other arguments} } \description{ Feed-forward neural networks with a single hidden layer and lagged inputs for forecasting univariate time series. } \details{ A feed-forward neural network is fitted with lagged values of \code{y} as inputs and a single hidden layer with \code{size} nodes. The inputs are for lags 1 to \code{p}, and lags \code{m} to \code{mP} where \code{m=frequency(y)}. If \code{xreg} is provided, its columns are also used as inputs. If there are missing values in \code{y} or \code{xreg}, the corresponding rows (and any others which depend on them as lags) are omitted from the fit. A total of \code{repeats} networks are fitted, each with random starting weights. These are then averaged when computing forecasts. The network is trained for one-step forecasting. Multi-step forecasts are computed recursively. For non-seasonal data, the fitted model is denoted as an NNAR(p,k) model, where k is the number of hidden nodes. This is analogous to an AR(p) model but with nonlinear functions. For seasonal data, the fitted model is called an NNAR(p,P,k)[m] model, which is analogous to an ARIMA(p,0,0)(P,0,0)[m] model but with nonlinear functions. } \examples{ fit <- nnetar(lynx) fcast <- forecast(fit) plot(fcast) ## Arguments can be passed to nnet() fit <- nnetar(lynx, decay=0.5, maxit=150) plot(forecast(fit)) lines(lynx) ## Fit model to first 100 years of lynx data fit <- nnetar(window(lynx,end=1920), decay=0.5, maxit=150) plot(forecast(fit,h=14)) lines(lynx) ## Apply fitted model to later data, including all optional arguments fit2 <- nnetar(window(lynx,start=1921), model=fit) } \author{ Rob J Hyndman and Gabriel Caceres } \keyword{ts} forecast/man/autoplot.acf.Rd0000644000176200001440000000553614150370574015540 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggplot.R \name{autoplot.acf} \alias{autoplot.acf} \alias{ggAcf} \alias{ggPacf} \alias{ggCcf} \alias{autoplot.mpacf} \alias{ggtaperedacf} \alias{ggtaperedpacf} \title{ggplot (Partial) Autocorrelation and Cross-Correlation Function Estimation and Plotting} \usage{ \method{autoplot}{acf}(object, ci = 0.95, ...) ggAcf( x, lag.max = NULL, type = c("correlation", "covariance", "partial"), plot = TRUE, na.action = na.contiguous, demean = TRUE, ... ) ggPacf( x, lag.max = NULL, plot = TRUE, na.action = na.contiguous, demean = TRUE, ... ) ggCcf( x, y, lag.max = NULL, type = c("correlation", "covariance"), plot = TRUE, na.action = na.contiguous, ... 
) \method{autoplot}{mpacf}(object, ...) ggtaperedacf( x, lag.max = NULL, type = c("correlation", "partial"), plot = TRUE, calc.ci = TRUE, level = 95, nsim = 100, ... ) ggtaperedpacf(x, ...) } \arguments{ \item{object}{Object of class \dQuote{\code{acf}}.} \item{ci}{coverage probability for confidence interval. Plotting of the confidence interval is suppressed if ci is zero or negative.} \item{...}{Other plotting parameters to affect the plot.} \item{x}{a univariate or multivariate (not Ccf) numeric time series object or a numeric vector or matrix.} \item{lag.max}{maximum lag at which to calculate the acf.} \item{type}{character string giving the type of acf to be computed. Allowed values are "\code{correlation}" (the default), \dQuote{\code{covariance}} or \dQuote{\code{partial}}.} \item{plot}{logical. If \code{TRUE} (the default) the resulting ACF, PACF or CCF is plotted.} \item{na.action}{function to handle missing values. Default is \code{\link[stats]{na.contiguous}}. Useful alternatives are \code{\link[stats]{na.pass}} and \code{\link{na.interp}}.} \item{demean}{Should covariances be about the sample means?} \item{y}{a univariate numeric time series object or a numeric vector.} \item{calc.ci}{If \code{TRUE}, confidence intervals for the ACF/PACF estimates are calculated.} \item{level}{Percentage level used for the confidence intervals.} \item{nsim}{The number of bootstrap samples used in estimating the confidence intervals.} } \value{ A ggplot object. } \description{ Produces ggplot objects equivalent to the output of the \code{Acf}, \code{Pacf}, \code{Ccf}, \code{taperedacf} and \code{taperedpacf} functions. } \details{ If \code{autoplot} is given an \code{acf} or \code{mpacf} object, then an appropriate ggplot object will be created. } \examples{ library(ggplot2) ggAcf(wineind) wineind \%>\% Acf(plot=FALSE) \%>\% autoplot \dontrun{ wineind \%>\% taperedacf(plot=FALSE) \%>\% autoplot ggtaperedacf(wineind) ggtaperedpacf(wineind)} ggCcf(mdeaths, fdeaths) } \seealso{ \code{\link[stats]{plot.acf}}, \code{\link{Acf}}, \code{\link[stats]{acf}}, \code{\link{taperedacf}} } \author{ Mitchell O'Hara-Wild } forecast/man/is.constant.Rd0000644000176200001440000000046014150370574015373 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/newarima2.R \name{is.constant} \alias{is.constant} \title{Is an object constant?} \usage{ is.constant(x) } \arguments{ \item{x}{object to be tested} } \description{ Returns true if the object's numerical values do not vary. } forecast/man/naive.Rd0000644000176200001440000000777214341272370014242 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/naive.R \name{rwf} \alias{rwf} \alias{naive} \alias{print.naive} \alias{snaive} \title{Naive and Random Walk Forecasts} \usage{ rwf( y, h = 10, drift = FALSE, level = c(80, 95), fan = FALSE, lambda = NULL, biasadj = FALSE, ..., x = y ) naive( y, h = 10, level = c(80, 95), fan = FALSE, lambda = NULL, biasadj = FALSE, ..., x = y ) snaive( y, h = 2 * frequency(x), level = c(80, 95), fan = FALSE, lambda = NULL, biasadj = FALSE, ..., x = y ) } \arguments{ \item{y}{a numeric vector or time series of class \code{ts}} \item{h}{Number of periods for forecasting} \item{drift}{Logical flag. If TRUE, fits a random walk with drift model.} \item{level}{Confidence levels for prediction intervals.} \item{fan}{If TRUE, level is set to seq(51,99,by=3). This is suitable for fan plots.} \item{lambda}{Box-Cox transformation parameter.
If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{...}{Additional arguments affecting the forecasts produced.} \item{x}{Deprecated. Included for backwards compatibility.} } \value{ An object of class "\code{forecast}". The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{naive} or \code{snaive}. An object of class \code{"forecast"} is a list containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} \item{fitted}{Fitted values (one-step forecasts)} } \description{ \code{rwf()} returns forecasts and prediction intervals for a random walk with drift model applied to \code{y}. This is equivalent to an ARIMA(0,1,0) model with an optional drift coefficient. \code{naive()} is simply a wrapper to \code{rwf()} with \code{drift=FALSE}. \code{snaive()} returns forecasts and prediction intervals from an ARIMA(0,0,0)(0,1,0)m model where m is the seasonal period. } \details{ The random walk with drift model is \deqn{Y_t=c + Y_{t-1} + Z_t}{Y[t]=c + Y[t-1] + Z[t]} where \eqn{Z_t}{Z[t]} is a normal iid error. Forecasts are given by \deqn{Y_n(h)=ch+Y_n}{Y[n+h]=ch+Y[n]}. If there is no drift (as in \code{naive}), the drift parameter c=0. Forecast standard errors allow for uncertainty in estimating the drift parameter (unlike the corresponding forecasts obtained by fitting an ARIMA model directly). The seasonal naive model is \deqn{Y_t= Y_{t-m} + Z_t}{Y[t]=Y[t-m] + Z[t]} where \eqn{Z_t}{Z[t]} is a normal iid error. } \examples{ gold.fcast <- rwf(gold[1:60], h=50) plot(gold.fcast) plot(naive(gold,h=50),include=200) plot(snaive(wineind)) } \seealso{ \code{\link{Arima}} } \author{ Rob J Hyndman } \keyword{ts} forecast/man/monthdays.Rd0000644000176200001440000000134614150370574015142 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/season.R \name{monthdays} \alias{monthdays} \title{Number of days in each season} \usage{ monthdays(x) } \arguments{ \item{x}{time series} } \value{ Time series } \description{ Returns the number of days in each month or quarter of the observed time period.
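For a quarterly series, for example, it returns the number of days in each quarter. A minimal sketch (the artificial \code{ts} object below is purely illustrative, and the values shown in the comment assume leap-year handling as described above):
\preformatted{
# days in each quarter of 2020-2021; 2020 is a leap year
monthdays(ts(rep(0, 8), start = 2020, frequency = 4))
# expected: 91 91 92 92 90 91 92 92
}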
} \details{ Useful for month length adjustments. } \examples{ par(mfrow=c(2,1)) plot(ldeaths,xlab="Year",ylab="deaths", main="Monthly deaths from lung disease (UK)") ldeaths.adj <- ldeaths/monthdays(ldeaths)*365.25/12 plot(ldeaths.adj,xlab="Year",ylab="deaths", main="Adjusted monthly deaths from lung disease (UK)") } \seealso{ \code{\link[forecast]{bizdays}} } \author{ Rob J Hyndman } \keyword{ts} forecast/man/gas.Rd0000644000176200001440000000060414150370574013702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{gas} \alias{gas} \title{Australian monthly gas production} \format{ Time series data } \source{ Australian Bureau of Statistics. } \usage{ gas } \description{ Australian monthly gas production: 1956--1995. } \examples{ plot(gas) seasonplot(gas) tsdisplay(gas) } \keyword{datasets} forecast/man/tslm.Rd0000644000176200001440000000433114150370574014110 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lm.R \name{tslm} \alias{tslm} \title{Fit a linear model with time series components} \usage{ tslm(formula, data, subset, lambda = NULL, biasadj = FALSE, ...) } \arguments{ \item{formula}{an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted.} \item{data}{an optional data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model. If not found in data, the variables are taken from environment(formula), typically the environment from which lm is called.} \item{subset}{an optional subset containing rows of data to keep. For best results, pass a logical vector of rows to keep. Also supports \code{\link[base]{subset}()} functions.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{...}{Other arguments passed to \code{\link[stats]{lm}()}} } \value{ Returns an object of class "lm". } \description{ \code{tslm} is used to fit linear models to time series including trend and seasonality components. } \details{ \code{tslm} is largely a wrapper for \code{\link[stats]{lm}()} except that it allows variables "trend" and "season" which are created on the fly from the time series characteristics of the data. The variable "trend" is a simple time trend and "season" is a factor indicating the season (e.g., the month or the quarter depending on the frequency of the data). } \examples{ y <- ts(rnorm(120,0,3) + 1:120 + 20*sin(2*pi*(1:120)/12), frequency=12) fit <- tslm(y ~ trend + season) plot(forecast(fit, h=20)) } \seealso{ \code{\link{forecast.lm}}, \code{\link[stats]{lm}}.
} \author{ Mitchell O'Hara-Wild and Rob J Hyndman } \keyword{stats} forecast/man/forecast.mlm.Rd0000644000176200001440000000710014150370574015520 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mforecast.R \name{forecast.mlm} \alias{forecast.mlm} \title{Forecast a multiple linear model with possible time series components} \usage{ \method{forecast}{mlm}( object, newdata, h = 10, level = c(80, 95), fan = FALSE, lambda = object$lambda, biasadj = NULL, ts = TRUE, ... ) } \arguments{ \item{object}{Object of class "mlm", usually the result of a call to \code{\link[stats]{lm}} or \code{\link{tslm}}.} \item{newdata}{An optional data frame in which to look for variables with which to predict. If omitted, it is assumed that the only variables are trend and season, and \code{h} forecasts are produced.} \item{h}{Number of periods for forecasting. Ignored if \code{newdata} present.} \item{level}{Confidence level for prediction intervals.} \item{fan}{If \code{TRUE}, level is set to seq(51,99,by=3). This is suitable for fan plots.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{ts}{If \code{TRUE}, the forecasts will be treated as time series provided the original data is a time series; the \code{newdata} will be interpreted as related to the subsequent time periods. If \code{FALSE}, any time series attributes of the original data will be ignored.} \item{...}{Other arguments passed to \code{\link[forecast]{forecast.lm}()}.} } \value{ An object of class "\code{mforecast}". The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{forecast.lm}. An object of class \code{"mforecast"} is a list containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a multivariate time series} \item{lower}{Lower limits for prediction intervals of each series} \item{upper}{Upper limits for prediction intervals of each series} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The historical data for the response variable.} \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} \item{fitted}{Fitted values} } \description{ \code{forecast.mlm} is used to predict multiple linear models, especially those involving trend and seasonality components. } \details{ \code{forecast.mlm} is largely a wrapper for \code{\link[forecast]{forecast.lm}()} except that it allows forecasts to be generated on multiple series. Also, the output is reformatted into a \code{mforecast} object. 
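For example (a sketch only: the \code{forecast} element accessed below reflects the current internal structure of \code{mforecast} objects rather than a documented interface), each column of the response is forecast separately and the results are collected into a single object:
\preformatted{
lungDeaths <- cbind(mdeaths, fdeaths)   # from the datasets package
fit <- tslm(lungDeaths ~ trend + season)
fc <- forecast(fit, h = 12)
# one "forecast" object per column of the response
names(fc$forecast)   # e.g. "mdeaths" "fdeaths"
}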
} \examples{ lungDeaths <- cbind(mdeaths, fdeaths) fit <- tslm(lungDeaths ~ trend + season) fcast <- forecast(fit, h=10) carPower <- as.matrix(mtcars[,c("qsec","hp")]) carmpg <- mtcars[,"mpg"] fit <- lm(carPower ~ carmpg) fcast <- forecast(fit, newdata=data.frame(carmpg=30)) } \seealso{ \code{\link{tslm}}, \code{\link{forecast.lm}}, \code{\link[stats]{lm}}. } \author{ Mitchell O'Hara-Wild } forecast/man/bld.mbb.bootstrap.Rd0000644000176200001440000000234514150370574016450 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap.R \name{bld.mbb.bootstrap} \alias{bld.mbb.bootstrap} \title{Box-Cox and Loess-based decomposition bootstrap.} \usage{ bld.mbb.bootstrap(x, num, block_size = NULL) } \arguments{ \item{x}{Original time series.} \item{num}{Number of bootstrapped versions to generate.} \item{block_size}{Block size for the moving block bootstrap.} } \value{ A list with bootstrapped versions of the series. The first series in the list is the original series. } \description{ Generates bootstrapped versions of a time series using the Box-Cox and Loess-based decomposition bootstrap. } \details{ The procedure is described in Bergmeir et al. (2016). A Box-Cox transformation is applied, together with an STL decomposition (or loess for non-seasonal time series), and the remainder is bootstrapped using a moving block bootstrap. } \examples{ bootstrapped_series <- bld.mbb.bootstrap(WWWusage, 100) } \references{ Bergmeir, C., R. J. Hyndman, and J. M. Benitez (2016). Bagging Exponential Smoothing Methods using STL Decomposition and Box-Cox Transformation. International Journal of Forecasting 32, 303-312. } \seealso{ \code{\link{baggedETS}}. } \author{ Christoph Bergmeir, Fotios Petropoulos } \keyword{ts} forecast/man/autoplot.seas.Rd0000644000176200001440000000350714150370574015736 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggplot.R, R/mstl.R \name{autoplot.decomposed.ts} \alias{autoplot.decomposed.ts} \alias{autoplot.stl} \alias{autoplot.StructTS} \alias{autoplot.seas} \alias{autoplot.mstl} \title{Plot time series decomposition components using ggplot} \usage{ \method{autoplot}{decomposed.ts}(object, labels = NULL, range.bars = NULL, ...) \method{autoplot}{stl}(object, labels = NULL, range.bars = TRUE, ...) \method{autoplot}{StructTS}(object, labels = NULL, range.bars = TRUE, ...) \method{autoplot}{seas}(object, labels = NULL, range.bars = NULL, ...) \method{autoplot}{mstl}(object, ...) } \arguments{ \item{object}{Object of class \dQuote{\code{seas}}, \dQuote{\code{stl}}, or \dQuote{\code{decomposed.ts}}.} \item{labels}{Labels to replace \dQuote{seasonal}, \dQuote{trend}, and \dQuote{remainder}.} \item{range.bars}{Logical indicating if each plot should have a bar at its right side representing relative size. If \code{NULL}, automatic selection takes place.} \item{...}{Other plotting parameters to affect the plot.} } \value{ Returns an object of class \code{ggplot}. } \description{ Produces a ggplot object of seasonally decomposed time series for objects of class \dQuote{\code{stl}} (created with \code{\link[stats]{stl}}), class \dQuote{\code{seas}} (created with \code{\link[seasonal]{seas}}), or class \dQuote{\code{decomposed.ts}} (created with \code{\link[stats]{decompose}}).
} \examples{ library(ggplot2) co2 \%>\% decompose() \%>\% autoplot() nottem \%>\% stl(s.window = "periodic") \%>\% autoplot() \dontrun{ library(seasonal) seas(USAccDeaths) \%>\% autoplot() } } \seealso{ \code{\link[seasonal]{seas}}, \code{\link[stats]{stl}}, \code{\link[stats]{decompose}}, \code{\link[stats]{StructTS}}, \code{\link[stats]{plot.stl}}. } \author{ Mitchell O'Hara-Wild } forecast/man/dshw.Rd0000644000176200001440000001004214150370574014072 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dshw.r \name{dshw} \alias{dshw} \title{Double-Seasonal Holt-Winters Forecasting} \usage{ dshw( y, period1 = NULL, period2 = NULL, h = 2 * max(period1, period2), alpha = NULL, beta = NULL, gamma = NULL, omega = NULL, phi = NULL, lambda = NULL, biasadj = FALSE, armethod = TRUE, model = NULL ) } \arguments{ \item{y}{Either an \code{\link{msts}} object with two seasonal periods or a numeric vector.} \item{period1}{Length of the shorter seasonal period. Only used if \code{y} is not an \code{\link{msts}} object.} \item{period2}{Length of the longer seasonal period. Only used if \code{y} is not an \code{\link{msts}} object.} \item{h}{Number of periods for forecasting.} \item{alpha}{Smoothing parameter for the level. If \code{NULL}, the parameter is estimated using least squares.} \item{beta}{Smoothing parameter for the slope. If \code{NULL}, the parameter is estimated using least squares.} \item{gamma}{Smoothing parameter for the first seasonal period. If \code{NULL}, the parameter is estimated using least squares.} \item{omega}{Smoothing parameter for the second seasonal period. If \code{NULL}, the parameter is estimated using least squares.} \item{phi}{Autoregressive parameter. If \code{NULL}, the parameter is estimated using least squares.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{armethod}{If TRUE, the forecasts are adjusted using an AR(1) model for the errors.} \item{model}{If specified, an existing model is applied to the new data set.} } \value{ An object of class "\code{forecast}" which is a list that includes the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{x}{The original time series.} \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} \item{fitted}{Fitted values (one-step forecasts)} The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{dshw}. } \description{ Returns forecasts using Taylor's (2003) Double-Seasonal Holt-Winters method. } \details{ Taylor's (2003) double-seasonal Holt-Winters method uses additive trend and multiplicative seasonality, where there are two seasonal components which are multiplied together.
For example, with a series of half-hourly data, one would set \code{period1=48} for the daily period and \code{period2=336} for the weekly period. The smoothing parameter notation used here is different from that in Taylor (2003); instead it matches that used in Hyndman et al (2008) and that used for the \code{\link{ets}} function. } \examples{ \dontrun{ fcast <- dshw(taylor) plot(fcast) t <- seq(0,5,by=1/20) x <- exp(sin(2*pi*t) + cos(2*pi*t*4) + rnorm(length(t),0,.1)) fit <- dshw(x,20,5) plot(fit) } } \references{ Taylor, J.W. (2003) Short-term electricity demand forecasting using double seasonal exponential smoothing. \emph{Journal of the Operational Research Society}, \bold{54}, 799-805. Hyndman, R.J., Koehler, A.B., Ord, J.K., and Snyder, R.D. (2008) \emph{Forecasting with exponential smoothing: the state space approach}, Springer-Verlag. \url{http://www.exponentialsmoothing.net}. } \seealso{ \code{\link[stats]{HoltWinters}}, \code{\link{ets}}. } \author{ Rob J Hyndman } \keyword{ts} forecast/man/Arima.Rd0000644000176200001440000001067214150370574014167 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/arima.R \name{Arima} \alias{Arima} \alias{print.ARIMA} \alias{summary.Arima} \alias{as.character.Arima} \title{Fit ARIMA model to univariate time series} \usage{ Arima( y, order = c(0, 0, 0), seasonal = c(0, 0, 0), xreg = NULL, include.mean = TRUE, include.drift = FALSE, include.constant, lambda = model$lambda, biasadj = FALSE, method = c("CSS-ML", "ML", "CSS"), model = NULL, x = y, ... ) } \arguments{ \item{y}{a univariate time series of class \code{ts}.} \item{order}{A specification of the non-seasonal part of the ARIMA model: the three components (p, d, q) are the AR order, the degree of differencing, and the MA order.} \item{seasonal}{A specification of the seasonal part of the ARIMA model, plus the period (which defaults to frequency(y)). This should be a list with components order and period, but a specification of just a numeric vector of length 3 will be turned into a suitable list with the specification as the order.} \item{xreg}{Optionally, a numerical vector or matrix of external regressors, which must have the same number of rows as y. It should not be a data frame.} \item{include.mean}{Should the ARIMA model include a mean term? The default is \code{TRUE} for undifferenced series, \code{FALSE} for differenced ones (where a mean would not affect the fit nor predictions).} \item{include.drift}{Should the ARIMA model include a linear drift term? (i.e., a linear regression with ARIMA errors is fitted.) The default is \code{FALSE}.} \item{include.constant}{If \code{TRUE}, then \code{include.mean} is set to be \code{TRUE} for undifferenced series and \code{include.drift} is set to be \code{TRUE} for differenced series. Note that if there is more than one difference taken, no constant is included regardless of the value of this argument. This is deliberate as otherwise quadratic and higher order polynomial trends would be induced.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. 
If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{method}{Fitting method: maximum likelihood or minimize conditional sum-of-squares. The default (unless there are missing values) is to use conditional-sum-of-squares to find starting values, then maximum likelihood.} \item{model}{Output from a previous call to \code{Arima}. If model is passed, this same model is fitted to \code{y} without re-estimating any parameters.} \item{x}{Deprecated. Included for backwards compatibility.} \item{...}{Additional arguments to be passed to \code{\link[stats]{arima}}.} } \value{ See the \code{\link[stats]{arima}} function in the stats package. The additional objects returned are \item{x}{The time series data} \item{xreg}{The regressors used in fitting (when relevant).} \item{sigma2}{The bias adjusted MLE of the innovations variance.} } \description{ Largely a wrapper for the \code{\link[stats]{arima}} function in the stats package. The main difference is that this function allows a drift term. It is also possible to take an ARIMA model from a previous call to \code{Arima} and re-apply it to the data \code{y}. } \details{ See the \code{\link[stats]{arima}} function in the stats package. } \examples{ library(ggplot2) WWWusage \%>\% Arima(order=c(3,1,0)) \%>\% forecast(h=20) \%>\% autoplot # Fit model to first few years of AirPassengers data air.model <- Arima(window(AirPassengers,end=1956+11/12),order=c(0,1,1), seasonal=list(order=c(0,1,1),period=12),lambda=0) plot(forecast(air.model,h=48)) lines(AirPassengers) # Apply fitted model to later data air.model2 <- Arima(window(AirPassengers,start=1957),model=air.model) # Forecast accuracy measures on the log scale. # in-sample one-step forecasts. accuracy(air.model) # out-of-sample one-step forecasts. accuracy(air.model2) # out-of-sample multi-step forecasts accuracy(forecast(air.model,h=48,lambda=NULL), log(window(AirPassengers,start=1957))) } \seealso{ \code{\link{auto.arima}}, \code{\link{forecast.Arima}}. } \author{ Rob J Hyndman } \keyword{ts} forecast/man/plot.forecast.Rd0000644000176200001440000000704014150370574015714 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/forecast.R, R/ggplot.R, R/spline.R \name{plot.forecast} \alias{plot.forecast} \alias{autoplot.forecast} \alias{autoplot.splineforecast} \alias{autolayer.forecast} \alias{plot.splineforecast} \title{Forecast plot} \usage{ \method{plot}{forecast}( x, include, PI = TRUE, showgap = TRUE, shaded = TRUE, shadebars = (length(x$mean) < 5), shadecols = NULL, col = 1, fcol = 4, pi.col = 1, pi.lty = 2, ylim = NULL, main = NULL, xlab = "", ylab = "", type = "l", flty = 1, flwd = 2, ... ) \method{autoplot}{forecast}( object, include, PI = TRUE, shadecols = c("#596DD5", "#D5DBFF"), fcol = "#0000AA", flwd = 0.5, ... ) \method{autoplot}{splineforecast}(object, PI = TRUE, ...) \method{autolayer}{forecast}(object, series = NULL, PI = TRUE, showgap = TRUE, ...) \method{plot}{splineforecast}(x, fitcol = 2, type = "o", pch = 19, ...) } \arguments{ \item{x}{Forecast object produced by \code{\link{forecast}}.} \item{include}{number of values from time series to include in plot. 
Default is all values.} \item{PI}{Logical flag indicating whether to plot prediction intervals.} \item{showgap}{If \code{showgap=FALSE}, the gap between the historical observations and the forecasts is removed.} \item{shaded}{Logical flag indicating whether prediction intervals should be shaded (\code{TRUE}) or lines (\code{FALSE})} \item{shadebars}{Logical flag indicating if prediction intervals should be plotted as shaded bars (if \code{TRUE}) or a shaded polygon (if \code{FALSE}). Ignored if \code{shaded=FALSE}. Bars are plotted by default if there are fewer than five forecast horizons.} \item{shadecols}{Colors for shaded prediction intervals. To get default colors used prior to v3.26, set \code{shadecols="oldstyle"}.} \item{col}{Colour for the data line.} \item{fcol}{Colour for the forecast line.} \item{pi.col}{If \code{shaded=FALSE} and \code{PI=TRUE}, the prediction intervals are plotted in this colour.} \item{pi.lty}{If \code{shaded=FALSE} and \code{PI=TRUE}, the prediction intervals are plotted using this line type.} \item{ylim}{Limits on y-axis.} \item{main}{Main title.} \item{xlab}{X-axis label.} \item{ylab}{Y-axis label.} \item{type}{1-character string giving the type of plot desired. As for \code{\link[graphics]{plot.default}}.} \item{flty}{Line type for the forecast line.} \item{flwd}{Line width for the forecast line.} \item{...}{Other plotting parameters to affect the plot.} \item{object}{Forecast object produced by \code{\link{forecast}}. Used for ggplot graphics (S3 method consistency).} \item{series}{Matches an unidentified forecast layer with a coloured object on the plot.} \item{fitcol}{Line colour for fitted values.} \item{pch}{Plotting character (if \code{type=="p"} or \code{type=="o"}).} } \value{ None. } \description{ Plots historical data with forecasts and prediction intervals. } \details{ \code{autoplot} will produce a ggplot object. plot.splineforecast autoplot.splineforecast } \examples{ library(ggplot2) wine.fit <- hw(wineind,h=48) plot(wine.fit) autoplot(wine.fit) fit <- tslm(wineind ~ fourier(wineind,4)) fcast <- forecast(fit, newdata=data.frame(fourier(wineind,4,20))) autoplot(fcast) fcast <- splinef(airmiles,h=5) plot(fcast) autoplot(fcast) } \references{ Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles and practice}, 2nd edition, OTexts: Melbourne, Australia. \url{https://otexts.com/fpp2/} } \seealso{ \code{\link[stats]{plot.ts}} } \author{ Rob J Hyndman & Mitchell O'Hara-Wild } \keyword{ts} forecast/man/arfima.Rd0000644000176200001440000000636214150370574014376 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/arfima.R \name{arfima} \alias{arfima} \title{Fit a fractionally differenced ARFIMA model} \usage{ arfima( y, drange = c(0, 0.5), estim = c("mle", "ls"), model = NULL, lambda = NULL, biasadj = FALSE, x = y, ... ) } \arguments{ \item{y}{a univariate time series (numeric vector).} \item{drange}{Allowable values of d to be considered. Default of \code{c(0,0.5)} ensures a stationary model is returned.} \item{estim}{If \code{estim=="ls"}, then the ARMA parameters are calculated using the Haslett-Raftery algorithm. If \code{estim=="mle"}, then the ARMA parameters are calculated using full MLE via the \code{\link[stats]{arima}} function.} \item{model}{Output from a previous call to \code{arfima}. If model is passed, this same model is fitted to y without re-estimating any parameters.} \item{lambda}{Box-Cox transformation parameter. 
If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{x}{Deprecated. Included for backwards compatibility.} \item{\dots}{Other arguments passed to \code{\link{auto.arima}} when selecting p and q.} } \value{ A list object of S3 class \code{"fracdiff"}, which is described in the \code{\link[fracdiff]{fracdiff}} documentation. A few additional objects are added to the list including \code{x} (the original time series), and the \code{residuals} and \code{fitted} values. } \description{ An ARFIMA(p,d,q) model is selected and estimated automatically using the Hyndman-Khandakar (2008) algorithm to select p and q and the Haslett and Raftery (1989) algorithm to estimate the parameters including d. } \details{ This function combines \code{\link[fracdiff]{fracdiff}} and \code{\link{auto.arima}} to automatically select and estimate an ARFIMA model. The fractional differencing parameter is chosen first assuming an ARFIMA(2,d,0) model. Then the data are fractionally differenced using the estimated d and an ARMA model is selected for the resulting time series using \code{\link{auto.arima}}. Finally, the full ARFIMA(p,d,q) model is re-estimated using \code{\link[fracdiff]{fracdiff}}. If \code{estim=="mle"}, the ARMA coefficients are refined using \code{\link[stats]{arima}}. } \examples{ library(fracdiff) x <- fracdiff.sim( 100, ma=-.4, d=.3)$series fit <- arfima(x) tsdisplay(residuals(fit)) } \references{ J. Haslett and A. E. Raftery (1989) Space-time Modelling with Long-memory Dependence: Assessing Ireland's Wind Power Resource (with discussion); \emph{Applied Statistics} \bold{38}, 1-50. Hyndman, R.J. and Khandakar, Y. (2008) "Automatic time series forecasting: The forecast package for R", \emph{Journal of Statistical Software}, \bold{26}(3). } \seealso{ \code{\link[fracdiff]{fracdiff}}, \code{\link{auto.arima}}, \code{\link{forecast.fracdiff}}. } \author{ Rob J Hyndman and Farah Yasmeen } \keyword{ts} forecast/man/forecast-package.Rd0000644000176200001440000000371014341272370016325 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/forecast-package.R \docType{package} \name{forecast-package} \alias{forecast-package} \alias{_PACKAGE} \title{forecast: Forecasting Functions for Time Series and Linear Models} \description{ \if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}} Methods and tools for displaying and analysing univariate time series forecasts including exponential smoothing via state space models and automatic ARIMA modelling. 
} \seealso{ Useful links: \itemize{ \item \url{https://pkg.robjhyndman.com/forecast/} \item \url{https://github.com/robjhyndman/forecast} \item Report bugs at \url{https://github.com/robjhyndman/forecast/issues} } } \author{ \strong{Maintainer}: Rob Hyndman \email{Rob.Hyndman@monash.edu} (\href{https://orcid.org/0000-0002-2140-5352}{ORCID}) [copyright holder] Authors: \itemize{ \item George Athanasopoulos (\href{https://orcid.org/0000-0002-5389-2802}{ORCID}) \item Christoph Bergmeir (\href{https://orcid.org/0000-0002-3665-9021}{ORCID}) \item Gabriel Caceres (\href{https://orcid.org/0000-0002-2947-2023}{ORCID}) \item Leanne Chhay \item Kirill Kuroptev \item Mitchell O'Hara-Wild (\href{https://orcid.org/0000-0001-6729-7695}{ORCID}) \item Fotios Petropoulos (\href{https://orcid.org/0000-0003-3039-4955}{ORCID}) \item Slava Razbash \item Earo Wang (\href{https://orcid.org/0000-0001-6448-5260}{ORCID}) \item Farah Yasmeen (\href{https://orcid.org/0000-0002-1479-5401}{ORCID}) } Other contributors: \itemize{ \item Federico Garza [contributor] \item Daniele Girolimetto [contributor] \item Ross Ihaka [contributor, copyright holder] \item R Core Team [contributor, copyright holder] \item Daniel Reid [contributor] \item David Shaub [contributor] \item Yuan Tang (\href{https://orcid.org/0000-0001-5243-233X}{ORCID}) [contributor] \item Xiaoqian Wang [contributor] \item Zhenyu Zhou [contributor] } } \keyword{package} forecast/man/meanf.Rd0000644000176200001440000000552114150370574014221 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/forecast2.R \name{meanf} \alias{meanf} \title{Mean Forecast} \usage{ meanf( y, h = 10, level = c(80, 95), fan = FALSE, lambda = NULL, biasadj = FALSE, bootstrap = FALSE, npaths = 5000, x = y ) } \arguments{ \item{y}{a numeric vector or time series of class \code{ts}} \item{h}{Number of periods for forecasting} \item{level}{Confidence levels for prediction intervals.} \item{fan}{If TRUE, level is set to seq(51,99,by=3). This is suitable for fan plots.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{bootstrap}{If TRUE, use a bootstrap method to compute prediction intervals. Otherwise, assume a normal distribution.} \item{npaths}{Number of bootstrapped sample paths to use if \code{bootstrap==TRUE}.} \item{x}{Deprecated. Included for backwards compatibility.} } \value{ An object of class "\code{forecast}". The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{meanf}. 
An object of class \code{"forecast"} is a list containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} \item{fitted}{Fitted values (one-step forecasts)} } \description{ Returns forecasts and prediction intervals for an iid model applied to y. } \details{ The iid model is \deqn{Y_t=\mu + Z_t}{Y[t]=mu + Z[t]} where \eqn{Z_t}{Z[t]} is a normal iid error. Forecasts are given by \deqn{Y_n(h)=\mu}{Y[n+h]=mu} where \eqn{\mu}{mu} is estimated by the sample mean. } \examples{ nile.fcast <- meanf(Nile, h=10) plot(nile.fcast) } \seealso{ \code{\link{rwf}} } \author{ Rob J Hyndman } \keyword{ts} forecast/man/nsdiffs.Rd0000644000176200001440000000527514150370574014575 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unitRoot.R \name{nsdiffs} \alias{nsdiffs} \title{Number of differences required for a seasonally stationary series} \usage{ nsdiffs( x, alpha = 0.05, m = frequency(x), test = c("seas", "ocsb", "hegy", "ch"), max.D = 1, ... ) } \arguments{ \item{x}{A univariate time series} \item{alpha}{Level of the test, possible values range from 0.01 to 0.1.} \item{m}{Deprecated. Length of seasonal period} \item{test}{Type of unit root test to use} \item{max.D}{Maximum number of seasonal differences allowed} \item{...}{Additional arguments to be passed on to the unit root test} } \value{ An integer indicating the number of differences required for stationarity. } \description{ Functions to estimate the number of differences required to make a given time series stationary. \code{nsdiffs} estimates the number of seasonal differences necessary. } \details{ \code{nsdiffs} uses seasonal unit root tests to determine the number of seasonal differences required for time series \code{x} to be made stationary (possibly with some lag-one differencing as well). Several different tests are available: \itemize{ \item If \code{test="seas"} (default), a measure of seasonal strength is used, where differencing is selected if the seasonal strength (Wang, Smith & Hyndman, 2006) exceeds 0.64 (based on minimizing MASE when forecasting using auto.arima on M3 and M4 data). \item If \code{test="ch"}, the Canova-Hansen (1995) test is used (with null hypothesis of deterministic seasonality) \item If \code{test="hegy"}, the Hylleberg, Engle, Granger & Yoo (1990) test is used. \item If \code{test="ocsb"}, the Osborn-Chui-Smith-Birchenhall (1988) test is used (with null hypothesis that a seasonal unit root exists). } } \examples{ nsdiffs(AirPassengers) } \references{ Wang, X, Smith, KA, Hyndman, RJ (2006) "Characteristic-based clustering for time series data", \emph{Data Mining and Knowledge Discovery}, \bold{13}(3), 335-364. Osborn DR, Chui APL, Smith J, and Birchenhall CR (1988) "Seasonality and the order of integration for consumption", \emph{Oxford Bulletin of Economics and Statistics} \bold{50}(4):361-377. Canova F and Hansen BE (1995) "Are Seasonal Patterns Constant over Time? 
A Test for Seasonal Stability", \emph{Journal of Business and Economic Statistics} \bold{13}(3):237-252. Hylleberg S, Engle R, Granger C and Yoo B (1990) "Seasonal integration and cointegration.", \emph{Journal of Econometrics} \bold{44}(1), pp. 215-238. } \seealso{ \code{\link{auto.arima}}, \code{\link{ndiffs}}, \code{\link{ocsb.test}}, \code{\link[uroot]{hegy.test}}, and \code{\link[uroot]{ch.test}} } \author{ Rob J Hyndman, Slava Razbash and Mitchell O'Hara-Wild } forecast/man/BoxCox.Rd0000644000176200001440000000364414341272370014336 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/forecast2.R \name{BoxCox} \alias{BoxCox} \alias{InvBoxCox} \title{Box Cox Transformation} \usage{ BoxCox(x, lambda) InvBoxCox(x, lambda, biasadj = FALSE, fvar = NULL) } \arguments{ \item{x}{a numeric vector or time series of class \code{ts}.} \item{lambda}{transformation parameter. If \code{lambda = "auto"}, then the transformation parameter lambda is chosen using BoxCox.lambda (with a lower bound of -0.9)} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{fvar}{Optional parameter required if biasadj=TRUE. Can either be the forecast variance, or a list containing the interval \code{level}, and the corresponding \code{upper} and \code{lower} intervals.} } \value{ a numeric vector of the same length as x. } \description{ BoxCox() returns a transformation of the input variable using a Box-Cox transformation. InvBoxCox() reverses the transformation. } \details{ The Box-Cox transformation (as given by Bickel & Doksum 1981) is given by \deqn{f_\lambda(x) =(sign(x)|x|^\lambda - 1)/\lambda}{f(x;lambda)=(sign(x)|x|^lambda - 1)/lambda} if \eqn{\lambda\ne0}{lambda is not equal to 0}. For \eqn{\lambda=0}{lambda=0}, \deqn{f_0(x)=\log(x)}{f(x;0)=log(x)}. } \examples{ lambda <- BoxCox.lambda(lynx) lynx.fit <- ar(BoxCox(lynx,lambda)) plot(forecast(lynx.fit,h=20,lambda=lambda)) } \references{ Box, G. E. P. and Cox, D. R. (1964) An analysis of transformations. \emph{JRSS B} \bold{26} 211--246. Bickel, P. J. and Doksum K. A. (1981) An Analysis of Transformations Revisited. \emph{JASA} \bold{76} 296-311. } \seealso{ \code{\link{BoxCox.lambda}} } \author{ Rob J Hyndman & Mitchell O'Hara-Wild } \keyword{ts} forecast/man/tbats.Rd0000644000176200001440000000740514150370574014253 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tbats.R \name{tbats} \alias{tbats} \alias{as.character.tbats} \alias{print.tbats} \title{TBATS model (Exponential smoothing state space model with Box-Cox transformation, ARMA errors, Trend and Seasonal components)} \usage{ tbats( y, use.box.cox = NULL, use.trend = NULL, use.damped.trend = NULL, seasonal.periods = NULL, use.arma.errors = TRUE, use.parallel = length(y) > 1000, num.cores = 2, bc.lower = 0, bc.upper = 1, biasadj = FALSE, model = NULL, ... ) } \arguments{ \item{y}{The time series to be forecast. Can be \code{numeric}, \code{msts} or \code{ts}. Only univariate time series are supported.} \item{use.box.cox}{\code{TRUE/FALSE} indicates whether to use the Box-Cox transformation or not. If \code{NULL} then both are tried and the best fit is selected by AIC.} \item{use.trend}{\code{TRUE/FALSE} indicates whether to include a trend or not. 
If \code{NULL} then both are tried and the best fit is selected by AIC.} \item{use.damped.trend}{\code{TRUE/FALSE} indicates whether to include a damping parameter in the trend or not. If \code{NULL} then both are tried and the best fit is selected by AIC.} \item{seasonal.periods}{If \code{y} is \code{numeric} then seasonal periods can be specified with this parameter.} \item{use.arma.errors}{\code{TRUE/FALSE} indicates whether to include ARMA errors or not. If \code{TRUE} the best fit is selected by AIC. If \code{FALSE} then the selection algorithm does not consider ARMA errors.} \item{use.parallel}{\code{TRUE/FALSE} indicates whether or not to use parallel processing.} \item{num.cores}{The number of parallel processes to be used if using parallel processing. If \code{NULL} then the number of logical cores is detected and all available cores are used.} \item{bc.lower}{The lower limit (inclusive) for the Box-Cox transformation.} \item{bc.upper}{The upper limit (inclusive) for the Box-Cox transformation.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If TRUE, point forecasts and fitted values are mean forecasts. Otherwise, these points can be considered the median of the forecast densities.} \item{model}{Output from a previous call to \code{tbats}. If model is passed, this same model is fitted to \code{y} without re-estimating any parameters.} \item{...}{Additional arguments to be passed to \code{auto.arima} when choosing an ARMA(p, q) model for the errors. (Note that xreg will be ignored, as will any arguments concerning seasonality and differencing, but arguments controlling the values of p and q will be used.)} } \value{ An object with class \code{c("tbats", "bats")}. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{bats} and associated functions. The fitted model is designated TBATS(omega, {p,q}, phi, {<m1,k1>,...,<mJ,kJ>}), where omega is the Box-Cox parameter and phi is the damping parameter; the error is modelled as an ARMA(p,q) process, m1,...,mJ list the seasonal periods used in the model, and k1,...,kJ are the corresponding numbers of Fourier terms used for each seasonality. } \description{ Fits a TBATS model to \code{y}, as described in De Livera, Hyndman & Snyder (2011). Parallel processing is used by default to speed up the computations. } \examples{ \dontrun{ fit <- tbats(USAccDeaths) plot(forecast(fit)) taylor.fit <- tbats(taylor) plot(forecast(taylor.fit))} } \references{ De Livera, A.M., Hyndman, R.J., & Snyder, R. D. (2011), Forecasting time series with complex seasonal patterns using exponential smoothing, \emph{Journal of the American Statistical Association}, \bold{106}(496), 1513-1527. } \seealso{ \code{\link{tbats.components}}. } \author{ Slava Razbash and Rob J Hyndman } \keyword{ts} forecast/man/sindexf.Rd0000644000176200001440000000164614150370574014577 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/season.R \name{sindexf} \alias{sindexf} \title{Forecast seasonal index} \usage{ sindexf(object, h) } \arguments{ \item{object}{Output from \code{\link[stats]{decompose}} or \code{\link[stats]{stl}}.} \item{h}{Number of periods ahead to forecast.} } \value{ Time series } \description{ Returns a vector containing the seasonal index for \code{h} future periods. If the seasonal index is non-periodic, it uses the last values of the index.
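If it is periodic, the index is simply extended cyclically over the \code{h} future periods.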
} \examples{ uk.stl <- stl(UKDriverDeaths,"periodic") uk.sa <- seasadj(uk.stl) uk.fcast <- holt(uk.sa,36) seasf <- sindexf(uk.stl,36) uk.fcast$mean <- uk.fcast$mean + seasf uk.fcast$lower <- uk.fcast$lower + cbind(seasf,seasf) uk.fcast$upper <- uk.fcast$upper + cbind(seasf,seasf) uk.fcast$x <- UKDriverDeaths plot(uk.fcast,main="Forecasts from Holt's method with seasonal adjustment") } \author{ Rob J Hyndman } \keyword{ts} forecast/man/forecast.stl.Rd0000644000176200001440000001636514150370574015552 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mstl.R \name{forecast.stl} \alias{forecast.stl} \alias{stlm} \alias{forecast.stlm} \alias{stlf} \title{Forecasting using stl objects} \usage{ \method{forecast}{stl}( object, method = c("ets", "arima", "naive", "rwdrift"), etsmodel = "ZZN", forecastfunction = NULL, h = frequency(object$time.series) * 2, level = c(80, 95), fan = FALSE, lambda = NULL, biasadj = NULL, xreg = NULL, newxreg = NULL, allow.multiplicative.trend = FALSE, ... ) stlm( y, s.window = 7 + 4 * seq(6), robust = FALSE, method = c("ets", "arima"), modelfunction = NULL, model = NULL, etsmodel = "ZZN", lambda = NULL, biasadj = FALSE, xreg = NULL, allow.multiplicative.trend = FALSE, x = y, ... ) \method{forecast}{stlm}( object, h = 2 * object$m, level = c(80, 95), fan = FALSE, lambda = object$lambda, biasadj = NULL, newxreg = NULL, allow.multiplicative.trend = FALSE, ... ) stlf( y, h = frequency(x) * 2, s.window = 7 + 4 * seq(6), t.window = NULL, robust = FALSE, lambda = NULL, biasadj = FALSE, x = y, ... ) } \arguments{ \item{object}{An object of class \code{stl} or \code{stlm}. Usually the result of a call to \code{\link[stats]{stl}} or \code{stlm}.} \item{method}{Method to use for forecasting the seasonally adjusted series.} \item{etsmodel}{The ets model specification passed to \code{\link[forecast]{ets}}. By default it allows any non-seasonal model. If \code{method!="ets"}, this argument is ignored.} \item{forecastfunction}{An alternative way of specifying the function for forecasting the seasonally adjusted series. If \code{forecastfunction} is not \code{NULL}, then \code{method} is ignored. Otherwise \code{method} is used to specify the forecasting method to be used.} \item{h}{Number of periods for forecasting.} \item{level}{Confidence level for prediction intervals.} \item{fan}{If \code{TRUE}, level is set to seq(51,99,by=3). This is suitable for fan plots.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{xreg}{Historical regressors to be used in \code{\link[forecast]{auto.arima}()} when \code{method=="arima"}.} \item{newxreg}{Future regressors to be used in \code{\link[forecast]{forecast.Arima}()}.} \item{allow.multiplicative.trend}{If TRUE, then ETS models with multiplicative trends are allowed. 
Otherwise, only additive or no trend ETS models are permitted.} \item{...}{Other arguments passed to \code{forecast.stl}, \code{modelfunction} or \code{forecastfunction}.} \item{y}{A univariate numeric time series of class \code{ts}} \item{s.window}{Either the character string ``periodic'' or the span (in lags) of the loess window for seasonal extraction.} \item{robust}{If \code{TRUE}, robust fitting will used in the loess procedure within \code{\link[stats]{stl}}.} \item{modelfunction}{An alternative way of specifying the function for modelling the seasonally adjusted series. If \code{modelfunction} is not \code{NULL}, then \code{method} is ignored. Otherwise \code{method} is used to specify the time series model to be used.} \item{model}{Output from a previous call to \code{stlm}. If a \code{stlm} model is passed, this same model is fitted to y without re-estimating any parameters.} \item{x}{Deprecated. Included for backwards compatibility.} \item{t.window}{A number to control the smoothness of the trend. See \code{\link[stats]{stl}} for details.} } \value{ \code{stlm} returns an object of class \code{stlm}. The other functions return objects of class \code{forecast}. There are many methods for working with \code{\link{forecast}} objects including \code{summary} to obtain and print a summary of the results, while \code{plot} produces a plot of the forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features. } \description{ Forecasts of STL objects are obtained by applying a non-seasonal forecasting method to the seasonally adjusted data and re-seasonalizing using the last year of the seasonal component. } \details{ \code{stlm} takes a time series \code{y}, applies an STL decomposition, and models the seasonally adjusted data using the model passed as \code{modelfunction} or specified using \code{method}. It returns an object that includes the original STL decomposition and a time series model fitted to the seasonally adjusted data. This object can be passed to the \code{forecast.stlm} for forecasting. \code{forecast.stlm} forecasts the seasonally adjusted data, then re-seasonalizes the results by adding back the last year of the estimated seasonal component. \code{stlf} combines \code{stlm} and \code{forecast.stlm}. It takes a \code{ts} argument, applies an STL decomposition, models the seasonally adjusted data, reseasonalizes, and returns the forecasts. However, it allows more general forecasting methods to be specified via \code{forecastfunction}. \code{forecast.stl} is similar to \code{stlf} except that it takes the STL decomposition as the first argument, instead of the time series. Note that the prediction intervals ignore the uncertainty associated with the seasonal component. They are computed using the prediction intervals from the seasonally adjusted series, which are then reseasonalized using the last year of the seasonal component. The uncertainty in the seasonal component is ignored. The time series model for the seasonally adjusted data can be specified in \code{stlm} using either \code{method} or \code{modelfunction}. The \code{method} argument provides a shorthand way of specifying \code{modelfunction} for a few special cases. More generally, \code{modelfunction} can be any function with first argument a \code{ts} object, that returns an object that can be passed to \code{\link{forecast}}. 
For example, \code{modelfunction=ar} uses the \code{\link{ar}} function for modelling the seasonally adjusted series. The forecasting method for the seasonally adjusted data can be specified in \code{stlf} and \code{forecast.stl} using either \code{method} or \code{forecastfunction}. The \code{method} argument provides a shorthand way of specifying \code{forecastfunction} for a few special cases. More generally, \code{forecastfunction} can be any function with first argument a \code{ts} object, and other arguments \code{h} and \code{level}, which returns an object of class \code{\link{forecast}}. For example, \code{forecastfunction=thetaf} uses the \code{\link{thetaf}} function for forecasting the seasonally adjusted series. } \examples{ tsmod <- stlm(USAccDeaths, modelfunction = ar) plot(forecast(tsmod, h = 36)) decomp <- stl(USAccDeaths, s.window = "periodic") plot(forecast(decomp)) plot(stlf(AirPassengers, lambda = 0)) } \seealso{ \code{\link[stats]{stl}}, \code{\link{forecast.ets}}, \code{\link{forecast.Arima}}. } \author{ Rob J Hyndman } \keyword{ts} forecast/man/checkresiduals.Rd0000644000176200001440000000316214456202551016121 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/checkresiduals.R \name{checkresiduals} \alias{checkresiduals} \title{Check that residuals from a time series model look like white noise} \usage{ checkresiduals(object, lag, test, plot = TRUE, ...) } \arguments{ \item{object}{Either a time series model, a forecast object, or a time series (assumed to be residuals).} \item{lag}{Number of lags to use in the Ljung-Box or Breusch-Godfrey test. If missing, it is set to \code{min(10,n/5)} for non-seasonal data, and \code{min(2m, n/5)} for seasonal data, where \code{n} is the length of the series, and \code{m} is the seasonal period of the data. It is further constrained to be at least \code{df+3} where \code{df} is the degrees of freedom of the model. This ensures there are at least 3 degrees of freedom used in the chi-squared test.} \item{test}{Test to use for serial correlation. By default, if \code{object} is of class \code{lm}, then \code{test="BG"}. Otherwise, \code{test="LB"}. Setting \code{test=FALSE} will prevent the test results being printed.} \item{plot}{Logical. If \code{TRUE}, will produce the plot.} \item{...}{Other arguments are passed to \code{\link{ggtsdisplay}}.} } \value{ None } \description{ If \code{plot=TRUE}, produces a time plot of the residuals, the corresponding ACF, and a histogram. If \code{test} is not \code{FALSE}, the output from either a Ljung-Box test or Breusch-Godfrey test is printed. } \examples{ fit <- ets(WWWusage) checkresiduals(fit) } \seealso{ \code{\link{ggtsdisplay}}, \code{\link[stats]{Box.test}}, \code{\link[lmtest]{bgtest}} } \author{ Rob J Hyndman } forecast/man/thetaf.Rd0000644000176200001440000000546414150370574014414 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/theta.R \name{thetaf} \alias{thetaf} \title{Theta method forecast} \usage{ thetaf( y, h = ifelse(frequency(y) > 1, 2 * frequency(y), 10), level = c(80, 95), fan = FALSE, x = y ) } \arguments{ \item{y}{a numeric vector or time series of class \code{ts}} \item{h}{Number of periods for forecasting} \item{level}{Confidence levels for prediction intervals.} \item{fan}{If TRUE, level is set to seq(51,99,by=3). This is suitable for fan plots.} \item{x}{Deprecated. Included for backwards compatibility.} } \value{ An object of class "\code{forecast}".
The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{thetaf}. An object of class \code{"forecast"} is a list containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} \item{fitted}{Fitted values (one-step forecasts)} } \description{ Returns forecasts and prediction intervals for a theta method forecast. } \details{ The theta method of Assimakopoulos and Nikolopoulos (2000) is equivalent to simple exponential smoothing with drift. This is demonstrated in Hyndman and Billah (2003). The series is tested for seasonality using the test outlined in A&N. If deemed seasonal, the series is seasonally adjusted using a classical multiplicative decomposition before applying the theta method. The resulting forecasts are then reseasonalized. Prediction intervals are computed using the underlying state space model. More general theta methods are available in the \code{\link[forecTheta]{forecTheta}} package. } \examples{ nile.fcast <- thetaf(Nile) plot(nile.fcast) } \references{ Assimakopoulos, V. and Nikolopoulos, K. (2000). The theta model: a decomposition approach to forecasting. \emph{International Journal of Forecasting} \bold{16}, 521-530. Hyndman, R.J., and Billah, B. (2003) Unmasking the Theta method. \emph{International Journal of Forecasting}, \bold{19}, 287-290. } \seealso{ \code{\link[stats]{arima}}, \code{\link{meanf}}, \code{\link{rwf}}, \code{\link{ses}} } \author{ Rob J Hyndman } \keyword{ts} forecast/man/fitted.Arima.Rd0000644000176200001440000000325514150370574015444 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/arfima.R, R/arima.R, R/bats.R, R/ets.R, % R/modelAR.R, R/nnetar.R, R/tbats.R \name{fitted.ARFIMA} \alias{fitted.ARFIMA} \alias{fitted.Arima} \alias{fitted.forecast_ARIMA} \alias{fitted.ar} \alias{fitted.bats} \alias{fitted.ets} \alias{fitted.modelAR} \alias{fitted.nnetar} \alias{fitted.tbats} \title{h-step in-sample forecasts for time series models.} \usage{ \method{fitted}{ARFIMA}(object, h = 1, ...) \method{fitted}{Arima}(object, h = 1, ...) \method{fitted}{ar}(object, ...) \method{fitted}{bats}(object, h = 1, ...) \method{fitted}{ets}(object, h = 1, ...) \method{fitted}{modelAR}(object, h = 1, ...) \method{fitted}{nnetar}(object, h = 1, ...) \method{fitted}{tbats}(object, h = 1, ...) } \arguments{ \item{object}{An object of class "\code{Arima}", "\code{bats}", "\code{tbats}", "\code{ets}" or "\code{nnetar}".} \item{h}{The number of steps to forecast ahead.} \item{...}{Other arguments.} } \value{ A time series of the h-step forecasts. } \description{ Returns h-step forecasts for the data used in fitting the model.
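That is, the fitted value for time \eqn{t} is the forecast of the observation at time \eqn{t} made using only information available up to time \eqn{t-h}, so \code{h=1} gives the usual one-step fitted values.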
} \examples{ fit <- ets(WWWusage) plot(WWWusage) lines(fitted(fit), col='red') lines(fitted(fit, h=2), col='green') lines(fitted(fit, h=3), col='blue') legend("topleft", legend=paste("h =",1:3), col=2:4, lty=1) } \seealso{ \code{\link{forecast.Arima}}, \code{\link{forecast.bats}}, \code{\link{forecast.tbats}}, \code{\link{forecast.ets}}, \code{\link{forecast.nnetar}}, \code{\link{residuals.Arima}}, \code{\link{residuals.bats}}, \code{\link{residuals.tbats}}, \code{\link{residuals.ets}}, \code{\link{residuals.nnetar}}. } \author{ Rob J Hyndman & Mitchell O'Hara-Wild } \keyword{ts} forecast/man/auto.arima.Rd0000644000176200001440000001462214150370574015175 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/newarima2.R \name{auto.arima} \alias{auto.arima} \title{Fit best ARIMA model to univariate time series} \usage{ auto.arima( y, d = NA, D = NA, max.p = 5, max.q = 5, max.P = 2, max.Q = 2, max.order = 5, max.d = 2, max.D = 1, start.p = 2, start.q = 2, start.P = 1, start.Q = 1, stationary = FALSE, seasonal = TRUE, ic = c("aicc", "aic", "bic"), stepwise = TRUE, nmodels = 94, trace = FALSE, approximation = (length(x) > 150 | frequency(x) > 12), method = NULL, truncate = NULL, xreg = NULL, test = c("kpss", "adf", "pp"), test.args = list(), seasonal.test = c("seas", "ocsb", "hegy", "ch"), seasonal.test.args = list(), allowdrift = TRUE, allowmean = TRUE, lambda = NULL, biasadj = FALSE, parallel = FALSE, num.cores = 2, x = y, ... ) } \arguments{ \item{y}{a univariate time series} \item{d}{Order of first-differencing. If missing, will choose a value based on \code{test}.} \item{D}{Order of seasonal-differencing. If missing, will choose a value based on \code{seasonal.test}.} \item{max.p}{Maximum value of p} \item{max.q}{Maximum value of q} \item{max.P}{Maximum value of P} \item{max.Q}{Maximum value of Q} \item{max.order}{Maximum value of p+q+P+Q if model selection is not stepwise.} \item{max.d}{Maximum number of non-seasonal differences} \item{max.D}{Maximum number of seasonal differences} \item{start.p}{Starting value of p in stepwise procedure.} \item{start.q}{Starting value of q in stepwise procedure.} \item{start.P}{Starting value of P in stepwise procedure.} \item{start.Q}{Starting value of Q in stepwise procedure.} \item{stationary}{If \code{TRUE}, restricts search to stationary models.} \item{seasonal}{If \code{FALSE}, restricts search to non-seasonal models.} \item{ic}{Information criterion to be used in model selection.} \item{stepwise}{If \code{TRUE}, will do stepwise selection (faster). Otherwise, it searches over all models. Non-stepwise selection can be very slow, especially for seasonal models.} \item{nmodels}{Maximum number of models considered in the stepwise search.} \item{trace}{If \code{TRUE}, the list of ARIMA models considered will be reported.} \item{approximation}{If \code{TRUE}, estimation is via conditional sums of squares and the information criteria used for model selection are approximated. The final model is still computed using maximum likelihood estimation. Approximation should be used for long time series or a high seasonal period to avoid excessive computation times.} \item{method}{fitting method: maximum likelihood or minimize conditional sum-of-squares. The default (unless there are missing values) is to use conditional-sum-of-squares to find starting values, then maximum likelihood. Can be abbreviated.} \item{truncate}{An integer value indicating how many observations to use in model selection.
The last \code{truncate} values of the series are used to select a model when \code{truncate} is not \code{NULL} and \code{approximation=TRUE}. All observations are used if either \code{truncate=NULL} or \code{approximation=FALSE}.} \item{xreg}{Optionally, a numerical vector or matrix of external regressors, which must have the same number of rows as \code{y}. (It should not be a data frame.)} \item{test}{Type of unit root test to use. See \code{\link{ndiffs}} for details.} \item{test.args}{Additional arguments to be passed to the unit root test.} \item{seasonal.test}{This determines which method is used to select the number of seasonal differences. The default method is to use a measure of seasonal strength computed from an STL decomposition. Other possibilities involve seasonal unit root tests.} \item{seasonal.test.args}{Additional arguments to be passed to the seasonal unit root test. See \code{\link{nsdiffs}} for details.} \item{allowdrift}{If \code{TRUE}, models with drift terms are considered.} \item{allowmean}{If \code{TRUE}, models with a non-zero mean are considered.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, data transformed before model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{parallel}{If \code{TRUE} and \code{stepwise = FALSE}, then the specification search is done in parallel. This can give a significant speedup on multicore machines.} \item{num.cores}{Allows the user to specify the number of parallel processes to be used if \code{parallel = TRUE} and \code{stepwise = FALSE}. If \code{NULL}, then the number of logical cores is automatically detected and all available cores are used.} \item{x}{Deprecated. Included for backwards compatibility.} \item{...}{Additional arguments to be passed to \code{\link[stats]{arima}}.} } \value{ Same as for \code{\link{Arima}} } \description{ Returns the best ARIMA model according to either AIC, AICc or BIC value. The function conducts a search over possible models within the order constraints provided. } \details{ The default arguments are designed for rapid estimation of models for many time series. If you are analysing just one time series, and can afford to take some more time, it is recommended that you set \code{stepwise=FALSE} and \code{approximation=FALSE}. Non-stepwise selection can be slow, especially for seasonal data. The stepwise algorithm outlined in Hyndman & Khandakar (2008) is used except that the default method for selecting seasonal differences is now based on an estimate of seasonal strength (Wang, Smith & Hyndman, 2006) rather than the Canova-Hansen test. There are also some other minor variations to the algorithm described in Hyndman and Khandakar (2008). } \examples{ fit <- auto.arima(WWWusage) plot(forecast(fit,h=20)) } \references{ Hyndman, RJ and Khandakar, Y (2008) "Automatic time series forecasting: The forecast package for R", \emph{Journal of Statistical Software}, \bold{26}(3). Wang, X, Smith, KA, Hyndman, RJ (2006) "Characteristic-based clustering for time series data", \emph{Data Mining and Knowledge Discovery}, \bold{13}(3), 335-364.
} \seealso{ \code{\link{Arima}} } \author{ Rob J Hyndman } \keyword{ts} forecast/man/geom_forecast.Rd0000644000176200001440000001054314456202551015746 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggplot.R \docType{data} \name{StatForecast} \alias{StatForecast} \alias{GeomForecast} \alias{geom_forecast} \title{Forecast plot} \format{ An object of class \code{StatForecast} (inherits from \code{Stat}, \code{ggproto}, \code{gg}) of length 3. An object of class \code{GeomForecast} (inherits from \code{Geom}, \code{ggproto}, \code{gg}) of length 7. } \usage{ StatForecast GeomForecast geom_forecast( mapping = NULL, data = NULL, stat = "forecast", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, PI = TRUE, showgap = TRUE, series = NULL, ... ) } \arguments{ \item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}}. If specified and \code{inherit.aes = TRUE} (the default), it is combined with the default mapping at the top level of the plot. You must supply \code{mapping} if there is no plot mapping.} \item{data}{The data to be displayed in this layer. There are three options: If \code{NULL}, the default, the data is inherited from the plot data as specified in the call to \code{\link{ggplot}}. A \code{data.frame}, or other object, will override the plot data. All objects will be fortified to produce a data frame. See \code{\link{fortify}} for which variables will be created. A \code{function} will be called with a single argument, the plot data. The return value must be a \code{data.frame}, and will be used as the layer data.} \item{stat}{The stat object to use to calculate the data.} \item{position}{Position adjustment, either as a string, or the result of a call to a position adjustment function.} \item{na.rm}{If \code{FALSE} (the default), removes missing values with a warning. If \code{TRUE} silently removes missing values.} \item{show.legend}{logical. Should this layer be included in the legends? \code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE} never includes, and \code{TRUE} always includes.} \item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics, rather than combining with them. This is most useful for helper functions that define both data and aesthetics and shouldn't inherit behaviour from the default plot specification, e.g. \code{\link{borders}}.} \item{PI}{If \code{FALSE}, prediction intervals will not be plotted, giving only the forecast line.} \item{showgap}{If \code{showgap=FALSE}, the gap between the historical observations and the forecasts is removed.} \item{series}{Matches an unidentified forecast layer with a coloured object on the plot.} \item{...}{Additional arguments for \code{\link{forecast.ts}}; other arguments are passed on to \code{\link{layer}}. These are often aesthetics, used to set an aesthetic to a fixed value, like \code{color = "red"} or \code{alpha = .5}. They may also be parameters to the paired geom/stat.} } \value{ A layer for a ggplot graph. } \description{ Generates forecasts from \code{forecast.ts} and adds them to the plot. Forecasts can be modified by passing forecast-specific arguments (see the arguments above). } \details{ Multivariate forecasting is supported by having each time series in a different group. You can also pass \code{geom_forecast} a \code{forecast} object to add it to the plot.
The aesthetics required for the forecasting to work include forecast observations on the y axis, and the \code{time} of the observations on the x axis. Refer to the examples below. To automatically set up aesthetics, use \code{autoplot}. } \examples{ \dontrun{ library(ggplot2) autoplot(USAccDeaths) + geom_forecast() lungDeaths <- cbind(mdeaths, fdeaths) autoplot(lungDeaths) + geom_forecast() # Using fortify.ts p <- ggplot(aes(x=x, y=y), data=USAccDeaths) p <- p + geom_line() p + geom_forecast() # Without fortify.ts data <- data.frame(USAccDeaths=as.numeric(USAccDeaths), time=as.numeric(time(USAccDeaths))) p <- ggplot(aes(x=time, y=USAccDeaths), data=data) p <- p + geom_line() p + geom_forecast() p + geom_forecast(h=60) p <- ggplot(aes(x=time, y=USAccDeaths), data=data) p + geom_forecast(level=c(70,98)) p + geom_forecast(level=c(70,98),colour="lightblue") # Add forecasts to multivariate series with colour groups lungDeaths <- cbind(mdeaths, fdeaths) autoplot(lungDeaths) + geom_forecast(forecast(mdeaths), series="mdeaths") } } \seealso{ \code{\link{forecast}}, \code{\link[ggplot2]{ggproto}} } \author{ Mitchell O'Hara-Wild } \keyword{datasets} forecast/man/residuals.forecast.Rd0000644000176200001440000000610714150370574016734 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/residuals.R \name{residuals.forecast} \alias{residuals.forecast} \alias{residuals.ar} \alias{residuals.Arima} \alias{residuals.forecast_ARIMA} \alias{residuals.bats} \alias{residuals.tbats} \alias{residuals.ets} \alias{residuals.ARFIMA} \alias{residuals.nnetar} \alias{residuals.stlm} \alias{residuals.tslm} \title{Residuals for various time series models} \usage{ \method{residuals}{forecast}(object, type = c("innovation", "response"), ...) \method{residuals}{ar}(object, type = c("innovation", "response"), ...) \method{residuals}{Arima}(object, type = c("innovation", "response", "regression"), h = 1, ...) \method{residuals}{bats}(object, type = c("innovation", "response"), h = 1, ...) \method{residuals}{tbats}(object, type = c("innovation", "response"), h = 1, ...) \method{residuals}{ets}(object, type = c("innovation", "response"), h = 1, ...) \method{residuals}{ARFIMA}(object, type = c("innovation", "response"), ...) \method{residuals}{nnetar}(object, type = c("innovation", "response"), h = 1, ...) \method{residuals}{stlm}(object, type = c("innovation", "response"), ...) \method{residuals}{tslm}(object, type = c("innovation", "response", "deviance"), ...) } \arguments{ \item{object}{An object containing a time series model of class \code{ar}, \code{Arima}, \code{bats}, \code{ets}, \code{arfima}, \code{nnetar} or \code{stlm}. If \code{object} is of class \code{forecast}, then the function will return \code{object$residuals} if it exists, otherwise it returns the differences between the observations and their fitted values.} \item{type}{Type of residual.} \item{...}{Other arguments not used.} \item{h}{If \code{type='response'}, then the fitted values are computed for \code{h}-step forecasts.} } \value{ A \code{ts} object. } \description{ Returns a time series of residuals from a fitted model. } \details{ Innovation residuals correspond to the white noise process that drives the evolution of the time series model. Response residuals are the difference between the observations and the fitted values (equivalent to \code{h}-step forecasts). For functions with no \code{h} argument, \code{h=1}.
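Equivalently, for \code{h=1} the response residuals are the one-step forecast errors \eqn{y_t - \hat{y}_{t|t-1}}.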
For homoscedastic models, the innovation residuals and the response residuals for \code{h=1} are identical. Regression residuals are available for regression models with ARIMA errors, and are equal to the original data minus the effect of the regression variables. If there are no regression variables, the errors will be identical to the original series (possibly adjusted to have zero mean). \code{arima.errors} is a deprecated function which is identical to \code{residuals.Arima(object, type="regression")}. For \code{nnetar} objects, when \code{type="innovation"} and \code{lambda} is used, a matrix of time series consisting of the residuals from each of the fitted neural networks is returned. } \examples{ fit <- Arima(lynx,order=c(4,0,0), lambda=0.5) plot(residuals(fit)) plot(residuals(fit, type='response')) } \seealso{ \code{\link{fitted.Arima}}, \code{\link{checkresiduals}}. } \author{ Rob J Hyndman } \keyword{ts} forecast/man/forecast.Arima.Rd0000644000176200001440000001064414150370574015773 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/arfima.R, R/arima.R \name{forecast.fracdiff} \alias{forecast.fracdiff} \alias{forecast.Arima} \alias{forecast.forecast_ARIMA} \alias{forecast.ar} \title{Forecasting using ARIMA or ARFIMA models} \usage{ \method{forecast}{fracdiff}( object, h = 10, level = c(80, 95), fan = FALSE, lambda = object$lambda, biasadj = NULL, ... ) \method{forecast}{Arima}( object, h = ifelse(object$arma[5] > 1, 2 * object$arma[5], 10), level = c(80, 95), fan = FALSE, xreg = NULL, lambda = object$lambda, bootstrap = FALSE, npaths = 5000, biasadj = NULL, ... ) \method{forecast}{ar}( object, h = 10, level = c(80, 95), fan = FALSE, lambda = NULL, bootstrap = FALSE, npaths = 5000, biasadj = FALSE, ... ) } \arguments{ \item{object}{An object of class "\code{Arima}", "\code{ar}" or "\code{fracdiff}". Usually the result of a call to \code{\link[stats]{arima}}, \code{\link{auto.arima}}, \code{\link[stats]{ar}}, \code{\link{arfima}} or \code{\link[fracdiff]{fracdiff}}.} \item{h}{Number of periods for forecasting. If \code{xreg} is used, \code{h} is ignored and the number of forecast periods is set to the number of rows of \code{xreg}.} \item{level}{Confidence level for prediction intervals.} \item{fan}{If \code{TRUE}, level is set to \code{seq(51,99,by=3)}. This is suitable for fan plots.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if NULL. Otherwise, the data are transformed before the model is estimated.} \item{biasadj}{Use adjusted back-transformed mean for Box-Cox transformations. If transformed data is used to produce forecasts and fitted values, a regular back transformation will result in median forecasts. If biasadj is TRUE, an adjustment will be made to produce mean forecasts and fitted values.} \item{...}{Other arguments.} \item{xreg}{Future values of any regression variables (for class \code{Arima} objects only). A numerical vector or matrix of external regressors; it should not be a data frame.} \item{bootstrap}{If \code{TRUE}, then prediction intervals are computed using simulation with resampled errors.} \item{npaths}{Number of sample paths used in computing simulated prediction intervals when \code{bootstrap=TRUE}.} } \value{ An object of class "\code{forecast}".
The function \code{summary} is used to obtain and print a summary of the results, while the function \code{plot} produces a plot of the forecasts and prediction intervals. The generic accessor functions \code{fitted.values} and \code{residuals} extract useful features of the value returned by \code{forecast.Arima}. An object of class "\code{forecast}" is a list containing at least the following elements: \item{model}{A list containing information about the fitted model} \item{method}{The name of the forecasting method as a character string} \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper limits for prediction intervals} \item{level}{The confidence values associated with the prediction intervals} \item{x}{The original time series (either \code{object} itself or the time series used to create the model stored as \code{object}).} \item{residuals}{Residuals from the fitted model. That is, x minus the fitted values.} \item{fitted}{Fitted values (one-step forecasts)} } \description{ Returns forecasts and other information for univariate ARIMA models. } \details{ For \code{Arima} or \code{ar} objects, the function calls \code{\link[stats]{predict.Arima}} or \code{\link[stats]{predict.ar}} and constructs an object of class "\code{forecast}" from the results. For \code{fracdiff} objects, the calculations are all done within \code{\link{forecast.fracdiff}} using the equations given by Peiris and Perera (1988). } \examples{ fit <- Arima(WWWusage, c(3,1,0)) plot(forecast(fit)) library(fracdiff) x <- fracdiff.sim(100, ma=-.4, d=.3)$series fit <- arfima(x) plot(forecast(fit, h=30)) } \references{ Peiris, M. & Perera, B. (1988), On prediction with fractionally differenced ARIMA models, \emph{Journal of Time Series Analysis}, \bold{9}(3), 215-220. } \seealso{ \code{\link[stats]{predict.Arima}}, \code{\link[stats]{predict.ar}}, \code{\link{auto.arima}}, \code{\link{Arima}}, \code{\link[stats]{arima}}, \code{\link[stats]{ar}}, \code{\link{arfima}}. } \author{ Rob J Hyndman } \keyword{ts} forecast/man/tbats.components.Rd0000644000176200001440000000236214150370574016434 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tbats.R \name{tbats.components} \alias{tbats.components} \title{Extract components of a TBATS model} \usage{ tbats.components(x) } \arguments{ \item{x}{A tbats object created by \code{\link{tbats}}.} } \value{ A multiple time series (\code{mts}) object. The first series is the observed time series. The second series is the trend component of the fitted model. Series three onwards are the seasonal components of the fitted model, with one time series for each of the seasonal components. All components are transformed using the estimated Box-Cox parameter. } \description{ Extract the level, slope and seasonal components of a TBATS model. The extracted components are Box-Cox transformed using the estimated transformation parameter. } \examples{ \dontrun{ fit <- tbats(USAccDeaths, use.parallel=FALSE) components <- tbats.components(fit) plot(components)} } \references{ De Livera, A.M., Hyndman, R.J., & Snyder, R. D. (2011), Forecasting time series with complex seasonal patterns using exponential smoothing, \emph{Journal of the American Statistical Association}, \bold{106}(496), 1513-1527. } \seealso{ \code{\link{tbats}}.
} \author{ Slava Razbash and Rob J Hyndman } \keyword{ts} forecast/man/gold.Rd0000644000176200001440000000052514150370574014057 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{gold} \alias{gold} \title{Daily morning gold prices} \format{ Time series data } \usage{ gold } \description{ Daily morning gold prices in US dollars. 1 January 1985 -- 31 March 1989. } \examples{ tsdisplay(gold) } \keyword{datasets} forecast/man/subset.ts.Rd0000644000176200001440000000404514150370574015065 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/subset.R \name{subset.ts} \alias{subset.ts} \alias{subset.msts} \title{Subsetting a time series} \usage{ \method{subset}{ts}( x, subset = NULL, month = NULL, quarter = NULL, season = NULL, start = NULL, end = NULL, ... ) \method{subset}{msts}(x, subset = NULL, start = NULL, end = NULL, ...) } \arguments{ \item{x}{a univariate time series to be subsetted} \item{subset}{optional logical expression indicating elements to keep; missing values are taken as \code{FALSE}. \code{subset} must be the same length as \code{x}.} \item{month}{Numeric or character vector of months to retain. Partial matching on month names is used.} \item{quarter}{Numeric or character vector of quarters to retain.} \item{season}{Numeric vector of seasons to retain.} \item{start}{Index of start of contiguous subset.} \item{end}{Index of end of contiguous subset.} \item{...}{Other arguments, unused.} } \value{ If \code{subset} is used, a numeric vector is returned with no ts attributes. If \code{start} and/or \code{end} are used, a ts object is returned consisting of \code{x[start:end]}, with the appropriate time series attributes retained. Otherwise, a ts object is returned with frequency equal to the length of \code{month}, \code{quarter} or \code{season}. } \description{ Various types of subsetting of a time series. Allows subsetting by index values (unlike \code{\link[stats]{window}}). Also allows extraction of the values of a specific season or subset of seasons in each year. For example, all values for the month of May can be extracted from a monthly series. } \details{ If character values for months are used, either upper or lower case may be used, and partial unambiguous names are acceptable. Possible character values for quarters are \code{"Q1"}, \code{"Q2"}, \code{"Q3"}, and \code{"Q4"}. } \examples{ plot(subset(gas,month="November")) subset(woolyrnq,quarter=3) subset(USAccDeaths, start=49) } \seealso{ \code{\link[base]{subset}}, \code{\link[stats]{window}} } \author{ Rob J Hyndman } \keyword{ts} forecast/man/mstl.Rd0000644000176200001440000000316714150370574014116 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mstl.R \name{mstl} \alias{mstl} \title{Multiple seasonal decomposition} \usage{ mstl(x, lambda = NULL, iterate = 2, s.window = 7 + 4 * seq(6), ...) } \arguments{ \item{x}{Univariate time series of class \code{msts} or \code{ts}.} \item{lambda}{Box-Cox transformation parameter. If \code{lambda="auto"}, then a transformation is automatically selected using \code{BoxCox.lambda}. The transformation is ignored if \code{lambda} is NULL. Otherwise, the data are transformed before the model is estimated.} \item{iterate}{Number of iterations to use to refine the seasonal component.} \item{s.window}{Seasonal windows to be used in the decompositions. If scalar, the same value is used for all seasonal components.
Otherwise, it should be a vector of the same length as the number of seasonal components (or longer).} \item{...}{Other arguments are passed to \code{\link[stats]{stl}}.} } \description{ Decompose a time series into seasonal, trend and remainder components. Seasonal components are estimated iteratively using STL. Multiple seasonal periods are allowed. The trend component is computed for the last iteration of STL. Non-seasonal time series are decomposed into trend and remainder only. In this case, \code{\link[stats]{supsmu}} is used to estimate the trend. Optionally, the time series may be Box-Cox transformed before decomposition. Unlike \code{\link[stats]{stl}}, \code{mstl} is completely automated. } \examples{ library(ggplot2) mstl(taylor) \%>\% autoplot() mstl(AirPassengers, lambda = "auto") \%>\% autoplot() } \seealso{ \code{\link[stats]{stl}}, \code{\link[stats]{supsmu}} } forecast/man/BoxCox.lambda.Rd0000644000176200001440000000305614150370574015555 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/guerrero.R \name{BoxCox.lambda} \alias{BoxCox.lambda} \title{Automatic selection of Box Cox transformation parameter} \usage{ BoxCox.lambda(x, method = c("guerrero", "loglik"), lower = -1, upper = 2) } \arguments{ \item{x}{a numeric vector or time series of class \code{ts}} \item{method}{Choose method to be used in calculating lambda.} \item{lower}{Lower limit for possible lambda values.} \item{upper}{Upper limit for possible lambda values.} } \value{ a number indicating the Box-Cox transformation parameter. } \description{ If \code{method=="guerrero"}, Guerrero's (1993) method is used, where lambda minimizes the coefficient of variation for subseries of \code{x}. } \details{ If \code{method=="loglik"}, the value of lambda is chosen to maximize the profile log likelihood of a linear model fitted to \code{x}. For non-seasonal data, a linear time trend is fitted while for seasonal data, a linear time trend with seasonal dummy variables is used. } \examples{ lambda <- BoxCox.lambda(AirPassengers,lower=0) air.fit <- Arima(AirPassengers, order=c(0,1,1), seasonal=list(order=c(0,1,1),period=12), lambda=lambda) plot(forecast(air.fit)) } \references{ Box, G. E. P. and Cox, D. R. (1964) An analysis of transformations. \emph{JRSS B} \bold{26} 211--246. Guerrero, V.M. (1993) Time-series analysis supported by power transformations. \emph{Journal of Forecasting}, \bold{12}, 37--48. } \seealso{ \code{\link{BoxCox}} } \author{ Leanne Chhay and Rob J Hyndman } \keyword{ts} forecast/man/seasadj.Rd0000644000176200001440000000177514150370574014554 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/seasadj.R \name{seasadj} \alias{seasadj} \alias{seasadj.stl} \alias{seasadj.mstl} \alias{seasadj.decomposed.ts} \alias{seasadj.tbats} \alias{seasadj.seas} \title{Seasonal adjustment} \usage{ seasadj(object, ...) \method{seasadj}{stl}(object, ...) \method{seasadj}{mstl}(object, ...) \method{seasadj}{decomposed.ts}(object, ...) \method{seasadj}{tbats}(object, ...) \method{seasadj}{seas}(object, ...) } \arguments{ \item{object}{Object created by \code{\link[stats]{decompose}}, \code{\link[stats]{stl}} or \code{\link{tbats}}.} \item{...}{Other arguments not currently used.} } \value{ Univariate time series. } \description{ Returns seasonally adjusted data constructed by removing the seasonal component. 
} \examples{ plot(AirPassengers) lines(seasadj(decompose(AirPassengers,"multiplicative")),col=4) } \seealso{ \code{\link[stats]{stl}}, \code{\link[stats]{decompose}}, \code{\link{tbats}}. } \author{ Rob J Hyndman } \keyword{ts} forecast/DESCRIPTION0000644000176200001440000000646514474126512013577 0ustar liggesusersPackage: forecast Version: 8.21.1 Title: Forecasting Functions for Time Series and Linear Models Description: Methods and tools for displaying and analysing univariate time series forecasts including exponential smoothing via state space models and automatic ARIMA modelling. Depends: R (>= 3.5.0), Imports: colorspace, fracdiff, generics (>= 0.1.2), ggplot2 (>= 2.2.1), graphics, lmtest, magrittr, nnet, parallel, Rcpp (>= 0.11.0), stats, timeDate, tseries, urca, zoo Suggests: forecTheta, knitr, methods, rmarkdown, rticles, seasonal, testthat (>= 3.0.0), uroot LinkingTo: Rcpp (>= 0.11.0), RcppArmadillo (>= 0.2.35) LazyData: yes ByteCompile: TRUE Authors@R: c( person("Rob", "Hyndman", email = "Rob.Hyndman@monash.edu", role = c("aut", "cre", "cph"), comment = c(ORCID = "0000-0002-2140-5352")), person("George", "Athanasopoulos", role = "aut", comment = c(ORCID = "0000-0002-5389-2802")), person("Christoph", "Bergmeir", role = "aut", comment = c(ORCID = "0000-0002-3665-9021")), person("Gabriel", "Caceres", role = "aut", comment = c(ORCID = "0000-0002-2947-2023")), person("Leanne", "Chhay", role = "aut"), person("Kirill", "Kuroptev", role = "aut"), person("Mitchell", "O'Hara-Wild", role = "aut", comment = c(ORCID = "0000-0001-6729-7695")), person("Fotios", "Petropoulos", role = "aut", comment = c(ORCID = "0000-0003-3039-4955")), person("Slava", "Razbash", role = "aut"), person("Earo", "Wang", role = "aut", comment = c(ORCID = "0000-0001-6448-5260")), person("Farah", "Yasmeen", role = "aut", comment = c(ORCID = "0000-0002-1479-5401")), person("Federico", "Garza", role = "ctb"), person("Daniele", "Girolimetto", role = "ctb"), person("Ross", "Ihaka", role = c("ctb", "cph")), person("R Core Team", role = c("ctb", "cph")), person("Daniel", "Reid", role = "ctb"), person("David", "Shaub", role = "ctb"), person("Yuan", "Tang", role = "ctb", comment = c(ORCID = "0000-0001-5243-233X")), person("Xiaoqian", "Wang", role = "ctb"), person("Zhenyu", "Zhou", role = "ctb") ) BugReports: https://github.com/robjhyndman/forecast/issues License: GPL-3 URL: https://pkg.robjhyndman.com/forecast/, https://github.com/robjhyndman/forecast VignetteBuilder: knitr Encoding: UTF-8 RoxygenNote: 7.2.3 Config/testthat/edition: 3 NeedsCompilation: yes Packaged: 2023-08-31 13:24:15 UTC; robjhyndman Author: Rob Hyndman [aut, cre, cph], George Athanasopoulos [aut], Christoph Bergmeir [aut], Gabriel Caceres [aut], Leanne Chhay [aut], Kirill Kuroptev [aut], Mitchell O'Hara-Wild [aut], Fotios Petropoulos [aut], Slava Razbash [aut], Earo Wang [aut], Farah Yasmeen [aut], Federico Garza [ctb], Daniele Girolimetto [ctb], Ross Ihaka [ctb, cph], R Core Team [ctb, cph], Daniel Reid [ctb], David Shaub [ctb], Yuan Tang [ctb], Xiaoqian Wang [ctb], Zhenyu Zhou [ctb] Maintainer: Rob Hyndman <Rob.Hyndman@monash.edu> Repository: CRAN Date/Publication: 2023-08-31 15:10:02 UTC forecast/build/0000755000176200001440000000000014474112176013166 5ustar liggesusersforecast/build/vignette.rds0000644000176200001440000000041614474112176015526 0ustar liggesusers[binary RDS payload omitted]
forecast/tests/0000755000176200001440000000000014150370574013230 5ustar liggesusersforecast/tests/testthat/0000755000176200001440000000000014474126512015070 5ustar liggesusersforecast/tests/testthat/test-tslm.R0000644000176200001440000001203214353422625017145 0ustar liggesusers# A unit test for tslm function if (require(testthat)) { mv_y <- ts(cbind(rnorm(120, 0, 3) + 1:120 + 20 * sin(2 * pi * (1:120) / 12), rnorm(120, 3, 7) + 1:120 + 16 * sin(2 * pi * (1:120 + 6) / 12)), frequency = 12) mv_x <- ts(cbind(rnorm(120, 0, 8) + (1:120) / 2 + 42 * sin(2 * pi * (1:120) / 12), rnorm(120, 3, 7) + (1:120) * -1 + 20 * sin(2 * pi * (1:120 + 6) / 12)), frequency = 12) v_y <- ts(rnorm(120, 0, 8) + (1:120) / 2 + 12 * sin(2 * pi * (1:120) / 12), frequency = 12) v_x <- ts(rnorm(120, 0, 1) + (1:120) * (-1) + 28 * sin(2 * pi * (1:120) / 12), frequency = 12) data <- datamat(mv_y, mv_x, v_y, v_x, fourier(v_y, 3)) test_that("tests on model building with univariate time series", { fit1 <- tslm(v_y ~ trend + season, data = data) fit2 <- tslm(v_y ~ trend + season, data = data, lambda = 2, biasadj = FALSE) fit3 <- tslm(v_y ~ trend + season, data = data, lambda = 2, biasadj = TRUE) expect_false(identical(fit2$fitted.values, fit3$fitted.values)) fit2 <- tslm(v_y ~ trend + season, data = data.frame(trend = rnorm(120))) expect_false(identical(fit1$model, fit2$model)) fit2 <- tslm(v_y ~ trend + season) expect_named(fit1, names(fit2)) expect_identical(fit1$model, fit2$model, ignore_attr = "terms") expect_identical(fit1$coefficients, fit2$coefficients) fit1 <- tslm(USAccDeaths ~ trend + season, data = USAccDeaths) fit2 <- tslm(USAccDeaths ~ trend + season) expect_named(fit1, names(fit2)) expect_identical(fit1$model, fit2$model, ignore_attr = "terms") expect_identical(fit1$coefficients, fit2$coefficients) expect_warning(fit3 <- tslm( USAccDeaths ~ trend + season, data = USAccDeaths, subset = time(USAccDeaths) %% 1 < 0.1 )) fit <- tslm(USAccDeaths ~ trend + season + trend * season, data = USAccDeaths) expect_true("trend:season" %in% attr(fit$terms, "term.labels")) }) test_that("tslm parity with lm", { fit1 <- tslm(v_y ~ v_x + fourier(v_y, 3), data = data.frame(v_y = v_y)) fit2 <- lm(v_y ~ v_x + fourier(v_y, 3), data = data.frame(v_y = v_y)) expect_equal(fit1$coefficients, fit2$coefficients) expect_equal(fit1$model, fit2$model, ignore_attr = "terms") }) test_that("tests on subsetting data", { a <- mv_y[, 1] expect_warning(fit1 <- tslm(mv_y ~ trend, subset = a < 20), "Subset has been assumed contiguous") expect_error(fit2 <- tslm(mv_y ~ trend, subset = subset(mv_y, mv_y[, 1] < 20))) expect_warning(tslm(v_y ~ trend + season + trend * season, subset = v_y < 100), "Subset has been assumed contiguous") }) test_that("tests on model building with multivariate time series", { fit1 <- tslm(mv_y ~ trend + season) fit2 <- tslm(mv_y ~ trend + season, lambda = 0.5) expect_false(identical(fit1$coefficients, fit2$coefficients)) fit3 <- tslm(mv_y ~ trend + season, lambda = 0.5, biasadj = TRUE) expect_false(identical(fit2$fitted.values, fit3$fitted.values)) fit2 <- tslm(mv_y ~ trend + season, data = data) expect_named(fit1, names(fit2)) expect_identical(fit1$model, fit2$model, ignore_attr = "terms") expect_identical(fit1$coefficients, fit2$coefficients) expect_warning(fit3 <- tslm(mv_y ~ trend + season, subset = mv_y[, 1] < 1), "Subset has been assumed contiguous") expect_warning(fit4 <- tslm(mv_y ~ trend + season, data = data, subset = mv_y[, 1] < 1), "Subset has been assumed contiguous")
expect_named(fit3, names(fit4)) expect_identical(fit3$model, fit4$model, ignore_attr = "terms") expect_identical(fit3$coefficients, fit4$coefficients) }) test_that("tests with bad input", { expect_error(tslm(mpg ~ cyl, data = mtcars), "Not time series data") expect_error(tslm(tmp2 ~ trend + season + trend * season, subset = subset(tmp2, month = "January")), "Non-seasonal data cannot be modelled using a seasonal factor") }) test_that("forecast.lm", { fit1 <- tslm(v_y ~ trend + season, lambda = 2, biasadj = FALSE) fit2 <- tslm(v_y ~ trend + season, lambda = 2, biasadj = TRUE) fcast1 <- forecast(fit1, h = 60, biasadj = FALSE) fcast2 <- forecast(fit2, h = 60, biasadj = TRUE) expect_false(identical(fcast1$mean, fcast2$mean)) fred <- tslm(ldeaths ~ trend + season, lambda = 0) fc <- forecast(fred) }) test_that("Unusual usage", { expect_silent(fit1 <- tslm(v_y ~ trend + v_x + I(v_x ^ 2) + fourier(v_x, 3))) # forecast(fit1, newdata=data.frame(v_x=ts(1:2,freq=12))) # tslm(v_y ~ trend + I(v_x) + I(v_x^2) + fourier(v_x, 3), data=data) # tslm(v_y ~ trend + season + I(v_x) + I(v_x^2) + fourier(ts(season, freq=12), 3)) # fit2 <- tslm(v_y ~ trend + season + I(v_x)*fourier(v_x,3)) # forecast(fit2, newdata=data.frame(v_x=ts(1:2,freq=12))) # tslm(v_y ~ trend + season + I(v_x)*fourier(v_x,3),data=data) }) test_that("Missing values", { USMissingDeaths <- USAccDeaths USMissingDeaths[c(1, 44, 72)] <- NA timetrend <- 1:72 fit <- tslm(USMissingDeaths ~ season + timetrend) expect_equal(sum(is.na(residuals(fit))), 3) fc <- forecast(fit, newdata = data.frame(timetrend = 73)) expect_length(fc$mean, 1) }) } forecast/tests/testthat/test-nnetar.R0000644000176200001440000001463414456202551017465 0ustar liggesusers# A unit test for nnetar.R if (require(testthat)) { test_that("Tests for nnetar", { oilnnet <- nnetar(airmiles, lambda = 0.15) woolyrnqnnet <- nnetar(woolyrnq, repeats = 10) expect_output(print(woolyrnqnnet), regexp = "Series: woolyrnq") expect_true(length(forecast(oilnnet)$mean) == 10) expect_true(length(forecast(woolyrnqnnet)$mean) == 2 * frequency(woolyrnq)) # Test with single-column xreg (which might be a vector) uscnnet <- nnetar(woolyrnq, xreg = 1:length(woolyrnq)) expect_true(all(dim(uscnnet$xreg) == c(119, 1))) expect_true(length(forecast(uscnnet, xreg = 120:130)$mean) == 11) # Test default size with and without xreg uscnnet <- nnetar(woolyrnq, p = 2, P = 2) expect_output( print(uscnnet), regexp = "NNAR(2,2,2)", fixed = TRUE ) expect_output( print(uscnnet), regexp = "4-2-1 network", fixed = TRUE ) expect_true(uscnnet$size == 2) uscnnet <- nnetar(woolyrnq, p = 2, P = 2, xreg = 1:119, repeats = 10) expect_output( print(uscnnet), regexp = "NNAR(2,2,3)", fixed = TRUE ) expect_output( print(uscnnet), regexp = "5-3-1 network", fixed = TRUE ) expect_true(uscnnet$size == 3) # Test default size for models with only seasonal lags, with and without xreg seasonal_only_lags_nnet <- nnetar(woolyrnq, p = 0, P = 3) expect_output( print(seasonal_only_lags_nnet), regexp = "NNAR(0,3,2)", fixed = TRUE ) expect_output( print(seasonal_only_lags_nnet), regexp = "3-2-1 network", fixed = TRUE ) seasonal_only_lags_xreg_nnet <- nnetar(woolyrnq, p = 0, P = 3, xreg = cbind(1:119, 119:1)) expect_output( print(seasonal_only_lags_xreg_nnet), regexp = "NNAR(0,3,3)", fixed = TRUE ) expect_output( print(seasonal_only_lags_xreg_nnet), regexp = "5-3-1 network", fixed = TRUE ) # Test P=0 when m>1 uscnnet <- nnetar(woolyrnq, p = 4, P = 0) expect_true(uscnnet$size == 2) expect_output(print(uscnnet), regexp = "NNAR(4,2)", fixed = TRUE) # Test overlapping
p & P uscnnet <- nnetar(woolyrnq, p = 4, P = 2) expect_true(uscnnet$size == 3) expect_output( print(uscnnet), regexp = "NNAR(4,2,3)", fixed = TRUE ) expect_output( print(uscnnet), regexp = "5-3-1 network", fixed = TRUE ) # Test that p = 0 & P = 0 is not permitted expect_error( nnetar(woolyrnq,p = 0,P = 0) ) # Test with multiple-column xreg creditnnet <- nnetar( wineind, xreg = cbind(bizdays(wineind), fourier(wineind, 1)) ) expect_warning( expect_length(forecast(creditnnet, h = 2, xreg = matrix(2, 2, 3))$mean, 2L), "different column names", fixed = TRUE ) # Test if h doesn't match xreg expect_warning( expect_length(forecast(creditnnet, h = 5, xreg = matrix(2, 2, 3))$mean, 2L), "different column names", fixed = TRUE ) # Test that P is ignored if m=1 expect_warning(creditnnet <- nnetar(WWWusage, p = 2, P = 4, xreg = 1:length(WWWusage))) expect_output( print(creditnnet), regexp = "NNAR(2,2)", fixed = TRUE ) # Test fixed size creditnnet <- nnetar(WWWusage, p = 1, P = 1, xreg = 1:length(WWWusage), size = 12) expect_true(uscnnet$size == 3) expect_output(print(creditnnet), regexp = "NNAR(1,12)", fixed = TRUE) # Test passing arguments to nnet expect_warning(creditnnet <- nnetar( WWWusage, p = 2, P = 4, xreg = 1:length(WWWusage), decay = 0.1 )) expect_output( print(creditnnet), regexp = "decay=0.1", fixed = TRUE ) ## Test output format correct oilnnet <- nnetar(airmiles, p = 1, size = 0, skip = TRUE, Wts = c(0, 1), maxit = 0, repeats = 10) expect_true(all.equal(oilnnet$fitted[-1], airmiles[-length(airmiles)])) ## Test output format correct when NAs present oilna <- airmiles oilna[12] <- NA suppressWarnings(oilnnet <- nnetar(oilna, p = 1, size = 0, skip = TRUE, Wts = c(0, 1), maxit = 0)) expect_true(all.equal(oilnnet$fitted[-c(1, 12, 13)], oilna[-c(11, 12, length(oilna))])) ## Test model argument fit1 <- nnetar( WWWusage, xreg = 1:length(WWWusage), lambda = 2, decay = 0.5, maxit = 25, repeats = 7 ) fit2 <- nnetar(WWWusage, xreg = 1:length(WWWusage), model = fit1) # Check some model parameters expect_true(identical(fit1$p, fit2$p)) expect_true(identical(fit1$lambda, fit2$lambda)) expect_true(identical(fit1$nnetargs, fit2$nnetargs)) # Check fitted values are all the same expect_true(identical(fitted(fit1), fitted(fit2))) # Check residuals all the same expect_true(identical(residuals(fit1), residuals(fit2))) # Check number of neural nets expect_true(identical(length(fit1$model), length(fit2$model))) # Check neural network weights all the same expect_true(identical(fit1$model[[1]]$wts, fit2$model[[1]]$wts)) expect_true(identical(fit1$model[[7]]$wts, fit2$model[[7]]$wts)) # Check subset argument oilnnet <- nnetar(airmiles, subset = 11:20) expect_true(identical(which(!is.na(fitted(oilnnet))), 11:20)) oilnnet <- nnetar(airmiles, subset = c(rep(F, 10), rep(T, 10), rep(F, length(airmiles) - 20))) expect_true(identical(which(!is.na(fitted(oilnnet))), 11:20)) ## Check short and constant data expect_warning(nnetfit <- nnetar(rep(1, 10), p=2, P=0, size=1, repeats=1, lambda = 0.1), "Constant data") expect_true(nnetfit$p == 1) expect_true(is.null(nnetfit$lambda)) expect_true(is.null(nnetfit$scalex)) expect_error(nnetfit <- nnetar(rnorm(2), p=1, P=0, size=1, repeats=1), "Not enough data") expect_silent(nnetfit <- nnetar(rnorm(3), p=1, P=0, size=1, repeats=1)) expect_true(nnetfit$p == 1) expect_silent(nnetfit <- nnetar(rnorm(3), p=2, P=0, size=1, repeats=1)) expect_true(nnetfit$p == 2) expect_warning(nnetfit <- nnetar(rnorm(3), p=3, P=0, size=1, repeats=1), "short series") expect_true(nnetfit$p == 2) 
expect_warning(nnetfit <- nnetar(rnorm(3), p=4, P=0, size=1, repeats=1), "short series") expect_true(nnetfit$p == 2) expect_warning(nnetfit <- nnetar(rnorm(10), xreg=rep(1, 10), p=2, P=0, size=1, repeats=1, lambda = 0.1), "Constant xreg") expect_true(is.null(nnetfit$scalexreg)) expect_warning(nnetfit <- nnetar(rnorm(3), xreg=matrix(c(1, 2, 3, 1, 1, 1), ncol=2), p=1, P=0, size=1, repeats=1, lambda = 0.1), "Constant xreg") expect_true(is.null(nnetfit$scalexreg)) }) } forecast/tests/testthat/test-ets.R0000644000176200001440000000440414456202551016763 0ustar liggesusers# A unit test for ets function if (require(testthat)) { test_that("tests for some arguments in ets", { fit <- ets(wineind, model = "ZZM") comp <- paste0(fit$components[1:3], collapse = "") expect_identical(comp, "MAM") }) test_that("tests for some arguments in ets", { fit <- ets(wineind, model = "MAM", alpha = 0.1611) expect_identical(as.numeric(fit$par["alpha"]), 0.1611) }) test_that("refit ets model to new data", { fit <- ets(wineind, model = "MAM", alpha = 0.1611) parnames <- c("alpha", "beta", "gamma") par <- fit$par[parnames] expect_identical(ets(wineind, model = fit, alpha = 0.1611, use.initial.values = FALSE)$par[parnames], par) expect_identical(ets(wineind, model = fit, alpha = 0.1611, beta = NA, use.initial.values = FALSE)$par[parnames], par) expect_identical(ets(wineind, model = fit, alpha = 0.1611, gamma = NA, use.initial.values = FALSE)$par[parnames], par) expect_identical(ets(wineind, model = fit, alpha = 0.1611, phi = NA, use.initial.values = FALSE)$par[parnames], par) expect_identical(ets(wineind, model = fit, alpha = 0.1611, use.initial.values = TRUE)$par, fit$par) }) test_that("class methods for ets work", { fit <- ets(wineind, model = "MAM", alpha = 0.1611) expect_output(print(summary(fit)), "Smoothing parameters") expect_equal(length(coef(fit)), 16L) expect_lt(abs(logLik(fit) + 1802.9586023), 1e-5) plot(fit) }) test_that("test ets() for errors", { expect_warning(ets(taylor)) fit1 <- ets(airmiles, lambda = 0.15, biasadj = FALSE) expect_gt(fit1$par["alpha"], 0.95) fit2 <- ets(airmiles, lambda = 0.15, biasadj = TRUE) expect_lt(fit2$par["beta"], 1e-3) expect_false(identical(fit1$fitted, fit2$fitted)) expect_error(ets(taylor, model = "ZZA")) }) test_that("forecast.ets()", { fit <- ets(airmiles, lambda = 0.15, biasadj = TRUE) fcast1 <- forecast(fit, PI = FALSE) expect_true(is.null(fcast1$upper) & is.null(fcast1$lower)) fcast1 <- forecast(fit, biasadj = FALSE) fcast2 <- forecast(fit, biasadj = TRUE) expect_false(identical(fcast1$mean, fcast2$mean)) fcast <- forecast(fit, simulate = TRUE) expect_true(!is.null(fcast$upper) & !is.null(fcast$lower)) expect_true(all(fcast$upper > fcast$lower)) }) } forecast/tests/testthat/test-calendar.R0000644000176200001440000000147514353422625017750 0ustar liggesusers# A unit test for calendar.R if (require(testthat)) { test_that("Tests for bizdays()", { expect_error(bizdays(1:20)) b1 <- bizdays(woolyrnq, FinCenter = "New York") b2 <- bizdays(woolyrnq, FinCenter = "London") b3 <- bizdays(woolyrnq, FinCenter = "Zurich") if(packageVersion("timeDate") >= '4021.105') { expect_equal(sum(abs(b1 - b2)), 145L) expect_equal(sum(abs(b1 - b3)), 176L) } expect_equal(sum(abs(b2 - b3)), 117L) b1 <- bizdays(gas, FinCenter = "NERC") b2 <- bizdays(gas, FinCenter = "Toronto") if(packageVersion("timeDate") >= '4021.105') { expect_equal(sum(abs(b1 - b2)), 211L) } }) test_that("Tests for easter()", { expect_true(length(easter(woolyrnq)) == length(woolyrnq)) expect_true(length(easter(wineind)) == 
length(wineind)) }) } forecast/tests/testthat/test-spline.R0000644000176200001440000000074414353422625017467 0ustar liggesusers# A unit test for spline.R if (require(testthat)) { test_that("Tests for splinef()", { plot.splineforecast(splinef(airmiles)) fit1 <- splinef(woolyrnq, lambda = 0.2, biasadj = FALSE) fit2 <- splinef(woolyrnq, lambda = 0.2, biasadj = TRUE) expect_false(identical(fit1$mean, fit2$mean)) splinef(woolyrnq, method = "mle") splinef(WWWusage, method = "mle") expect_error(splinef(woolyrnq, level = 110)) expect_error(splinef(woolyrnq, level = -10)) }) } forecast/tests/testthat/test-forecast.R0000644000176200001440000000303114353422625017773 0ustar liggesusers# A unit test for forecast.R if (require(testthat)) { test_that("tests for findfrequency()", { expect_true(frequency(airmiles) == findfrequency(as.numeric(airmiles))) expect_false(frequency(wineind) == findfrequency(as.numeric(wineind))) expect_true(frequency(woolyrnq) == findfrequency(as.numeric(woolyrnq))) expect_true(frequency(gas) == findfrequency(as.numeric(gas))) }) test_that("tests forecast.ts()", { fc1 <- as.numeric(forecast(as.numeric(airmiles), find.frequency = TRUE)$mean) fc2 <- as.numeric(forecast(airmiles)$mean) expect_true(all(fc1 == fc2)) }) test_that("tests summary.forecast() and forecast.forecast()", { WWWusageforecast <- forecast(WWWusage) expect_output(print(summary(WWWusageforecast)), regexp = "Forecast method:") expect_true(all(predict(WWWusageforecast)$mean == forecast(WWWusageforecast)$mean)) }) # test_that("tests plot.forecast()", { # # Fit several types of models for plotting # batsmod <- bats(woolyrnq) # nnetmod <- nnetar(woolyrnq) # tslmmod <- tslm(woolyrnq ~ trend + season) # nnetfc<- forecast(nnetmod) # batsfc <- forecast(batsmod) # tslmfc <- forecast(tslmmod) # skip_on_travis() # # Plot the forecasts # expect_that(plot(nnetfc), not(throws_error())) # expect_that(plot(batsfc), not(throws_error())) # expect_that(plot(batsfc, shaded = FALSE), not(throws_error())) # expect_that(plot(tslmfc, PI = FALSE), not(throws_error())) # expect_that(plot(forecast(tslmmod, h = 0)), not(throws_error())) # }) } forecast/tests/testthat/test-season.R0000644000176200001440000000721714150370574017467 0ustar liggesusers# A unit test for na.interp() and tsclean() if (require(testthat)) { test_that("tests for monthdays", { expect_error(monthdays(rnorm(10))) expect_error(monthdays(rnorm(10))) expect_true(all(monthdays(ts(rep(100, 12), frequency = 12)) == c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31))) expect_true(all(monthdays(ts(rep(1, 4), frequency = 4)) == c(90, 91, 92, 92))) # Test leapyears expect_true(monthdays(ts(rep(1, 48), frequency = 12))[38] == 29) expect_true(monthdays(ts(rep(1, 16), frequency = 4))[13] == 91) }) test_that("tests for seasonaldummy", { expect_error(seasonaldummy(1)) testseries <- ts(rep(1:7, 5), frequency = 7) dummymat <- seasonaldummy(testseries) expect_true(length(testseries) == nrow(dummymat)) expect_true(ncol(dummymat) == 6) expect_true(all(seasonaldummy(wineind)[1:11, ] == diag(11))) }) test_that("tests for seasonaldummyf", { expect_error(seasonaldummy(1)) expect_warning(dummymat <- seasonaldummyf(wineind, 4), "deprecated") expect_true(nrow(dummymat) == 4) expect_true(ncol(dummymat) == 11) }) test_that("tests for fourier", { expect_error(fourier(1)) testseries <- ts(rep(1:7, 5), frequency = 7) fouriermat <- fourier(testseries, 3) expect_true(length(testseries) == nrow(fouriermat)) expect_true(ncol(fouriermat) == 6) expect_true(all(grep("-7", colnames(fouriermat)))) }) 
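# Added illustrative sketch (not an original test): fourier() terms are most
# commonly used as harmonic-regression covariates, with matching future terms
# generated for the forecast period through the h argument.
harmonic_fit <- tslm(wineind ~ trend + fourier(wineind, K = 2))
harmonic_fc <- forecast(harmonic_fit, newdata = data.frame(fourier(wineind, K = 2, h = 24)))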
test_that("tests for fourierf", { expect_warning(fouriermat <- fourierf(wineind, 4, 10), "deprecated") expect_true(nrow(fouriermat) == 10) expect_true(ncol(fouriermat) == 8) }) test_that("tests for stlm", { expect_warning(stlm(ts(rep(5, 24), frequency = 4), etsmodel = "ZZZ")) }) test_that("tests for forecast.stlm", { expect_error(forecast.stlm(stlm(wineind), newxreg = matrix(rep(1, 24), ncol = 2))) stlmfit1 <- stlm(woolyrnq, method = "ets") stlmfit2 <- stlm(woolyrnq, method = "arima", approximation = FALSE) fcfit1 <- forecast(stlmfit1) fcfit2 <- forecast(stlmfit1, fan = TRUE) expect_true(all(fcfit2$level == seq(from = 51, to = 99, by = 3))) fcstlmfit3 <- forecast(stlmfit2) expect_true(all(round(forecast(stlm(ts(rep(100, 120), frequency = 12)))$mean, 10) == 100)) expect_true(all(round(forecast(stlm(ts(rep(100, 120), frequency = 12), lambda = 1))$mean, 10) == 100)) }) test_that("tests for stlf", { expect_true(all(forecast(stlm(wineind))$mean == stlf(wineind)$mean)) expect_true(all(forecast(stlm(wineind, lambda = .5))$mean == stlf(wineind, lambda = .5)$mean)) fit1 <- stlf(wineind, lambda = .2, biasadj = FALSE) fit2 <- stlf(wineind, lambda = .2, biasadj = TRUE) expect_false(identical(fit1$mean, fit2$mean)) # Constant series should not error series <- ts(rep(950, 20), frequency = 4) constantForecast <- expect_error(stlf(series), NA) # Small eps expect_true(all(abs(constantForecast$mean - mean(series)) < 10^-8)) y <- ts(rep(1:7, 3), frequency = 7) expect_equal(c(stlf(y)$mean), rep(1:7, 2)) }) test_that("tests for ma", { testseries <- ts(1:20, frequency = 4) expect_true(frequency(ma(testseries, order = 4)) == frequency(testseries)) maseries <- ma(testseries, order = 3) expect_true(identical(which(is.na(maseries)), c(1L, 20L))) expect_true(all(abs(maseries[2:19] - 2:19) < 1e-14)) maseries <- ma(testseries, order = 2, centre = FALSE) expect_true(identical(which(is.na(maseries)), 20L)) expect_true(all(abs(maseries[1:19] - 1:19 - 0.5) < 1e-14)) maseries <- ma(testseries, order = 2, centre = TRUE) expect_true(identical(which(is.na(maseries)), c(1L, 20L))) expect_true(all(abs(maseries[2:19] - 2:19) < 1e-14)) }) } forecast/tests/testthat/test-ggplot.R0000644000176200001440000000526614353422625017475 0ustar liggesusers# A unit test for ggplot support if (require(testthat)) { test_that("tests for autoplot/gg functions", { library(ggplot2) lungDeaths <- cbind(mdeaths, fdeaths) ggAcf(wineind) autoplot(Acf(wineind)) expect_identical(ggAcf(wineind, plot = FALSE)$acf, acf(wineind, plot = FALSE, lag.max = 24)$acf) ggPacf(wineind) autoplot(Pacf(wineind)) expect_identical(ggPacf(wineind, plot = FALSE)$acf, acf(wineind, plot = FALSE, type = "partial", lag.max = 24)$acf) ggCcf(mdeaths, fdeaths) autoplot(Ccf(mdeaths, fdeaths)) expect_identical(ggCcf(mdeaths, fdeaths, plot = FALSE)$acf, ccf(mdeaths, fdeaths, plot = FALSE, type = "correlation", lag.max = 24)$acf) arimafit <- Arima(USAccDeaths, order = c(1, 1, 1), seasonal = c(1, 1, 1)) autoplot(arimafit) autoplot(arimafit, type = "ma") autoplot(arimafit, type = "ar") arfit <- ar(USAccDeaths) autoplot(arfit) decomposefit <- decompose(USAccDeaths) autoplot(decomposefit) etsfit <- ets(USAccDeaths, model = "ANA") autoplot(etsfit) structfit <- StructTS(USAccDeaths) autoplot(structfit) stlfit <- stl(USAccDeaths, s.window = "periodic") autoplot(stlfit) # seasfit <- seasonal::seas(USAccDeaths) # autoplot(seasfit) etsfcast <- forecast(etsfit) autoplot(etsfcast) autoplot(etsfcast, PI = FALSE) lmfit <- lm(mpg ~ disp, data = mtcars) lmfcast <- forecast(lmfit, newdata = 
data.frame(disp = 214)) autoplot(lmfcast) mfcast <- forecast(lungDeaths) autoplot(mfcast) ggtsdisplay(USAccDeaths, plot.type = "spectrum") ggtsdisplay(USAccDeaths, plot.type = "partial") ggtsdisplay(USAccDeaths, plot.type = "histogram") ggtsdisplay(USAccDeaths, plot.type = "scatter", theme = ggplot2::theme_bw()) gglagplot(woolyrnq, lags = 2) gglagplot(lungDeaths, lags = 2) gglagplot(WWWusage, do.lines = FALSE, colour = FALSE, labels = TRUE) gglagchull(woolyrnq, lags = 4) ggmonthplot(woolyrnq) ggseasonplot(woolyrnq, year.labels = TRUE, year.labels.left = TRUE) ggseasonplot(USAccDeaths, polar = TRUE, col = 1:5, continuous = TRUE) splinefit <- splinef(airmiles, h = 5) autoplot(splinefit) autoplot(USAccDeaths) autoplot(lungDeaths) autoplot(lungDeaths, facets = TRUE) autoplot(USAccDeaths) + geom_forecast() autoplot(USAccDeaths) + autolayer(etsfcast, series = "ETS") autoplot(lungDeaths) + geom_forecast() autoplot(lungDeaths) + autolayer(mfcast, series = c("mdeaths", "fdeaths")) autoplot(lungDeaths) + autolayer(mfcast) autoplot(lungDeaths) + autolayer(mfcast, series = TRUE) autoplot(lungDeaths, facets = TRUE) + geom_forecast() gghistogram(USAccDeaths, add.kde = TRUE) }) } forecast/tests/testthat/test-refit.R0000644000176200001440000001221514353422625017302 0ustar liggesusers# A unit test for re-fitting models if (require(testthat)) { test_that("tests for re-fitting models", { # arima fit <- Arima(mdeaths, c(1, 0, 0), c(2, 0, 0), include.mean = FALSE, include.drift = TRUE) refit <- Arima(fdeaths, model = fit) expect_true(identical(fit$coef, refit$coef)) expect_false(identical(fit$x, refit$x)) expect_false(identical(fit$fitted, refit$fitted)) expect_false(identical(fit$residuals, refit$residuals)) refit_same <- Arima(mdeaths, model = fit) expect_true(identical(fit$coef, refit_same$coef)) expect_true(identical(fit$x, refit_same$x)) expect_true(all.equal(fit$fitted, refit_same$fitted)) expect_true(all.equal(fit$residuals, refit_same$residuals)) # arfima fit <- arfima(mdeaths) refit <- arfima(fdeaths, model = fit) expect_true(identical(fit$ar, refit$ar)) expect_true(identical(fit$ma, refit$ma)) expect_false(identical(fit$x, refit$x)) expect_false(identical(fit$fitted, refit$fitted)) expect_false(identical(fit$residuals, refit$residuals)) refit_same <- arfima(mdeaths, model = fit) expect_true(identical(fit$ar, refit_same$ar)) expect_true(identical(fit$ma, refit_same$ma)) expect_true(identical(fit$x, refit_same$x)) expect_true(identical(fit$fitted, refit_same$fitted)) expect_true(identical(fit$residuals, refit_same$residuals)) # dshw fit <- dshw(mdeaths, period1 = 4, period2 = 12) refit <- dshw(fdeaths, model = fit) expect_false(identical(fit$x, refit$x)) expect_false(identical(fit$fitted, refit$fitted)) expect_false(identical(fit$residuals, refit$residuals)) refit_same <- dshw(mdeaths, model = fit) expect_true(identical(fit$model, refit_same$model)) expect_true(identical(fit$x, refit_same$x)) expect_true(identical(fit$fitted, refit_same$fitted)) expect_true(identical(fit$residuals, refit_same$residuals)) # ets fit <- ets(mdeaths) refit <- ets(fdeaths, model = fit, use.initial.values = TRUE) expect_true(identical(fit$fit, refit$fit)) expect_false(identical(fit$x, refit$x)) expect_false(identical(fit$fitted, refit$fitted)) expect_false(identical(fit$residuals, refit$residuals)) refit_same <- ets(mdeaths, model = fit, use.initial.values = TRUE) expect_true(identical(fit$x, refit_same$x)) expect_true(identical(fit$fitted, refit_same$fitted)) expect_true(identical(residuals(fit), residuals(refit_same))) # 
stlm fit <- stlm(mdeaths) refit <- stlm(fdeaths, model = fit) expect_true(identical(fit$model$par, refit$model$par)) expect_false(identical(fit$x, refit$x)) expect_false(identical(fit$fitted, refit$fitted)) expect_false(identical(fit$residuals, refit$residuals)) refit_same <- stlm(mdeaths, model = fit) expect_true(identical(fit$model$par, refit_same$model$par)) expect_true(identical(fit$x, refit_same$x)) expect_true(identical(fit$fitted, refit_same$fitted)) expect_true(identical(fit$residuals, refit_same$residuals)) # bats fit <- bats(mdeaths) refit <- bats(fdeaths, model = fit) expect_true(identical(fit$parameters, refit$parameters)) expect_false(identical(fit$x, refit$x)) expect_false(identical(fit$fitted.values, refit$fitted.values)) expect_false(identical(residuals(fit), residuals(refit))) refit_same <- bats(mdeaths, model = fit) expect_true(identical(fit$model$par, refit_same$model$par)) expect_true(identical(fit$x, refit_same$x)) expect_true(identical(fit$fitted.values, refit_same$fitted.values)) expect_true(identical(residuals(fit), residuals(refit_same))) # tbats fit <- tbats(mdeaths) refit <- tbats(fdeaths, model = fit) expect_true(identical(fit$parameters, refit$parameters)) expect_false(identical(fit$x, refit$x)) expect_false(identical(fit$fitted.values, refit$fitted.values)) expect_false(identical(residuals(fit), residuals(refit))) refit_same <- tbats(mdeaths, model = fit) expect_true(identical(fit$model$par, refit_same$model$par)) expect_true(identical(fit$x, refit_same$x)) expect_true(identical(fit$fitted.values, refit_same$fitted.values)) expect_true(identical(residuals(fit), residuals(refit_same))) # nnetar fit <- nnetar(mdeaths) refit <- nnetar(fdeaths, model = fit) expect_false(identical(fit$x, refit$x)) expect_false(identical(fit$fitted, refit$fitted)) expect_false(identical(fit$residuals, refit$residuals)) refit_same <- nnetar(mdeaths, model = fit) expect_true(identical(fit$x, refit_same$x)) expect_true(identical(fit$fitted, refit_same$fitted)) expect_true(identical(residuals(fit), residuals(refit_same))) # forecast.ts fit <- forecast(mdeaths) refit <- forecast(fdeaths, model = fit, use.initial.values = TRUE) expect_false(identical(fit$x, refit$x)) expect_false(identical(fit$fitted, refit$fitted)) expect_false(identical(fit$residuals, refit$residuals)) refit_same <- forecast(mdeaths, model = fit, use.initial.values = TRUE) expect_true(identical(fit$x, refit_same$x)) expect_true(identical(fit$fitted, refit_same$fitted)) expect_true(identical(residuals(fit), residuals(refit_same))) }) } forecast/tests/testthat/test-newarima2.R0000644000176200001440000000411314323125536020052 0ustar liggesusers# A unit test functions in newarima2.R if (require(testthat)) { test_that("test auto.arima() and associated methods", { expect_warning(auto.arima(rep(1, 100), stepwise = TRUE, parallel = TRUE)) set.seed(345) testseries1 <- ts(rnorm(100) + 1:100, frequency = 0.1) xregmat <- matrix(runif(300), ncol = 3) expect_true(frequency(forecast(auto.arima(testseries1))) == 1) fit1 <- auto.arima(testseries1, xreg = xregmat, allowdrift = FALSE) expect_true(all(xregmat == fit1$xreg)) testseries2 <- ts(rep(100, 120), frequency = 12) xregmat <- matrix(runif(240), ncol = 2) expect_output(print(auto.arima(testseries2, xreg = xregmat)), regexp = "Series: testseries2") expect_output(print(summary(auto.arima(testseries2, xreg = xregmat, approximation = TRUE, stepwise = FALSE))), regexp = "Series: testseries2") expect_output(print(auto.arima(ts(testseries2, frequency = 4), approximation = TRUE, trace = 
TRUE)), regexp = "ARIMA") fit1 <- auto.arima(testseries1, stepwise = FALSE, lambda = 2, biasadj = FALSE) fit2 <- auto.arima(testseries1, stepwise = FALSE, lambda = 2, biasadj = TRUE) expect_false(identical(fit1$fitted, fit2$fitted)) }) test_that("test parallel = FALSE and stepwise = FALSE for auto.arima()", { skip_if(identical(Sys.getenv("GITHUB_ACTIONS"), "true")) expect_equal(auto.arima(WWWusage, parallel = FALSE, stepwise = FALSE)$arma, c(3L, 0L, 0L, 0L, 1L, 1L, 0L)) }) test_that("tests for ndiffs()", { expect_true(ndiffs(AirPassengers, test = "kpss") == 1) expect_true(ndiffs(AirPassengers, test = "adf") == 1) expect_true(ndiffs(AirPassengers, test = "pp") == 1) }) test_that("tests for nsdiffs()", { expect_true(nsdiffs(AirPassengers, test = "seas") == 1) expect_true(nsdiffs(AirPassengers, test = "ocsb") == 1) expect_error(nsdiffs(airmiles)) expect_true(nsdiffs(rep(1, 100)) == 0) expect_warning(nsdiffs(ts(rnorm(10), frequency = 0.1))) skip_if_not_installed("uroot") expect_true(nsdiffs(AirPassengers, test = "hegy") == 1) expect_true(nsdiffs(AirPassengers, test = "ch") == 0) }) } forecast/tests/testthat/test-wrangle.R0000644000176200001440000000226514353422625017634 0ustar liggesusers# A unit test for wrangling functions if (require(testthat)) { mv_y <- ts(cbind(rnorm(120, 0, 3) + 1:120 + 20 * sin(2 * pi * (1:120) / 12), rnorm(120, 3, 7) + 1:120 + 16 * sin(2 * pi * (1:120 + 6) / 12)), frequency = 12) mv_x <- ts(cbind(rnorm(120, 0, 8) + (1:120) / 2 + 42 * sin(2 * pi * (1:120) / 12), rnorm(120, 3, 7) + (1:120) * -1 + 20 * sin(2 * pi * (1:120 + 6) / 12)), frequency = 12) v_y <- ts(rnorm(120, 0, 8) + (1:120) / 2 + 12 * sin(2 * pi * (1:120) / 12), frequency = 12) v_x <- ts(rnorm(120, 0, 1) + (1:120) * (-1) + 28 * sin(2 * pi * (1:120) / 12), frequency = 12) test_that("tests on retaining matrix attributes", { data <- datamat(mv_y, mv_x, v_y, v_x) expect_true(is.ts(data[, 1])) expect_true(identical(tsp(data[, 1]), tsp(data[, 2]))) expect_true(NCOL(data) == 8) expect_true(NCOL(data[, 1]) == 2) expect_true("matrix" %in% class(data[, 1])) expect_true(class(data) == "data.frame") }) test_that("flatten data.frames", { mvdata <- datamat(mv_y, mv_x) vdata <- datamat(v_y, v_x) data <- datamat(mvdata, vdata, flatten = TRUE) expect_true(class(data) == "data.frame") expect_true(!"data.frame" %in% class(data[, 1])) }) } forecast/tests/testthat/test-acf.R0000644000176200001440000000073414353422625016725 0ustar liggesusers# A unit test for Acf() function if (require(testthat)) { test_that("tests for acf", { out <- Acf(wineind, lag.max = 10, type = "partial", plot = FALSE) expect_length(out$lag, 10) expect_identical(out$acf, Pacf(wineind, lag.max = 10, plot = FALSE)$acf) expect_equal(dim(Acf(wineind, lag.max = 10, type = "correlation", plot = FALSE)$acf), c(11L, 1L, 1L)) expect_equal(Acf(wineind, lag.max = 10, type = "correlation", plot = TRUE)$acf[1, 1, 1], 1) }) } forecast/tests/testthat/test-modelAR.R0000644000176200001440000002376514456202551017526 0ustar liggesusers# A unit test for modelAR.R if (require(testthat)) { test_that("Tests for modelAR", { ## Set up functions to match 'nnetar' behavior avnnet2 <- function(x, y, repeats = repeats, linout = TRUE, trace = FALSE, ...) { mods <- list() for (i in 1:repeats) { mods[[i]] <- nnet::nnet(x, y, linout = linout, trace = trace, ...) 
} return(structure(mods, class = "nnetarmodels")) } ## predict.avnnet2 <- function(model, newdata = NULL) { if (is.null(newdata)) { if (length(predict(model[[1]])) > 1) { rowMeans(sapply(model, predict)) } else { mean(sapply(model, predict)) } } else { if (NCOL(newdata) >= 2 & NROW(newdata) >= 2) { rowMeans(sapply(model, predict, newdata = newdata)) } else { mean(sapply(model, predict, newdata = newdata)) } } } ## compare residuals to 'nnetar' expect_silent({ set.seed(123) nnetar_model <- nnetar(lynx[1:100], p = 2, P = 1, size = 3, repeats = 20) set.seed(123) modelAR_model <- modelAR(lynx[1:100], FUN = avnnet2, predict.FUN = predict.avnnet2, p = 2, P = 1, scale.inputs = TRUE, size = 3, repeats = 20) res1 <- residuals(nnetar_model) res2 <- residuals(modelAR_model) }) expect_true(identical(res1, res2)) ## check re-fitting old model and compare to 'nnetar' expect_silent({ nnetar_model2 <- nnetar(lynx[101:114], model = nnetar_model) modelAR_model2 <- modelAR(lynx[101:114], FUN = avnnet2, predict.FUN = predict.avnnet2, model = modelAR_model) res1 <- residuals(nnetar_model2) res2 <- residuals(modelAR_model2) }) expect_true(identical(res1, res2)) ## compare forecasts with 'nnetar' expect_silent({ f1 <- forecast(nnetar_model)$mean f2 <- forecast(modelAR_model)$mean }) expect_true(identical(f1, f2)) ## test lambda and compare to 'nnetar' expect_silent({ set.seed(123) oilnnet_nnetar <- nnetar(airmiles, lambda = 0.15, size = 1, repeats = 20) set.seed(123) oilnnet_modelAR <- modelAR(airmiles, FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, lambda = 0.15, size = 1, repeats = 20) }) expect_true(identical(residuals(oilnnet_nnetar, type = "response"), residuals(oilnnet_modelAR, type = "response"))) expect_true(length(forecast(oilnnet_modelAR)$mean) == 10) ## check print input name expect_silent(woolyrnqnnet <- modelAR(woolyrnq, FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, p = 1, P = 0, size = 8, repeats = 10)) expect_output(print(woolyrnqnnet), regexp = "Series: woolyrnq") ## check default forecast length expect_true(length(forecast(woolyrnqnnet)$mean) == 2 * frequency(woolyrnq)) # # Test with single-column xreg (which might be a vector) expect_silent({ set.seed(123) woolyrnqnnet <- modelAR(woolyrnq, FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, xreg = 1:length(woolyrnq), p = 2, P = 2, size = 4, repeats = 10) set.seed(123) woolyrnqnnet2 <- nnetar(woolyrnq, xreg = 1:length(woolyrnq), p = 2, P = 2, size = 4, repeats = 10) }) expect_true(all(dim(woolyrnqnnet$xreg) == c(119, 1))) expect_true(length(forecast(woolyrnqnnet, xreg = 120:130)$mean) == 11) expect_true(identical(forecast(woolyrnqnnet, xreg = 120:130)$mean, forecast(woolyrnqnnet2, xreg = 120:130)$mean)) ## Test with multiple-column xreg set.seed(123) winennet <- modelAR(wineind, FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, xreg = cbind(bizdays(wineind), fourier(wineind, 1)), p = 2, P = 1, size = 4, repeats = 10) set.seed(123) winennet2 <- nnetar( wineind, xreg = cbind(bizdays(wineind), fourier(wineind, 1)), p = 2, P = 1, size = 4, repeats = 10 ) expect_true(length(forecast(winennet, h = 2, xreg = matrix(2, 2, 3))$mean) == 2L) ## Test if h matches xreg expect_true(length(forecast(winennet, h = 5, xreg = matrix(2, 2, 3))$mean) == 2L) expect_warning( expect_equal( forecast(winennet2, xreg = matrix(2, 2, 3))$mean, forecast(winennet, xreg = matrix(2, 2, 3))$mean ), "different column names", fixed = TRUE ) ## Test that P is ignored if m=1 expect_warning(wwwnnet <- 
modelAR(WWWusage, FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, xreg = 1:length(WWWusage), p = 2, P = 4, size = 3, repeats = 10)) ## Test passing arguments to nnet expect_silent({ set.seed(123) wwwnnet <- modelAR(WWWusage, FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, xreg = 1:length(WWWusage), p = 2, P = 0, size = 3, decay = 0.1, repeats = 10) set.seed(123) wwwnnet2 <- nnetar(WWWusage, size = 3, p = 2, P = 0, xreg = 1:length(WWWusage), decay = 0.1, repeats = 10) }) expect_true(identical( forecast(wwwnnet, h = 2, xreg = (length(WWWusage) + 1):(length(WWWusage) + 5))$mean, forecast(wwwnnet2, h = 2, xreg = (length(WWWusage) + 1):(length(WWWusage) + 5))$mean )) ## Test output format correct when NAs present airna <- airmiles airna[12] <- NA expect_warning(airnnet <- modelAR(airna, FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, p = 1, size = 0, skip = TRUE, Wts = c(0, 1), maxit = 0, repeats = 5)) expect_equal(airnnet$fitted[-c(1, 12, 13)], airna[-c(11, 12, length(airna))]) ## Test model argument expect_silent({ set.seed(123) fit1 <- modelAR( WWWusage, FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, xreg = 1:length(WWWusage), p = 3, size = 2, lambda = 2, decay = 0.5, maxit = 25, repeats = 7 ) fit2 <- modelAR(WWWusage, xreg = 1:length(WWWusage), model = fit1) set.seed(123) fit3 <- nnetar(WWWusage, xreg = 1:length(WWWusage), p = 3, size = 2, lambda = 2, decay = 0.5, maxit = 25, repeats = 7) }) # Check some model parameters expect_true(identical(fit1$p, fit2$p)) expect_true(identical(fit1$lambda, fit2$lambda)) expect_true(identical(fit1$modelargs, fit2$modelargs)) # Check fitted values are all the same expect_true(identical(fitted(fit1), fitted(fit2))) expect_true(identical(fitted(fit1, h = 2), fitted(fit2, h = 2))) # Check residuals all the same expect_true(identical(residuals(fit1), residuals(fit2))) # Check number of neural nets expect_true(identical(length(fit1$model), length(fit2$model))) # Check neural network weights all the same expect_true(identical(fit1$model[[1]]$wts, fit2$model[[1]]$wts)) expect_true(identical(fit1$model[[7]]$wts, fit2$model[[7]]$wts)) ## compare results with 'nnetar' expect_true(identical(fitted(fit1), fitted(fit3))) expect_true(identical(fitted(fit1, h = 3), fitted(fit3, h = 3))) expect_true(identical(residuals(fit1, type = "response"), residuals(fit3, type = "response"))) ## Check subset argument using indices expect_silent({ set.seed(123) airnnet <- modelAR(airmiles, , FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, subset = 11:20, p = 1, size = 1, repeats = 10) set.seed(123) airnnet2 <- nnetar(airmiles, , subset = 11:20, p = 1, size = 1, repeats = 10) }) expect_true(identical(which(!is.na(fitted(airnnet))), 11:20)) expect_true(identical(fitted(airnnet), fitted(airnnet2))) expect_true(identical(forecast(airnnet, h = 5)$mean, forecast(airnnet2, h = 5)$mean)) ## Check subset argument using logical vector expect_silent({ set.seed(123) airnnet <- modelAR(airmiles, FUN = avnnet2, predict.FUN = predict.avnnet2, scale.inputs = TRUE, subset = c(rep(F, 10), rep(T, 10), rep(F, length(airmiles) - 20)), p = 1, size = 1, repeats = 10) set.seed(123) airnnet2 <- nnetar(airmiles, , subset = c(rep(F, 10), rep(T, 10), rep(F, length(airmiles) - 20)), p = 1, size = 1, repeats = 10) }) expect_true(identical(which(!is.na(fitted(airnnet))), 11:20)) expect_true(identical(fitted(airnnet), fitted(airnnet2))) expect_true(identical(forecast(airnnet, h = 5)$mean, forecast(airnnet2, h = 5)$mean)) 
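## Added illustrative sketch (not an original test): any fit/predict pair with
## the same signatures as avnnet2/predict.avnnet2 above can be plugged into
## modelAR(); the names sketch_fit/sketch_fc are arbitrary.
sketch_fit <- modelAR(lynx, FUN = avnnet2, predict.FUN = predict.avnnet2, p = 2, size = 2, repeats = 3, scale.inputs = TRUE)
sketch_fc <- forecast(sketch_fit, h = 5)
expect_length(sketch_fc$mean, 5)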
## compare prediction intervals with 'nnetar' expect_silent({ set.seed(456) f1 <- forecast(airnnet, h = 5, PI = TRUE, npaths = 100) set.seed(456) f2 <- forecast(airnnet2, h = 5, PI = TRUE, npaths = 100) }) #expect_true(identical(f1$upper, f2$upper)) #expect_true(identical(f1$lower, f2$lower)) ## Check short and constant data expect_warning(nnetfit <- modelAR(rep(1, 10), FUN = avnnet2, predict.FUN = predict.avnnet2, p = 2, P = 0, size = 1, repeats = 1, lambda = 0.1), "Constant data") expect_true(nnetfit$p == 1) expect_true(is.null(nnetfit$lambda)) expect_true(is.null(nnetfit$scalex)) expect_error(nnetfit <- modelAR(rnorm(2), FUN = avnnet2, predict.FUN = predict.avnnet2, p = 1, P = 0, size = 1, repeats = 1), "Not enough data") expect_silent(nnetfit <- modelAR(rnorm(3), FUN = avnnet2, predict.FUN = predict.avnnet2, p = 1, P = 0, size = 1, repeats = 1)) expect_true(nnetfit$p == 1) expect_silent(nnetfit <- modelAR(rnorm(3), FUN = avnnet2, predict.FUN = predict.avnnet2, p = 2, P = 0, size = 1, repeats = 1)) expect_true(nnetfit$p == 2) expect_warning(nnetfit <- modelAR(rnorm(3), FUN = avnnet2, predict.FUN = predict.avnnet2, p = 3, P = 0, size = 1, repeats = 1), "short series") expect_true(nnetfit$p == 2) expect_warning(nnetfit <- modelAR(rnorm(3), FUN = avnnet2, predict.FUN = predict.avnnet2, p = 4, P = 0, size = 1, repeats = 1), "short series") expect_true(nnetfit$p == 2) expect_warning(nnetfit <- modelAR(rnorm(10), FUN = avnnet2, predict.FUN = predict.avnnet2, xreg = rep(1, 10), p = 2, P = 0, size = 1, repeats = 1, lambda = 0.1), "Constant xreg") expect_true(is.null(nnetfit$scalexreg)) expect_warning(nnetfit <- modelAR(rnorm(3), FUN = avnnet2, predict.FUN = predict.avnnet2, xreg = matrix(c(1, 2, 3, 1, 1, 1), ncol = 2), p = 1, P = 0, size = 1, repeats = 1, lambda = 0.1), "Constant xreg") expect_true(is.null(nnetfit$scalexreg)) }) } forecast/tests/testthat/test-hfitted.R0000644000176200001440000000244614353422625017625 0ustar liggesusers# A unit test for h-step fits if (require(testthat)) { test_that("variance test on h-step fits", { mod1 <- ets(WWWusage, model = "AAN", damped = TRUE) h1 <- fitted(mod1, h = 1) h2 <- fitted(mod1, h = 2) j <- !is.na(h1) & !is.na(h2) expect_lt(var(diff(h1[j])), var(diff(h2[j]))) # hfitted automatic function selection h2_1 <- hfitted(mod1, h = 2) expect_true(identical(h2, h2_1)) mod2 <- Arima(WWWusage, order = c(1, 1, 1)) h1 <- fitted(mod2, h = 1) h2 <- fitted(mod2, h = 2) j <- !is.na(h1) & !is.na(h2) expect_lt(var(diff(h1[j])), var(diff(h2[j]))) mod3 <- arfima(WWWusage) h1 <- fitted(mod3, h = 1) h2 <- fitted(mod3, h = 2) j <- !is.na(h1) & !is.na(h2) expect_lt(var(diff(h1[j])), var(diff(h2[j]))) # mod3 <- tbats(WWWusage) # h1 <- fitted(mod3, h=1) # h2 <- fitted(mod3, h=2) # j <- !is.na(h1) & !is.na(h2) # expect_lt(var(diff(h1[j])), var(diff(h2[j]))) # # mod4 <- bats(WWWusage) # h1 <- fitted(mod4, h=1) # h2 <- fitted(mod4, h=2) # j <- !is.na(h1) & !is.na(h2) # expect_lt(var(diff(h1[j])), var(diff(h2[j]))) mod5 <- nnetar(WWWusage) h1 <- fitted(mod5, h = 1) h2 <- fitted(mod5, h = 2) j <- !is.na(h1) & !is.na(h2) expect_lt(var(diff(h1[j])), var(diff(h2[j]))) }) } forecast/tests/testthat/test-msts.R0000644000176200001440000000041714353422625017160 0ustar liggesusers# A unit test for msts.R if (require(testthat)) { test_that("tests for msts() and print.msts()", { x <- msts(taylor, seasonal.periods = c(48, 336), ts.frequency = 48, start = 2000 + 22 / 52) expect_output(print(x), regexp = "Multi-Seasonal Time Series") }) } 
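# Added illustrative sketch (commented; not part of the original test file):
# msts() stores its seasonal periods in the "msts" attribute, which
# multi-seasonal methods such as tbats() and mstl() rely on, e.g.
# x <- msts(taylor, seasonal.periods = c(48, 336)) # half-hourly data: daily and weekly periods
# attr(x, "msts") # c(48, 336)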
forecast/tests/testthat/test-accuracy.R0000644000176200001440000000306514353422625017766 0ustar liggesusers# A unit test for accuracy() function if (require(testthat)) { test_that("tests for a non-forecast object (input)", { expect_error(accuracy(USAccDeaths)) }) test_that("tests for dimension (output)", { train <- window(USAccDeaths, start = c(1973, 1), end = c(1976, 12)) test <- window(USAccDeaths, start = c(1977, 1)) fcasts <- forecast(train, h = 6) expect_identical(dim(accuracy(fcasts)), c(1L, 7L)) expect_identical(dim(accuracy(fcasts, test)), c(2L, 8L)) expect_false( all(dim(accuracy(fcasts, test, test = 1:2)) == dim(accuracy(fcasts, test))) ) expect_identical(accuracy(fcasts, test = 1:length(train)), accuracy(fcasts)) }) test_that("tests for accuracy (output)", { # Test arima fitarima <- Arima(USAccDeaths, order = c(0, 1, 1), seasonal = c(0, 1, 1)) accuracyarima <- accuracy(fitarima)[1, "RMSE"] accuracyarimasim <- accuracy(Arima(simulate(fitarima, seed = 123), order = c(0, 1, 0), seasonal = c(0, 0, 1)))[1, "RMSE"] expect_lt(accuracyarima, accuracyarimasim) # Test ets fitets <- ets(AirPassengers, model = "MAM", damped = TRUE) accuracyets <- accuracy(fitets)[1, "RMSE"] accuracyetssim <- accuracy(ets(simulate(fitets, seed = 123), model = "MAM", damped = TRUE))[1, "RMSE"] expect_lt(accuracyets, accuracyetssim) # Test lm month <- factor(rep(1:12, 14)) fitlm <- lm(wineind[1:168] ~ month) accuracylm <- accuracy(fitlm)[1, "RMSE"] accuracylmsim <- accuracy(lm(simulate(fitlm, seed = 123)[, 1] ~ month))[1, "RMSE"] expect_gt(accuracylm, accuracylmsim) }) } forecast/tests/testthat/test-arfima.R0000644000176200001440000000230714150370574017431 0ustar liggesusers# A unit test for arfima.R if (require(testthat)) { arfima1 <- arfima(WWWusage, estim = "mle") arfima2 <- arfima(WWWusage, estim = "ls") arfimabc <- arfima(WWWusage, estim = "mle", lambda = 0.75, biasadj = FALSE) arfimabc2 <- arfima(WWWusage, estim = "mle", lambda = 0.75, biasadj = TRUE) test_that("test accuracy(), fitted(), and residuals().", { expect_true(all(arimaorder(arfima1) == arimaorder(arfima2))) fitarfima <- fitted(arfima1) residarfima <- residuals(arfima2) expect_true(length(fitarfima) == length(residarfima)) expect_true(all(getResponse(arfima1) == WWWusage)) expect_false(identical(arfimabc$fitted, arfimabc2$fitted)) expect_error(accuracy(arfima1), NA) expect_equal(mean(residuals(arfima1)), accuracy(arfima1)[, "ME"]) }) test_that("test forecast.fracdiff()", { expect_true(all(forecast(arfima1, fan = TRUE)$mean == forecast(arfima1, fan = FALSE)$mean)) expect_error(forecast(arfimabc, level = -10)) expect_error(forecast(arfimabc, level = 110)) expect_false(identical(forecast(arfimabc, biasadj = FALSE), forecast(arfimabc, biasadj = TRUE))) expect_output(print(summary(forecast(arfimabc))), regexp = "Forecast method: ARFIMA") }) } forecast/tests/testthat/test-boxcox.R0000644000176200001440000000416714353422625017502 0ustar liggesusers# A unit test for boxcox transformations if (require(testthat)) { test_that("tests for biasadj automatically set based on model fit", { # lm fit <- tslm(USAccDeaths ~ trend, lambda = 0.5, biasadj = TRUE) expect_true(all.equal(forecast(fit), forecast(fit, biasadj = TRUE))) # HoltWintersZZ fit <- ses(USAccDeaths, initial = "simple", lambda = 0.5, biasadj = TRUE) expect_true(all.equal(forecast(fit), forecast(fit, biasadj = TRUE))) # arfima x <- fracdiff::fracdiff.sim(100, ma = -.4, d = .3)$series fit <- arfima(x) expect_true(all.equal(forecast(fit), forecast(fit, biasadj=TRUE))) #arima fit1 <- 
Arima(USAccDeaths, order = c(0,1,1), seasonal = c(0,1,1), lambda = 0.5, biasadj = TRUE) fit2 <- auto.arima(USAccDeaths, max.p=0, max.d=1, max.q=1, max.P=0, max.D=1, max.Q=1, lambda = 0.5, biasadj = TRUE) expect_true(all.equal(forecast(fit1), forecast(fit1, biasadj=TRUE))) expect_true(all.equal(forecast(fit2), forecast(fit2, biasadj=TRUE))) expect_true(all.equal(forecast(fit1)$mean, forecast(fit2)$mean)) # ets fit <- ets(USAccDeaths, model = "ANA", lambda = 0.5, biasadj = TRUE) expect_true(all.equal(forecast(fit), forecast(fit, biasadj = TRUE))) # bats # fit <- bats(USAccDeaths, use.box.cox = TRUE, biasadj = TRUE) # expect_true(all.equal(forecast(fit), forecast(fit, biasadj=TRUE))) # tbats # fit <- tbats(USAccDeaths, use.box.cox = TRUE, biasadj = TRUE) # expect_true(all.equal(forecast(fit), forecast(fit, biasadj=TRUE))) }) test_that("tests for automatic lambda selection in BoxCox transformation", { lambda_auto <- BoxCox.lambda(USAccDeaths) # lm fit <- tslm(USAccDeaths ~ trend, lambda = "auto", biasadj = TRUE) expect_equal(as.numeric(fit$lambda), lambda_auto, tolerance=1e-3) # ets fit <- ets(USAccDeaths, model = "ANA", lambda = "auto", biasadj = TRUE) expect_equal(as.numeric(fit$lambda), lambda_auto, tolerance=1e-3) # arima fit <- Arima(USAccDeaths, order = c(0,1,1), seasonal = c(0,1,1), lambda = "auto", biasadj = TRUE) expect_equal(as.numeric(fit$lambda), lambda_auto, tolerance=1e-3) }) } forecast/tests/testthat/test-arima.R0000644000176200001440000001165514353422625017271 0ustar liggesusers# A unit test for Arima() function if (require(testthat)) { test_that("tests for a non-ts object", { set.seed(123) abc <- rnorm(50, 5, 1) fit <- Arima(abc, order = c(2, 0, 1)) expect_identical(fit$arma, c(2L, 1L, 0L, 0L, 1L, 0L, 0L)) }) test_that("tests for a ts with the seasonal component", { fit <- Arima(wineind, order = c(1, 1, 1), seasonal = c(0, 1, 1)) expect_identical(fit$arma, c(1L, 1L, 0L, 1L, 12L, 1L, 1L)) }) test_that("tests for ARIMA errors", { fit <- Arima(wineind, order = c(1, 1, 1), seasonal = c(0, 1, 1)) expect_identical(residuals(fit, type = "regression"), wineind) }) test_that("tests for arimaorder", { for (ar in 1:5) { for (i in 0:1) { for (ma in 1:5) { fitarima <- Arima(lynx, order = c(ar, i, ma), method = "ML", include.constant = TRUE, lambda = 0.5) arextracted <- fitarima$arma[1] iextracted <- fitarima$arma[6] maextracted <- fitarima$arma[2] expect_true(all(arimaorder(fitarima) == c(arextracted, iextracted, maextracted))) expect_true(all(names(arimaorder(fitarima)) == c("p", "d", "q"))) expect_true(arimaorder(fitarima)["p"] == ar) expect_true(arimaorder(fitarima)["d"] == i) expect_true(arimaorder(fitarima)["q"] == ma) } } } # Test ar arMod <- ar(lynx, order.max = 2) expect_true(arimaorder(arMod)["p"] == 2) expect_true(arimaorder(arMod)["d"] == 0) expect_true(arimaorder(arMod)["q"] == 0) expect_true(all(names(arimaorder(arMod)) == c("p", "d", "q"))) # Test SARIMA sarimaMod <- Arima(wineind, order = c(1, 1, 2), seasonal=c(0, 1,1)) expect_true(all(names(arimaorder(sarimaMod)) == c("p", "d", "q", "P", "D", "Q", "Frequency"))) expect_true(arimaorder(sarimaMod)["p"] == 1) expect_true(arimaorder(sarimaMod)["d"] == 1) expect_true(arimaorder(sarimaMod)["q"] == 2) expect_true(arimaorder(sarimaMod)["P"] == 0) expect_true(arimaorder(sarimaMod)["D"] == 1) expect_true(arimaorder(sarimaMod)["Q"] == 1) expect_true(arimaorder(sarimaMod)["Frequency"] == frequency(wineind)) # Test fracdiff set.seed(4) fracdiffMod <- fracdiff::fracdiff(lynx, nar = 2) expect_true(all(names(arimaorder(fracdiffMod)) == 
c("p", "d", "q"))) expect_true(arimaorder(fracdiffMod)["p"] == 2) expect_true(arimaorder(fracdiffMod)["d"] >= 0) expect_true(arimaorder(fracdiffMod)["d"] <= 1) expect_true(arimaorder(fracdiffMod)["p"] == 2) }) test_that("tests for forecast.Arima", { fit1 <- Arima(wineind, order = c(1, 1, 2), seasonal = c(0, 1, 1), method = "CSS") expect_warning(forecast.Arima(fit1, xreg = 1:10), "xreg not required") expect_warning(forecast.Arima(fit1, include.drift = TRUE)) expect_true(all.equal(forecast.Arima(fit1, bootstrap = TRUE, npaths = 100)$ mean, forecast.Arima(fit1)$mean)) fit2 <- Arima(wineind, order = c(1, 0, 1), seasonal = c(0, 0, 0), include.drift = TRUE) expect_warning(Arima(wineind, order = c(1, 2, 1), include.drift = TRUE)) expect_true("drift" %in% names(coef(fit2))) expect_true(length(forecast.Arima(fit2)$mean) == 2 * frequency(wineind)) fit3 <- Arima(wineind, order = c(1, 1, 2), seasonal = c(0, 1, 1), include.mean = FALSE) expect_false("intercept" %in% names(coef(fit3))) expect_true(frequency(forecast.Arima(fit3)$mean) == frequency(wineind)) fit4 <- Arima(wineind, order = c(1, 1, 2), seasonal = c(0, 1, 1), xreg = rnorm(length(wineind))) expect_error(forecast.Arima(fit4)) expect_error(forecast.Arima(fit4, xreg = matrix(rnorm(40), ncol = 2))) forecast.Arima(fit4, xreg = rnorm(20))$mean %>% expect_length(20) fit5 <- Arima(wineind[1:150], order = c(1, 1, 2), seasonal = c(0, 1, 1), method = "ML") expect_true(accuracy(fit5)[1, "MAPE"] < accuracy(Arima(wineind, model = fit5))[1, "MAPE"]) fit6 <- Arima(wineind, order = c(1, 1, 2), seasonal = c(0, 1, 1), method = "CSS", lambda = 5) expect_false(identical(fit1$coef, fit6$coef)) }) test_that("tests for search.arima", { set.seed(444) arimasim <- arima.sim(n = 300, model = list(ar = runif(8, -.1, 0.1), ma = runif(8, -0.1, 0.1), sd = 0.1)) expect_true(AIC(auto.arima(arimasim)) >= AIC(auto.arima(arimasim, stepwise = FALSE))) }) test_that("tests for forecast.ar()", { fitar <- ar(taylor) arfc <- forecast.ar(fitar)$mean expect_true(all(arfc == forecast.ar(fitar, bootstrap = TRUE, npaths = 100)$mean)) expect_true(all(arfc == forecast.ar(fitar, fan = TRUE)$mean)) expect_error(forecast.ar(fitar, level = -10)) expect_error(forecast.ar(fitar, level = 110)) expect_true(all(arfc + 1 == forecast.ar(fitar, lambda = 1)$mean)) arfcbc <- forecast.ar(fitar, lambda = 2) arfcabc <- forecast.ar(fitar, lambda = 2, biasadj = TRUE) expect_false(identical(arfcbc$mean, arfcabc$mean)) }) test_that("tests for as.character.Arima()", { expect_match(as.character(auto.arima(woolyrnq)), regexp = "ARIMA") }) } forecast/tests/testthat/test-thetaf.R0000644000176200001440000000075714150370574017454 0ustar liggesusers# A unit test for thetaf.R if (require(testthat)) { test_that("test thetaf()", { thetafc <- thetaf(WWWusage)$mean expect_true(all(thetafc == thetaf(WWWusage, fan = TRUE)$mean)) expect_error(thetaf(WWWusage, level = -10)) expect_error(thetaf(WWWusage, level = 110)) # Constant series should not error series <- ts(rep(950, 20), frequency = 4) constantForecast <- expect_error(thetaf(series), NA) expect_true(is.constant(round(constantForecast$mean, 12))) }) } forecast/tests/testthat/test-clean.R0000644000176200001440000000256614353422625017263 0ustar liggesusers# A unit test for na.interp() and tsclean() if (require(testthat)) { test_that("tests for na.interp", { # Test nonseasonal interpolation expect_true(all(na.interp(c(1, 2, 3, NA, 5, 6, 7)) == 1:7)) # Test for identical on series without NAs expect_true(all(na.interp(wineind) == wineind)) # Test seasonal interpolation 
testseries <- ts(rep(1:7, 5), frequency = 7) testseries[c(1, 3, 11, 17)] <- NA expect_true(sum(abs(na.interp(testseries) - rep(1:7, 5))) < 1e-12) # Test length of output expect_true(length(testseries) == length(na.interp(testseries))) }) test_that("tests for tsclean", { # Test for no NAs expect_false(any(is.na(tsclean(gold)))) # Test for removing outliers in seasonal series testseries <- ts(rep(1:7, 5), frequency = 7) testseries[c(2, 4, 14)] <- 0 expect_true(sum(abs(tsclean(testseries) - rep(1:7, 5))) < 1e-12) # Test for NAs left with replace.missing = FALSE argument testseries[c(2, 4, 14)] <- NA expect_true(any(is.na(tsclean(testseries, replace.missing = FALSE)))) # Test for outliers in a series expect_equal(sum(abs(wineind - tsclean(wineind)) > 1e-6), 1) # Test for identical on series without NAs or outliers expect_true(identical(USAccDeaths, tsclean(USAccDeaths))) # Test length of output expect_true(length(tsclean(testseries)) == length(testseries)) }) } forecast/tests/testthat/test-bats.R0000644000176200001440000000134714353422625017126 0ustar liggesusers# A unit test for bats function if (require(testthat)) { test_that("tests for a non-ts object", { set.seed(123) abc <- rnorm(50, 5, 1) fit <- bats(abc, use.box.cox = TRUE, use.parallel = FALSE) expect_false(fit$lambda == 0) expect_output(print(fit), "Seed States") expect_equal(length(residuals(fit)), 50L) plot(fit) expect_equal(bats(1, use.box.cox = TRUE, use.parallel = FALSE)$AIC, -Inf) expect_equal(bats(-1, use.box.cox = TRUE, use.parallel = FALSE)$AIC, -Inf) }) test_that("Test parallel of bats", { abc <- rnorm(50, 5, 1) skip_on_cran() skip_if(identical(Sys.getenv("GITHUB_ACTIONS"), "true")) expect_gt(bats(abc, use.box.cox = TRUE, use.parallel = TRUE)$lambda, 0.999) }) } forecast/tests/testthat/test-graph.R0000644000176200001440000000113414353422625017270 0ustar liggesusers# A unit test for graph.R if (require(testthat)) { test_that("Tests for seasonplot()", { expect_error(seasonplot(airmiles)) seasonplot(ts(gold, frequency = 7)) seasonplot(woolyrnq) seasonplot(wineind) seasonplot(wineind, year.labels = TRUE) seasonplot(wineind, year.labels.left = TRUE) # seasonplot(taylor) }) test_that("Tests for tsdisplay()", { expect_silent(tsdisplay(airmiles, ci.type = "ma")) expect_silent(tsdisplay(1:20)) expect_silent(tsdisplay(airmiles, plot.type = "scatter")) expect_silent(tsdisplay(airmiles, plot.type = "spectrum")) }) } forecast/tests/testthat/test-dshw.R0000644000176200001440000000204014353422625017131 0ustar liggesusers# A unit test for dshw function if (require(testthat)) { test_that("Test dshw()", { # Test negative values and period1 and period2 not specified set.seed(345) expect_error(dshw(-10:10)) expect_error(dshw(abs(rnorm(100)))) # Test fits with period1 and period2 swapped set.seed(5555) t <- seq(0, 1, by = 0.1) x <- exp(sin(2 * pi * t) + cos(2 * pi * t * 4) + rnorm(length(t), 0, 0.1)) fit1 <- dshw(x, period1 = 4, period2 = 2)$mean fit2 <- dshw(x, period1 = 2, period2 = 4)$mean expect_true(all(fit1 == fit2)) # Test fits with lambda specified and armethod = FALSE y <- x + 1 fit3 <- dshw(y, period1 = 2, period2 = 4, lambda = 2, biasadj = FALSE) fit4 <- dshw(y, period1 = 2, period2 = 4, lambda = 2, biasadj = TRUE) expect_false(identical(fit3$mean, fit4$mean)) fit5 <- dshw(x, period1 = 2, period2 = 4, armethod = FALSE) # Test fits with inappropriate periods specified expect_error(dshw(x, period1 = 2, period2 = 2)) expect_error(dshw(x, period1 = 2, period2 = 4.1)) }) } 
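# Note on the two expected errors above: dshw() requires two distinct, nested
# seasonal periods, i.e. period2 must be an integer multiple of period1, so
# period2 = 2 (equal to period1) and period2 = 4.1 (not a multiple) are both
# rejected.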
forecast/tests/testthat/test-tbats.R0000644000176200001440000000473514353422625017316 0ustar liggesusers# A unit test for tbats function if (require(testthat)) { test_that("Test simple cases for tbats", { expect_error(tbats(data.frame(x1 = 1, x2 = 2), use.parallel = FALSE)) expect_warning(tbats(c(1:5, NA, 7:9), use.parallel = FALSE)) expect_true(all(forecast(tbats(rep(1, 100), use.parallel = FALSE))$mean == 1)) }) test_that("Test tbats() and forecasts", { # Fit tbats models tbatsfit1 <- tbats(subset(wineind, end = 50), use.parallel = FALSE) tbatsfit2 <- tbats(WWWusage, use.parallel = FALSE) tbatsfit3 <- tbats(as.numeric(woolyrnq), seasonal.periods = frequency(woolyrnq), use.parallel = FALSE) tbatsfit4 <- tbats(airmiles, use.box.cox = FALSE, use.parallel = FALSE) # Test tbats.components tbats.components(tbatsfit1) tbats.components(tbatsfit2) tbats.components(tbatsfit3) tbats.components(tbatsfit4) # Test accuracy.tbats() function expect_output(print(accuracy(tbatsfit1)), regexp = "ME") expect_output(print(accuracy(tbatsfit2)), regexp = "ME") expect_output(print(accuracy(tbatsfit3)), regexp = "ME") expect_output(print(accuracy(tbatsfit4)), regexp = "ME") # Test summary.tbats() expect_output(print(summary(tbatsfit1)), regexp = "Length") expect_output(print(summary(tbatsfit2)), regexp = "Length") expect_output(print(summary(tbatsfit3)), regexp = "Length") expect_output(print(summary(tbatsfit4)), regexp = "Length") # Test fitted length expect_true(length(fitted(tbatsfit1)) == 50) expect_true(length(fitted(tbatsfit2)) == length(WWWusage)) expect_true(length(fitted(tbatsfit3)) == length(woolyrnq)) expect_true(length(fitted(tbatsfit4)) == length(airmiles)) # Test length of forecast expect_true(length(forecast(tbatsfit1)$mean) == 2 * frequency(wineind)) expect_true(length(forecast(tbatsfit2)$mean) == 10) # expect_true(length(forecast(tbatsfit3)$mean) == 2 * frequency(woolyrnq)) expect_true(length(forecast(tbatsfit4)$mean) == 10) # Test inappropriate levels expect_error(forecast(tbatsfit1, level = -10)) expect_error(forecast(tbatsfit1, level = 110)) # Test forecasts with fan = TRUE expect_true(all(forecast(tbatsfit1, fan = TRUE)$mean == forecast(tbatsfit1)$mean)) }) #test_that("Test tbats() with parallel", { # Tests will not run on Travis in parallel # expect_output(print(tbats(woolyrnq, num.cores = 1)), regexp = "TBATS") # expect_output(print(tbats(elecsales, num.cores = 1, use.trend = FALSE)), regexp = "BATS") #}) } forecast/tests/testthat/test-forecast2.R0000644000176200001440000000736214353422625020070 0ustar liggesusers# A unit test for forecast2.R if (require(testthat)) { test_that("test meanf()", { meanfc <- mean(wineind) expect_true(all(meanf(wineind)$mean == meanfc)) bcforecast <- meanf(wineind, lambda = -0.5)$mean expect_true(max(bcforecast) == min(bcforecast)) expect_true(all(meanf(wineind, fan = TRUE)$mean == meanfc)) expect_error(meanf(wineind, level = -10)) expect_error(meanf(wineind, level = 110)) # Constant series should not error series <- ts(rep(950, 20), frequency = 4) constantForecast <- expect_error(rwf(series), NA) expect_true(is.constant(constantForecast$mean)) }) test_that("test rwf()", { rwfc <- rwf(airmiles)$mean expect_true(all(rwfc == naive(airmiles)$mean)) expect_true(all(rwfc < rwf(airmiles, drift = TRUE)$mean)) expect_true(all(rwf(airmiles, fan = TRUE)$mean == rwfc)) expect_true(length(rwf(airmiles, lambda = 0.15)$mean) == 10) expect_false(identical(rwf(airmiles, lambda = 0.15, biasadj = FALSE)$mean, rwf(airmiles, lambda = 0.15, biasadj = TRUE)$mean)) # Constant series 
should not error series <- ts(rep(950, 20), frequency = 4) constantForecast <- expect_error(rwf(series), NA) expect_true(is.constant(constantForecast$mean)) }) test_that("test forecast.HoltWinters()", { hwmod <- stats::HoltWinters(UKgas) expect_true(all(forecast(hwmod, fan = TRUE)$mean == forecast(hwmod)$mean)) expect_error(forecast(hwmod, level = -10)) expect_error(forecast(hwmod, level = 110)) # Forecasts transformed manually with Box-Cox should match # forecasts when lambda is passed as an argument hwmodbc <- stats::HoltWinters(BoxCox(UKgas, lambda = 0.25)) hwfc <- forecast(hwmodbc, lambda = 0.25, biasadj = FALSE)$mean hwfc2 <- forecast(hwmodbc, lambda = 0.25, biasadj = TRUE)$mean hwbcfc <- InvBoxCox(forecast(hwmodbc)$mean, lambda = 0.25) expect_true(all(hwfc == hwbcfc)) expect_false(identical(hwfc, hwfc2)) }) test_that("test for forecast.StructTS()", { structtsmod <- stats::StructTS(wineind) fc1 <- forecast(structtsmod)$mean expect_true(all(fc1 == forecast(structtsmod, fan = TRUE)$mean)) expect_error(forecast(structtsmod, level = -10)) expect_error(forecast(structtsmod, level = 110)) # Forecasts transformed manually with Box-Cox should match # forecasts when lambda is passed as an argument bcseries <- BoxCox(woolyrnq, lambda = 0.19) fc2 <- InvBoxCox(forecast(stats::StructTS(bcseries))$mean, lambda = 0.19) fc3 <- forecast(stats::StructTS(bcseries), lambda = 0.19, biasadj = FALSE)$mean fc4 <- forecast(stats::StructTS(bcseries), lambda = 0.19, biasadj = TRUE)$mean expect_true(all(fc2 == fc3)) expect_false(identical(fc3, fc4)) }) test_that("test croston()", { set.seed(1234) expect_error(croston(rnorm(100))) expect_true(all(croston(rep(0, 100))$mean == 0)) }) test_that("test holt()", { expect_output(print(summary(holt(wineind))), regexp = "Forecast method: Holt's method") expect_output(print(summary(holt(wineind, damped = TRUE))), regexp = "Forecast method: Damped Holt's method") }) test_that("test hw()", { expect_output(print(summary(hw(wineind))), regexp = "Forecast method: Holt-Winters' additive method") }) test_that("test naive() and snaive()", { # WWWusage has frequency = 1, so naive and snaive should match expect_true(all(snaive(WWWusage, h = 10)$mean == naive(WWWusage)$mean)) expect_true(all(snaive(WWWusage, h = 10)$upper == naive(WWWusage)$upper)) expect_true(all(snaive(WWWusage, h = 10)$lower == naive(WWWusage)$lower)) # Constant series should not error series <- ts(rep(950, 20), frequency = 4) constantForecast <- expect_error(snaive(series), NA) expect_true(is.constant(constantForecast$mean)) }) }
forecast/tests/testthat/test-armaroots.R0000644000176200001440000000045114353422625020177 0ustar liggesusers# A unit test for armaroots.R if (require(testthat)) { test_that("Tests for plot.Arima()", { arimafit <- Arima(lynx, c(2, 0, 2), include.mean = FALSE) plot(arimafit) plot(arimafit, type = "ma") plot(arimafit, type = "ar") expect_warning(plot(Arima(lynx, c(0, 1, 0)))) }) }
forecast/tests/testthat/test-subset.R0000644000176200001440000000374614353422625017503 0ustar liggesusers# A unit test for subset function if (require(testthat)) { mtsobj <- ts(matrix(rnorm(200), ncol = 2), frequency = 4) test_that("tests specifying correct argument", { sub <- subset(wineind, month = "September") expect_length(sub, tsp(sub)[2] - tsp(sub)[1] + 1) expect_identical(round(sum(sub)), 338985) sub2 <- subset(wineind, month = "SEPT") expect_identical(sub, sub2) sub2 <- subset(wineind, month = 9) expect_identical(sub, sub2) sub2 <- subset(wineind, season = 9) expect_identical(sub, sub2) sub <-
subset(woolyrnq, quarter = 1) expect_length(sub,tsp(sub)[2] - tsp(sub)[1] + 1) expect_identical(sum(sub), 153142) sub2 <- subset(woolyrnq, season = 1) expect_identical(sub, sub2) sub <- subset(wineind, subset = wineind < 25000) expect_identical(round(sum(sub)), 1948985) expect_length(sub,91) sub <- subset(mtsobj, c(1, 1, rep(0, 98)) == 1) expect_identical(ncol(sub), 2L) expect_identical(nrow(sub), 2L) sub <- subset(mtsobj, quarter = 1) expect_identical(ncol(sub), 2L) expect_identical(nrow(sub), 25L) }) test_that("tests specifying wrong argument", { expect_error(subset(wineind, quarter = 1), "Data is not quarterly") expect_error(subset(woolyrnq, month = "January"), "Data is not monthly") }) test_that("test for bad input", { expect_error(subset.ts(mtcars, quarter = 1), "Data must be seasonal") expect_error(subset(wineind, subset = c(1, 2)), "subset must be the same length as x") expect_error(subset(mtsobj, mtsobj < .5), "subset must be a vector of rows to keep") expect_error(subset(wineind, month = "Jaan"), "No recognizable months") expect_error(subset(wineind, season = 1:14), "Seasons must be between 1 and 12") expect_error(subset(wineind, month = 1:14), "Months must be between 1 and 12") expect_error(subset(woolyrnq, quarter = "qq1"), "No recognizable quarters") expect_error(subset(woolyrnq, quarter = 1:6), "Quarters must be between 1 and 4") }) } forecast/tests/testthat/test-mforecast.R0000644000176200001440000000463414353422625020162 0ustar liggesusers# A unit test for forecast.R if (require(testthat)) { mv_y <- ts(cbind(rnorm(120, 0, 3) + 1:120 + 20 * sin(2 * pi * (1:120) / 12), rnorm(120, 3, 7) + 1:120 + 16 * sin(2 * pi * (1:120 + 6) / 12)), frequency = 12) mv_x <- ts(cbind(rnorm(120, 0, 8) + (1:120) / 2 + 42 * sin(2 * pi * (1:120) / 12), rnorm(120, 3, 7) + (1:120) * -1 + 20 * sin(2 * pi * (1:120 + 6) / 12)), frequency = 12) v_y <- ts(rnorm(120, 0, 8) + (1:120) / 2 + 12 * sin(2 * pi * (1:120) / 12), frequency = 12) v_x <- ts(rnorm(120, 0, 1) + (1:120) * (-1) + 28 * sin(2 * pi * (1:120) / 12), frequency = 12) test_that("tests for is.mforecast()", { fit <- lm(mv_y ~ v_x) fcast <- forecast(fit, newdata = data.frame(v_x = 30)) expect_true(is.mforecast(fcast)) fit <- lm(v_y ~ v_x) fcast <- forecast(fit, newdata = data.frame(v_x = 30)) expect_false(is.mforecast(fcast)) }) test_that("tests for mlmsplit()", { fit <- lm(mv_y ~ v_x) fit1 <- mlmsplit(fit, index = 1) fit2 <- mlmsplit(fit, index = 2) fit3 <- lm(mv_y[, 1] ~ v_x) fit4 <- lm(mv_y[, 2] ~ v_x) expect_identical(fit1$coefficients, fit3$coefficients) expect_identical(fit2$coefficients, fit4$coefficients) expect_identical(fit1$rank, fit3$rank) expect_identical(fit2$rank, fit4$rank) expect_equal(fit1$fitted.values, fit3$fitted.values) expect_equal(fit2$fitted.values, fit4$fitted.values) expect_error(mlmsplit(fit), "Must select lm") }) test_that("tests for forecast.mlm()", { fit <- lm(mv_y ~ v_x) fcast <- forecast(fit, newdata = data.frame(v_x = 30)) fit2 <- lm(mv_y[, 1] ~ v_x) fcast2 <- forecast(fit2, newdata = data.frame(v_x = 30)) expect_equal(fcast$forecast[[1]]$residuals, fcast2$residuals) }) test_that("tests for forecast.mts()", { lungDeaths <- cbind(mdeaths, fdeaths) fcast_b <- forecast(lungDeaths) fcast_m <- forecast(mdeaths) fcast_f <- forecast(fdeaths) expect_true(all.equal(fcast_b$forecast[[1]]$mean, fcast_m$mean)) expect_true(all.equal(fcast_b$forecast[[2]]$mean, fcast_f$mean)) }) test_that("tests for print.mforecast()", { fit <- lm(mv_y ~ v_x) fcast <- forecast(fit, newdata = data.frame(v_x = 30)) expect_output(print(fcast), 
"Series 1") expect_output(print(fcast), "Series 2") }) test_that("tests for plot.mforecast()", { fit <- lm(mv_y ~ v_x) fcast <- forecast(fit, newdata = data.frame(v_x = 30)) expect_silent(plot(fcast)) }) } forecast/tests/testthat.R0000644000176200001440000000011714150370574015212 0ustar liggesusersSys.setenv("R_TESTS" = "") if (require(testthat)) { test_check("forecast") } forecast/src/0000755000176200001440000000000014474112177012657 5ustar liggesusersforecast/src/etscalc.c0000644000176200001440000001710314150370574014441 0ustar liggesusers#include #define NONE 0 #define ADD 1 #define MULT 2 #define DAMPED 1 #define TOL 1.0e-10 #define HUGEN 1.0e10 #define NA -99999.0 // Functions called by R void etscalc(double *, int *, double *, int *, int *, int *, int *, double *, double *, double *, double *, double *, double *, double *, int*); void etssimulate(double *, int *, int *, int *, int *, double *, double *, double *, double *, int *, double *, double *); void etsforecast(double *, int *, int *, int *, double *, int *, double *); // Internal functions void forecast(double, double, double *, int, int, int, double, double *, int); void update(double *, double *, double *, double *, double *, double *, int, int, int, double, double, double, double, double); // ****************************************************************** void etscalc(double *y, int *n, double *x, int *m, int *error, int *trend, int *season, double *alpha, double *beta, double *gamma, double *phi, double *e, double *lik, double *amse, int *nmse) { int i, j, nstates; double oldl, l, oldb, b, olds[24], s[24], f[30], lik2, tmp, denom[30]; if((*m > 24) & (*season > NONE)) return; else if(*m < 1) *m = 1; if(*nmse > 30) *nmse = 30; nstates = (*m)*(*season>NONE) + 1 + (*trend>NONE); // Copy initial state components l = x[0]; if(*trend > NONE) b = x[1]; if(*season > NONE) { for(j=0; j<(*m); j++) s[j] = x[(*trend>NONE)+j+1]; } *lik = 0.0; lik2 = 0.0; for(j=0; j<(*nmse); j++) { amse[j] = 0.0; denom[j] = 0.0; } for (i=0; i<(*n); i++) { // COPY PREVIOUS STATE oldl = l; if(*trend > NONE) oldb = b; if(*season > NONE) { for(j=0; j<(*m); j++) olds[j] = s[j]; } // ONE STEP FORECAST forecast(oldl, oldb, olds, *m, *trend, *season, *phi, f, *nmse); if(fabs(f[0]-NA) < TOL) { *lik = NA; return; } if(*error == ADD) e[i] = y[i] - f[0]; else e[i] = (y[i] - f[0])/f[0]; for(j=0; j<(*nmse); j++) { if(i+j<(*n)) { denom[j] += 1.0; tmp = y[i+j]-f[j]; amse[j] = (amse[j] * (denom[j]-1.0) + (tmp*tmp)) / denom[j]; } } // UPDATE STATE update(&oldl, &l, &oldb, &b, olds, s, *m, *trend, *season, *alpha, *beta, *gamma, *phi, y[i]); // STORE NEW STATE x[nstates*(i+1)] = l; if(*trend > NONE) x[nstates*(i+1)+1] = b; if(*season > NONE) { for(j=0; j<(*m); j++) x[(*trend>NONE)+nstates*(i+1)+j+1] = s[j]; } *lik = *lik + e[i]*e[i]; lik2 += log(fabs(f[0])); } *lik = (*n) * log(*lik); if(*error == MULT) *lik += 2*lik2; } // ********************************************************************************* void etssimulate(double *x, int *m, int *error, int *trend, int *season, double *alpha, double *beta, double *gamma, double *phi, int *h, double *y, double *e) { int i, j, nstates; double oldl, l, oldb, b, olds[24], s[24], f[10]; if((*m > 24) & (*season > NONE)) return; else if(*m < 1) *m = 1; nstates = (*m)*(*season>NONE) + 1 + (*trend>NONE); // Copy initial state components l = x[0]; if(*trend > NONE) b = x[1]; if(*season > NONE) { for(j=0; j<(*m); j++) s[j] = x[(*trend>NONE)+j+1]; } for (i=0; i<(*h); i++) { // COPY PREVIOUS STATE oldl = l; if(*trend > NONE) oldb = b; 
if(*season > NONE) { for(j=0; j<(*m); j++) olds[j] = s[j]; } // ONE STEP FORECAST forecast(oldl, oldb, olds, *m, *trend, *season, *phi, f, 1); if(fabs(f[0]-NA) < TOL) { y[0]=NA; return; } if(*error == ADD) y[i] = f[0] + e[i]; else y[i] = f[0]*(1.0+e[i]); // UPDATE STATE update(&oldl, &l, &oldb, &b, olds, s, *m, *trend, *season, *alpha, *beta, *gamma, *phi, y[i]); } } // ********************************************************************************* void etsforecast(double *x, int *m, int *trend, int *season, double *phi, int *h, double *f) { int j; double l, b, s[24]; if((*m > 24) & (*season > NONE)) return; else if(*m < 1) *m = 1; // Copy initial state components l = x[0]; b = 0.0; if(*trend > NONE) b = x[1]; if(*season > NONE) { for(j=0; j<(*m); j++) s[j] = x[(*trend>NONE)+j+1]; } // Compute forecasts forecast(l, b, s, *m, *trend, *season, *phi, f, *h); } // ***************************************************************** void forecast(double l, double b, double *s, int m, int trend, int season, double phi, double *f, int h) { int i,j; double phistar; phistar = phi; // FORECASTS for(i=0; i NONE) { if(trend==ADD) r = (*l) - (*oldl); // l[t]-l[t-1] else //if(trend==MULT) { if(fabs(*oldl) < TOL) r = HUGEN; else r = (*l)/(*oldl); // l[t]/l[t-1] } *b = phib + (beta/alpha)*(r - phib); // b[t] = phi*b[t-1] + beta*(r - phi*b[t-1]) // b[t] = b[t-1]^phi + beta*(r - b[t-1]^phi) } // NEW SEASON if(season > NONE) { if(season==ADD) t = y - q; else //if(season==MULT) { if(fabs(q) < TOL) t = HUGEN; else t = y / q; } s[0] = olds[m-1] + gamma*(t - olds[m-1]); // s[t] = s[t-m] + gamma*(t - s[t-m]) for(j=1; j #include #include //For R's Nelder-Mead solver #include #include #include "etsTargetFunction.h" // This function initializes all the parameters, constructs an // object of type EtsTargetFunction and adds an external pointer // to this object with name "ets.xptr" // to the environment submitted as p_rho // RcppExport SEXP etsTargetFunctionInit(SEXP p_y, SEXP p_nstate, SEXP p_errortype, SEXP p_trendtype, SEXP p_seasontype, SEXP p_damped, SEXP p_lower, SEXP p_upper, SEXP p_opt_crit, SEXP p_nmse, SEXP p_bounds, SEXP p_m, SEXP p_optAlpha, SEXP p_optBeta, SEXP p_optGamma, SEXP p_optPhi, SEXP p_givenAlpha, SEXP p_givenBeta, SEXP p_givenGamma, SEXP p_givenPhi, SEXP p_alpha, SEXP p_beta, SEXP p_gamma, SEXP p_phi, SEXP p_rho) { BEGIN_RCPP; EtsTargetFunction* sp = new EtsTargetFunction(); std::vector y = Rcpp::as< std::vector >(p_y); int nstate = Rcpp::as(p_nstate); int errortype = Rcpp::as(p_errortype); int trendtype = Rcpp::as(p_trendtype); int seasontype = Rcpp::as(p_seasontype); bool damped = Rcpp::as(p_damped); std::vector lower = Rcpp::as< std::vector >(p_lower); std::vector upper = Rcpp::as< std::vector >(p_upper); std::string opt_crit = Rcpp::as(p_opt_crit); int nmse = Rcpp::as(p_nmse); std::string bounds = Rcpp::as< std::string >(p_bounds); int m = Rcpp::as(p_m); bool optAlpha = Rcpp::as(p_optAlpha); bool optBeta = Rcpp::as(p_optBeta); bool optGamma = Rcpp::as(p_optGamma); bool optPhi = Rcpp::as(p_optPhi); bool givenAlpha = Rcpp::as(p_givenAlpha); bool givenBeta = Rcpp::as(p_givenBeta); bool givenGamma = Rcpp::as(p_givenGamma); bool givenPhi = Rcpp::as(p_givenPhi); double alpha = Rcpp::as(p_alpha); double beta = Rcpp::as(p_beta); double gamma = Rcpp::as(p_gamma); double phi = Rcpp::as(p_phi); sp->init(y, nstate, errortype, trendtype, seasontype, damped, lower, upper, opt_crit, nmse, bounds, m, optAlpha, optBeta, optGamma, optPhi, givenAlpha, givenBeta, givenGamma, givenPhi, alpha, beta, gamma, phi); 
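// The object is returned to R indirectly: it is wrapped in an external
// pointer and stored in the supplied environment under "ets.xptr", from
// which the optimizer callbacks below retrieve it on every evaluation.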
Rcpp::Environment e(p_rho); e["ets.xptr"] = Rcpp::XPtr( sp, true ); return Rcpp::wrap(e); END_RCPP; } // RcppExport double targetFunctionRmalschains(SEXP p_par, SEXP p_env) // { // Rcpp::NumericVector par(p_par); // Rcpp::Environment e(p_env); // Rcpp::XPtr sp(e.get("ets.xptr")); // sp->eval(par.begin(), par.size()); // //return Rcpp::wrap(sp->getObjVal()); // return sp->getObjVal(); // } // RcppExport SEXP etsGetTargetFunctionRmalschainsPtr() { // typedef double (*funcPtr)(SEXP, SEXP); // return (Rcpp::XPtr(new funcPtr(&targetFunctionRmalschains))); // } /* RcppExport SEXP targetFunctionRdonlp2(SEXP p_var, SEXP p_env) { Rcpp::Environment e(p_env); Rcpp::XPtr sp(e.get("ets.xptr")); Rcpp::NumericVector var(p_var); int mode = var[0]; int fun_id = var[1]; sp->eval(var.begin()+2, var.size()-2); if(mode == 0) { if(fun_id == 0) { return Rcpp::wrap(sp->getObjVal()); } else { return Rcpp::wrap(0); //return Rcpp::wrap(sp->restrictions[fun_id-1]); } } else if(mode==1) { // error("Gradients are not implemented, exiting."); }; return R_NilValue; } RcppExport SEXP etsGetTargetFunctionRdonlp2Ptr() { typedef SEXP (*funcPtr)(SEXP, SEXP); return (Rcpp::XPtr(new funcPtr(&targetFunctionRdonlp2))); } */ double targetFunctionEtsNelderMead(int n, double *par, void *ex) { EtsTargetFunction* sp = (EtsTargetFunction*) ex; sp->eval(par, n); return sp->getObjVal(); } RcppExport SEXP etsNelderMead(SEXP p_var, SEXP p_env, SEXP p_abstol, SEXP p_intol, SEXP p_alpha, SEXP p_beta, SEXP p_gamma, SEXP p_trace, SEXP p_maxit) { double abstol = Rcpp::as(p_abstol); double intol = Rcpp::as(p_intol); double alpha = Rcpp::as(p_alpha); double beta= Rcpp::as(p_beta); double gamma= Rcpp::as(p_gamma); int trace = Rcpp::as(p_trace); int maxit = Rcpp::as(p_maxit); int fncount = 0, fail=0; double Fmin = 0.0; Rcpp::NumericVector dpar(p_var); Rcpp::NumericVector opar(dpar.size()); Rcpp::Environment e(p_env); Rcpp::XPtr sp(e.get("ets.xptr")); double (*funcPtr)(int n, double *par, void *ex) = targetFunctionEtsNelderMead; nmmin(dpar.size(), dpar.begin(), opar.begin(), &Fmin, funcPtr, &fail, abstol, intol, sp, alpha, beta, gamma, trace, &fncount, maxit); return Rcpp::List::create(Rcpp::Named("value") = Fmin, Rcpp::Named("par") = opar, Rcpp::Named("fail") = fail, Rcpp::Named("fncount") = fncount); } forecast/src/makeTBATSMatrices.cpp0000644000176200001440000000562514323125536016572 0ustar liggesusers#include "calcBATS.h" using namespace Rcpp ; SEXP makeTBATSWMatrix(SEXP smallPhi_s, SEXP kVector_s, SEXP arCoefs_s, SEXP maCoefs_s, SEXP tau_s) { BEGIN_RCPP double *smallPhi, *arCoefs, *maCoefs; int *kVector, *tau; int adjustPhi = 0; R_len_t numSeasonal = 0, numCols = 1, p = 0, q = 0; if(!Rf_isNull(smallPhi_s)) { smallPhi = REAL(smallPhi_s); adjustPhi = 1; numCols = numCols + 1; } if(!Rf_isNull(kVector_s)) { tau = &INTEGER(tau_s)[0]; kVector = INTEGER(kVector_s); numSeasonal = LENGTH(kVector_s); numCols = numCols + *tau; } if(!Rf_isNull(arCoefs_s)) { arCoefs = REAL(arCoefs_s); p = LENGTH(arCoefs_s); numCols = numCols + p; } if(!Rf_isNull(maCoefs_s)) { maCoefs = REAL(maCoefs_s); q = LENGTH(maCoefs_s); numCols = numCols + q; } NumericMatrix wTranspose_r(1, numCols); arma::mat wTranspose(wTranspose_r.begin(), wTranspose_r.nrow(), wTranspose_r.ncol(), false); if(!Rf_isNull(kVector_s)) { wTranspose.zeros(); int position = adjustPhi; for(R_len_t s = 0; s < numSeasonal; s++) { //wTranspose.submat(0,(position+1), 0, (position + kVector[s])) = arma::ones(1, kVector[s]); for(int j = (position+1); j <= (position + kVector[s]); j++) { wTranspose(0,j) = 1; } 
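// Each seasonal period contributes kVector[s] cosine states (given weight 1
// above) followed by kVector[s] sine states (left at weight 0), so the
// offset advances by 2 * kVector[s] per period: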
position = position + (2 * kVector[s]); } } wTranspose(0,0) = 1; if(adjustPhi == 1) { wTranspose(0,1) = *smallPhi; } if(!Rf_isNull(arCoefs_s)) { for(R_len_t i = 1; i <= p; i++) { wTranspose(0,(adjustPhi + *tau +i)) = arCoefs[(i-1)]; } } if(!Rf_isNull(maCoefs_s)) { for(R_len_t i = 1; i <= q; i++) { wTranspose(0,(adjustPhi + *tau + p + i)) = maCoefs[(i-1)]; } } arma::mat w = arma::trans(wTranspose); smallPhi = 0; arCoefs = 0; maCoefs = 0; kVector = 0; return List::create( Named("w") = w, Named("w.transpose") = wTranspose ); END_RCPP } SEXP makeCIMatrix(SEXP k_s, SEXP m_s) { BEGIN_RCPP double pi = arma::datum::pi; double lambda, *m; int *k; k = &INTEGER(k_s)[0]; m = &REAL(m_s)[0]; NumericMatrix C(*k, *k); for(int j = 1; j<=*k; j++) { lambda = (2 * pi * j) / *m; C((j-1),(j-1)) = std::cos(lambda); } return wrap(C); END_RCPP } SEXP makeSIMatrix(SEXP k_s, SEXP m_s) { BEGIN_RCPP double pi = arma::datum::pi; double lambda, *m; int *k; k = &INTEGER(k_s)[0]; m = &REAL(m_s)[0]; NumericMatrix S(*k, *k); for(int j = 1; j<=*k; j++) { lambda = (2 * pi * j) / *m; S((j-1),(j-1)) = std::sin(lambda); } return wrap(S); END_RCPP } SEXP makeAIMatrix(SEXP C_s, SEXP S_s, SEXP k_s) { int *k; k = &INTEGER(k_s)[0]; NumericMatrix C_r(C_s); NumericMatrix S_r(S_s); arma::mat C(C_r.begin(), C_r.nrow(), C_r.ncol(), false); arma::mat S(S_r.begin(), S_r.nrow(), S_r.ncol(), false); arma::mat A((*k * 2), (*k * 2)); A.submat(0,0, (*k -1), (*k -1)) = C; A.submat(0,*k, (*k -1), ((*k *2) -1)) = S; A.submat(*k,0, ((*k *2) -1), (*k -1)) = (-1 * S); A.submat(*k,*k, ((*k *2) -1), ((*k *2) -1)) = C; return wrap(A); } forecast/src/calcBATS.cpp0000644000176200001440000002170514323125536014740 0ustar liggesusers#include "calcBATS.h" using namespace Rcpp ; SEXP calcBATS(SEXP ys, SEXP yHats, SEXP wTransposes, SEXP Fs, SEXP xs, SEXP gs, SEXP es ){ BEGIN_RCPP NumericMatrix yr(ys); NumericMatrix yHatr(yHats); NumericMatrix wTransposer(wTransposes); NumericMatrix Fr(Fs); NumericMatrix xr(xs); NumericMatrix gr(gs); NumericMatrix er(es); int t; arma::mat y(yr.begin(), yr.nrow(), yr.ncol(), false); arma::mat yHat(yHatr.begin(), yHatr.nrow(), yHatr.ncol(), false); arma::mat wTranspose(wTransposer.begin(), wTransposer.nrow(), wTransposer.ncol(), false); arma::mat F(Fr.begin(), Fr.nrow(), Fr.ncol(), false); arma::mat x(xr.begin(), xr.nrow(), xr.ncol(), false); arma::mat g(gr.begin(), gr.nrow(), gr.ncol(), false); arma::mat e(er.begin(), er.nrow(), er.ncol(), false); for(t = 1; t < yr.ncol(); t++) { yHat.col(t) = wTranspose * x.col((t-1)); e(0,t) = y(0, t) - yHat(0, t); x.col(t) = F * x.col((t-1)) + g * e(0,t); } return List::create( Named("y.hat") = yHat, Named("e") = e, Named("x") = x ); END_RCPP } SEXP calcBATSFaster(SEXP ys, SEXP yHats, SEXP wTransposes, SEXP Fs, SEXP xs, SEXP gs, SEXP es, SEXP xNought_s, SEXP sPeriods_s, SEXP betaV, SEXP tau_s, SEXP p_s, SEXP q_s ) { BEGIN_RCPP NumericMatrix yr(ys); NumericMatrix yHatr(yHats); NumericMatrix wTransposer(wTransposes); NumericMatrix Fr(Fs); NumericMatrix xr(xs); NumericMatrix gr(gs); NumericMatrix er(es); NumericMatrix xNought_r(xNought_s); //IntegerVector sPeriodsR(sPeriods); int adjBeta, previousS, lengthArma, *tau, *p, *q, *sPeriods; R_len_t lengthSeasonal; tau = &INTEGER(tau_s)[0]; p = &INTEGER(p_s)[0]; q = &INTEGER(q_s)[0]; lengthArma = *p + *q; if(!Rf_isNull(sPeriods_s)) { sPeriods = INTEGER(sPeriods_s); lengthSeasonal = LENGTH(sPeriods_s); } if(!Rf_isNull(betaV)) { adjBeta = 1; } else { adjBeta = 0; } arma::mat y(yr.begin(), yr.nrow(), yr.ncol(), false); arma::mat yHat(yHatr.begin(), 
yHatr.nrow(), yHatr.ncol(), false); arma::mat wTranspose(wTransposer.begin(), wTransposer.nrow(), wTransposer.ncol(), false); arma::mat F(Fr.begin(), Fr.nrow(), Fr.ncol(), false); arma::mat x(xr.begin(), xr.nrow(), xr.ncol(), false); arma::mat g(gr.begin(), gr.nrow(), gr.ncol(), false); arma::mat e(er.begin(), er.nrow(), er.ncol(), false); arma::mat xNought(xNought_r.begin(), xNought_r.nrow(), xNought_r.ncol(), false); if(!Rf_isNull(sPeriods_s)) { //One //Rprintf("one-1\n"); yHat.col(0) = wTranspose.cols(0, adjBeta) * xNought.rows(0, adjBeta); //Rprintf("one-2\n"); previousS = 0; for(R_len_t i = 0; i < lengthSeasonal; i++) { //Rprintf("one-3\n"); yHat(0,0) = yHat(0,0) + xNought( (previousS + sPeriods[i] + adjBeta), 0); previousS += sPeriods[i]; } if(lengthArma > 0) { //Rprintf("bg-1"); yHat.col(0) = yHat(0,0) + wTranspose.cols((*tau + adjBeta + 1), (xNought.n_rows-1)) * xNought.rows((*tau + adjBeta + 1), (xNought.n_rows-1)); } //Two e(0,0) = y(0, 0) - yHat(0, 0); //Three //Rprintf("three-5\n"); x.submat(0, 0, adjBeta, 0) = F.submat(0,0,adjBeta,adjBeta) * xNought.rows(0,adjBeta); if(lengthArma > 0) { //Rprintf("bg-2"); x.submat(0, 0, adjBeta, 0) += F.submat(0,(adjBeta+ *tau + 1),adjBeta,(F.n_cols - 1)) * xNought.rows((adjBeta+ *tau + 1),(F.n_cols - 1)); } previousS = 0; for(R_len_t i = 0; i < lengthSeasonal; i++) { //Rprintf("three-7\n"); x((adjBeta+previousS+1),0) = xNought((adjBeta+previousS+sPeriods[i]),0); if(lengthArma > 0) { //Rprintf("bg-3"); x.submat((adjBeta+previousS+1),0, (adjBeta+previousS+1),0) = x.submat((adjBeta+previousS+1),0, (adjBeta+previousS+1),0) + F.submat((adjBeta + previousS + 1), (adjBeta+*tau+1), (adjBeta + previousS + 1), (F.n_cols-1)) * xNought.rows((adjBeta + *tau +1), (F.n_cols-1)); } //Rprintf("three-9\n"); x.submat((adjBeta + previousS + 2), 0, (adjBeta + previousS + sPeriods[i]), 0) = xNought.rows((adjBeta + previousS + 1), (adjBeta + previousS + sPeriods[i] -1)); previousS += sPeriods[i]; } if(*p > 0) { //Rprintf("bg-4"); x.submat((adjBeta+ *tau + 1),0,(adjBeta+ *tau + 1),0) = F.submat((adjBeta + *tau +1), (adjBeta + *tau +1), (adjBeta + *tau + 1), (F.n_cols-1)) * xNought.rows((adjBeta+*tau+1), (F.n_cols-1)); //Rprintf("bg-5"); ////error is HERE!!! if(*p > 1) { x.submat((adjBeta + *tau + 2),0,(adjBeta + *tau + *p),0) = xNought.rows((adjBeta + *tau + 1),(adjBeta + *tau + *p-1)); } } if(*q > 0) { //Rprintf("three-12\n"); x((adjBeta+ *tau + *p + 1),0) = 0; if(*q > 1) { //Rprintf("three-13\n"); x.submat((adjBeta+ *tau + *p + 2), 0, (adjBeta + *tau + *p + *q) , 0) = xNought.rows((adjBeta + *tau + *p + 1),(adjBeta + *tau + *p + *q - 1)); } } ///Temporary fix! 
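// The dense update x.col(0) += g * e(0,0) (kept below, commented out) is
// unrolled into the scalar assignments that follow, touching only the level,
// trend, seasonal and ARMA states whose gain entries are non-zero.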
//x.col(0) += g * e(0,0); //End /////////// x(0,0) += g(0,0) * e(0,0); if(adjBeta == 1) { x(1,0) += g(1,0) * e(0,0); } previousS = 0; for(R_len_t i = 0; i < lengthSeasonal; i++) { x((adjBeta+previousS+1),0) += g((adjBeta+previousS+1),0) * e(0,0); previousS += sPeriods[i]; } if(*p > 0) { x((adjBeta + *tau + 1),0) += e(0,0); if(*q > 0) { x((adjBeta + *tau + *p + 1),0) += e(0,0); } } else if(*q > 0) { x((adjBeta + *tau + 1),0) += e(0,0); } ///////////////////////////////// for(int t = 1; t < yr.ncol(); t++) { //Rprintf("point-x\n"); //One yHat.col(t) = wTranspose.cols(0, adjBeta) * x.submat(0, (t-1), adjBeta, (t-1)); previousS = 0; for(R_len_t i = 0; i < lengthSeasonal; i++) { //mod here //Rprintf("point-xx\n"); yHat(0,t) += x((previousS + sPeriods[i] + adjBeta), (t-1)); previousS += sPeriods[i]; } if(lengthArma > 0) { //Rprintf("bg-6"); yHat.col(t) += wTranspose.cols((*tau + adjBeta + 1), (xNought.n_rows-1)) * x.submat((*tau + adjBeta + 1), (t-1), (x.n_rows-1), (t-1)); } //Two //Rprintf("point-x4\n"); e(0,t) = y(0, t) - yHat(0, t); //Three //Rprintf("point-x5\n"); x.submat(0, t, adjBeta, t) = F.submat(0,0,adjBeta,adjBeta) * x.submat(0, (t-1), adjBeta, (t-1)); if(lengthArma > 0) { //Rprintf("bg-7"); x.submat(0, t, adjBeta, t) += F.submat(0,(adjBeta+ *tau + 1),adjBeta,(F.n_cols - 1)) * x.submat((adjBeta+ *tau + 1), (t-1), (F.n_cols - 1), (t-1)); } previousS = 0; for(R_len_t i = 0; i < lengthSeasonal; i++) { //Rprintf("point-x7\n"); x((adjBeta+previousS+1),t) = x((adjBeta+previousS+sPeriods[i]),(t-1)); if(lengthArma > 0) { //Rprintf("bg-8"); x.submat((adjBeta+previousS+1),t, (adjBeta+previousS+1),t) += F.submat((adjBeta + previousS + 1), (adjBeta+*tau+1), (adjBeta + previousS + 1), (F.n_cols-1)) * x.submat((adjBeta + *tau +1), (t-1), (F.n_cols-1), (t-1)); } //Rprintf("Three-L-9\n"); x.submat((adjBeta + previousS + 2), t, (adjBeta + previousS + sPeriods[i]), t) = x.submat((adjBeta + previousS + 1), (t-1), (adjBeta + previousS + sPeriods[i] -1), (t-1)); previousS += sPeriods[i]; } /* if(lengthArma > 0) { x.submat((adjBeta+ *tau + 1),t, (x.n_rows-1),t) = F.submat((adjBeta+ *tau + 1), (adjBeta+ *tau + 1), (F.n_rows - 1), (F.n_rows - 1)) * x.submat((adjBeta+ *tau + 1),(t-1), (x.n_rows-1),(t-1)); } */ if(*p > 0) { //Rprintf("bg-9"); x.submat((adjBeta+ *tau + 1),t, (adjBeta+ *tau + 1),t) = F.submat((adjBeta + *tau +1), (adjBeta + *tau +1), (adjBeta + *tau + 1), (F.n_cols-1)) * x.submat((adjBeta+*tau+1), (t-1), (F.n_cols-1), (t-1)); if(*p > 1) { x.submat((adjBeta + *tau + 2),t,(adjBeta + *tau + *p),t) = x.submat((adjBeta + *tau + 1), (t-1), (adjBeta + *tau + *p -1), (t-1)); } } if(*q > 0) { x((adjBeta+ *tau + *p + 1),t) = 0; if(*q > 1) { x.submat((adjBeta+ *tau + *p + 2), t, (adjBeta + *tau + *p + *q) , t) = x.submat((adjBeta + *tau + *p + 1), (t-1), (adjBeta + *tau + *p + *q - 1), (t-1)); } } ///Temporary fix! 
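// Same unrolling as at t = 0: the dense g * e(0,t) update is replaced by
// sparse per-state assignments inside the main loop.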
//x.col(t) += g * e(0,t); //End /////////// x(0,t) += g(0,0) * e(0,t); if(adjBeta == 1) { x(1,t) += g(1,0) * e(0,t); } previousS = 0; for(R_len_t i = 0; i < lengthSeasonal; i++) { x((adjBeta+previousS+1),t) += g((adjBeta+previousS+1),0) * e(0,t); previousS += sPeriods[i]; } if(*p > 0) { x((adjBeta + *tau + 1),t) += e(0,t); if(*q > 0) { x((adjBeta + *tau + *p + 1),t) += e(0,t); } } else if(*q > 0) { x((adjBeta + *tau + 1),t) += e(0,t); } ///////////////////////////////// } } else { yHat.col(0) = wTranspose * xNought; e(0,0) = y(0, 0) - yHat(0, 0); x.col(0) = F * xNought + g * e(0,0); for(int t = 1; t < yr.ncol(); t++) { yHat.col(t) = wTranspose * x.col((t-1)); e(0,t) = y(0, t) - yHat(0, t); x.col(t) = F * x.col((t-1)) + g * e(0,t); } } return R_NilValue; END_RCPP } SEXP calcWTilda(SEXP wTildaTransposes, SEXP Ds) { BEGIN_RCPP NumericMatrix wTildaTransposer(wTildaTransposes); NumericMatrix Dr(Ds); int t; arma::mat wTildaTranspose(wTildaTransposer.begin(), wTildaTransposer.nrow(), wTildaTransposer.ncol(), false); arma::mat D(Dr.begin(), Dr.nrow(), Dr.ncol(), false); for(t = 1; t < wTildaTransposer.nrow(); t++) { wTildaTranspose.row(t) = wTildaTranspose.row((t-1)) * D; } return wTildaTransposer; END_RCPP } forecast/src/calcBATS.h0000644000176200001440000000463714323125536014412 0ustar liggesusers#ifndef _forecast_CALCBATS #define _forecast_CALCBATS ///////////////////////////////////// // if unable to compile, please comment these lines // #define __GXX_EXPERIMENTAL_CXX0X__ 1 // #ifndef HAVE_ERRNO_T // typedef int errno_t; // #endif // #if __WORDSIZE == 64 // # ifndef __intptr_t_defined // typedef long int intptr_t; // # define __intptr_t_defined // # endif // typedef unsigned long int uintptr_t; // #else // # ifndef __intptr_t_defined // typedef int intptr_t; // # define __intptr_t_defined // # endif // typedef unsigned int uintptr_t; // #endif // #include // #include // #include // #include // #include // if unable to compile, please comment these lines ///////////////////////////////////// #include #include RcppExport SEXP calcBATS(SEXP ys, SEXP yHats, SEXP wTransposes, SEXP Fs, SEXP xs, SEXP gs, SEXP es ) ; RcppExport SEXP calcBATSFaster(SEXP ys, SEXP yHats, SEXP wTransposes, SEXP Fs, SEXP xs, SEXP gs, SEXP es, SEXP xNought_s, SEXP sPeriods_s, SEXP betaV, SEXP tau_s, SEXP p_s, SEXP q_s ) ; RcppExport SEXP calcWTilda(SEXP wTildaTransposes, SEXP Ds) ; RcppExport SEXP makeBATSWMatrix(SEXP smallPhi_s, SEXP sPeriods_s, SEXP arCoefs_s, SEXP maCoefs_s) ; RcppExport SEXP makeBATSGMatrix(SEXP alpha_s, SEXP beta_s, SEXP gammaVector_s, SEXP seasonalPeriods_s, SEXP p_s, SEXP q_s) ; RcppExport SEXP updateFMatrix(SEXP F_s, SEXP smallPhi_s, SEXP alpha_s, SEXP beta_s, SEXP gammaBold_s, SEXP ar_s, SEXP ma_s, SEXP tau_s) ; RcppExport SEXP updateWtransposeMatrix(SEXP wTranspose_s, SEXP smallPhi_s, SEXP tau_s, SEXP arCoefs_s, SEXP maCoefs_s, SEXP p_s, SEXP q_s) ; RcppExport SEXP updateGMatrix(SEXP g_s, SEXP gammaBold_s, SEXP alpha_s, SEXP beta_s, SEXP gammaVector_s, SEXP seasonalPeriods_s) ; //TBATS Functions RcppExport SEXP makeTBATSWMatrix(SEXP smallPhi_s, SEXP kVector_s, SEXP arCoefs_s, SEXP maCoefs_s, SEXP tau_s) ; RcppExport SEXP makeCIMatrix(SEXP k_s, SEXP m_s) ; RcppExport SEXP makeSIMatrix(SEXP k_s, SEXP m_s) ; RcppExport SEXP makeAIMatrix(SEXP C_s, SEXP S_s, SEXP k_s) ; RcppExport SEXP updateTBATSGammaBold(SEXP gammaBold_s, SEXP kVector_s, SEXP gammaOne_s, SEXP gammaTwo_s) ; RcppExport SEXP updateTBATSGMatrix(SEXP g_s, SEXP gammaBold_s, SEXP alpha_s, SEXP beta_s) ; RcppExport SEXP calcTBATSFaster(SEXP 
ys, SEXP yHats, SEXP wTransposes, SEXP Fs, SEXP xs, SEXP gs, SEXP es, SEXP xNought_s) ; #endif
forecast/src/Makevars0000644000176200001440000000016114456202551014345 0ustar liggesusersPKG_CXXFLAGS = $(SHLIB_OPENMP_CXXFLAGS) PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
forecast/src/Makevars.win0000644000176200001440000000016114456202551015141 0ustar liggesusersPKG_CXXFLAGS = $(SHLIB_OPENMP_CXXFLAGS) PKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS)
forecast/src/updateTBATSMatrices.cpp0000644000176200001440000000247414456202551017136 0ustar liggesusers#include "calcBATS.h" using namespace Rcpp ; SEXP updateTBATSGammaBold(SEXP gammaBold_s, SEXP kVector_s, SEXP gammaOne_s, SEXP gammaTwo_s) { BEGIN_RCPP NumericMatrix gammaBold(gammaBold_s); IntegerVector kVector(kVector_s); NumericVector gammaOne(gammaOne_s); NumericVector gammaTwo(gammaTwo_s); int endPos = 0; int numK = kVector.size(); for(int i =0; i < numK; i++) { for(int j = endPos; j < (kVector(i) + endPos); j++) { gammaBold(0,j)=gammaOne(i); } for(int j = (kVector(i) + endPos); j < ((2*kVector(i)) + endPos); j++) { gammaBold(0,j)=gammaTwo(i); } endPos += 2 * kVector(i); } return R_NilValue; END_RCPP } SEXP updateTBATSGMatrix(SEXP g_s, SEXP gammaBold_s, SEXP alpha_s, SEXP beta_s) { BEGIN_RCPP int adjBeta = 0; NumericMatrix g_r(g_s); //Rprintf("one\n"); g_r(0,0) = REAL(alpha_s)[0]; //Rprintf("two\n"); if(!Rf_isNull(beta_s)) { //Rprintf("three\n"); g_r(1,0) = REAL(beta_s)[0]; adjBeta = 1; } //Rprintf("four\n"); if(!Rf_isNull(gammaBold_s)) { NumericMatrix gammaBold_r(gammaBold_s); arma::mat gammaBold(gammaBold_r.begin(), gammaBold_r.nrow(), gammaBold_r.ncol(), false); arma::mat g(g_r.begin(), g_r.nrow(), g_r.ncol(), false); g.submat((adjBeta+1), 0,(adjBeta+gammaBold.n_cols), 0) = trans(gammaBold); } //Rprintf("five\n"); return R_NilValue; END_RCPP }
forecast/src/registerDynamicSymbol.c0000644000176200001440000000034514150370574017342 0ustar liggesusers// Registering Dynamic Symbols #include <R.h> #include <Rinternals.h> #include <R_ext/Rdynload.h> void R_init_forecast(DllInfo* info) { R_registerRoutines(info, NULL, NULL, NULL, NULL); R_useDynamicSymbols(info, TRUE); }
forecast/src/etspolyroot.c0000644000176200001440000003534614353422767015434 0ustar liggesusers/* Formerly src/appl/cpoly.c: * * Copyright (C) 1997-1998 Ross Ihaka * Copyright (C) 1999-2001 R Core Team * * cpoly finds the zeros of a complex polynomial. * * On Entry * * opr, opi - double precision vectors of real and * imaginary parts of the coefficients in * order of decreasing powers. * * degree - int degree of polynomial. * * * On Return * * zeror, zeroi - output double precision vectors of * real and imaginary parts of the zeros. * * fail - output int parameter, true only if * leading coefficient is zero or if cpoly * has found fewer than degree zeros. * * The program has been written to reduce the chance of overflow * occurring. If it does occur, there is still a possibility that * the zerofinder will work provided the overflowed quantity is * replaced by a large number. * * This is a C translation of the following. * * TOMS Algorithm 419 * Jenkins and Traub. * Comm. ACM 15 (1972) 97-99.
* * Ross Ihaka * February 1997 */ #include <math.h> /* for declaration of hypot */ #include <R_ext/Memory.h> /* for declaration of R_alloc */ #include <float.h> /* for FLT_RADIX */ #include <Rmath.h> /* for R_pow_di */ static void calct(Rboolean *); static Rboolean fxshft(int, double *, double *); static Rboolean vrshft(int, double *, double *); static void nexth(Rboolean); static void noshft(int); static void polyev(int, double, double, double *, double *, double *, double *, double *, double *); static double errev(int, double *, double *, double, double, double, double); static double cpoly_cauchy(int, double *, double *); static double cpoly_scale(int, double *, double, double, double, double); static void cdivid(double, double, double, double, double *, double *); /* Global Variables (too many!) */ static int nn; static double *pr, *pi, *hr, *hi, *qpr, *qpi, *qhr, *qhi, *shr, *shi; static double sr, si; static double tr, ti; static double pvr, pvi; static const double eta = DBL_EPSILON; static const double are = /* eta = */DBL_EPSILON; static const double mre = 2. * M_SQRT2 * /* eta, i.e. */DBL_EPSILON; static const double infin = DBL_MAX; void cpolyroot(double *opr, double *opi, int *degree, double *zeror, double *zeroi, Rboolean *fail) { static const double smalno = DBL_MIN; static const double base = (double)FLT_RADIX; static int d_n, i, i1, i2; static double zi, zr, xx, yy; static double bnd, xxx; Rboolean conv; int d1; double *tmp; static const double cosr =/* cos 94 */ -0.06975647374412529990; static const double sinr =/* sin 94 */ 0.99756405025982424767; xx = M_SQRT1_2;/* 1/sqrt(2) = 0.707.... */ yy = -xx; *fail = FALSE; nn = *degree; d1 = nn - 1; /* algorithm fails if the leading coefficient is zero. */ if (opr[0] == 0. && opi[0] == 0.) { *fail = TRUE; return; } /* remove the zeros at the origin if any. */ while (opr[nn] == 0. && opi[nn] == 0.) { d_n = d1-nn+1; zeror[d_n] = 0.; zeroi[d_n] = 0.; nn--; } nn++; /*-- Now, global var. nn := #{coefficients} = (relevant degree)+1 */ if (nn == 1) return; /* Use a single allocation as these are small */ tmp = (double *) R_alloc((size_t) (10*nn), sizeof(double)); pr = tmp; pi = tmp + nn; hr = tmp + 2*nn; hi = tmp + 3*nn; qpr = tmp + 4*nn; qpi = tmp + 5*nn; qhr = tmp + 6*nn; qhi = tmp + 7*nn; shr = tmp + 8*nn; shi = tmp + 9*nn; /* make a copy of the coefficients and shr[] = | p[] | */ for (i = 0; i < nn; i++) { pr[i] = opr[i]; pi[i] = opi[i]; shr[i] = hypot(pr[i], pi[i]); } /* scale the polynomial with factor 'bnd'. */ bnd = cpoly_scale(nn, shr, eta, infin, smalno, base); if (bnd != 1.) { for (i=0; i < nn; i++) { pr[i] *= bnd; pi[i] *= bnd; } } /* start the algorithm for one zero */ while (nn > 2) { /* calculate bnd, a lower bound on the modulus of the zeros. */ for (i=0 ; i < nn ; i++) shr[i] = hypot(pr[i], pi[i]); bnd = cpoly_cauchy(nn, shr, shi); /* outer loop to control 2 major passes */ /* with different sequences of shifts */ for (i1 = 1; i1 <= 2; i1++) { /* first stage calculation, no shift */ noshft(5); /* inner loop to select a shift */ for (i2 = 1; i2 <= 9; i2++) { /* shift is chosen with modulus bnd */ /* and amplitude rotated by 94 degrees */ /* from the previous shift */ xxx= cosr * xx - sinr * yy; yy = sinr * xx + cosr * yy; xx = xxx; sr = bnd * xx; si = bnd * yy; /* second stage calculation, fixed shift */ conv = fxshft(i2 * 10, &zr, &zi); if (conv) goto L10; } } /* the zerofinder has failed on two major passes */ /* return empty handed */ *fail = TRUE; return; /* the second stage jumps directly to the third stage iteration.
* if successful, the zero is stored and the polynomial deflated. */ L10: d_n = d1+2 - nn; zeror[d_n] = zr; zeroi[d_n] = zi; --nn; for (i=0; i < nn ; i++) { pr[i] = qpr[i]; pi[i] = qpi[i]; } }/*while*/ /* calculate the final zero and return */ cdivid(-pr[1], -pi[1], pr[0], pi[0], &zeror[d1], &zeroi[d1]); return; } /* Computes the derivative polynomial as the initial * polynomial and computes l1 no-shift h polynomials. */ static void noshft(int l1) { int i, j, jj, n = nn - 1, nm1 = n - 1; double t1, t2, xni; for (i=0; i < n; i++) { xni = (double)(nn - i - 1); hr[i] = xni * pr[i] / n; hi[i] = xni * pi[i] / n; } for (jj = 1; jj <= l1; jj++) { if (hypot(hr[n-1], hi[n-1]) <= eta * 10.0 * hypot(pr[n-1], pi[n-1])) { /* If the constant term is essentially zero, */ /* shift h coefficients. */ for (i = 1; i <= nm1; i++) { j = nn - i; hr[j-1] = hr[j-2]; hi[j-1] = hi[j-2]; } hr[0] = 0.; hi[0] = 0.; } else { cdivid(-pr[nn-1], -pi[nn-1], hr[n-1], hi[n-1], &tr, &ti); for (i = 1; i <= nm1; i++) { j = nn - i; t1 = hr[j-2]; t2 = hi[j-2]; hr[j-1] = tr * t1 - ti * t2 + pr[j-1]; hi[j-1] = tr * t2 + ti * t1 + pi[j-1]; } hr[0] = pr[0]; hi[0] = pi[0]; } } } /* Computes l2 fixed-shift h polynomials and tests for convergence. * initiates a variable-shift iteration and returns with the * approximate zero if successful. */ static Rboolean fxshft(int l2, double *zr, double *zi) { /* l2 - limit of fixed shift steps * zr,zi - approximate zero if convergence (result TRUE) * * Return value indicates convergence of stage 3 iteration * * Uses global (sr,si), nn, pr[], pi[], .. (all args of polyev() !) */ Rboolean pasd, boool, test; static double svsi, svsr; static int i, j, n; static double oti, otr; n = nn - 1; /* evaluate p at s. */ polyev(nn, sr, si, pr, pi, qpr, qpi, &pvr, &pvi); test = TRUE; pasd = FALSE; /* calculate first t = -p(s)/h(s). */ calct(&boool); /* main loop for one second stage step. */ for (j=1; j<=l2; j++) { otr = tr; oti = ti; /* compute next h polynomial and new t. */ nexth(boool); calct(&boool); *zr = sr + tr; *zi = si + ti; /* test for convergence unless stage 3 has */ /* failed once or this is the last h polynomial. */ if (!boool && test && j != l2) { if (hypot(tr - otr, ti - oti) >= hypot(*zr, *zi) * 0.5) { pasd = FALSE; } else if (! pasd) { pasd = TRUE; } else { /* the weak convergence test has been */ /* passed twice, start the third stage */ /* iteration, after saving the current */ /* h polynomial and shift. */ for (i = 0; i < n; i++) { shr[i] = hr[i]; shi[i] = hi[i]; } svsr = sr; svsi = si; if (vrshft(10, zr, zi)) { return TRUE; } /* the iteration failed to converge. */ /* turn off testing and restore */ /* h, s, pv and t. */ test = FALSE; for (i=1 ; i<=n ; i++) { hr[i-1] = shr[i-1]; hi[i-1] = shi[i-1]; } sr = svsr; si = svsi; polyev(nn, sr, si, pr, pi, qpr, qpi, &pvr, &pvi); calct(&boool); } } } /* attempt an iteration with final h polynomial */ /* from second stage. */ return(vrshft(10, zr, zi)); } /* carries out the third stage iteration. */ static Rboolean vrshft(int l3, double *zr, double *zi) { /* l3 - limit of steps in stage 3. * zr,zi - on entry contains the initial iterate; * if the iteration converges it contains * the final iterate on exit. * Returns TRUE if iteration converges * * Assign and uses GLOBAL sr, si */ Rboolean boool, b; static int i, j; static double r1, r2, mp, ms, tp, relstp; static double omp; b = FALSE; sr = *zr; si = *zi; /* main loop for stage three */ for (i = 1; i <= l3; i++) { /* evaluate p at s and test for convergence. 
*/ polyev(nn, sr, si, pr, pi, qpr, qpi, &pvr, &pvi); mp = hypot(pvr, pvi); ms = hypot(sr, si); if (mp <= 20. * errev(nn, qpr, qpi, ms, mp, /*are=*/eta, mre)) { goto L_conv; } /* polynomial value is smaller in value than */ /* a bound on the error in evaluating p, */ /* terminate the iteration. */ if (i != 1) { if (!b && mp >= omp && relstp < .05) { /* iteration has stalled. probably a */ /* cluster of zeros. do 5 fixed shift */ /* steps into the cluster to force */ /* one zero to dominate. */ tp = relstp; b = TRUE; if (relstp < eta) tp = eta; r1 = sqrt(tp); r2 = sr * (r1 + 1.) - si * r1; si = sr * r1 + si * (r1 + 1.); sr = r2; polyev(nn, sr, si, pr, pi, qpr, qpi, &pvr, &pvi); for (j = 1; j <= 5; ++j) { calct(&boool); nexth(boool); } omp = infin; goto L10; } else { /* exit if polynomial value */ /* increases significantly. */ if (mp * .1 > omp) return FALSE; } } omp = mp; /* calculate next iterate. */ L10: calct(&boool); nexth(boool); calct(&boool); if (!boool) { relstp = hypot(tr, ti) / hypot(sr, si); sr += tr; si += ti; } } return FALSE; L_conv: *zr = sr; *zi = si; return TRUE; } static void calct(Rboolean *boool) { /* computes t = -p(s)/h(s). * boool - logical, set true if h(s) is essentially zero. */ int n = nn - 1; double hvi, hvr; /* evaluate h(s). */ polyev(n, sr, si, hr, hi, qhr, qhi, &hvr, &hvi); *boool = hypot(hvr, hvi) <= are * 10. * hypot(hr[n-1], hi[n-1]); if (!*boool) { cdivid(-pvr, -pvi, hvr, hvi, &tr, &ti); } else { tr = 0.; ti = 0.; } } static void nexth(Rboolean boool) { /* calculates the next shifted h polynomial. * boool : if TRUE h(s) is essentially zero */ int j, n = nn - 1; double t1, t2; if (!boool) { for (j=1; j < n; j++) { t1 = qhr[j - 1]; t2 = qhi[j - 1]; hr[j] = tr * t1 - ti * t2 + qpr[j]; hi[j] = tr * t2 + ti * t1 + qpi[j]; } hr[0] = qpr[0]; hi[0] = qpi[0]; } else { /* if h(s) is zero replace h with qh. */ for (j=1; j < n; j++) { hr[j] = qhr[j-1]; hi[j] = qhi[j-1]; } hr[0] = 0.; hi[0] = 0.; } } /*--------------------- Independent Complex Polynomial Utilities ----------*/ static void polyev(int n, double s_r, double s_i, double *p_r, double *p_i, double *q_r, double *q_i, double *v_r, double *v_i) { /* evaluates a polynomial p at s by the horner recurrence * placing the partial sums in q and the computed value in v_. */ int i; double t; q_r[0] = p_r[0]; q_i[0] = p_i[0]; *v_r = q_r[0]; *v_i = q_i[0]; for (i = 1; i < n; i++) { t = *v_r * s_r - *v_i * s_i + p_r[i]; q_i[i] = *v_i = *v_r * s_i + *v_i * s_r + p_i[i]; q_r[i] = *v_r = t; } } static double errev(int n, double *qr, double *qi, double ms, double mp, double a_re, double m_re) { /* bounds the error in evaluating the polynomial by the horner * recurrence. * * qr,qi - the partial sum vectors * ms - modulus of the point * mp - modulus of polynomial value * a_re,m_re - error bounds on complex addition and multiplication */ double e; int i; e = hypot(qr[0], qi[0]) * m_re / (a_re + m_re); for (i=0; i < n; i++) e = e*ms + hypot(qr[i], qi[i]); return e * (a_re + m_re) - mp * m_re; } static double cpoly_cauchy(int n, double *pot, double *q) { /* Computes a lower bound on the moduli of the zeros of a polynomial * pot[1:nn] is the modulus of the coefficients. */ double f, x, delf, dx, xm; int i, n1 = n - 1; pot[n1] = -pot[n1]; /* compute upper estimate of bound. */ x = exp((log(-pot[n1]) - log(pot[0])) / (double) n1); /* if newton step at the origin is better, use it. */ if (pot[n1-1] != 0.) { xm = -pot[n1] / pot[n1-1]; if (xm < x) x = xm; } /* chop the interval (0,x) until f le 0.
     */
    for(;;) {
	xm = x * 0.1;
	f = pot[0];
	for (i = 1; i < n; i++)
	    f = f * xm + pot[i];
	if (f <= 0.0) {
	    break;
	}
	x = xm;
    }
    dx = x;

    /* do Newton iteration until x converges to two decimal places. */
    while (fabs(dx / x) > 0.005) {
	q[0] = pot[0];
	for(i = 1; i < n; i++)
	    q[i] = q[i-1] * x + pot[i];
	f = q[n1];
	delf = q[0];
	for(i = 1; i < n1; i++)
	    delf = delf * x + q[i];
	dx = f / delf;
	x -= dx;
    }
    return x;
}

static double cpoly_scale(int n, double *pot,
			  double eps, double BIG, double small, double base)
{
    /* Returns a scale factor to multiply the coefficients of the polynomial.
     * The scaling is done to avoid overflow and to avoid
     * undetected underflow interfering with the convergence criterion.
     * The factor is a power of the base.

     * pot[1:n] : modulus of coefficients of p
     * eps,BIG,
     * small,base - constants describing the floating point arithmetic.
     */

    int i, ell;
    double x, high, sc, lo, min_, max_;

    /* find largest and smallest moduli of coefficients. */
    high = sqrt(BIG);
    lo = small / eps;
    max_ = 0.;
    min_ = BIG;
    for (i = 0; i < n; i++) {
	x = pot[i];
	if (x > max_) max_ = x;
	if (x != 0. && x < min_) min_ = x;
    }

    /* scale only if there are very large or very small components. */
    if (min_ < lo || max_ > high) {
	x = lo / min_;
	if (x <= 1.)
	    sc = 1. / (sqrt(max_) * sqrt(min_));
	else {
	    sc = x;
	    if (BIG / sc > max_)
		sc = 1.0;
	}
	ell = (int) (log(sc) / log(base) + 0.5);
	return R_pow_di(base, ell);
    }
    else
	return 1.0;
}

static void cdivid(double ar, double ai, double br, double bi,
		   double *cr, double *ci)
{
    /* complex division c = a/b, i.e., (cr +i*ci) = (ar +i*ai) / (br +i*bi),
       avoiding overflow. */

    double d, r;

    if (br == 0. && bi == 0.) {
	/* division by zero, c = infinity. */
	*cr = *ci = R_PosInf;
    }
    else if (fabs(br) >= fabs(bi)) {
	r = bi / br;
	d = br + r * bi;
	*cr = (ar + ai * r) / d;
	*ci = (ai - ar * r) / d;
    }
    else {
	r = br / bi;
	d = bi + r * br;
	*cr = (ar * r + ai) / d;
	*ci = (ai * r - ar) / d;
    }
}

/* static double cpoly_cmod(double *r, double *i)
 * --> replaced by hypot() everywhere */
forecast/src/calcTBATS.cpp0000644000176200001440000000230214332530605015051 0ustar liggesusers
#include "calcBATS.h"

using namespace Rcpp;

// Runs the TBATS measurement and transition recursions in place:
//   yHat[t] = w' x[t-1],  e[t] = y[t] - yHat[t],  x[t] = F x[t-1] + g e[t].
// The arma matrices reuse the R memory (copy_aux_mem = false), so the
// results are written straight back into the R objects passed in.
SEXP calcTBATSFaster(SEXP ys, SEXP yHats, SEXP wTransposes, SEXP Fs, SEXP xs, SEXP gs, SEXP es, SEXP xNought_s) {
  BEGIN_RCPP

  NumericMatrix yr(ys);
  NumericMatrix yHatr(yHats);
  NumericMatrix wTransposer(wTransposes);
  NumericMatrix Fr(Fs);
  NumericMatrix xr(xs);
  NumericMatrix gr(gs);
  NumericMatrix er(es);
  NumericMatrix xNought_r(xNought_s);

  arma::mat y(yr.begin(), yr.nrow(), yr.ncol(), false);
  arma::mat yHat(yHatr.begin(), yHatr.nrow(), yHatr.ncol(), false);
  arma::mat wTranspose(wTransposer.begin(), wTransposer.nrow(), wTransposer.ncol(), false);
  arma::mat F(Fr.begin(), Fr.nrow(), Fr.ncol(), false);
  arma::mat x(xr.begin(), xr.nrow(), xr.ncol(), false);
  arma::mat g(gr.begin(), gr.nrow(), gr.ncol(), false);
  arma::mat e(er.begin(), er.nrow(), er.ncol(), false);
  arma::mat xNought(xNought_r.begin(), xNought_r.nrow(), xNought_r.ncol(), false);

  yHat.col(0) = wTranspose * xNought;
  e(0,0) = y(0, 0) - yHat(0, 0);
  x.col(0) = F * xNought + g * e(0,0);

  for(int t = 1; t < yr.ncol(); t++) {
    yHat.col(t) = wTranspose * x.col((t-1));
    e(0,t) = y(0, t) - yHat(0, t);
    x.col(t) = F * x.col((t-1)) + g * e(0,t);
  }

  return R_NilValue;

  END_RCPP
}
forecast/src/etsTargetFunction.h0000644000176200001440000000267014323125536016501 0ustar liggesusers
#include <R.h>
#include <vector>
#include <string>

extern "C" {

void etscalc(double *, int *, double *, int *, int *, int *, int *,
             double *, double *, double *, double *, double *, double *, double *, int *);

void
cpolyroot(double *opr, double *opi, int *degree,
          double *zeror, double *zeroi, Rboolean *fail);

}

class EtsTargetFunction {

public:
  void eval(const double* p_var, int p_var_length);
  void init(std::vector<double> & p_y, int p_nstate, int p_errortype,
            int p_trendtype, int p_seasontype, bool p_damped,
            std::vector<double> & p_lower, std::vector<double> & p_upper,
            std::string p_opt_crit, int p_nmse, std::string p_bounds, int p_m,
            bool p_optAlpha, bool p_optBeta, bool p_optGamma, bool p_optPhi,
            bool p_givenAlpha, bool p_givenBeta, bool p_givenGamma, bool p_givenPhi,
            double alpha, double beta, double gamma, double phi);
  double getObjVal() { return(objval); };

private:
  bool check_params();
  bool admissible();

  std::vector<double> par;
  std::vector<double> y;
  int nstate;
  int errortype;
  int trendtype;
  int seasontype;
  bool damped;
  std::vector<double> par_noopt;
  std::vector<double> lower;
  std::vector<double> upper;
  std::string opt_crit;
  int nmse;
  std::string bounds;
  int m;
  int n;
  std::vector<double> state;
  double alpha, beta, gamma, phi;
  std::vector<double> e;
  std::vector<double> amse;
  double lik, objval;

  bool optAlpha, optBeta, optGamma, optPhi,
       givenAlpha, givenBeta, givenGamma, givenPhi;
};
forecast/src/makeBATSMatrices.cpp0000644000176200001440000001163714323125536016446 0ustar liggesusers
#include "calcBATS.h"

using namespace Rcpp;

SEXP makeBATSWMatrix(SEXP smallPhi_s, SEXP sPeriods_s, SEXP arCoefs_s, SEXP maCoefs_s) {
  BEGIN_RCPP

  double *smallPhi, *arCoefs, *maCoefs;
  int *seasonalPeriods;
  int adjustPhi = 0;
  R_len_t numCols = 1, numSeasonal = 0, p = 0, q = 0;
  int lengthSeasonal = 0;

  if(!Rf_isNull(smallPhi_s)) {
    smallPhi = REAL(smallPhi_s);
    adjustPhi = 1;
    numCols = numCols + 1;
  }

  if(!Rf_isNull(sPeriods_s)) {
    seasonalPeriods = INTEGER(sPeriods_s);
    numSeasonal = LENGTH(sPeriods_s);
    for(R_len_t s = 0; s < numSeasonal; s++) {
      lengthSeasonal = lengthSeasonal + seasonalPeriods[s];
    }
    numCols = numCols + lengthSeasonal;
  } else {
    lengthSeasonal = 0;
  }

  if(!Rf_isNull(arCoefs_s)) {
    arCoefs = REAL(arCoefs_s);
    p = LENGTH(arCoefs_s);
    numCols = numCols + p;
  }

  if(!Rf_isNull(maCoefs_s)) {
    maCoefs = REAL(maCoefs_s);
    q = LENGTH(maCoefs_s);
    numCols = numCols + q;
  }

  NumericMatrix wTranspose_r(1, numCols);
  arma::mat wTranspose(wTranspose_r.begin(), wTranspose_r.nrow(), wTranspose_r.ncol(), false);

  if(!Rf_isNull(sPeriods_s)) {
    wTranspose.zeros();
    int position = adjustPhi;
    for(R_len_t s = 0; s < numSeasonal; s++) {
      position = position + seasonalPeriods[s];
      wTranspose(0,position) = 1;
    }
  }

  wTranspose(0,0) = 1;
  if(adjustPhi == 1) {
    wTranspose(0,1) = *smallPhi;
  }

  if(!Rf_isNull(arCoefs_s)) {
    for(R_len_t i = 1; i <= p; i++) {
      wTranspose(0,(adjustPhi + lengthSeasonal +i)) = arCoefs[(i-1)];
    }
  }

  if(!Rf_isNull(maCoefs_s)) {
    for(R_len_t i = 1; i <= q; i++) {
      wTranspose(0,(adjustPhi + lengthSeasonal + p + i)) = maCoefs[(i-1)];
    }
  }

  arma::mat w = arma::trans(wTranspose);

  smallPhi = 0;
  arCoefs = 0;
  maCoefs = 0;
  seasonalPeriods = 0;

  return List::create(
    Named("w") = w,
    Named("w.transpose") = wTranspose
  );

  END_RCPP
}

SEXP makeBATSGMatrix(SEXP alpha_s, SEXP beta_s, SEXP gammaVector_s, SEXP seasonalPeriods_s, SEXP p_s, SEXP q_s) {
  BEGIN_RCPP

  double *gammaVector;
  int *seasonalPeriods, *p, *q;
  int numCols, gammaLength = 0;
  int adjustBeta = 0;

  p = INTEGER(p_s);
  q = INTEGER(q_s);

  numCols = 1 + *p + *q;

  if(!Rf_isNull(beta_s)) {
    numCols = numCols + 1;
    adjustBeta = 1;
  }

  //Find the length of the gamma/seasonal bit
  if((!Rf_isNull(gammaVector_s))&&(!Rf_isNull(seasonalPeriods_s))) {
    gammaVector = REAL(gammaVector_s);
    seasonalPeriods = INTEGER(seasonalPeriods_s);
    for(R_len_t i = 0; i < LENGTH(seasonalPeriods_s); i++) {
      gammaLength =
        gammaLength + seasonalPeriods[i];
    }
    numCols = numCols + gammaLength;
  } else {
    gammaLength = 0;
  }

  NumericMatrix gTranspose_r(1, numCols);
  arma::mat gTranspose(gTranspose_r.begin(), gTranspose_r.nrow(), gTranspose_r.ncol(), false);
  gTranspose.zeros();

  gTranspose(0,0) = REAL(alpha_s)[0];

  if(!Rf_isNull(beta_s)) {
    gTranspose(0,1) = REAL(beta_s)[0];
  }

  //Copy the gamma/seasonal bits
  if((!Rf_isNull(gammaVector_s))&&(!Rf_isNull(seasonalPeriods_s))) {
    int position = adjustBeta + 1;
    gTranspose(0, position) = gammaVector[0];
    if(LENGTH(gammaVector_s) > 1) {
      for(R_len_t s = 0; s < (LENGTH(seasonalPeriods_s)-1); s++) {
        position = position + seasonalPeriods[s];
        gTranspose(0, position) = gammaVector[(s+1)];
      }
    }
  }

  if(*p != 0) {
    gTranspose(0, (adjustBeta+gammaLength+1)) = 1;
  }

  if(*q != 0) {
    gTranspose(0, (adjustBeta+gammaLength+ *p +1)) = 1;
  }

  arma::mat g(arma::trans(gTranspose));

  seasonalPeriods = 0;
  p = 0;
  q = 0;
  gammaVector = 0;

  if((!Rf_isNull(gammaVector_s))&&(!Rf_isNull(seasonalPeriods_s))) {
    arma::mat gammaBold = gTranspose.cols((1+adjustBeta), (adjustBeta+gammaLength));
    return List::create(
      Named("g") = g,
      Named("g.transpose") = gTranspose,
      Named("gamma.bold.matrix") = gammaBold
    );
  } else {
    return List::create(
      Named("g") = g,
      Named("g.transpose") = gTranspose,
      Named("gamma.bold.matrix") = R_NilValue
    );
  }

  END_RCPP
}

/*
SEXP makeFMatrix(SEXP alpha_s, SEXP beta_s, SEXP smallPhi_s, SEXP seasonalPeriods_s, SEXP gammaBoldMatrix_s, SEXP arCoefs_s, SEXP maCoefs_s) {
  BEGIN_RCPP

  NumericMatrix alpha_r(alpha_s);
  if(!Rf_isNull(beta_s)) {
    NumericMatrix beta_r(beta_s);
    bool indBeta = true;
  } else {
    bool indBeta = false;
  }
  if(!Rf_isNull(smallPhi_s)) {
    NumericMatrix smallPhi_r(smallPhi_s);
    bool indSmallPhi = true;
  } else {
    bool indSmallPhi = false;
  }
  if(!Rf_isNull(seasonalPeriods_s)) {
    NumericMatrix seasonalPeriods_r(seasonalPeriods_s);
    bool indSeasonalPeriods = true;
  } else {
    bool indSeasonalPeriods = false;
  }
  if(!Rf_isNull(gammaBoldMatrix_s)) {
    NumericMatrix gammaBoldMatrix_r(gammaBoldMatrix_s);
    bool indGammaBoldMatrix = true;
  } else {
    bool indGammaBoldMatrix = false;
  }
  if(!Rf_isNull(arCoefs_s)) {
    NumericMatrix arCoefs_r(arCoefs_s);
    bool indArCoefs = true;
  } else {
    bool indArCoefs = false;
  }
  if(!Rf_isNull(maCoefs_s)) {
    NumericMatrix maCoefs_r(maCoefs_s);
    bool indMaCoefs = true;
  } else {
    bool indMaCoefs = false;
  }

  arma::mat

  END_RCPP
}
*/
forecast/src/updateMatrices.cpp0000644000176200001440000001126514323125536016336 0ustar liggesusers
/*
 * updateMatrices.cpp
 *
 *  Created on: 03/11/2011
 *      Author: srazbash
 */
#include "calcBATS.h"

using namespace Rcpp;

SEXP updateFMatrix(SEXP F_s, SEXP smallPhi_s, SEXP alpha_s, SEXP beta_s, SEXP gammaBold_s, SEXP ar_s, SEXP ma_s, SEXP tau_s) {
  BEGIN_RCPP

  NumericMatrix F_r(F_s);
  arma::mat F(F_r.begin(), F_r.nrow(), F_r.ncol(), false);
  double *beta, *alpha = &REAL(alpha_s)[0];
  int *tau, p, q, betaAdjust;
  int zero = 0;

  if(!Rf_isNull(gammaBold_s)) {
    tau = &INTEGER(tau_s)[0];
  } else {
    tau = &zero;
  }

  if(!Rf_isNull(beta_s)) {
    beta = &REAL(beta_s)[0];
    double *smallPhi = &REAL(smallPhi_s)[0];
    F(0,1) = *smallPhi;
    F(1,1) = *smallPhi;
    betaAdjust = 1;
  } else {
    betaAdjust = 0;
  }

  if(!Rf_isNull(ar_s)) {
    //Rprintf("before arma::mat ar\n");
    NumericMatrix ar_r(ar_s);
    arma::mat ar(ar_r.begin(), ar_r.nrow(), ar_r.ncol(), false);
    //Rprintf("after arma::mat ar\n");
    p = ar.n_cols;
    //Rprintf("line-a-before\n");
    F.submat(0,(betaAdjust+ *tau+1),0,(betaAdjust+ *tau+p)) = *alpha * ar;
    //Rprintf("line-a-after\n");
    if(betaAdjust == 1) {
      //Rprintf("line-b-before\n");
      F.submat(1,(betaAdjust+ *tau+1),1,(betaAdjust+
 *tau+p)) = *beta * ar;
      //Rprintf("line-b-after\n");
    }
    if(*tau > 0) {
      //Rprintf("la\n");
      NumericMatrix gammaBold_r(gammaBold_s);
      //Rprintf("la-2\n");
      arma::mat gammaBold(gammaBold_r.begin(), gammaBold_r.nrow(), gammaBold_r.ncol(), false);
      //Rprintf("la-3\n");
      //arma::mat gammaBold = as<arma::mat>(gammaBold_s);
      arma::mat B = trans(gammaBold) * ar;
      //Rprintf("line-c-before\n");
      F.submat((1+betaAdjust),(betaAdjust+ *tau+1), (betaAdjust+ *tau), (betaAdjust+ *tau+p)) = B;
      //Rprintf("line-c-after\n");
    }
    //Rprintf("line-d-before\n");
    F.submat((betaAdjust+ *tau+1),(betaAdjust+ *tau+1),(betaAdjust+ *tau+1),(betaAdjust+ *tau+p)) = ar;
    //Rprintf("line-d-after\n");
  } else {
    p = 0;
  }

  if(!Rf_isNull(ma_s)) {
    NumericMatrix ma_r(ma_s);
    arma::mat ma(ma_r.begin(), ma_r.nrow(), ma_r.ncol(), false);
    q = ma.n_cols;
    //Rprintf("one-before\n");
    F.submat(0,(betaAdjust+ *tau+p+1),0,(betaAdjust+ *tau+p+q)) = *alpha * ma;
    //Rprintf("one-after\n");
    if(betaAdjust == 1) {
      //Rprintf("two-before\n");
      F.submat(1,(betaAdjust+ *tau+p+1),1,(betaAdjust+ *tau+p+q)) = *beta * ma;
      ///Rprintf("two-after\n");
    }
    if(*tau > 0) {
      //arma::mat gammaBold = as<arma::mat>(gammaBold_s);
      NumericMatrix gammaBold_r(gammaBold_s);
      arma::mat gammaBold(gammaBold_r.begin(), gammaBold_r.nrow(), gammaBold_r.ncol(), false);
      arma::mat C = trans(gammaBold) * ma;
      //Rprintf("three-before\n");
      F.submat((1+betaAdjust),(betaAdjust+ *tau+p+1), (betaAdjust+ *tau), (betaAdjust+ *tau+p+q)) = C;
      //Rprintf("three-after\n");
    }
    if(!Rf_isNull(ar_s)) {
      //Rprintf("four-before\n");
      F.submat((betaAdjust+ *tau+1), (betaAdjust+ *tau+p+1), (betaAdjust+ *tau+1), (betaAdjust+ *tau+p+q)) = ma;
      //Rprintf("four-after\n");
    }
  } else {
    q = 0;
  }

  return R_NilValue;

  END_RCPP
}

SEXP updateWtransposeMatrix(SEXP wTranspose_s, SEXP smallPhi_s, SEXP tau_s, SEXP arCoefs_s, SEXP maCoefs_s, SEXP p_s, SEXP q_s) {
  BEGIN_RCPP

  NumericMatrix wTranspose(wTranspose_s);
  double *arCoefs, *maCoefs;
  int *p, *q, *tau, adjBeta = 0;

  p = &INTEGER(p_s)[0];
  q = &INTEGER(q_s)[0];
  tau = &INTEGER(tau_s)[0];

  if(!Rf_isNull(smallPhi_s)) {
    adjBeta = 1;
    wTranspose(0,1) = REAL(smallPhi_s)[0];
  }

  if(*p > 0) {
    arCoefs = REAL(arCoefs_s);
    for(int i = 1; i <= *p; i++) {
      wTranspose(0,(adjBeta + *tau + i)) = arCoefs[(i - 1)];
    }
    if(*q > 0) {
      maCoefs = REAL(maCoefs_s);
      for(int i = 1; i <= *q; i++) {
        wTranspose(0,(adjBeta + *tau + *p + i)) = maCoefs[(i - 1)];
      }
    }
  } else if(*q > 0) {
    maCoefs = REAL(maCoefs_s);
    for(int i = 1; i <= *q; i++) {
      wTranspose(0,(adjBeta + *tau + i)) = maCoefs[(i - 1)];
    }
  }

  return R_NilValue;

  END_RCPP
}

SEXP updateGMatrix(SEXP g_s, SEXP gammaBold_s, SEXP alpha_s, SEXP beta_s, SEXP gammaVector_s, SEXP seasonalPeriods_s) {
  BEGIN_RCPP

  int adjBeta = 0, *seasonalPeriods;
  double *gammaVector;
  NumericMatrix g(g_s);

  g(0,0) = REAL(alpha_s)[0];
  if(!Rf_isNull(beta_s)) {
    g(1,0) = REAL(beta_s)[0];
    adjBeta = 1;
  }

  if((!Rf_isNull(gammaVector_s))&&(!Rf_isNull(seasonalPeriods_s))) {
    NumericMatrix gammaBold(gammaBold_s);
    seasonalPeriods = INTEGER(seasonalPeriods_s);
    gammaVector = REAL(gammaVector_s);
    int position = adjBeta + 1;
    int bPos = 0;
    gammaBold(0,bPos) = gammaVector[0];
    g(position, 0) = gammaVector[0];
    if(LENGTH(gammaVector_s) > 1) {
      for(R_len_t s = 0; s < (LENGTH(seasonalPeriods_s)-1); s++) {
        position = position + seasonalPeriods[s];
        bPos = bPos + seasonalPeriods[s];
        g(position, 0) = gammaVector[(s+1)];
      }
    }
  }

  return R_NilValue;

  END_RCPP
}
forecast/src/etsTargetFunction.cpp0000644000176200001440000001547414323125536017041 0ustar liggesusers
#include <R.h>
#include <math.h> // for isnan, math.h is needed
//#include <iostream>
#include "etsTargetFunction.h"
#include <R_ext/Print.h>

void
EtsTargetFunction::init(std::vector<double> & p_y, int p_nstate, int p_errortype,
    int p_trendtype, int p_seasontype, bool p_damped,
    std::vector<double> & p_lower, std::vector<double> & p_upper,
    std::string p_opt_crit, int p_nmse, std::string p_bounds, int p_m,
    bool p_optAlpha, bool p_optBeta, bool p_optGamma, bool p_optPhi,
    bool p_givenAlpha, bool p_givenBeta, bool p_givenGamma, bool p_givenPhi,
    double alpha, double beta, double gamma, double phi) {

  this->y = p_y;
  this->n = this->y.size();
  this->nstate = p_nstate;
  this->errortype = p_errortype;
  this->trendtype = p_trendtype;
  this->seasontype = p_seasontype;
  this->damped = p_damped;
  this->lower = p_lower;
  this->upper = p_upper;
  this->opt_crit = p_opt_crit;
  this->nmse = p_nmse;
  this->bounds = p_bounds;
  this->m = p_m;

  this->optAlpha = p_optAlpha;
  this->optBeta = p_optBeta;
  this->optGamma = p_optGamma;
  this->optPhi = p_optPhi;

  this->givenAlpha = p_givenAlpha;
  this->givenBeta = p_givenBeta;
  this->givenGamma = p_givenGamma;
  this->givenPhi = p_givenPhi;

  /*
  Rprintf("optAlpha: %d\n", optAlpha);
  Rprintf("optBeta: %d\n", optBeta);
  Rprintf("optGamma: %d\n", optGamma);
  Rprintf("optPhi: %d\n", optPhi);
  Rprintf("givenAlpha: %d\n", givenAlpha);
  Rprintf("givenBeta: %d\n", givenBeta);
  Rprintf("givenGamma: %d\n", givenGamma);
  Rprintf("givenPhi: %d\n", givenPhi);
  */

  this->alpha = alpha;
  this->beta = beta;
  this->gamma = gamma;
  this->phi = phi;

  this->lik = 0;
  this->objval = 0;

  // for(int i=0; i < 10; i++) this->amse.push_back(0);
  // for(int i=0; i < n; i++) this->e.push_back(0);

  this->amse.resize(30, 0);
  this->e.resize(n, 0);
}

void EtsTargetFunction::eval(const double* p_par, int p_par_length) {

  bool equal=true;

  // ---------show params----------
  // Rprintf("par: ");
  // for(int j=0;j < p_par_length;j++) {
  //   Rprintf("%f ", p_par[j]);
  // }
  // Rprintf(" objval: %f\n", this->objval);
  //Rprintf("\n");
  // ---------show params----------

  // Check if the parameter configuration has changed, if not, just return.
  if(p_par_length != this->par.size()) {
    equal=false;
  } else {
    for(int j=0;j < p_par_length;j++) {
      if(p_par[j] != this->par[j]) {
        equal=false;
        break;
      }
    }
  }

  if(equal) return;

  this->par.clear();

  for(int j=0;j < p_par_length;j++) {
    this->par.push_back(p_par[j]);
  }

  int j=0;
  if(optAlpha) this->alpha = par[j++];
  if(optBeta) this->beta = par[j++];
  if(optGamma) this->gamma = par[j++];
  if(optPhi) this->phi = par[j++];

  if(!this->check_params()) {
    this->objval = R_PosInf;
    return;
  }

  this->state.clear();
  for(int i=par.size()-nstate; i < par.size(); i++) {
    this->state.push_back(par[i]);
  }

  // Add extra state
  if(seasontype!=0) { //"N"=0, "M"=2

    //init.state <- c(init.state, m*(seasontype==2) - sum(init.state[(2+(trendtype!=0)):nstate]))
    double sum=0;
    for(int i=(1+((trendtype!=0) ?
 1 : 0)); i<nstate; i++) sum += state[i];

    double new_state = m*((seasontype==2) ? 1 : 0) - sum;
    state.push_back(new_state);

    // Check states
    if(seasontype==2 && new_state < 0) {
      this->objval = R_PosInf;
      return;
    }

    // seas.states <- init.state[-(1:(1+(trendtype!=0)))]
    //if(min(seas.states) < 0)
    //  return(1e8)
  };

  int p = state.size();
  for(int i=0; i <= p*this->y.size(); i++) state.push_back(0);

  etscalc(&this->y[0], &this->n, &this->state[0], &this->m,
          &this->errortype, &this->trendtype, &this->seasontype,
          &this->alpha, &this->beta, &this->gamma, &this->phi,
          &this->e[0], &this->lik, &this->amse[0], &this->nmse);

  // Avoid perfect fits
  if (this->lik < -1e10) this->lik = -1e10;

  // isnan() is a C99 function
  //if (isnan(this->lik)) this->lik = 1e8;
  if (ISNAN(this->lik)) this->lik = R_PosInf;

  if(fabs(this->lik+99999) < 1e-7) this->lik = R_PosInf;

  if(this->opt_crit=="lik") this->objval = this->lik;
  else if(this->opt_crit=="mse") this->objval = this->amse[0];
  else if(this->opt_crit=="amse") {
    //return(mean(e$amse[1:nmse]))
    double mean=0;
    for(int i=0;i < this->nmse;i++) {
      mean+=amse[i]/this->nmse;
    }
    this->objval=mean;
  }
  else if(this->opt_crit=="sigma") {
    //return(mean(e$e^2))
    double mean=0;
    int ne=e.size();
    for(int i=0;i < ne;i++) mean += e[i]*e[i]/ne;
    this->objval=mean;
  }
  else if(this->opt_crit=="mae") {
    //return(mean(abs(e$e)))
    double mean=0;
    int ne=e.size();
    for(int i=0;i < ne;i++) mean += fabs(e[i])/ne;
    this->objval=mean;
  }
}

bool EtsTargetFunction::check_params() {

  if(bounds != "admissible") {
    if(optAlpha) {
      if(alpha < lower[0] || alpha > upper[0]) return(false);
    }
    if(optBeta) {
      if(beta < lower[1] || beta > alpha || beta > upper[1]) return(false);
    }
    if(optPhi) {
      if(phi < lower[3] || phi > upper[3]) return(false);
    }
    if(optGamma) {
      if(gamma < lower[2] || gamma > 1-alpha || gamma > upper[2]) return(false);
    }
  }

  if(bounds != "usual") {
    if(!admissible()) return(false);
  }

  return(TRUE);
}

bool EtsTargetFunction::admissible() {

  if(phi < 0 || phi > 1+1e-8) return(false);

  //If gamma was set by the user or it is optimized, the bounds need to be enforced
  if(!optGamma && !givenGamma) {
    if(alpha < 1-1/phi || alpha > 1+1/phi) return(false);

    if(optBeta || givenBeta) {
      if(beta < alpha * (phi-1) || beta > (1+phi)*(2-alpha)) return(false);
    }
  }
  else if(m > 1) { //Seasonal model

    if(!optBeta && !givenBeta) beta = 0;

    //max(1-1/phi-alpha,0)
    double d = 1-1/phi-alpha;
    if(gamma < ((d > 0) ? d : 0) || gamma > 1+1/phi-alpha) return(false);

    if(alpha < 1-1/phi-gamma*(1-m+phi+phi*m)/(2*phi*m)) return(false);

    if(beta < -(1-phi)*(gamma/m+alpha)) return(false);

    // End of easy tests. Now use characteristic equation
    std::vector<double> opr;
    opr.push_back(1);
    opr.push_back(alpha+beta-phi);
    for(int i=0;i<m-2;i++) opr.push_back(alpha+beta-alpha*phi);
    opr.push_back(alpha+beta-alpha*phi+gamma-1);
    opr.push_back(phi*(1-alpha-gamma));

    int degree = opr.size()-1;
    std::vector<double> opi;
    opi.resize(opr.size(),0);

    std::vector<double> zeror(degree);
    std::vector<double> zeroi(degree);

    Rboolean fail;

    cpolyroot(&opr[0], &opi[0], &degree, &zeror[0], &zeroi[0], &fail);

    double max = 0;
    for(int i=0;i<degree;i++) {
      double abs_val = sqrt(zeror[i]*zeror[i] + zeroi[i]*zeroi[i]);
      if(abs_val>max) max = abs_val;
    }

    //Rprintf("maxpolyroot: %f\n", max);

    if(max > 1+1e-10) return(false);

    // P <- c(phi*(1-alpha-gamma),alpha+beta-alpha*phi+gamma-1,rep(alpha+beta-alpha*phi,m-2),(alpha+beta-phi),1)
    // roots <- polyroot(P)
    // if(max(abs(roots)) > 1+1e-10) return(false);
  }

  //Passed all tests
  return(true);
}
forecast/vignettes/0000755000176200001440000000000014474112176014077 5ustar liggesusers
forecast/vignettes/jsslogo.jpg0000644000176200001440000005221314150370574016263 0ustar liggesusers
[jsslogo.jpg: binary JPEG image data (JSS logo); not reproducible as text]
forecast/vignettes/orcidlink.sty0000644000176200001440000000433314323126533016614 0ustar liggesusers
%%
%% This is file `orcidlink.sty',
%% generated with the docstrip utility.
%%
%% The original source files were:
%%
%% orcidlink.dtx  (with options: `package')
%%
%% This is a generated file.
%%
%% Copyright (C) 2020 by Leo C. Stein
%% --------------------------------------------------------------------------
%% This work may be distributed and/or modified under the
%% conditions of the LaTeX Project Public License, either version 1.3
%% of this license or (at your option) any later version.
%% The latest version of this license is in
%%   http://www.latex-project.org/lppl.txt
%% and version 1.3 or later is part of all distributions of LaTeX
%% version 2005/12/01 or later.
%%
\NeedsTeXFormat{LaTeX2e}[1994/06/01]
\ProvidesPackage{orcidlink}
    [2021/06/11 v1.0.4 Linked ORCiD logo macro package]

%% All I did was package up Milo's code on TeX.SE,
%% see https://tex.stackexchange.com/a/445583/34063
\RequirePackage{hyperref}
\RequirePackage{tikz}

\ProcessOptions\relax

\usetikzlibrary{svg.path}

\definecolor{orcidlogocol}{HTML}{A6CE39}
\tikzset{
  orcidlogo/.pic={
    \fill[orcidlogocol] svg{M256,128c0,70.7-57.3,128-128,128C57.3,256,0,198.7,0,128C0,57.3,57.3,0,128,0C198.7,0,256,57.3,256,128z};
    \fill[white] svg{M86.3,186.2H70.9V79.1h15.4v48.4V186.2z}
                 svg{M108.9,79.1h41.6c39.6,0,57,28.3,57,53.6c0,27.5-21.5,53.6-56.8,53.6h-41.8V79.1z M124.3,172.4h24.5c34.9,0,42.9-26.5,42.9-39.7c0-21.5-13.7-39.7-43.7-39.7h-23.7V172.4z}
                 svg{M88.7,56.8c0,5.5-4.5,10.1-10.1,10.1c-5.6,0-10.1-4.6-10.1-10.1c0-5.6,4.5-10.1,10.1-10.1C84.2,46.7,88.7,51.3,88.7,56.8z};
  }
}

%% Reciprocal of the height of the svg whose source is above.  The
%% original generates a 256pt high graphic; this macro holds 1/256.
\newcommand{\@OrigHeightRecip}{0.00390625}

%% We will compute the current X height to make the logo the right height
\newlength{\@curXheight}

\DeclareRobustCommand\orcidlink[1]{%
\texorpdfstring{%
\setlength{\@curXheight}{\fontcharht\font`X}%
\href{https://orcid.org/#1}{\XeTeXLinkBox{\mbox{%
\begin{tikzpicture}[yscale=-\@OrigHeightRecip*\@curXheight,
xscale=\@OrigHeightRecip*\@curXheight,transform shape]
\pic{orcidlogo};
\end{tikzpicture}%
}}}}{}}

\endinput
%%
%% End of file `orcidlink.sty'.
forecast/vignettes/JSS-paper.bib0000644000176200001440000005323414323125536016325 0ustar liggesusers
@STRING{advap = {Advances in Applied Probability}}
@STRING{amath = {Annals of Mathematics}}
@STRING{ams = {The Annals of Mathematical Statistics}}
@STRING{amstat = {The American Statistician}}
@STRING{annalap = {The Annals of Applied Probability}}
@STRING{annalp = {The Annals of Probability}}
@STRING{annals = {The Annals of Statistics}}
@STRING{anneug = {Annals of Eugenics}}
@STRING{anzjs = {Australian \& New Zealand Journal of Statistics}}
@STRING{appstat = {Applied Statistics}}
@STRING{ausjstat = {Australian Journal of Statistics}}
@STRING{bioc = {Biometrics}}
@STRING{bioj = {Biometrical Journal}}
@STRING{biok = {Biometrika}}
@STRING{chance = {Chance}}
@STRING{cjs = {The Canadian Journal of Statistics}}
@STRING{comms = {Communications in Statistics}}
@STRING{commscs = {Communications in Statistics: Computation \& Simulation}}
@STRING{commstm = {Communications in Statistics: Theory \& Methods}}
@STRING{compstat = {Computational Statistics}}
@STRING{csda = {Computational Statistics \& Data Analysis}}
@STRING{debs = {Department of Econometrics \& Business Statistics, Monash University}}
@STRING{ejor = {European Journal of Operational Research}}
@STRING{ijf = {International Journal of Forecasting}}
@STRING{isr = {International Statistical Review}}
@STRING{jap = {Journal of Applied Probability}}
@STRING{jas = {Journal of Applied Statistics}}
@STRING{jasa = {Journal of the American Statistical Association}}
@STRING{jcgs = {Journal of Computational \& Graphical Statistics}}
@STRING{je = {Journal of Econometrics}}
@STRING{jes = {Journal of Educational Statistics}}
@STRING{jf = {Journal of Forecasting}}
@STRING{jma = {Journal of Multivariate Analysis}}
@STRING{jors = {Journal of the Operational Research Society}}
@STRING{jos = {Journal of Official Statistics}}
@STRING{jrssa = {Journal of the Royal Statistical Society A}}
@STRING{jrssb = {Journal of the Royal Statistical Society B}}
@STRING{jscs = {Journal of Statistical Computation \& Simulation}}
@STRING{jspi = {Journal of Statistical Planning \& Inference}}
@STRING{jtp = {Journal of Theoretical Probability}}
@STRING{jtsa = {Journal of Time Series Analysis}}
@STRING{mansci = {Management Science}}
@STRING{psyka = {Psychometrika}}
@STRING{ptrf = {Probability Theory \& Related Fields}}
@STRING{sankhya = {Sankhy\={a}}}
@STRING{sasj = {South African Statistical Journal}}
@STRING{scandjs = {Scandinavian Journal of Statistics: Theory \& Applications}}
@STRING{siamjssc = {SIAM Journal of Scientific \& Statistical Computing}}
@STRING{jss = {Journal of Statistical Software}}
@STRING{spl = {Statistics \& Probability Letters}}
@STRING{statmed = {Statistics in Medicine}}
@STRING{statsci = {Statistical Science}}
@STRING{statsin = {Statistica Sinica}}
@STRING{survmeth = {Survey Methodology}}
@STRING{tech = {Technometrics}}
@STRING{toap = {to appear}}
@STRING{tpaa = {Theory of Probability \& its Applications}}
@STRING{tstat = {The Statistician}}

@BOOK{AM79, title = {Optimal
Filtering}, publisher = {Prentice-Hall}, year = {1979}, author = {B. D. O. Anderson and J. B. Moore}, address = {Englewood Cliffs}, }

@BOOK{Aoki87, title = {State Space Modeling of Time Series}, publisher = {Springer-Verlag}, year = {1987}, author = {Masanao Aoki}, address = {Berlin}, }

@ARTICLE{Archibald90, author = {Blyth C. Archibald}, title = {Parameter Space of the {H}olt-{W}inters' Model}, journal = ijf, year = {1990}, volume = {6}, pages = {199--209}, fileno = {1151}, keywords = {Exponential smoothing; seasonal; coefficient choice; stability; evaluation}, pdf = {Archibald90.pdf}, }

@ARTICLE{AN00, author = {V. Assimakopoulos and K. Nikolopoulos}, title = {The Theta Model: A Decomposition Approach to Forecasting}, journal = ijf, year = {2000}, volume = {16}, pages = {521-530}, fileno = {1047}, keywords = {M3-Competition; Time series; Univariate forecasting method}, }

@BOOK{BOK05, title = {Forecasting, Time Series and Regression: An Applied Approach}, publisher = {Thomson Brooks/Cole}, year = {2005}, author = {B. L. Bowerman and R. T. O'Connell and Anne B. Koehler}, address = {Belmont CA}, }

@BOOK{BDbook91, title = {Time Series: Theory and Methods}, publisher = {Springer-Verlag}, year = {1991}, author = {P. J. Brockwell and R. A Davis}, address = {New York}, edition = {2nd}, }

@BOOK{BDbook91a, title = {Introduction to Time Series and Forecasting}, publisher = {John Wiley \& Sons}, year = {2002}, edition = {2nd}, author = {P.J. Brockwell and R.A. Davis}, }

@ARTICLE{CH95, author = {F. Canova and B. E. Hansen}, title = {Are Seasonal Patterns Constant Over Time? {A} Test for Seasonal Stability}, journal = {Journal of Business and Economic Statistics}, year = {1995}, volume = {13}, pages = {237-252}, file = {CH95.pdf:CH95.pdf:PDF}, pdf = {CH95.pdf}, }

@ARTICLE{CY91, author = {Chris Chatfield and Mohammad Yar}, title = {Prediction Intervals for Multiplicative {H}olt-{W}inters}, journal = ijf, year = {1991}, volume = {7}, pages = {31-37}, keywords = {Holt-Winters; Prediction intervals; Exponential smoothing}, }

@ARTICLE{Croston72, author = {J. D. Croston}, title = {Forecasting and Stock Control for Intermittent Demands}, journal = {Operational Research Quarterly}, year = {1972}, volume = {23}, pages = {289--304}, number = {3}, pdf = {Croston72.pdf}, }

@ARTICLE{DF81, author = {D. A. Dickey and W. A. Fuller}, title = {Likelihood Ratio Statistics for Autoregressive Time Series with a Unit Root}, journal = {Econometrica}, year = {1981}, volume = {49}, pages = {1057-1071}, }

@BOOK{DKbook01, title = {Time Series Analysis by State Space Methods}, publisher = {Oxford University Press}, year = {2001}, author = {J Durbin and Siem J Koopman}, address = {Oxford}, }

@ARTICLE{Gardner85, author = {Gardner, Jr, Everette S.}, title = {Exponential Smoothing: The State of the Art}, journal = jf, year = {1985}, volume = {4}, pages = {1-28}, keywords = {Bibliography; exponential smoothing; comparative methods; ARIMA; exponential smoothing; control charts; CUSUM; evaluation-forecasting monitoring systems; exponential smoothing; adaptive exponential smoothing-adaptive; coefficient choice; higher-order; review; theory seasonality-estimation; harmonics; tracking signal-methodology; use-inventory control}, }

@ARTICLE{GM85, author = {Gardner, Jr, Everette S.
and Ed McKenzie}, title = {Forecasting Trends in Time Series}, journal = mansci, year = {1985}, volume = {31}, pages = {1237-1246}, number = {10}, keywords = {Forecasting; time series}, }

@TECHREPORT{Gomez98, author = {Victor G\'{o}mez}, title = {Automatic Model Identification in the Presence of Missing Observations and Outliers}, institution = {Ministerio de Econom{\'\i}a y Hacienda, Direcci{\'o}n General de An{\'a}lisis y Programaci{\'o}n Presupuestaria}, year = {1998}, type = {Working paper}, number = {D-98009}, pdf = {Gomez98.pdf}, }

@TECHREPORT{TRAMOSEATS98, author = {Victor G\'{o}mez and Agust\'{i}n Maravall}, title = {Programs \pkg{TRAMO} and \pkg{SEATS}, Instructions for the Users}, institution = {Ministerio de Econom{\'\i}a y Hacienda, Direcci{\'o}n General de An{\'a}lisis y Programaci{\'o}n Presupuestaria}, year = {1998}, type = {Working paper}, number = {97001}, month = {June}, edition = {Beta version}, }

@ARTICLE{ForecastPro00, author = {Robert L Goodrich}, title = {The \pkg{Forecast Pro} Methodology}, journal = ijf, year = {2000}, volume = {16}, pages = {533-535}, number = {4}, pdf = {ForecastPro00.pdf}, }

@ARTICLE{HR82, author = {E. J. Hannan and J. Rissanen}, title = {Recursive Estimation of Mixed Autoregressive-Moving Average Order}, journal = biok, year = {1982}, volume = {69}, pages = {81-94}, number = {1}, keywords = {Autoregressive-moving average; best coding; martingale; recursive calculation; strong convergence; vector autoregression}, }

@ARTICLE{Hendry97, author = {David F. Hendry}, title = {The Econometrics of Macroeconomic Forecasting}, journal = {The Economic Journal}, year = {1997}, volume = {107}, pages = {1330-1357.}, number = {444}, }

@ARTICLE{HEGY90, author = {S. Hylleberg and R. Engle and C. Granger and B. Yoo}, title = {Seasonal Integration and Cointegration}, journal = {Journal of Econometrics}, year = {1990}, volume = {44}, pages = {215-238}, }

@ARTICLE{Hyndman01, author = {Rob J Hyndman}, title = {It's Time To Move from `What' To `Why'---Comments on the {M3}-Competition}, journal = ijf, year = {2001}, volume = {17}, pages = {567-570}, number = {4}, keywords = {commentaries on the M3-competition}, }

@MANUAL{forecast, title = {\pkg{forecast}: Forecasting Functions for Time Series}, author = {Rob J Hyndman}, year = {2008}, note = {\proglang{R}~package version~1.11}, url = {http://CRAN.R-project.org/package=forecasting}, }

@MANUAL{fma, title = {\pkg{fma}: Data Sets from ``{F}orecasting: Methods and Applications'' By {M}akridakis, {W}heelwright \& {H}yndman (1998)}, author = {Rob J Hyndman}, year = {2008}, note = {\proglang{R}~package version~1.11}, url = {http://CRAN.R-project.org/package=forecasting}, }

@MANUAL{expsmooth, title = {\pkg{expsmooth}: Data Sets from ``{F}orecasting with Exponential Smoothing'' by Hyndman, Koehler, Ord \& Snyder (2008)}, author = {Rob J Hyndman}, year = {2008}, note = {\proglang{R}~package version~1.11}, url = {http://CRAN.R-project.org/package=forecasting}, }

@MANUAL{Mcomp, title = {\pkg{Mcomp}: Data from the {M}-Competitions}, author = {Rob J Hyndman}, year = {2008}, note = {\proglang{R}~package version~1.11}, url = {http://CRAN.R-project.org/package=forecasting}, }

@ARTICLE{HAA08, author = {Rob J Hyndman and {Md} Akram and Blyth C Archibald}, title = {The Admissible Parameter Space for Exponential Smoothing Models}, journal = {Annals of the Institute of Statistical Mathematics}, year = {2008}, volume = {60}, number = {2}, pages = {407--426} }

@ARTICLE{HB03, author = {Rob J Hyndman and Billah, Baki}, title = {Unmasking the
{T}heta Method}, journal = ijf, year = {2003}, volume = {19}, pages = {287-290}, number = {2}, keywords = {Exponential smoothing; forecasting competitions; State space models}, }

@ARTICLE{HKPB05, author = {Rob J Hyndman and Maxwell L. King and Pitrun, Ivet and Billah, Baki}, title = {Local Linear Forecasts Using Cubic Smoothing Splines}, journal = anzjs, year = {2005}, volume = {47}, pages = {87-99}, number = {1}, keywords = {ARIMA models; Exponential smoothing; Holt's local linear forecasts; Maximum likelihood estimation; non-parametric regression; smoothing splines; state-space model; stochastic trends}, }

@ARTICLE{HK06, author = {Rob J Hyndman and Anne B Koehler}, title = {Another Look at Measures of Forecast Accuracy}, journal = ijf, year = {2006}, volume = {22}, pages = {679-688}, issue = {4}, }

@ARTICLE{HK2008, author = {Rob J Hyndman and Yeasmin Khandakar}, title = {Automatic Time Series Forecasting: The Forecast Package for R}, journal = jss, year = {2008}, volume = {27}, issue = {3}, }

@ARTICLE{HKOS05, author = {Rob J Hyndman and Anne B Koehler and J Keith Ord and Ralph D Snyder}, title = {Prediction Intervals for Exponential Smoothing Using Two New Classes of State Space Models}, journal = {Journal of Forecasting}, year = {2005}, volume = {24}, pages = {17-37}, }

@BOOK{expsmooth08, title = {Forecasting with Exponential Smoothing: The State Space Approach}, publisher = {Springer-Verlag}, year = {2008}, author = {Rob J Hyndman and Anne B Koehler and J Keith Ord and Ralph D Snyder}, url = {http://www.exponentialsmoothing.net/}, }

@ARTICLE{HKSG02, author = {Rob J Hyndman and Anne B Koehler and Ralph D Snyder and Simone Grose}, title = {A State Space Framework for Automatic Forecasting Using Exponential Smoothing Methods}, journal = ijf, year = {2002}, volume = {18}, pages = {439-454}, number = {3}, keywords = {Prediction intervals; State space models}, }

@ARTICLE{shortseasonal, author = {Rob J Hyndman and Andrey V Kostenko}, title = {Minimum Sample Size Requirements for Seasonal Forecasting Models}, journal = {Foresight: The International Journal of Applied Forecasting}, year = {2007}, volume = {6}, pages = {12-15}, }

@ARTICLE{KPSS92, author = {Denis Kwiatkowski and Peter C.B. Phillips and Peter Schmidt and Yongcheol Shin}, title = {Testing the Null Hypothesis of Stationarity Against the Alternative of a Unit Root}, journal = je, year = {1992}, volume = {54}, pages = {159-178}, }

@ARTICLE{Liu89, author = {L. M. Liu}, title = {Identification of Seasonal {Arima} Models Using a Filtering Method}, journal = commstm, year = {1989}, volume = {18}, pages = {2279-2288}, keywords = {model identification, seasonal time series, ARIMA models, filtering, intermediary models, calendar variation, intervention, transfer function models}, }

@ARTICLE{Mcomp82, author = {S. Makridakis and A. Anderson and R. Carbone and R. Fildes and M. Hibon and R. Lewandowski and J. Newton and E. Parzen and R. Winkler}, title = {The Accuracy of Extrapolation (Time Series) Methods: Results of a Forecasting Competition}, journal = jf, year = {1982}, volume = {1}, pages = {111-153}, keywords = {Forecasting; Time series; Evaluation; Accuracy; Comparison; Empirical Study}, }

@ARTICLE{Metal82, author = {Spyros Makridakis and A. Anderson and R. Carbone and R. Fildes and M. Hibon and R. Lewandowski and J. Newton and E. Parzen and R.
Winkler}, title = {The Accuracy of Extrapolation (Time Series) Methods: Results of a Forecasting Competition}, journal = jf, year = {1982}, volume = {1}, pages = {111--153}, }

@ARTICLE{Metal93, author = {Spyros Makridakis and Chris Chatfield and Mich\'{e}le Hibon and Michael Lawrence and Terence Mills and J. Keith Ord and LeRoy F. Simmons}, title = {The {M}2-Competition: A Real-Time Judgmentally Based Forecasting Study}, journal = ijf, year = {1993}, volume = {9}, pages = {5--22}, }

@ARTICLE{M3comp00, author = {Spyros Makridakis and Michele Hibon}, title = {The {M3}-Competition: Results, Conclusions and Implications}, journal = ijf, year = {2000}, volume = {16}, pages = {451-476}, keywords = {Comparative methods-Time series: Univariate; Forecasting competitions; {M}-competition; Forecasting methods; Forecasting accuracy}, }

@BOOK{MWH3, title = {Forecasting: Methods and Applications}, publisher = {John Wiley \& Sons}, year = {1998}, author = {Makridakis, Spyros and Wheelwright, Steven C. and Rob J Hyndman}, pages = {642}, address = {New York}, edition = {3rd}, url = {http://www.robhyndman.info/forecasting/}, }

@ARTICLE{MP00a, author = {G. M\'{e}lard and J.-M Pasteels}, title = {Automatic {ARIMA} Modeling Including Intervention, Using Time Series Expert Software}, journal = ijf, year = {2000}, volume = {16}, pages = {497-508}, keywords = {M3-Competition; ARIMA models; Expert systems; Intervention analysis; Outliers}, }

@ARTICLE{Meyer:2002, author = {David Meyer}, title = {Naive Time Series Forecasting Methods}, journal = {\proglang{R} News}, year = {2002}, volume = {2}, number = {2}, pages = {7--10}, month = {June}, url = {http://CRAN.R-project.org/doc/Rnews/}, }

@ARTICLE{OKS97, author = {J. Keith Ord and Anne B. Koehler and Ralph D. Snyder}, title = {Estimation and Prediction for a Class of Dynamic Nonlinear Statistical Models}, journal = jasa, year = {1997}, volume = {92}, pages = {1621-1629}, keywords = {Forecasting; Holt-Winters; Maximum likelihood estimation; State-space models}, pdf = {OKS97.pdf}, }

@ARTICLE{OL96, author = {Keith Ord and Sam Lowe}, title = {Automatic Forecasting}, journal = amstat, year = {1996}, volume = {50}, pages = {88-94}, number = {1}, month = {February}, keywords = {automatic, Forecasting, Autobox, AutocastII, Forecast Pro}, }

@ARTICLE{Pegels69, author = {C. Carl Pegels}, title = {Exponential Forecasting: Some New Variations}, journal = mansci, year = {1969}, volume = {15}, pages = {311-315}, number = {5}, }

@ARTICLE{Reilly00, author = {Reilly, David}, title = {The \pkg{Autobox} System}, journal = ijf, year = {2000}, volume = {16}, pages = {531-533}, number = {4}, pdf = {Reilly00.pdf}, }

@ARTICLE{Ripley:2002, author = {Brian D.
Ripley}, title = {Time Series in \proglang{R}~1.5.0}, journal = {\proglang{R} News}, year = {2002}, volume = {2}, number = {2}, pages = {2--7}, month = {June}, url = {http://CRAN.R-project.org/doc/Rnews/}, }

@ARTICLE{SH05, author = {Lydia Shenstone and Rob J Hyndman}, title = {Stochastic Models Underlying {C}roston's Method for Intermittent Demand Forecasting}, journal = jf, year = {2005}, volume = {24}, pages = {389-402}, }

@ARTICLE{SY94, author = {Jeremy Smith and Sanjay Yadav}, title = {Forecasting Costs Incurred from Unit Differencing Fractionally Integrated Processes}, journal = ijf, year = {1994}, volume = {10}, pages = {507-514}, number = {4}, pdf = {SY94.pdf}, }

@ARTICLE{SKHO04, author = {Ralph D Snyder and Anne B Koehler and Rob J Hyndman and J Keith Ord}, title = {Exponential Smoothing Models: Means and Variances for Lead-Time Demand}, journal = ejor, year = {2004}, volume = {158}, pages = {444-455}, number = {2}, }

@ARTICLE{Taylor03a, author = {James W. Taylor}, title = {Exponential Smoothing with a Damped Multiplicative Trend}, journal = ijf, year = {2003}, volume = {19}, pages = {715-725}, keywords = {Damped trend exponential smoothing, Pegels classification, Multiplicative trend}, }

@ARTICLE{Wallis99, author = {Wallis, K. F.}, title = {Asymmetric Density Forecasts of Inflation and the {Bank of England's} Fan Chart}, journal = {National Institute Economic Review}, year = {1999}, volume = {167}, pages = {106-112}, number = {1}, }

@Manual{R, title = {\proglang{R}: A Language and Environment for Statistical Computing}, author = {{\proglang{R} Development Core Team}}, organization = {\proglang{R} Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2008}, note = {{ISBN} 3-900051-07-0}, url = {http://www.R-project.org/}, }
forecast/vignettes/JSS2008.Rmd0000644000176200001440000017272514473635572015533 0ustar liggesusers
---
author:
- name: Rob J Hyndman
  affiliation: Monash University
  address: >
    Department of Econometrics \& Business Statistics
    Monash University
    Clayton VIC 3800, Australia
  email: Rob.Hyndman@monash.edu
  url: https://robjhyndman.com
- name: Yeasmin Khandakar
  affiliation: Monash University
  address: >
    Department of Econometrics \& Business Statistics
    Monash University
    Clayton VIC 3800, Australia
title:
  formatted: "Automatic Time Series Forecasting:\\newline the \\pkg{forecast} Package for \\proglang{R}"
  # If you use tex in the formatted title, also supply version without
  plain: "Automatic Time Series Forecasting: the forecast Package for R"
  # For running headers, if needed
  short: "\\pkg{forecast}: Automatic Time Series Forecasting"
abstract: >
  This vignette to the \proglang{R} package \pkg{forecast} is an updated version of @HK2008, published in the *Journal of Statistical Software*. Automatic forecasts of large numbers of univariate time series are often needed in business and other contexts. We describe two automatic forecasting algorithms that have been implemented in the \pkg{forecast} package for \proglang{R}. The first is based on innovations state space models that underlie exponential smoothing methods. The second is a step-wise algorithm for forecasting with ARIMA models. The algorithms are applicable to both seasonal and non-seasonal data, and are compared and illustrated using four real time series.
  We also briefly describe some of the other functionality available in the \pkg{forecast} package.
keywords:
  # at least one keyword must be supplied
  formatted: [ARIMA models, automatic forecasting, exponential smoothing, prediction intervals, state space models, time series, "\\proglang{R}"]
  plain: [ARIMA models, automatic forecasting, exponential smoothing, prediction intervals, state space models, time series, R]
preamble: >
  \usepackage{amsmath,rotating,bm,fancyvrb,paralist,thumbpdf}
  \Volume{27}
  \Issue{3}
  \Month{July}
  \Year{2008}
  \Submitdate{2007-05-29}
  \Acceptdate{2008-03-22}
  \def\damped{$_{\mbox{\footnotesize d}}$}
  \let\var=\VAR
  \def\R{\proglang{R}}
  \def\dampfactor{\phi_h}
  \raggedbottom
bibliography: JSS-paper.bib
vignette: >
  %\VignetteIndexEntry{Automatic Time Series Forecasting: the forecast Package for R (Hyndman & Khandakar, JSS 2008)}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
documentclass: jss
output:
  if (rmarkdown::pandoc_version() >= "2") rticles::jss_article else rmarkdown::html_vignette:
    fig_width: 7
    fig_height: 6
    fig_caption: true
---

```{r load_forecast, echo=FALSE, message=FALSE}
library('forecast')
```

```{r load_expsmooth, echo=FALSE, message=FALSE, eval=FALSE}
library('expsmooth')
```

```{r expsmooth_datasets, echo=FALSE, message=FALSE}
bonds <- structure(c(5.83, 6.06, 6.58, 7.09, 7.31, 7.23, 7.43, 7.37, 7.6, 7.89, 8.12, 7.96, 7.93, 7.61, 7.33, 7.18, 6.74, 6.27, 6.38, 6.6, 6.3, 6.13, 6.02, 5.79, 5.73, 5.89, 6.37, 6.62, 6.85, 7.03, 6.99, 6.75, 6.95, 6.64, 6.3, 6.4, 6.69, 6.52, 6.8, 7.01, 6.82, 6.6, 6.32, 6.4, 6.11, 5.82, 5.87, 5.89, 5.63, 5.65, 5.73, 5.72, 5.73, 5.58, 5.53, 5.41, 4.87, 4.58, 4.89, 4.69, 4.78, 4.99, 5.23, 5.18, 5.54, 5.9, 5.8, 5.94, 5.91, 6.1, 6.03, 6.26, 6.66, 6.52, 6.26, 6, 6.42, 6.1, 6.04, 5.83, 5.8, 5.74, 5.72, 5.23, 5.14, 5.1, 4.89, 5.13, 5.37, 5.26, 5.23, 4.97, 4.76, 4.55, 4.61, 5.07, 5, 4.9, 5.28, 5.21, 5.15, 4.9, 4.62, 4.24, 3.88, 3.91, 4.04, 4.03, 4.02, 3.9, 3.79, 3.94, 3.56, 3.32, 3.93, 4.44, 4.29, 4.27, 4.29, 4.26, 4.13, 4.06, 3.81, 4.32, 4.7), .Tsp = c(1994, 2004.33333333333, 12), class = "ts")
usnetelec <- structure(c(296.1, 334.1, 375.3, 403.8, 447, 476.3, 550.3, 603.9, 634.6, 648.5, 713.4, 759.2, 797.1, 857.9, 920, 987.2, 1058.4, 1147.5, 1217.8, 1332.8, 1445.5, 1535.1, 1615.9, 1753, 1864.1, 1870.3, 1920.8, 2040.9, 2127.4, 2209.4, 2250.7, 2289.6, 2298, 2244.4, 2313.4, 2419.5, 2473, 2490.5, 2575.3, 2707.4, 2967.3, 3038, 3073.8, 3083.9, 3197.2, 3247.5, 3353.5, 3444.2, 3492.2, 3620.3, 3694.8, 3802.1, 3736.6, 3858.5, 3848), .Tsp = c(1949, 2003, 1), class = "ts")
ukcars <- structure(c(330.371, 371.051, 270.67, 343.88, 358.491, 362.822, 261.281, 240.355, 325.382, 316.7, 171.153, 257.217, 298.127, 251.464, 181.555, 192.598, 245.652, 245.526, 225.261, 238.211, 257.385, 228.461, 175.371, 226.462, 266.15, 287.251, 225.883, 265.313, 272.759, 234.134, 196.462, 205.551, 291.283, 284.422, 221.571, 250.697, 253.757, 267.016, 220.388, 277.801, 283.233, 302.072, 259.72, 297.658, 306.129, 322.106, 256.723, 341.877, 356.004, 361.54, 270.433, 311.105, 326.688, 327.059, 274.257, 367.606, 346.163, 348.211, 250.008, 292.518, 343.318, 343.429, 275.386, 329.747, 364.521, 378.448, 300.798, 331.757, 362.536, 389.133, 323.322, 391.832, 421.646, 416.823, 311.713, 381.902, 422.982, 427.722, 376.85, 458.58, 436.225, 441.487, 369.566, 450.723, 462.442, 468.232, 403.636, 413.948, 460.496, 448.932, 407.787, 469.408, 494.311, 433.24, 335.106, 378.795, 387.1, 372.395, 335.79, 397.08, 449.755, 402.252, 391.847, 385.89, 424.325, 433.28, 391.213, 408.74, 445.458,
428.202, 379.048, 394.042, 432.796), .Tsp = c(1977, 2005, 4), class = "ts")
visitors <- structure(c(75.7, 75.4, 83.1, 82.9, 77.3, 105.7, 121.9, 150, 98, 118, 129.5, 110.6, 91.7, 94.8, 109.5, 105.1, 95, 130.3, 156.7, 190.1, 139.7, 147.8, 145.2, 132.7, 120.7, 116.5, 142, 140.4, 128, 165.7, 183.1, 222.8, 161.3, 180.4, 185.2, 160.5, 157.1, 163.8, 203.3, 196.9, 179.6, 207.3, 208, 245.8, 168.9, 191.1, 180, 160.1, 136.6, 142.7, 175.4, 161.4, 149.9, 174.1, 192.7, 247.4, 176.2, 192.8, 189.1, 181.1, 149.9, 157.3, 185.3, 178.2, 162.7, 190.6, 198.6, 253.1, 177.4, 190.6, 189.2, 168, 161.4, 172.2, 208.3, 199.3, 197.4, 216, 223.9, 266.8, 196.1, 238.2, 217.8, 203.8, 175.2, 176.9, 219.3, 199.1, 190, 229.3, 255, 302.4, 242.8, 245.5, 257.9, 226.3, 213.4, 204.6, 244.6, 239.9, 224, 267.2, 285.9, 344, 250.5, 304.3, 307.4, 255.1, 214.9, 230.9, 282.5, 265.4, 254, 301.6, 311, 384, 303.8, 319.1, 313.5, 294.2, 244.8, 261.4, 329.7, 304.9, 268.6, 320.7, 342.9, 422.3, 317.2, 392.7, 365.6, 333.2, 261.5, 306.9, 358.2, 329.2, 309.2, 350.4, 375.6, 465.2, 342.9, 408, 390.9, 325.9, 289.1, 308.2, 397.4, 330.4, 330.9, 366.5, 379.5, 448.3, 346.2, 353.6, 338.6, 341.1, 283.4, 304.2, 372.3, 323.7, 323.9, 354.8, 367.9, 457.6, 351, 398.6, 389, 334.1, 298.1, 317.1, 388.5, 355.6, 353.1, 397, 416.7, 460.8, 360.8, 434.6, 411.9, 405.6, 319.3, 347.9, 429, 372.9, 403, 426.5, 459.9, 559.9, 416.6, 429.2, 428.7, 405.4, 330.2, 370, 446.9, 384.6, 366.3, 378.5, 376.2, 523.2, 379.3, 437.2, 446.5, 360.3, 329.9, 339.4, 418.2, 371.9, 358.6, 428.9, 437, 534, 396.6, 427.5, 392.5, 321.5, 260.9, 308.3, 415.5, 362.2, 385.6, 435.3, 473.3, 566.6, 420.2, 454.8, 432.3, 402.8, 341.3, 367.3, 472, 405.8, 395.6, 449.9, 479.9, 593.1, 462.4, 501.6, 504.7, 409.5), .Tsp = c(1985.33333333333, 2005.25, 12), class = "ts")
```

# Introduction

Automatic forecasts of large numbers of univariate time series are often needed in business. It is common to have over one thousand product lines that need forecasting at least monthly. Even when a smaller number of forecasts is required, there may be nobody suitably trained in the use of time series models to produce them. In these circumstances, an automatic forecasting algorithm is an essential tool. Automatic forecasting algorithms must determine an appropriate time series model, estimate the parameters and compute the forecasts. They must be robust to unusual time series patterns, and applicable to large numbers of series without user intervention. The most popular automatic forecasting algorithms are based on either exponential smoothing or ARIMA models.

In this article, we discuss the implementation of two automatic univariate forecasting methods in the \pkg{forecast} package for \proglang{R}. We also briefly describe some univariate forecasting methods that are part of the \pkg{forecast} package.

The \pkg{forecast} package for the \proglang{R} system for statistical computing [@R] is available from the Comprehensive \proglang{R} Archive Network at \url{https://CRAN.R-project.org/package=forecast}. Version `r packageVersion('forecast')` of the package was used for this paper. The \pkg{forecast} package contains functions for univariate forecasting and a few examples of real time series data. For more extensive testing of forecasting methods, the \pkg{fma} package contains the 90 data sets from @MWH3, the \pkg{expsmooth} package contains 24 data sets from @expsmooth08, and the \pkg{Mcomp} package contains the 1001 time series from the M-competition [@Mcomp82] and the 3003 time series from the M3-competition [@M3comp00].
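These companion data packages are loaded in the usual way. The following chunk is a minimal sketch (not evaluated here; the packages must be installed separately):

```{r load_data_packages, eval=FALSE}
# Not evaluated: companion data packages described above.
library(fma)       # 90 data sets from Makridakis, Wheelwright & Hyndman (1998)
library(expsmooth) # 24 data sets from Hyndman, Koehler, Ord & Snyder (2008)
library(Mcomp)     # time series from the M- and M3-competitions
```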
The \pkg{forecast} package implements automatic forecasting using exponential smoothing, ARIMA models, the Theta method [@AN00], cubic splines [@HKPB05], as well as other common forecasting methods. In this article, we primarily discuss the exponential smoothing approach (in Section \ref{sec:expsmooth}) and the ARIMA modelling approach (in Section \ref{sec:arima}) to automatic forecasting. In Section \ref{sec:package}, we describe the implementation of these methods in the \pkg{forecast} package, along with other features of the package.

# Exponential smoothing {#sec:expsmooth}

Although exponential smoothing methods have been around since the 1950s, a modelling framework incorporating procedures for model selection was not developed until relatively recently. @OKS97, @HKSG02 and @HKOS05 have shown that all exponential smoothing methods (including non-linear methods) are optimal forecasts from innovations state space models.

Exponential smoothing methods were originally classified by Pegels' (1969)\nocite{Pegels69} taxonomy. This was later extended by @Gardner85, modified by @HKSG02, and extended again by @Taylor03a, giving a total of fifteen methods seen in the following table.

\begin{table}[!hbt]
\begin{center}\vspace{0.2cm}
\begin{tabular}{|ll|ccc|}
\hline
& &\multicolumn{3}{c|}{Seasonal Component} \\
\multicolumn{2}{|c|}{Trend}& N & A & M\\
\multicolumn{2}{|c|}{Component} & (None) & (Additive) & (Multiplicative)\\
\cline{3-5} &&&\\[-0.3cm]
N & (None) & N,N & N,A & N,M\\
&&&&\\[-0.3cm]
A & (Additive) & A,N & A,A & A,M\\
&&&&\\[-0.3cm]
A\damped & (Additive damped) & A\damped,N & A\damped,A & A\damped,M\\
&&&&\\[-0.3cm]
M & (Multiplicative) & M,N & M,A & M,M\\
&&&&\\[-0.3cm]
M\damped & (Multiplicative damped) & M\damped,N & M\damped,A & M\damped,M\\
\hline
\end{tabular}\vspace{0.2cm}
\end{center}
\caption{The fifteen exponential smoothing methods.}
\end{table}

Some of these methods are better known under other names. For example, cell (N,N) describes the simple exponential smoothing (or SES) method, cell (A,N) describes Holt's linear method, and cell (A\damped,N) describes the damped trend method. The additive Holt-Winters' method is given by cell (A,A) and the multiplicative Holt-Winters' method is given by cell (A,M). The other cells correspond to less commonly used but analogous methods.

## Point forecasts for all methods

We denote the observed time series by $y_1,y_2,\dots,y_n$. A forecast of $y_{t+h}$ based on all of the data up to time $t$ is denoted by $\hat{y}_{t+h|t}$. To illustrate the method, we give the point forecasts and updating equations for method (A,A), the Holt-Winters' additive method:
\begin{subequations}\label{eq:AMmethod}
\begin{align}
\mbox{Level:}\quad &\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha)(\ell_{t-1} + b_{t-1})\hspace*{1cm} \label{eq:3-44a}\\
\mbox{Growth:}\quad &b_t = \beta^*(\ell_t - \ell_{t-1}) + (1-\beta^*)b_{t-1} \label{eq:3-45a}\\
\mbox{Seasonal:}\quad &s_t = \gamma(y_t - \ell_{t-1} -b_{t-1}) + (1-\gamma)s_{t-m}\label{eq:3-46a}\\
\mbox{Forecast:}\quad &\hat{y}_{t+h|t} = \ell_t + b_th +s_{t-m+h_m^+}. \label{eq:3-47a}
\end{align}
\end{subequations}
where $m$ is the length of seasonality (e.g., the number of months or quarters in a year), $\ell_t$ represents the level of the series, $b_t$ denotes the growth, $s_t$ is the seasonal component, $\hat{y}_{t+h|t}$ is the forecast for $h$ periods ahead, and $h_m^+ = \big[(h-1) \mbox{ mod } m\big] + 1$.
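For example, the Holt-Winters' additive method is available directly through the \code{hw()} function in the \pkg{forecast} package. The following chunk is a small illustration using the quarterly \code{ukcars} series loaded earlier; \code{h = 8} requests forecasts two years ahead:

```{r hw_additive_example}
# Method (A,A): Holt-Winters' additive method.
fc <- hw(ukcars, seasonal = "additive", h = 8)
fc$mean  # point forecasts corresponding to the recursions above
```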
To use method \eqref{eq:AMmethod}, we need values for the initial states $\ell_0$, $b_0$ and $s_{1-m},\dots,s_0$, and for the smoothing parameters $\alpha$, $\beta^*$ and $\gamma$. All of these will be estimated from the observed data. Equation \eqref{eq:3-46a} is slightly different from the usual Holt-Winters equations such as those in @MWH3 or @BOK05. These authors replace \eqref{eq:3-46a} with
$$ s_t = \gamma^*(y_t - \ell_{t}) + (1-\gamma^*)s_{t-m}. $$
If $\ell_t$ is substituted using \eqref{eq:3-44a}, we obtain
$$s_t = \gamma^*(1-\alpha)(y_t - \ell_{t-1}-b_{t-1}) + \{1-\gamma^*(1-\alpha)\}s_{t-m}. $$
Thus, we obtain identical forecasts using this approach by replacing $\gamma$ in \eqref{eq:3-46a} with $\gamma^*(1-\alpha)$. The modification given in \eqref{eq:3-46a} was proposed by @OKS97 to make the state space formulation simpler. It is equivalent to Archibald's (1990)\nocite{Archibald90} variation of the Holt-Winters' method.
\begin{sidewaystable}
\begin{small}
\begin{center}
\begin{tabular}{|c|lll|}
\hline
& \multicolumn{3}{c|}{Seasonal} \\
{Trend} & \multicolumn{1}{c}{N} & \multicolumn{1}{c}{A} & \multicolumn{1}{c|}{M}\\
\cline{2-4}
& $\ell_t = \alpha y_t + (1-\alpha) \ell_{t-1}$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha) \ell_{t-1}$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha) \ell_{t-1}$\\
{N} & & $s_t = \gamma (y_t - \ell_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / \ell_{t-1}) + (1-\gamma) s_{t-m}$ \\
& $\hat{y}_{t+h|t} = \ell_t$ & $\hat{y}_{t+h|t} = \ell_t + s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= \ell_ts_{t-m+h_m^+}$ \\
\hline
& $\ell_t = \alpha y_t + (1-\alpha) (\ell_{t-1}+b_{t-1})$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha) (\ell_{t-1}+b_{t-1})$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha) (\ell_{t-1}+b_{t-1})$\\
{A} & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) b_{t-1}$ & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) b_{t-1}$ & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) b_{t-1}$\\
& & $s_t = \gamma (y_t - \ell_{t-1}-b_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / (\ell_{t-1}+b_{t-1})) + (1-\gamma) s_{t-m}$\\
& $\hat{y}_{t+h|t} = \ell_t+hb_t$ & $\hat{y}_{t+h|t} = \ell_t +hb_t +s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= (\ell_t+hb_t)s_{t-m+h_m^+}$ \\
\hline
& $\ell_t = \alpha y_t + (1-\alpha) (\ell_{t-1}+\phi b_{t-1})$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha) (\ell_{t-1}+\phi b_{t-1})$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha) (\ell_{t-1}+\phi b_{t-1})$\\
{A\damped } & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) \phi b_{t-1}$ & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) \phi b_{t-1}$ & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) \phi b_{t-1}$\\
& & $s_t = \gamma (y_t - \ell_{t-1}-\phi b_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / (\ell_{t-1}+\phi b_{t-1})) + (1-\gamma) s_{t-m}$\\
& $\hat{y}_{t+h|t} = \ell_t+\dampfactor b_t$ & $\hat{y}_{t+h|t} = \ell_t+\dampfactor b_t+s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= (\ell_t+\dampfactor b_t)s_{t-m+h_m^+}$ \\
\hline
& $\ell_t = \alpha y_t + (1-\alpha) \ell_{t-1}b_{t-1}$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha) \ell_{t-1}b_{t-1}$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha) \ell_{t-1}b_{t-1}$\\
{M} & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b_{t-1}$ & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b_{t-1}$ & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b_{t-1}$\\
& & $s_t = \gamma (y_t - \ell_{t-1}b_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / (\ell_{t-1}b_{t-1})) + (1-\gamma) s_{t-m}$\\
& $\hat{y}_{t+h|t} = \ell_tb_t^h$ & $\hat{y}_{t+h|t} =
\ell_tb_t^h + s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= \ell_tb_t^hs_{t-m+h_m^+}$ \\ \hline & $\ell_t = \alpha y_t + (1-\alpha) \ell_{t-1}b^\phi_{t-1}$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha)\ell_{t-1}b^\phi_{t-1}$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha)\ell_{t-1}b^\phi_{t-1}$\\ {M\damped } & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b^\phi_{t-1}$ & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b^\phi_{t-1}$ & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b^\phi_{t-1}$\\ & & $s_t = \gamma (y_t - \ell_{t-1}b^\phi_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / (\ell_{t-1}b^\phi_{t-1})) + (1-\gamma) s_{t-m}$\\ & $\hat{y}_{t+h|t} = \ell_tb_t^{\phi_h}$ & $\hat{y}_{t+h|t} = \ell_tb_t^{\phi_h} + s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= \ell_tb_t^{\phi_h}s_{t-m+h_m^+}$ \\ \hline \end{tabular} \end{center} \end{small} \caption{Formulae for recursive calculations and point forecasts. In each case, $\ell_t$ denotes the series level at time $t$, $b_t$ denotes the slope at time $t$, $s_t$ denotes the seasonal component of the series at time $t$, and $m$ denotes the number of seasons in a year; $\alpha$, $\beta^*$, $\gamma$ and $\phi$ are constants, $\phi_h = \phi+\phi^2+\dots+\phi^{h}$ and $h_m^+ = \big[(h-1) \mbox{ mod } m\big] + 1$.}\label{table:pegels} \end{sidewaystable} Table \ref{table:pegels} gives recursive formulae for computing point forecasts $h$ periods ahead for all of the exponential smoothing methods. Some interesting special cases can be obtained by setting the smoothing parameters to extreme values. For example, if $\alpha=0$, the level is constant over time; if $\beta^*=0$, the slope is constant over time; and if $\gamma=0$, the seasonal pattern is constant over time. At the other extreme, naïve forecasts (i.e., $\hat{y}_{t+h|t}=y_t$ for all $h$) are obtained using the (N,N) method with $\alpha=1$. Finally, the additive and multiplicative trend methods are special cases of their damped counterparts obtained by letting $\phi=1$. ## Innovations state space models {#sec:statespace} For each exponential smoothing method in Table \ref{table:pegels}, @expsmooth08 describe two possible innovations state space models, one corresponding to a model with additive errors and the other to a model with multiplicative errors. If the same parameter values are used, these two models give equivalent point forecasts, although different prediction intervals. Thus there are 30 potential models described in this classification. Historically, the nature of the error component has often been ignored, because the distinction between additive and multiplicative errors makes no difference to point forecasts. We are careful to distinguish exponential smoothing \emph{methods} from the underlying state space \emph{models}. An exponential smoothing method is an algorithm for producing point forecasts only. The underlying stochastic state space model gives the same point forecasts, but also provides a framework for computing prediction intervals and other properties. To distinguish the models with additive and multiplicative errors, we add an extra letter to the front of the method notation. The triplet (E,T,S) refers to the three components: error, trend and seasonality. So the model ETS(A,A,N) has additive errors, additive trend and no seasonality---in other words, this is Holt's linear method with additive errors. Similarly, ETS(M,M\damped,M) refers to a model with multiplicative errors, a damped multiplicative trend and multiplicative seasonality. 
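In the package, this triplet notation maps directly onto the \code{model} argument of the \code{ets()} function described in Section \ref{sec:package}. A sketch (object names are illustrative):

```{r ets-notation-sketch, eval=FALSE, echo=TRUE}
# ETS(A,A,N): additive errors, additive trend, no seasonality
fit1 <- ets(usnetelec, model = "AAN", damped = FALSE)
# ETS(M,Ad,M): multiplicative errors, damped additive trend,
# multiplicative seasonality
fit2 <- ets(ukcars, model = "MAM", damped = TRUE)
```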
The notation ETS($\cdot$,$\cdot$,$\cdot$) helps in remembering the order in which the components are specified. Once a model is specified, we can study the probability distribution of future values of the series and find, for example, the conditional mean of a future observation given knowledge of the past. We denote this as $\mu_{t+h|t} = \E(y_{t+h} \mid \bm{x}_t)$, where $\bm{x}_t$ contains the unobserved components such as $\ell_t$, $b_t$ and $s_t$. For $h=1$ we use $\mu_t\equiv\mu_{t+1|t}$ as a shorthand notation. For many models, these conditional means will be identical to the point forecasts given in Table \ref{table:pegels}, so that $\mu_{t+h|t}=\hat{y}_{t+h|t}$. However, for other models (those with multiplicative trend or multiplicative seasonality), the conditional mean and the point forecast will differ slightly for $h\ge 2$. We illustrate these ideas using the damped trend method of @GM85.

\subsubsection{Additive error model: ETS(A,A$_d$,N)}

Let $\mu_t = \hat{y}_t = \ell_{t-1}+\phi b_{t-1}$ denote the one-step forecast of $y_{t}$ assuming that we know the values of all parameters. Also, let $\varepsilon_t = y_t - \mu_t$ denote the one-step forecast error at time $t$. From the equations in Table \ref{table:pegels}, we find that
\begin{align}
\label{ss1} y_t &= \ell_{t-1} + \phi b_{t-1} + \varepsilon_t\\
\ell_t &= \ell_{t-1} + \phi b_{t-1} + \alpha \varepsilon_t \label{ss2}\\
b_t &= \phi b_{t-1} + \beta^*(\ell_t - \ell_{t-1}- \phi b_{t-1}) = \phi b_{t-1} + \alpha\beta^*\varepsilon_t. \label{ss3}
\end{align}
We simplify the last expression by setting $\beta=\alpha\beta^*$. The three equations above constitute a state space model underlying the damped Holt's method. Note that it is an \emph{innovations} state space model [@AM79;@Aoki87] because the same error term appears in each equation. We can write it in standard state space notation by defining the state vector as $\bm{x}_t = (\ell_t,b_t)'$ and expressing \eqref{ss1}--\eqref{ss3} as
\begin{subequations}
\begin{align}
y_t &= \left[ 1 ~ \phi \right] \bm{x}_{t-1} + \varepsilon_t\label{obseq}\\
\bm{x}_t &= \left[\begin{array}{ll} 1 & \phi\\ 0 & \phi \end{array}\right]\bm{x}_{t-1} + \left[\begin{array}{l} \alpha\\ \beta \end{array}\right]\varepsilon_t.\label{stateeq}
\end{align}
\end{subequations}
The model is fully specified once we state the distribution of the error term $\varepsilon_t$. Usually we assume that these are independent and identically distributed, following a normal distribution with mean 0 and variance $\sigma^2$, which we write as $\varepsilon_t \sim\mbox{NID}(0, \sigma^2)$.

\subsubsection{Multiplicative error model: ETS(M,A$_d$,N)}

A model with multiplicative error can be derived similarly, by first setting $\varepsilon_t = (y_t-\mu_t)/\mu_t$, so that $\varepsilon_t$ is the relative error. Then, following a similar approach to that for additive errors, we find
\begin{align*}
y_t &= (\ell_{t-1} + \phi b_{t-1})(1 + \varepsilon_t)\\
\ell_t &= (\ell_{t-1} + \phi b_{t-1})(1 + \alpha \varepsilon_t)\\
b_t &= \phi b_{t-1} + \beta(\ell_{t-1}+\phi b_{t-1})\varepsilon_t,
\end{align*}
or
\begin{align*}
y_t &= \left[ 1 ~ \phi \right] \bm{x}_{t-1}(1 + \varepsilon_t)\\
\bm{x}_t &= \left[\begin{array}{ll} 1 & \phi \\ 0 & \phi \end{array}\right]\bm{x}_{t-1} + \left[ 1 ~ \phi \right] \bm{x}_{t-1} \left[\begin{array}{l} \alpha\\ \beta \end{array}\right]\varepsilon_t.
\end{align*}
Again we assume that $\varepsilon_t \sim \mbox{NID}(0,\sigma^2)$.
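To make these recursions concrete, the following minimal sketch (the function name is ours; it is not the package's internal implementation) filters a series through the ETS(M,A$_d$,N) equations for known parameter values, recovering the relative innovations $\varepsilon_t$.

```{r etsmadn-sketch, eval=FALSE, echo=TRUE}
# Filter a series through the ETS(M,Ad,N) recursions given the
# parameters (alpha, beta, phi) and initial states (l0, b0).
ets_madn_filter <- function(y, alpha, beta, phi, l0, b0) {
  n <- length(y)
  l <- b <- e <- numeric(n)
  lprev <- l0
  bprev <- b0
  for (t in seq_len(n)) {
    mu <- lprev + phi * bprev # one-step forecast mu_t
    e[t] <- (y[t] - mu) / mu # relative (multiplicative) error
    l[t] <- (lprev + phi * bprev) * (1 + alpha * e[t])
    b[t] <- phi * bprev + beta * (lprev + phi * bprev) * e[t]
    lprev <- l[t]
    bprev <- b[t]
  }
  list(level = l, slope = b, innovations = e)
}
```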
Of course, this is a nonlinear state space model, which is usually considered difficult to handle in estimating and forecasting. However, that is one of the many advantages of the innovations form of state space models --- we can still compute forecasts, the likelihood and prediction intervals for this nonlinear model with no more effort than is required for the additive error model. ## State space models for all exponential smoothing methods {#sec:ssmodels} There are similar state space models for all 30 exponential smoothing variations. The general model involves a state vector $\bm{x}_t = (\ell_t, b_t$, $s_t, s_{t-1}, \dots, s_{t-m+1})'$ and state space equations of the form \begin{subequations}\label{eq:ss} \begin{align} y_t &= w(\bm{x}_{t-1}) + r(\bm{x}_{t-1})\varepsilon_t \label{eq:ss1}\\ \bm{x}_t &= f(\bm{x}_{t-1}) + g(\bm{x}_{t-1})\varepsilon_t \label{eq:ss2} \end{align} \end{subequations} where $\{\varepsilon_t\}$ is a Gaussian white noise process with mean zero and variance $\sigma^2$, and $\mu_t = w(\bm{x}_{t-1})$. The model with additive errors has $r(\bm{x}_{t-1})=1$, so that $y_t = \mu_{t} + \varepsilon_t$. The model with multiplicative errors has $r(\bm{x}_{t-1})=\mu_t$, so that $y_t = \mu_{t}(1 + \varepsilon_t)$. Thus, $\varepsilon_t = (y_t - \mu_t)/\mu_t$ is the relative error for the multiplicative model. The models are not unique. Clearly, any value of $r(\bm{x}_{t-1})$ will lead to identical point forecasts for $y_t$. All of the methods in Table \ref{table:pegels} can be written in the form \eqref{eq:ss1} and \eqref{eq:ss2}. The specific form for each model is given in @expsmooth08. Some of the combinations of trend, seasonality and error can occasionally lead to numerical difficulties; specifically, any model equation that requires division by a state component could involve division by zero. This is a problem for models with additive errors and either multiplicative trend or multiplicative seasonality, as well as for the model with multiplicative errors, multiplicative trend and additive seasonality. These models should therefore be used with caution. The multiplicative error models are useful when the data are strictly positive, but are not numerically stable when the data contain zeros or negative values. So when the time series is not strictly positive, only the six fully additive models may be applied. The point forecasts given in Table \ref{table:pegels} are easily obtained from these models by iterating equations \eqref{eq:ss1} and \eqref{eq:ss2} for $t=n+1, n+2,\dots,n+h$, setting $\varepsilon_{n+j}=0$ for $j=1,\dots,h$. In most cases (notable exceptions being models with multiplicative seasonality or multiplicative trend for $h\ge2$), the point forecasts can be shown to be equal to $\mu_{t+h|t} = \E(y_{t+h} \mid \bm{x}_t)$, the conditional expectation of the corresponding state space model. The models also provide a means of obtaining prediction intervals. In the case of the linear models, where the forecast distributions are normal, we can derive the conditional variance $v_{t+h|t} = \var(y_{t+h} \mid \bm{x}_t)$ and obtain prediction intervals accordingly. This approach also works for many of the nonlinear models. Detailed derivations of the results for many models are given in @HKOS05. A more direct approach that works for all of the models is to simply simulate many future sample paths conditional on the last estimate of the state vector, $\bm{x}_t$. Then prediction intervals can be obtained from the percentiles of the simulated sample paths. 
Point forecasts can also be obtained in this way by taking the average of the simulated values at each future time period. An advantage of this approach is that we generate an estimate of the complete predictive distribution, which is especially useful in applications such as inventory planning, where expected costs depend on the whole distribution. ## Estimation {#sec:estimation} In order to use these models for forecasting, we need to know the values of $\bm{x}_0$ and the parameters $\alpha$, $\beta$, $\gamma$ and $\phi$. It is easy to compute the likelihood of the innovations state space model \eqref{eq:ss}, and so obtain maximum likelihood estimates. @OKS97 show that \begin{equation}\label{likelihood} L^*(\bm\theta,\bm{x}_0) = n\log\Big(\sum_{t=1}^n \varepsilon^2_t\Big) + 2\sum_{t=1}^n \log|r(\bm{x}_{t-1})| \end{equation} is equal to twice the negative logarithm of the likelihood function (with constant terms eliminated), conditional on the parameters $\bm\theta = (\alpha,\beta,\gamma,\phi)'$ and the initial states $\bm{x}_0 = (\ell_0,b_0,s_0,s_{-1},\dots,s_{-m+1})'$, where $n$ is the number of observations. This is easily computed by simply using the recursive equations in Table \ref{table:pegels}. Unlike state space models with multiple sources of error, we do not need to use the Kalman filter to compute the likelihood. The parameters $\bm\theta$ and the initial states $\bm{x}_0$ can be estimated by minimizing $L^*$. Most implementations of exponential smoothing use an ad hoc heuristic scheme to estimate $\bm{x}_0$. However, with modern computers, there is no reason why we cannot estimate $\bm{x}_0$ along with $\bm\theta$, and the resulting forecasts are often substantially better when we do. We constrain the initial states $\bm{x}_0$ so that the seasonal indices add to zero for additive seasonality, and add to $m$ for multiplicative seasonality. There have been several suggestions for restricting the parameter space for $\alpha$, $\beta$ and $\gamma$. The traditional approach is to ensure that the various equations can be interpreted as weighted averages, thus requiring $\alpha$, $\beta^*=\beta/\alpha$, $\gamma^*=\gamma/(1-\alpha)$ and $\phi$ to all lie within $(0,1)$. This suggests $$0<\alpha<1,\qquad 0<\beta<\alpha,\qquad 0<\gamma < 1-\alpha,\qquad\mbox{and}\qquad 0<\phi<1. $$ However, @HAA08 show that these restrictions are usually stricter than necessary (although in a few cases they are not restrictive enough). ## Model selection Forecast accuracy measures such as mean squared error (MSE) can be used for selecting a model for a given set of data, provided the errors are computed from data in a hold-out set and not from the same data as were used for model estimation. However, there are often too few out-of-sample errors to draw reliable conclusions. Consequently, a penalized method based on the in-sample fit is usually better. One such approach uses a penalized likelihood such as Akaike's Information Criterion: $$\mbox{AIC} = L^*(\hat{\bm\theta},\hat{\bm{x}}_0) + 2q, $$ where $q$ is the number of parameters in $\bm\theta$ plus the number of free states in $\bm{x}_0$, and $\hat{\bm\theta}$ and $\hat{\bm{x}}_0$ denote the estimates of $\bm\theta$ and $\bm{x}_0$. We select the model that minimizes the AIC amongst all of the models that are appropriate for the data. The AIC also provides a method for selecting between the additive and multiplicative error models. 
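For example, the following sketch (names are illustrative) fits the additive and multiplicative error counterparts of a damped trend model to the same series and compares their AIC values.

```{r aic-error-type-sketch, eval=FALSE, echo=TRUE}
# ETS(A,Ad,N) versus ETS(M,Ad,N): the error type giving the
# smaller AIC would be selected.
fit.add <- ets(usnetelec, model = "AAN", damped = TRUE)
fit.mult <- ets(usnetelec, model = "MAN", damped = TRUE)
c(additive = fit.add$aic, multiplicative = fit.mult$aic)
```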
The point forecasts from the two models are identical so that standard forecast accuracy measures such as the MSE or mean absolute percentage error (MAPE) are unable to select between the error types. The AIC is able to select between the error types because it is based on likelihood rather than one-step forecasts. Obviously, other model selection criteria (such as the BIC) could also be used in a similar manner.

## Automatic forecasting {#sec:algorithm}

We combine the preceding ideas to obtain a robust and widely applicable automatic forecasting algorithm. The steps involved are summarized below.
\begin{compactenum}
\item For each series, apply all models that are appropriate, optimizing the parameters (both smoothing parameters and the initial state variable) of the model in each case.
\item Select the best of the models according to the AIC.
\item Produce point forecasts using the best model (with optimized parameters) for as many steps ahead as required.
\item Obtain prediction intervals for the best model either using the analytical results of @HKOS05, or by simulating future sample paths for $\{y_{n+1},\dots,y_{n+h}\}$ and finding the $\alpha/2$ and $1-\alpha/2$ percentiles of the simulated data at each forecasting horizon. If simulation is used, the sample paths may be generated using the normal distribution for errors (parametric bootstrap) or using the resampled errors (ordinary bootstrap).
\end{compactenum}

@HKSG02 applied this automatic forecasting strategy to the M-competition data [@Mcomp82] and the IJF-M3 competition data [@M3comp00] using a restricted set of exponential smoothing models, and demonstrated that the methodology is particularly good at short-term forecasts (up to about six periods ahead), especially for seasonal short-term series (beating all other methods in the competitions for these series).

# ARIMA models {#sec:arima}

A common obstacle for many people in using Autoregressive Integrated Moving Average (ARIMA) models for forecasting is that the order selection process is usually considered subjective and difficult to apply. But it does not have to be. There have been several attempts to automate ARIMA modelling in the last 25 years.

@HR82 proposed a method to identify the order of an ARMA model for a stationary series. In their method, the innovations can be obtained by fitting a long autoregressive model to the data, and then the likelihood of potential models is computed via a series of standard regressions. They established the asymptotic properties of the procedure under very general conditions. @Gomez98 extended the Hannan-Rissanen identification method to include multiplicative seasonal ARIMA model identification. @TRAMOSEATS98 implemented this automatic identification procedure in the software \pkg{TRAMO} and \pkg{SEATS}. For a given series, the algorithm attempts to find the model with the minimum BIC.

@Liu89 proposed a method for identification of seasonal ARIMA models using a filtering method and certain heuristic rules; this algorithm is used in the \pkg{SCA-Expert} software. Another approach is described by @MP00a, whose algorithm for univariate ARIMA models also allows intervention analysis. It is implemented in the software package ``Time Series Expert'' (\pkg{TSE-AX}). Other algorithms are in use in commercial software, although they are not documented in the public domain literature.
In particular, \pkg{Forecast Pro} [@ForecastPro00] is well-known for its excellent automatic ARIMA algorithm which was used in the M3-forecasting competition [@M3comp00]. Another proprietary algorithm is implemented in \pkg{Autobox} [@Reilly00]. @OL96 provide an early review of some of the commercial software that implement automatic ARIMA forecasting.

## Choosing the model order using unit root tests and the AIC

A non-seasonal ARIMA($p,d,q$) process is given by
$$ \phi(B)(1-B)^dy_{t} = c + \theta(B)\varepsilon_t $$
where $\{\varepsilon_t\}$ is a white noise process with mean zero and variance $\sigma^2$, $B$ is the backshift operator, and $\phi(z)$ and $\theta(z)$ are polynomials of order $p$ and $q$ respectively. To ensure causality and invertibility, it is assumed that $\phi(z)$ and $\theta(z)$ have no roots for $|z|<1$ [@BDbook91]. If $c\ne0$, there is an implied polynomial of order $d$ in the forecast function.

The seasonal ARIMA$(p,d,q)(P,D,Q)_m$ process is given by
$$ \Phi(B^m)\phi(B)(1-B^{m})^D(1-B)^dy_{t} = c + \Theta(B^m)\theta(B)\varepsilon_t $$
where $\Phi(z)$ and $\Theta(z)$ are polynomials of orders $P$ and $Q$ respectively, each containing no roots inside the unit circle. If $c\ne0$, there is an implied polynomial of order $d+D$ in the forecast function.

The main task in automatic ARIMA forecasting is selecting an appropriate model order, that is, the values of $p$, $q$, $P$, $Q$, $D$ and $d$. If $d$ and $D$ are known, we can select the orders $p$, $q$, $P$ and $Q$ via an information criterion such as the AIC:
$$\mbox{AIC} = -2\log(L) + 2(p+q+P+Q+k)$$
where $k=1$ if $c\ne0$ and 0 otherwise, and $L$ is the maximized likelihood of the model fitted to the \emph{differenced} data $(1-B^m)^D(1-B)^dy_t$. The likelihood of the full model for $y_t$ is not actually defined, and so the AIC values for different levels of differencing are not comparable.

One solution to this difficulty is the ``diffuse prior'' approach which is outlined in @DKbook01 and implemented in the \code{arima()} function [@Ripley:2002] in \R. In this approach, the initial values of the time series (before the observed values) are assumed to have mean zero and a large variance. However, choosing $d$ and $D$ by minimizing the AIC using this approach tends to lead to over-differencing. For forecasting purposes, we believe it is better to make as few differences as possible because over-differencing harms forecasts [@SY94] and widens prediction intervals. [Although, see @Hendry97 for a contrary view.]

Consequently, we need some other approach to choose $d$ and $D$. We prefer unit-root tests. However, most unit-root tests are based on a null hypothesis that a unit root exists, which biases results towards more differences rather than fewer differences. For example, variations on the Dickey-Fuller test [@DF81] all assume there is a unit root at lag 1, and the HEGY test of @HEGY90 is based on a null hypothesis that there is a seasonal unit root. Instead, we prefer unit-root tests based on a null hypothesis of no unit root.

For non-seasonal data, we consider ARIMA($p,d,q$) models where $d$ is selected based on successive KPSS unit-root tests [@KPSS92]. That is, we test the data for a unit root; if the test result is significant, we test the differenced data for a unit root; and so on. We stop this procedure when we obtain our first insignificant result. For seasonal data, we consider ARIMA$(p,d,q)(P,D,Q)_m$ models where $m$ is the seasonal frequency and $D=0$ or $D=1$ depending on an extended Canova-Hansen test [@CH95].
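These differencing choices are exposed through the \code{ndiffs()} and \code{nsdiffs()} functions, as in the following sketch (we set the test arguments explicitly, since the defaults may differ between package versions).

```{r unitroot-sketch, eval=FALSE, echo=TRUE}
# Select D for the monthly visitors series via the Canova-Hansen test,
# then select d by successive KPSS tests on the (possibly seasonally
# differenced) data.
D <- nsdiffs(visitors, test = "ch")
d <- ndiffs(if (D == 1) diff(visitors, lag = 12) else visitors,
            test = "kpss")
```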
Canova and Hansen only provide critical values for $2 < m < 13$. Let $C_m$ be the critical value for seasonal period $m$. We plotted $C_m$ against $m$ for values of $m$ up to 365 and noted that they fit the curve $C_m = 0.269 m^{0.928}$ almost exactly. So for $m>12$, we use this simple expression to obtain the critical value.

We note in passing that the null hypothesis for the Canova-Hansen test is not an ARIMA model as it includes seasonal dummy terms. It is a test for whether the seasonal pattern changes sufficiently over time to warrant a seasonal unit root, or whether a stable seasonal pattern modelled using fixed dummy variables is more appropriate. Nevertheless, we have found that the test is still useful for choosing $D$ in a strictly ARIMA framework (i.e., without seasonal dummy variables). If a stable seasonal pattern is selected (i.e., the null hypothesis is not rejected), the seasonality is effectively handled by stationary seasonal AR and MA terms.

After $D$ is selected, we choose $d$ by applying successive KPSS unit-root tests to the seasonally differenced data (if $D=1$) or the original data (if $D=0$). Once $d$ (and possibly $D$) are selected, we proceed to select the values of $p$, $q$, $P$ and $Q$ by minimizing the AIC. We allow $c\ne0$ for models where $d+D < 2$.

## A step-wise procedure for traversing the model space

Suppose we have seasonal data and we consider ARIMA$(p,d,q)(P,D,Q)_m$ models where $p$ and $q$ can take values from 0 to 3, and $P$ and $Q$ can take values from 0 to 1. When $c=0$ there is a total of 288 possible models, and when $c\ne 0$ there is a total of 192 possible models, giving 480 models altogether. If the values of $p$, $d$, $q$, $P$, $D$ and $Q$ are allowed to range more widely, the number of possible models increases rapidly. Consequently, it is often not feasible to simply fit every potential model and choose the one with the lowest AIC. Instead, we need a way of traversing the space of models efficiently in order to arrive at the model with the lowest AIC value. We propose a step-wise algorithm as follows.
\begin{description}
\item[Step 1:] We try four possible models to start with.
\begin{itemize}
\item ARIMA($2,d,2$) if $m=1$ and ARIMA($2,d,2)(1,D,1)$ if $m>1$.
\item ARIMA($0,d,0$) if $m=1$ and ARIMA($0,d,0)(0,D,0)$ if $m>1$.
\item ARIMA($1,d,0$) if $m=1$ and ARIMA($1,d,0)(1,D,0)$ if $m>1$.
\item ARIMA($0,d,1$) if $m=1$ and ARIMA($0,d,1)(0,D,1)$ if $m>1$.
\end{itemize}
If $d+D \le 1$, these models are fitted with $c\ne0$. Otherwise, we set $c=0$. Of these four models, we select the one with the smallest AIC value. This is called the ``current'' model and is denoted by ARIMA($p,d,q$) if $m=1$ or ARIMA($p,d,q)(P,D,Q)_m$ if $m>1$.
\item[Step 2:] We consider up to seventeen variations on the current model:
\begin{itemize}
\item where one of $p$, $q$, $P$ and $Q$ is allowed to vary by $\pm1$ from the current model;
\item where $p$ and $q$ both vary by $\pm1$ from the current model;
\item where $P$ and $Q$ both vary by $\pm1$ from the current model;
\item where the constant $c$ is included if the current model has $c=0$ or excluded if the current model has $c\ne0$.
\end{itemize}
Whenever a model with lower AIC is found, it becomes the new ``current'' model and the procedure is repeated. This process finishes when we cannot find a model close to the current model with lower AIC.
\end{description}

There are several constraints on the fitted models to avoid problems with convergence or near unit roots. The constraints are outlined below.
\begin{compactitem}\itemsep=8pt
\item The values of $p$ and $q$ are not allowed to exceed specified upper bounds (with default values of 5 in each case).
\item The values of $P$ and $Q$ are not allowed to exceed specified upper bounds (with default values of 2 in each case).
\item We reject any model which is ``close'' to non-invertible or non-causal. Specifically, we compute the roots of $\phi(B)\Phi(B)$ and $\theta(B)\Theta(B)$. If either polynomial has a root that is smaller than 1.001 in absolute value, the model is rejected.
\item If there are any errors arising in the non-linear optimization routine used for estimation, the model is rejected. The rationale here is that any model that is difficult to fit is probably not a good model for the data.
\end{compactitem}

The algorithm is guaranteed to return a valid model because the model space is finite and at least one of the starting models will be accepted (the model with no AR or MA parameters). The selected model is used to produce forecasts.

## Comparisons with exponential smoothing

There is a widespread myth that ARIMA models are more general than exponential smoothing. This is not true. The two classes of models overlap. The linear exponential smoothing models are all special cases of ARIMA models---the equivalences are discussed in @HAA08. However, the non-linear exponential smoothing models have no equivalent ARIMA counterparts. On the other hand, there are also many ARIMA models that have no exponential smoothing counterparts. Thus, the two model classes overlap and are complementary; each has its strengths and weaknesses.

The exponential smoothing state space models are all non-stationary. Models with seasonality or non-damped trend (or both) have two unit roots; all other models---that is, non-seasonal models with either no trend or damped trend---have one unit root. It is possible to define a stationary model with similar characteristics to exponential smoothing, but this is not normally done. The philosophy of exponential smoothing is that the world is non-stationary. So if a stationary model is required, ARIMA models are better.

One advantage of the exponential smoothing models is that they can be non-linear. So time series that exhibit non-linear characteristics, including heteroscedasticity, may be better modelled using exponential smoothing state space models.

For seasonal data, there are many more ARIMA models than the 30 possible models in the exponential smoothing class of Section \ref{sec:expsmooth}. It may be thought that the larger model class is advantageous. However, the results in @HKSG02 show that the exponential smoothing models performed better than the ARIMA models for the seasonal M3 competition data. (For the annual M3 data, the ARIMA models performed better.) In a discussion of these results, @Hyndman01 speculates that the larger model space of ARIMA models actually harms forecasting performance because it introduces additional uncertainty. The smaller exponential smoothing class is sufficiently rich to capture the dynamics of almost all real business and economic time series.

# The forecast package {#sec:package}

The algorithms and modelling frameworks for automatic univariate time series forecasting are implemented in the \pkg{forecast} package in \R. We illustrate the methods using the following four real time series shown in Figure \ref{fig:etsexamples}.
\begin{compactitem}
\item Figure \ref{fig:etsexamples}(a) shows 125 monthly US government bond yields (percent per annum) from January 1994 to May 2004.
\item Figure \ref{fig:etsexamples}(b) displays 55 observations of annual US net electricity generation (billion kwh) for 1949 through 2003. \item Figure \ref{fig:etsexamples}(c) presents 113 quarterly observations of passenger motor vehicle production in the U.K. (thousands of cars) for the first quarter of 1977 through the first quarter of 2005. \item Figure \ref{fig:etsexamples}(d) shows 240 monthly observations of the number of short term overseas visitors to Australia from May 1985 to April 2005. \end{compactitem} ```{r etsexamples, fig.height=7, fig.width=9, echo=FALSE, fig.cap="Four time series showing point forecasts and 80\\% \\& 95\\% prediction intervals obtained using exponential smoothing state space models."} par(mfrow = c(2,2)) mod1 <- ets(bonds) mod2 <- ets(usnetelec) mod3 <- ets(ukcars) mod4 <- ets(visitors) plot(forecast(mod1), main="(a) US 10-year bonds yield", xlab="Year", ylab="Percentage per annum") plot(forecast(mod2), main="(b) US net electricity generation", xlab="Year", ylab="Billion kwh") plot(forecast(mod3), main="(c) UK passenger motor vehicle production", xlab="Year", ylab="Thousands of cars") plot(forecast(mod4), main="(d) Overseas visitors to Australia", xlab="Year", ylab="Thousands of people") ``` ```{r etsnames, echo=FALSE} etsnames <- c(mod1$method, mod2$method, mod3$method, mod4$method) etsnames <- gsub("Ad","A\\\\damped",etsnames) ``` ## Implementation of the automatic exponential smoothing algorithm The innovations state space modelling framework described in Section \ref{sec:expsmooth} is implemented via the \code{ets()} function in the \pkg{forecast} package. (The default settings of \code{ets()} do not allow models with multiplicative trend, but they can be included using \code{allow.multiplicative.trend=TRUE}.) 
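For example, multiplicative trend models can be admitted into the automatic search as follows (a sketch; whether such a model is actually selected depends on the AIC).

```{r ets-multtrend-sketch, eval=FALSE, echo=TRUE}
# Admit M and Md trend components into the model search.
fit <- ets(usnetelec, allow.multiplicative.trend = TRUE)
fit$method # the selected model, e.g. "ETS(M,Md,N)"
```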
The models chosen via the algorithm for the four data sets were: \begin{compactitem} \item `r etsnames[1]` for monthly US 10-year bonds yield\\ ($\alpha=`r format(coef(mod1)['alpha'], digits=4, nsmall=4)`$, $\beta=`r format(coef(mod1)['beta'], digits=4, nsmall=4)`$, $\phi=`r format(coef(mod1)['phi'], digits=4, nsmall=4)`$, $\ell_0 = `r format(coef(mod1)['l'], digits=4, nsmall=4)`$, $b_0=`r format(coef(mod1)['b'], digits=4, nsmall=4)`$); \item `r etsnames[2]` for annual US net electricity generation\\ ($\alpha=`r format(coef(mod2)['alpha'], digits=4, nsmall=4)`$, $\beta=`r format(coef(mod2)['beta'], digits=4, nsmall=4)`$, $\ell_0 = `r format(coef(mod2)['l'], digits=4, nsmall=4)`$, $b_0=`r format(coef(mod2)['b'], digits=4, nsmall=4)`$); \item `r etsnames[3]` for quarterly UK motor vehicle production\\ ($\alpha=`r format(coef(mod3)['alpha'], digits=4, nsmall=4)`$, $\gamma=`r format(coef(mod3)['gamma'], digits=4, nsmall=4)`$, $\ell_0 = `r format(coef(mod3)['l'], digits=4, nsmall=4)`$, $s_{-3}=`r format(-sum(coef(mod3)[c('s0','s1','s2')]), digits=4, nsmall=4)`$, $s_{-2}=`r format(coef(mod3)['s2'], digits=4, nsmall=4)`$, $s_{-1}=`r format(coef(mod3)['s1'], digits=4, nsmall=4)`$, $s_0=`r format(coef(mod3)['s0'], digits=4, nsmall=4)`$); \item `r etsnames[4]` for monthly Australian overseas visitors\\ ($\alpha=`r format(coef(mod4)['alpha'], digits=4, nsmall=4)`$, $\beta=`r format(coef(mod4)['beta'], digits=2, nsmall=4)`$, $\gamma=`r format(coef(mod4)['gamma'], digits=4, nsmall=4)`$, $\ell_0 = `r format(coef(mod4)['l'], digits=4, nsmall=4)`$, $b_0 = `r format(coef(mod4)['b'], digits=4, nsmall=4)`$, $s_{-11}=`r format(12-sum(tail(coef(mod4),11)), digits=4, nsmall=4)`$, $s_{-10}=`r format(coef(mod4)['s10'], digits=4, nsmall=4)`$, $s_{-9}=`r format(coef(mod4)['s9'], digits=4, nsmall=4)`$, $s_{-8}=`r format(coef(mod4)['s8'], digits=4, nsmall=4)`$, $s_{-7}=`r format(coef(mod4)['s7'], digits=4, nsmall=4)`$, $s_{-6}=`r format(coef(mod4)['s6'], digits=4, nsmall=4)`$, $s_{-5}=`r format(coef(mod4)['s5'], digits=4, nsmall=4)`$, $s_{-4}=`r format(coef(mod4)['s4'], digits=4, nsmall=4)`$, $s_{-3}=`r format(coef(mod4)['s3'], digits=4, nsmall=4)`$, $s_{-2}=`r format(coef(mod4)['s2'], digits=4, nsmall=4)`$, $s_{-1}=`r format(coef(mod4)['s1'], digits=4, nsmall=4)`$, $s_0=`r format(coef(mod4)['s0'], digits=4, nsmall=4)`$). \end{compactitem} Although there is a lot of computation involved, it can be handled remarkably quickly on modern computers. Each of the forecasts shown in Figure \ref{fig:etsexamples} took no more than a few seconds on a standard PC. The US electricity generation series took the longest as there are no analytical prediction intervals available for the ETS(M,M\damped,N) model. Consequently, the prediction intervals for this series were computed using simulation of 5000 future sample paths. To apply the algorithm to the US net electricity generation time series \code{usnetelec}, we use the following command. ```{r ets-usnetelec, echo=TRUE} etsfit <- ets(usnetelec) ``` The object \code{etsfit} is of class ``\code{ets}'' and contains all of the necessary information about the fitted model including model parameters, the value of the state vector $\bm{x}_t$ for all $t$, residuals and so on. Printing the \code{etsfit} object shows the main items of interest. ```{r ets-usnetelec-print,echo=TRUE} etsfit ``` Some goodness-of-fit measures [defined in @HK06] are obtained using \code{accuracy()}. 
```{r ets-usnetelec-accuracy,eval=TRUE,echo=TRUE} accuracy(etsfit) ``` There are also \code{coef()}, \code{plot()}, \code{summary()}, \code{residuals()}, \code{fitted()} and \code{simulate()} methods for objects of class ``\code{ets}''. The \code{plot()} function shows time plots of the original time series along with the extracted components (level, growth and seasonal). The \code{forecast()} function computes the required forecasts which are then plotted as in Figure \ref{fig:etsexamples}(b). ```{r ets-usnetelec-fcast, fig.height=5, fig.width=8, message=FALSE, warning=FALSE, include=FALSE, output=FALSE} fcast <- forecast(etsfit) plot(fcast) ``` Printing the \code{fcast} object gives a table showing the prediction intervals. ```{r ets-usnetelec-fcast-print,eval=TRUE,echo=TRUE} fcast ``` The \code{ets()} function also provides the useful feature of applying a fitted model to a new data set. For example, we could withhold 10 observations from the \code{usnetelec} data set when fitting, then compute the one-step forecast errors for the out-of-sample data. ```{r ets-usnetelec-newdata,eval=FALSE,echo=TRUE} fit <- ets(usnetelec[1:45]) test <- ets(usnetelec[46:55], model = fit) accuracy(test) ``` We can also look at the measures of forecast accuracy where the forecasts are based on only the fitting data. ```{r ets-usnetelec-fcast-accuracy,eval=FALSE,echo=TRUE} accuracy(forecast(fit,10), usnetelec[46:55]) ``` ## The HoltWinters() function There is another implementation of exponential smoothing in \R\ via the \code{HoltWinters()} function [@Meyer:2002] in the \pkg{stats} package. It implements only the (N,N), (A,N), (A,A) and (A,M) methods. The initial states $\bm{x}_0$ are fixed using a heuristic algorithm. Because of the way the initial states are estimated, a full three years of seasonal data are required to implement the seasonal forecasts using \code{HoltWinters()}. (See @shortseasonal for the minimal sample size required.) The smoothing parameters are optimized by minimizing the average squared prediction errors, which is equivalent to minimizing \eqref{likelihood} in the case of additive errors. There is a \code{predict()} method for the resulting object which can produce point forecasts and prediction intervals. Although it is nowhere documented, it appears that the prediction intervals produced by \code{predict()} for an object of class \code{HoltWinters} are based on an equivalent ARIMA model in the case of the (N,N), (A,N) and (A,A) methods, assuming additive errors. These prediction intervals are equivalent to the prediction intervals that arise from the (A,N,N), (A,A,N) and (A,A,A) state space models. For the (A,M) method, the prediction interval provided by \code{predict()} appears to be based on @CY91 which is an approximation to the true prediction interval arising from the (A,A,M) model. Prediction intervals with multiplicative errors are not possible using the \code{HoltWinters()} function. 
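The two implementations can be compared directly, as in the following sketch (object names are illustrative).

```{r hw-vs-ets-sketch, eval=FALSE, echo=TRUE}
# stats::HoltWinters() uses heuristic initial states and minimizes
# the one-step squared prediction errors ...
fit.hw <- HoltWinters(ukcars)
predict(fit.hw, n.ahead = 8, prediction.interval = TRUE)
# ... whereas ets() estimates the initial states and the smoothing
# parameters jointly by maximum likelihood.
fit.ets <- ets(ukcars, model = "AAA", damped = FALSE)
forecast(fit.ets, h = 8)
```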
## Implementation of the automatic ARIMA algorithm

```{r arimaexamples, fig.height=7, fig.width=9, echo=FALSE, fig.cap="Four time series showing point forecasts and 80\\% \\& 95\\% prediction intervals obtained using ARIMA models."}
mod1 <- auto.arima(bonds, seasonal=FALSE, approximation=FALSE)
mod2 <- auto.arima(usnetelec)
mod3 <- auto.arima(ukcars)
mod4 <- auto.arima(visitors)
par(mfrow = c(2,2))
plot(forecast(mod1), main="(a) US 10-year bonds yield", xlab="Year", ylab="Percentage per annum")
plot(forecast(mod2), main="(b) US net electricity generation", xlab="Year", ylab="Billion kwh")
plot(forecast(mod3), main="(c) UK passenger motor vehicle production", xlab="Year", ylab="Thousands of cars")
plot(forecast(mod4), main="(d) Overseas visitors to Australia", xlab="Year", ylab="Thousands of people")
```

The algorithm of Section \ref{sec:arima} is applied to the same four time series. Unlike the exponential smoothing algorithm, the ARIMA class of models assumes homoscedasticity, which is not always appropriate. Consequently, transformations are sometimes necessary. For these four time series, we model the raw data for series (a)--(c), but the logged data for series (d). The prediction intervals are back-transformed with the point forecasts to preserve the probability coverage.

To apply this algorithm to the US net electricity generation time series \code{usnetelec}, we use the following commands.

```{r arima-auto-fcast,eval=TRUE,echo=TRUE,fig.show="hide"}
arimafit <- auto.arima(usnetelec)
fcast <- forecast(arimafit)
plot(fcast)
```

```{r arimanames, echo=FALSE}
# Convert character strings to latex
arimanames <- c(as.character(mod1), as.character(mod2), as.character(mod3), as.character(mod4))
arimanames <- gsub("\\[([0-9]*)\\]", "$_{\\1}$", arimanames)
```

The function \code{auto.arima()} implements the algorithm of Section \ref{sec:arima} and returns an object of class \code{Arima}. The resulting forecasts are shown in Figure \ref{fig:arimaexamples}. The fitted models are as follows:
\begin{compactitem}
\item `r arimanames[1]` for monthly US 10-year bonds yield\\ ($\theta_1= `r format(coef(mod1)['ma1'], digits=4, nsmall=4)`$);
\item `r arimanames[2]` for annual US net electricity generation\\ ($\phi_1= `r format(coef(mod2)['ar1'], digits=4, nsmall=4)`$; $\phi_2= `r format(coef(mod2)['ar2'], digits=4, nsmall=4)`$; $\theta_1= `r format(coef(mod2)['ma1'], digits=4, nsmall=4)`$; $\theta_2= `r format(coef(mod2)['ma2'], digits=4, nsmall=4)`$; $c= `r format(coef(mod2)['drift'], digits=4, nsmall=4)`$);
\item `r arimanames[3]` for quarterly UK motor vehicle production\\ ($\phi_1= `r format(coef(mod3)['ar1'], digits=4, nsmall=4)`$; $\phi_2= `r format(coef(mod3)['ar2'], digits=4, nsmall=4)`$; $\Phi_1= `r format(coef(mod3)['sar1'], digits=4, nsmall=4)`$; $\Phi_2= `r format(coef(mod3)['sar2'], digits=4, nsmall=4)`$);
\item `r arimanames[4]` for monthly Australian overseas visitors\\ ($\phi_1= `r format(coef(mod4)['ar1'], digits=4, nsmall=4)`$; $\theta_1= `r format(coef(mod4)['ma1'], digits=4, nsmall=4)`$; $\Theta_1= `r format(coef(mod4)['sma1'], digits=4, nsmall=4)`$; $\Theta_2= `r format(coef(mod4)['sma2'], digits=4, nsmall=4)`$; $c= `r format(coef(mod4)['drift'], digits=4, nsmall=4)`$).
\end{compactitem}
Note that the \R\ parameterization has $\theta(B) = (1 + \theta_1B + \dots + \theta_qB^q)$ and $\phi(B) = (1 - \phi_1B - \dots - \phi_pB^p)$, and similarly for the seasonal terms.

A summary of the forecasts is available, part of which is shown below.
``` Forecast method: ARIMA(2,1,2) with drift Series: usnetelec Coefficients: ar1 ar2 ma1 ma2 drift -1.3032 -0.4332 1.5284 0.8340 66.1585 s.e. 0.2122 0.2084 0.1417 0.1185 7.5595 sigma^2 estimated as 2262: log likelihood=-283.34 AIC=578.67 AICc=580.46 BIC=590.61 Error measures: ME RMSE MAE MPE MAPE MASE ACF1 Training set 0.046402 44.894 32.333 -0.61771 2.1012 0.45813 0.022492 Forecasts: Point Forecast Lo 80 Hi 80 Lo 95 Hi 95 2004 3968.957 3908.002 4029.912 3875.734 4062.180 2005 3970.350 3873.950 4066.751 3822.919 4117.782 2006 4097.171 3971.114 4223.228 3904.383 4289.959 2007 4112.332 3969.691 4254.973 3894.182 4330.482 2008 4218.671 4053.751 4383.591 3966.448 4470.894 2009 4254.559 4076.108 4433.010 3981.641 4527.476 2010 4342.760 4147.088 4538.431 4043.505 4642.014 2011 4393.306 4185.211 4601.401 4075.052 4711.560 2012 4470.261 4248.068 4692.455 4130.446 4810.077 2013 4529.113 4295.305 4762.920 4171.535 4886.690 ``` The training set error measures for the two models are very similar. Note that the information criteria are not comparable. The \pkg{forecast} package also contains the function \code{Arima()} which is largely a wrapper to the \code{arima()} function in the \pkg{stats} package. The \code{Arima()} function in the \pkg{forecast} package makes it easier to include a drift term when $d+D=1$. (Setting \code{include.mean=TRUE} in the \code{arima()} function from the \pkg{stats} package will only work when $d+D=0$.) It also provides the facility for fitting an existing ARIMA model to a new data set (as was demonstrated for the \code{ets()} function earlier). One-step forecasts for ARIMA models are now available via a \code{fitted()} function. We also provide a new function \code{arima.errors()} which returns the original time series after adjusting for regression variables. If there are no regression variables in the ARIMA model, then the errors will be identical to the original series. If there are regression variables in the ARIMA model, then the errors will be equal to the original series minus the effect of the regression variables, but leaving in the serial correlation that is modelled with the AR and MA terms. In contrast, \code{residuals()} provides true residuals, removing the AR and MA terms as well. The generic functions \code{summary()}, \code{print()}, \code{fitted()} and \code{forecast()} apply to models obtained from either the \code{Arima()} or \code{arima()} functions. ## The forecast() function The \code{forecast()} function is generic and has S3 methods for a wide range of time series models. It computes point forecasts and prediction intervals from the time series model. Methods exist for models fitted using \code{ets()}, \code{auto.arima()}, \code{Arima()}, \code{arima()}, \code{ar()}, \code{HoltWinters()} and \texttt{StructTS()}. There is also a method for a \code{ts} object. If a time series object is passed as the first argument to \code{forecast()}, the function will produce forecasts based on the exponential smoothing algorithm of Section \ref{sec:expsmooth}. In most cases, there is an existing \code{predict()} function which is intended to do much the same thing. Unfortunately, the resulting objects from the \code{predict()} function contain different information in each case and so it is not possible to build generic functions (such as \code{plot()} and \code{summary()}) for the results. So, instead, \code{forecast()} acts as a wrapper to \code{predict()}, and packages the information obtained in a common format (the \code{forecast} class). 
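The following sketch illustrates this common interface for three quite different model classes (object names are illustrative).

```{r forecast-wrapper-sketch, eval=FALSE, echo=TRUE}
# Models fitted by different functions all forecast into the same
# class, so the same plot(), print() and summary() methods apply.
f1 <- forecast(ar(usnetelec), h = 10)
f2 <- forecast(HoltWinters(ukcars), h = 8)
f3 <- forecast(StructTS(usnetelec, type = "trend"), h = 10)
sapply(list(f1, f2, f3), class) # all "forecast"
```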
We also define a default \code{predict()} method which is used when no existing \code{predict()} function exists, and calls the relevant \code{forecast()} function. Thus, \code{predict()} methods parallel \code{forecast()} methods, but the latter provide consistent output that is more usable.

\subsection[The forecast class]{The \code{forecast} class}

The output from the \code{forecast()} function is an object of class ``\code{forecast}'' and includes at least the following information:
\begin{compactitem}
\item the original series;
\item point forecasts;
\item prediction intervals of specified coverage;
\item the forecasting method used and information about the fitted model;
\item residuals from the fitted model;
\item one-step forecasts from the fitted model for the period of the observed data.
\end{compactitem}

There are \code{print()}, \code{plot()} and \code{summary()} methods for the ``\code{forecast}'' class. Figures \ref{fig:etsexamples} and \ref{fig:arimaexamples} were produced using the \code{plot()} method. The prediction intervals are, by default, computed for 80\% and 95\% coverage, although other values are possible if requested. Fan charts [@Wallis99] are possible using the combination \verb|plot(forecast(model.object, fan = TRUE))|.

## Other functions {#sec:other}

We now briefly describe some of the other features of the \pkg{forecast} package. Each of the following functions produces an object of class ``\code{forecast}''.

\code{croston()} : implements the method of @Croston72 for intermittent demand forecasting. In this method, the time series is decomposed into two separate sequences: the non-zero values and the time intervals between non-zero values. These are then independently forecast using simple exponential smoothing, and the forecasts of the original series are obtained as ratios of the two sets of forecasts. No prediction intervals are provided because there is no underlying stochastic model [@SH05].

\code{thetaf()} : provides forecasts from the Theta method [@AN00]. @HB03 showed that these are equivalent to a special case of simple exponential smoothing with drift.

\code{splinef()} : gives cubic-spline forecasts, based on fitting a cubic spline to the historical data and extrapolating it linearly. The details of this method, and the associated prediction intervals, are discussed in @HKPB05.

\code{meanf()} : returns forecasts based on the historical mean.

\code{rwf()} : gives ``naïve'' forecasts equal to the most recent observation, assuming a random walk model. This function also allows forecasting using a random walk with drift.

In addition, there are some new plotting functions for time series.

\code{tsdisplay()} : provides a time plot along with an ACF and PACF.

\code{seasonplot()} : produces a seasonal plot as described in @MWH3.

\newpage

# Bibliography
forecast/R/0000755000176200001440000000000014456202551012265 5ustar liggesusersforecast/R/residuals.R0000644000176200001440000001377114150370574014416 0ustar liggesusers#' Residuals for various time series models #' #' Returns time series of residuals from a fitted model. #' #' Innovation residuals correspond to the white noise process that drives the #' evolution of the time series model. Response residuals are the difference #' between the observations and the fitted values (equivalent to \code{h}-step #' forecasts). For functions with no \code{h} argument, \code{h=1}. For #' homoscedastic models, the innovation residuals and the response residuals #' for \code{h=1} are identical.
Regression residuals are available for #' regression models with ARIMA errors, and are equal to the original data #' minus the effect of the regression variables. If there are no regression #' variables, the errors will be identical to the original series (possibly #' adjusted to have zero mean). \code{arima.errors} is a deprecated function #' which is identical to \code{residuals.Arima(object, type="regression")}. #' For \code{nnetar} objects, when \code{type="innovations"} and \code{lambda} is used, a #' matrix of time-series consisting of the residuals from each of the fitted neural networks is returned. #' #' @param object An object containing a time series model of class \code{ar}, #' \code{Arima}, \code{bats}, \code{ets}, \code{arfima}, \code{nnetar} or #' \code{stlm}. #' If \code{object} is of class \code{forecast}, then the function will return #' \code{object$residuals} if it exists, otherwise it returns the differences between #' the observations and their fitted values. #' @param type Type of residual. #' @param h If \code{type='response'}, then the fitted values are computed for #' \code{h}-step forecasts. #' @param ... Other arguments not used. #' @return A \code{ts} object. #' @author Rob J Hyndman #' @seealso \code{\link{fitted.Arima}}, \code{\link{checkresiduals}}. #' @keywords ts #' #' @export residuals.forecast <- function(object, type=c("innovation", "response"), ...) { type <- match.arg(type) if (type == "innovation") { object$residuals } else { getResponse(object) - fitted(object) } } #' @rdname residuals.forecast #' @export residuals.ar <- function(object, type=c("innovation", "response"), ...) { type <- match.arg(type) # innovation and response residuals are the same for AR models object$resid } #' @rdname residuals.forecast #' #' @aliases residuals.forecast_ARIMA #' @examples #' fit <- Arima(lynx,order=c(4,0,0), lambda=0.5) #' #' plot(residuals(fit)) #' plot(residuals(fit, type='response')) #' @export residuals.Arima <- function(object, type=c("innovation", "response", "regression"), h=1, ...) { type <- match.arg(type) if (type == "innovation") { object$residuals } else if (type == "response") { getResponse(object) - fitted(object, h = h) } else { x <- getResponse(object) if (!is.null(object$lambda)) { x <- BoxCox(x, object$lambda) } xreg <- getxreg(object) # Remove intercept if (is.element("intercept", names(object$coef))) { xreg <- cbind(rep(1, length(x)), xreg) } # Return errors if (is.null(xreg)) { return(x) } else { norder <- sum(object$arma[1:4]) return(ts( c(x - xreg %*% as.matrix(object$coef[(norder + 1):length(object$coef)])), frequency = frequency(x), start = start(x) )) } } } #' @export residuals.forecast_ARIMA <- residuals.Arima #' @rdname residuals.forecast #' @export residuals.bats <- function(object, type=c("innovation", "response"), h=1, ...) { type <- match.arg(type) if (type == "innovation") { object$errors } else { getResponse(object) - fitted(object, h = h) } } #' @rdname residuals.forecast #' @export residuals.tbats <- function(object, type=c("innovation", "response"), h=1, ...) { type <- match.arg(type) if (type == "innovation") { object$errors } else { getResponse(object) - fitted(object, h = h) } } #' @rdname residuals.forecast #' @export residuals.ets <- function(object, type=c("innovation", "response"), h=1, ...) 
{ type <- match.arg(type) if (type == "innovation") { object$residuals } else { getResponse(object) - fitted(object, h = h) } } #' @rdname residuals.forecast #' @export residuals.ARFIMA <- function(object, type=c("innovation", "response"), ...) { type <- match.arg(type) if (type == "innovation") { if (!is.null(object$residuals)) { # Object produced by arfima() return(object$residuals) } else # Object produced by fracdiff() { if (is.element("x", names(object))) { x <- object$x } else { x <- eval.parent(parse(text = as.character(object$call)[2])) } if (!is.null(object$lambda)) { x <- BoxCox(x, object$lambda) } y <- fracdiff::diffseries(x - mean(x), d = object$d) fit <- arima(y, order = c(length(object$ar), 0, length(object$ma)), include.mean = FALSE, fixed = c(object$ar, -object$ma)) return(residuals(fit, type = "innovation")) } } else { getResponse(object) - fitted(object) } } #' @rdname residuals.forecast #' @export residuals.nnetar <- function(object, type=c("innovation", "response"), h=1, ...) { type <- match.arg(type) if (type == "innovation" && !is.null(object$lambda)) { res <- matrix(unlist(lapply(object$model, residuals)), ncol = length(object$model)) if (!is.null(object$scalex$scale)) { res <- res * object$scalex$scale } } else { res <- getResponse(object) - fitted(object, h = h) } tspx <- tsp(getResponse(object)) res <- ts(res, frequency = tspx[3L], end = tspx[2L]) return(res) } #' @rdname residuals.forecast #' @export residuals.stlm <- function(object, type=c("innovation", "response"), ...) { type <- match.arg(type) if (type == "innovation") { object$residuals } else { getResponse(object) - fitted(object) } } #' @rdname residuals.forecast #' @export residuals.tslm <- function(object, type=c("innovation", "response", "deviance"), ...) { type <- match.arg(type) if (type == "innovation" || type == "deviance") { object$residuals } else { getResponse(object) - fitted(object) } } forecast/R/adjustSeasonalSeeds.R0000644000176200001440000001231414323125536016355 0ustar liggesusers############################################################################### # TBATS code cutWTBATS <- function(use.beta, w.tilda.transpose, seasonal.periods, p=0, q=0) { mask.vector <- numeric(length(seasonal.periods)) i <- length(seasonal.periods) while (i > 1) { for (j in 1:(i - 1)) { if ((seasonal.periods[i] %% seasonal.periods[j]) == 0) { mask.vector[j] <- 1 } } i <- i - 1 } w.pos.counter <- 1 w.pos <- 1 if (use.beta) { w.pos <- w.pos + 1 } for (s in seasonal.periods) { if (mask.vector[w.pos.counter] == 1) { w.tilda.transpose <- w.tilda.transpose[, -((w.pos + 1):(w.pos + s))] } else if (mask.vector[w.pos.counter] < 0) { # Cut more than one off w.pos <- w.pos + s w.tilda.transpose <- w.tilda.transpose[, -c((w.pos + mask.vector[w.pos.counter] + 1):w.pos)] w.pos <- w.pos + mask.vector[w.pos.counter] } else { w.pos <- w.pos + s w.tilda.transpose <- w.tilda.transpose[, -w.pos] w.pos <- w.pos - 1 } w.pos.counter <- w.pos.counter + 1 } if ((p != 0) | (q != 0)) { end.cut <- ncol(w.tilda.transpose) start.cut <- end.cut - (p + q) + 1 w.tilda.transpose <- w.tilda.transpose[, -c(start.cut:end.cut)] } return(list(matrix = w.tilda.transpose, mask.vector = mask.vector)) } # BATS code below ######### cutW <- function(use.beta, w.tilda.transpose, seasonal.periods, p=0, q=0) { mask.vector <- numeric(length(seasonal.periods)) i <- length(seasonal.periods) while (i > 1) { for (j in 1:(i - 1)) { if ((seasonal.periods[i] %% seasonal.periods[j]) == 0) { mask.vector[j] <- 1 } } i <- i - 1 } if (length(seasonal.periods) > 1) { 
for (s in length(seasonal.periods):2) { for (j in (s - 1):1) { hcf <- findGCD(seasonal.periods[s], seasonal.periods[j]) if (hcf != 1) { if ((mask.vector[s] != 1) && (mask.vector[j] != 1)) { mask.vector[s] <- hcf * -1 } } } } } w.pos.counter <- 1 w.pos <- 1 if (use.beta) { w.pos <- w.pos + 1 } for (s in seasonal.periods) { if (mask.vector[w.pos.counter] == 1) { w.tilda.transpose <- w.tilda.transpose[, -((w.pos + 1):(w.pos + s))] } else if (mask.vector[w.pos.counter] < 0) { # Cut more than one off w.pos <- w.pos + s w.tilda.transpose <- w.tilda.transpose[, -c((w.pos + mask.vector[w.pos.counter] + 1):w.pos)] w.pos <- w.pos + mask.vector[w.pos.counter] } else { w.pos <- w.pos + s w.tilda.transpose <- w.tilda.transpose[, -w.pos] w.pos <- w.pos - 1 } w.pos.counter <- w.pos.counter + 1 } if ((p != 0) | (q != 0)) { end.cut <- ncol(w.tilda.transpose) start.cut <- end.cut - (p + q) + 1 w.tilda.transpose <- w.tilda.transpose[, -c(start.cut:end.cut)] } return(list(matrix = w.tilda.transpose, mask.vector = mask.vector)) } calcSeasonalSeeds <- function(use.beta, coefs, seasonal.periods, mask.vector, p=0, q=0) { x.pos.counter <- 1 sum.k <- 0 if (use.beta) { x.pos <- 2 new.x.nought <- matrix(coefs[1:2], nrow = 2, ncol = 1) } else { x.pos <- 1 new.x.nought <- matrix(coefs[1], nrow = 1, ncol = 1) } x.pos.counter <- 1 for (s in seasonal.periods) { if (mask.vector[x.pos.counter] == 1) { # Make a vector of zeros season <- matrix(0, nrow = s, ncol = 1) new.x.nought <- rbind(new.x.nought, season) } else if (mask.vector[x.pos.counter] < 0) { extract <- coefs[(x.pos + 1):(x.pos + s + mask.vector[x.pos.counter])] # print("extract:") # print(extract) # Find k k <- sum(extract) # update sum.k sum.k <- sum.k + k / s # create the current.periodicity vector current.periodicity <- extract - k / s current.periodicity <- matrix(current.periodicity, nrow = length(current.periodicity), ncol = 1) additional <- matrix(-k / s, nrow = (-1 * mask.vector[x.pos.counter]), ncol = 1) current.periodicity <- rbind(current.periodicity, additional) new.x.nought <- rbind(new.x.nought, current.periodicity) x.pos <- x.pos + s + mask.vector[x.pos.counter] } else { # Find k k <- sum(coefs[(x.pos + 1):(x.pos + s - 1)]) # update sum.k sum.k <- sum.k + k / s # create the current.periodicity vector current.periodicity <- coefs[(x.pos + 1):(x.pos + s - 1)] - k / s current.periodicity <- c(current.periodicity, -k / s) current.periodicity <- matrix(current.periodicity, nrow = length(current.periodicity), ncol = 1) new.x.nought <- rbind(new.x.nought, current.periodicity) x.pos <- x.pos + s - 1 } # Adjust L(t) x.pos.counter <- x.pos.counter + 1 } # print(new.x.nought) # Lastly, get the arma error seed states, if they exist. if ((p != 0) | (q != 0)) { arma.seed.states <- numeric((p + q)) arma.seed.states <- matrix(arma.seed.states, nrow = length(arma.seed.states), ncol = 1) # Final value of x.nought x.nought <- rbind(new.x.nought, arma.seed.states) } else { x.nought <- new.x.nought } return(x.nought) } findGCD <- function(larger, smaller) { remainder <- larger %% smaller if (remainder != 0) { return(findGCD(smaller, remainder)) } else { return(smaller) } } forecast/R/findfrequency.R0000644000176200001440000000376614323125536015266 0ustar liggesusers## A function determining the appropriate period, if the data is of unknown period ## Written by Rob Hyndman #' Find dominant frequency of a time series #' #' \code{findfrequency} returns the period of the dominant frequency of a time #' series. For seasonal data, it will return the seasonal period. 
For cyclic #' data, it will return the average cycle length. #' #' The dominant frequency is determined from a spectral analysis of the time #' series. First, a linear trend is removed, then the spectral density function #' is estimated from the best fitting autoregressive model (based on the AIC). #' If there is a large (possibly local) maximum in the spectral density #' function at frequency \eqn{f}, then the function will return the period #' \eqn{1/f} (rounded to the nearest integer). If no such dominant frequency #' can be found, the function will return 1. #' #' @param x a numeric vector or time series of class \code{ts} #' @return an integer value #' @author Rob J Hyndman #' @keywords ts #' @examples #' #' findfrequency(USAccDeaths) # Monthly data #' findfrequency(taylor) # Half-hourly data #' findfrequency(lynx) # Annual data #' #' @export findfrequency <- function(x) { n <- length(x) x <- as.ts(x) # Remove trend from data x <- residuals(tslm(x ~ trend)) # Compute spectrum by fitting ar model to largest section of x n.freq <- 500 spec <- spec.ar(c(na.contiguous(x)), plot = FALSE, n.freq = n.freq) if (max(spec$spec) > 10) # Arbitrary threshold chosen by trial and error. { period <- floor(1 / spec$freq[which.max(spec$spec)] + 0.5) if (period == Inf) # Find next local maximum { j <- which(diff(spec$spec) > 0) if (length(j) > 0) { nextmax <- j[1] + which.max(spec$spec[(j[1] + 1):n.freq]) if (nextmax < length(spec$freq)) { period <- floor(1 / spec$freq[nextmax] + 0.5) } else { period <- 1L } } else { period <- 1L } } } else { period <- 1L } return(as.integer(period)) } forecast/R/forecastBATS.R0000644000176200001440000001707414323125536014701 0ustar liggesusers#' Forecasting using BATS and TBATS models #' #' Forecasts \code{h} steps ahead with a BATS model. Prediction intervals are #' also produced. #' #' @param object An object of class "\code{bats}". Usually the result of a call #' to \code{\link{bats}}. #' @param h Number of periods for forecasting. Default value is twice the #' largest seasonal period (for seasonal data) or ten (for non-seasonal data). #' @param level Confidence level for prediction intervals. #' @param fan If TRUE, level is set to \code{seq(51,99,by=3)}. This is suitable #' for fan plots. #' @param biasadj Use adjusted back-transformed mean for Box-Cox #' transformations. If TRUE, point forecasts and fitted values are mean #' forecast. Otherwise, these points can be considered the median of the #' forecast densities. #' @param ... Other arguments, currently ignored. #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{forecast.bats}. 
#' #' An object of class \code{"forecast"} is a list containing at least the #' following elements: \item{model}{A copy of the \code{bats} object} #' \item{method}{The name of the forecasting method as a character string} #' \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for #' prediction intervals} \item{upper}{Upper limits for prediction intervals} #' \item{level}{The confidence values associated with the prediction intervals} #' \item{x}{The original time series (either \code{object} itself or the time #' series used to create the model stored as \code{object}).} #' \item{residuals}{Residuals from the fitted model.} \item{fitted}{Fitted #' values (one-step forecasts)} #' @author Slava Razbash and Rob J Hyndman #' @seealso \code{\link{bats}}, \code{\link{tbats}},\code{\link{forecast.ets}}. #' @references De Livera, A.M., Hyndman, R.J., & Snyder, R. D. (2011), #' Forecasting time series with complex seasonal patterns using exponential #' smoothing, \emph{Journal of the American Statistical Association}, #' \bold{106}(496), 1513-1527. #' @keywords ts #' @examples #' #' \dontrun{ #' fit <- bats(USAccDeaths) #' plot(forecast(fit)) #' #' taylor.fit <- bats(taylor) #' plot(forecast(taylor.fit)) #' } #' #' @export forecast.bats <- function(object, h, level=c(80, 95), fan=FALSE, biasadj=NULL, ...) { # Set up the variables if (any(class(object$y) == "ts")) { ts.frequency <- frequency(object$y) } else { ts.frequency <- ifelse(!is.null(object$seasonal.periods), max(object$seasonal.periods), 1) } if (missing(h)) { if (is.null(object$seasonal.periods)) { h <- ifelse(ts.frequency == 1, 10, 2 * ts.frequency) } else { h <- 2 * max(object$seasonal.periods) } } else if (h <= 0) { stop("Forecast horizon out of bounds") } if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } # Set up the matrices x <- matrix(0, nrow = nrow(object$x), ncol = h) y.forecast <- numeric(h) # w <- makeWMatrix(small.phi=object$damping.parameter, seasonal.periods=object$seasonal.periods, ar.coefs=object$ar.coefficients, ma.coefs=object$ma.coefficients) w <- .Call("makeBATSWMatrix", smallPhi_s = object$damping.parameter, sPeriods_s = object$seasonal.periods, arCoefs_s = object$ar.coefficients, maCoefs_s = object$ma.coefficients, PACKAGE = "forecast") # g <- makeGMatrix(alpha=object$alpha, beta=object$beta, gamma.vector=object$gamma.values, seasonal.periods=object$seasonal.periods, p=length(object$ar.coefficients), q=length(object$ma.coefficients)) g <- .Call("makeBATSGMatrix", object$alpha, object$beta, object$gamma.values, object$seasonal.periods, length(object$ar.coefficients), length(object$ma.coefficients), PACKAGE = "forecast") F <- makeFMatrix(alpha = object$alpha, beta = object$beta, small.phi = object$damping.parameter, seasonal.periods = object$seasonal.periods, gamma.bold.matrix = g$gamma.bold.matrix, ar.coefs = object$ar.coefficients, ma.coefs = object$ma.coefficients) # Do the forecast y.forecast[1] <- w$w.transpose %*% object$x[, ncol(object$x)] x[, 1] <- F %*% object$x[, ncol(object$x)] # + g$g %*% object$errors[length(object$errors)] if (h > 1) { for (t in 2:h) { x[, t] <- F %*% x[, (t - 1)] y.forecast[t] <- w$w.transpose %*% x[, (t - 1)] } } ## Make prediction intervals here lower.bounds <- upper.bounds <- matrix(NA, ncol = length(level), nrow = h) variance.multiplier <- numeric(h) variance.multiplier[1] <- 1 if (h > 1) { for (j in 1:(h - 1)) { 
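      # c.j = w' F^(j-1) g is the coefficient of the j-step-ahead error in the
      # linear state-space expansion of the forecast, so the h-step forecast
      # variance is sigma^2 * (1 + sum_{j=1}^{h-1} c.j^2); variance.multiplier
      # accumulates that sum below (De Livera, Hyndman & Snyder, 2011).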
if (j == 1) { f.running <- diag(ncol(F)) } else { f.running <- f.running %*% F } c.j <- w$w.transpose %*% f.running %*% g$g variance.multiplier[(j + 1)] <- variance.multiplier[j] + c.j ^ 2 } } variance <- object$variance * variance.multiplier # print(variance) st.dev <- sqrt(variance) for (i in 1:length(level)) { marg.error <- st.dev * abs(qnorm((100 - level[i]) / 200)) lower.bounds[, i] <- y.forecast - marg.error upper.bounds[, i] <- y.forecast + marg.error } # Inv Box Cox transform if required if (!is.null(object$lambda)) { y.forecast <- InvBoxCox(y.forecast, object$lambda, biasadj, list(level = level, upper = upper.bounds, lower = lower.bounds)) lower.bounds <- InvBoxCox(lower.bounds, object$lambda) if (object$lambda < 1) { lower.bounds <- pmax(lower.bounds, 0) } upper.bounds <- InvBoxCox(upper.bounds, object$lambda) } colnames(upper.bounds) <- colnames(lower.bounds) <- paste0(level, "%") forecast.object <- list( model = object, mean = future_msts(object$y, y.forecast), level = level, x = object$y, series = object$series, upper = future_msts(object$y, upper.bounds), lower = future_msts(object$y, lower.bounds), fitted = copy_msts(object$y, object$fitted.values), method = as.character(object), residuals = copy_msts(object$y, object$errors) ) if (is.null(object$series)) { forecast.object$series <- deparse(object$call$y) } class(forecast.object) <- "forecast" return(forecast.object) } #' @export as.character.bats <- function(x, ...) { name <- "BATS(" if (!is.null(x$lambda)) { name <- paste(name, round(x$lambda, digits = 3), sep = "") } else { name <- paste(name, "1", sep = "") } name <- paste(name, ", {", sep = "") if (!is.null(x$ar.coefficients)) { name <- paste(name, length(x$ar.coefficients), sep = "") } else { name <- paste(name, "0", sep = "") } name <- paste(name, ",", sep = "") if (!is.null(x$ma.coefficients)) { name <- paste(name, length(x$ma.coefficients), sep = "") } else { name <- paste(name, "0", sep = "") } name <- paste(name, "}, ", sep = "") if (!is.null(x$damping.parameter)) { name <- paste(name, round(x$damping.parameter, digits = 3), sep = "") } else { name <- paste(name, "-", sep = "") } name <- paste(name, ", ", sep = "") if (!is.null(x$seasonal.periods)) { name <- paste(name, "{", sep = "") for (i in x$seasonal.periods) { name <- paste(name, i, sep = "") if (i != x$seasonal.periods[length(x$seasonal.periods)]) { name <- paste(name, ",", sep = "") } else { name <- paste(name, "})", sep = "") } } } else { name <- paste(name, "-)", sep = "") } return(name) } forecast/R/seasadj.R0000644000176200001440000000311714323125536014024 0ustar liggesusers## Generic seasadj functions #' Seasonal adjustment #' #' Returns seasonally adjusted data constructed by removing the seasonal #' component. #' #' #' @param object Object created by \code{\link[stats]{decompose}}, #' \code{\link[stats]{stl}} or \code{\link{tbats}}. #' @param ... Other arguments not currently used. #' @return Univariate time series. #' @author Rob J Hyndman #' @seealso \code{\link[stats]{stl}}, \code{\link[stats]{decompose}}, #' \code{\link{tbats}}. #' @keywords ts #' @examples #' plot(AirPassengers) #' lines(seasadj(decompose(AirPassengers,"multiplicative")),col=4) #' #' @export seasadj <- function(object, ...) UseMethod("seasadj") #' @rdname seasadj #' @export seasadj.stl <- function(object, ...) { return(trendcycle(object) + remainder(object)) } #' @rdname seasadj #' @export seasadj.mstl <- function(object, ...) 
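# As for seasadj.stl above: an mstl decomposition is seasonally adjusted by
# keeping trend + remainder, i.e. the data minus all seasonal components.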
{ return(trendcycle(object) + remainder(object)) } #' @rdname seasadj #' @export seasadj.decomposed.ts <- function(object, ...) { if (object$type == "additive") { return(object$x - object$seasonal) } else { return(object$x / object$seasonal) } } #' @rdname seasadj #' @export seasadj.tbats <- function(object, ...) { return(object$y - seasonal(object)) # comp <- tbats.components(object) # scols <- grep("season",colnames(comp)) # sa <- comp[,"observed"] - rowSums(comp[,scols,drop=FALSE]) # # Back transform if necessary # if (!is.null(object$lambda)) # sa <- InvBoxCox(sa, object$lambda) # return(sa) } #' @rdname seasadj #' @export seasadj.seas <- function(object, ...) { return(seasextract_w_na_action(object, "final")) } forecast/R/HoltWintersNew.R0000644000176200001440000004537014254256650015362 0ustar liggesusers# Modelled on the HoltWinters() function but with more conventional # initialization. # Written by Zhenyu Zhou. 21 October 2012 HoltWintersZZ <- function(x, # smoothing parameters alpha = NULL, # level beta = NULL, # trend gamma = NULL, # seasonal component seasonal = c("additive", "multiplicative"), exponential = FALSE, # exponential phi = NULL, # damp lambda = NULL, # box-cox biasadj = FALSE, # adjusted back-transformed mean for box-cox warnings = TRUE # return optimization warnings ) { x <- as.ts(x) origx <- x seasonal <- match.arg(seasonal) m <- frequency(x) lenx <- length(x) if (!is.null(lambda)) { x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") } if (is.null(phi) || !is.numeric(phi)) { phi <- 1 } if (!is.null(alpha) && !is.numeric(alpha)) { stop("cannot fit models without level ('alpha' must not be 0 or FALSE).") } if (!all(is.null(c(alpha, beta, gamma))) && any(c(alpha, beta, gamma) < 0 | c(alpha, beta, gamma) > 1)) { stop("'alpha', 'beta' and 'gamma' must be within the unit interval.") } if ((is.null(gamma) || gamma > 0)) { if (seasonal == "multiplicative" && any(x <= 0)) { stop("data must be positive for multiplicative Holt-Winters.") } } if (m <= 1) { gamma <- FALSE } ## initialise l0, b0, s0 if (!is.null(gamma) && is.logical(gamma) && !gamma) { seasonal <- "none" l.start <- x[1L] s.start <- 0 if (is.null(beta) || !is.logical(beta) || beta) { if (!exponential) { b.start <- x[2L] - x[1L] } else { b.start <- x[2L] / x[1L] } } } else { ## seasonal Holt-Winters l.start <- mean(x[1:m]) b.start <- (mean(x[m + (1:m)]) - l.start) / m if (seasonal == "additive") { s.start <- x[1:m] - l.start } else { s.start <- x[1:m] / l.start } } # initialise smoothing parameters # lower=c(rep(0.0001,3), 0.8) # upper=c(rep(0.9999,3),0.98) lower <- c(0, 0, 0, 0) upper <- c(1, 1, 1, 1) if (!is.null(beta) && is.logical(beta) && !beta) { trendtype <- "N" } else if (exponential) { trendtype <- "M" } else { trendtype <- "A" } if (seasonal == "none") { seasontype <- "N" } else if (seasonal == "multiplicative") { seasontype <- "M" } else { seasontype <- "A" } ## initialise smoothing parameter optim.start <- initparam( alpha = alpha, beta = beta, gamma = gamma, phi = 1, trendtype = trendtype, seasontype = seasontype, damped = FALSE, lower = lower, upper = upper, m = m, bounds = "usual" ) # if(!is.na(optim.start["alpha"])) # alpha2 <- optim.start["alpha"] # else # alpha2 <- alpha # if(!is.na(optim.start["beta"])) # beta2 <- optim.start["beta"] # else # beta2 <- beta # if(!is.na(optim.start["gamma"])) # gamma2 <- optim.start["gamma"] # else # gamma2 <- gamma # if(!check.param(alpha = alpha2,beta = beta2, gamma = gamma2,phi=1,lower,upper,bounds="haha",m=m)) # { # print(paste("alpha=", alpha2, "beta=",beta2, 
"gamma=",gamma2)) # stop("Parameters out of range") # } ################################################################################### # optimisation: alpha, beta, gamma, if any of them is null, then optimise them error <- function(p, select) { if (select[1] > 0) { alpha <- p[1L] } if (select[2] > 0) { beta <- p[1L + select[1]] } if (select[3] > 0) { gamma <- p[1L + select[1] + select[2]] } zzhw( x, lenx = lenx, alpha = alpha, beta = beta, gamma = gamma, seasonal = seasonal, m = m, dotrend = (!is.logical(beta) || beta), doseasonal = (!is.logical(gamma) || gamma), exponential = exponential, phi = phi, l.start = l.start, b.start = b.start, s.start = s.start )$SSE } select <- as.numeric(c(is.null(alpha), is.null(beta), is.null(gamma))) if (sum(select) > 0) # There are parameters to optimize { sol <- optim(optim.start, error, method = "L-BFGS-B", lower = lower[select], upper = upper[select], select = select) if (sol$convergence || any(sol$par < 0 | sol$par > 1)) { if (sol$convergence > 50) { if (warnings) { warning(gettextf("optimization difficulties: %s", sol$message), domain = NA) } } else { stop("optimization failure") } } if (select[1] > 0) { alpha <- sol$par[1L] } if (select[2] > 0) { beta <- sol$par[1L + select[1]] } if (select[3] > 0) { gamma <- sol$par[1L + select[1] + select[2]] } } final.fit <- zzhw( x, lenx = lenx, alpha = alpha, beta = beta, gamma = gamma, seasonal = seasonal, m = m, dotrend = (!is.logical(beta) || beta), doseasonal = (!is.logical(gamma) || gamma), exponential = exponential, phi = phi, l.start = l.start, b.start = b.start, s.start = s.start ) tspx <- tsp(x) fitted <- ts(final.fit$fitted, frequency = m, start = tspx[1]) res <- ts(final.fit$residuals, frequency = m, start = tspx[1]) if (!is.null(lambda)) { fitted <- InvBoxCox(fitted, lambda, biasadj, var(final.fit$residuals)) attr(lambda, "biasadj") <- biasadj } states <- matrix(final.fit$level, ncol = 1) colnames(states) <- "l" if (trendtype != "N") { states <- cbind(states, b = final.fit$trend) } if (seasontype != "N") { nr <- nrow(states) nc <- ncol(states) for (i in 1:m) states <- cbind(states, final.fit$season[(m - i) + (1:nr)]) colnames(states)[nc + (1:m)] <- paste("s", 1:m, sep = "") } states <- ts(states, frequency = m, start = tspx[1] - 1 / m) # Package output as HoltWinters class # structure(list(fitted = fitted, # x = x, # alpha = alpha, # beta = beta, # gamma = gamma, # coefficients = c(a = final.fit$level[lenx], # b = if (!is.logical(beta) || beta) final.fit$trend[lenx], # s = if (!is.logical(gamma) || gamma) final.fit$season[lenx - m + 1L:m]), # seasonal = seasonal, # exponential = exponential, # SSE = final.fit$SSE, # call = match.call(), # level = final.fit$level, # trend = final.fit$trend, # season = final.fit$season, # phi = phi # ), # class = "HoltWinters" # ) # Package output as ets class damped <- (phi < 1.0) if (seasonal == "additive") { # This should not happen components <- c("A", trendtype, seasontype, damped) } else if (seasonal == "multiplicative") { components <- c("M", trendtype, seasontype, damped) } else if (seasonal == "none" && exponential) { components <- c("M", trendtype, seasontype, damped) } else { # if(seasonal=="none" & !exponential) components <- c("A", trendtype, seasontype, damped) } initstate <- states[1, ] param <- alpha names(param) <- "alpha" if (trendtype != "N") { param <- c(param, beta = beta) names(param)[length(param)] <- "beta" } if (seasontype != "N") { param <- c(param, gamma = gamma) names(param)[length(param)] <- "gamma" } if (damped) { param <- c(param, 
phi = phi)
    names(param)[length(param)] <- "phi"
  }
  if (components[1] == "A") {
    sigma2 <- mean(res ^ 2)
  } else {
    sigma2 <- mean((res / fitted) ^ 2)
  }
  structure(
    list(
      fitted = fitted, residuals = res, components = components, x = origx,
      par = c(param, initstate), initstate = initstate, states = states,
      SSE = final.fit$SSE, sigma2 = sigma2, call = match.call(), m = m,
      lambda = lambda
    ),
    class = "ets"
  )
}

###################################################################################
# filter function
zzhw <- function(x, lenx, alpha=NULL, beta=NULL, gamma=NULL, seasonal="additive", m,
                 dotrend=FALSE, doseasonal=FALSE, l.start=NULL, exponential=NULL,
                 phi=NULL, b.start=NULL, s.start=NULL) {
  # Check for NULL before comparing, otherwise `NULL != TRUE` errors inside ||
  if (is.null(exponential) || !isTRUE(exponential)) {
    exponential <- FALSE
  }
  if (is.null(phi) || !is.numeric(phi)) {
    phi <- 1
  }
  # initialise arrays of l, b, s
  level <- trend <- season <- xfit <- residuals <- numeric(lenx)
  SSE <- 0
  if (!dotrend) {
    beta <- 0
    b.start <- 0
  }
  if (!doseasonal) {
    gamma <- 0
    s.start[1:length(s.start)] <- ifelse(seasonal == "additive", 0, 1)
  }
  lastlevel <- level0 <- l.start
  lasttrend <- trend0 <- b.start
  season0 <- s.start
  for (i in 1:lenx) {
    # define l(t-1)
    if (i > 1) {
      lastlevel <- level[i - 1]
    }
    # define b(t-1)
    if (i > 1) {
      lasttrend <- trend[i - 1]
    }
    # define s(t-m)
    if (i > m) {
      lastseason <- season[i - m]
    } else {
      lastseason <- season0[i]
    }
    if (is.na(lastseason)) {
      lastseason <- ifelse(seasonal == "additive", 0, 1)
    }
    # stop((lastlevel + phi*lasttrend)*lastseason)
    # forecast for this period i
    if (seasonal == "additive") {
      if (!exponential) {
        xhat <- lastlevel + phi * lasttrend + lastseason
      } else {
        xhat <- lastlevel * lasttrend ^ phi + lastseason
      }
    } else {
      if (!exponential) {
        xhat <- (lastlevel + phi * lasttrend) * lastseason
      } else {
        xhat <- lastlevel * lasttrend ^ phi * lastseason
      }
    }
    xfit[i] <- xhat
    res <- x[i] - xhat
    residuals[i] <- res
    SSE <- SSE + res * res
    # calculate level[i]
    if (seasonal == "additive") {
      if (!exponential) {
        level[i] <- alpha * (x[i] - lastseason) + (1 - alpha) * (lastlevel + phi * lasttrend)
      } else {
        level[i] <- alpha * (x[i] - lastseason) + (1 - alpha) * (lastlevel * lasttrend ^ phi)
      }
    } else {
      if (!exponential) {
        level[i] <- alpha * (x[i] / lastseason) + (1 - alpha) * (lastlevel + phi * lasttrend)
      } else {
        level[i] <- alpha * (x[i] / lastseason) + (1 - alpha) * (lastlevel * lasttrend ^ phi)
      }
    }
    # calculate trend[i]
    if (!exponential) {
      trend[i] <- beta * (level[i] - lastlevel) + (1 - beta) * phi * lasttrend
    } else {
      trend[i] <- beta * (level[i] / lastlevel) + (1 - beta) * lasttrend ^ phi
    }
    # calculate season[i]
    if (seasonal == "additive") {
      if (!exponential) {
        season[i] <- gamma * (x[i] - lastlevel - phi * lasttrend) + (1 - gamma) * lastseason
      } else {
        season[i] <- gamma * (x[i] - lastlevel * lasttrend ^ phi) + (1 - gamma) * lastseason
      }
    } else {
      if (!exponential) {
        season[i] <- gamma * (x[i] / (lastlevel + phi * lasttrend)) + (1 - gamma) * lastseason
      } else {
        season[i] <- gamma * (x[i] / (lastlevel * lasttrend ^ phi)) + (1 - gamma) * lastseason
      }
    }
  }
  list(
    SSE = SSE, fitted = xfit, residuals = residuals,
    level = c(level0, level), trend = c(trend0, trend), season = c(season0, season),
    phi = phi
  )
}

#' Exponential smoothing forecasts
#'
#' Returns forecasts and other information for exponential smoothing forecasts
#' applied to \code{y}.
#'
#' \code{ses}, \code{holt} and \code{hw} are simply convenient wrapper functions
#' for \code{forecast(ets(...))}.
#'
#' @param y a numeric vector or time series of class \code{ts}
#' @param h Number of periods for forecasting.
#' @param damped If TRUE, use a damped trend. #' @param seasonal Type of seasonality in \code{hw} model. "additive" or #' "multiplicative" #' @param level Confidence level for prediction intervals. #' @param fan If TRUE, level is set to seq(51,99,by=3). This is suitable for #' fan plots. #' @param initial Method used for selecting initial state values. If #' \code{optimal}, the initial values are optimized along with the smoothing #' parameters using \code{\link{ets}}. If \code{simple}, the initial values are #' set to values obtained using simple calculations on the first few #' observations. See Hyndman & Athanasopoulos (2014) for details. #' @param exponential If TRUE, an exponential trend is fitted. Otherwise, the #' trend is (locally) linear. #' @param alpha Value of smoothing parameter for the level. If \code{NULL}, it #' will be estimated. #' @param beta Value of smoothing parameter for the trend. If \code{NULL}, it #' will be estimated. #' @param gamma Value of smoothing parameter for the seasonal component. If #' \code{NULL}, it will be estimated. #' @param phi Value of damping parameter if \code{damped=TRUE}. If \code{NULL}, #' it will be estimated. #' @param x Deprecated. Included for backwards compatibility. #' @param ... Other arguments passed to \code{forecast.ets}. #' @inheritParams forecast.ts #' #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{ets} and associated #' functions. #' #' An object of class \code{"forecast"} is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The original time series #' (either \code{object} itself or the time series used to create the model #' stored as \code{object}).} \item{residuals}{Residuals from the fitted #' model.} \item{fitted}{Fitted values (one-step forecasts)} #' @author Rob J Hyndman #' @seealso \code{\link{ets}}, \code{\link[stats]{HoltWinters}}, #' \code{\link{rwf}}, \code{\link[stats]{arima}}. #' @references Hyndman, R.J., Koehler, A.B., Ord, J.K., Snyder, R.D. (2008) #' \emph{Forecasting with exponential smoothing: the state space approach}, #' Springer-Verlag: New York. \url{http://www.exponentialsmoothing.net}. #' #' @references Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles #' and practice}, 2nd edition, OTexts: Melbourne, Australia. #' \url{https://otexts.com/fpp2/} #' @keywords ts #' @examples #' #' fcast <- holt(airmiles) #' plot(fcast) #' deaths.fcast <- hw(USAccDeaths,h=48) #' plot(deaths.fcast) #' #' @export ses <- function(y, h = 10, level = c(80, 95), fan = FALSE, initial=c("optimal", "simple"), alpha=NULL, lambda=NULL, biasadj=FALSE, x=y, ...) { initial <- match.arg(initial) if (initial == "optimal") { fcast <- forecast(ets(x, "ANN", alpha = alpha, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...) 
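    # "ANN" is the ETS spec for additive errors, no trend and no seasonality,
    # i.e. the state-space form of simple exponential smoothing; the initial
    # level is then optimized jointly with alpha by ets().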
} else { fcast <- forecast(HoltWintersZZ(x, alpha = alpha, beta = FALSE, gamma = FALSE, lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...) } fcast$method <- fcast$model$method <- "Simple exponential smoothing" fcast$model$call <- match.call() fcast$series <- deparse(substitute(y)) return(fcast) } #' @rdname ses #' @export holt <- function(y, h = 10, damped = FALSE, level = c(80, 95), fan = FALSE, initial=c("optimal", "simple"), exponential=FALSE, alpha=NULL, beta=NULL, phi=NULL, lambda=NULL, biasadj=FALSE, x=y, ...) { initial <- match.arg(initial) if (length(y) <= 1L) { stop("I need at least two observations to estimate trend.") } if (initial == "optimal" || damped) { if (exponential) { fcast <- forecast(ets(x, "MMN", alpha = alpha, beta = beta, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...) } else { fcast <- forecast(ets(x, "AAN", alpha = alpha, beta = beta, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...) } } else { fcast <- forecast( HoltWintersZZ(x, alpha = alpha, beta = beta, gamma = FALSE, phi = phi, exponential = exponential, lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ... ) } if (damped) { fcast$method <- "Damped Holt's method" if (initial == "simple") { warning("Damped Holt's method requires optimal initialization") } } else { fcast$method <- "Holt's method" } if (exponential) { fcast$method <- paste(fcast$method, "with exponential trend") } fcast$model$method <- fcast$method fcast$model$call <- match.call() fcast$series <- deparse(substitute(y)) return(fcast) } #' @rdname ses #' @export hw <- function(y, h = 2 * frequency(x), seasonal = c("additive", "multiplicative"), damped = FALSE, level = c(80, 95), fan = FALSE, initial=c("optimal", "simple"), exponential=FALSE, alpha=NULL, beta=NULL, gamma=NULL, phi=NULL, lambda=NULL, biasadj=FALSE, x=y, ...) { initial <- match.arg(initial) seasonal <- match.arg(seasonal) m <- frequency(x) if (m <= 1L) { stop("The time series should have frequency greater than 1.") } if (length(y) < m + 3) { stop(paste("I need at least", m + 3, "observations to estimate seasonality.")) } if (initial == "optimal" || damped) { if (seasonal == "additive" && exponential) { stop("Forbidden model combination") } else if (seasonal == "additive" && !exponential) { fcast <- forecast(ets(x, "AAA", alpha = alpha, beta = beta, gamma = gamma, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...) } else if (seasonal != "additive" && exponential) { fcast <- forecast(ets(x, "MMM", alpha = alpha, beta = beta, gamma = gamma, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...) } else { # if(seasonal!="additive" & !exponential) fcast <- forecast(ets(x, "MAM", alpha = alpha, beta = beta, gamma = gamma, phi = phi, damped = damped, opt.crit = "mse", lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ...) } } else { fcast <- forecast( HoltWintersZZ(x, alpha = alpha, beta = beta, gamma = gamma, phi = phi, seasonal = seasonal, exponential = exponential, lambda = lambda, biasadj = biasadj), h, level = level, fan = fan, ... 
) } if (seasonal == "additive") { fcast$method <- "Holt-Winters' additive method" } else { fcast$method <- "Holt-Winters' multiplicative method" } if (exponential) { fcast$method <- paste(fcast$method, "with exponential trend") } if (damped) { fcast$method <- paste("Damped", fcast$method) if (initial == "simple") { warning("Damped methods require optimal initialization") } } fcast$model$method <- fcast$method fcast$model$call <- match.call() fcast$series <- deparse(substitute(y)) return(fcast) } forecast/R/acf.R0000644000176200001440000003437514474044013013152 0ustar liggesusers# Replacement for the acf() function. #' (Partial) Autocorrelation and Cross-Correlation Function Estimation #' #' The function \code{Acf} computes (and by default plots) an estimate of the #' autocorrelation function of a (possibly multivariate) time series. Function #' \code{Pacf} computes (and by default plots) an estimate of the partial #' autocorrelation function of a (possibly multivariate) time series. Function #' \code{Ccf} computes the cross-correlation or cross-covariance of two #' univariate series. #' #' The functions improve the \code{\link[stats]{acf}}, #' \code{\link[stats]{pacf}} and \code{\link[stats]{ccf}} functions. The main #' differences are that \code{Acf} does not plot a spike at lag 0 when #' \code{type=="correlation"} (which is redundant) and the horizontal axes show #' lags in time units rather than seasonal units. #' #' The tapered versions implement the ACF and PACF estimates and plots #' described in Hyndman (2015), based on the banded and tapered estimates of #' autocovariance proposed by McMurry and Politis (2010). #' #' @param x a univariate or multivariate (not Ccf) numeric time series object #' or a numeric vector or matrix. #' @param y a univariate numeric time series object or a numeric vector. #' @param lag.max maximum lag at which to calculate the acf. Default is #' $10*log10(N/m)$ where $N$ is the number of observations and $m$ the number #' of series. Will be automatically limited to one less than the number of #' observations in the series. #' @param type character string giving the type of acf to be computed. Allowed #' values are \dQuote{\code{correlation}} (the default), #' \dQuote{\code{covariance}} or \dQuote{\code{partial}}. #' @param plot logical. If \code{TRUE} (the default) the resulting acf, pacf or #' ccf is plotted. #' @param na.action function to handle missing values. Default is #' \code{\link[stats]{na.contiguous}}. Useful alternatives are #' \code{\link[stats]{na.pass}} and \code{\link{na.interp}}. #' @param demean Should covariances be about the sample means? #' @param calc.ci If \code{TRUE}, confidence intervals for the ACF/PACF #' estimates are calculated. #' @param level Percentage level used for the confidence intervals. #' @param nsim The number of bootstrap samples used in estimating the #' confidence intervals. #' @param ... Additional arguments passed to the plotting function. #' @return The \code{Acf}, \code{Pacf} and \code{Ccf} functions return objects #' of class "acf" as described in \code{\link[stats]{acf}} from the stats #' package. The \code{taperedacf} and \code{taperedpacf} functions return #' objects of class "mpacf". #' @author Rob J Hyndman #' @seealso \code{\link[stats]{acf}}, \code{\link[stats]{pacf}}, #' \code{\link[stats]{ccf}}, \code{\link{tsdisplay}} #' @references Hyndman, R.J. (2015). Discussion of ``High-dimensional #' autocovariance matrices and optimal linear prediction''. \emph{Electronic #' Journal of Statistics}, 9, 792-796. 
#' #' McMurry, T. L., & Politis, D. N. (2010). Banded and tapered estimates for #' autocovariance matrices and the linear process bootstrap. \emph{Journal of #' Time Series Analysis}, 31(6), 471-482. #' @keywords ts #' @examples #' #' Acf(wineind) #' Pacf(wineind) #' \dontrun{ #' taperedacf(wineind, nsim=50) #' taperedpacf(wineind, nsim=50) #' } #' #' @export Acf <- function(x, lag.max = NULL, type = c("correlation", "covariance", "partial"), plot = TRUE, na.action = na.contiguous, demean=TRUE, ...) { type <- match.arg(type) # Set maximum lag nseries <- NCOL(x) if (is.null(lag.max)) { lag.max <- as.integer(max( floor(10 * (log10(NROW(x)) - log10(nseries))), 2 * frequency(x) )) } acf.out <- stats::acf( x, plot = FALSE, lag.max = lag.max, type = type, na.action = na.action, demean = demean ) acf.out$tsp <- tsp(x) acf.out$periods <- attributes(x)$msts acf.out$series <- deparse(substitute(x)) # Make lags in integer units nlags <- dim(acf.out$lag)[1] if (type == "partial") { acf.out$lag[, , ] <- seq(nlags) } else { acf.out$lag[, , ] <- seq(nlags)-1 } # Plot if required if (plot) { plot.out <- acf.out # Hide 0 lag if autocorrelations if (type == "correlation") { for (i in seq(NCOL(x))) { plot.out$lag[1, i, i] <- 1 plot.out$acf[1, i, i] <- 0 } } if (nseries > 1) { plot(plot.out, ...) } else { # Check if there is a ylim input input_list <- as.list(substitute(list(...))) ylimarg <- is.element("ylim", names(input_list)) if (ylimarg) { plot(plot.out, xaxt = "n", ...) } else { ylim <- c(-1, 1) * 3 / sqrt(length(x)) ylim <- range(ylim, plot.out$acf) plot(plot.out, ylim = ylim, xaxt = "n", ...) } # Make nice horizontal axis if (is.element("msts", class(x))) { seasonalaxis(attributes(x)$msts, nlags, type = "acf") } else { seasonalaxis(frequency(x), nlags, type = "acf") } if (type == "covariance") { axis(at = 0, side = 1) } } return(invisible(acf.out)) } else { return(acf.out) } } # Make nice horizontal axis with ticks at seasonal lags # Return tick points if breaks=TRUE seasonalaxis <- function(frequency, nlags, type, plot=TRUE) { # List of unlabelled tick points out2 <- NULL # Check for non-seasonal data if (length(frequency) == 1) { # Compute number of seasonal periods np <- trunc(nlags / frequency) evenfreq <- (frequency %% 2L) == 0L # Defaults for labelled tick points if (type == "acf") { out <- pretty(1:nlags) } else { out <- pretty(-nlags:nlags) } if (frequency == 1) { if (type == "acf" && nlags <= 16) { out <- 1:nlags } else if (type == "ccf" && nlags <= 8) { out <- (-nlags:nlags) } else { if (nlags <= 30 && type == "acf") { out2 <- 1:nlags } else if (nlags <= 15 && type == "ccf") { out2 <- (-nlags:nlags) } if (!is.null(out2)) { out <- pretty(out2) } } } else if (frequency > 1 && ((type == "acf" && np >= 2L) || (type == "ccf" && np >= 1L))) { if (type == "acf" && nlags <= 40) { out <- frequency * (1:np) out2 <- 1:nlags # Add half-years if (nlags <= 30 && evenfreq && np <= 3) { out <- c(out, frequency * ((1:np) - 0.5)) } } else if (type == "ccf" && nlags <= 20) { out <- frequency * (-np:np) out2 <- (-nlags:nlags) # Add half-years if (nlags <= 15 && evenfreq && np <= 3) { out <- c(out, frequency * ((-np:np) + 0.5)) } } else if (np < (12 - 4 * (type == "ccf"))) { out <- frequency * (-np:np) } } } else { # Determine which frequency to show np <- trunc(nlags / frequency) frequency <- frequency[which(np <= 16)] if (length(frequency) > 0L) { frequency <- min(frequency) } else { frequency <- 1 } out <- seasonalaxis(frequency, nlags, type, plot = FALSE) } if (plot) { axis(1, at = out) if (!is.null(out2)) { 
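    # out2, when set, is the denser run of unlabelled lags; draw it as short
    # minor ticks (tcl = -0.2) beneath the labelled ticks added just above.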
axis(1, at = out2, tcl = -0.2, labels = FALSE) } } else { return(out) } } #' @rdname Acf #' @export Pacf <- function(x, lag.max=NULL, plot = TRUE, na.action = na.contiguous, demean=TRUE, ...) { object <- Acf( x, lag.max = lag.max, type = "partial", na.action = na.action, demean = demean, plot = FALSE ) object$series <- deparse(substitute(x)) # Plot if required if (plot) { nlags <- dim(object$lag)[1] plot.out <- object # Check if there is a ylim input input_list <- as.list(substitute(list(...))) ylimarg <- is.element("ylim", names(input_list)) if (ylimarg) { plot(plot.out, xaxt = "n", ...) } else { ylim <- c(-1, 1) * 3 / sqrt(length(x)) ylim <- range(ylim, plot.out$acf) plot(plot.out, ylim = ylim, xaxt = "n", ...) } # Make nice horizontal axis if (is.element("msts", class(x))) { seasonalaxis(attributes(x)$msts, nlags, type = "acf") } else { seasonalaxis(frequency(x), nlags, type = "acf") } return(invisible(object)) } else { return(object) } } #' @rdname Acf #' @export Ccf <- function(x, y, lag.max=NULL, type=c("correlation", "covariance"), plot=TRUE, na.action=na.contiguous, ...) { type <- match.arg(type) if (is.null(lag.max)) { lag.max <- as.integer(max(floor(10 * log10(NROW(x))), 2 * frequency(x))) } ccf.out <- stats::ccf( x, y, plot = FALSE, type = type, lag.max = lag.max, na.action = na.action ) # Make lags in integer units nlags <- (dim(ccf.out$lag)[1] - 1) / 2 ccf.out$lag[, 1, 1] <- -nlags:nlags # Plot if required if (plot) { vnames <- c(deparse(substitute(x))[1L], deparse(substitute(y))[1L]) ccf.out$snames <- paste(vnames, collapse = " & ") plot(ccf.out, ylab = "CCF", xaxt = "n", ...) seasonalaxis(frequency(x), nlags, type = "ccf") return(invisible(ccf.out)) } else { return(ccf.out) } } kappa <- function(x) { k <- rep(0, length(x)) x <- abs(x) k[x <= 1] <- 1 k[x > 1 & x <= 2] <- 2 - x[x > 1 & x <= 2] return(k) } # McMurray-Politis estimate of ACF wacf <- function(x, lag.max = length(x) - 1) { n <- length(x) lag.max <- min(lag.max, n - 1) if (lag.max < 0) { stop("'lag.max' must be at least 0") } # Standard estimator acfest <- stats::acf( c(x), lag.max = lag.max, plot = FALSE, na.action = na.contiguous ) acfest$series <- deparse(substitute(x)) # Taper estimates s <- 1:length(acfest$acf[, , 1]) upper <- 2 * sqrt(log(n, 10) / n) ac <- abs(acfest$acf[, , 1]) # Find l: ac < upper for 5 consecutive lags j <- (ac < upper) l <- 0 k <- 1 N <- length(j) - 4 while (l < 1 && k <= N) { if (all(j[k:(k + 4)])) { l <- k } else { k <- k + 1 } } acfest$acf[, , 1] <- acfest$acf[, , 1] * kappa(s / l) # End of Tapering # Now do some shrinkage towards white noise using eigenvalues # Construct covariance matrix gamma <- acfest$acf[, , 1] s <- length(gamma) Gamma <- matrix(1, s, s) d <- row(Gamma) - col(Gamma) for (i in 1:(s - 1)) Gamma[d == i | d == (-i)] <- gamma[i + 1] # Compute eigenvalue decomposition ei <- eigen(Gamma) # Shrink eigenvalues d <- pmax(ei$values, 20 / n) # Construct new covariance matrix Gamma2 <- ei$vectors %*% diag(d) %*% t(ei$vectors) Gamma2 <- Gamma2 / mean(d) # Estimate new ACF d <- row(Gamma2) - col(Gamma2) for (i in 2:s) gamma[i] <- mean(Gamma2[d == (i - 1)]) acfest$acf[, , 1] <- gamma ############### end of shrinkage return(acfest) } # Find tapered PACF using LD recursions wpacf <- function(x, lag.max=length(x) - 1) { # Compute pacf as usual, just to set up structure out <- Pacf(x, lag.max = lag.max, plot = FALSE) # Compute acf using tapered estimate acvf <- wacf(x, lag.max = lag.max)$acf[, , 1] # Durbin-Levinson recursions # Modified from 
http://faculty.washington.edu/dbp/s519/R-code/LD-recursions.R p <- length(acvf) - 1 phis <- acvf[2] / acvf[1] pev <- rep(acvf[1], p + 1) pacf <- rep(phis, p) pev[2] <- pev[1] * (1 - phis ^ 2) if (p > 1) { for (k in 2:p) { old.phis <- phis phis <- rep(0, k) ## compute kth order pacf (reflection coefficient) phis[k] <- (acvf[k + 1] - sum(old.phis * acvf[k:2])) / pev[k] phis[1:(k - 1)] <- old.phis - phis[k] * rev(old.phis) pacf[k] <- phis[k] pev[k + 1] <- pev[k] * (1 - phis[k] ^ 2) # if(abs(pacf[k]) > 1) # warning("PACF larger than 1 in absolute value") } } out$acf[, , 1] <- pacf return(out) } # Function to produce new style plot of ACF or PACF with CI # x = time series #' @rdname Acf #' @export taperedacf <- function(x, lag.max=NULL, type=c("correlation", "partial"), plot=TRUE, calc.ci=TRUE, level=95, nsim=100, ...) { type <- match.arg(type) if (is.null(lag.max)) { lag.max <- max(floor(20 * log10(length(x))), 4 * frequency(x)) } lag <- min(lag.max, length(x) - 1) if (type == "correlation") { z <- wacf(x, )$acf[2:(lag + 1), , 1] } else { z <- wpacf(x, )$acf[1:lag, , 1] } out <- list(z = z, lag = lag, type = type, x = x) if (calc.ci) { # Get confidence intervals for plots bootsim <- lpb(x, nsim = nsim) s1 <- matrix(0, nrow = lag, ncol = nsim) if (type == "correlation") { for (i in 1:nsim) s1[, i] <- wacf(bootsim[, i])$acf[2:(lag + 1), , 1] } else { for (i in 1:nsim) s1[, i] <- wpacf(bootsim[, i])$acf[1:lag, , 1] } prob <- (100 - level) / 200 out$upper <- apply(s1, 1, quantile, prob = 1 - prob) out$lower <- apply(s1, 1, quantile, prob = prob) } out <- structure(out, class = "mpacf") if (!plot) { return(out) } else { plot(out, ...) return(invisible(out)) } return(out) } #' @rdname Acf #' @export taperedpacf <- function(x, ...) { taperedacf(x, type = "partial", ...) } #' @export plot.mpacf <- function(x, xlim=NULL, ylim=NULL, xlab="Lag", ylab="", ...) { object <- x lagx <- 1:object$lag if (is.null(xlim)) { xlim <- c(1, object$lag) } if (is.null(ylim)) { ylim <- range(object$z, object$upper, object$lower) } if (ylab == "") { ylab <- ifelse(object$type == "partial", "PACF", "ACF") } plot( lagx, object$z, type = "n", xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, xaxt = "n", ... 
) grid(col = gray(.80), nx = NA, ny = NULL, lty = 1) abline(h = 0, col = gray(.4)) if (frequency(object$x) > 1) { axis(1, at = (0:100) * frequency(object$x)) for (i in 1:100) abline(v = (i - 1) * frequency(object$x), lty = 1, col = gray(0.80)) } else { axis(1) grid(col = gray(.80), ny = NA, lty = 1) } if (!is.null(object$lower)) { for (j in 1:object$lag) { polygon( lagx[j] + c(-0.55, 0.55, 0.55, -0.55), c(rep(object$lower[j], 2), rep(object$upper[j], 2)), col = gray(0.60), border = FALSE ) } # polygon(c(lagx,rev(lagx)),c(object$lower,rev(object$upper)),col=gray(.60),border=FALSE) } lines(lagx, object$z, lwd = 1.5) j <- (object$lower < 0 & object$upper > 0) points(lagx[j], object$z[j], pch = 1, cex = 0.5) points(lagx[!j], object$z[!j], pch = 19) } #' @rdname is.ets #' @export is.acf <- function(x) { inherits(x, "acf") } forecast/R/ggplot.R0000644000176200001440000024143314456202551013713 0ustar liggesusersglobalVariables(".data") #' @inherit ggplot2::autolayer #' @export autolayer <- function(object, ...){ UseMethod("autolayer") } #' @importFrom ggplot2 autoplot #' @export ggplot2::autoplot ggAddExtras <- function(xlab=NA, ylab=NA, main=NA) { dots <- eval.parent(quote(list(...))) extras <- list() if ("xlab" %in% names(dots) || is.null(xlab) || any(!is.na(xlab))) { if ("xlab" %in% names(dots)) { extras[[length(extras) + 1]] <- ggplot2::xlab(dots$xlab) } else { extras[[length(extras) + 1]] <- ggplot2::xlab(paste0(xlab[!is.na(xlab)], collapse = " ")) } } if ("ylab" %in% names(dots) || is.null(ylab) || any(!is.na(ylab))) { if ("ylab" %in% names(dots)) { extras[[length(extras) + 1]] <- ggplot2::ylab(dots$ylab) } else { extras[[length(extras) + 1]] <- ggplot2::ylab(paste0(ylab[!is.na(ylab)], collapse = " ")) } } if ("main" %in% names(dots) || is.null(main) || any(!is.na(main))) { if ("main" %in% names(dots)) { extras[[length(extras) + 1]] <- ggplot2::ggtitle(dots$main) } else { extras[[length(extras) + 1]] <- ggplot2::ggtitle(paste0(main[!is.na(main)], collapse = " ")) } } if ("xlim" %in% names(dots)) { extras[[length(extras) + 1]] <- ggplot2::xlim(dots$xlim) } if ("ylim" %in% names(dots)) { extras[[length(extras) + 1]] <- ggplot2::ylim(dots$ylim) } return(extras) } ggtsbreaks <- function(x) { # Make x axis contain only whole numbers (e.g., years) return(unique(round(pretty(floor(x[1]):ceiling(x[2]))))) } #' ggplot (Partial) Autocorrelation and Cross-Correlation Function Estimation #' and Plotting #' #' Produces a ggplot object of their equivalent Acf, Pacf, Ccf, taperedacf and #' taperedpacf functions. #' #' If \code{autoplot} is given an \code{acf} or \code{mpacf} object, then an #' appropriate ggplot object will be created. #' #' ggtaperedpacf #' @param object Object of class \dQuote{\code{acf}}. #' @param x a univariate or multivariate (not Ccf) numeric time series object #' or a numeric vector or matrix. #' @param y a univariate numeric time series object or a numeric vector. #' @param ci coverage probability for confidence interval. Plotting of the #' confidence interval is suppressed if ci is zero or negative. #' @param lag.max maximum lag at which to calculate the acf. #' @param type character string giving the type of acf to be computed. Allowed #' values are "\code{correlation}" (the default), \dQuote{\code{covariance}} or #' \dQuote{\code{partial}}. #' @param plot logical. If \code{TRUE} (the default) the resulting ACF, PACF or #' CCF is plotted. #' @param na.action function to handle missing values. Default is #' \code{\link[stats]{na.contiguous}}. 
Useful alternatives are #' \code{\link[stats]{na.pass}} and \code{\link{na.interp}}. #' @param demean Should covariances be about the sample means? #' @param calc.ci If \code{TRUE}, confidence intervals for the ACF/PACF #' estimates are calculated. #' @param level Percentage level used for the confidence intervals. #' @param nsim The number of bootstrap samples used in estimating the #' confidence intervals. #' @param ... Other plotting parameters to affect the plot. #' @return A ggplot object. #' @author Mitchell O'Hara-Wild #' @seealso \code{\link[stats]{plot.acf}}, \code{\link{Acf}}, #' \code{\link[stats]{acf}}, \code{\link{taperedacf}} #' @examples #' #' library(ggplot2) #' ggAcf(wineind) #' wineind %>% Acf(plot=FALSE) %>% autoplot #' \dontrun{ #' wineind %>% taperedacf(plot=FALSE) %>% autoplot #' ggtaperedacf(wineind) #' ggtaperedpacf(wineind)} #' ggCcf(mdeaths, fdeaths) #' #' @export autoplot.acf <- function(object, ci=0.95, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { if (!inherits(object, "acf")) { stop("autoplot.acf requires a acf object, use object=object") } acf <- `dimnames<-`(object$acf, list(NULL, object$snames, object$snames)) lag <- `dimnames<-`(object$lag, list(NULL, object$snames, object$snames)) data <- as.data.frame.table(acf)[-1] data$lag <- as.numeric(lag) if (object$type == "correlation" & is.null(object$ccf)) { data <- data[data$lag != 0, ] } # Initialise ggplot object p <- ggplot2::ggplot( ggplot2::aes(x = .data[["lag"]], xend = .data[["lag"]], y = 0, yend = .data[["Freq"]]), data = data ) p <- p + ggplot2::geom_hline(yintercept = 0) # Add data p <- p + ggplot2::geom_segment(lineend = "butt", ...) # Add ci lines (assuming white noise input) ci <- qnorm((1 + ci) / 2) / sqrt(object$n.used) p <- p + ggplot2::geom_hline(yintercept = c(-ci, ci), colour = "blue", linetype = "dashed") # Add facets if needed if(any(dim(object$acf)[2:3] != c(1,1))){ p <- p + ggplot2::facet_grid( as.formula(paste0(colnames(data)[1:2], collapse = "~")) ) } # Prepare graph labels if (!is.null(object$ccf)) { ylab <- "CCF" ticktype <- "ccf" main <- paste("Series:", object$snames) nlags <- round(dim(object$lag)[1] / 2) } else if (object$type == "partial") { ylab <- "PACF" ticktype <- "acf" main <- paste("Series:", object$series) nlags <- dim(object$lag)[1] } else if (object$type == "correlation") { ylab <- "ACF" ticktype <- "acf" main <- paste("Series:", object$series) nlags <- dim(object$lag)[1] } else { ylab <- NULL } # Add seasonal x-axis # Change ticks to be seasonal and prepare default title if (!is.null(object$tsp)) { freq <- object$tsp[3] } else { freq <- 1 } if (!is.null(object$periods)) { periods <- object$periods periods <- periods[periods != freq] minorbreaks <- periods * seq(-20:20) } else { minorbreaks <- NULL } p <- p + ggplot2::scale_x_continuous(breaks = seasonalaxis( freq, nlags, type = ticktype, plot = FALSE ), minor_breaks = minorbreaks) p <- p + ggAddExtras(ylab = ylab, xlab = "Lag", main = main) return(p) } } #' @rdname autoplot.acf #' @export ggAcf <- function(x, lag.max = NULL, type = c("correlation", "covariance", "partial"), plot = TRUE, na.action = na.contiguous, demean=TRUE, ...) 
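# Thin wrapper: compute with Acf(..., plot = FALSE), attach series metadata,
# and dispatch to autoplot.acf() when plot = TRUE; equivalent to the piped
# form in the examples above, e.g. wineind %>% Acf(plot = FALSE) %>% autoplot().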
{ object <- Acf(x, lag.max = lag.max, type = type, na.action = na.action, demean = demean, plot = FALSE) object$tsp <- tsp(x) object$periods <- attributes(x)$msts object$series <- deparse(substitute(x)) if (plot) { return(autoplot(object, ...)) } else { return(object) } } #' @rdname autoplot.acf #' @export ggPacf <- function(x, lag.max = NULL, plot = TRUE, na.action = na.contiguous, demean=TRUE, ...) { object <- Acf(x, lag.max = lag.max, type = "partial", na.action = na.action, demean = demean, plot = FALSE) object$tsp <- tsp(x) object$periods <- attributes(x)$msts object$series <- deparse(substitute(x)) if (plot) { return(autoplot(object, ...)) } else { return(object) } } #' @rdname autoplot.acf #' @export ggCcf <- function(x, y, lag.max=NULL, type=c("correlation", "covariance"), plot=TRUE, na.action=na.contiguous, ...) { object <- Ccf(x, y, lag.max = lag.max, type = type, na.action = na.action, plot = FALSE) object$snames <- paste(deparse(substitute(x)), "&", deparse(substitute(y))) object$ccf <- TRUE if (plot) { return(autoplot(object, ...)) } else { return(object) } } #' @rdname autoplot.acf #' @export autoplot.mpacf <- function(object, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { if (!inherits(object, "mpacf")) { stop("autoplot.mpacf requires a mpacf object, use object=object") } if (!is.null(object$lower)) { data <- data.frame(Lag = 1:object$lag, z = object$z, sig = (object$lower < 0 & object$upper > 0)) cidata <- data.frame(Lag = rep(1:object$lag, each = 2) + c(-0.5, 0.5), z = rep(object$z, each = 2), upper = rep(object$upper, each = 2), lower = rep(object$lower, each = 2)) plotpi <- TRUE } else { data <- data.frame(Lag = 1:object$lag, z = object$z) plotpi <- FALSE } # Initialise ggplot object p <- ggplot2::ggplot() p <- p + ggplot2::geom_hline(ggplot2::aes(yintercept = 0), linewidth = 0.2) # Add data if (plotpi) { p <- p + ggplot2::geom_ribbon(ggplot2::aes(x = .data[["Lag"]], ymin = .data[["lower"]], ymax = .data[["upper"]]), data = cidata, fill = "grey50") } p <- p + ggplot2::geom_line(ggplot2::aes(x = .data[["Lag"]], y = .data[["z"]]), data = data) if (plotpi) { p <- p + ggplot2::geom_point(ggplot2::aes(x = .data[["Lag"]], y = .data[["z"]], colour = .data[["sig"]]), data = data) } # Change ticks to be seasonal freq <- frequency(object$x) msts <- is.element("msts", class(object$x)) # Add seasonal x-axis if (msts) { periods <- attributes(object$x)$msts periods <- periods[periods != freq] minorbreaks <- periods * seq(-20:20) } else { minorbreaks <- NULL } p <- p + ggplot2::scale_x_continuous( breaks = seasonalaxis(frequency(object$x), length(data$Lag), type = "acf", plot = FALSE), minor_breaks = minorbreaks ) if (object$type == "partial") { ylab <- "PACF" } else if (object$type == "correlation") { ylab <- "ACF" } p <- p + ggAddExtras(ylab = ylab) return(p) } } #' @rdname autoplot.acf #' @export ggtaperedacf <- function(x, lag.max=NULL, type=c("correlation", "partial"), plot=TRUE, calc.ci=TRUE, level=95, nsim=100, ...) { cl <- match.call() if (plot) { cl$plot <- FALSE } cl[[1]] <- quote(taperedacf) object <- eval.parent(cl) if (plot) { return(autoplot(object, ...)) } else { return(object) } } #' @rdname autoplot.acf #' @export ggtaperedpacf <- function(x, ...) { ggtaperedacf(x, type = "partial", ...) } #' @rdname plot.Arima #' @export autoplot.Arima <- function(object, type = c("both", "ar", "ma"), ...) 
{ if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { if (is.Arima(object)) { # Detect type type <- match.arg(type) q <- p <- 0 if (length(object$model$phi) > 0) { test <- abs(object$model$phi) > 1e-09 if (any(test)) { p <- max(which(test)) } } if (length(object$model$theta) > 0) { test <- abs(object$model$theta) > 1e-09 if (any(test)) { q <- max(which(test)) } } if (type == "both") { type <- c("ar", "ma") } } else if (inherits(object, "ar")) { type <- "ar" p <- length(arroots(object)$roots) q <- 0 } else { stop("autoplot.Arima requires an Arima object") } # Remove NULL type type <- intersect(type, c("ar", "ma")[c(p > 0, q > 0)]) # Prepare data arData <- maData <- NULL allRoots <- data.frame(roots = numeric(0), type = character(0)) if ("ar" %in% type && p > 0) { arData <- arroots(object) allRoots <- rbind(allRoots, data.frame(roots = arData$roots, type = arData$type)) } if ("ma" %in% type && q > 0) { maData <- maroots(object) allRoots <- rbind(allRoots, data.frame(roots = maData$roots, type = maData$type)) } allRoots$Real <- Re(1 / allRoots$roots) allRoots$Imaginary <- Im(1 / allRoots$roots) allRoots$UnitCircle <- factor(ifelse((abs(allRoots$roots) > 1), "Within", "Outside")) # Initialise general ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["Real"]], y = .data[["Imaginary"]], colour = .data[["UnitCircle"]]), data = allRoots) p <- p + ggplot2::coord_fixed(ratio = 1) p <- p + ggplot2::annotate( "path", x = cos(seq(0, 2 * pi, length.out = 100)), y = sin(seq(0, 2 * pi, length.out = 100)) ) p <- p + ggplot2::geom_vline(xintercept = 0) p <- p + ggplot2::geom_hline(yintercept = 0) p <- p + ggAddExtras(xlab = "Real", ylab = "Imaginary") if (NROW(allRoots) == 0) { return(p + ggAddExtras(main = "No AR or MA roots")) } p <- p + ggplot2::geom_point(size = 3) if (length(type) == 1) { p <- p + ggAddExtras(main = paste("Inverse", toupper(type), "roots")) } else { p <- p + ggplot2::facet_wrap(~ type, labeller = function(labels) lapply(labels, function(x) paste("Inverse", as.character(x), "roots"))) } } return(p) } #' @rdname plot.Arima #' @export autoplot.ar <- function(object, ...) { autoplot.Arima(object, ...) } #' @rdname autoplot.seas #' @export autoplot.decomposed.ts <- function(object, labels=NULL, range.bars = NULL, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. 
= FALSE) } else { if (!inherits(object, "decomposed.ts")) { stop("autoplot.decomposed.ts requires a decomposed.ts object") } if (is.null(labels)) { labels <- c("trend", "seasonal", "remainder") } cn <- c("data", labels) data <- data.frame( datetime = rep(time(object$x), 4), y = c(object$x, object$trend, object$seasonal, object$random), parts = factor(rep(cn, each = NROW(object$x)), levels = cn) ) # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), data = data) # Add data int <- as.numeric(object$type == "multiplicative") p <- p + ggplot2::geom_line(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), data = subset(data, data$parts != cn[4]), na.rm = TRUE) p <- p + ggplot2::geom_segment( ggplot2::aes(x = .data[["datetime"]], xend = .data[["datetime"]], y = int, yend = .data[["y"]]), data = subset(data, data$parts == cn[4]), lineend = "butt", na.rm = TRUE ) p <- p + ggplot2::facet_grid("parts ~ .", scales = "free_y", switch = "y") p <- p + ggplot2::geom_hline(ggplot2::aes(yintercept = .data[["y"]]), data = data.frame(y = int, parts = factor(cn[4], levels = cn))) if (is.null(range.bars)) { range.bars <- object$type == "additive" } if (range.bars) { yranges <- vapply(split(data$y, data$parts), function(x) range(x, na.rm = TRUE), numeric(2)) xranges <- range(data$datetime) barmid <- apply(yranges, 2, mean) barlength <- min(apply(yranges, 2, diff)) barwidth <- (1 / 64) * diff(xranges) barpos <- data.frame( left = xranges[2] + barwidth, right = xranges[2] + barwidth * 2, top = barmid + barlength / 2, bottom = barmid - barlength / 2, parts = factor(colnames(yranges), levels = cn), datetime = xranges[2], y = barmid ) p <- p + ggplot2::geom_rect( ggplot2::aes( xmin = .data[["left"]], xmax = .data[["right"]], ymax = .data[["top"]], ymin = .data[["bottom"]] ), data = barpos, fill = "gray75", colour = "black", size = 1 / 3) } # Add axis labels p <- p + ggAddExtras( main = paste("Decomposition of", object$type, "time series"), xlab = "Time", ylab = "" ) # Make x axis contain only whole numbers (e.g., years) p <- p + ggplot2::scale_x_continuous(breaks = unique(round(pretty(data$datetime)))) return(p) } } #' @rdname plot.ets #' @export autoplot.ets <- function(object, range.bars = NULL, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. 
= FALSE) } else { if (!is.ets(object)) { stop("autoplot.ets requires an ets object, use object=object") } names <- c(y = "observed", l = "level", b = "slope", s1 = "season") data <- cbind(object$x, object$states[, colnames(object$states) %in% names(names)]) cn <- c("y", c(colnames(object$states))) colnames(data) <- cn <- names[stats::na.exclude(match(cn, names(names)))] # Convert to longform data <- data.frame( datetime = rep(time(data), NCOL(data)), y = c(data), parts = factor(rep(cn, each = NROW(data)), levels = cn) ) # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), data = data, ylab = "") # Add data p <- p + ggplot2::geom_line(na.rm = TRUE) p <- p + ggplot2::facet_grid(parts ~ ., scales = "free_y", switch = "y") if (is.null(range.bars)) { range.bars <- is.null(object$lambda) } if (range.bars) { yranges <- vapply(split(data$y, data$parts), function(x) range(x, na.rm = TRUE), numeric(2)) xranges <- range(data$datetime) barmid <- apply(yranges, 2, mean) barlength <- min(apply(yranges, 2, diff)) barwidth <- (1 / 64) * diff(xranges) barpos <- data.frame( left = xranges[2] + barwidth, right = xranges[2] + barwidth * 2, top = barmid + barlength / 2, bottom = barmid - barlength / 2, parts = factor(colnames(yranges), levels = cn), datetime = xranges[2], y = barmid ) p <- p + ggplot2::geom_rect( ggplot2::aes( xmin = .data[["left"]], xmax = .data[["right"]], ymax = .data[["top"]], ymin = .data[["bottom"]] ), data = barpos, fill = "gray75", colour = "black", size = 1 / 3) } p <- p + ggAddExtras(xlab = NULL, ylab = "", main = paste("Components of", object$method, "method")) return(p) } } #' @rdname plot.bats #' @export autoplot.tbats <- function(object, range.bars = FALSE, ...) { cl <- match.call() cl[[1]] <- quote(autoplot.bats) eval.parent(cl) } #' @rdname plot.bats #' @export autoplot.bats <- function(object, range.bars = FALSE, ...) { data <- tbats.components(object) cn <- colnames(data) # Convert to longform data <- data.frame( datetime = rep(time(data), NCOL(data)), y = c(data), parts = factor(rep(cn, each = NROW(data)), levels = cn) ) # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), data = data, ylab = "") # Add data p <- p + ggplot2::geom_line(na.rm = TRUE) p <- p + ggplot2::facet_grid(parts ~ ., scales = "free_y", switch = "y") if (range.bars) { yranges <- vapply(split(data$y, data$parts), function(x) range(x, na.rm = TRUE), numeric(2)) xranges <- range(data$datetime) barmid <- apply(yranges, 2, mean) barlength <- min(apply(yranges, 2, diff)) barwidth <- (1 / 64) * diff(xranges) barpos <- data.frame( left = xranges[2] + barwidth, right = xranges[2] + barwidth * 2, top = barmid + barlength / 2, bottom = barmid - barlength / 2, parts = factor(colnames(yranges), levels = cn), datetime = xranges[2], y = barmid ) p <- p + ggplot2::geom_rect( ggplot2::aes( xmin = .data[["left"]], xmax = .data[["right"]], ymax = .data[["top"]], ymin = .data[["bottom"]] ), data = barpos, fill = "gray75", colour = "black", size = 1 / 3) } p <- p + ggAddExtras(xlab = NULL, ylab = "", main = paste("Components of", object$method, "method")) return(p) } #' @rdname plot.forecast #' @export autoplot.forecast <- function(object, include, PI=TRUE, shadecols=c("#596DD5", "#D5DBFF"), fcol="#0000AA", flwd=0.5, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. 
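# Illustrative sketch for the ETS components plot above; `USAccDeaths` ships
# with base R. Only states matching `names` above (level, slope, season) are
# shown alongside the observed series.
#   fit <- ets(USAccDeaths)
#   autoplot(fit, range.bars = TRUE)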
= FALSE) } else {
    if (!is.forecast(object)) {
      stop("autoplot.forecast requires a forecast object, use object=object")
    }
    if (is.null(object$lower) || is.null(object$upper) || is.null(object$level)) {
      PI <- FALSE
    } else if (!is.finite(max(object$upper))) {
      PI <- FALSE
    }
    if (!is.null(object$model$terms) && !is.null(object$model$model)) {
      # Initialise original dataset
      mt <- object$model$terms
      if (!is.null(object$series)) {
        yvar <- object$series
      } else {
        yvar <- deparse(mt[[2]])
      } # Perhaps a better way to do this
      xvar <- attr(mt, "term.labels")
      vars <- c(yvar = yvar, xvar = xvar)
      data <- object$model$model
      colnames(data) <- names(vars)[match(colnames(data), vars)]
      if (!is.null(object$model$lambda)) {
        data$yvar <- InvBoxCox(data$yvar, object$model$lambda)
      }
    } else {
      if (!is.null(object$x)) {
        data <- data.frame(yvar = c(object$x))
      } else if (!is.null(object$residuals) && !is.null(object$fitted)) {
        data <- data.frame(yvar = c(object$residuals + object$fitted))
      } else {
        stop("Could not find data")
      }
      if (!is.null(object$series)) {
        vars <- c(yvar = object$series)
      } else if (!is.null(object$model$call)) {
        vars <- c(yvar = deparse(object$model$call$y))
        if (vars == "object") {
          vars <- c(yvar = "y")
        }
      } else {
        vars <- c(yvar = "y")
      }
    }
    # Initialise ggplot object
    p <- ggplot2::ggplot()
    # Cross sectional forecasts
    if (!is.element("ts", class(object$mean))) {
      if (length(xvar) > 1) {
        stop("Forecast plot for regression models is only available for a single predictor")
      }
      if (NCOL(object$newdata) == 1) { # Make sure column has correct name
        colnames(object$newdata) <- xvar
      }
      flwd <- 2 * flwd # Scale for points
      # Data points
      p <- p + ggplot2::geom_point(ggplot2::aes(x = .data[["xvar"]], y = .data[["yvar"]]), data = data)
      p <- p + ggplot2::labs(y = vars["yvar"], x = vars["xvar"])
      # Forecasted intervals
      if (PI) {
        levels <- NROW(object$level)
        interval <- data.frame(
          xpred = rep(object$newdata[[1]], levels),
          lower = c(object$lower),
          upper = c(object$upper),
          level = rep(object$level, each = NROW(object$newdata[[1]]))
        )
        interval <- interval[order(interval$level, decreasing = TRUE), ] # Must be ordered for gg z-index
        p <- p + ggplot2::geom_linerange(ggplot2::aes(x = .data[["xpred"]], ymin = .data[["lower"]], ymax = .data[["upper"]], colour = .data[["level"]]), data = interval, linewidth = flwd)
        if (length(object$level) <= 5) {
          p <- p + ggplot2::scale_colour_gradientn(breaks = object$level, colours = shadecols, guide = "legend")
        } else {
          p <- p + ggplot2::scale_colour_gradientn(colours = shadecols, guide = "colourbar")
        }
      }
      # Forecasted points
      predicted <- data.frame(object$newdata, object$mean)
      colnames(predicted) <- c("xpred", "ypred")
      p <- p + ggplot2::geom_point(ggplot2::aes(x = .data[["xpred"]], y = .data[["ypred"]]), data = predicted, color = fcol, size = flwd)
      # Line of best fit
      coef <- data.frame(int = 0, m = 0)
      # nomatch = 0 avoids an NA (and a runtime error in the test below) when the model has no intercept
      i <- match("(Intercept)", names(object$model$coefficients), nomatch = 0)
      if (i != 0) {
        coef$int <- object$model$coefficients[i]
        if (NROW(object$model$coefficients) == 2) {
          coef$m <- object$model$coefficients[-i]
        }
      } else {
        if (NROW(object$model$coefficients) == 1) {
          coef$m <- object$model$coefficients
        }
      }
      p <- p + ggplot2::geom_abline(intercept = coef$int, slope = coef$m)
    } else { # Time series objects (assumed)
      if (!missing(shadecols)) {
        warning(
          "The `shadecols` argument is deprecated for time series forecasts. Interval shading is now done automatically based on the level and `fcol`.",
          call.
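# Illustrative sketch of the cross-sectional branch above (forecasts whose
# mean is not a ts, from a single-predictor regression); the base-R `cars`
# data is an assumption here, and exact behaviour depends on how forecast.lm
# handles non-time-series data.
#   fit <- lm(dist ~ speed, data = cars)
#   fc <- forecast(fit, newdata = data.frame(speed = 30))
#   autoplot(fc)   # points, interval lineranges and the fitted line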
= FALSE) } # Data points if (!is.null(time(object$x))) { timex <- time(object$x) } else if (!is.null(time(object$model$residuals))) { timex <- time(object$model$residuals) } data <- data.frame(yvar = as.numeric(data$yvar), datetime = as.numeric(timex)) if (!missing(include)) { data <- tail(data, include) } p <- p + ggplot2::scale_x_continuous() p <- p + ggplot2::geom_line(ggplot2::aes(x = .data[["datetime"]], y = .data[["yvar"]]), data = data) + ggplot2::labs(y = vars["yvar"], x = "Time") # Forecasted intervals p <- p + autolayer(object, PI = PI, colour = fcol, size = flwd) # predicted <- data.frame(xvar = time(object$mean), yvar = object$mean) # colnames(predicted) <- c("datetime", "ypred") # if (PI) { # levels <- NROW(object$level) # interval <- data.frame(datetime = rep(predicted$datetime, levels), lower = c(object$lower), upper = c(object$upper), level = rep(object$level, each = NROW(object$mean))) # interval <- interval[order(interval$level, decreasing = TRUE), ] # Must be ordered for gg z-index # p <- p + ggplot2::geom_ribbon(ggplot2::aes_(x = ~datetime, ymin = ~lower, ymax = ~upper, group = ~-level, fill = ~level), data = interval) # if (min(object$level) < 50) { # scalelimit <- c(1, 99) # } # else { # scalelimit <- c(50, 99) # } # if (length(object$level) <= 5) { # p <- p + ggplot2::scale_fill_gradientn(breaks = object$level, colours = shadecols, limit = scalelimit, guide = "legend") # } # else { # p <- p + ggplot2::scale_fill_gradientn(colours = shadecols, limit = scalelimit) # } # # Negative group is a work around for missing z-index # } # # Forecasted points # p <- p + ggplot2::geom_line(ggplot2::aes_(x = ~datetime, y = ~ypred), data = predicted, color = fcol, size = flwd) } p <- p + ggAddExtras(main = paste("Forecasts from ", object$method, sep = "")) return(p) } } #' @rdname plot.mforecast #' @export autoplot.mforecast <- function(object, PI = TRUE, facets = TRUE, colour = FALSE, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { if (!is.mforecast(object)) { stop("autoplot.mforecast requires a mforecast object, use object=object") } if (is.ts(object$forecast[[1]]$mean)) { # ts forecasts p <- autoplot(getResponse(object), facets = facets, colour = colour) + autolayer(object, ...) if (facets) { p <- p + ggplot2::facet_wrap( ~ series, labeller = function(labels) { if (!is.null(object$method)) { lapply(labels, function(x) paste0(as.character(x), "\n", object$method[as.character(x)])) } else { lapply(labels, function(x) paste0(as.character(x))) } }, ncol = 1, scales = "free_y" ) } p <- p + ggAddExtras(ylab = NULL) return(p) } else { # lm forecasts if (!requireNamespace("grid")) { stop("grid is needed for this function to work. Install it via install.packages(\"grid\")", call. 
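# Illustrative sketch for autoplot.mforecast above, assuming the base-R lung
# deaths series; each series gets its own facet with the method shown in the
# strip label.
#   fc <- forecast(cbind(mdeaths, fdeaths), h = 12)
#   autoplot(fc)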
= FALSE) }
      K <- length(object$forecast)
      if (K < 2) {
        warning("Expected at least two forecast series but fewer were found.")
      }
      # Set up vector arguments
      # Recycle PI so that a scalar value applies to every sub-plot
      PI <- rep_len(PI, K)
      # Set up grid
      # ncol: Number of columns of plots
      # nrow: Number of rows needed, calculated from # of cols
      gridlayout <- matrix(seq(1, K), ncol = 1, nrow = K)
      grid::grid.newpage()
      grid::pushViewport(grid::viewport(layout = grid::grid.layout(nrow(gridlayout), ncol(gridlayout))))
      for (i in 1:K) {
        partialfcast <- object$forecast[[i]]
        partialfcast$model <- mlmsplit(object$model, index = i)
        matchidx <- as.data.frame(which(gridlayout == i, arr.ind = TRUE))
        print(
          autoplot(
            structure(partialfcast, class = "forecast"),
            PI = PI[i], ...
          ) + ggAddExtras(ylab = names(object$forecast)[i]),
          vp = grid::viewport(
            layout.pos.row = matchidx$row,
            layout.pos.col = matchidx$col
          )
        )
      }
    }
  }
}

#' @rdname tsdisplay
#'
#' @examples
#' library(ggplot2)
#' ggtsdisplay(USAccDeaths, plot.type="scatter", theme=theme_bw())
#'
#' @export
ggtsdisplay <- function(x, plot.type=c("partial", "histogram", "scatter", "spectrum"),
                        points=TRUE, smooth=FALSE, lag.max, na.action=na.contiguous, theme=NULL, ...) {
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE)
  } else if (!requireNamespace("grid", quietly = TRUE)) {
    stop("grid is needed for this function to work. Install it via install.packages(\"grid\")", call. = FALSE)
  } else {
    if (NCOL(x) > 1) {
      stop("ggtsdisplay is only for univariate time series")
    }
    plot.type <- match.arg(plot.type)
    main <- deparse(substitute(x))
    if (!is.ts(x)) {
      x <- ts(x)
    }
    if (missing(lag.max)) {
      lag.max <- round(min(max(10 * log10(length(x)), 3 * frequency(x)), length(x) / 3))
    }
    dots <- list(...)
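# Worked example of the lag.max default above: for monthly data of length 144,
# max(10 * log10(144), 3 * 12) = max(21.6, 36) = 36 and length(x) / 3 = 48,
# so round(min(36, 48)) = 36 lags are displayed by default.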
if (is.null(dots$xlab)) { dots$xlab <- "" } if (is.null(dots$ylab)) { dots$ylab <- "" } labs <- match(c("xlab", "ylab", "main"), names(dots), nomatch = 0) # Set up grid for plots gridlayout <- matrix(c(1, 2, 1, 3), nrow = 2) grid::grid.newpage() grid::pushViewport(grid::viewport(layout = grid::grid.layout(nrow(gridlayout), ncol(gridlayout)))) # Add ts plot with points matchidx <- as.data.frame(which(gridlayout == 1, arr.ind = TRUE)) tsplot <- do.call(ggplot2::autoplot, c(object = quote(x), dots[labs])) if (points) { tsplot <- tsplot + ggplot2::geom_point(size = 0.5) } if (smooth) { tsplot <- tsplot + ggplot2::geom_smooth(method = "loess", se = FALSE) } if (is.null(tsplot$labels$title)) { # Add title if missing tsplot <- tsplot + ggplot2::ggtitle(main) } if (!is.null(theme)) { tsplot <- tsplot + theme } print( tsplot, vp = grid::viewport( layout.pos.row = matchidx$row, layout.pos.col = matchidx$col ) ) # Prepare Acf plot acfplot <- do.call(ggAcf, c(x = quote(x), lag.max = lag.max, na.action = na.action, dots[-labs])) + ggplot2::ggtitle(NULL) if (!is.null(theme)) { acfplot <- acfplot + theme } # Prepare last plot (variable) if (plot.type == "partial") { lastplot <- ggPacf(x, lag.max = lag.max, na.action = na.action) + ggplot2::ggtitle(NULL) # Match y-axis acfplotrange <- ggplot2::layer_scales(acfplot)$y$range$range pacfplotrange <- ggplot2::layer_scales(lastplot)$y$range$range yrange <- range(c(acfplotrange, pacfplotrange)) acfplot <- acfplot + ggplot2::ylim(yrange) lastplot <- lastplot + ggplot2::ylim(yrange) } else if (plot.type == "histogram") { lastplot <- gghistogram(x, add.normal = TRUE, add.rug = TRUE) + ggplot2::xlab(main) } else if (plot.type == "scatter") { scatterData <- data.frame(y = x[2:NROW(x)], x = x[1:NROW(x) - 1]) lastplot <- ggplot2::ggplot(ggplot2::aes(y = .data[["y"]], x = .data[["x"]]), data = scatterData) + ggplot2::geom_point() + ggplot2::labs(x = expression(Y[t - 1]), y = expression(Y[t])) } else if (plot.type == "spectrum") { specData <- spec.ar(x, plot = FALSE) specData <- data.frame(spectrum = specData$spec, frequency = specData$freq) lastplot <- ggplot2::ggplot(ggplot2::aes(y = .data[["spectrum"]], x = .data[["frequency"]]), data = specData) + ggplot2::geom_line() + ggplot2::scale_y_log10() } if (!is.null(theme)) { lastplot <- lastplot + theme } # Add ACF plot matchidx <- as.data.frame(which(gridlayout == 2, arr.ind = TRUE)) print( acfplot, vp = grid::viewport( layout.pos.row = matchidx$row, layout.pos.col = matchidx$col ) ) # Add last plot matchidx <- as.data.frame(which(gridlayout == 3, arr.ind = TRUE)) print( lastplot, vp = grid::viewport( layout.pos.row = matchidx$row, layout.pos.col = matchidx$col ) ) } } #' Time series lag ggplots #' #' Plots a lag plot using ggplot. #' #' \dQuote{gglagplot} will plot time series against lagged versions of #' themselves. Helps visualising 'auto-dependence' even when auto-correlations #' vanish. #' #' \dQuote{gglagchull} will layer convex hulls of the lags, layered on a single #' plot. This helps visualise the change in 'auto-dependence' as lags increase. #' #' @param x a time series object (type \code{ts}). #' @param lags number of lag plots desired, see arg set.lags. #' @param set.lags vector of positive integers specifying which lags to use. #' @param diag logical indicating if the x=y diagonal should be drawn. #' @param diag.col color to be used for the diagonal if(diag). #' @param do.lines if TRUE, lines will be drawn, otherwise points will be #' drawn. #' @param colour logical indicating if lines should be coloured. 
#' @param continuous Should the colour scheme for years be continuous or #' discrete? #' @param labels logical indicating if labels should be used. #' @param seasonal Should the line colour be based on seasonal characteristics #' (TRUE), or sequential (FALSE). #' @param \dots Not used (for consistency with lag.plot) #' @return None. #' @author Mitchell O'Hara-Wild #' @seealso \code{\link[stats]{lag.plot}} #' @examples #' #' gglagplot(woolyrnq) #' gglagplot(woolyrnq,seasonal=FALSE) #' #' lungDeaths <- cbind(mdeaths, fdeaths) #' gglagplot(lungDeaths, lags=2) #' gglagchull(lungDeaths, lags=6) #' #' @export gglagplot <- function(x, lags=ifelse(frequency(x) > 9, 16, 9), set.lags = 1:lags, diag=TRUE, diag.col="gray", do.lines = TRUE, colour = TRUE, continuous = frequency(x) > 12, labels = FALSE, seasonal = TRUE, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { freq <- frequency(x) if (freq > 1) { linecol <- cycle(x) if (freq > 24) { continuous <- TRUE } } else { seasonal <- FALSE continuous <- TRUE } if (!seasonal) { continuous <- TRUE } # Make sure lags is evaluated tmp <- lags x <- as.matrix(x) # Prepare data for plotting n <- NROW(x) data <- data.frame() for (i in 1:NCOL(x)) { for (lagi in set.lags) { sname <- colnames(x)[i] if (is.null(sname)) { sname <- deparse(match.call()$x) } data <- rbind( data, data.frame( lagnum = 1:(n - lagi), freqcur = if(seasonal) linecol[(lagi + 1):n] else (lagi + 1):n, orig = x[(lagi + 1):n, i], lagged = x[1:(n - lagi), i], lagVal = rep(lagi, n - lagi), series = factor(rep(sname, n - lagi)) ) ) } } if (!continuous) { data$freqcur <- factor(data$freqcur) } # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["lagged"]], y = .data[["orig"]]), data = data) if (diag) { p <- p + ggplot2::geom_abline(colour = diag.col, linetype = "dashed") } if (labels) { linesize <- 0.25 * (2 - do.lines) } else { linesize <- 0.5 * (2 - do.lines) } plottype <- if (do.lines) { function(...) ggplot2::geom_path(..., linewidth = linesize) } else { function(...) 
ggplot2::geom_point(..., size = linesize) } if (colour) { p <- p + plottype(ggplot2::aes(colour = .data[["freqcur"]])) } else { p <- p + plottype() } if (labels) { p <- p + ggplot2::geom_text(ggplot2::aes(label = .data[["lagnum"]])) } # Ensure all facets are of size size (if extreme values are excluded in lag specification) if (max(set.lags) > NROW(x) / 2) { axissize <- rbind(aggregate(orig ~ series, data = data, min), aggregate(orig~ series, data = data, max)) axissize <- data.frame(series = rep(axissize$series, length(set.lags)), orig = rep(axissize$orig, length(set.lags)), lagVal = rep(set.lags, each = NCOL(x))) p <- p + ggplot2::geom_blank(ggplot2::aes(x = .data[["orig"]], y = .data[["orig"]]), data = axissize) } # Facet labellerFn <- function(labels) { if (!is.null(labels$series)) { # Multivariate labels labels$series <- as.character(labels$series) } labels$lagVal <- paste("lag", labels$lagVal) return(labels) } if (NCOL(x) > 1) { p <- p + ggplot2::facet_wrap(~series + lagVal, scales = "free", labeller = labellerFn) } else { p <- p + ggplot2::facet_wrap(~lagVal, labeller = labellerFn) } p <- p + ggplot2::theme(aspect.ratio = 1) if (colour) { if (seasonal) { if (freq == 4L) { title <- "Quarter" } else if (freq == 12L) { title <- "Month" } else if (freq == 7L) { title <- "Day" } else if (freq == 24L) { title <- "Hour" } else { title <- "Season" } } else { title <- "Time" } if (continuous) { p <- p + ggplot2::guides(colour = ggplot2::guide_colourbar(title = title)) } else { p <- p + ggplot2::guides(colour = ggplot2::guide_legend(title = title)) } } p <- p + ggAddExtras(ylab = NULL, xlab = NULL) return(p) } } #' @rdname gglagplot #' #' @examples #' gglagchull(woolyrnq) #' #' @export gglagchull <- function(x, lags=ifelse(frequency(x) > 1, min(12, frequency(x)), 4), set.lags = 1:lags, diag=TRUE, diag.col="gray", ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { # Make sure lags is evaluated tmp <- lags x <- as.matrix(x) # Prepare data for plotting n <- NROW(x) data <- data.frame() for (i in 1:NCOL(x)) { for (lag in set.lags) { sname <- colnames(x)[i] if (is.null(sname)) { sname <- deparse(substitute(x)) } data <- rbind(data, data.frame(orig = x[(lag + 1):n, i], lagged = x[1:(n - lag), i], lag = rep(lag, n - lag), series = rep(sname, n - lag))[grDevices::chull(x[(lag + 1):n, i], x[1:(n - lag), i]), ]) } } # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["orig"]], y = .data[["lagged"]]), data = data) if (diag) { p <- p + ggplot2::geom_abline(colour = diag.col, linetype = "dashed") } p <- p + ggplot2::geom_polygon(ggplot2::aes(group = .data[["lag"]], colour = .data[["lag"]], fill = .data[["lag"]]), alpha = 1 / length(set.lags)) p <- p + ggplot2::guides(colour = ggplot2::guide_colourbar(title = "lag")) p <- p + ggplot2::theme(aspect.ratio = 1) # Facet if (NCOL(x) > 1) { p <- p + ggplot2::facet_wrap(~series, scales = "free") } p <- p + ggAddExtras(ylab = "lagged", xlab = "original") return(p) } } #' Create a seasonal subseries ggplot #' #' Plots a subseries plot using ggplot. Each season is plotted as a separate #' mini time series. The blue lines represent the mean of the observations #' within each season. #' #' The \code{ggmonthplot} function is simply a wrapper for #' \code{ggsubseriesplot} as a convenience for users familiar with #' \code{\link[stats]{monthplot}}. #' #' @param x a time series object (type \code{ts}). 
#' @param labels A vector of labels to use for each 'season' #' @param times A vector of times for each observation #' @param phase A vector of seasonal components #' @param \dots Not used (for consistency with monthplot) #' @return Returns an object of class \code{ggplot}. #' @author Mitchell O'Hara-Wild #' @seealso \code{\link[stats]{monthplot}} #' @examples #' #' ggsubseriesplot(AirPassengers) #' ggsubseriesplot(woolyrnq) #' #' @export ggmonthplot <- function(x, labels = NULL, times = time(x), phase = cycle(x), ...) { ggsubseriesplot(x, labels, times, phase, ...) } #' @rdname ggmonthplot #' @export ggsubseriesplot <- function(x, labels = NULL, times = time(x), phase = cycle(x), ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { if (!inherits(x, "ts")) { stop("ggsubseriesplot requires a ts object, use x=object") } if (round(frequency(x)) <= 1) { stop("Data are not seasonal") } if("1" %in% dimnames(table(table(phase)))[[1]]){ stop(paste("Each season requires at least 2 observations.", ifelse(frequency(x)%%1 == 0, "Your series length may be too short for this graphic.", "This may be caused from specifying a time-series with non-integer frequency.") ) ) } data <- data.frame(y = as.numeric(x), year = trunc(time(x)), season = as.numeric(phase)) seasonwidth <- (max(data$year) - min(data$year)) * 1.05 data$time <- data$season + 0.025 + (data$year - min(data$year)) / seasonwidth avgLines <- stats::aggregate(data$y, by = list(data$season), FUN = mean) colnames(avgLines) <- c("season", "avg") data <- merge(data, avgLines, by = "season") # Initialise ggplot object # p <- ggplot2::ggplot(ggplot2::aes_(x=~interaction(year, season), y=~y, group=~season), data=data, na.rm=TRUE) p <- ggplot2::ggplot( ggplot2::aes(x = .data[["time"]], y = .data[["y"]], group = .data[["season"]]), data = data, na.rm = TRUE ) # Remove vertical break lines p <- p + ggplot2::theme(panel.grid.major.x = ggplot2::element_blank()) # Add data p <- p + ggplot2::geom_line() # Add average lines p <- p + ggplot2::geom_line(ggplot2::aes(y = .data[["avg"]]), col = "#0000AA") # Create x-axis labels xfreq <- frequency(x) if (!is.null(labels)) { if (xfreq != length(labels)) { stop("The number of labels supplied is not the same as the number of seasons.") } else { xbreaks <- labels } } else if (xfreq == 4) { xbreaks <- c("Q1", "Q2", "Q3", "Q4") xlab <- "Quarter" } else if (xfreq == 7) { xbreaks <- c( "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" ) xlab <- "Day" } else if (xfreq == 12) { xbreaks <- month.abb xlab <- "Month" } else { xbreaks <- 1:frequency(x) xlab <- "Season" } # X-axis p <- p + ggplot2::scale_x_continuous(breaks = 0.5 + (1:xfreq), labels = xbreaks) # Graph labels p <- p + ggAddExtras(ylab = deparse(substitute(x)), xlab = xlab) return(p) } } #' @rdname seasonplot #' #' @param continuous Should the colour scheme for years be continuous or #' discrete? #' @param polar Plot the graph on seasonal coordinates #' #' @examples #' ggseasonplot(AirPassengers, col=rainbow(12), year.labels=TRUE) #' ggseasonplot(AirPassengers, year.labels=TRUE, continuous=TRUE) #' #' @export ggseasonplot <- function(x, season.labels=NULL, year.labels=FALSE, year.labels.left=FALSE, type=NULL, col=NULL, continuous=FALSE, polar=FALSE, labelgap=0.04, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. 
Install it via install.packages(\"ggplot2\")", call. = FALSE) } if (!inherits(x, "ts")) { stop("autoplot.seasonplot requires a ts object, use x=object") } if (!is.null(type)) { message("Plot types are not yet supported for seasonplot()") } # Check data are seasonal and convert to integer seasonality s <- round(frequency(x)) if (s <= 1) { stop("Data are not seasonal") } # Grab name for plot title xname <- deparse(substitute(x)) tspx <- tsp(x) x <- ts(x, start = tspx[1], frequency = s) data <- data.frame( y = as.numeric(x), year = trunc(round(time(x), 8)), cycle = as.numeric(cycle(x)), time = as.numeric((cycle(x) - 1) / s) ) data$year <- if (continuous) { as.numeric(data$year) } else { as.factor(data$year) } if (polar) { startValues <- data[data$cycle == 1, ] if (data$cycle[1] == 1) { startValues <- startValues[-1, ] } startValues$time <- 1 - .Machine$double.eps levels(startValues$year) <- as.numeric(levels(startValues$year)) - 1 data <- rbind(data, startValues) } # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["time"]], y = .data[["y"]], group = .data[["year"]], colour = .data[["year"]]), data = data, na.rm = TRUE) # p <- p + ggplot2::scale_x_continuous() # Add data p <- p + ggplot2::geom_line() if (!is.null(col)) { if(is.numeric(col)){ col <- palette()[(col-1)%%(length(palette())) + 1] } if (continuous) { p <- p + ggplot2::scale_color_gradientn(colours = col) } else { ncol <- length(unique(data$year)) if (length(col) == 1) { p <- p + ggplot2::scale_color_manual(guide = "none", values = rep(col, ncol)) } else { p <- p + ggplot2::scale_color_manual(values = rep(col, ceiling(ncol / length(col)))[1:ncol]) } } } if (year.labels) { yrlab <- stats::aggregate(time ~ year, data = data, FUN = max) yrlab <- cbind(yrlab, offset = labelgap) } if (year.labels.left) { yrlabL <- stats::aggregate(time ~ year, data = data, FUN = min) yrlabL <- cbind(yrlabL, offset = -labelgap) if (year.labels) { yrlab <- rbind(yrlab, yrlabL) } } if (year.labels || year.labels.left) { yrlab <- merge(yrlab, data) yrlab$time <- yrlab$time + yrlab$offset p <- p + ggplot2::guides(colour = "none") p <- p + ggplot2::geom_text(ggplot2::aes(x = .data[["time"]], y = .data[["y"]], label = .data[["year"]]), data = yrlab) } # Add seasonal labels if (s == 12) { labs <- month.abb xLab <- "Month" } else if (s == 4) { labs <- paste("Q", 1:4, sep = "") xLab <- "Quarter" } else if (s == 7) { labs <- c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat") xLab <- "Day" } else if (s == 52) { labs <- 1:s xLab <- "Week" } else if (s == 24) { labs <- 0:(s - 1) xLab <- "Hour" } else if (s == 48) { labs <- seq(0, 23.5, by = 0.5) xLab <- "Half-hour" } else { labs <- 1:s xLab <- "Season" } if (!is.null(season.labels)) { if (length(season.labels) != length(labs)) { warning(paste0("Provided season.labels have length ", length(season.labels), ", but ", length(labs), " are required. Ignoring season.labels.")) } else { labs <- season.labels } } breaks <- sort(unique(data$time)) if (polar) { breaks <- head(breaks, -1) p <- p + ggplot2::coord_polar() } p <- p + ggplot2::scale_x_continuous(breaks = breaks, minor_breaks = NULL, labels = labs) # Graph title and axes p <- p + ggAddExtras(main = paste("Seasonal plot:", xname), xlab = xLab, ylab = NULL) return(p) } #' @rdname plot.forecast #' @export autoplot.splineforecast <- function(object, PI=TRUE, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. 
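# Illustrative sketch for ggseasonplot above, using the base-R AirPassengers
# series; `polar = TRUE` wraps the seasonal axis onto polar coordinates.
#   ggseasonplot(AirPassengers, year.labels = TRUE)
#   ggseasonplot(AirPassengers, polar = TRUE)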
= FALSE) } else { p <- autoplot(object$x) + autolayer(object) p <- p + ggplot2::geom_point(size = 2) fit <- data.frame(datetime = as.numeric(time(object$fitted)), y = as.numeric(object$fitted)) p <- p + ggplot2::geom_line(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), colour = "red", data = fit) p <- p + ggAddExtras(ylab = deparse(object$model$call$x)) if (!is.null(object$series)) { p <- p + ggplot2::ylab(object$series) } return(p) } } #' @rdname autoplot.seas #' @export autoplot.stl <- function(object, labels = NULL, range.bars = TRUE, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { if (!inherits(object, "stl")) { stop("autoplot.stl requires a stl object, use x=object") } # Re-order series as trend, seasonal, remainder object$time.series <- object$time.series[, c("trend", "seasonal", "remainder")] if (is.null(labels)) { labels <- colnames(object$time.series) } data <- object$time.series cn <- c("data", labels) data <- data.frame( datetime = rep(time(data), NCOL(data) + 1), y = c(rowSums(data), data), parts = factor(rep(cn, each = NROW(data)), levels = cn) ) # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), data = data) # Add data # Time series lines p <- p + ggplot2::geom_line(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), data = subset(data, data$parts != cn[4]), na.rm = TRUE) p <- p + ggplot2::geom_segment( ggplot2::aes(x = .data[["datetime"]], xend = .data[["datetime"]], y = 0, yend = .data[["y"]]), data = subset(data, data$parts == cn[4]), lineend = "butt" ) # Rangebars if (range.bars) { yranges <- vapply(split(data$y, data$parts), function(x) range(x, na.rm = TRUE), numeric(2)) xranges <- range(data$datetime) barmid <- apply(yranges, 2, mean) barlength <- min(apply(yranges, 2, diff)) barwidth <- (1 / 64) * diff(xranges) barpos <- data.frame( left = xranges[2] + barwidth, right = xranges[2] + barwidth * 2, top = barmid + barlength / 2, bottom = barmid - barlength / 2, parts = factor(colnames(yranges), levels = cn), datetime = xranges[2], y = barmid ) p <- p + ggplot2::geom_rect(ggplot2::aes(xmin = .data[["left"]], xmax = .data[["right"]], ymax = .data[["top"]], ymin = .data[["bottom"]]), data = barpos, fill = "gray75", colour = "black", size = 1 / 3) } # Remainder p <- p + ggplot2::facet_grid("parts ~ .", scales = "free_y", switch = "y") p <- p + ggplot2::geom_hline(ggplot2::aes(yintercept = .data[["y"]]), data = data.frame(y = 0, parts = factor(cn[4], levels = cn))) # Add axis labels p <- p + ggAddExtras(xlab = "Time", ylab = "") # Make x axis contain only whole numbers (e.g., years) p <- p + ggplot2::scale_x_continuous(breaks = unique(round(pretty(data$datetime)))) # ^^ Remove rightmost x axis gap with `expand=c(0.05, 0, 0, 0)` argument when assymetric `expand` feature is supported # issue: tidyverse/ggplot2#1669 return(p) } } #' @rdname autoplot.seas #' @export autoplot.StructTS <- function(object, labels = NULL, range.bars = TRUE, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. 
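# Note on the range.bars code in autoplot.stl above: every grey box has the
# same height in data units (the smallest panel range), so with free y scales
# its drawn size reveals how much each panel has been stretched, letting the
# relative magnitude of the components be compared. Illustrative sketch:
#   autoplot(stl(nottem, s.window = "periodic"), range.bars = TRUE)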
= FALSE) } else { if (!inherits(object, "StructTS")) { stop("autoplot.StructTS requires a StructTS object.") } if (is.null(labels)) { labels <- colnames(object$fitted) } data <- object$fitted cn <- c("data", labels) data <- data.frame( datetime = rep(time(data), NCOL(data) + 1), y = c(object$data, data), parts = factor(rep(cn, each = NROW(data)), levels = cn) ) # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), data = data) # Add data p <- p + ggplot2::geom_line(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), na.rm = TRUE) p <- p + ggplot2::facet_grid("parts ~ .", scales = "free_y", switch = "y") if (range.bars) { yranges <- vapply(split(data$y, data$parts), function(x) range(x, na.rm = TRUE), numeric(2)) xranges <- range(data$datetime) barmid <- apply(yranges, 2, mean) barlength <- min(apply(yranges, 2, diff)) barwidth <- (1 / 64) * diff(xranges) barpos <- data.frame( left = xranges[2] + barwidth, right = xranges[2] + barwidth * 2, top = barmid + barlength / 2, bottom = barmid - barlength / 2, parts = factor(colnames(yranges), levels = cn), datetime = xranges[2], y = barmid ) p <- p + ggplot2::geom_rect(ggplot2::aes(xmin = .data[["left"]], xmax = .data[["right"]], ymax = .data[["top"]], ymin = .data[["bottom"]]), data = barpos, fill = "gray75", colour = "black", size = 1 / 3) } # Add axis labels p <- p + ggAddExtras(xlab = "Time", ylab = "") # Make x axis contain only whole numbers (e.g., years) p <- p + ggplot2::scale_x_continuous(breaks = unique(round(pretty(data$datetime)))) return(p) } } #' Plot time series decomposition components using ggplot #' #' Produces a ggplot object of seasonally decomposed time series for objects of #' class \dQuote{\code{stl}} (created with \code{\link[stats]{stl}}), class #' \dQuote{\code{seas}} (created with \code{\link[seasonal]{seas}}), or class #' \dQuote{\code{decomposed.ts}} (created with \code{\link[stats]{decompose}}). #' #' @param object Object of class \dQuote{\code{seas}}, \dQuote{\code{stl}}, or #' \dQuote{\code{decomposed.ts}}. #' @param labels Labels to replace \dQuote{seasonal}, \dQuote{trend}, and #' \dQuote{remainder}. #' @param range.bars Logical indicating if each plot should have a bar at its #' right side representing relative size. If \code{NULL}, automatic selection #' takes place. #' @param ... Other plotting parameters to affect the plot. #' @return Returns an object of class \code{ggplot}. #' @author Mitchell O'Hara-Wild #' @seealso \code{\link[seasonal]{seas}}, \code{\link[stats]{stl}}, #' \code{\link[stats]{decompose}}, \code{\link[stats]{StructTS}}, #' \code{\link[stats]{plot.stl}}. #' @examples #' #' library(ggplot2) #' co2 %>% #' decompose() %>% #' autoplot() #' nottem %>% #' stl(s.window = "periodic") %>% #' autoplot() #' \dontrun{ #' library(seasonal) #' seas(USAccDeaths) %>% autoplot() #' } #' #' @export autoplot.seas <- function(object, labels = NULL, range.bars = NULL, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. 
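# Illustrative sketch for autoplot.StructTS above; a basic structural model
# fitted to the base-R UKgas series (an assumption here) yields level, slope
# and seasonal panels from object$fitted.
#   fit <- StructTS(log(UKgas), type = "BSM")
#   autoplot(fit)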
= FALSE) } if (!inherits(object, "seas")) { stop("autoplot.seas requires a seas object") } if (is.null(labels)) { if ("seasonal" %in% colnames(object$data)) { labels <- c("trend", "seasonal", "irregular") } else { labels <- c("trend", "irregular") } } data <- cbind(object$x, object$data[, labels]) colnames(data) <- cn <- c("data", labels) data <- data.frame( datetime = rep(time(data), NCOL(data)), y = c(data), parts = factor(rep(cn, each = NROW(data)), levels = cn) ) # Is it additive or multiplicative? freq <- frequency(object$data) sum_first_year <- try(sum(seasonal(object)[seq(freq)]), silent=TRUE) if(!inherits(sum_first_year, "try-error")) { int <- as.integer(sum_first_year > 0.5) # Closer to 1 than 0. } else { int <- 0 } # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), data = data) # Add data p <- p + ggplot2::geom_line(ggplot2::aes(x = .data[["datetime"]], y = .data[["y"]]), data = subset(data, data$parts != tail(cn,1)), na.rm = TRUE) p <- p + ggplot2::geom_segment( ggplot2::aes(x = .data[["datetime"]], xend = .data[["datetime"]], y = int, yend = .data[["y"]]), data = subset(data, data$parts == tail(cn,1)), lineend = "butt" ) p <- p + ggplot2::facet_grid("parts ~ .", scales = "free_y", switch = "y") p <- p + ggplot2::geom_hline(ggplot2::aes(yintercept = .data[["y"]]), data = data.frame(y = int, parts = factor(tail(cn,1), levels = cn)) ) # Rangebars if (is.null(range.bars)) { range.bars <- object$spc$transform$`function` == "none" } if (range.bars) { yranges <- vapply(split(data$y, data$parts), function(x) range(x, na.rm = TRUE), numeric(2)) xranges <- range(data$datetime) barmid <- apply(yranges, 2, mean) barlength <- min(apply(yranges, 2, diff)) barwidth <- (1 / 64) * diff(xranges) barpos <- data.frame( left = xranges[2] + barwidth, right = xranges[2] + barwidth * 2, top = barmid + barlength / 2, bottom = barmid - barlength / 2, parts = factor(colnames(yranges), levels = cn), datetime = xranges[2], y = barmid ) p <- p + ggplot2::geom_rect(ggplot2::aes(xmin = .data[["left"]], xmax = .data[["right"]], ymax = .data[["top"]], ymin = .data[["bottom"]]), data = barpos, fill = "gray75", colour = "black", size = 1 / 3) } # Add axis labels p <- p + ggAddExtras(xlab = "Time", ylab = "") # Make x axis contain only whole numbers (e.g., years) p <- p + ggplot2::scale_x_continuous(breaks = unique(round(pretty(data$datetime)))) return(p) } #' @rdname autoplot.ts #' @export autolayer.mts <- function(object, colour = TRUE, series = NULL, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { cl <- match.call() cl[[1]] <- quote(autolayer) cl$object <- quote(object[, i]) if (length(series) != NCOL(object)) { if (colour) { message("For a multivariate time series, specify a seriesname for each time series. Defaulting to column names.") } series <- colnames(object) } out <- list() for (i in 1:NCOL(object)) { cl$series <- series[i] out[[i]] <- eval(cl) } return(out) } } #' @rdname autoplot.ts #' @export autolayer.msts <- function(object, series = NULL, ...) { if (NCOL(object) > 1) { class(object) <- c("mts", "ts", "matrix") } else { if (is.null(series)) { series <- deparse(substitute(series)) } class(object) <- c("ts") } attr(object, "msts") <- NULL autolayer(object, series = series, ...) } #' @rdname autoplot.ts #' @export autolayer.ts <- function(object, colour=TRUE, series=NULL, ...) 
{ if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { tsdata <- data.frame( timeVal = as.numeric(time(object)), series = ifelse(is.null(series), deparse(substitute(object)), series), seriesVal = as.numeric(object) ) if (colour) { ggplot2::geom_line(ggplot2::aes(x = .data[["timeVal"]], y = .data[["seriesVal"]], group = .data[["series"]], colour = .data[["series"]]), data = tsdata, ..., inherit.aes = FALSE) } else { ggplot2::geom_line(ggplot2::aes(x = .data[["timeVal"]], y = .data[["seriesVal"]], group = .data[["series"]]), data = tsdata, ..., inherit.aes = FALSE) } } } #' @rdname plot.forecast #' @export autolayer.forecast <- function(object, series = NULL, PI = TRUE, showgap = TRUE, ...) { PI <- PI & !is.null(object$level) data <- forecast2plotdf(object, PI = PI, showgap = showgap) mapping <- ggplot2::aes(x = .data[["x"]], y = .data[["y"]]) if (!is.null(object$series)) { data[["series"]] <- object$series } if (!is.null(series)) { data[["series"]] <- series mapping$colour <- quote(series) } if (PI) { mapping$level <- quote(level) mapping$ymin <- quote(ymin) mapping$ymax <- quote(ymax) } geom_forecast(mapping = mapping, data = data, stat = "identity", ..., inherit.aes = FALSE) } #' @rdname plot.mforecast #' @export autolayer.mforecast <- function(object, series = NULL, PI = TRUE, ...) { cl <- match.call() cl[[1]] <- quote(autolayer) cl$object <- quote(object$forecast[[i]]) if (!is.null(series)) { if (length(series) != length(object$forecast)) { series <- names(object$forecast) } } out <- list() for (i in 1:length(object$forecast)) { cl$series <- series[i] out[[i]] <- eval(cl) } return(out) } #' Automatically create a ggplot for time series objects #' #' \code{autoplot} takes an object of type \code{ts} or \code{mts} and creates #' a ggplot object suitable for usage with \code{stat_forecast}. #' #' \code{fortify.ts} takes a \code{ts} object and converts it into a data frame #' (for usage with ggplot2). #' #' @param object Object of class \dQuote{\code{ts}} or \dQuote{\code{mts}}. #' @param series Identifies the time series with a colour, which integrates well #' with the functionality of \link{geom_forecast}. #' @param facets If TRUE, multiple time series will be faceted (and unless #' specified, colour is set to FALSE). If FALSE, each series will be assigned a #' colour. #' @param colour If TRUE, the time series will be assigned a colour aesthetic #' @param model Object of class \dQuote{\code{ts}} to be converted to #' \dQuote{\code{data.frame}}. #' @param data Not used (required for \link{fortify} method) #' @param ... Other plotting parameters to affect the plot. #' @inheritParams plot.forecast #' @return None. Function produces a ggplot graph. #' @author Mitchell O'Hara-Wild #' @seealso \code{\link[stats]{plot.ts}}, \code{\link[ggplot2]{fortify}} #' @examples #' #' library(ggplot2) #' autoplot(USAccDeaths) #' #' lungDeaths <- cbind(mdeaths, fdeaths) #' autoplot(lungDeaths) #' autoplot(lungDeaths, facets=TRUE) #' #' @export autoplot.ts <- function(object, series=NULL, xlab = "Time", ylab = deparse(substitute(object)), main = NULL, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. 
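# Illustrative sketch combining the autolayer methods above: overlay a
# forecast (with its prediction intervals) on a plot of the historical data.
#   fc <- forecast(ets(USAccDeaths), h = 24)
#   autoplot(USAccDeaths) + autolayer(fc, series = "ETS", PI = TRUE)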
= FALSE) } else { if (!is.ts(object)) { stop("autoplot.ts requires a ts object, use object=object") } # Create data frame with time as a column labelled x # and time series as a column labelled y. data <- data.frame(y = as.numeric(object), x = as.numeric(time(object))) if (!is.null(series)) { data <- transform(data, series = series) } # Initialise ggplot object p <- ggplot2::ggplot(ggplot2::aes(y = .data[["y"]], x = .data[["x"]]), data = data) # Add data if (!is.null(series)) { p <- p + ggplot2::geom_line(ggplot2::aes(group = .data[["series"]], colour = .data[["series"]]), na.rm = TRUE, ...) } else { p <- p + ggplot2::geom_line(na.rm = TRUE, ...) } # Add labels p <- p + ggAddExtras(xlab = xlab, ylab = ylab, main = main) # Make x axis contain only whole numbers (e.g., years) p <- p + ggplot2::scale_x_continuous(breaks = ggtsbreaks) return(p) } } #' @rdname autoplot.ts #' @export autoplot.mts <- function(object, colour=TRUE, facets=FALSE, xlab = "Time", ylab = deparse(substitute(object)), main = NULL, ...) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { if (!stats::is.mts(object)) { stop("autoplot.mts requires a mts object, use x=object") } if (NCOL(object) <= 1) { return(autoplot.ts(object, ...)) } cn <- colnames(object) if (is.null(cn)) { cn <- paste("Series", seq_len(NCOL(object))) } data <- data.frame( y = as.numeric(c(object)), x = rep(as.numeric(time(object)), NCOL(object)), series = factor(rep(cn, each = NROW(object)), levels = cn) ) # Initialise ggplot object mapping <- ggplot2::aes(y = .data[["y"]], x = .data[["x"]], group = .data[["series"]]) if (colour && (!facets || !missing(colour))) { mapping$colour <- quote(series) } p <- ggplot2::ggplot(mapping, data = data) p <- p + ggplot2::geom_line(na.rm = TRUE, ...) if (facets) { p <- p + ggplot2::facet_grid(series~., scales = "free_y") } p <- p + ggAddExtras(xlab = xlab, ylab = ylab, main = main) return(p) } } #' @rdname autoplot.ts #' @export autoplot.msts <- function(object, ...) { sname <- deparse(substitute(object)) if (NCOL(object) > 1) { class(object) <- c("mts", "ts", "matrix") } else { class(object) <- c("ts") } attr(object, "msts") <- NULL autoplot(object, ...) + ggAddExtras(ylab = sname) } #' @rdname autoplot.ts #' @export fortify.ts <- function(model, data, ...) { # Use ggfortify version if it is loaded # to prevent cran errors if (exists("ggfreqplot")) { tsp <- attr(model, which = "tsp") dtindex <- time(model) if (any(tsp[3] == c(4, 12))) { dtindex <- zoo::as.Date.yearmon(dtindex) } model <- data.frame(Index = dtindex, Data = as.numeric(model)) return(ggplot2::fortify(model)) } else { model <- cbind(x = as.numeric(time(model)), y = as.numeric(model)) as.data.frame(model) } } forecast2plotdf <- function(model, data=as.data.frame(model), PI=TRUE, showgap=TRUE, ...) { # Time series forecasts if (is.element("ts", class(model$mean))) { xVals <- as.numeric(time(model$mean)) # x axis is time } # Cross-sectional forecasts else if (!is.null(model[["newdata"]])) { xVals <- as.numeric(model[["newdata"]][, 1]) # Only display the first column of newdata, should be generalised. 
if (NCOL(model[["newdata"]]) > 1) { message("Note: only extracting first column of data") } } else { stop("Could not find forecast x axis") } Hiloc <- grep("Hi ", names(data)) Loloc <- grep("Lo ", names(data)) if (PI && !is.null(model$level)) { # PI if (length(Hiloc) == length(Loloc)) { if (length(Hiloc) > 0) { out <- data.frame( x = rep(xVals, length(Hiloc) + 1), y = c(rep(NA, NROW(data) * (length(Hiloc))), data[, 1]), level = c(as.numeric(rep(gsub("Hi ", "", names(data)[Hiloc]), each = NROW(data))), rep(NA, NROW(data))), ymax = c(unlist(data[, Hiloc]), rep(NA, NROW(data))), ymin = c(unlist(data[, Loloc]), rep(NA, NROW(data))) ) numInterval <- length(model$level) } } else { warning("missing intervals detected, plotting point predictions only") PI <- FALSE } } if (!PI) { # No PI out <- data.frame(x = xVals, y = as.numeric(model$mean), level = rep(NA, NROW(model$mean)), ymax = rep(NA, NROW(model$mean)), ymin = rep(NA, NROW(model$mean))) numInterval <- 0 } if (!showgap) { if (is.null(model$x)) { warning("Removing the gap requires historical data, provide this via model$x. Defaulting showgap to TRUE.") } else { intervalGap <- data.frame( x = rep(time(model$x)[length(model$x)], numInterval + 1), y = c(model$x[length(model$x)], rep(NA, numInterval)), level = c(NA, model$level)[seq_along(1:(numInterval + 1))], ymax = c(NA, rep(model$x[length(model$x)], numInterval)), ymin = c(NA, rep(model$x[length(model$x)], numInterval)) ) out <- rbind(intervalGap, out) } } return(out) } #' @rdname geom_forecast #' @export StatForecast <- ggplot2::ggproto( "StatForecast", ggplot2::Stat, required_aes = c("x", "y"), compute_group = function(data, scales, params, PI=TRUE, showgap=TRUE, series=NULL, h=NULL, level=c(80, 95), fan=FALSE, robust=FALSE, lambda=NULL, find.frequency=FALSE, allow.multiplicative.trend=FALSE, ...) 
{ ## TODO: Rewrite tspx <- recoverTSP(data$x) if (is.null(h)) { h <- ifelse(tspx[3] > 1, 2 * tspx[3], 10) } tsdat <- ts(data = data$y, start = tspx[1], frequency = tspx[3]) fcast <- forecast( tsdat, h = h, level = level, fan = fan, robust = robust, lambda = lambda, find.frequency = find.frequency, allow.multiplicative.trend = allow.multiplicative.trend ) fcast <- forecast2plotdf(fcast, PI = PI, showgap = showgap) # Add ggplot & series information extraInfo <- as.list(data[1, !colnames(data) %in% colnames(fcast)]) extraInfo$`_data` <- quote(fcast) if (!is.null(series)) { if (data$group[1] > length(series)) { message("Recycling series argument, please provide a series name for each time series") } extraInfo[["series"]] <- series[(abs(data$group[1]) - 1) %% length(series) + 1] } do.call("transform", extraInfo) } ) #' @rdname geom_forecast #' @export GeomForecast <- ggplot2::ggproto( "GeomForecast", ggplot2::Geom, # Produces both point forecasts and intervals on graph required_aes = c("x", "y"), optional_aes = c("ymin", "ymax", "level"), default_aes = ggplot2::aes( colour = "blue", fill = "grey60", size = .5, linetype = 1, weight = 1, alpha = 1, level = NA ), draw_key = function(data, params, size) { lwd <- min(data$size, min(size) / 4) # Calculate and set colour linecol <- blendHex(data$col, "gray30", 1) fillcol <- blendHex(data$col, "#CCCCCC", 0.8) grid::grobTree( grid::rectGrob( width = grid::unit(1, "npc") - grid::unit(lwd, "mm"), height = grid::unit(1, "npc") - grid::unit(lwd, "mm"), gp = grid::gpar( col = fillcol, fill = scales::alpha(fillcol, data$alpha), lty = data$linetype, lwd = lwd * ggplot2::.pt, linejoin = "mitre" ) ), grid::linesGrob( x = c(0, 0.4, 0.6, 1), y = c(0.2, 0.6, 0.4, 0.9), gp = grid::gpar( col = linecol, fill = scales::alpha(linecol, data$alpha), lty = data$linetype, lwd = lwd * ggplot2::.pt, linejoin = "mitre" ) ) ) }, handle_na = function(self, data, params) { ## TODO: Consider removing/changing data }, draw_group = function(data, panel_scales, coord) { data <- if (!is.null(data$level)) { split(data, !is.na(data$level)) } else { list(data) } # Draw forecasted points and intervals if (length(data) == 1) { # PI=FALSE ggplot2:::ggname( "geom_forecast", GeomForecastPoint$draw_panel(data[[1]], panel_scales, coord) ) } else { # PI=TRUE ggplot2:::ggname( "geom_forecast", grid::addGrob( GeomForecastInterval$draw_group(data[[2]], panel_scales, coord), GeomForecastPoint$draw_panel(data[[1]], panel_scales, coord) ) ) } } ) GeomForecastPoint <- ggplot2::ggproto( "GeomForecastPoint", GeomForecast, ## Produces only point forecasts required_aes = c("x", "y"), setup_data = function(data, params) { data[!is.na(data$y), ] # Extract only forecast points }, draw_group = function(data, panel_scales, coord) { linecol <- blendHex(data$colour[1], "gray30", 1) # Compute alpha transparency data$alpha <- grDevices::col2rgb(linecol, alpha = TRUE)[4, ] / 255 * data$alpha # Select appropriate Geom and set defaults if (NROW(data) == 0) { # Blank ggplot2::GeomBlank$draw_panel } else if (NROW(data) == 1) { # Point GeomForecastPointGeom <- ggplot2::GeomPoint$draw_panel pointpred <- transform(data, fill = NA, colour = linecol, size = 1, shape = 19, stroke = 0.5) } else { # Line GeomForecastPointGeom <- ggplot2::GeomLine$draw_panel pointpred <- transform(data, fill = NA, colour = linecol, linewidth = size, size = NULL) } # Draw forecast points ggplot2:::ggname( "geom_forecast_point", grid::grobTree(GeomForecastPointGeom(pointpred, panel_scales, coord)) ) } ) blendHex <- function(mixcol, seqcol, 
alpha=1) { requireNamespace("methods") if (is.na(seqcol)) { return(mixcol) } # transform to hue/lightness/saturation colorspace seqcol <- grDevices::col2rgb(seqcol, alpha = TRUE) mixcol <- grDevices::col2rgb(mixcol, alpha = TRUE) seqcolHLS <- suppressWarnings(methods::coerce(colorspace::RGB(R = seqcol[1, ] / 255, G = seqcol[2, ] / 255, B = seqcol[3, ] / 255), structure(NULL, class = "HLS"))) mixcolHLS <- suppressWarnings(methods::coerce(colorspace::RGB(R = mixcol[1, ] / 255, G = mixcol[2, ] / 255, B = mixcol[3, ] / 255), structure(NULL, class = "HLS"))) # copy luminence mixcolHLS@coords[, "L"] <- seqcolHLS@coords[, "L"] mixcolHLS@coords[, "S"] <- alpha * mixcolHLS@coords[, "S"] + (1 - alpha) * seqcolHLS@coords[, "S"] mixcolHex <- suppressWarnings(methods::coerce(mixcolHLS, structure(NULL, class = "RGB"))) mixcolHex <- colorspace::hex(mixcolHex) mixcolHex <- ggplot2::alpha(mixcolHex, mixcol[4, ] / 255) return(mixcolHex) } GeomForecastInterval <- ggplot2::ggproto( "GeomForecastInterval", GeomForecast, ## Produces only forecasts intervals on graph required_aes = c("x", "ymin", "ymax"), setup_data = function(data, params) { data[is.na(data$y), ] # Extract only forecast intervals }, draw_group = function(data, panel_scales, coord) { # If level scale from fabletools is not loaded, convert to colour if(is.numeric(data$level)){ leveldiff <- diff(range(data$level)) if (leveldiff == 0) { leveldiff <- 1 } shadeVal <- (data$level - min(data$level)) / leveldiff * 0.2 + 8 / 15 data$level <- rgb(shadeVal, shadeVal, shadeVal) } intervalGrobList <- lapply( split(data, data$level), FUN = function(x) { # Calculate colour fillcol <- blendHex(x$colour[1], x$level[1], 0.7) # Compute alpha transparency x$alpha <- grDevices::col2rgb(fillcol, alpha = TRUE)[4, ] / 255 * x$alpha # Select appropriate Geom and set defaults if (NROW(x) == 0) { # Blank ggplot2::GeomBlank$draw_panel } else if (NROW(x) == 1) { # Linerange GeomForecastIntervalGeom <- ggplot2::GeomLinerange$draw_panel x <- transform(x, colour = fillcol, fill = NA, linewidth = 1) } else { # Ribbon GeomForecastIntervalGeom <- ggplot2::GeomRibbon$draw_group x <- transform(x, colour = NA, fill = fillcol, linewidth = size, size = NULL) } # Create grob return(GeomForecastIntervalGeom(x, panel_scales, coord)) ## Create list pair with average ymin/ymax to order layers } ) # Draw forecast intervals ggplot2:::ggname("geom_forecast_interval", do.call(grid::grobTree, rev(intervalGrobList))) # TODO: Find reliable method to stacking them correctly } ) #' Forecast plot #' #' Generates forecasts from \code{forecast.ts} and adds them to the plot. #' Forecasts can be modified via sending forecast specific arguments above. #' #' Multivariate forecasting is supported by having each time series on a #' different group. #' #' You can also pass \code{geom_forecast} a \code{forecast} object to add it to #' the plot. #' #' The aesthetics required for the forecasting to work includes forecast #' observations on the y axis, and the \code{time} of the observations on the x #' axis. Refer to the examples below. To automatically set up aesthetics, use #' \code{autoplot}. #' #' @inheritParams ggplot2::layer #' @param data The data to be displayed in this layer. There are three options: #' #' If \code{NULL}, the default, the data is inherited from the plot data as #' specified in the call to \code{\link{ggplot}}. #' #' A \code{data.frame}, or other object, will override the plot data. All #' objects will be fortified to produce a data frame. 
See \code{\link{fortify}} #' for which variables will be created. #' #' A \code{function} will be called with a single argument, the plot data. The #' return value must be a \code{data.frame}, and will be used as the layer #' data. #' @param stat The stat object to use calculate the data. #' @param position Position adjustment, either as a string, or the result of a #' call to a position adjustment function. #' @param na.rm If \code{FALSE} (the default), removes missing values with a #' warning. If \code{TRUE} silently removes missing values. #' @param show.legend logical. Should this layer be included in the legends? #' \code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE} #' never includes, and \code{TRUE} always includes. #' @param inherit.aes If \code{FALSE}, overrides the default aesthetics, rather #' than combining with them. This is most useful for helper functions that #' define both data and aesthetics and shouldn't inherit behaviour from the #' default plot specification, e.g. \code{\link{borders}}. #' @param PI If \code{FALSE}, confidence intervals will not be plotted, giving #' only the forecast line. #' @param showgap If \code{showgap=FALSE}, the gap between the historical #' observations and the forecasts is removed. #' @param series Matches an unidentified forecast layer with a coloured object #' on the plot. #' @param ... Additional arguments for \code{\link{forecast.ts}}, other #' arguments are passed on to \code{\link{layer}}. These are often aesthetics, #' used to set an aesthetic to a fixed value, like \code{color = "red"} or #' \code{alpha = .5}. They may also be parameters to the paired geom/stat. #' @return A layer for a ggplot graph. #' @author Mitchell O'Hara-Wild #' @seealso \code{\link{forecast}}, \code{\link[ggplot2]{ggproto}} #' @examples #' #' \dontrun{ #' library(ggplot2) #' autoplot(USAccDeaths) + geom_forecast() #' #' lungDeaths <- cbind(mdeaths, fdeaths) #' autoplot(lungDeaths) + geom_forecast() #' #' # Using fortify.ts #' p <- ggplot(aes(x=x, y=y), data=USAccDeaths) #' p <- p + geom_line() #' p + geom_forecast() #' #' # Without fortify.ts #' data <- data.frame(USAccDeaths=as.numeric(USAccDeaths), time=as.numeric(time(USAccDeaths))) #' p <- ggplot(aes(x=time, y=USAccDeaths), data=data) #' p <- p + geom_line() #' p + geom_forecast() #' #' p + geom_forecast(h=60) #' p <- ggplot(aes(x=time, y=USAccDeaths), data=data) #' p + geom_forecast(level=c(70,98)) #' p + geom_forecast(level=c(70,98),colour="lightblue") #' #' #Add forecasts to multivariate series with colour groups #' lungDeaths <- cbind(mdeaths, fdeaths) #' autoplot(lungDeaths) + geom_forecast(forecast(mdeaths), series="mdeaths") #' } #' #' @export geom_forecast <- function(mapping = NULL, data = NULL, stat = "forecast", position = "identity", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, PI=TRUE, showgap=TRUE, series=NULL, ...) { if (is.forecast(mapping) || is.mforecast(mapping)) { warning("Use autolayer instead of geom_forecast to add a forecast layer to your ggplot object.") cl <- match.call() cl[[1]] <- quote(autolayer) names(cl)[names(cl) == "mapping"] <- "object" return(eval.parent(cl)) } if (is.ts(mapping)) { data <- data.frame(y = as.numeric(mapping), x = as.numeric(time(mapping))) mapping <- ggplot2::aes(y = .data[["y"]], x = .data[["x"]]) } if (stat == "forecast") { paramlist <- list(na.rm = na.rm, PI = PI, showgap = showgap, series = series, ...) 
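# Illustrative sketch: because StatForecast computes the forecast from the
# mapped data, geom_forecast() can be added straight to an autoplot; h and
# level are passed through `...` to forecast().
#   autoplot(USAccDeaths) + geom_forecast(h = 36, level = c(80, 95))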
if (!is.null(series)) { if (inherits(mapping, "uneval")) { mapping$colour <- quote(ggplot2::after_stat(series)) } else { mapping <- ggplot2::aes(colour = ggplot2::after_stat(series)) } } } else { paramlist <- list(na.rm = na.rm, ...) } ggplot2::layer( geom = GeomForecast, mapping = mapping, data = data, stat = stat, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = paramlist ) } # Produce nice histogram with appropriately chosen bin widths # Designed to work with time series data without issuing warnings. #' Histogram with optional normal and kernel density functions #' #' Plots a histogram and density estimates using ggplot. #' #' #' @param x a numerical vector. #' @param add.normal Add a normal density function for comparison #' @param add.kde Add a kernel density estimate for comparison #' @param add.rug Add a rug plot on the horizontal axis #' @param bins The number of bins to use for the histogram. Selected by default #' using the Friedman-Diaconis rule given by \code{\link[grDevices]{nclass.FD}} #' @param boundary A boundary between two bins. #' @return None. #' @author Rob J Hyndman #' @seealso \code{\link[graphics]{hist}}, \code{\link[ggplot2]{geom_histogram}} #' @examples #' #' gghistogram(lynx, add.kde=TRUE) #' #' @export gghistogram <- function(x, add.normal=FALSE, add.kde=FALSE, add.rug=TRUE, bins, boundary=0) { if (!requireNamespace("ggplot2", quietly = TRUE)) { stop("ggplot2 is needed for this function to work. Install it via install.packages(\"ggplot2\")", call. = FALSE) } else { if (missing(bins)) { bins <- min(500, grDevices::nclass.FD(na.exclude(x))) } data <- data.frame(x = as.numeric(c(x))) # Initialise ggplot object and plot histogram binwidth <- (max(x, na.rm = TRUE) - min(x, na.rm = TRUE)) / bins p <- ggplot2::ggplot() + ggplot2::geom_histogram(ggplot2::aes(x), data = data, binwidth = binwidth, boundary = boundary) + ggplot2::xlab(deparse(substitute(x))) # Add normal density estimate if (add.normal || add.kde) { xmin <- min(x, na.rm = TRUE) xmax <- max(x, na.rm = TRUE) if (add.kde) { h <- stats::bw.SJ(x) xmin <- xmin - 3 * h xmax <- xmax + 3 * h } if (add.normal) { xmean <- mean(x, na.rm = TRUE) xsd <- sd(x, na.rm = TRUE) xmin <- min(xmin, xmean - 3 * xsd) xmax <- max(xmax, xmean + 3 * xsd) } xgrid <- seq(xmin, xmax, length.out = 512) if (add.normal) { df <- data.frame(x = xgrid, y = length(x) * binwidth * stats::dnorm(xgrid, xmean, xsd)) p <- p + ggplot2::geom_line(ggplot2::aes(df$x, df$y), col = "#ff8a62") } if (add.kde) { kde <- stats::density(x, bw = h, from = xgrid[1], to = xgrid[512], n = 512) p <- p + ggplot2::geom_line(ggplot2::aes(x = kde$x, y = length(x) * binwidth * kde$y), col = "#67a9ff") } } if (add.rug) { p <- p + ggplot2::geom_rug(ggplot2::aes(x)) } return(p) } } forecast/R/msts.R0000644000176200001440000000730414207263356013406 0ustar liggesusers#' Multi-Seasonal Time Series #' #' msts is an S3 class for multi seasonal time series objects, intended to be #' used for models that support multiple seasonal periods. The msts class #' inherits from the ts class and has an additional "msts" attribute which #' contains the vector of seasonal periods. All methods that work on a ts #' class, should also work on a msts class. #' #' @aliases print.msts window.msts `[.msts` #' #' @param data A numeric vector, ts object, matrix or data frame. It is #' intended that the time series data is univariate, otherwise treated the same #' as ts(). #' @param seasonal.periods A vector of the seasonal periods of the msts. 
#' @param ts.frequency The seasonal period that should be used as frequency of #' the underlying ts object. The default value is \code{max(seasonal.periods)}. #' @param ... Arguments to be passed to the underlying call to \code{ts()}. For #' example \code{start=c(1987,5)}. #' @return An object of class \code{c("msts", "ts")}. If there is only one #' seasonal period (i.e., \code{length(seasonal.periods)==1}), then the object #' is of class \code{"ts"}. #' @author Slava Razbash and Rob J Hyndman #' @keywords ts #' @examples #' #' x <- msts(taylor, seasonal.periods=c(2*24,2*24*7,2*24*365), start=2000+22/52) #' y <- msts(USAccDeaths, seasonal.periods=12, start=1949) #' #' @export msts <- function(data, seasonal.periods, ts.frequency=floor(max(seasonal.periods)), ...) { # if(!is.element(ts.frequency, round(seasonal.periods-0.5+1e-12))) # stop("ts.frequency should be one of the seasonal periods") if (inherits(data, "ts") && frequency(data) == ts.frequency && length(list(...)) == 0) { object <- data } else { object <- ts(data = data, frequency = ts.frequency, ...) } if (length(seasonal.periods) > 1L) { class(object) <- c("msts", "ts") attr(object, "msts") <- sort(seasonal.periods) } return(object) } #' @export print.msts <- function(x, ...) { cat("Multi-Seasonal Time Series:\n") cat("Start: ") cat(start(x)) # cat("\nEnd: ") # cat(x$end) cat("\nSeasonal Periods: ") cat(attr(x, "msts")) cat("\nData:\n") xx <- unclass(x) # handles both univariate and multivariate ts attr(xx, "tsp") <- attr(xx, "msts") <- NULL print(xx) # print(matrix(x, ncol=length(x)), nrow=1) cat("\n") } #' @export window.msts <- function(x, ...) { seasonal.periods <- attr(x, "msts") class(x) <- c("ts") x <- window(x, ...) class(x) <- c("msts", "ts") attr(x, "msts") <- seasonal.periods return(x) } #' @export `[.msts` <- function(x, i, j, drop = TRUE) { y <- NextMethod("[") if(!inherits(y, "ts")) return(y) class(y) <- c("msts", class(y)) attr(y, "msts") <- attr(x, "msts") y } # Copy msts attributes from x to y copy_msts <- function(x, y) { if(NROW(x) > NROW(y)) { # Pad y with initial NAs if(NCOL(y) == 1) { y <- c(rep(NA, NROW(x) - NROW(y)), y) } else { y <- rbind(matrix(NA, ncol=NCOL(y), nrow = NROW(x) - NROW(y)), y) } } else if(NROW(x) != NROW(y)) { stop("x and y should have the same number of observations") } if(NCOL(y) > 1) { class(y) <- c("mts", "ts", "matrix") } else { class(y) <- "ts" } if("msts" %in% class(x)) class(y) <- c("msts", class(y)) attr <- attributes(x) attributes(y)$tsp <- attr$tsp attributes(y)$msts <- attr$msts return(y) } # Copy msts attributes from x to y shifted to forecast period future_msts <- function(x, y) { if(NCOL(y) > 1) { class(y) <- c("mts", "ts", "matrix") } else { class(y) <- "ts" } if("msts" %in% class(x)) class(y) <- c("msts", class(y)) attr <- attributes(x) attr$tsp[1:2] <- attr$tsp[2] + c(1,NROW(y))/attr$tsp[3] attributes(y)$tsp <- attr$tsp attributes(y)$msts <- attr$msts return(y) } forecast/R/subset.R0000644000176200001440000001231614150370574013722 0ustar liggesusers#' Subsetting a time series #' #' Various types of subsetting of a time series. Allows subsetting by index #' values (unlike \code{\link[stats]{window}}). Also allows extraction of the #' values of a specific season or subset of seasons in each year. For example, #' to extract all values for the month of May from a time series. #' #' If character values for months are used, either upper or lower case may be #' used, and partial unambiguous names are acceptable. 
Possible character #' values for quarters are \code{"Q1"}, \code{"Q2"}, \code{"Q3"}, and #' \code{"Q4"}. #' #' @param x a univariate time series to be subsetted #' @param subset optional logical expression indicating elements to keep; #' missing values are taken as false. \code{subset} must be the same length as #' \code{x}. #' @param month Numeric or character vector of months to retain. Partial #' matching on month names used. #' @param quarter Numeric or character vector of quarters to retain. #' @param season Numeric vector of seasons to retain. #' @param start Index of start of contiguous subset. #' @param end Index of end of contiguous subset. #' @param ... Other arguments, unused. #' @return If \code{subset} is used, a numeric vector is returned with no ts #' attributes. If \code{start} and/or \code{end} are used, a ts object is #' returned consisting of x[start:end], with the appropriate time series #' attributes retained. Otherwise, a ts object is returned with frequency equal #' to the length of \code{month}, \code{quarter} or \code{season}. #' @author Rob J Hyndman #' @seealso \code{\link[base]{subset}}, \code{\link[stats]{window}} #' @keywords ts #' @examples #' plot(subset(gas,month="November")) #' subset(woolyrnq,quarter=3) #' subset(USAccDeaths, start=49) #' #' @export subset.ts <- function(x, subset=NULL, month=NULL, quarter=NULL, season=NULL, start=NULL, end=NULL, ...) { if (!is.null(subset)) { if (NROW(subset) != NROW(x)) { stop("subset must be the same length as x") } if (NCOL(subset) != 1) { stop("subset must be a vector of rows to keep") } if ("mts" %in% class(x)) { return(subset.matrix(x, subset)) } else { return(subset.default(x, subset)) } } else if (!is.null(start) | !is.null(end)) { if (is.null(start)) { start <- 1 } if (is.null(end)) { end <- NROW(x) } if ("mts" %in% class(x)) { xsub <- x[start:end, , drop=FALSE] } else { xsub <- x[start:end] } tspx <- tsp(x) return(ts(xsub, frequency = tspx[3], start = tspx[1L] + (start - 1) / tspx[3L])) } else if (frequency(x) <= 1) { stop("Data must be seasonal") } if (!is.null(month)) { if (frequency(x) != 12) { stop("Data is not monthly") } if (is.character(month)) { season <- pmatch(tolower(month), tolower(month.name), duplicates.ok = TRUE) } else { season <- month } season <- na.omit(season) if (length(season) == 0L) { stop("No recognizable months") } if (min(season) < 1L | max(season) > 12L) { stop("Months must be between 1 and 12") } } else if (!is.null(quarter)) { if (frequency(x) != 4) { stop("Data is not quarterly") } if (is.character(quarter)) { season <- pmatch(tolower(quarter), paste("q", 1:4, sep = ""), duplicates.ok = TRUE) } else { season <- quarter } season <- na.omit(season) if (length(season) == 0L) { stop("No recognizable quarters") } if (min(season) < 1L | max(season) > 4L) { stop("Quarters must be between 1 and 4") } } else if (is.null(season)) { stop("No subset specified") } else if (min(season) < 1L | max(season) > frequency(x)) { stop(paste("Seasons must be between 1 and", frequency(x))) } start <- utils::head(time(x)[is.element(cycle(x), season)], 1) if ("mts" %in% class(x)) { x <- subset.matrix(x, is.element(cycle(x), season)) } else { x <- subset.default(x, is.element(cycle(x), season)) } return(ts(x, frequency = length(season), start = start)) } #' @export #' @importFrom utils head.matrix head.ts <- function(x, n=6L, ...) { attr_x <- attributes(x) attr_x$names <- NULL if (NCOL(x) > 1) { hx <- head.matrix(as.matrix(x), n = n, ...) } else if ((length(x) + n) > 0) { hx <- head(c(x), n = n, ...) 
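    # The (length(x) + n) > 0 condition above allows negative n, which drops
    # observations from the end; if nothing would remain, numeric(0) is
    # returned below.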
} else { return(numeric(0)) } attr_x$tsp[2] <- attr_x$tsp[1] + (NROW(hx) - 1) / attr_x$tsp[3] if (!is.null(dim(x))) { attr_x$dim[1] <- NROW(hx) } attributes(hx) <- attr_x return(hx) } #' @export #' @importFrom utils tail.matrix tail.ts <- function(x, n=6L, ...) { attr_x <- attributes(x) attr_x$names <- NULL if (NCOL(x) > 1) { hx <- tail.matrix(as.matrix(x), n = n, ...) } else if ((length(x) + n) > 0) { hx <- tail(c(x), n = n, ...) } else { return(numeric(0)) } attr_x$tsp[1] <- attr_x$tsp[2] - (NROW(hx) - 1) / attr_x$tsp[3] if (!is.null(dim(x))) { attr_x$dim[1] <- NROW(hx) } attributes(hx) <- attr_x return(hx) } #' @rdname subset.ts #' @export subset.msts <- function(x, subset=NULL, start=NULL, end=NULL, ...) { out <- subset.ts(x, start = start, end = end, ...) tspx <- tsp(out) msts( out, seasonal.periods = attr(x, "msts"), start = tspx[1], ts.frequency = tspx[3] ) } forecast/R/bats.R0000644000176200001440000003727314323125536013355 0ustar liggesusers# Author: srazbash ############################################################################### #' BATS model (Exponential smoothing state space model with Box-Cox #' transformation, ARMA errors, Trend and Seasonal components) #' #' Fits a BATS model applied to \code{y}, as described in De Livera, Hyndman & #' Snyder (2011). Parallel processing is used by default to speed up the #' computations. #' #' @aliases as.character.bats print.bats #' #' @param y The time series to be forecast. Can be \code{numeric}, \code{msts} #' or \code{ts}. Only univariate time series are supported. #' @param use.box.cox \code{TRUE/FALSE} indicates whether to use the Box-Cox #' transformation or not. If \code{NULL} then both are tried and the best fit #' is selected by AIC. #' @param use.trend \code{TRUE/FALSE} indicates whether to include a trend or #' not. If \code{NULL} then both are tried and the best fit is selected by AIC. #' @param use.damped.trend \code{TRUE/FALSE} indicates whether to include a #' damping parameter in the trend or not. If \code{NULL} then both are tried #' and the best fit is selected by AIC. #' @param seasonal.periods If \code{y} is a numeric then seasonal periods can #' be specified with this parameter. #' @param use.arma.errors \code{TRUE/FALSE} indicates whether to include ARMA #' errors or not. If \code{TRUE} the best fit is selected by AIC. If #' \code{FALSE} then the selection algorithm does not consider ARMA errors. #' @param use.parallel \code{TRUE/FALSE} indicates whether or not to use #' parallel processing. #' @param num.cores The number of parallel processes to be used if using #' parallel processing. If \code{NULL} then the number of logical cores is #' detected and all available cores are used. #' @param bc.lower The lower limit (inclusive) for the Box-Cox transformation. #' @param bc.upper The upper limit (inclusive) for the Box-Cox transformation. #' @param biasadj Use adjusted back-transformed mean for Box-Cox #' transformations. If TRUE, point forecasts and fitted values are mean #' forecast. Otherwise, these points can be considered the median of the #' forecast densities. #' @param model Output from a previous call to \code{bats}. If model is passed, #' this same model is fitted to \code{y} without re-estimating any parameters. #' @param ... Additional arguments to be passed to \code{auto.arima} when #' choose an ARMA(p, q) model for the errors. (Note that xreg will be ignored, #' as will any arguments concerning seasonality and differencing, but arguments #' controlling the values of p and q will be used.) 
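#'   For example, \code{max.p} and \code{max.q} can be passed through to limit
#'   the order of the ARMA error process.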
#' @return An object of class "\code{bats}". The generic accessor functions #' \code{fitted.values} and \code{residuals} extract useful features of the #' value returned by \code{bats} and associated functions. The fitted model is #' designated BATS(omega, p,q, phi, m1,...mJ) where omega is the Box-Cox #' parameter and phi is the damping parameter; the error is modelled as an #' ARMA(p,q) process and m1,...,mJ list the seasonal periods used in the model. #' @author Slava Razbash and Rob J Hyndman #' @references De Livera, A.M., Hyndman, R.J., & Snyder, R. D. (2011), #' Forecasting time series with complex seasonal patterns using exponential #' smoothing, \emph{Journal of the American Statistical Association}, #' \bold{106}(496), 1513-1527. #' @keywords ts #' @examples #' #' \dontrun{ #' fit <- bats(USAccDeaths) #' plot(forecast(fit)) #' #' taylor.fit <- bats(taylor) #' plot(forecast(taylor.fit)) #' } #' #' @export bats <- function(y, use.box.cox = NULL, use.trend = NULL, use.damped.trend = NULL, seasonal.periods = NULL, use.arma.errors = TRUE, use.parallel = length(y) > 1000, num.cores = 2, bc.lower = 0, bc.upper = 1, biasadj = FALSE, model = NULL, ...) { if (!is.numeric(y) || NCOL(y) > 1) { stop("y should be a univariate time series") } seriesname <- deparse(substitute(y)) origy <- y attr_y <- attributes(origy) # Get seasonal periods if (is.null(seasonal.periods)) { if ("msts" %in% class(y)) { seasonal.periods <- attr(y, "msts") } else if ("ts" %in% class(y)) { seasonal.periods <- frequency(y) } else { y <- as.ts(y) seasonal.periods <- 1 } seasonal.periods <- seasonal.periods[seasonal.periods < length(y)] if(length(seasonal.periods) == 0L) seasonal.periods <- 1 } else { # Add ts attributes if (!("ts" %in% class(y))) { y <- msts(y, seasonal.periods) } } seasonal.periods <- unique(pmax(seasonal.periods, 1)) if (all(seasonal.periods == 1)) { seasonal.periods <- NULL } ny <- length(y) y <- na.contiguous(y) if (ny != length(y)) { warning("Missing values encountered. Using longest contiguous portion of time series") if (!is.null(attr_y$tsp)) { attr_y$tsp[1:2] <- range(time(y)) } } # Refit model if available if (!is.null(model)) { refitModel <- try(fitPreviousBATSModel(y, model = model), silent = TRUE) return(refitModel) } # Check for constancy if (is.constant(y)) { fit <- list( y = y, x = matrix(y, nrow = 1, ncol = ny), errors = y * 0, fitted.values = y, seed.states = matrix(y[1]), AIC = -Inf, likelihood = -Inf, variance = 0, alpha = 0.9999, method = "BATS", call = match.call() ) return(structure(fit, class = "bats")) } # Check for non-positive data if (any((y <= 0))) { use.box.cox <- FALSE } if ((!is.null(use.box.cox)) && (!is.null(use.trend)) && (use.parallel)) { if (use.trend && (!is.null(use.damped.trend))) { # In the this case, there is only one alternative. use.parallel <- FALSE } else if (use.trend == FALSE) { # As above, in the this case, there is only one alternative. 
use.parallel <- FALSE } } if (!is.null(seasonal.periods)) { seasonal.mask <- (seasonal.periods == 1) seasonal.periods <- seasonal.periods[!seasonal.mask] } # Check if there is anything to parallelise if (is.null(seasonal.periods) && !is.null(use.box.cox) && !is.null(use.trend)) { use.parallel <- FALSE } if (is.null(use.box.cox)) { use.box.cox <- c(FALSE, TRUE) } if (any(use.box.cox)) { init.box.cox <- BoxCox.lambda(y, lower = bc.lower, upper = bc.upper) } else { init.box.cox <- NULL } if (is.null(use.trend)) { use.trend <- c(FALSE, TRUE) } else if (use.trend == FALSE) { use.damped.trend <- FALSE } if (is.null(use.damped.trend)) { use.damped.trend <- c(FALSE, TRUE) } y <- as.numeric(y) if (use.parallel) { # Set up the control array control.array <- NULL for (box.cox in use.box.cox) { for (trend in use.trend) { for (damping in use.damped.trend) { if (!trend && damping) { next } control.line <- c(box.cox, trend, damping) if (!is.null(control.array)) { control.array <- rbind(control.array, control.line) } else { control.array <- control.line } } } } ## Fit the models if (is.null(num.cores)) { num.cores <- detectCores(all.tests = FALSE, logical = TRUE) } clus <- makeCluster(num.cores) models.list <- clusterApplyLB(clus, c(1:nrow(control.array)), parFilterSpecifics, y = y, control.array = control.array, seasonal.periods = seasonal.periods, use.arma.errors = use.arma.errors, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj, ...) stopCluster(clus) ## Choose the best model #### Get the AICs aics <- numeric(nrow(control.array)) for (i in 1:nrow(control.array)) { aics[i] <- models.list[[i]]$AIC } best.number <- which.min(aics) best.model <- models.list[[best.number]] } else { best.aic <- Inf best.model <- NULL for (box.cox in use.box.cox) { for (trend in use.trend) { for (damping in use.damped.trend) { current.model <- try( filterSpecifics(y, box.cox = box.cox, trend = trend, damping = damping, seasonal.periods = seasonal.periods, use.arma.errors = use.arma.errors, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj, ... ), silent=TRUE ) if(!("try-error" %in% class(current.model))) { if (current.model$AIC < best.aic) { best.aic <- current.model$AIC best.model <- current.model } } } } } } if(is.null(best.model)) stop("Unable to fit a model") best.model$call <- match.call() if (best.model$optim.return.code != 0) { warning("optim() did not converge.") } attributes(best.model$fitted.values) <- attributes(best.model$errors) <- attr_y best.model$y <- origy best.model$series <- seriesname best.model$method <- "BATS" return(best.model) } filterSpecifics <- function(y, box.cox, trend, damping, seasonal.periods, use.arma.errors, force.seasonality = FALSE, init.box.cox = NULL, bc.lower = 0, bc.upper = 1, biasadj = FALSE, ...) 
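# Internal helper: fits one BATS specification for fixed Box-Cox, trend and
# damping choices; it then tries dropping seasonality and adding ARMA errors,
# keeping whichever variant achieves the lowest AIC.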
{ if (!trend && damping) { return(list(AIC = Inf)) } first.model <- fitSpecificBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj) if (!is.null(seasonal.periods) && !force.seasonality) { non.seasonal.model <- fitSpecificBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = NULL, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj) if (first.model$AIC > non.seasonal.model$AIC) { seasonal.periods <- NULL first.model <- non.seasonal.model } } if (use.arma.errors) { suppressWarnings(arma <- auto.arima(as.numeric(first.model$errors), d = 0, ...)) p <- arma$arma[1] q <- arma$arma[2] if (p != 0 || q != 0) { # Did auto.arima() find any AR() or MA() coefficients? if (p != 0) { ar.coefs <- numeric(p) } else { ar.coefs <- NULL } if (q != 0) { ma.coefs <- numeric(q) } else { ma.coefs <- NULL } starting.params <- first.model$parameters # printCASE(box.cox, trend, damping, seasonal.periods, ar.coefs, ma.coefs, p, q) second.model <- fitSpecificBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, ar.coefs = ar.coefs, ma.coefs = ma.coefs, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj) if (second.model$AIC < first.model$AIC) { return(second.model) } else { return(first.model) } } else { # Else auto.arima() did not find any AR() or MA()coefficients return(first.model) } } else { return(first.model) } } parFilterSpecifics <- function(control.number, control.array, y, seasonal.periods, use.arma.errors, force.seasonality = FALSE, init.box.cox = NULL, bc.lower = 0, bc.upper = 1, biasadj = FALSE, ...) { box.cox <- control.array[control.number, 1] trend <- control.array[control.number, 2] damping <- control.array[control.number, 3] if (!trend && damping) { return(list(AIC = Inf)) } first.model <- fitSpecificBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj) if (!is.null(seasonal.periods) && !force.seasonality) { non.seasonal.model <- fitSpecificBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = NULL, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj) if (first.model$AIC > non.seasonal.model$AIC) { seasonal.periods <- NULL first.model <- non.seasonal.model } } if (use.arma.errors) { suppressWarnings(arma <- auto.arima(as.numeric(first.model$errors), d = 0, ...)) p <- arma$arma[1] q <- arma$arma[2] if (p != 0 || q != 0) { # Did auto.arima() find any AR() or MA() coefficients? 
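      # Initialise the ARMA coefficients at zero; fitSpecificBATS() then
      # re-estimates them jointly with the other model parameters.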
if (p != 0) { ar.coefs <- numeric(p) } else { ar.coefs <- NULL } if (q != 0) { ma.coefs <- numeric(q) } else { ma.coefs <- NULL } starting.params <- first.model$parameters # printCASE(box.cox, trend, damping, seasonal.periods, ar.coefs, ma.coefs, p, q) second.model <- fitSpecificBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, ar.coefs = ar.coefs, ma.coefs = ma.coefs, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj) if (second.model$AIC < first.model$AIC) { return(second.model) } else { return(first.model) } } else { # Else auto.arima() did not find any AR() or MA()coefficients return(first.model) } } else { return(first.model) } } #' @rdname fitted.Arima #' @export fitted.bats <- function(object, h = 1, ...) { if (h == 1) { return(object$fitted.values) } else { return(hfitted(object = object, h = h, FUN = "bats", ...)) } } #' @export print.bats <- function(x, ...) { cat(as.character(x)) cat("\n") cat("\nCall: ") print(x$call) cat("\nParameters") if (!is.null(x$lambda)) { cat("\n Lambda: ") cat(round(x$lambda, 6)) } cat("\n Alpha: ") cat(x$alpha) if (!is.null(x$beta)) { cat("\n Beta: ") cat(x$beta) cat("\n Damping Parameter: ") cat(round(x$damping.parameter, 6)) } if (!is.null(x$gamma.values)) { cat("\n Gamma Values: ") cat(x$gamma.values) } if (!is.null(x$ar.coefficients)) { cat("\n AR coefficients: ") cat(round(x$ar.coefficients, 6)) } if (!is.null(x$ma.coefficients)) { cat("\n MA coefficients: ") cat(round(x$ma.coefficients, 6)) } cat("\n") cat("\nSeed States:\n") print(x$seed.states) cat("\nSigma: ") cat(sqrt(x$variance)) cat("\nAIC: ") cat(x$AIC) cat("\n") } #' Plot components from BATS model #' #' Produces a plot of the level, slope and seasonal components from a BATS or #' TBATS model. The plotted components are Box-Cox transformed using the estimated transformation parameter. #' #' @param x Object of class \dQuote{bats/tbats}. #' @param object Object of class \dQuote{bats/tbats}. #' @param main Main title for plot. #' @param range.bars Logical indicating if each plot should have a bar at its #' right side representing relative size. If NULL, automatic selection takes #' place. #' @param ... Other plotting parameters passed to \code{\link[graphics]{par}}. #' @return None. Function produces a plot #' @author Rob J Hyndman #' @seealso \code{\link{bats}},\code{\link{tbats}} #' @keywords hplot #' #' @export plot.bats <- function(x, main = "Decomposition by BATS model", ...) { # Get original data, transform if necessary if (!is.null(x$lambda)) { y <- BoxCox(x$y, x$lambda) lambda <- attr(y, "lambda") } else { y <- x$y } # Extract states out <- cbind(observed = c(y), level = x$x[1, ]) if (!is.null(x$beta)) { out <- cbind(out, slope = x$x[2, ]) } nonseas <- 2 + !is.null(x$beta) # No. non-seasonal columns in out nseas <- length(x$gamma.values) # No. seasonal periods if (!is.null(x$gamma.values)) { seas.states <- x$x[-(1:(1 + !is.null(x$beta))), ] j <- cumsum(c(1, x$seasonal.periods)) for (i in 1:nseas) { out <- cbind(out, season = seas.states[j[i], ]) } if (nseas > 1) { colnames(out)[nonseas + 1:nseas] <- paste("season", 1:nseas, sep = "") } } # Add time series characteristics out <- ts(out) tsp(out) <- tsp(y) # Do the plot plot.ts(out, main = main, nc = 1, ...) 
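  # nc = 1 stacks the observed, level, slope and seasonal panels in a single
  # column, in the style of plot.stl().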
} #' @rdname is.ets #' @export is.bats <- function(x) { inherits(x, "bats") } forecast/R/clean.R0000644000176200001440000001634214323125536013500 0ustar liggesusers# Functions to remove outliers and fill missing values in a time series # Nothing for multiple seasonality yet. # na.interp fills in missing values # Uses linear interpolation for non-seasonal series # Adds seasonality based on a periodic stl decomposition with seasonal series # Argument lambda allows for Box-Cox transformation #' Interpolate missing values in a time series #' #' By default, uses linear interpolation for non-seasonal series. For seasonal series, a #' robust STL decomposition is first computed. Then a linear interpolation is applied to the #' seasonally adjusted data, and the seasonal component is added back. #' #' A more general and flexible approach is available using \code{na.approx} in #' the \code{zoo} package. #' #' @param x time series #' @param linear Should a linear interpolation be used. #' @inheritParams forecast.ts #' @return Time series #' @author Rob J Hyndman #' @seealso \code{\link[forecast]{tsoutliers}} #' @keywords ts #' @examples #' #' data(gold) #' plot(na.interp(gold)) #' #' @export na.interp <- function(x, lambda=NULL, linear=(frequency(x) <= 1 | sum(!is.na(x)) <= 2 * frequency(x))) { missng <- is.na(x) # Do nothing if no missing values if (sum(missng) == 0L) { return(x) } origx <- x rangex <- range(x, na.rm=TRUE) drangex <- rangex[2L] - rangex[1L] # Convert to ts if (is.null(tsp(x))) { x <- ts(x) } if (length(dim(x)) > 1) { if (NCOL(x) == 1) { x <- x[, 1] } else { stop("The time series is not univariate.") } } # Transform if requested if (!is.null(lambda)) { x <- BoxCox(x, lambda = lambda) lambda <- attr(x, "lambda") } freq <- frequency(x) tspx <- tsp(x) n <- length(x) tt <- 1:n idx <- tt[!missng] if (linear) { # Use linear interpolation x <- ts(approx(idx, x[idx], tt, rule = 2)$y) } else { # Otherwise estimate seasonal component robustly # Then add to linear interpolation of seasonally adjusted series # Fit Fourier series for seasonality and a polynomial for the trend, # just to get something reasonable to start with if ("msts" %in% class(x)) { K <- pmin(trunc(attributes(x)$msts / 2), 20L) } else { K <- min(trunc(freq / 2), 5) } X <- cbind(fourier(x, K), poly(tt, degree = pmin(pmax(trunc(n / 10), 1), 6L))) fit <- lm(x ~ X, na.action = na.exclude) pred <- predict(fit, newdata = data.frame(X)) x[missng] <- pred[missng] # Now re-do it with stl to get better results fit <- mstl(x, robust = TRUE) # Interpolate seasonally adjusted values sa <- seasadj(fit) sa <- approx(idx, sa[idx], 1:n, rule = 2)$y # Replace original missing values seas <- seasonal(fit) if (NCOL(seas) > 1) { seas <- rowSums(seas) } x[missng] <- sa[missng] + seas[missng] } # Backtransform if required if (!is.null(lambda)) { x <- InvBoxCox(x, lambda = lambda) } # Ensure time series characteristics not lost tsp(x) <- tspx # Check stability and use linear interpolation if there is a problem if(!linear & (max(x) > rangex[2L]+0.5*drangex | min(x) < rangex[1L]-0.5*drangex)) return(na.interp(origx, lambda=lambda, linear=TRUE)) else return(x) } # Function to identify outliers and replace them with better values # Missing values replaced as well if replace.missing=TRUE #' Identify and replace outliers and missing values in a time series #' #' Uses supsmu for non-seasonal series and a robust STL decomposition for #' seasonal series. 
To estimate missing values and outlier replacements, #' linear interpolation is used on the (possibly seasonally adjusted) series #' #' @param x time series #' @param replace.missing If TRUE, it not only replaces outliers, but also #' interpolates missing values #' @param iterate the number of iterations required #' @inheritParams forecast.ts #' @return Time series #' @author Rob J Hyndman #' @references Hyndman (2021) "Detecting time series outliers" \url{https://robjhyndman.com/hyndsight/tsoutliers/}. #' @seealso \code{\link[forecast]{na.interp}}, #' \code{\link[forecast]{tsoutliers}}, \code{\link[stats]{supsmu}} #' @keywords ts #' @examples #' #' cleangold <- tsclean(gold) #' #' @export tsclean <- function(x, replace.missing=TRUE, iterate=2, lambda = NULL) { outliers <- tsoutliers(x, iterate = iterate, lambda = lambda) x[outliers$index] <- outliers$replacements if (replace.missing) { x <- na.interp(x, lambda = lambda) } return(x) } # Function to identify time series outlieres #' Identify and replace outliers in a time series #' #' Uses supsmu for non-seasonal series and a periodic stl decomposition with #' seasonal series to identify outliers and estimate their replacements. #' #' #' @param x time series #' @param iterate the number of iterations required #' @inheritParams forecast.ts #' @return \item{index}{Indicating the index of outlier(s)} #' \item{replacement}{Suggested numeric values to replace identified outliers} #' @author Rob J Hyndman #' @seealso \code{\link[forecast]{na.interp}}, \code{\link[forecast]{tsclean}} #' @references Hyndman (2021) "Detecting time series outliers" \url{https://robjhyndman.com/hyndsight/tsoutliers/}. #' @keywords ts #' @examples #' #' data(gold) #' tsoutliers(gold) #' #' @export tsoutliers <- function(x, iterate=2, lambda=NULL) { n <- length(x) freq <- frequency(x) # Identify and fill missing values missng <- is.na(x) nmiss <- sum(missng) if (nmiss > 0L) { xx <- na.interp(x, lambda = lambda) } else { xx <- x } # Check if constant if (is.constant(xx)) { return(list(index = integer(0), replacements = numeric(0))) } # Transform if requested if (!is.null(lambda)) { xx <- BoxCox(xx, lambda = lambda) lambda <- attr(xx, "lambda") } # Seasonally adjust data if necessary if (freq > 1 && n > 2 * freq) { fit <- mstl(xx, robust=TRUE) # Check if seasonality is sufficient to warrant adjustment rem <- remainder(fit) detrend <- xx - trendcycle(fit) strength <- 1 - var(rem) / var(detrend) if (strength >= 0.6) { xx <- seasadj(fit) } } # Use super-smoother on the (seasonally adjusted) data tt <- 1:n mod <- supsmu(tt, xx) resid <- xx - mod$y # Make sure missing values are not interpeted as outliers if (nmiss > 0L) { resid[missng] <- NA } # Limits of acceptable residuals resid.q <- quantile(resid, probs = c(0.25, 0.75), na.rm = TRUE) iqr <- diff(resid.q) limits <- resid.q + 3 * iqr * c(-1, 1) # Find residuals outside limits if ((limits[2] - limits[1]) > 1e-14) { outliers <- which((resid < limits[1]) | (resid > limits[2])) } else { outliers <- numeric(0) } # Replace all missing values including outliers x[outliers] <- NA x <- na.interp(x, lambda = lambda) # Do no more than 2 iterations regardless of the value of iterate if (iterate > 1) { tmp <- tsoutliers(x, iterate = 1, lambda = lambda) if (length(tmp$index) > 0) # Found some more { outliers <- sort(unique(c(outliers, tmp$index))) x[outliers] <- NA if(sum(!is.na(x)) == 1L) { # Only one non-missing value x[is.na(x)] <- x[!is.na(x)] } else x <- na.interp(x, lambda = lambda) } } # Return outlier indexes and replacements 
return(list(index = outliers, replacements = x[outliers])) } forecast/R/baggedModel.R0000644000176200001440000001706714323125536014615 0ustar liggesusers## #' Forecasting using a bagged model #' #' The bagged model forecasting method. #' #' This function implements the bagged model forecasting method described in #' Bergmeir et al. By default, the \code{\link{ets}} function is applied to all #' bootstrapped series. Base models other than \code{\link{ets}} can be given by the #' parameter \code{fn}. Using the default parameters, the function #' \code{\link{bld.mbb.bootstrap}} is used to calculate the bootstrapped series #' with the Box-Cox and Loess-based decomposition (BLD) bootstrap. The function #' \code{\link{forecast.baggedModel}} can then be used to calculate forecasts. #' #' \code{baggedETS} is a wrapper for \code{baggedModel}, setting \code{fn} to "ets". #' This function is included for backwards compatibility only, and may be #' deprecated in the future. #' #' @aliases print.baggedModel #' #' @param y A numeric vector or time series of class \code{ts}. #' @param bootstrapped_series bootstrapped versions of y. #' @param fn the forecast function to use. Default is \code{\link{ets}}. #' @param \dots Other arguments passed to the forecast function. #' @return Returns an object of class "\code{baggedModel}". #' #' The function \code{print} is used to obtain and print a summary of the #' results. #' #' \item{models}{A list containing the fitted ensemble models.} #' \item{method}{The function for producing a forecastable model.} #' \item{y}{The original time series.} #' \item{bootstrapped_series}{The bootstrapped series.} #' \item{modelargs}{The arguments passed through to \code{fn}.} #' \item{fitted}{Fitted values (one-step forecasts). The #' mean of the fitted values is calculated over the ensemble.} #' \item{residuals}{Original values minus fitted values.} #' @author Christoph Bergmeir, Fotios Petropoulos #' @references Bergmeir, C., R. J. Hyndman, and J. M. Benitez (2016). Bagging #' Exponential Smoothing Methods using STL Decomposition and Box-Cox #' Transformation. International Journal of Forecasting 32, 303-312. #' @keywords ts #' @examples #' fit <- baggedModel(WWWusage) #' fcast <- forecast(fit) #' plot(fcast) #' #' @export baggedModel <- function(y, bootstrapped_series=bld.mbb.bootstrap(y, 100), fn=ets, ...) { # Add package info in case forecast not loaded if(!is.function(fn)){ warning(paste0("Using character specification for `fn` is deprecated. Please use `fn = ", match.arg(fn,c("ets", "auto.arima")), "`.")) fn <- utils::getFromNamespace(match.arg(fn,c("ets", "auto.arima")), "forecast") } mod_boot <- lapply(bootstrapped_series, function(x) { mod <- fn(x, ...) }) # Return results out <- list() out$y <- as.ts(y) out$bootstrapped_series <- bootstrapped_series out$models <- mod_boot out$modelargs <- list(...) fitted_boot <- lapply(out$models, fitted) fitted_boot <- as.matrix(as.data.frame(fitted_boot)) out$fitted <- ts(apply(fitted_boot, 1, mean)) tsp(out$fitted) <- tsp(out$y) out$residuals <- out$y - out$fitted out$series <- deparse(substitute(y)) out$method <- "baggedModel" out$call <- match.call() return(structure(out, class = c("baggedModel"))) } #' @rdname baggedModel #' @export baggedETS <- function(y, bootstrapped_series=bld.mbb.bootstrap(y, 100), ...) { out <- baggedModel(y, bootstrapped_series, fn = ets, ...) class(out) <- c("baggedETS", class(out)) out } #' Forecasting using a bagged model #' #' Returns forecasts and other information for bagged models. 
#' #' Intervals are calculated as min and max values over the point forecasts from #' the models in the ensemble. I.e., the intervals are not prediction #' intervals, but give an indication of how different the forecasts within the #' ensemble are. #' #' @param object An object of class "\code{baggedModel}" resulting from a call to #' \code{\link{baggedModel}}. #' @param h Number of periods for forecasting. #' @param ... Other arguments, passed on to the \code{\link{forecast}} function of the original method #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' An object of class "\code{forecast}" is a list containing at least the #' following elements: #' \item{model}{A list containing information about the fitted model} #' \item{method}{The name of the forecasting method as a character string} #' \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} #' \item{upper}{Upper limits for prediction intervals} #' \item{level}{The confidence values associated with the prediction intervals} #' \item{x}{The original time series (either \code{object} itself or the #' time series used to create the model stored as \code{object}).} #' \item{xreg}{The external regressors used in fitting (if given).} #' \item{residuals}{Residuals from the fitted model. That #' is x minus fitted values.} #' \item{fitted}{Fitted values (one-step forecasts)} #' @author Christoph Bergmeir, Fotios Petropoulos #' @seealso \code{\link{baggedModel}}. #' @references Bergmeir, C., R. J. Hyndman, and J. M. Benitez (2016). Bagging #' Exponential Smoothing Methods using STL Decomposition and Box-Cox #' Transformation. International Journal of Forecasting 32, 303-312. #' @keywords ts #' @examples #' fit <- baggedModel(WWWusage) #' fcast <- forecast(fit) #' plot(fcast) #' #' \dontrun{ #' fit2 <- baggedModel(WWWusage, fn="auto.arima") #' fcast2 <- forecast(fit2) #' plot(fcast2) #' accuracy(fcast2)} #' #' @export forecast.baggedModel <- function(object, h=ifelse(frequency(object$y) > 1, 2 * frequency(object$y), 10), ...) 
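# Point forecasts are the means of the ensemble members' point forecasts;
# the reported interval is their pointwise min/max, which indicates ensemble
# spread rather than a true prediction interval.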
{ out <- list( model = object, series = object$series, x = object$y, method = object$method, fitted = object$fitted, residuals = object$residuals ) # out <- object tspx <- tsp(out$x) forecasts_boot <- lapply(out$model$models, function(mod) { if (inherits(mod, "ets")) { forecast(mod, PI = FALSE, h = h, ...)$mean } else { forecast(mod, h = h, ...)$mean } }) forecasts_boot <- as.matrix(as.data.frame(forecasts_boot)) colnames(forecasts_boot) <- NULL if (!is.null(tspx)) { start.f <- tspx[2] + 1 / frequency(out$x) } else { start.f <- length(out$x) + 1 } # out <- list() out$forecasts_boot <- forecasts_boot # browser() # out$model$models out$mean <- ts(apply(forecasts_boot, 1, mean), frequency = frequency(out$x), start = start.f) out$median <- ts(apply(forecasts_boot, 1, median)) out$lower <- ts(apply(forecasts_boot, 1, min)) out$upper <- ts(apply(forecasts_boot, 1, max)) out$level <- 100 tsp(out$median) <- tsp(out$lower) <- tsp(out$upper) <- tsp(out$mean) class(out) <- "forecast" out } # fitted.baggedModel <- function(object, h=1, accum_func=mean, ...){ # # fitted_boot <- lapply(object$models, fitted, h) # fitted_boot <- as.matrix(as.data.frame(fitted_boot)) # fitted_boot <- apply(fitted_boot, 2, accum_func) # fitted_boot # } # residuals.baggedModel <- function(object, h=1, ...){ # # residuals_boot <- lapply(object$models, residuals, h) # residuals_boot <- as.matrix(as.data.frame(residuals_boot)) # residuals_boot # # #Alternative implementation: # #object$x - fitted(object, h) # } #' @export print.baggedModel <- function(x, digits = max(3, getOption("digits") - 3), ...) { cat("Series:", x$series, "\n") cat("Model: ", x$method, "\n") cat("Call: ") print(x$call) # print(x$model) # cat("\nsigma^2 estimated as ", format(mean(residuals(x)^2,na.rm=TRUE), digits = digits), "\n", sep = "") invisible(x) } #' @rdname is.ets #' @export is.baggedModel <- function(x) { inherits(x, "baggedModel") } forecast/R/nnetar.R0000644000176200001440000005156714456202551013715 0ustar liggesusers# Defaults: # For non-seasonal data, p chosen using AIC from linear AR(p) model # For seasonal data, p chosen using AIC from linear AR(p) model after # seasonally adjusting with STL decomposition, and P=1 # size set to average of number of inputs and number of outputs: (p+P+1)/2 # if xreg is included then size = (p+P+ncol(xreg)+1)/2 #' Neural Network Time Series Forecasts #' #' Feed-forward neural networks with a single hidden layer and lagged inputs #' for forecasting univariate time series. #' #' A feed-forward neural network is fitted with lagged values of \code{y} as #' inputs and a single hidden layer with \code{size} nodes. The inputs are for #' lags 1 to \code{p}, and lags \code{m} to \code{mP} where #' \code{m=frequency(y)}. If \code{xreg} is provided, its columns are also #' used as inputs. If there are missing values in \code{y} or #' \code{xreg}, the corresponding rows (and any others which depend on them as #' lags) are omitted from the fit. A total of \code{repeats} networks are #' fitted, each with random starting weights. These are then averaged when #' computing forecasts. The network is trained for one-step forecasting. #' Multi-step forecasts are computed recursively. #' #' For non-seasonal data, the fitted model is denoted as an NNAR(p,k) model, #' where k is the number of hidden nodes. This is analogous to an AR(p) model #' but with nonlinear functions. For seasonal data, the fitted model is called #' an NNAR(p,P,k)[m] model, which is analogous to an ARIMA(p,0,0)(P,0,0)[m] #' model but with nonlinear functions. 
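#'
#' For example, an NNAR(3,1,2)[12] model for monthly data uses the lagged
#' inputs y[t-1], y[t-2], y[t-3] and y[t-12], with two nodes in the hidden
#' layer.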
#' #' @aliases print.nnetar print.nnetarmodels #' #' @param y A numeric vector or time series of class \code{ts}. #' @param p Embedding dimension for non-seasonal time series. Number of #' non-seasonal lags used as inputs. For non-seasonal time series, the default #' is the optimal number of lags (according to the AIC) for a linear AR(p) #' model. For seasonal time series, the same method is used but applied to #' seasonally adjusted data (from an stl decomposition). If set to zero to #' indicate that no non-seasonal lags should be included, then P must be at #' least 1 and a model with only seasonal lags will be fit. #' @param P Number of seasonal lags used as inputs. #' @param size Number of nodes in the hidden layer. Default is half of the #' number of input nodes (including external regressors, if given) plus 1. #' @param repeats Number of networks to fit with different random starting #' weights. These are then averaged when producing forecasts. #' @param xreg Optionally, a vector or matrix of external regressors, which #' must have the same number of rows as \code{y}. Must be numeric. #' @param model Output from a previous call to \code{nnetar}. If model is #' passed, this same model is fitted to \code{y} without re-estimating any #' parameters. #' @param subset Optional vector specifying a subset of observations to be used #' in the fit. Can be an integer index vector or a logical vector the same #' length as \code{y}. All observations are used by default. #' @param scale.inputs If TRUE, inputs are scaled by subtracting the column #' means and dividing by their respective standard deviations. If \code{lambda} #' is not \code{NULL}, scaling is applied after Box-Cox transformation. #' @param x Deprecated. Included for backwards compatibility. #' @param \dots Other arguments passed to \code{\link[nnet]{nnet}} for #' \code{nnetar}. #' @inheritParams forecast.ts #' #' @return Returns an object of class "\code{nnetar}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{nnetar}. #' #' \item{model}{A list containing information about the fitted model} #' \item{method}{The name of the forecasting method as a character string} #' \item{x}{The original time series.} #' \item{xreg}{The external regressors used in fitting (if given).} #' \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} #' \item{fitted}{Fitted values (one-step forecasts)} #' \item{...}{Other arguments} #' #' @author Rob J Hyndman and Gabriel Caceres #' @keywords ts #' @examples #' fit <- nnetar(lynx) #' fcast <- forecast(fit) #' plot(fcast) #' #' ## Arguments can be passed to nnet() #' fit <- nnetar(lynx, decay=0.5, maxit=150) #' plot(forecast(fit)) #' lines(lynx) #' #' ## Fit model to first 100 years of lynx data #' fit <- nnetar(window(lynx,end=1920), decay=0.5, maxit=150) #' plot(forecast(fit,h=14)) #' lines(lynx) #' #' ## Apply fitted model to later data, including all optional arguments #' fit2 <- nnetar(window(lynx,start=1921), model=fit) #' #' @export nnetar <- function(y, p, P=1, size, repeats=20, xreg=NULL, lambda=NULL, model=NULL, subset=NULL, scale.inputs=TRUE, x=y, ...) 
{ useoldmodel <- FALSE yname <- deparse(substitute(y)) if (!is.null(model)) { # Use previously fitted model useoldmodel <- TRUE # Check for conflicts between new and old data: # Check model class if (!is.nnetar(model)) { stop("Model must be a nnetar object") } # Check new data m <- max(round(frequency(model$x)), 1L) minlength <- max(c(model$p, model$P * m)) + 1 if (length(x) < minlength) { stop(paste("Series must be at least of length", minlength, "to use fitted model")) } if (tsp(as.ts(x))[3] != m) { warning(paste("Data frequency doesn't match fitted model, coercing to frequency =", m)) x <- ts(x, frequency = m) } # Check xreg if (!is.null(model$xreg)) { if (is.null(xreg)) { stop("No external regressors provided") } if (NCOL(xreg) != NCOL(model$xreg)) { stop("Number of external regressors does not match fitted model") } } # Update parameters with previous model lambda <- model$lambda size <- model$size p <- model$p P <- model$P if (p == 0 && P == 0){ stop("Both p = 0 and P = 0 in supplied 'model' object") } if (P > 0) { lags <- sort(unique(c(seq_len(p), m * (seq_len(P))))) } else { lags <- seq_len(p) } if (is.null(model$scalex)) { scale.inputs <- FALSE } } else { # when not using an old model if (length(y) < 3) { stop("Not enough data to fit a model") } # Check for constant data in time series constant_data <- is.constant(na.interp(x)) if (constant_data){ warning("Constant data, setting p=1, P=0, lambda=NULL, scale.inputs=FALSE") scale.inputs <- FALSE lambda <- NULL p <- 1 P <- 0 } ## Check for constant data in xreg if (!is.null(xreg)){ constant_xreg <- any(apply(as.matrix(xreg), 2, function(x) is.constant(na.interp(x)))) if (constant_xreg){ warning("Constant xreg column, setting scale.inputs=FALSE") scale.inputs <- FALSE } } } # Check for NAs in x if (any(is.na(x))) { warning("Missing values in x, omitting rows") } # Transform data if (!is.null(lambda)) { xx <- BoxCox(x, lambda) lambda <- attr(xx, "lambda") } else { xx <- x } ## Check whether to use a subset of the data xsub <- rep(TRUE, length(x)) if (is.numeric(subset)) { xsub[-subset] <- FALSE } if (is.logical(subset)) { xsub <- subset } # Scale series scalex <- NULL if (scale.inputs) { if (useoldmodel) { scalex <- model$scalex } else { tmpx <- scale(xx[xsub], center = TRUE, scale = TRUE) scalex <- list( center = attr(tmpx, "scaled:center"), scale = attr(tmpx, "scaled:scale") ) } xx <- scale(xx, center = scalex$center, scale = scalex$scale) xx <- xx[, 1] } # Check xreg class & dim xxreg <- NULL scalexreg <- NULL if (!is.null(xreg)) { xxreg <- xreg <- as.matrix(xreg) if (length(x) != NROW(xreg)) { stop("Number of rows in xreg does not match series length") } # Check for NAs in xreg if (any(is.na(xreg))) { warning("Missing values in xreg, omitting rows") } # Scale xreg if (scale.inputs) { if (useoldmodel) { scalexreg <- model$scalexreg } else { tmpx <- scale(xxreg[xsub, ], center = TRUE, scale = TRUE) scalexreg <- list( center = attr(tmpx, "scaled:center"), scale = attr(tmpx, "scaled:scale") ) } xxreg <- scale(xxreg, center = scalexreg$center, scale = scalexreg$scale) } } # Set up lagged matrix n <- length(xx) xx <- as.ts(xx) m <- max(round(frequency(xx)), 1L) if (!useoldmodel) { if (m == 1) { if (missing(p)) { p <- max(length(ar(na.interp(xx))$ar), 1) } # For non-seasonal data also use default calculation for p if that # argument is 0, but issue a warning if (p == 0){ warning("Cannot set p = 0 for non-seasonal data; using default calculation for p") p <- max(length(ar(na.interp(xx))$ar), 1) } if (p >= n) { warning("Reducing number of 
lagged inputs due to short series") p <- n - 1 } lags <- seq_len(p) if (P > 1) { warning("Non-seasonal data, ignoring seasonal lags") } P <- 0 } else { if (missing(p)) { if (n > 2 * m) { x.sa <- seasadj(mstl(na.interp(xx))) } else { x.sa <- na.interp(xx) } p <- max(length(ar(x.sa)$ar), 1) } if (p == 0 && P == 0){ stop("'p' and 'P' cannot both be zero") } if (p >= n) { warning("Reducing number of lagged inputs due to short series") p <- n - 1 } if (P > 0 && n >= m * P + 2) { lags <- sort(unique(c(seq_len(p), m * (seq_len(P))))) } else { lags <- seq_len(p) if (P > 0) { warning("Series too short for seasonal lags") P <- 0 } } } } maxlag <- max(lags) nlag <- length(lags) y <- xx[-(1:maxlag)] lags.X <- matrix(NA_real_, ncol = nlag, nrow = n - maxlag) for (i in 1:nlag) lags.X[, i] <- xx[(maxlag - lags[i] + 1):(n - lags[i])] # Add xreg into lagged matrix lags.X <- cbind(lags.X, xxreg[-(1:maxlag), , drop = FALSE]) if (missing(size)) { size <- round((NCOL(lags.X) + 1) / 2) } # Remove missing values if present j <- complete.cases(lags.X, y) ## Remove values not in subset j <- j & xsub[-(1:maxlag)] ## Stop if there's no data to fit (e.g. due to NAs or NaNs) if (NROW(lags.X[j,, drop=FALSE]) == 0) { stop("No data to fit (possibly due to NA or NaN)") } ## Fit average ANN. if (useoldmodel) { fit <- oldmodel_avnnet(lags.X[j, , drop = FALSE], y[j], size = size, model) } else { fit <- avnnet(lags.X[j, , drop=FALSE], y[j], size = size, repeats = repeats, ...) } # Return results out <- list() out$x <- as.ts(x) out$m <- m out$p <- p out$P <- P out$scalex <- scalex out$scalexreg <- scalexreg out$size <- size out$xreg <- xreg out$lambda <- lambda out$subset <- (1:length(x))[xsub] out$model <- fit out$nnetargs <- list(...) if (useoldmodel) { out$nnetargs <- model$nnetargs } if (NROW(lags.X[j,, drop=FALSE]) == 1){ fits <- c(rep(NA_real_, maxlag), mean(sapply(fit, predict))) } else{ fits <- c(rep(NA_real_, maxlag), rowMeans(sapply(fit, predict))) } if (scale.inputs) { fits <- fits * scalex$scale + scalex$center } fits <- ts(fits) if (!is.null(lambda)) { fits <- InvBoxCox(fits, lambda) } out$fitted <- ts(rep(NA_real_, length(out$x))) out$fitted[c(rep(TRUE, maxlag), j)] <- fits out$fitted <- copy_msts(out$x, out$fitted) out$residuals <- out$x - out$fitted out$lags <- lags out$series <- yname out$method <- paste("NNAR(", p, sep = "") if (P > 0) { out$method <- paste(out$method, ",", P, sep = "") } out$method <- paste(out$method, ",", size, ")", sep = "") if (P > 0) { out$method <- paste(out$method, "[", m, "]", sep = "") } out$call <- match.call() return(structure(out, class = c("nnetar"))) } # Aggregate several neural network models avnnet <- function(x, y, repeats, linout=TRUE, trace=FALSE, ...) { mods <- list() for (i in 1:repeats) mods[[i]] <- nnet::nnet(x, y, linout = linout, trace = trace, ...) return(structure(mods, class = "nnetarmodels")) } # Fit old model to new data oldmodel_avnnet <- function(x, y, size, model) { repeats <- length(model$model) args <- list(x = x, y = y, size = size, linout = 1, trace = FALSE) # include additional nnet arguments args <- c(args, model$nnetargs) # set iterations to zero (i.e. weights stay fixed) args$maxit <- 0 mods <- list() for (i in 1:repeats) { args$Wts <- model$model[[i]]$wts mods[[i]] <- do.call(nnet::nnet, args) } return(structure(mods, class = "nnetarmodels")) } #' @export print.nnetarmodels <- function(x, ...) 
{ cat(paste("\nAverage of", length(x), "networks, each of which is\n")) print(x[[1]]) } #' Forecasting using neural network models #' #' Returns forecasts and other information for univariate neural network #' models. #' #' Prediction intervals are calculated through simulations and can be slow. #' Note that if the network is too complex and overfits the data, the residuals #' can be arbitrarily small; if used for prediction interval calculations, they #' could lead to misleadingly small values. It is possible to use out-of-sample #' residuals to ameliorate this, see examples. #' #' @param object An object of class "\code{nnetar}" resulting from a call to #' \code{\link{nnetar}}. #' @param h Number of periods for forecasting. If \code{xreg} is used, \code{h} #' is ignored and the number of forecast periods is set to the number of rows #' of \code{xreg}. #' @param PI If TRUE, prediction intervals are produced, otherwise only point #' forecasts are calculated. If \code{PI} is FALSE, then \code{level}, #' \code{fan}, \code{bootstrap} and \code{npaths} are all ignored. #' @param level Confidence level for prediction intervals. #' @param fan If \code{TRUE}, level is set to \code{seq(51,99,by=3)}. This is #' suitable for fan plots. #' @param xreg Future values of external regressor variables. #' @param bootstrap If \code{TRUE}, then prediction intervals computed using #' simulations with resampled residuals rather than normally distributed #' errors. Ignored if \code{innov} is not \code{NULL}. #' @param npaths Number of sample paths used in computing simulated prediction #' intervals. #' @param innov Values to use as innovations for prediction intervals. Must be #' a matrix with \code{h} rows and \code{npaths} columns (vectors are coerced #' into a matrix). If present, \code{bootstrap} is ignored. #' @param ... Additional arguments passed to \code{\link{simulate.nnetar}} #' @inheritParams forecast.ts #' #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{forecast.nnetar}. #' #' An object of class "\code{forecast}" is a list containing at least the #' following elements: #' \item{model}{A list containing information about the fitted model} #' \item{method}{The name of the forecasting method as a character string} #' \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} #' \item{upper}{Upper limits for prediction intervals} #' \item{level}{The confidence values associated with the prediction intervals} #' \item{x}{The original time series (either \code{object} itself or the time series #' used to create the model stored as \code{object}).} #' \item{xreg}{The external regressors used in fitting (if given).} #' \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} #' \item{fitted}{Fitted values (one-step forecasts)} #' \item{...}{Other arguments} #' #' @author Rob J Hyndman and Gabriel Caceres #' @seealso \code{\link{nnetar}}. 
#' @keywords ts #' @examples #' ## Fit & forecast model #' fit <- nnetar(USAccDeaths, size=2) #' fcast <- forecast(fit, h=20) #' plot(fcast) #' #' \dontrun{ #' ## Include prediction intervals in forecast #' fcast2 <- forecast(fit, h=20, PI=TRUE, npaths=100) #' plot(fcast2) #' #' ## Set up out-of-sample innovations using cross-validation #' fit_cv <- CVar(USAccDeaths, size=2) #' res_sd <- sd(fit_cv$residuals, na.rm=TRUE) #' myinnovs <- rnorm(20*100, mean=0, sd=res_sd) #' ## Forecast using new innovations #' fcast3 <- forecast(fit, h=20, PI=TRUE, npaths=100, innov=myinnovs) #' plot(fcast3) #' } #' #' @export forecast.nnetar <- function(object, h=ifelse(object$m > 1, 2 * object$m, 10), PI=FALSE, level=c(80, 95), fan=FALSE, xreg=NULL, lambda=object$lambda, bootstrap=FALSE, npaths=1000, innov=NULL, ...) { # require(nnet) out <- object tspx <- tsp(out$x) # if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } # Check if xreg was used in fitted model if (is.null(object$xreg)) { if (!is.null(xreg)) { warning("External regressors were not used in fitted model, xreg will be ignored") } xreg <- NULL } else { if (is.null(xreg)) { stop("No external regressors provided") } xreg <- as.matrix(xreg) if (NCOL(xreg) != NCOL(object$xreg)) { stop("Number of external regressors does not match fitted model") } if(!identical(colnames(xreg), colnames(object$xreg))){ warning("xreg contains different column names from the xreg used in training. Please check that the regressors are in the same order.") } h <- NROW(xreg) } fcast <- numeric(h) xx <- object$x xxreg <- xreg if (!is.null(lambda)) { xx <- BoxCox(xx, lambda) lambda <- attr(xx, "lambda") } # Check and apply scaling of fitted model if (!is.null(object$scalex)) { xx <- scale(xx, center = object$scalex$center, scale = object$scalex$scale) if (!is.null(xreg)) { xxreg <- scale(xreg, center = object$scalexreg$center, scale = object$scalexreg$scale) } } # Get lags used in fitted model lags <- object$lags maxlag <- max(lags) flag <- rev(tail(xx, n = maxlag)) # Iterative 1-step forecast for (i in 1:h) { newdata <- c(flag[lags], xxreg[i, ]) if (any(is.na(newdata))) { stop("I can't forecast when there are missing values near the end of the series.") } fcast[i] <- mean(sapply(object$model, predict, newdata = newdata)) flag <- c(fcast[i], flag[-maxlag]) } # Re-scale point forecasts if (!is.null(object$scalex)) { fcast <- fcast * object$scalex$scale + object$scalex$center } # Add ts properties fcast <- ts(fcast, start = tspx[2] + 1 / tspx[3], frequency = tspx[3]) # Back-transform point forecasts if (!is.null(lambda)) { fcast <- InvBoxCox(fcast, lambda) } # Compute prediction intervals using simulations if (isTRUE(PI)) { nint <- length(level) sim <- matrix(NA, nrow = npaths, ncol = h) if (!is.null(innov)) { if (length(innov) != h * npaths) { stop("Incorrect number of innovations, need h*npaths values") } innov <- matrix(innov, nrow = h, ncol = npaths) bootstrap <- FALSE } for (i in 1:npaths) sim[i, ] <- simulate(object, nsim = h, bootstrap = bootstrap, xreg = xreg, lambda = lambda, innov = innov[, i], ...) 
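    # Interval bounds are empirical quantiles of the simulated sample paths:
    # at each horizon, take the (0.5 - level/200) and (0.5 + level/200)
    # quantiles across the npaths simulations.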
lower <- apply(sim, 2, quantile, 0.5 - level / 200, type = 8, na.rm = TRUE) upper <- apply(sim, 2, quantile, 0.5 + level / 200, type = 8, na.rm = TRUE) if (nint > 1L) { lower <- ts(t(lower)) upper <- ts(t(upper)) } else { lower <- ts(matrix(lower, ncol = 1L)) upper <- ts(matrix(upper, ncol = 1L)) } out$lower <- future_msts(out$x, lower) out$upper <- future_msts(out$x, upper) } else { level <- NULL lower <- NULL upper <- NULL } out$mean <- future_msts(out$x, fcast) out$level <- level return(structure(out, class = "forecast")) } #' @rdname fitted.Arima #' @export fitted.nnetar <- function(object, h=1, ...) { if (h == 1) { return(object$fitted) } else { return(hfitted(object = object, h = h, FUN = "nnetar", ...)) } } #' @export print.nnetar <- function(x, digits = max(3, getOption("digits") - 3), ...) { cat("Series:", x$series, "\n") cat("Model: ", x$method, "\n") # cat(" one hidden layer with",x$size,"nodes\n") cat("Call: ") print(x$call) print(x$model) cat( "\nsigma^2 estimated as ", format(mean(residuals(x) ^ 2, na.rm = TRUE), digits = digits), "\n", sep = "" ) invisible(x) } #' @rdname is.ets #' @export is.nnetar <- function(x) { inherits(x, "nnetar") } #' @rdname is.ets #' @export is.nnetarmodels <- function(x) { inherits(x, "nnetarmodels") } # Scale a univariate time series scale.ts <- function(x, center=TRUE, scale=TRUE) { tspx <- tsp(x) x <- as.ts(scale.default(x, center = center, scale = scale)) tsp(x) <- tspx return(x) } forecast/R/components.R0000644000176200001440000000712314150370574014602 0ustar liggesusers# Functions to extract components from time series decomposition # These should match corresponding functions in the seasonal package # providing similar functional for stl, decomposed.ts and tbats objects #' Extract components from a time series decomposition #' #' Returns a univariate time series equal to either a seasonal component, #' trend-cycle component or remainder component from a time series #' decomposition. #' #' @param object Object created by \code{\link[stats]{decompose}}, #' \code{\link[stats]{stl}} or \code{\link{tbats}}. #' @return Univariate time series. #' @author Rob J Hyndman #' @seealso \code{\link[stats]{stl}}, \code{\link[stats]{decompose}}, #' \code{\link{tbats}}, \code{\link{seasadj}}. 
#' @keywords ts #' @examples #' plot(USAccDeaths) #' fit <- stl(USAccDeaths, s.window="periodic") #' lines(trendcycle(fit),col="red") #' #' library(ggplot2) #' autoplot(cbind( #' Data=USAccDeaths, #' Seasonal=seasonal(fit), #' Trend=trendcycle(fit), #' Remainder=remainder(fit)), #' facets=TRUE) + #' ylab("") + xlab("Year") #' #' @export seasonal <- function(object) { if ("mstl" %in% class(object)) { cols <- grep("Season", colnames(object)) return(object[, cols]) } else if ("stl" %in% class(object)) { return(object$time.series[, "seasonal"]) } else if ("decomposed.ts" %in% class(object)) { return(object$seasonal) } else if ("tbats" %in% class(object)) { comp <- tbats.components(object) scols <- grep("season", colnames(comp)) season <- ts(rowSums(comp[, scols, drop = FALSE])) if (!is.null(object$lambda)) { season <- InvBoxCox(season, object$lambda) } tsp(season) <- tsp(comp) return(season) } else if ("seas" %in% class(object)) { return(object$data[, "seasonal"]) } else { stop("Unknown object type") } } #' @rdname seasonal #' @export trendcycle <- function(object) { if ("mstl" %in% class(object)) { return(object[, "Trend"]) } else if ("stl" %in% class(object)) { return(object$time.series[, "trend"]) } else if ("decomposed.ts" %in% class(object)) { return(object$trend) } # else if("tbats" %in% class(object)) # { # trnd <- tbats.components(object)[,"level"] # if (!is.null(object$lambda)) # trnd <- InvBoxCox(trnd, object$lambda) # return(trnd) # } else if ("seas" %in% class(object)) { return(seasextract_w_na_action(object, "trend")) } else { stop("Unknown object type") } } #' @rdname seasonal #' @export remainder <- function(object) { if ("mstl" %in% class(object)) { return(object[, "Remainder"]) } else if ("stl" %in% class(object)) { return(object$time.series[, "remainder"]) } else if ("decomposed.ts" %in% class(object)) { return(object$random) } # else if("tbats" %in% class(object)) # { # comp <- tbats.components(object) # trnd <- comp[,"level"] # scols <- grep("season",colnames(comp)) # season <- rowSums(comp[,scols,drop=FALSE]) # irreg <- ts(comp[,'observed'] - trnd - season) # tsp(irreg) <- tsp(comp) # return(irreg) # } else if ("seas" %in% class(object)) { return(seasextract_w_na_action(object, "irregular")) } else { stop("Unknown object type") } } ## Copied from seasonal:::extract_w_na_action ## Importing is problematic due to issues with ARM processors seasextract_w_na_action <- function(x, name) { if (is.null(x$data)) { return(NULL) } z <- na.omit(x$data[, name]) if (!is.null(x$na.action)) { if (attr(x$na.action, "class") == "exclude") { z <- ts(stats::napredict(x$na.action, z)) tsp(z) <- tsp(x$x) } } z } forecast/R/dshw.r0000644000176200001440000002212714207263356013425 0ustar liggesusers#################################################################### ## Double Seasonal Holt Winters method as per Taylor (2003) ## Periods must be nested. ## y can be an msts object, or periods can be passed explicitly. #################################################################### #' Double-Seasonal Holt-Winters Forecasting #' #' Returns forecasts using Taylor's (2003) Double-Seasonal Holt-Winters method. #' #' Taylor's (2003) double-seasonal Holt-Winters method uses additive trend and #' multiplicative seasonality, where there are two seasonal components which #' are multiplied together. For example, with a series of half-hourly data, one #' would set \code{period1=48} for the daily period and \code{period2=336} for #' the weekly period. 
The smoothing parameter notation used here is different
#' from that in Taylor (2003); instead it matches that used in Hyndman et al
#' (2008) and that used for the \code{\link{ets}} function.
#'
#' @param y Either an \code{\link{msts}} object with two seasonal periods or a
#' numeric vector.
#' @param period1 Period of the shorter seasonal period. Only used if \code{y}
#' is not an \code{\link{msts}} object.
#' @param period2 Period of the longer seasonal period. Only used if \code{y}
#' is not an \code{\link{msts}} object.
#' @param h Number of periods for forecasting.
#' @param alpha Smoothing parameter for the level. If \code{NULL}, the
#' parameter is estimated using least squares.
#' @param beta Smoothing parameter for the slope. If \code{NULL}, the parameter
#' is estimated using least squares.
#' @param gamma Smoothing parameter for the first seasonal period. If
#' \code{NULL}, the parameter is estimated using least squares.
#' @param omega Smoothing parameter for the second seasonal period. If
#' \code{NULL}, the parameter is estimated using least squares.
#' @param phi Autoregressive parameter. If \code{NULL}, the parameter is
#' estimated using least squares.
#' @param armethod If TRUE, the forecasts are adjusted using an AR(1) model for
#' the errors.
#' @param model If specified, an existing model is applied to a new data set.
#' @inheritParams forecast.ts
#' @inheritParams BoxCox
#'
#' @return An object of class "\code{forecast}" which is a list that includes the
#' following elements:
#' \item{model}{A list containing information about the fitted model}
#' \item{method}{The name of the forecasting method as a character string}
#' \item{mean}{Point forecasts as a time series}
#' \item{x}{The original time series.}
#' \item{residuals}{Residuals from the fitted model. That is x minus fitted values.}
#' \item{fitted}{Fitted values (one-step forecasts)}
#'
#' The function \code{summary} is used to obtain and print a summary of the
#' results, while the function \code{plot} produces a plot of the forecasts.
#'
#' The generic accessor functions \code{fitted.values} and \code{residuals}
#' extract useful features of the value returned by \code{dshw}.
#'
#' @author Rob J Hyndman
#' @seealso \code{\link[stats]{HoltWinters}}, \code{\link{ets}}.
#' @references Taylor, J.W. (2003) Short-term electricity demand forecasting
#' using double seasonal exponential smoothing. \emph{Journal of the
#' Operational Research Society}, \bold{54}, 799-805.
#'
#' Hyndman, R.J., Koehler, A.B., Ord, J.K., and Snyder, R.D. (2008)
#' \emph{Forecasting with exponential smoothing: the state space approach},
#' Springer-Verlag. \url{http://www.exponentialsmoothing.net}.
#' @keywords ts #' @examples #' #' \dontrun{ #' fcast <- dshw(taylor) #' plot(fcast) #' #' t <- seq(0,5,by=1/20) #' x <- exp(sin(2*pi*t) + cos(2*pi*t*4) + rnorm(length(t),0,.1)) #' fit <- dshw(x,20,5) #' plot(fit) #' } #' #' @export dshw <- function(y, period1=NULL, period2=NULL, h=2 * max(period1, period2), alpha=NULL, beta=NULL, gamma=NULL, omega=NULL, phi=NULL, lambda=NULL, biasadj=FALSE, armethod=TRUE, model = NULL) { if (min(y, na.rm = TRUE) <= 0) { stop("dshw not suitable when data contain zeros or negative numbers") } seriesname <- deparse(substitute(y)) if (!is.null(model) && model$method == "DSHW") { period1 <- model$period1 period2 <- model$period2 } else if (inherits(y, "msts") && (length(attr(y, "msts")) == 2)) { period1 <- as.integer(sort(attr(y, "msts"))[1]) period2 <- as.integer(sort(attr(y, "msts"))[2]) } else if (is.null(period1) || is.null(period2)) { stop("Error in dshw(): y must either be an msts object with two seasonal periods OR the seasonal periods should be specified with period1= and period2=") } else { if (period1 > period2) { tmp <- period2 period2 <- period1 period1 <- tmp } } if (any(class(y) != "msts")) { y <- msts(y, c(period1, period2)) } if (length(y) < 2 * max(period2)) { stop("Insufficient data to estimate model") } if (!armethod) { phi <- 0 } if (period1 < 1 || period1 == period2) { stop("Inappropriate periods") } ratio <- period2 / period1 if (ratio - trunc(ratio) > 1e-10) { stop("Seasonal periods are not nested") } if (!is.null(model)) { lambda <- model$model$lambda } if (!is.null(lambda)) { origy <- y y <- BoxCox(y, lambda) lambda <- attr(y, "lambda") } if (!is.null(model)) { pars <- model$model alpha <- pars$alpha beta <- pars$beta gamma <- pars$gamma omega <- pars$omega phi <- pars$phi } else { pars <- rep(NA, 5) if (!is.null(alpha)) { pars[1] <- alpha } if (!is.null(beta)) { pars[2] <- beta } if (!is.null(gamma)) { pars[3] <- gamma } if (!is.null(omega)) { pars[4] <- omega } if (!is.null(phi)) { pars[5] <- phi } } # Estimate parameters if (sum(is.na(pars)) > 0) { pars <- par_dshw(y, period1, period2, pars) alpha <- pars[1] beta <- pars[2] gamma <- pars[3] omega <- pars[4] phi <- pars[5] } ## Allocate space n <- length(y) yhat <- numeric(n) ## Starting values I <- seasindex(y, period1) wstart <- seasindex(y, period2) wstart <- wstart / rep(I, ratio) w <- wstart x <- c(0, diff(y[1:period2])) t <- t.start <- mean(((y[1:period2] - y[(period2 + 1):(2 * period2)]) / period2) + x) / 2 s <- s.start <- (mean(y[1:(2 * period2)]) - (period2 + 0.5) * t) ## In-sample fit for (i in 1:n) { yhat[i] <- (s + t) * I[i] * w[i] snew <- alpha * (y[i] / (I[i] * w[i])) + (1 - alpha) * (s + t) tnew <- beta * (snew - s) + (1 - beta) * t I[i + period1] <- gamma * (y[i] / (snew * w[i])) + (1 - gamma) * I[i] w[i + period2] <- omega * (y[i] / (snew * I[i])) + (1 - omega) * w[i] s <- snew t <- tnew } # Forecasts fcast <- (s + (1:h) * t) * rep(I[n + (1:period1)], h / period1 + 1)[1:h] * rep(w[n + (1:period2)], h / period2 + 1)[1:h] fcast <- msts(fcast, c(period1, period2), start = tsp(y)[2] + 1 / tsp(y)[3]) # Calculate MSE and MAPE yhat <- ts(yhat) tsp(yhat) <- tsp(y) yhat <- msts(yhat, c(period1, period2)) e <- y - yhat e <- msts(e, c(period1, period2)) if (armethod) { yhat <- yhat + phi * c(0, e[-n]) fcast <- fcast + phi ^ (1:h) * e[n] e <- y - yhat } mse <- mean(e ^ 2) mape <- mean(abs(e) / y) * 100 end.y <- end(y) if (end.y[2] == frequency(y)) { end.y[1] <- end.y[1] + 1 end.y[2] <- 1 } else { end.y[2] <- end.y[2] + 1 } fcast <- msts(fcast, c(period1, period2)) if 
(!is.null(lambda)) { y <- origy fcast <- InvBoxCox(fcast, lambda, biasadj, var(e)) attr(lambda, "biasadj") <- biasadj # Does this also need a biasadj backtransform? yhat <- InvBoxCox(yhat, lambda) } return(structure(list( mean = fcast, method = "DSHW", x = y, residuals = e, fitted = yhat, series = seriesname, model = list( mape = mape, mse = mse, alpha = alpha, beta = beta, gamma = gamma, omega = omega, phi = phi, lambda = lambda, l0 = s.start, b0 = t.start, s10 = wstart, s20 = I ), period1 = period1, period2 = period2 ), class = "forecast")) } ### Double Seasonal Holt-Winters smoothing parameter optimization par_dshw <- function(y, period1, period2, pars) { start <- c(0.1, 0.01, 0.001, 0.001, 0.0)[is.na(pars)] out <- optim(start, dshw.mse, y = y, period1 = period1, period2 = period2, pars = pars) pars[is.na(pars)] <- out$par return(pars) } dshw.mse <- function(par, y, period1, period2, pars) { pars[is.na(pars)] <- par if (max(pars) > 0.99 | min(pars) < 0 | pars[5] > .9) { return(Inf) } else { return(dshw(y, period1, period2, h = 1, pars[1], pars[2], pars[3], pars[4], pars[5], armethod = (abs(pars[5]) > 1e-7))$model$mse) } } ### Calculating seasonal indexes seasindex <- function(y, p) { n <- length(y) n2 <- 2 * p shorty <- y[1:n2] average <- numeric(n) simplema <- zoo::rollmean.default(shorty, p) if (identical(p %% 2, 0)) # Even order { centeredma <- zoo::rollmean.default(simplema[1:(n2 - p + 1)], 2) average[p / 2 + 1:p] <- shorty[p / 2 + 1:p] / centeredma[1:p] si <- average[c(p + (1:(p / 2)), (1 + p / 2):p)] } else # Odd order { average[(p - 1) / 2 + 1:p] <- shorty[(p - 1) / 2 + 1:p] / simplema[1:p] si <- average[c(p + (1:((p - 1) / 2)), (1 + (p - 1) / 2):p)] } return(si) } forecast/R/mforecast.R0000644000176200001440000002532714323125536014404 0ustar liggesusers#' @rdname is.forecast #' @export is.mforecast <- function(x) { inherits(x, "mforecast") } mlmsplit <- function(x, index=NULL) { if (is.null(index)) { stop("Must select lm using index=integer(1)") } mfit <- match(c("coefficients", "residuals", "effects", "fitted.values"), names(x), 0L) for (j in mfit) { x[[j]] <- x[[j]][, index] } class(x) <- "lm" y <- attr(x$terms, "response") yName <- make.names(colnames(x$model[[y]])[index]) x$model[[y]] <- x$model[[y]][, index] colnames(x$model)[y] <- yName attr(x$model, "terms") <- terms(reformulate(attr(x$terms, "term.labels"), response = yName), data = x$model) if (!is.null(tsp(x$data[, 1]))) { tspx <- tsp(x$data[, 1]) # Consolidate ts attributes for forecast.lm x$data <- lapply(x$model, function(x) ts(x, start = tspx[1], end = tspx[2], frequency = tspx[3])) class(x$data) <- "data.frame" row.names(x$data) <- 1:max(sapply(x$data, NROW)) } x$terms <- terms(x$model) return(x) } #' Forecast a multiple linear model with possible time series components #' #' \code{forecast.mlm} is used to predict multiple linear models, especially #' those involving trend and seasonality components. #' #' \code{forecast.mlm} is largely a wrapper for #' \code{\link[forecast]{forecast.lm}()} except that it allows forecasts to be #' generated on multiple series. Also, the output is reformatted into a #' \code{mforecast} object. #' #' @param object Object of class "mlm", usually the result of a call to #' \code{\link[stats]{lm}} or \code{\link{tslm}}. #' @param newdata An optional data frame in which to look for variables with #' which to predict. If omitted, it is assumed that the only variables are #' trend and season, and \code{h} forecasts are produced. #' @param level Confidence level for prediction intervals. 
#' @param fan If \code{TRUE}, level is set to seq(51,99,by=3). This is suitable #' for fan plots. #' @param h Number of periods for forecasting. Ignored if \code{newdata} #' present. #' @param ts If \code{TRUE}, the forecasts will be treated as time series #' provided the original data is a time series; the \code{newdata} will be #' interpreted as related to the subsequent time periods. If \code{FALSE}, any #' time series attributes of the original data will be ignored. #' @param ... Other arguments passed to \code{\link[forecast]{forecast.lm}()}. #' @inheritParams forecast.ts #' #' @return An object of class "\code{mforecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{forecast.lm}. #' #' An object of class \code{"mforecast"} is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a multivariate time series} #' \item{lower}{Lower limits for prediction intervals of each series} #' \item{upper}{Upper limits for prediction intervals of each series} #' \item{level}{The confidence values associated with the prediction intervals} #' \item{x}{The historical data for the response variable.} #' \item{residuals}{Residuals from the fitted model. That is x minus fitted #' values.} \item{fitted}{Fitted values} #' @author Mitchell O'Hara-Wild #' @seealso \code{\link{tslm}}, \code{\link{forecast.lm}}, #' \code{\link[stats]{lm}}. #' @examples #' #' lungDeaths <- cbind(mdeaths, fdeaths) #' fit <- tslm(lungDeaths ~ trend + season) #' fcast <- forecast(fit, h=10) #' #' carPower <- as.matrix(mtcars[,c("qsec","hp")]) #' carmpg <- mtcars[,"mpg"] #' fit <- lm(carPower ~ carmpg) #' fcast <- forecast(fit, newdata=data.frame(carmpg=30)) #' #' @export forecast.mlm <- function(object, newdata, h=10, level=c(80, 95), fan=FALSE, lambda=object$lambda, biasadj=NULL, ts=TRUE, ...) { out <- list(model = object, forecast = vector("list", NCOL(object$coefficients))) cl <- match.call() cl[[1]] <- quote(forecast.lm) cl$object <- quote(mlmsplit(object, index = i)) for (i in seq_along(out$forecast)) { out$forecast[[i]] <- eval(cl) out$forecast[[i]]$series <- colnames(object$coefficients)[i] } out$method <- rep("Multiple linear regression model", length(out$forecast)) names(out$forecast) <- names(out$method) <- colnames(object$coefficients) return(structure(out, class = "mforecast")) } #' Forecasting time series #' #' \code{mforecast} is a class of objects for forecasting from multivariate #' time series or multivariate time series models. The function invokes #' particular \emph{methods} which depend on the class of the first argument. #' #' For example, the function \code{\link{forecast.mlm}} makes multivariate #' forecasts based on the results produced by \code{\link{tslm}}. #' #' @aliases mforecast print.mforecast summary.mforecast as.data.frame.mforecast #' #' @param object a multivariate time series or multivariate time series model #' for which forecasts are required #' @param h Number of periods for forecasting #' @param level Confidence level for prediction intervals. #' @param fan If TRUE, \code{level} is set to \code{seq(51,99,by=3)}. This is #' suitable for fan plots. 
#' @param robust If TRUE, the function is robust to missing values and outliers
#' in \code{object}. This argument is only valid when \code{object} is of class
#' \code{mts}.
#' @param find.frequency If TRUE, the function determines the appropriate
#' period, if the data is of unknown period.
#' @param allow.multiplicative.trend If TRUE, then ETS models with
#' multiplicative trends are allowed. Otherwise, only additive or no trend ETS
#' models are permitted.
#' @param ... Additional arguments affecting the forecasts produced.
#' @inheritParams forecast.ts
#' @return An object of class "\code{mforecast}".
#'
#' The function \code{summary} is used to obtain and print a summary of the
#' results, while the function \code{plot} produces a plot of the multivariate
#' forecasts and prediction intervals.
#'
#' The generic accessor functions \code{fitted.values} and \code{residuals}
#' extract various useful features of the value returned by
#' \code{forecast$model}.
#'
#' An object of class \code{"mforecast"} is a list usually containing at least
#' the following elements: \item{model}{A list containing information about the
#' fitted model} \item{method}{The name of the forecasting method as a
#' character string} \item{mean}{Point forecasts as a time series}
#' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper
#' limits for prediction intervals} \item{level}{The confidence values
#' associated with the prediction intervals} \item{x}{The original time series
#' (either \code{object} itself or the time series used to create the model
#' stored as \code{object}).} \item{residuals}{Residuals from the fitted model.
#' For models with additive errors, the residuals will be x minus the fitted
#' values.} \item{fitted}{Fitted values (one-step forecasts)}
#' @author Rob J Hyndman & Mitchell O'Hara-Wild
#' @seealso Other functions which return objects of class \code{"mforecast"}
#' are \code{\link{forecast.mlm}}, \code{forecast.varest}.
#'
#' @export
forecast.mts <- function(object, h=ifelse(frequency(object) > 1, 2 * frequency(object), 10),
                         level=c(80, 95), fan=FALSE, robust=FALSE, lambda = NULL, biasadj = FALSE,
                         find.frequency = FALSE, allow.multiplicative.trend=FALSE, ...) {
  out <- list(forecast = vector("list", NCOL(object)))
  cl <- match.call()
  cl[[1]] <- quote(forecast.ts)
  cl$object <- quote(object[, i])
  for (i in 1:NCOL(object)) {
    out$forecast[[i]] <- eval(cl)
    out$forecast[[i]]$series <- colnames(object)[i]
  }
  out$method <- vapply(out$forecast, function(x) x$method, character(1))
  names(out$forecast) <- names(out$method) <- colnames(object)
  return(structure(out, class = "mforecast"))
}

#' @export
print.mforecast <- function(x, ...) {
  lapply(x$forecast, function(x) {
    cat(paste0(x$series, "\n"))
    print(x)
    cat("\n")
  })
  return(invisible())
}

#' Multivariate forecast plot
#'
#' Plots historical data with multivariate forecasts and prediction intervals.
#'
#' \code{autoplot} will produce an equivalent plot as a ggplot object.
#'
#' @param x Multivariate forecast object of class \code{mforecast}.
#' @param object Multivariate forecast object of class \code{mforecast}. Used
#' for ggplot graphics (S3 method consistency).
#' @param main Main title. Default is the forecast method. For autoplot,
#' specify a vector of titles for each plot.
#' @param xlab X-axis label. For autoplot, specify a vector of labels for each
#' plot.
#' @param PI If \code{FALSE}, confidence intervals will not be plotted, giving
#' only the forecast line.
#' @param facets If TRUE, multiple time series will be faceted. If FALSE, each #' series will be assigned a colour. #' @param colour If TRUE, the time series will be assigned a colour aesthetic #' @param series Matches an unidentified forecast layer with a coloured object #' on the plot. #' @param \dots additional arguments to each individual \code{plot}. #' @author Mitchell O'Hara-Wild #' @seealso \code{\link[forecast]{plot.forecast}}, \code{\link[stats]{plot.ts}} #' @references Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles #' and practice}, 2nd edition, OTexts: Melbourne, Australia. #' \url{https://otexts.com/fpp2/} #' @keywords ts #' @examples #' library(ggplot2) #' #' lungDeaths <- cbind(mdeaths, fdeaths) #' fit <- tslm(lungDeaths ~ trend + season) #' fcast <- forecast(fit, h=10) #' plot(fcast) #' autoplot(fcast) #' #' carPower <- as.matrix(mtcars[,c("qsec","hp")]) #' carmpg <- mtcars[,"mpg"] #' fit <- lm(carPower ~ carmpg) #' fcast <- forecast(fit, newdata=data.frame(carmpg=30)) #' plot(fcast, xlab="Year") #' autoplot(fcast, xlab=rep("Year",2)) #' #' @export plot.mforecast <- function(x, main=paste("Forecasts from", unique(x$method)), xlab="time", ...) { oldpar <- par(mfrow = c(length(x$forecast), 1), mar = c(0, 5.1, 0, 2.1), oma = c(6, 0, 5, 0)) on.exit(par(oldpar)) for (fcast in x$forecast) { plot(fcast, main = "", xaxt = "n", ylab = fcast$series, ...) } axis(1) mtext(xlab, outer = TRUE, side = 1, line = 3) title(main = main, outer = TRUE) } #' @export summary.mforecast <- function(object, ...) { class(object) <- c("summary.mforecast", class(object)) object } #' @export print.summary.mforecast <- function(x, ...) { cat(paste("\nForecast method:", unique(x$method))) cat(paste("\n\nModel Information:\n")) print(x$model) cat("\nError measures:\n") print(accuracy(x)) if (is.null(x$forecast)) { cat("\n No forecasts\n") } else { cat("\nForecasts:\n") NextMethod() } } forecast/R/forecast-package.R0000644000176200001440000000265314272665773015634 0ustar liggesusers#' @keywords package #' @aliases forecast-package "_PACKAGE" #' @import parallel #' @import Rcpp #' #' @importFrom colorspace sequential_hcl #' @importFrom fracdiff fracdiff diffseries fracdiff.sim #' @importFrom tseries adf.test pp.test kpss.test #' @importFrom zoo rollmean as.Date as.yearqtr #' @importFrom timeDate as.timeDate isBizday difftimeDate Easter as.Date.timeDate #' @importFrom nnet nnet #' @importFrom grDevices gray heat.colors nclass.FD palette #' @importFrom graphics abline axis grid layout lines mtext par plot points polygon text title hist #' @importFrom stats Box.test acf approx ar arima arima.sim as.ts complete.cases cycle decompose diffinv end extractAIC fitted formula frequency window filter na.contiguous spec.ar hatvalues is.ts ksmooth lm lsfit loess median model.frame na.exclude na.omit na.pass optim optimize pf plot.ts poly predict pt qnorm qt quantile residuals rnorm runif sd simulate smooth.spline start stl supsmu terms time ts tsp tsp<- tsdiag var logLik nobs napredict #' @importFrom stats aggregate as.formula is.mts reformulate #' @importFrom utils packageVersion tail head #' @importFrom ggplot2 autoplot fortify #' @importFrom lmtest bgtest #' @importFrom stats supsmu #' @importFrom magrittr %>% #' @importFrom generics forecast accuracy #' #' @useDynLib forecast, .registration = TRUE NULL # Generics to re-export #' @export magrittr::`%>%` #' @export generics::forecast #' @export generics::accuracy forecast/R/etsforecast.R0000644000176200001440000002635414272665773014763 0ustar 
liggesusers#' Forecasting using ETS models #' #' Returns forecasts and other information for univariate ETS models. #' #' #' @param object An object of class "\code{ets}". Usually the result of a call #' to \code{\link{ets}}. #' @param h Number of periods for forecasting #' @param level Confidence level for prediction intervals. #' @param fan If TRUE, level is set to seq(51,99,by=3). This is suitable for #' fan plots. #' @param simulate If TRUE, prediction intervals are produced by simulation rather #' than using analytic formulae. Errors are assumed to be normally distributed. #' @param bootstrap If TRUE, then prediction intervals are produced by simulation using #' resampled errors (rather than normally distributed errors). #' @param npaths Number of sample paths used in computing simulated prediction #' intervals. #' @param PI If TRUE, prediction intervals are produced, otherwise only point #' forecasts are calculated. If \code{PI} is FALSE, then \code{level}, #' \code{fan}, \code{simulate}, \code{bootstrap} and \code{npaths} are all #' ignored. #' @param ... Other arguments. #' @inheritParams forecast.ts #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{forecast.ets}. #' #' An object of class \code{"forecast"} is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The original time series #' (either \code{object} itself or the time series used to create the model #' stored as \code{object}).} \item{residuals}{Residuals from the fitted model. #' For models with additive errors, the residuals are x - fitted values. For #' models with multiplicative errors, the residuals are equal to x /(fitted #' values) - 1.} \item{fitted}{Fitted values (one-step forecasts)} #' @author Rob J Hyndman #' @seealso \code{\link{ets}}, \code{\link{ses}}, \code{\link{holt}}, #' \code{\link{hw}}. #' @keywords ts #' @examples #' fit <- ets(USAccDeaths) #' plot(forecast(fit,h=48)) #' #' @export forecast.ets #' @export forecast.ets <- function(object, h=ifelse(object$m > 1, 2 * object$m, 10), level=c(80, 95), fan=FALSE, simulate=FALSE, bootstrap=FALSE, npaths=5000, PI=TRUE, lambda=object$lambda, biasadj=NULL, ...) 
{ # Check inputs # if(h>2000 | h<=0) if (h <= 0) { stop("Forecast horizon out of bounds") } if (is.null(lambda)) { biasadj <- FALSE } else { if (is.null(biasadj)) { biasadj <- attr(lambda, "biasadj") } if (!is.logical(biasadj)) { warning("biasadj information not found, defaulting to FALSE.") biasadj <- FALSE } } if (!PI && !biasadj) { simulate <- bootstrap <- fan <- FALSE if (!biasadj) { npaths <- 2 } # Just to avoid errors level <- 90 } if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } # Order levels level <- sort(level) n <- length(object$x) damped <- as.logical(object$components[4]) if (bootstrap) { simulate <- TRUE } if (simulate) { f <- pegelsfcast.C(h, object, level = level, bootstrap = bootstrap, npaths = npaths) } else if (object$components[1] == "A" && is.element(object$components[2], c("A", "N")) && is.element(object$components[3], c("N", "A"))) { f <- class1(h, object$states[n + 1, ], object$components[2], object$components[3], damped, object$m, object$sigma2, object$par) } else if (object$components[1] == "M" && is.element(object$components[2], c("A", "N")) && is.element(object$components[3], c("N", "A"))) { f <- class2(h, object$states[n + 1, ], object$components[2], object$components[3], damped, object$m, object$sigma2, object$par) } else if (object$components[1] == "M" && object$components[3] == "M" && object$components[2] != "M") { f <- class3(h, object$states[n + 1, ], object$components[2], object$components[3], damped, object$m, object$sigma2, object$par) } else { f <- pegelsfcast.C(h, object, level = level, bootstrap = bootstrap, npaths = npaths) } tsp.x <- tsp(object$x) if (!is.null(tsp.x)) { start.f <- tsp(object$x)[2] + 1 / object$m } else { start.f <- length(object$x) + 1 } out <- list( model = object, mean = future_msts(object$x, f$mu), level = level, x = object$x ) if (PI || biasadj) { if (!is.null(f$var)) { out$lower <- out$upper <- ts(matrix(NA, ncol = length(level), nrow = h)) colnames(out$lower) <- colnames(out$upper) <- paste(level, "%", sep = "") for (i in 1:length(level)) { marg.error <- sqrt(f$var) * abs(qnorm((100 - level[i]) / 200)) out$lower[, i] <- out$mean - marg.error out$upper[, i] <- out$mean + marg.error } out$lower <- copy_msts(out$mean, out$lower) out$upper <- copy_msts(out$mean, out$upper) } else if (!is.null(f$lower)) { out$lower <- copy_msts(out$mean, f$lower) out$upper <- copy_msts(out$mean, f$upper) } else if (PI) { warning("No prediction intervals for this model") } else if (any(biasadj)) { warning("No bias adjustment possible") } } out$fitted <- copy_msts(object$x, fitted(object)) out$method <- object$method if (!is.null(object$series)) { out$series <- object$series } else { out$series <- object$call$y } out$residuals <- copy_msts(object$x, residuals(object)) if (!is.null(lambda)) { # out$x <- InvBoxCox(object$x,lambda) # out$fitted <- InvBoxCox(out$fitted,lambda) out$mean <- InvBoxCox(out$mean, lambda, biasadj, out) if (PI) # PI = TRUE { out$lower <- InvBoxCox(out$lower, lambda) out$upper <- InvBoxCox(out$upper, lambda) } } if (!PI) { out$lower <- out$upper <- out$level <- NULL } return(structure(out, class = "forecast")) } pegelsfcast.C <- function(h, obj, npaths, level, bootstrap) { y.paths <- matrix(NA, nrow = npaths, ncol = h) obj$lambda <- NULL # No need to transform these here as we do it later. 
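  # Simulate npaths future sample paths from the fitted ETS model; the
  # prediction interval bounds further below are empirical quantiles of these
  # simulated paths.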
for (i in 1:npaths) y.paths[i, ] <- simulate.ets(obj, h, future = TRUE, bootstrap = bootstrap) y.f <- .C( "etsforecast", as.double(obj$states[length(obj$x) + 1, ]), as.integer(obj$m), as.integer(switch(obj$components[2], "N" = 0, "A" = 1, "M" = 2)), as.integer(switch(obj$components[3], "N" = 0, "A" = 1, "M" = 2)), as.double(ifelse(obj$components[4] == "FALSE", 1, obj$par["phi"])), as.integer(h), as.double(numeric(h)), PACKAGE = "forecast" )[[7]] if (abs(y.f[1] + 99999) < 1e-7) { stop("Problem with multiplicative damped trend") } lower <- apply(y.paths, 2, quantile, 0.5 - level / 200, type = 8, na.rm = TRUE) upper <- apply(y.paths, 2, quantile, 0.5 + level / 200, type = 8, na.rm = TRUE) if (length(level) > 1) { lower <- t(lower) upper <- t(upper) } return(list(mu = y.f, lower = lower, upper = upper)) } class1 <- function(h, last.state, trendtype, seasontype, damped, m, sigma2, par) { p <- length(last.state) H <- matrix(c(1, rep(0, p - 1)), nrow = 1) if (seasontype == "A") { H[1, p] <- 1 } if (trendtype == "A") { if (damped) { H[1, 2] <- par["phi"] } else { H[1, 2] <- 1 } } F <- matrix(0, p, p) F[1, 1] <- 1 if (trendtype == "A") { if (damped) { F[1, 2] <- F[2, 2] <- par["phi"] } else { F[1, 2] <- F[2, 2] <- 1 } } if (seasontype == "A") { F[p - m + 1, p] <- 1 F[(p - m + 2):p, (p - m + 1):(p - 1)] <- diag(m - 1) } G <- matrix(0, nrow = p, ncol = 1) G[1, 1] <- par["alpha"] if (trendtype == "A") { G[2, 1] <- par["beta"] } if (seasontype == "A") { G[3, 1] <- par["gamma"] } mu <- numeric(h) Fj <- diag(p) cj <- numeric(h - 1) if (h > 1) { for (i in 1:(h - 1)) { mu[i] <- H %*% Fj %*% last.state cj[i] <- H %*% Fj %*% G Fj <- Fj %*% F } cj2 <- cumsum(cj ^ 2) var <- sigma2 * c(1, 1 + cj2) } else { var <- sigma2 } mu[h] <- H %*% Fj %*% last.state return(list(mu = mu, var = var, cj = cj)) } class2 <- function(h, last.state, trendtype, seasontype, damped, m, sigma2, par) { tmp <- class1(h, last.state, trendtype, seasontype, damped, m, sigma2, par) theta <- numeric(h) theta[1] <- tmp$mu[1] ^ 2 if (h > 1) { for (j in 2:h) theta[j] <- tmp$mu[j] ^ 2 + sigma2 * sum(tmp$cj[1:(j - 1)] ^ 2 * theta[(j - 1):1]) } var <- (1 + sigma2) * theta - tmp$mu ^ 2 return(list(mu = tmp$mu, var = var)) } class3 <- function(h, last.state, trendtype, seasontype, damped, m, sigma2, par) { p <- length(last.state) H1 <- matrix(rep(1, 1 + (trendtype != "N")), nrow = 1) H2 <- matrix(c(rep(0, m - 1), 1), nrow = 1) if (trendtype == "N") { F1 <- 1 G1 <- par["alpha"] } else { F1 <- rbind(c(1, 1), c(0, ifelse(damped, par["phi"], 1))) G1 <- rbind(c(par["alpha"], par["alpha"]), c(par["beta"], par["beta"])) } F2 <- rbind(c(rep(0, m - 1), 1), cbind(diag(m - 1), rep(0, m - 1))) G2 <- matrix(0, m, m) G2[1, m] <- par["gamma"] Mh <- matrix(last.state[1:(p - m)]) %*% matrix(last.state[(p - m + 1):p], nrow = 1) Vh <- matrix(0, length(Mh), length(Mh)) H21 <- H2 %x% H1 F21 <- F2 %x% F1 G21 <- G2 %x% G1 K <- (G2 %x% F1) + (F2 %x% G1) mu <- var <- numeric(h) for (i in 1:h) { mu[i] <- H1 %*% Mh %*% t(H2) var[i] <- (1 + sigma2) * H21 %*% Vh %*% t(H21) + sigma2 * mu[i] ^ 2 vecMh <- c(Mh) Vh <- F21 %*% Vh %*% t(F21) + sigma2 * (F21 %*% Vh %*% t(G21) + G21 %*% Vh %*% t(F21) + K %*% (Vh + vecMh %*% t(vecMh)) %*% t(K) + sigma2 * G21 %*% (3 * Vh + 2 * vecMh %*% t(vecMh)) %*% t(G21)) Mh <- F1 %*% Mh %*% t(F2) + G1 %*% Mh %*% t(G2) * sigma2 } return(list(mu = mu, var = var)) } # ses <- function(x,h=10,level=c(80,95),fan=FALSE,...) # { # fcast <- forecast(ets(x,"ANN"),h,level=level,fan=fan,...) 
# fcast$method <- "Simple exponential smoothing" # fcast$model$call <- match.call() # return(fcast) # } # holt <- function(x,h=10, damped=FALSE, level=c(80,95), fan=FALSE, ...) # { # junk <- forecast(ets(x,"AAN",damped=damped),h,level=level,fan=fan,...) # if(damped) # junk$method <- "Damped Holt's method" # else # junk$method <- "Holt's method" # junk$model$call <- match.call() # return(junk) # } # hw <- function(x,h=2*frequency(x),seasonal="additive",damped=FALSE,level=c(80,95), fan=FALSE, ...) # { # if(seasonal=="additive") # { # junk <- forecast(ets(x,"AAA",damped=damped),h,level=level,fan=fan,...) # junk$method <- "Holt-Winters' additive method" # } # else # { # junk <- forecast(ets(x,"MAM",damped=damped),h,level=level,fan=fan,...) # junk$method <- "Holt-Winters' multiplicative method" # } # junk$model$call <- match.call() # return(junk) # } forecast/R/checkresiduals.R0000644000176200001440000001114614456202551015404 0ustar liggesusers#' Check that residuals from a time series model look like white noise #' #' If \code{plot=TRUE}, produces a time plot of the residuals, the #' corresponding ACF, and a histogram. If \code{test} is not \code{FALSE}, #' the output from either a Ljung-Box test or Breusch-Godfrey test is printed. #' #' @param object Either a time series model, a forecast object, or a time #' series (assumed to be residuals). #' @param lag Number of lags to use in the Ljung-Box or Breusch-Godfrey test. #' If missing, it is set to \code{min(10,n/5)} for non-seasonal data, and #' \code{min(2m, n/5)} for seasonal data, where \code{n} is the length of the series, #' and \code{m} is the seasonal period of the data. It is further constrained to be #' at least \code{df+3} where \code{df} is the degrees of freedom of the model. This #' ensures there are at least 3 degrees of freedom used in the chi-squared test. #' @param test Test to use for serial correlation. By default, if \code{object} #' is of class \code{lm}, then \code{test="BG"}. Otherwise, \code{test="LB"}. #' Setting \code{test=FALSE} will prevent the test results being printed. #' @param plot Logical. If \code{TRUE}, will produce the plot. #' @param ... Other arguments are passed to \code{\link{ggtsdisplay}}. #' @return None #' @author Rob J Hyndman #' @seealso \code{\link{ggtsdisplay}}, \code{\link[stats]{Box.test}}, #' \code{\link[lmtest]{bgtest}} #' @examples #' #' fit <- ets(WWWusage) #' checkresiduals(fit) #' #' @export checkresiduals <- function(object, lag, test, plot = TRUE, ...) 
{ showtest <- TRUE if (missing(test)) { if (is.element("lm", class(object))) { test <- "BG" } else { test <- "LB" } showtest <- TRUE } else if (test != FALSE) { test <- match.arg(test, c("LB", "BG")) showtest <- TRUE } else { showtest <- FALSE } # Extract residuals if (is.element("ts", class(object)) | is.element("numeric", class(object))) { residuals <- object object <- list(method = "Missing") } else { residuals <- residuals(object) } if (length(residuals) == 0L) { stop("No residuals found") } if ("ar" %in% class(object)) { method <- paste("AR(", object$order, ")", sep = "") } else if (!is.null(object$method)) { method <- object$method } else if ("HoltWinters" %in% class(object)) { method <- "HoltWinters" } else if ("StructTS" %in% class(object)) { method <- "StructTS" } else { method <- try(as.character(object), silent = TRUE) if ("try-error" %in% class(method)) { method <- "Missing" } else if (length(method) > 1 | base::nchar(method[1]) > 50) { method <- "Missing" } } if (method == "Missing") { main <- "Residuals" } else { main <- paste("Residuals from", method) } if (plot) { suppressWarnings(ggtsdisplay(residuals, plot.type = "histogram", main = main, ...)) } # Check if we have the model if (is.element("forecast", class(object))) { object <- object$model } if (is.null(object) | !showtest) { return(invisible()) } # Seasonality of data freq <- frequency(residuals) # Find model df #if (grepl("STL \\+ ", method)) { # warning("The fitted degrees of freedom is based on the model used for the seasonally adjusted data.") #} if (inherits(object, "Arima") | test == "BG") { df <- modeldf(object) } else { df <- 0 } if (missing(lag)) { lag <- ifelse(freq > 1, 2 * freq, 10) lag <- min(lag, round(length(residuals) / 5)) lag <- max(df + 3, lag) } if (test == "BG") { # Do Breusch-Godfrey test BGtest <- lmtest::bgtest(object, order = lag) BGtest$data.name <- main # print(BGtest) return(BGtest) } else { # Do Ljung-Box test LBtest <- Box.test(zoo::na.approx(residuals), fitdf = df, lag = lag, type = "Ljung") LBtest$method <- "Ljung-Box test" LBtest$data.name <- main names(LBtest$statistic) <- "Q*" print(LBtest) cat(paste("Model df: ", df, ". Total lags used: ", lag, "\n\n", sep = "")) return(invisible(LBtest)) } } modeldf <- function(object, ...) { UseMethod("modeldf") } modeldf.default <- function(object, ...) { warning("Could not find appropriate degrees of freedom for this model.") NULL } modeldf.ets <- function(object, ...) { length(object$par) } modeldf.Arima <- function(object, ...) { sum(arimaorder(object)[c("p", "q", "P", "Q")], na.rm = TRUE) } modeldf.bats <- function(object, ...) { length(object$parameters$vect) } modeldf.lm <- function(object, ...) { length(object$coefficients) } modeldf.lagwalk <- function(object, ...) { as.numeric(object$par$includedrift) } modeldf.meanf <- function(object, ...) { 1 } forecast/R/wrangle.R0000644000176200001440000000320114150370574014045 0ustar liggesuserstoMat <- function(x) { if (NCOL(x) > 1 && !is.matrix(x)) { x <- matrix(x, ncol = NCOL(x)) } return(x) } # Converts arguments into data.frame, whilst retaining mts/ts/matrix properties datamat <- function(..., flatten=TRUE, functions=TRUE) { vars <- list(...) 
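  # An empty call yields an empty data frame; otherwise each element is named,
  # keeping supplied argument names and falling back to the deparsed calling
  # expressions where no name was given.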
if (length(vars) == 0) { return(data.frame()) } if (!is.null(names(vars))) { names(vars)[!nzchar(names(vars))] <- as.character(substitute(list(...))[-1])[!nzchar(names(vars))] } else { names(vars) <- as.character(substitute(list(...))[-1]) } if (flatten) { i <- 1 while (i <= length(vars)) { if (is.data.frame(vars[[i]])) { vars <- c(vars, c(vars[[i]])) # Append data.frame components vars[[i]] <- NULL # Remove data.frame } else if (is.matrix(vars[[i]])) { for (j in 1:NCOL(vars[[i]])) { vars[[length(vars) + 1]] <- vars[[i]][, j] names(vars)[length(vars)] <- make.names(colnames(vars[[i]])[j]) } i <- i + 1 } else { i <- i + 1 } } } class(vars) <- "data.frame" row.names(vars) <- 1:max(sapply(vars, NROW)) # if(is.ts(vars[,1])){ # if(NCOL(vars)>1){ # class(vars) <- c(class(vars),"mts") # } # class(vars) <- c(class(vars),"ts") # tspx <- unique(sapply(vars,tsp), MARGIN = 2) # if(length(tspx)==3){ # attr(vars, "tsp") <- tspx # } # } return(vars) } recoverTSP <- function(times.x) { freq <- sort(unique(round(times.x %% 1, digits = 6))) # The subset cannot increase frequency freq <- length(freq) return(c(min(times.x), min(times.x) + (length(times.x) - 1) / freq, freq)) } forecast/R/simulate.R0000644000176200001440000006623214323125536014244 0ustar liggesusers#' Simulation from a time series model #' #' Returns a time series based on the model object \code{object}. #' #' With \code{simulate.Arima()}, the \code{object} should be produced by #' \code{\link{Arima}} or \code{\link{auto.arima}}, rather than #' \code{\link[stats]{arima}}. By default, the error series is assumed normally #' distributed and generated using \code{\link[stats]{rnorm}}. If \code{innov} #' is present, it is used instead. If \code{bootstrap=TRUE} and #' \code{innov=NULL}, the residuals are resampled instead. #' #' When \code{future=TRUE}, the sample paths are conditional on the data. When #' \code{future=FALSE} and the model is stationary, the sample paths do not #' depend on the data at all. When \code{future=FALSE} and the model is #' non-stationary, the location of the sample paths is arbitrary, so they all #' start at the value of the first observation. #' #' @param object An object of class "\code{ets}", "\code{Arima}", "\code{ar}" #' or "\code{nnetar}". #' @param nsim Number of periods for the simulated series. Ignored if either #' \code{xreg} or \code{innov} are not \code{NULL}. Otherwise the default is #' the length of series used to train model (or 100 if no data found). #' @param seed Either \code{NULL} or an integer that will be used in a call to #' \code{\link[base]{set.seed}} before simulating the time series. The default, #' \code{NULL}, will not change the random generator state. #' @param future Produce sample paths that are future to and conditional on the #' data in \code{object}. Otherwise simulate unconditionally. #' @param bootstrap Do simulation using resampled errors rather than normally #' distributed errors or errors provided as \code{innov}. #' @param innov A vector of innovations to use as the error series. Ignored if #' \code{bootstrap==TRUE}. If not \code{NULL}, the value of \code{nsim} is set #' to length of \code{innov}. #' @param xreg New values of \code{xreg} to be used for forecasting. The value #' of \code{nsim} is set to the number of rows of \code{xreg} if it is not #' \code{NULL}. #' @param ... Other arguments, not currently used. #' @inheritParams forecast.ts #' #' @return An object of class "\code{ts}". 
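#' When \code{future=TRUE}, the simulated series starts one period after the
#' end of the data used to fit \code{object}, and has the same frequency.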
#' @author Rob J Hyndman #' @seealso \code{\link{ets}}, \code{\link{Arima}}, \code{\link{auto.arima}}, #' \code{\link{ar}}, \code{\link{arfima}}, \code{\link{nnetar}}. #' @keywords ts #' @examples #' fit <- ets(USAccDeaths) #' plot(USAccDeaths, xlim = c(1973, 1982)) #' lines(simulate(fit, 36), col = "red") #' @export simulate.ets <- function(object, nsim = length(object$x), seed = NULL, future = TRUE, bootstrap = FALSE, innov = NULL, ...) { if (is.null(innov)) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) { runif(1) } if (is.null(seed)) { RNGstate <- get(".Random.seed", envir = .GlobalEnv) } else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } } else { nsim <- length(innov) } if (!is.null(object$x)) { if (is.null(tsp(object$x))) { object$x <- ts(object$x, frequency = 1, start = 1) } } else { if (nsim == 0L) { nsim <- 100 } object$x <- ts(10, frequency = object$m, start = 1 / object$m) future <- FALSE } if (future) { initstate <- object$states[length(object$x) + 1, ] } else { # choose a random starting point initstate <- object$states[sample(1:length(object$x), 1), ] } if (bootstrap) { res <- na.omit(c(object$residuals) - mean(object$residuals, na.rm = TRUE)) e <- sample(res, nsim, replace = TRUE) } else if (is.null(innov)) { e <- rnorm(nsim, 0, sqrt(object$sigma2)) } else if (length(innov) == nsim) { e <- innov } else { stop("Length of innov must be equal to nsim") } if (object$components[1] == "M") { e <- pmax(-1, e) } tmp <- ts(.C( "etssimulate", as.double(initstate), as.integer(object$m), as.integer(switch(object$components[1], "A" = 1, "M" = 2 )), as.integer(switch(object$components[2], "N" = 0, "A" = 1, "M" = 2 )), as.integer(switch(object$components[3], "N" = 0, "A" = 1, "M" = 2 )), as.double(object$par["alpha"]), as.double(ifelse(object$components[2] == "N", 0, object$par["beta"])), as.double(ifelse(object$components[3] == "N", 0, object$par["gamma"])), as.double(ifelse(object$components[4] == "FALSE", 1, object$par["phi"])), as.integer(nsim), as.double(numeric(nsim)), as.double(e), PACKAGE = "forecast" )[[11]], frequency = object$m, start = ifelse(future, tsp(object$x)[2] + 1 / tsp(object$x)[3], tsp(object$x)[1])) if (is.na(tmp[1])) { stop("Problem with multiplicative damped trend") } if (!is.null(object$lambda)) { tmp <- InvBoxCox(tmp, object$lambda) } return(tmp) } # Simulate ARIMA model starting with observed data x # Some of this function is borrowed from the arima.sim() function in the stats package. # Note that myarima.sim() does simulation conditional on the values of observed x, whereas # arima.sim() is unconditional on any observed x. myarima.sim <- function(model, n, x, e, ...) 
{ start.innov <- residuals(model) innov <- e data <- x # Remove initial NAs first.nonmiss <- which(!is.na(x))[1] if (first.nonmiss > 1) { tsp.x <- tsp(x) start.x <- tsp.x[1] + (first.nonmiss - 1) / tsp.x[3] x <- window(x, start = start.x) start.innov <- window(start.innov, start = start.x) } model$x <- x n.start <- length(x) x <- ts(c(start.innov, innov), start = 1 - n.start, frequency = model$seasonal.period) flag.noadjust <- FALSE if (is.null(tsp(data))) { data <- ts(data, frequency = 1, start = 1) } if (!is.list(model)) { stop("'model' must be list") } if (n <= 0L) { stop("'n' must be strictly positive") } p <- length(model$ar) q <- length(model$ma) d <- 0 D <- model$seasonal.difference m <- model$seasonal.period if (!is.null(ord <- model$order)) { if (length(ord) != 3L) { stop("'model$order' must be of length 3") } if (p != ord[1L]) { stop("inconsistent specification of 'ar' order") } if (q != ord[3L]) { stop("inconsistent specification of 'ma' order") } d <- ord[2L] if (d != round(d) || d < 0) { stop("number of differences must be a positive integer") } } if (p) { minroots <- min(Mod(polyroot(c(1, -model$ar)))) if (minroots <= 1) { stop("'ar' part of model is not stationary") } } if (length(model$ma)) { # MA filtering x <- stats::filter(x, c(1, model$ma), method = "convolution", sides = 1L) x[seq_along(model$ma)] <- 0 } ## AR "filtering" len.ar <- length(model$ar) if (length(model$ar) && (len.ar <= length(data))) { if ((D != 0) && (d != 0)) { diff.data <- diff(data, lag = 1, differences = d) diff.data <- diff(diff.data, lag = m, differences = D) } else if ((D != 0) && (d == 0)) { diff.data <- diff(data, lag = model$seasonal.period, differences = D) } else if ((D == 0) && (d != 0)) { diff.data <- diff(data, lag = 1, differences = d) } else { diff.data <- data } x.new.innovations <- x[(length(start.innov) + 1):length(x)] x.with.data <- c(diff.data, x.new.innovations) for (i in (length(diff.data) + 1):length(x.with.data)) { lagged.x.values <- x.with.data[(i - len.ar):(i - 1)] ar.coefficients <- model$ar[length(model$ar):1] sum.multiplied.x <- sum((lagged.x.values * ar.coefficients)[abs(ar.coefficients) > .Machine$double.eps]) x.with.data[i] <- x.with.data[i] + sum.multiplied.x } x.end <- x.with.data[(length(diff.data) + 1):length(x.with.data)] x <- ts(x.end, start = 1, frequency = model$seasonal.period) flag.noadjust <- TRUE } else if (length(model$ar)) # but data too short { # AR filtering for all other cases where AR is used. x <- stats::filter(x, model$ar, method = "recursive") } if ((d == 0) && (D == 0) && (flag.noadjust == FALSE)) # Adjust to ensure end matches approximately { # Last 20 diffs if (n.start >= 20) { xdiff <- (model$x - x[1:n.start])[n.start - (19:0)] } else { xdiff <- model$x - x[1:n.start] } # If all same sign, choose last if (all(sign(xdiff) == 1) || all(sign(xdiff) == -1)) { xdiff <- xdiff[length(xdiff)] } else { # choose mean. 
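      # With mixed signs, shift by the average discrepancy instead, so the
      # simulated path joins the end of the observed series smoothly.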
xdiff <- mean(xdiff) } x <- x + xdiff } if ((n.start > 0) && (flag.noadjust == FALSE)) { x <- x[-(1:n.start)] } ######## Undo all differences if ((D > 0) && (d == 0)) { # Seasonal undifferencing, if there is no regular differencing i <- length(data) - D * m + 1 seasonal.xi <- data[i:length(data)] length.s.xi <- length(seasonal.xi) x <- diffinv(x, lag = m, differences = D, xi = seasonal.xi)[-(1:length.s.xi)] } else if ((d > 0) && (D == 0)) { # Regular undifferencing, if there is no seasonal differencing x <- diffinv(x, differences = d, xi = data[length(data) - (d:1) + 1])[-(1:d)] } else if ((d > 0) && (D > 0)) { # Undifferencing for where the differencing is both Seasonal and Non-Seasonal # Regular first delta.four <- diff(data, lag = m, differences = D) regular.xi <- delta.four[(length(delta.four) - D):length(delta.four)] x <- diffinv(x, differences = d, xi = regular.xi[length(regular.xi) - (d:1) + 1])[-(1:d)] # Then seasonal i <- length(data) - D * m + 1 seasonal.xi <- data[i:length(data)] length.s.xi <- length(seasonal.xi) x <- diffinv(x, lag = m, differences = D, xi = seasonal.xi) x <- x[-(1:length.s.xi)] } x <- ts(x[1:n], frequency = frequency(data), start = tsp(data)[2] + 1 / tsp(data)[3]) return(x) } #' @rdname simulate.ets #' @export simulate.Arima <- function(object, nsim = length(object$x), seed = NULL, xreg = NULL, future = TRUE, bootstrap = FALSE, innov = NULL, lambda = object$lambda, ...) { # Error check: if (object$arma[7] < 0) { stop("Value for seasonal difference is < 0. Must be >= 0") } else if ((sum(object$arma[c(3, 4, 7)]) > 0) && (object$arma[5] < 2)) { stop("Invalid value for seasonal period") } # Check if data is included x <- object$x <- getResponse(object) if (is.null(x)) { n <- 0 future <- FALSE if (nsim == 0L) { nsim <- 100 } } else { if (is.null(tsp(x))) { x <- ts(x, frequency = 1, start = 1) } if (!is.null(lambda)) { x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") } n <- length(x) } # Check xreg if (!is.null(xreg)) { xreg <- as.matrix(xreg) nsim <- nrow(xreg) } use.drift <- is.element("drift", names(object$coef)) usexreg <- (!is.null(xreg) | use.drift | !is.null(object$xreg)) xm <- oldxm <- 0 if (use.drift) { # Remove existing drift column if (NCOL(xreg) == 1 && all(diff(xreg) == 1)) { xreg <- NULL } else if (!is.null(colnames(xreg))) { xreg <- xreg[, colnames(xreg) != "drift", drop = FALSE] } # Create new drift column xreg <- cbind(drift = as.matrix(seq(nsim) + n * future), xreg) } # Check xreg has the correct dimensions if (usexreg) { if (is.null(xreg)) { stop("xreg argument missing") } else if (is.null(object$xreg)) { stop("xreg not required") } else if (NCOL(xreg) != NCOL(object$xreg)) { stop("xreg has incorrect dimension.") } } ######## Random Seed Code if (is.null(innov)) { if (!exists(".Random.seed", envir = .GlobalEnv)) { runif(1) } if (is.null(seed)) { RNGstate <- .Random.seed } else { R.seed <- .Random.seed set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } } else { nsim <- length(innov) } ######## End Random seed code # Check for seasonal ARMA components and set flag accordingly. 
This will be used later in myarima.sim() flag.s.arma <- (sum(object$arma[c(3, 4)]) > 0) # Check for Seasonality in ARIMA model if (sum(object$arma[c(3, 4, 7)]) > 0) { # return(simulateSeasonalArima(object, nsim=nsim, seed=seed, xreg=xreg, future=future, bootstrap=bootstrap, ...)) if (sum(object$model$phi) == 0) { ar <- NULL } else { ar <- as.double(object$model$phi) } if (sum(object$model$theta) == 0) { ma <- NULL } else { ma <- as.double(object$model$theta) } order <- c(length(ar), object$arma[6], length(ma)) if (future) { model <- list( order = order, ar = ar, ma = ma, sd = sqrt(object$sigma2), residuals = residuals(object), seasonal.difference = object$arma[7], seasonal.period = object$arma[5], flag.seasonal.arma = flag.s.arma, seasonal.order = object$arma[c(3, 7, 4)] ) } else { model <- list(order = order, ar = ar, ma = ma, sd = sqrt(object$sigma2), residuals = residuals(object)) } flag.seasonal.diff <- (object$arma[7] > 0) } else { #### Non-Seasonal ARIMA specific code: Set up the model order <- object$arma[c(1, 6, 2)] if (order[1] > 0) { ar <- object$model$phi[1:order[1]] } else { ar <- NULL } if (order[3] > 0) { ma <- object$model$theta[1:order[3]] } else { ma <- NULL } if (object$arma[2] != length(ma)) { stop("MA length wrong") } else if (object$arma[1] != length(ar)) { stop("AR length wrong") } if (future) { model <- list( order = object$arma[c(1, 6, 2)], ar = ar, ma = ma, sd = sqrt(object$sigma2), residuals = residuals(object), seasonal.difference = 0, flag.seasonal.arma = flag.s.arma, seasonal.order = c(0, 0, 0), seasonal.period = 1 ) } else { model <- list(order = object$arma[c(1, 6, 2)], ar = ar, ma = ma, sd = sqrt(object$sigma2), residuals = residuals(object)) } flag.seasonal.diff <- FALSE ### End non-seasonal ARIMA specific code } if (bootstrap) { res <- na.omit(c(model$residuals) - mean(model$residuals, na.rm = TRUE)) e <- sample(res, nsim, replace = TRUE) } else if (is.null(innov)) { e <- rnorm(nsim, 0, model$sd) } else if (length(innov) == nsim) { e <- innov } else { stop("Length of innov must be equal to nsim") } narma <- sum(object$arma[1L:4L]) if (length(object$coef) > narma) { if (names(object$coef)[narma + 1L] == "intercept") { xreg <- cbind(intercept = rep(1, nsim), xreg) if (future) { object$xreg <- cbind(intercept = rep(1, n), object$xreg) } } if (!is.null(xreg)) { xm <- if (narma == 0) { drop(as.matrix(xreg) %*% object$coef) } else { drop(as.matrix(xreg) %*% object$coef[-(1L:narma)]) } if (future) { oldxm <- if (narma == 0) { drop(as.matrix(object$xreg) %*% object$coef) } else { drop(as.matrix(object$xreg) %*% object$coef[-(1L:narma)]) } } } } if (future) { sim <- myarima.sim(model, nsim, x - oldxm, e = e) + xm } else { if (flag.seasonal.diff) { zeros <- object$arma[5] * object$arma[7] sim <- arima.sim(model, nsim, innov = e) sim <- diffinv(sim, lag = object$arma[5], differences = object$arma[7])[-(1:zeros)] sim <- tail(sim, nsim) + xm } else { sim <- tail(arima.sim(model, nsim, innov = e), nsim) + xm } if (!is.null(x)) { sim <- ts(sim, start = tsp(x)[1], frequency = tsp(x)[3]) } else { sim <- ts(sim, frequency = object$frequency) } # If model is non-stationary, then condition simulated data on first observation if (!is.null(x) & (model$order[2] > 0 || flag.seasonal.diff)) { sim <- sim - sim[1] + x[1] } } if (!is.null(lambda)) { sim <- InvBoxCox(sim, lambda) } return(sim) } #' @rdname simulate.ets #' @export simulate.ar <- function(object, nsim = object$n.used, seed = NULL, future = TRUE, bootstrap = FALSE, innov = NULL, ...) 
{ if (is.null(innov)) { if (!exists(".Random.seed", envir = .GlobalEnv)) { runif(1) } if (is.null(seed)) { RNGstate <- .Random.seed } else { R.seed <- .Random.seed set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } } else { nsim <- length(innov) } object$x <- getResponse(object) if (is.null(object$x)) { future <- FALSE x.mean <- 0 if (is.null(nsim)) { nsim <- 100 } } else { x.mean <- object$x.mean object$x <- object$x - x.mean } if (future) { model <- list(ar = object$ar, sd = sqrt(object$var.pred), residuals = object$resid, seasonal.difference = 0, seasonal.period = 1, flag.seasonal.arma = FALSE) } else { model <- list(ar = object$ar, sd = sqrt(object$var.pred), residuals = object$resid) } if (bootstrap) { res <- na.omit(c(model$residuals) - mean(model$residuals, na.rm = TRUE)) e <- sample(res, nsim, replace = TRUE) } else if (is.null(innov)) { e <- rnorm(nsim, 0, model$sd) } else if (length(innov) == nsim) { e <- innov } else { stop("Length of innov must be equal to nsim") } if (future) { return(myarima.sim(model, nsim, x = object$x, e = e) + x.mean) } else { return(arima.sim(model, nsim, innov = e) + x.mean) } } #' @rdname simulate.ets #' @export simulate.lagwalk <- function(object, nsim = length(object$x), seed = NULL, future = TRUE, bootstrap = FALSE, innov = NULL, lambda = object$lambda, ...) { if (is.null(innov)) { if (!exists(".Random.seed", envir = .GlobalEnv)) { runif(1) } if (is.null(seed)) { RNGstate <- .Random.seed } else { R.seed <- .Random.seed set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } } else { nsim <- length(innov) } if (bootstrap) { res <- na.omit(c(object$residuals) - mean(object$residuals, na.rm = TRUE)) e <- sample(res, nsim, replace = TRUE) } else if (is.null(innov)) { se <- sqrt(object$sigma2) e <- rnorm(nsim, 0, se) } else { e <- innov } # Cumulate errors lag_grp <- rep_len(seq_len(object$par$lag), length(e)) e <- split(e, lag_grp) cumulative_e <- unsplit(lapply(e, cumsum), lag_grp) # Find starting position x <- object$x if (is.null(x)) { future <- FALSE if (nsim == 0L) { nsim <- 100 } x <- 1 } if (!is.null(lambda)) { x <- BoxCox(x, lambda) } if (future) { start <- tail(x, object$par$lag) } else { start <- head(x, object$par$lag) } # Handle missing values if (any(na_pos <- is.na(start))) { if (!is.null(innov)) { warning("Missing values encountered at simulation starting values, simulating starting values from closest observed value.") } lag_grp <- rep_len(seq_len(object$par$lag), length(x)) start[na_pos] <- vapply(split(x, lag_grp)[na_pos], function(x) { if (future) { x <- rev(x) } pos <- which.min(is.na(x)) x[pos] + sum(rnorm(pos - 1, 0, sqrt(object$sigma2))) }, numeric(1L)) } # Construct simulated ts simdrift <- object$par$drift + rnorm(1, 0, object$par$drift.se) sim <- rep_len(start, nsim) + seq_len(nsim) * simdrift + cumulative_e if (!is.null(lambda)) { sim <- InvBoxCox(sim, lambda) } tspx <- tsp(x) ts(sim, start = ifelse(future, tspx[2] + 1 / tspx[3], tspx[1]), frequency = tspx[3]) } #' @rdname simulate.ets #' @export simulate.fracdiff <- function(object, nsim = object$n, seed = NULL, future = TRUE, bootstrap = FALSE, innov = NULL, ...) 
{ x <- getResponse(object) if (is.null(x)) { future <- FALSE if (is.null(nsim)) { nsim <- 100 } x <- 0 } # Strip initial and final missing values xx <- na.ends(x) n <- length(xx) # Remove mean meanx <- mean(xx) xx <- xx - meanx # Difference series (removes mean as well) y <- undo.na.ends(x, diffseries(xx, d = object$d)) # Create ARMA model for differenced series arma <- Arima( y, order = c(length(object$ar), 0, length(object$ma)), include.mean = FALSE, fixed = c(object$ar, -object$ma) ) # Simulate from ARMA model ysim <- simulate(arma, nsim, seed, future = future, bootstrap = bootstrap, innov = innov) # Undo differencing and add back mean return(unfracdiff(xx, ysim, n, nsim, object$d) + meanx) } #' @rdname simulate.ets #' @export simulate.nnetar <- function(object, nsim = length(object$x), seed = NULL, xreg = NULL, future = TRUE, bootstrap = FALSE, innov = NULL, lambda = object$lambda, ...) { if (is.null(innov)) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) { runif(1) } if (is.null(seed)) { RNGstate <- get(".Random.seed", envir = .GlobalEnv) } else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } } else { nsim <- length(innov) } if (is.null(object$x)) { future <- FALSE } ## only future currently implemented if (!future) { warning("simulate.nnetar() currently only supports future=TRUE") } ## set simulation innovations if (bootstrap) { res <- na.omit(c(residuals(object, type = "innovation"))) res <- res - mean(res) ## scale if appropriate if (!is.null(object$scalex$scale)) { res <- res / object$scalex$scale } e <- sample(res, nsim, replace = TRUE) } else if (is.null(innov)) { res <- na.omit(c(residuals(object, type = "innovation"))) ## scale if appropriate if (!is.null(object$scalex$scale)) { res <- res / object$scalex$scale } e <- rnorm(nsim, 0, sd(res, na.rm = TRUE)) } else if (length(innov) == nsim) { e <- innov if (!is.null(object$scalex$scale)) { e <- e / object$scalex$scale } } else if (isTRUE(innov == 0L)) { ## to pass innov=0 so simulation equals mean forecast e <- rep(innov, nsim) } else { stop("Length of innov must be equal to nsim") } ## tspx <- tsp(object$x) # Check if xreg was used in fitted model if (is.null(object$xreg)) { if (!is.null(xreg)) { warning("External regressors were not used in fitted model, xreg will be ignored") } xreg <- NULL } else { if (is.null(xreg)) { stop("No external regressors provided") } xreg <- as.matrix(xreg) if (NCOL(xreg) != NCOL(object$xreg)) { stop("Number of external regressors does not match fitted model") } if (NROW(xreg) != nsim) { stop("Number of rows in xreg does not match nsim") } } xx <- object$x if (!is.null(lambda)) { xx <- BoxCox(xx, lambda) lambda <- attr(xx, "lambda") } # Check and apply scaling of fitted model if (!is.null(object$scalex)) { xx <- scale(xx, center = object$scalex$center, scale = object$scalex$scale) if (!is.null(xreg)) { xreg <- scale(xreg, center = object$scalexreg$center, scale = object$scalexreg$scale) } } ## Get lags used in fitted model lags <- object$lags maxlag <- max(lags) flag <- rev(tail(xx, n = maxlag)) ## Simulate by iteratively forecasting and adding innovation path <- numeric(nsim) for (i in 1:nsim) { newdata <- c(flag[lags], xreg[i, ]) if (any(is.na(newdata))) { stop("I can't simulate when there are missing values near the end of the series.") } path[i] <- mean(sapply(object$model, predict, newdata = newdata)) + e[i] flag <- c(path[i], 
flag[-maxlag]) } ## Re-scale simulated points if (!is.null(object$scalex)) { path <- path * object$scalex$scale + object$scalex$center } ## Add ts properties path <- ts(path, start = tspx[2] + 1 / tspx[3], frequency = tspx[3]) ## Back-transform simulated points if (!is.null(lambda)) { path <- InvBoxCox(path, lambda) } return(path) } #' @rdname simulate.ets #' @export simulate.modelAR <- function(object, nsim = length(object$x), seed = NULL, xreg = NULL, future = TRUE, bootstrap = FALSE, innov = NULL, lambda = object$lambda, ...) { if (is.null(innov)) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) { runif(1) } if (is.null(seed)) { RNGstate <- get(".Random.seed", envir = .GlobalEnv) } else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } } else { nsim <- length(innov) } if (is.null(object$x)) { future <- FALSE } ## only future currently implemented if (!future) { warning("simulate.modelAR() currently only supports future=TRUE") } ## set simulation innovations if (bootstrap) { res <- na.omit(c(residuals(object, type = "innovation"))) res <- res - mean(res) ## scale if appropriate if (!is.null(object$scalex$scale)) { res <- res / object$scalex$scale } e <- sample(res, nsim, replace = TRUE) } else if (is.null(innov)) { res <- na.omit(c(residuals(object, type = "innovation"))) ## scale if appropriate if (!is.null(object$scalex$scale)) { res <- res / object$scalex$scale } e <- rnorm(nsim, 0, sd(res, na.rm = TRUE)) } else if (length(innov) == nsim) { e <- innov if (!is.null(object$scalex$scale)) { e <- e / object$scalex$scale } } else if (isTRUE(innov == 0L)) { ## to pass innov=0 so simulation equals mean forecast e <- rep(innov, nsim) } else { stop("Length of innov must be equal to nsim") } ## tspx <- tsp(object$x) # Check if xreg was used in fitted model if (is.null(object$xreg)) { if (!is.null(xreg)) { warning("External regressors were not used in fitted model, xreg will be ignored") } xreg <- NULL } else { if (is.null(xreg)) { stop("No external regressors provided") } xreg <- as.matrix(xreg) if (NCOL(xreg) != NCOL(object$xreg)) { stop("Number of external regressors does not match fitted model") } if (NROW(xreg) != nsim) { stop("Number of rows in xreg does not match nsim") } } xx <- object$x if (!is.null(lambda)) { xx <- BoxCox(xx, lambda) lambda <- attr(xx, "lambda") } # Check and apply scaling of fitted model if (!is.null(object$scalex)) { xx <- scale(xx, center = object$scalex$center, scale = object$scalex$scale) if (!is.null(xreg)) { xreg <- scale(xreg, center = object$scalexreg$center, scale = object$scalexreg$scale) } } ## Get lags used in fitted model lags <- object$lags maxlag <- max(lags) flag <- rev(tail(xx, n = maxlag)) ## Simulate by iteratively forecasting and adding innovation path <- numeric(nsim) for (i in 1:nsim) { newdata <- c(flag[lags], xreg[i, ]) if (any(is.na(newdata))) { stop("I can't simulate when there are missing values near the end of the series.") } path[i] <- object$predict.FUN(object$model, newdata) + e[i] flag <- c(path[i], flag[-maxlag]) } ## Re-scale simulated points if (!is.null(object$scalex)) { path <- path * object$scalex$scale + object$scalex$center } ## Add ts properties path <- ts(path, start = tspx[2] + 1 / tspx[3], frequency = tspx[3]) ## Back-transform simulated points if (!is.null(lambda)) { path <- InvBoxCox(path, lambda) } return(path) }
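# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the package source; dataset names
# and argument values are arbitrary). Simulating future sample paths with
# the simulate() methods defined above:
#   fit <- ar(lynx)
#   sim <- simulate(fit, nsim = 20, seed = 123, future = TRUE)   # simulate.ar()
#   fit2 <- nnetar(USAccDeaths)
#   sim2 <- simulate(fit2, nsim = 24, bootstrap = TRUE)          # simulate.nnetar()
# --------------------------------------------------------------------------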
forecast/R/lm.R0000644000176200001440000004716214323125536013032 0ustar liggesusers#' Fit a linear model with time series components #' #' \code{tslm} is used to fit linear models to time series including trend and #' seasonality components. #' #' \code{tslm} is largely a wrapper for \code{\link[stats]{lm}()} except that #' it allows variables "trend" and "season" which are created on the fly from #' the time series characteristics of the data. The variable "trend" is a #' simple time trend and "season" is a factor indicating the season (e.g., the #' month or the quarter depending on the frequency of the data). #' #' @param formula an object of class "formula" (or one that can be coerced to #' that class): a symbolic description of the model to be fitted. #' @param data an optional data frame, list or environment (or object coercible #' by as.data.frame to a data frame) containing the variables in the model. If #' not found in data, the variables are taken from environment(formula), #' typically the environment from which lm is called. #' @param subset an optional subset containing rows of data to keep. For best #' results, pass a logical vector of rows to keep. Also supports #' \code{\link[base]{subset}()} functions. #' @inheritParams forecast.ts #' #' @param ... Other arguments passed to \code{\link[stats]{lm}()} #' @return Returns an object of class "lm". #' @author Mitchell O'Hara-Wild and Rob J Hyndman #' @seealso \code{\link{forecast.lm}}, \code{\link[stats]{lm}}. #' @keywords stats #' @examples #' #' y <- ts(rnorm(120,0,3) + 1:120 + 20*sin(2*pi*(1:120)/12), frequency=12) #' fit <- tslm(y ~ trend + season) #' plot(forecast(fit, h=20)) #' #' @export tslm <- function(formula, data, subset, lambda=NULL, biasadj=FALSE, ...) { cl <- match.call() if (!("formula" %in% class(formula))) { formula <- stats::as.formula(formula) } if (missing(data)) { mt <- try(terms(formula)) if (is.element("try-error", class(mt))) { stop("Cannot extract terms from formula, please provide data argument.") } } else { mt <- terms(formula, data = data) } ## Categorise formula variables into time-series, functions, and data. vars <- attr(mt, "variables") # Check for time series variables tsvar <- match(c("trend", "season"), as.character(vars), 0L) # Check for functions (which should be evaluated later, in lm) fnvar <- NULL for (i in 2:length(vars)) { term <- vars[[i]] if (!is.symbol(term)) { if (typeof(eval(term[[1]])) == "closure") { # If this term is a function (alike fourier) fnvar <- c(fnvar, i) } } } ## Fix formula's environment for correct `...` scoping. 
attr(formula, ".Environment") <- environment() ## Ensure response variable is taken from dataset (including transformations) formula[[2]] <- as.symbol(deparse(formula[[2]])) if (sum(c(tsvar, fnvar)) > 0) { # Remove variables not needed in data (trend+season+functions) rmvar <- c(tsvar, fnvar) rmvar <- rmvar[rmvar != attr(mt, "response") + 1] # Never remove the reponse variable if (any(rmvar != 0)) { vars <- vars[-rmvar] } } ## Grab any variables missing from data if (!missing(data)) { # Check for any missing variables in data vars <- vars[c(TRUE, !as.character(vars[-1]) %in% colnames(data))] dataname <- substitute(data) } if (!missing(data)) { data <- datamat(do.call(datamat, as.list(vars[-1]), envir = parent.frame()), data) } else { data <- do.call(datamat, as.list(vars[-1]), envir = parent.frame()) } ## Set column name of univariate dataset if (is.null(dim(data)) && length(data) != 0) { cn <- as.character(vars)[2] } else { cn <- colnames(data) } ## Get time series attributes from the data if (is.null(tsp(data))) { if ((attr(mt, "response") + 1) %in% fnvar) { # Check unevaluated response variable tspx <- tsp(eval(attr(mt, "variables")[[attr(mt, "response") + 1]])) } tspx <- tsp(data[, 1]) # Check for complex ts data.frame } else { tspx <- tsp(data) } if (is.null(tspx)) { stop("Not time series data, use lm()") } tsdat <- match(c("trend", "season"), cn, 0L) ## Create trend and season if missing from the data if (tsdat[1] == 0) { # &tsvar[1]!=0){#If "trend" is not in data, but is in formula trend <- 1:NROW(data) cn <- c(cn, "trend") data <- cbind(data, trend) } if (tsdat[2] == 0) { # &tsvar[2]!=0){#If "season" is not in data, but is in formula if (tsvar[2] != 0 && tspx[3] <= 1) { # Nonseasonal data, and season requested stop("Non-seasonal data cannot be modelled using a seasonal factor") } season <- as.factor(cycle(data[, 1])) cn <- c(cn, "season") data <- cbind(data, season) } colnames(data) <- cn ## Subset the data according to subset argument if (!missing(subset)) { if (!is.logical(subset)) { stop("subset must be logical") } else if (NCOL(subset) > 1) { stop("subset must be a logical vector") } else if (NROW(subset) != NROW(data)) { stop("Subset must be the same length as the number of rows in the dataset") } warning("Subset has been assumed contiguous") timesx <- time(data[, 1])[subset] tspx <- recoverTSP(timesx) if (tspx[3] == 1 && tsdat[2] == 0 && tsvar[2] != 0) { stop("Non-seasonal data cannot be modelled using a seasonal factor") } data <- data[subset, ] # model.frame(formula,as.data.frame(data[subsetTF,])) } if (!is.null(lambda)) { resp_var <- deparse(attr(mt, "variables")[[attr(mt, "response") + 1]]) data[, resp_var] <- BoxCox(data[, resp_var], lambda) lambda <- attr(data[, resp_var], "lambda") } if (tsdat[2] == 0 && tsvar[2] != 0) { data$season <- factor(data$season) # fix for lost factor information, may not be needed? } ## Fit the model and prepare model structure fit <- lm(formula, data = data, na.action = na.exclude, ...) 
fit$data <- data responsevar <- deparse(formula[[2]]) fit$residuals <- ts(residuals(fit)) fit$x <- fit$residuals fit$x[!is.na(fit$x)] <- model.frame(fit)[, responsevar] fit$fitted.values <- ts(fitted(fit)) tsp(fit$residuals) <- tsp(fit$x) <- tsp(fit$fitted.values) <- tsp(data[, 1]) <- tspx fit$call <- cl fit$method <- "Linear regression model" if (exists("dataname")) { fit$call$data <- dataname } if (!is.null(lambda)) { attr(lambda, "biasadj") <- biasadj fit$lambda <- lambda fit$fitted.values <- InvBoxCox(fit$fitted.values, lambda, biasadj, var(fit$residuals)) fit$x <- InvBoxCox(fit$x, lambda) } class(fit) <- c("tslm", class(fit)) return(fit) } #' @export fitted.tslm <- function(object, ...){ object$fitted.values } #' Forecast a linear model with possible time series components #' #' \code{forecast.lm} is used to predict linear models, especially those #' involving trend and seasonality components. #' #' \code{forecast.lm} is largely a wrapper for #' \code{\link[stats]{predict.lm}()} except that it allows variables "trend" #' and "season" which are created on the fly from the time series #' characteristics of the data. Also, the output is reformatted into a #' \code{forecast} object. #' #' @param object Object of class "lm", usually the result of a call to #' \code{\link[stats]{lm}} or \code{\link{tslm}}. #' @param newdata An optional data frame in which to look for variables with #' which to predict. If omitted, it is assumed that the only variables are #' trend and season, and \code{h} forecasts are produced. #' @param level Confidence level for prediction intervals. #' @param fan If \code{TRUE}, level is set to seq(51,99,by=3). This is suitable #' for fan plots. #' @param h Number of periods for forecasting. Ignored if \code{newdata} #' present. #' @param ts If \code{TRUE}, the forecasts will be treated as time series #' provided the original data is a time series; the \code{newdata} will be #' interpreted as related to the subsequent time periods. If \code{FALSE}, any #' time series attributes of the original data will be ignored. #' @param ... Other arguments passed to \code{\link[stats]{predict.lm}()}. #' @inheritParams forecast.ts #' #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{forecast.lm}. #' #' An object of class \code{"forecast"} is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The historical data for #' the response variable.} \item{residuals}{Residuals from the fitted model. #' That is x minus fitted values.} \item{fitted}{Fitted values} #' @author Rob J Hyndman #' @seealso \code{\link{tslm}}, \code{\link[stats]{lm}}. 
#' @keywords stats #' @examples #' #' y <- ts(rnorm(120,0,3) + 1:120 + 20*sin(2*pi*(1:120)/12), frequency=12) #' fit <- tslm(y ~ trend + season) #' plot(forecast(fit, h=20)) #' #' @export forecast.lm <- function(object, newdata, h=10, level=c(80, 95), fan=FALSE, lambda=object$lambda, biasadj=NULL, ts=TRUE, ...) { if(h < 1) { stop("The forecast horizon must be at least 1.") } if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } if (!is.null(object$data)) { origdata <- object$data } # no longer exists else if (!is.null(object$model)) { origdata <- object$model } else if (!is.null(object$call$data)) { origdata <- try(object$data <- eval(object$call$data), silent = TRUE) if (is.element("try-error", class(origdata))) { stop("Could not find data. Try training your model using tslm() or attach data directly to the object via object$data<-modeldata for some object<-lm(formula,modeldata).") } } else { origdata <- as.data.frame(fitted(object) + residuals(object)) } if (!is.element("data.frame", class(origdata))) { origdata <- as.data.frame(origdata) if (!is.element("data.frame", class(origdata))) { stop("Could not find data. Try training your model using tslm() or attach data directly to the object via object$data<-modeldata for some object<-lm(formula,modeldata).") } } # Check if the forecasts will be time series if (ts && is.element("ts", class(origdata))) { tspx <- tsp(origdata) timesx <- time(origdata) } else if (ts && is.element("ts", class(origdata[, 1]))) { tspx <- tsp(origdata[, 1]) timesx <- time(origdata[, 1]) } else if (ts && is.element("ts", class(fitted(object)))) { tspx <- tsp(fitted(object)) timesx <- time(fitted(object)) } else { tspx <- NULL } # if(!is.null(object$call$subset)) # { # j <- eval(object$call$subset) # origdata <- origdata[j,] # if(!is.null(tspx)) # { # # Try to figure out times for subset. Assume they are contiguous. # timesx <- timesx[j] # tspx <- tsp(origdata) <- c(min(timesx),max(timesx),tspx[3]) # } # } # Add trend and seasonal to data frame oldterms <- terms(object) # Adjust terms for function variables and rename datamat colnames to match. 
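# (Function-call terms such as fourier() are pre-evaluated in the block below
# and substituted into the data as solvedFN___<i> columns.)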
if (!missing(newdata)) { reqvars <- as.character(attr(object$terms, "variables")[-1])[-attr(object$terms, "response")] # Search for time series variables tsvar <- match(c("trend", "season"), reqvars, 0L) # Check if required variables are functions fnvar <- sapply(reqvars, function(x) !(is.symbol(parse(text = x)[[1]]) || typeof(eval(parse(text = x)[[1]][[1]])) != "closure")) if (!is.data.frame(newdata)) { newdata <- datamat(newdata) colnames(newdata)[1] <- ifelse(sum(tsvar > 0), reqvars[-tsvar][1], reqvars[1]) warning("newdata column names not specified, defaulting to first variable required.") } oldnewdata <- newdata newvars <- make.names(colnames(newdata)) # Check if variables are missing misvar <- match(make.names(reqvars), newvars, 0L) == 0L if (any(!misvar & !fnvar)) { # If any variables are not missing/functions, add them to data tmpdata <- datamat(newdata[reqvars[!misvar]]) rm1 <- FALSE } else { # Prefill the datamat tmpdata <- datamat(1:NROW(newdata)) rm1 <- TRUE } # Remove trend and seasonality from required variables if (sum(tsvar) > 0) { reqvars <- reqvars[-tsvar] fnvar <- fnvar[-tsvar] misvar <- match(make.names(reqvars), newvars, 0L) == 0L } if (any(misvar | fnvar)) { # If any variables are missing/functions reqvars <- reqvars[misvar | fnvar] # They are required fnvar <- fnvar[misvar | fnvar] # Update required function variables for (i in reqvars) { found <- FALSE subvars <- NULL for (j in 1:length(object$coefficients)) { subvars[j] <- pmatch(i, names(object$coefficients)[j]) } subvars <- !is.na(subvars) subvars <- names(object$coefficients)[subvars] # Detect if subvars is multivariate if (length(subvars) > 1) { # Extract prefix only subvars <- substr(subvars, nchar(i) + 1, 999L) fsub <- match(make.names(subvars), newvars, 0L) if (any(fsub == 0)) { # Check for misnamed columns fsub <- grep(paste(make.names(subvars), collapse = "|"), newvars) } if (all(fsub != 0) && length(fsub) == length(subvars)) { imat <- as.matrix(newdata[, fsub], ncol = length(fsub)) colnames(imat) <- subvars tmpdata[[length(tmpdata) + 1]] <- imat found <- TRUE } else { # Attempt to evaluate it as a function subvars <- i } } if (length(subvars) == 1) { # Check if it is a function if (fnvar[match(i, reqvars)]) { # Pre-evaluate function from data tmpdata[[length(tmpdata) + 1]] <- eval(parse(text = subvars)[[1]], newdata) found <- TRUE } } if (found) { names(tmpdata)[length(tmpdata)] <- paste0("solvedFN___", match(i, reqvars)) subvarloc <- match(i, lapply(attr(object$terms, "predvars"), deparse)) attr(object$terms, "predvars")[[subvarloc]] <- attr(object$terms, "variables")[[subvarloc]] <- parse(text = paste0("solvedFN___", match(i, reqvars)))[[1]] } else { warning(paste0("Could not find required variable ", i, " in newdata. Specify newdata as a named data.frame")) } } } if (rm1) { tmpdata[[1]] <- NULL } newdata <- cbind(newdata, tmpdata) h <- nrow(newdata) } if (!is.null(tspx)) { # Always generate trend series trend <- ifelse(is.null(origdata$trend), NCOL(origdata), max(origdata$trend)) + seq_len(h) if (!missing(newdata)) { newdata <- cbind(newdata, trend) } else { newdata <- datamat(trend) } # Always generate season series x <- ts(seq_len(h), start = tspx[2] + 1 / tspx[3], frequency = tspx[3]) season <- as.factor(cycle(x)) newdata <- cbind(newdata, season) } newdata <- as.data.frame(newdata) if (!exists("oldnewdata")) { oldnewdata <- newdata } # If only one column, assume its name.
if (ncol(newdata) == 1 && colnames(newdata)[1] == "newdata") { colnames(newdata) <- as.character(formula(object$model))[3] } # Check regressors included in newdata. # Not working so removed for now. # xreg <- attributes(terms(object$model))$term.labels # if(any(!is.element(xreg,colnames(newdata)))) # stop("Predictor variables not included") object$x <- getResponse(object) # responsevar <- as.character(formula(object$model))[2] # responsevar <- gsub("`","",responsevar) # object$x <- model.frame(object$model)[,responsevar] # Remove missing values from residuals predict_object <- object predict_object$residuals <- na.omit(as.numeric(object$residuals)) out <- list() nl <- length(level) for (i in 1:nl) out[[i]] <- predict(predict_object, newdata = newdata, se.fit = TRUE, interval = "prediction", level = level[i] / 100, ...) if (nrow(newdata) != length(out[[1]]$fit[, 1])) { stop("Variables not found in newdata") } object$terms <- oldterms if (is.null(object$series)) { # Model produced via lm(), add series attribute object$series <- deparse(attr(oldterms, "variables")[[1 + attr(oldterms, "response")]]) } fcast <- list( model = object, mean = out[[1]]$fit[, 1], lower = out[[1]]$fit[, 2], upper = out[[1]]$fit[, 3], level = level, x = object$x, series = object$series ) fcast$method <- "Linear regression model" fcast$newdata <- oldnewdata fcast$residuals <- residuals(object) fcast$fitted <- fitted(object) if (NROW(origdata) != NROW(fcast$x)) { # Give up on ts attributes as some data are missing tspx <- NULL } if (NROW(fcast$x) != NROW(fcast$residuals)) { tspx <- NULL } if (!is.null(tspx)) { fcast$x <- ts(fcast$x) fcast$residuals <- ts(fcast$residuals) fcast$fitted <- ts(fcast$fitted) tsp(fcast$x) <- tsp(fcast$residuals) <- tsp(fcast$fitted) <- tspx } if (nl > 1) { for (i in 2:nl) { fcast$lower <- cbind(fcast$lower, out[[i]]$fit[, 2]) fcast$upper <- cbind(fcast$upper, out[[i]]$fit[, 3]) } } if (!is.null(tspx)) { fcast$mean <- ts(fcast$mean, start = tspx[2] + 1 / tspx[3], frequency = tspx[3]) fcast$upper <- ts(fcast$upper, start = tspx[2] + 1 / tspx[3], frequency = tspx[3]) fcast$lower <- ts(fcast$lower, start = tspx[2] + 1 / tspx[3], frequency = tspx[3]) } if (!is.null(lambda)) { #fcast$x <- InvBoxCox(fcast$x, lambda) fcast$mean <- InvBoxCox(fcast$mean, lambda, biasadj, fcast) fcast$lower <- InvBoxCox(fcast$lower, lambda) fcast$upper <- InvBoxCox(fcast$upper, lambda) } return(structure(fcast, class = "forecast")) } #' @export summary.tslm <- function(object, ...) { # Remove NA from object structure as summary.lm() expects (#836) object$residuals <- na.omit(as.numeric(object$residuals)) object$fitted.values <- na.omit(as.numeric(object$fitted.values)) if(!is.null(object$lambda)) { object$fitted.values <- BoxCox(object$fitted.values, object$lambda) } NextMethod() } # Compute cross-validation and information criteria from a linear model #' Cross-validation statistic #' #' Computes the leave-one-out cross-validation statistic (the mean of PRESS #' -- prediction residual sum of squares), AIC, corrected AIC, BIC and adjusted #' R^2 values for a linear model. #' #' #' @param obj output from \code{\link[stats]{lm}} or \code{\link{tslm}} #' @return Numerical vector containing CV, AIC, AICc, BIC and AdjR2 values. 
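#' @details The leave-one-out CV statistic is computed without refitting the
#' model n times; it uses the PRESS identity
#' \code{CV = mean((residuals(obj) / (1 - hatvalues(obj)))^2)},
#' matching the computation in the function body.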
#' @author Rob J Hyndman #' @seealso \code{\link[stats]{AIC}} #' @keywords models #' @examples #' #' y <- ts(rnorm(120,0,3) + 20*sin(2*pi*(1:120)/12), frequency=12) #' fit1 <- tslm(y ~ trend + season) #' fit2 <- tslm(y ~ season) #' CV(fit1) #' CV(fit2) #' #' @export CV <- function(obj) { if (!is.element("lm", class(obj))) { stop("This function is for objects of class lm") } n <- length(obj$residuals) k <- extractAIC(obj)[1] - 1 # number of predictors (constant removed) aic <- extractAIC(obj)[2] + 2 # add 2 for the variance estimate aicc <- aic + 2 * (k + 2) * (k + 3) / (n - k - 3) bic <- aic + (k + 2) * (log(n) - 2) cv <- mean((residuals(obj) / (1 - hatvalues(obj))) ^ 2, na.rm = TRUE) adjr2 <- summary(obj)$adj out <- c(cv, aic, aicc, bic, adjr2) names(out) <- c("CV", "AIC", "AICc", "BIC", "AdjR2") return(out) } forecast/R/forecastTBATS.R0000644000176200001440000001333414323125536015020 0ustar liggesusers#' @rdname forecast.bats #' @export forecast.tbats <- function(object, h, level = c(80, 95), fan = FALSE, biasadj = NULL, ...) { # Check if forecast.tbats called incorrectly if (identical(class(object), "bats")) { return(forecast.bats(object, h, level, fan, biasadj, ...)) } # Set up the variables if (any(class(object$y) == "ts")) { ts.frequency <- frequency(object$y) } else { ts.frequency <- ifelse(!is.null(object$seasonal.periods), max(object$seasonal.periods), 1) } if (missing(h)) { if (is.null(object$seasonal.periods)) { h <- ifelse(ts.frequency == 1, 10, 2 * ts.frequency) } else { h <- 2 * max(object$seasonal.periods) } } else if (h <= 0) { stop("Forecast horizon out of bounds") } if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } if (!is.null(object$k.vector)) { tau <- 2 * sum(object$k.vector) } else { tau <- 0 } x <- matrix(0, nrow = nrow(object$x), ncol = h) y.forecast <- numeric(h) if (!is.null(object$beta)) { adj.beta <- 1 } else { adj.beta <- 0 } # Set up the matrices w <- .Call("makeTBATSWMatrix", smallPhi_s = object$damping.parameter, kVector_s = as.integer(object$k.vector), arCoefs_s = object$ar.coefficients, maCoefs_s = object$ma.coefficients, tau_s = as.integer(tau), PACKAGE = "forecast") if (!is.null(object$seasonal.periods)) { gamma.bold <- matrix(0, nrow = 1, ncol = tau) .Call("updateTBATSGammaBold", gammaBold_s = gamma.bold, kVector_s = as.integer(object$k.vector), gammaOne_s = object$gamma.one.values, gammaTwo_s = object$gamma.two.values, PACKAGE = "forecast") } else { gamma.bold <- NULL } g <- matrix(0, nrow = (tau + 1 + adj.beta + object$p + object$q), ncol = 1) if (object$p != 0) { g[(1 + adj.beta + tau + 1), 1] <- 1 } if (object$q != 0) { g[(1 + adj.beta + tau + object$p + 1), 1] <- 1 } .Call("updateTBATSGMatrix", g_s = g, gammaBold_s = gamma.bold, alpha_s = object$alpha, beta_s = object$beta.v, PACKAGE = "forecast") # print(g) F <- makeTBATSFMatrix(alpha = object$alpha, beta = object$beta, small.phi = object$damping.parameter, seasonal.periods = object$seasonal.periods, k.vector = as.integer(object$k.vector), gamma.bold.matrix = gamma.bold, ar.coefs = object$ar.coefficients, ma.coefs = object$ma.coefficients) # Do the forecast y.forecast[1] <- w$w.transpose %*% object$x[, ncol(object$x)] x[, 1] <- F %*% object$x[, ncol(object$x)] # + g %*% object$errors[length(object$errors)] if (h > 1) { for (t in 2:h) { x[, t] <- F %*% x[, (t - 1)] y.forecast[t] <- w$w.transpose %*% x[, (t - 1)] } } ## Make prediction intervals 
here lower.bounds <- upper.bounds <- matrix(NA, ncol = length(level), nrow = h) variance.multiplier <- numeric(h) variance.multiplier[1] <- 1 if (h > 1) { for (j in 1:(h - 1)) { if (j == 1) { f.running <- diag(ncol(F)) } else { f.running <- f.running %*% F } c.j <- w$w.transpose %*% f.running %*% g variance.multiplier[(j + 1)] <- variance.multiplier[j] + c.j^2 } } variance <- object$variance * variance.multiplier # print(variance) st.dev <- sqrt(variance) for (i in 1:length(level)) { marg.error <- st.dev * abs(qnorm((100 - level[i]) / 200)) lower.bounds[, i] <- y.forecast - marg.error upper.bounds[, i] <- y.forecast + marg.error } # Inv Box Cox transform if required if (!is.null(object$lambda)) { y.forecast <- InvBoxCox(y.forecast, object$lambda, biasadj, list(level = level, upper = upper.bounds, lower = lower.bounds)) lower.bounds <- InvBoxCox(lower.bounds, object$lambda) if (object$lambda < 1) { lower.bounds <- pmax(lower.bounds, 0) } upper.bounds <- InvBoxCox(upper.bounds, object$lambda) } colnames(upper.bounds) <- colnames(lower.bounds) <- paste0(level, "%") forecast.object <- list( model = object, mean = future_msts(object$y, y.forecast), level = level, x = object$y, series = object$series, upper = future_msts(object$y, upper.bounds), lower = future_msts(object$y, lower.bounds), fitted = copy_msts(object$y, object$fitted.values), method = as.character(object), residuals = copy_msts(object$y, object$errors) ) if (is.null(object$series)) { forecast.object$series <- deparse(object$call$y) } class(forecast.object) <- "forecast" return(forecast.object) } #' @export as.character.tbats <- function(x, ...) { name <- "TBATS(" if (!is.null(x$lambda)) { name <- paste(name, round(x$lambda, digits = 3), sep = "") } else { name <- paste(name, "1", sep = "") } name <- paste(name, ", {", sep = "") if (!is.null(x$ar.coefficients)) { name <- paste(name, length(x$ar.coefficients), sep = "") } else { name <- paste(name, "0", sep = "") } name <- paste(name, ",", sep = "") if (!is.null(x$ma.coefficients)) { name <- paste(name, length(x$ma.coefficients), sep = "") } else { name <- paste(name, "0", sep = "") } name <- paste(name, "}, ", sep = "") if (!is.null(x$damping.parameter)) { name <- paste(name, round(x$damping.parameter, digits = 3), ",", sep = "") } else { name <- paste(name, "-,", sep = "") } if (!is.null(x$seasonal.periods)) { name <- paste(name, " {", sep = "") M <- length(x$seasonal.periods) for (i in 1:M) { name <- paste(name, "<", round(x$seasonal.periods[i], 2), ",", x$k.vector[i], ">", sep = "") if (i < M) { name <- paste(name, ", ", sep = "") } else { name <- paste(name, "})", sep = "") } } } else { name <- paste(name, "{-})", sep = "") } return(name) } forecast/R/data.R0000644000176200001440000000321014150370574013317 0ustar liggesusers #' Australian monthly gas production #' #' Australian monthly gas production: 1956--1995. #' #' #' @format Time series data #' @source Australian Bureau of Statistics. #' @keywords datasets #' @examples #' plot(gas) #' seasonplot(gas) #' tsdisplay(gas) #' "gas" #' Daily morning gold prices #' #' Daily morning gold prices in US dollars. 1 January 1985 -- 31 March 1989. #' #' #' @format Time series data #' @keywords datasets #' @examples #' tsdisplay(gold) #' "gold" #' Half-hourly electricity demand #' #' Half-hourly electricity demand in England and Wales from Monday 5 June 2000 #' to Sunday 27 August 2000. Discussed in Taylor (2003), and kindly provided by #' James W Taylor. Units: Megawatts #' #' #' @format Time series data #' @references Taylor, J.W. 
(2003) Short-term electricity demand forecasting #' using double seasonal exponential smoothing. \emph{Journal of the #' Operational Research Society}, \bold{54}, 799-805. #' @source James W Taylor #' @keywords datasets #' @examples #' plot(taylor) #' "taylor" #' Australian total wine sales #' #' Australian total wine sales by wine makers in bottles <= 1 litre. Jan 1980 #' -- Aug 1994. #' #' #' @format Time series data #' @source Time Series Data Library. \url{https://pkg.yangzhuoranyang.com/tsdl/} #' @keywords datasets #' @examples #' tsdisplay(wineind) #' "wineind" #' Quarterly production of woollen yarn in Australia #' #' Quarterly production of woollen yarn in Australia: tonnes. Mar 1965 -- Sep #' 1994. #' #' #' @format Time series data #' @source Time Series Data Library. \url{https://pkg.yangzhuoranyang.com/tsdl/} #' @keywords datasets #' @examples #' tsdisplay(woolyrnq) #' "woolyrnq" forecast/R/arfima.R0000644000176200001440000002525414323125536013657 0ustar liggesusers# Remove missing values from end points na.ends <- function(x) { tspx <- tsp(x) # Strip initial and final missing values nonmiss <- (1:length(x))[!is.na(x)] if (length(nonmiss) == 0) { stop("No non-missing data") } j <- nonmiss[1] k <- nonmiss[length(nonmiss)] x <- x[j:k] if (!is.null(tspx)) { x <- ts(x, start = tspx[1] + (j - 1) / tspx[3], frequency = tspx[3]) } return(x) } # Add back missing values at ends # x is original series. y is the series with NAs removed at ends. # returns y with the nas put back at beginning but not end. undo.na.ends <- function(x, y) { n <- length(x) nonmiss <- (1:length(x))[!is.na(x)] j <- nonmiss[1] k <- nonmiss[length(nonmiss)] if (j > 1) { y <- c(rep(NA, j - 1), y) } if (k < n) { y <- c(y, rep(NA, n - k)) } tspx <- tsp(x) if (!is.null(tspx)) { tsp(y) <- tsp(x) } return(y) } ## Undifference unfracdiff <- function(x, y, n, h, d) { bin.c <- (-1) ^ (0:(n + h)) * choose(d, (0:(n + h))) b <- numeric(n) xnew <- LHS <- numeric(h) RHS <- cumsum(y) bs <- cumsum(bin.c[1:h]) b <- bin.c[(1:n) + 1] xnew[1] <- RHS[1] <- y[1] - sum(b * rev(x)) if (h > 1) { for (k in 2:h) { b <- b + bin.c[(1:n) + k] RHS[k] <- RHS[k] - sum(b * rev(x)) LHS[k] <- sum(rev(xnew[1:(k - 1)]) * bs[2:k]) xnew[k] <- RHS[k] - LHS[k] } } tspx <- tsp(x) if (is.null(tspx)) { tspx <- c(1, length(x), 1) } return(ts(xnew, frequency = tspx[3], start = tspx[2] + 1 / tspx[3])) } ## Automatic ARFIMA modelling ## Will return Arima object if d < 0.01 to prevent estimation problems #' Fit a fractionally differenced ARFIMA model #' #' An ARFIMA(p,d,q) model is selected and estimated automatically using the #' Hyndman-Khandakar (2008) algorithm to select p and q and the Haslett and #' Raftery (1989) algorithm to estimate the parameters including d. #' #' This function combines \code{\link[fracdiff]{fracdiff}} and #' \code{\link{auto.arima}} to automatically select and estimate an ARFIMA #' model. The fractional differencing parameter is chosen first assuming an #' ARFIMA(2,d,0) model. Then the data are fractionally differenced using the #' estimated d and an ARMA model is selected for the resulting time series #' using \code{\link{auto.arima}}. Finally, the full ARFIMA(p,d,q) model is #' re-estimated using \code{\link[fracdiff]{fracdiff}}. If \code{estim=="mle"}, #' the ARMA coefficients are refined using \code{\link[stats]{arima}}. #' #' @param y a univariate time series (numeric vector). #' @param drange Allowable values of d to be considered. Default of #' \code{c(0,0.5)} ensures a stationary model is returned. 
#' @param estim If \code{estim=="ls"}, then the ARMA parameters are calculated #' using the Haslett-Raftery algorithm. If \code{estim=="mle"}, then the ARMA #' parameters are calculated using full MLE via the \code{\link[stats]{arima}} #' function. #' @param model Output from a previous call to \code{arfima}. If model is #' passed, this same model is fitted to y without re-estimating any parameters. #' @param x Deprecated. Included for backwards compatibility. #' @param \dots Other arguments passed to \code{\link{auto.arima}} when #' selecting p and q. #' @inheritParams forecast.ts #' #' @return A list object of S3 class \code{"fracdiff"}, which is described in #' the \code{\link[fracdiff]{fracdiff}} documentation. A few additional objects #' are added to the list including \code{x} (the original time series), and the #' \code{residuals} and \code{fitted} values. #' #' @export #' #' @author Rob J Hyndman and Farah Yasmeen #' @seealso \code{\link[fracdiff]{fracdiff}}, \code{\link{auto.arima}}, #' \code{\link{forecast.fracdiff}}. #' @references J. Haslett and A. E. Raftery (1989) Space-time Modelling with #' Long-memory Dependence: Assessing Ireland's Wind Power Resource (with #' discussion); \emph{Applied Statistics} \bold{38}, 1-50. #' #' Hyndman, R.J. and Khandakar, Y. (2008) "Automatic time series forecasting: #' The forecast package for R", \emph{Journal of Statistical Software}, #' \bold{26}(3). #' @keywords ts #' @examples #' #' library(fracdiff) #' x <- fracdiff.sim( 100, ma=-.4, d=.3)$series #' fit <- arfima(x) #' tsdisplay(residuals(fit)) #' arfima <- function(y, drange = c(0, 0.5), estim = c("mle", "ls"), model = NULL, lambda = NULL, biasadj = FALSE, x=y, ...) { estim <- match.arg(estim) # require(fracdiff) seriesname <- deparse(substitute(y)) orig.x <- x if (!is.null(lambda)) { x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") } # Re-fit arfima model if (!is.null(model)) { fit <- model fit$residuals <- fit$fitted <- fit$lambda <- NULL if (!is.null(lambda)) { fit$lambda <- lambda # Required for residuals.fracdiff() } } # Estimate model else { # Strip initial and final missing values xx <- na.ends(x) # Remove mean meanx <- mean(xx) xx <- xx - meanx # Choose differencing parameter with AR(2) proxy to handle correlations suppressWarnings(fit <- fracdiff::fracdiff(xx, nar = 2, drange = drange)) # Choose p and q d <- fit$d y <- fracdiff::diffseries(xx, d = d) fit <- auto.arima(y, max.P = 0, max.Q = 0, stationary = TRUE, ...) # Refit model using fracdiff suppressWarnings(fit <- fracdiff::fracdiff(xx, nar = fit$arma[1], nma = fit$arma[2], drange = drange)) # Refine parameters with MLE if (estim == "mle") { y <- fracdiff::diffseries(xx, d = fit$d) p <- length(fit$ar) q <- length(fit$ma) fit2 <- try(Arima(y, order = c(p, 0, q), include.mean = FALSE)) if (is.element("try-error", class(fit2))) { fit2 <- try(Arima(y, order = c(p, 0, q), include.mean = FALSE, method = "ML")) } if (!is.element("try-error", class(fit2))) { if (p > 0) { fit$ar <- fit2$coef[1:p] } if (q > 0) { fit$ma <- -fit2$coef[p + (1:q)] } fit$residuals <- fit2$residuals } else { warning("MLE estimation failed. 
Returning LS estimates") } } } # Add things to model that will be needed by forecast.fracdiff fit$x <- orig.x fit$residuals <- undo.na.ends(x, residuals(fit)) fit$fitted <- x - fit$residuals if (!is.null(lambda)) { fit$fitted <- InvBoxCox(fit$fitted, lambda, biasadj, var(fit$residuals)) attr(lambda, "biasadj") <- biasadj } fit$lambda <- lambda fit$call <- match.call() fit$series <- seriesname fit <- structure(fit, class = c("ARFIMA","fracdiff")) # fit$call$data <- data.frame(x=x) #Consider replacing fit$call with match.call for consistency and tidyness return(fit) } # Forecast the output of fracdiff() or arfima() #' @rdname forecast.Arima #' @export forecast.fracdiff <- function(object, h=10, level=c(80, 95), fan=FALSE, lambda=object$lambda, biasadj=NULL, ...) { # Extract data x <- object$x <- getResponse(object) if (is.null(x)) { stop("Unable to find original time series") } if (!is.null(lambda)) { x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") } xx <- na.ends(x) n <- length(xx) meanx <- mean(xx) xx <- xx - meanx # Construct ARMA part of model and forecast with it y <- fracdiff::diffseries(xx, d = object$d) fit <- Arima(y, order = c(length(object$ar), 0, length(object$ma)), include.mean = FALSE, fixed = c(object$ar, -object$ma)) fcast.y <- forecast(fit, h = h, level = level) # Undifference fcast.x <- unfracdiff(xx, fcast.y$mean, n, h, object$d) # Binomial coefficient for expansion of d bin.c <- (-1) ^ (0:(n + h)) * choose(object$d, (0:(n + h))) # Cumulative forecasts of y and forecast of y # b <- numeric(n) # fcast.x <- LHS <- numeric(h) # RHS <- cumsum(fcast.y$mean) # bs <- cumsum(bin.c[1:h]) # b <- bin.c[(1:n)+1] # fcast.x[1] <- RHS[1] <- fcast.y$mean[1] - sum(b*rev(xx)) # if(h>1) # { # for (k in 2:h) # { # b <- b + bin.c[(1:n)+k] # RHS[k] <- RHS[k] - sum(b*rev(xx)) # LHS[k] <- sum(rev(fcast.x[1:(k-1)]) * bs[2:k]) # fcast.x[k] <- RHS[k] - LHS[k] # } # } # Extract stuff from ARMA model p <- fit$arma[1] q <- fit$arma[2] phi <- theta <- numeric(h) if (p > 0) { phi[1:p] <- fit$coef[1:p] } if (q > 0) { theta[1:q] <- fit$coef[p + (1:q)] } # Calculate psi weights new.phi <- psi <- numeric(h) psi[1] <- new.phi[1] <- 1 if (h > 1) { new.phi[2:h] <- -bin.c[2:h] for (i in 2:h) { if (p > 0) { new.phi[i] <- sum(phi[1:(i - 1)] * bin.c[(i - 1):1]) - bin.c[i] } psi[i] <- sum(new.phi[2:i] * rev(psi[1:(i - 1)])) + theta[i - 1] } } # Compute forecast variances fse <- sqrt(cumsum(psi ^ 2) * fit$sigma2) # Compute prediction intervals if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } nint <- length(level) upper <- lower <- matrix(NA, ncol = nint, nrow = h) for (i in 1:nint) { qq <- qnorm(0.5 * (1 + level[i] / 100)) lower[, i] <- fcast.x - qq * fse upper[, i] <- fcast.x + qq * fse } colnames(lower) <- colnames(upper) <- paste(level, "%", sep = "") res <- undo.na.ends(x, residuals(fit)) fits <- x - res data.tsp <- tsp(x) if (is.null(data.tsp)) { data.tsp <- c(1, length(x), 1) } mean.fcast <- ts(fcast.x + meanx, frequency = data.tsp[3], start = data.tsp[2] + 1 / data.tsp[3]) lower <- ts(lower + meanx, frequency = data.tsp[3], start = data.tsp[2] + 1 / data.tsp[3]) upper <- ts(upper + meanx, frequency = data.tsp[3], start = data.tsp[2] + 1 / data.tsp[3]) method <- paste("ARFIMA(", p, ",", round(object$d, 2), ",", q, ")", sep = "") if (!is.null(lambda)) { x <- InvBoxCox(x, lambda) fits <- InvBoxCox(fits, lambda) mean.fcast <- InvBoxCox(mean.fcast, lambda, 
biasadj, list(level = level, upper = upper, lower = lower)) lower <- InvBoxCox(lower, lambda) upper <- InvBoxCox(upper, lambda) } seriesname <- if (!is.null(object$series)) { object$series } else { deparse(object$call$x) } return(structure(list( x = x, mean = mean.fcast, upper = upper, lower = lower, level = level, method = method, model = object, series = seriesname, residuals = res, fitted = fits ), class = "forecast")) } # Fitted values from arfima() #' @rdname fitted.Arima #' @export fitted.ARFIMA <- function(object, h = 1, ...) { if (!is.null(object$fitted)) { # Object produced by arfima() if (h == 1) { return(object$fitted) } else { return(hfitted(object = object, h = h, FUN = "arfima", ...)) } } else { if (h != 1) { warning("h-step fits are not supported for models produced by fracdiff(), returning one-step fits (h=1)") } x <- getResponse(object) return(x - residuals(object)) } } forecast/R/tbats.R0000644000176200001440000006305014323125536013531 0ustar liggesusers# Author: srazbash ############################################################################### #' TBATS model (Exponential smoothing state space model with Box-Cox #' transformation, ARMA errors, Trend and Seasonal components) #' #' Fits a TBATS model applied to \code{y}, as described in De Livera, Hyndman & #' Snyder (2011). Parallel processing is used by default to speed up the #' computations. #' #' @aliases as.character.tbats print.tbats #' #' @param y The time series to be forecast. Can be \code{numeric}, \code{msts} #' or \code{ts}. Only univariate time series are supported. #' @param use.box.cox \code{TRUE/FALSE} indicates whether to use the Box-Cox #' transformation or not. If \code{NULL} then both are tried and the best fit #' is selected by AIC. #' @param use.trend \code{TRUE/FALSE} indicates whether to include a trend or #' not. If \code{NULL} then both are tried and the best fit is selected by AIC. #' @param use.damped.trend \code{TRUE/FALSE} indicates whether to include a #' damping parameter in the trend or not. If \code{NULL} then both are tried #' and the best fit is selected by AIC. #' @param seasonal.periods If \code{y} is \code{numeric} then seasonal periods #' can be specified with this parameter. #' @param use.arma.errors \code{TRUE/FALSE} indicates whether to include ARMA #' errors or not. If \code{TRUE} the best fit is selected by AIC. If #' \code{FALSE} then the selection algorithm does not consider ARMA errors. #' @param use.parallel \code{TRUE/FALSE} indicates whether or not to use #' parallel processing. #' @param num.cores The number of parallel processes to be used if using #' parallel processing. If \code{NULL} then the number of logical cores is #' detected and all available cores are used. #' @param bc.lower The lower limit (inclusive) for the Box-Cox transformation. #' @param bc.upper The upper limit (inclusive) for the Box-Cox transformation. #' @param biasadj Use adjusted back-transformed mean for Box-Cox #' transformations. If TRUE, point forecasts and fitted values are mean #' forecast. Otherwise, these points can be considered the median of the #' forecast densities. #' @param model Output from a previous call to \code{tbats}. If model is #' passed, this same model is fitted to \code{y} without re-estimating any #' parameters. #' @param ... Additional arguments to be passed to \code{auto.arima} when #' choosing an ARMA(p, q) model for the errors.
(Note that xreg will be ignored, #' as will any arguments concerning seasonality and differencing, but arguments #' controlling the values of p and q will be used.) #' @return An object with class \code{c("tbats", "bats")}. The generic accessor #' functions \code{fitted.values} and \code{residuals} extract useful features #' of the value returned by \code{bats} and associated functions. The fitted #' model is designated TBATS(omega, p,q, phi, <m1,k1>,...,<mJ,kJ>) where omega #' is the Box-Cox parameter and phi is the damping parameter; the error is #' modelled as an ARMA(p,q) process and m1,...,mJ list the seasonal periods #' used in the model and k1,...,kJ are the corresponding number of Fourier #' terms used for each seasonality. #' @author Slava Razbash and Rob J Hyndman #' @seealso \code{\link{tbats.components}}. #' @references De Livera, A.M., Hyndman, R.J., & Snyder, R. D. (2011), #' Forecasting time series with complex seasonal patterns using exponential #' smoothing, \emph{Journal of the American Statistical Association}, #' \bold{106}(496), 1513-1527. #' @keywords ts #' @examples #' #' \dontrun{ #' fit <- tbats(USAccDeaths) #' plot(forecast(fit)) #' #' taylor.fit <- tbats(taylor) #' plot(forecast(taylor.fit))} #' #' @export tbats <- function(y, use.box.cox=NULL, use.trend=NULL, use.damped.trend=NULL, seasonal.periods=NULL, use.arma.errors=TRUE, use.parallel=length(y) > 1000, num.cores=2, bc.lower=0, bc.upper=1, biasadj=FALSE, model=NULL, ...) { if (!is.numeric(y) || NCOL(y) > 1) { stop("y should be a univariate time series") } seriesname <- deparse(substitute(y)) origy <- y attr_y <- attributes(origy) # Get seasonal periods if (is.null(seasonal.periods)) { if ("msts" %in% class(y)) { seasonal.periods <- sort(attr(y, "msts")) } else if ("ts" %in% class(y)) { seasonal.periods <- frequency(y) } else { y <- as.ts(y) seasonal.periods <- 1 } } else { # Add ts attributes if (!("ts" %in% class(y))) { y <- msts(y, seasonal.periods) } } seasonal.periods <- unique(pmax(seasonal.periods, 1)) if (all(seasonal.periods == 1)) { seasonal.periods <- NULL } ny <- length(y) y <- na.contiguous(y) if (ny != length(y)) { warning("Missing values encountered. Using longest contiguous portion of time series") if (!is.null(attr_y$tsp)) { attr_y$tsp[1:2] <- range(time(y)) } } # Refit model if available if (!is.null(model)) { if (is.element("tbats", class(model))) { refitModel <- try(fitPreviousTBATSModel(y, model = model), silent = TRUE) } else if (is.element("bats", class(model))) { refitModel <- bats(origy, model = model) } return(refitModel) } # Return constant model if required if (is.constant(y)) { fit <- list( y = y, x = matrix(y, nrow = 1, ncol = ny), errors = y * 0, fitted.values = y, seed.states = matrix(y[1]), AIC = -Inf, likelihood = -Inf, variance = 0, alpha = 0.9999, method = "TBATS", call = match.call() ) return(structure(fit, class = "bats")) } # Check that observations are positive if (any((y <= 0))) { use.box.cox <- FALSE } # Fit non-seasonal model as a benchmark non.seasonal.model <- bats( as.numeric(y), use.box.cox = use.box.cox, use.trend = use.trend, use.damped.trend = use.damped.trend, use.arma.errors = use.arma.errors, use.parallel = use.parallel, num.cores = num.cores, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj, ...
) # If non-seasonal data, return the non-seasonal model if (is.null(seasonal.periods)) { non.seasonal.model$call <- match.call() attributes(non.seasonal.model$fitted.values) <- attributes(non.seasonal.model$errors) <- attributes(origy) non.seasonal.model$y <- origy return(non.seasonal.model) } else { seasonal.mask <- (seasonal.periods == 1) seasonal.periods <- seasonal.periods[!seasonal.mask] } if (is.null(use.box.cox)) { use.box.cox <- c(FALSE, TRUE) } if (any(use.box.cox)) { init.box.cox <- BoxCox.lambda(y, lower = bc.lower, upper = bc.upper) } else { init.box.cox <- NULL } if (is.null(use.trend)) { use.trend <- c(FALSE, TRUE) } else if (use.trend == FALSE) { use.damped.trend <- FALSE } if (is.null(use.damped.trend)) { use.damped.trend <- c(FALSE, TRUE) } # Set a vector of model params for later comparison model.params <- logical(length = 3) model.params[1] <- any(use.box.cox) model.params[2] <- any(use.trend) model.params[3] <- any(use.damped.trend) y <- as.numeric(y) n <- length(y) k.vector <- rep(1, length(seasonal.periods)) if (use.parallel) { if (is.null(num.cores)) { num.cores <- detectCores(all.tests = FALSE, logical = TRUE) } clus <- makeCluster(num.cores) } best.model <- try(fitSpecificTBATS( y, use.box.cox = model.params[1], use.beta = model.params[2], use.damping = model.params[3], seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj ), silent = TRUE) if (is.element("try-error", class(best.model))) { best.model <- list(AIC = Inf) } for (i in 1:length(seasonal.periods)) { if (seasonal.periods[i] == 2) { next } max.k <- floor(((seasonal.periods[i] - 1) / 2)) if (i != 1) { current.k <- 2 while (current.k <= max.k) { if (seasonal.periods[i] %% current.k != 0) { current.k <- current.k + 1 next } latter <- seasonal.periods[i] / current.k if (any(((seasonal.periods[1:(i - 1)] %% latter) == 0))) { max.k <- current.k - 1 break } else { current.k <- current.k + 1 } } } if (max.k == 1) { next } if (max.k <= 6) { k.vector[i] <- max.k best.model$AIC <- Inf repeat { # old.k <- k.vector[i] # k.vector[i] <- k.vector[i]-1 new.model <- try( fitSpecificTBATS( y, use.box.cox = model.params[1], use.beta = model.params[2], use.damping = model.params[3], seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj ), silent = TRUE ) if (is.element("try-error", class(new.model))) { new.model <- list(AIC = Inf) } if (new.model$AIC > best.model$AIC) { k.vector[i] <- k.vector[i] + 1 break } else { if (k.vector[i] == 1) { break } k.vector[i] <- k.vector[i] - 1 best.model <- new.model } } next } else { # Three different k vectors step.up.k <- k.vector step.down.k <- k.vector step.up.k[i] <- 7 step.down.k[i] <- 5 k.vector[i] <- 6 # Fit three different models ### if(use.parallel) then do parallel if (use.parallel) { k.control.array <- rbind(step.up.k, step.down.k, k.vector) models.list <- clusterApplyLB( clus, c(1:3), parFitSpecificTBATS, y = y, box.cox = model.params[1], trend = model.params[2], damping = model.params[3], seasonal.periods = seasonal.periods, k.control.matrix = k.control.array, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj ) up.model <- models.list[[1]] level.model <- models.list[[3]] down.model <- models.list[[2]] } else { up.model <- try( fitSpecificTBATS( y, use.box.cox = model.params[1], use.beta = model.params[2], use.damping = model.params[3], seasonal.periods = 
seasonal.periods, k.vector = step.up.k, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj ), silent = TRUE ) if (is.element("try-error", class(up.model))) { up.model <- list(AIC = Inf) } level.model <- try( fitSpecificTBATS( y, use.box.cox = model.params[1], use.beta = model.params[2], use.damping = model.params[3], seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj ), silent = TRUE ) if (is.element("try-error", class(level.model))) { level.model <- list(AIC = Inf) } down.model <- try( fitSpecificTBATS( y, use.box.cox = model.params[1], use.beta = model.params[2], use.damping = model.params[3], seasonal.periods = seasonal.periods, k.vector = step.down.k, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj ), silent = TRUE ) if (is.element("try-error", class(down.model))) { down.model <- list(AIC = Inf) } } # Decide the best model of the three and then follow that direction to find the optimal k aic.vector <- c(up.model$AIC, level.model$AIC, down.model$AIC) ## If shifting down if (min(aic.vector) == down.model$AIC) { best.model <- down.model k.vector[i] <- 5 repeat{ k.vector[i] <- k.vector[i] - 1 down.model <- try( fitSpecificTBATS( y = y, use.box.cox = model.params[1], use.beta = model.params[2], use.damping = model.params[3], seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj ), silent = TRUE ) if (is.element("try-error", class(down.model))) { down.model <- list(AIC = Inf) } if (down.model$AIC > best.model$AIC) { k.vector[i] <- k.vector[i] + 1 break } else { best.model <- down.model } if (k.vector[i] == 1) { break } } ## If staying level } else if (min(aic.vector) == level.model$AIC) { best.model <- level.model next ## If shifting up } else { best.model <- up.model k.vector[i] <- 7 repeat { k.vector[i] <- k.vector[i] + 1 up.model <- try( fitSpecificTBATS(y, model.params[1], model.params[2], model.params[3], seasonal.periods, k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj), silent = TRUE ) if (is.element("try-error", class(up.model))) { up.model <- list(AIC = Inf) } if (up.model$AIC > best.model$AIC) { k.vector[i] <- k.vector[i] - 1 break } else { best.model <- up.model } if (k.vector[i] == max.k) { break } } } } } aux.model <- best.model if (non.seasonal.model$AIC < best.model$AIC) { best.model <- non.seasonal.model } if ((length(use.box.cox) == 1) && use.trend[1] && (length(use.trend) == 1) && (length(use.damped.trend) == 1) && (use.parallel)) { # In this case, there is only one alternative. use.parallel <- FALSE stopCluster(clus) } else if ((length(use.box.cox) == 1) && !use.trend[1] && (length(use.trend) == 1) && (use.parallel)) { # As above, in this case, there is only one alternative.
use.parallel <- FALSE stopCluster(clus) } if (use.parallel) { # Set up the control array control.array <- NULL for (box.cox in use.box.cox) { for (trend in use.trend) { for (damping in use.damped.trend) { if (!trend && damping) { next } control.line <- c(box.cox, trend, damping) if (!is.null(control.array)) { control.array <- rbind(control.array, control.line) } else { control.array <- control.line } } } } models.list <- clusterApplyLB(clus, c(1:nrow(control.array)), parFilterTBATSSpecifics, y = y, control.array = control.array, model.params = model.params, seasonal.periods = seasonal.periods, k.vector = k.vector, use.arma.errors = use.arma.errors, aux.model = aux.model, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj, ...) stopCluster(clus) ## Choose the best model #### Get the AICs aics <- numeric(nrow(control.array)) for (i in 1:nrow(control.array)) { aics[i] <- models.list[[i]]$AIC } best.number <- which.min(aics) best.seasonal.model <- models.list[[best.number]] if (best.seasonal.model$AIC < best.model$AIC) { best.model <- best.seasonal.model } } else { for (box.cox in use.box.cox) { for (trend in use.trend) { for (damping in use.damped.trend) { if (all((model.params == c(box.cox, trend, damping)))) { new.model <- filterTBATSSpecifics(y, box.cox, trend, damping, seasonal.periods, k.vector, use.arma.errors, aux.model = aux.model, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj, ...) } else if (trend || !damping) { new.model <- filterTBATSSpecifics(y, box.cox, trend, damping, seasonal.periods, k.vector, use.arma.errors, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj, ...) } if (new.model$AIC < best.model$AIC) { best.model <- new.model } } } } } best.model$call <- match.call() attributes(best.model$fitted.values) <- attributes(best.model$errors) <- attr_y best.model$y <- origy best.model$series <- seriesname best.model$method <- "TBATS" return(best.model) } ###################################################################################################################################### parFilterTBATSSpecifics <- function(control.number, y, control.array, model.params, seasonal.periods, k.vector, use.arma.errors, aux.model=NULL, init.box.cox=NULL, bc.lower=0, bc.upper=1, biasadj=FALSE, ...) { box.cox <- control.array[control.number, 1] trend <- control.array[control.number, 2] damping <- control.array[control.number, 3] if (!all((model.params == c(box.cox, trend, damping)))) { first.model <- try( fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj), silent = TRUE ) } else { first.model <- aux.model } if (is.element("try-error", class(first.model))) { first.model <- list(AIC = Inf) } if (use.arma.errors) { suppressWarnings(arma <- try(auto.arima(as.numeric(first.model$errors), d = 0, ...), silent = TRUE)) if (!is.element("try-error", class(arma))) { p <- arma$arma[1] q <- arma$arma[2] if ((p != 0) || (q != 0)) { # Did auto.arima() find any AR() or MA() coefficients? 
if (p != 0) { ar.coefs <- numeric(p) } else { ar.coefs <- NULL } if (q != 0) { ma.coefs <- numeric(q) } else { ma.coefs <- NULL } starting.params <- first.model$parameters second.model <- try( fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, ar.coefs = ar.coefs, ma.coefs = ma.coefs, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj), silent = TRUE ) if (is.element("try-error", class(second.model))) { second.model <- list(AIC = Inf) } if (second.model$AIC < first.model$AIC) { return(second.model) } else { return(first.model) } } else { # Else auto.arima() did not find any AR() or MA() coefficients return(first.model) } } else { return(first.model) } } else { return(first.model) } } ################################################################################################# parFitSpecificTBATS <- function(control.number, y, box.cox, trend, damping, seasonal.periods, k.control.matrix, init.box.cox=NULL, bc.lower=0, bc.upper=1, biasadj=FALSE) { k.vector <- k.control.matrix[control.number, ] model <- try( fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj), silent = TRUE ) if (is.element("try-error", class(model))) { model <- list(AIC = Inf) } return(model) } filterTBATSSpecifics <- function(y, box.cox, trend, damping, seasonal.periods, k.vector, use.arma.errors, aux.model=NULL, init.box.cox=NULL, bc.lower=0, bc.upper=1, biasadj=FALSE, ...) { if (is.null(aux.model)) { first.model <- try( fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj), silent = TRUE ) } else { first.model <- aux.model } if (is.element("try-error", class(first.model))) { first.model <- list(AIC = Inf) } if (use.arma.errors) { suppressWarnings(arma <- try(auto.arima(as.numeric(first.model$errors), d = 0, ...), silent = TRUE)) if (!is.element("try-error", class(arma))) { p <- arma$arma[1] q <- arma$arma[2] if ((p != 0) || (q != 0)) { # Did auto.arima() find any AR() or MA() coefficients?
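# As in parFilterTBATSSpecifics() above, ar.coefs and ma.coefs are built as zero
# vectors whose lengths tell fitSpecificTBATS() how many AR and MA coefficients
# to estimate for the error process.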
if (p != 0) { ar.coefs <- numeric(p) } else { ar.coefs <- NULL } if (q != 0) { ma.coefs <- numeric(q) } else { ma.coefs <- NULL } starting.params <- first.model$parameters second.model <- try( fitSpecificTBATS(y, use.box.cox = box.cox, use.beta = trend, use.damping = damping, seasonal.periods = seasonal.periods, k.vector = k.vector, ar.coefs = ar.coefs, ma.coefs = ma.coefs, init.box.cox = init.box.cox, bc.lower = bc.lower, bc.upper = bc.upper, biasadj = biasadj), silent = TRUE ) if (is.element("try-error", class(second.model))) { second.model <- list(AIC = Inf) } if (second.model$AIC < first.model$AIC) { return(second.model) } else { return(first.model) } } else { # Else auto.arima() did not find any AR() or MA() coefficients return(first.model) } } else { return(first.model) } } else { return(first.model) } } makeSingleFourier <- function(j, m, T) { # Fourier pair for harmonic j of period m, evaluated at times t = 1, ..., T frier <- matrix(0, nrow = T, ncol = 2) for (t in 1:T) { frier[t, 1] <- cos((2 * pi * j * t) / m) frier[t, 2] <- sin((2 * pi * j * t) / m) } return(frier) } calcFTest <- function(r.sse, ur.sse, num.restrictions, num.u.params, num.observations) { f.stat <- ((r.sse - ur.sse) / num.restrictions) / (r.sse / (num.observations - num.u.params)) p.value <- pf(f.stat, num.restrictions, (num.observations - num.u.params), lower.tail = FALSE) return(p.value) } #' @rdname fitted.Arima #' @export fitted.tbats <- function(object, h=1, ...) { if (h == 1) { return(object$fitted.values) } else { return(hfitted(object = object, h = h, FUN = "tbats", ...)) } } #' @export print.tbats <- function(x, ...) { cat(as.character(x)) cat("\n") cat("\nCall: ") print(x$call) cat("\nParameters") if (!is.null(x$lambda)) { cat("\n Lambda: ") cat(round(x$lambda, 6)) } cat("\n Alpha: ") cat(x$alpha) if (!is.null(x$beta)) { cat("\n Beta: ") cat(x$beta) cat("\n Damping Parameter: ") cat(round(x$damping.parameter, 6)) } if (!is.null(x$gamma.one.values)) { cat("\n Gamma-1 Values: ") cat(x$gamma.one.values) } if (!is.null(x$gamma.two.values)) { cat("\n Gamma-2 Values: ") cat(x$gamma.two.values) } if (!is.null(x$ar.coefficients)) { cat("\n AR coefficients: ") cat(round(x$ar.coefficients, 6)) } if (!is.null(x$ma.coefficients)) { cat("\n MA coefficients: ") cat(round(x$ma.coefficients, 6)) } cat("\n") cat("\nSeed States:\n") print(x$seed.states) cat("\nSigma: ") cat(sqrt(x$variance)) cat("\nAIC: ") cat(x$AIC) cat("\n") } #' @rdname plot.bats #' #' @examples #' #' \dontrun{ #' fit <- tbats(USAccDeaths) #' plot(fit) #' autoplot(fit, range.bars = TRUE)} #' #' @export plot.tbats <- function(x, main="Decomposition by TBATS model", ...) { out <- tbats.components(x) plot.ts(out, main = main, nc = 1, ...) } #' Extract components of a TBATS model #' #' Extract the level, slope and seasonal components of a TBATS model. The extracted components are Box-Cox transformed using the estimated transformation parameter. #' #' #' @param x A tbats object created by \code{\link{tbats}}. #' @return A multiple time series (\code{mts}) object. The first series is the observed time series. The second series is the trend component of the fitted model. Series three onwards are the seasonal components of the fitted model with one time series for each of the seasonal components. All components are transformed using the estimated Box-Cox parameter. #' @author Slava Razbash and Rob J Hyndman #' @seealso \code{\link{tbats}}. #' @references De Livera, A.M., Hyndman, R.J., & Snyder, R. D.
(2011), #' Forecasting time series with complex seasonal patterns using exponential #' smoothing, \emph{Journal of the American Statistical Association}, #' \bold{106}(496), 1513-1527. #' @keywords ts #' @examples #' #' \dontrun{ #' fit <- tbats(USAccDeaths, use.parallel=FALSE) #' components <- tbats.components(fit) #' plot(components)} #' #' @export tbats.components <- function(x) { # Get original data, transform if necessary if (!is.null(x$lambda)) { y <- BoxCox(x$y, x$lambda) lambda <- attr(y, "lambda") } else { y <- x$y } # Compute matrices tau <- ifelse(!is.null(x$k.vector), 2 * sum(x$k.vector), 0) w <- .Call( "makeTBATSWMatrix", smallPhi_s = x$damping.parameter, kVector_s = as.integer(x$k.vector), arCoefs_s = x$ar.coefficients, maCoefs_s = x$ma.coefficients, tau_s = as.integer(tau), PACKAGE = "forecast" ) out <- cbind(observed = c(y), level = x$x[1, ]) if (!is.null(x$beta)) { out <- cbind(out, slope = x$x[2, ]) } # Add seasonal components if they exist if (tau > 0) { nonseas <- 2 + !is.null(x$beta) # No. non-seasonal columns in out nseas <- length(x$seasonal.periods) # No. seasonal periods seas.states <- cbind(x$seed.states, x$x)[-(1:(1 + !is.null(x$beta))), ] seas.states <- seas.states[, -ncol(seas.states)] w <- w$w.transpose[, -(1:(1 + !is.null(x$beta))), drop = FALSE] w <- w[, 1:tau, drop = FALSE] j <- cumsum(c(1, 2 * x$k.vector)) for (i in 1:nseas) out <- cbind(out, season = c(w[, j[i]:(j[i + 1] - 1), drop = FALSE] %*% seas.states[j[i]:(j[i + 1] - 1), ])) if (nseas > 1) { colnames(out)[nonseas + 1:nseas] <- paste("season", 1:nseas, sep = "") } } # Add time series characteristics out <- ts(out) tsp(out) <- tsp(y) return(out) } forecast/R/DM2.R0000644000176200001440000001220414456202551012771 0ustar liggesusers# Diebold-Mariano test. Modified from code by Adrian Trapletti. # Then adapted by M. Yousaf Khan for better performance on small samples #' Diebold-Mariano test for predictive accuracy #' #' The Diebold-Mariano test compares the forecast accuracy of two forecast #' methods. #' #' This function implements the modified test proposed by Harvey, Leybourne and #' Newbold (1997). The null hypothesis is that the two methods have the same #' forecast accuracy. For \code{alternative="less"}, the alternative hypothesis #' is that method 2 is less accurate than method 1. For #' \code{alternative="greater"}, the alternative hypothesis is that method 2 is #' more accurate than method 1. For \code{alternative="two.sided"}, the #' alternative hypothesis is that method 1 and method 2 have different levels #' of accuracy. The long-run variance estimator can be either the #' auto-correlation estimator \code{varestimator = "acf"}, or the estimator based #' on Bartlett weights \code{varestimator = "bartlett"}, which ensures a positive estimate. #' Both long-run variance estimators are proposed in Diebold and Mariano (1995). #' #' @param e1 Forecast errors from method 1. #' @param e2 Forecast errors from method 2. #' @param alternative a character string specifying the alternative hypothesis, #' must be one of \code{"two.sided"} (default), \code{"greater"} or #' \code{"less"}. You can specify just the initial letter. #' @param h The forecast horizon used in calculating \code{e1} and \code{e2}. #' @param power The power used in the loss function. Usually 1 or 2. #' @param varestimator a character string specifying the long-run variance estimator. #' Options are \code{"acf"} (default) or \code{"bartlett"}.
#' @return A list with class \code{"htest"} containing the following #' components: #' \item{statistic}{the value of the DM-statistic.} #' \item{parameter}{the forecast horizon and loss function power used in the test.} #' \item{alternative}{a character string describing the alternative hypothesis.} #' \item{varestimator}{a character string describing the long-run variance estimator.} #' \item{p.value}{the p-value for the test.} #' \item{method}{a character string with the value "Diebold-Mariano Test".} #' \item{data.name}{a character vector giving the names of the two error series.} #' @author George Athanasopoulos and Kirill Kuroptev #' @references Diebold, F.X. and Mariano, R.S. (1995) Comparing predictive #' accuracy. \emph{Journal of Business and Economic Statistics}, \bold{13}, #' 253-263. #' #' Harvey, D., Leybourne, S., & Newbold, P. (1997). Testing the equality of #' prediction mean squared errors. \emph{International Journal of forecasting}, #' \bold{13}(2), 281-291. #' @keywords htest ts #' @examples #' #' # Test on in-sample one-step forecasts #' f1 <- ets(WWWusage) #' f2 <- auto.arima(WWWusage) #' accuracy(f1) #' accuracy(f2) #' dm.test(residuals(f1), residuals(f2), h = 1) #' #' # Test on out-of-sample one-step forecasts #' f1 <- ets(WWWusage[1:80]) #' f2 <- auto.arima(WWWusage[1:80]) #' f1.out <- ets(WWWusage[81:100], model = f1) #' f2.out <- Arima(WWWusage[81:100], model = f2) #' accuracy(f1.out) #' accuracy(f2.out) #' dm.test(residuals(f1.out), residuals(f2.out), h = 1) #' @export dm.test <- function(e1, e2, alternative = c("two.sided", "less", "greater"), h = 1, power = 2, varestimator = c("acf", "bartlett")) { alternative <- match.arg(alternative) varestimator <- match.arg(varestimator) h <- as.integer(h) if(h < 1L) { stop("h must be at least 1") } if(h > length(e1)) { stop("h cannot be longer than the number of forecast errors") } d <- c(abs(e1))^power - c(abs(e2))^power d.cov <- acf(d, na.action = na.omit, lag.max = h-1, type = "covariance", plot = FALSE)$acf[, , 1] n <- length(d) if (varestimator == "acf" | h == 1L) { # Original estimator d.var <- sum(c(d.cov[1], 2 * d.cov[-1])) / n } else { # varestimator == "bartlett" # Using Bartlett weights to ensure a positive estimate of long-run-variance d.var <- sum(c(d.cov[1], 2 * (1 - seq_len(h-1)/h) * d.cov[-1])) / n } dv <- d.var if (dv > 0) { STATISTIC <- mean(d, na.rm = TRUE) / sqrt(dv) } else if (h == 1) { stop("Variance of DM statistic is zero") } else { warning("Variance is negative. Try varestimator = bartlett. Proceeding with horizon h=1.") return(dm.test(e1, e2, alternative, h = 1, power, varestimator)) } k <- ((n + 1 - 2 * h + (h / n) * (h - 1)) / n) ^ (1 / 2) STATISTIC <- STATISTIC * k names(STATISTIC) <- "DM" if (alternative == "two.sided") { PVAL <- 2 * pt(-abs(STATISTIC), df = n - 1) } else if (alternative == "less") { PVAL <- pt(STATISTIC, df = n - 1) } else if (alternative == "greater") { PVAL <- pt(STATISTIC, df = n - 1, lower.tail = FALSE) } PARAMETER <- c(h, power) names(PARAMETER) <- c("Forecast horizon", "Loss function power") structure( list( statistic = STATISTIC, parameter = PARAMETER, alternative = alternative, varestimator = varestimator, p.value = PVAL, method = "Diebold-Mariano Test", data.name = c(deparse(substitute(e1)), deparse(substitute(e2))) ), class = "htest" ) } is.htest <- function(x) { inherits(x, "htest") } forecast/R/errors.R0000644000176200001440000002524314323125536013732 0ustar liggesusers## Measures of forecast accuracy ## Forecasts in f. 
This may be a numerical vector or the output from arima or ets or derivatives. ## Actual values in x # dx = response variable in historical data ## test enables a subset of x and f to be tested. # MASE: d is the # of differencing # MASE: D is the # of seasonal differencing testaccuracy <- function(f, x, test, d, D) { dx <- getResponse(f) if (is.data.frame(x)) { responsevar <- as.character(formula(f$model))[2] if (is.element(responsevar, colnames(x))) { x <- x[, responsevar] } else { stop("I can't figure out what data to use.") } } if (is.list(f)) { if (is.element("mean", names(f))) { f <- f$mean } else { stop("Unknown list structure") } } if (is.ts(x) && is.ts(f)) { tspf <- tsp(f) tspx <- tsp(x) start <- max(tspf[1], tspx[1]) end <- min(tspf[2], tspx[2]) # Adjustment to allow for floating point issues start <- min(start, end) end <- max(start, end) f <- window(f, start = start, end = end) x <- window(x, start = start, end = end) } n <- length(x) if (is.null(test)) { test <- 1:n } else if (min(test) < 1 || max(test) > n) { warning("test elements must be within sample") test <- test[test >= 1 & test <= n] } ff <- f xx <- x # Check length of f if (length(f) < n) { stop("Not enough forecasts. Check that forecasts and test data match.") } error <- (xx - ff[1:n])[test] pe <- error / xx[test] * 100 me <- mean(error, na.rm = TRUE) mse <- mean(error^2, na.rm = TRUE) mae <- mean(abs(error), na.rm = TRUE) mape <- mean(abs(pe), na.rm = TRUE) mpe <- mean(pe, na.rm = TRUE) out <- c(me, sqrt(mse), mae, mpe, mape) names(out) <- c("ME", "RMSE", "MAE", "MPE", "MAPE") # Compute MASE if historical data available if (!is.null(dx)) { tspdx <- tsp(dx) if (!is.null(tspdx)) { if (D > 0) { # seasonal differencing nsd <- diff(dx, lag = round(tspdx[3L]), differences = D) } else { # non seasonal differencing nsd <- dx } if (d > 0) { nd <- diff(nsd, differences = d) } else { nd <- nsd } scale <- mean(abs(nd), na.rm = TRUE) } else { # not time series scale <- mean(abs(dx - mean(dx, na.rm = TRUE)), na.rm = TRUE) } mase <- mean(abs(error / scale), na.rm = TRUE) out <- c(out, mase) names(out)[length(out)] <- "MASE" } # Additional time series measures if (!is.null(tsp(x)) && n > 1) { fpe <- (c(ff[2:n]) / c(xx[1:(n - 1)]) - 1)[test - 1] ape <- (c(xx[2:n]) / c(xx[1:(n - 1)]) - 1)[test - 1] theil <- sqrt(sum((fpe - ape)^2, na.rm = TRUE) / sum(ape^2, na.rm = TRUE)) if (length(error) > 1) { r1 <- acf(error, plot = FALSE, lag.max = 2, na.action = na.pass)$acf[2, 1, 1] } else { r1 <- NA } nj <- length(out) out <- c(out, r1, theil) names(out)[nj + (1:2)] <- c("ACF1", "Theil's U") } return(out) } trainingaccuracy <- function(f, test, d, D) { # Make sure x is an element of f when f is a fitted model rather than a forecast # if(!is.list(f)) # stop("f must be a forecast object or a time series model object.") dx <- getResponse(f) if (is.element("splineforecast", class(f))) { fits <- f$onestepf } else { fits <- fitted(f) } # Don't use f$resid as this may contain multiplicative errors. 
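# Instead, recompute residuals on the response scale as observed minus fitted values.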
res <- dx - fits n <- length(res) if (is.null(test)) { test <- 1:n } if (min(test) < 1 || max(test) > n) { warning("test elements must be within sample") test <- test[test >= 1 & test <= n] } tspdx <- tsp(dx) res <- res[test] dx <- dx[test] pe <- res / dx * 100 # Percentage error me <- mean(res, na.rm = TRUE) mse <- mean(res^2, na.rm = TRUE) mae <- mean(abs(res), na.rm = TRUE) mape <- mean(abs(pe), na.rm = TRUE) mpe <- mean(pe, na.rm = TRUE) out <- c(me, sqrt(mse), mae, mpe, mape) names(out) <- c("ME", "RMSE", "MAE", "MPE", "MAPE") # Compute MASE if historical data available if (!is.null(dx)) { if (!is.null(tspdx)) { if (D > 0) { # seasonal differencing nsd <- diff(dx, lag = round(tspdx[3L]), differences = D) } else { # non seasonal differencing nsd <- dx } if (d > 0) { nd <- diff(nsd, differences = d) } else { nd <- nsd } scale <- mean(abs(nd), na.rm = TRUE) } else { # not time series scale <- mean(abs(dx - mean(dx, na.rm = TRUE)), na.rm = TRUE) } mase <- mean(abs(res / scale), na.rm = TRUE) out <- c(out, mase) names(out)[length(out)] <- "MASE" } # Additional time series measures if (!is.null(tspdx)) { if (length(res) > 1) { r1 <- acf(res, plot = FALSE, lag.max = 2, na.action = na.pass)$acf[2, 1, 1] } else { r1 <- NA } nj <- length(out) out <- c(out, r1) names(out)[nj + 1] <- "ACF1" } return(out) } #' Accuracy measures for a forecast model #' #' Returns a range of summary measures of the forecast accuracy. If \code{x} is #' provided, the function measures test set forecast accuracy #' based on \code{x-f}. If \code{x} is not provided, the function only produces #' training set accuracy measures of the forecasts based on #' \code{f["x"]-fitted(f)}. All measures are defined and discussed in Hyndman #' and Koehler (2006). #' #' The measures calculated are: #' \itemize{ #' \item ME: Mean Error #' \item RMSE: Root Mean Squared Error #' \item MAE: Mean Absolute Error #' \item MPE: Mean Percentage Error #' \item MAPE: Mean Absolute Percentage Error #' \item MASE: Mean Absolute Scaled Error #' \item ACF1: Autocorrelation of errors at lag 1. #' } #' By default, the MASE calculation is scaled using the MAE of training set naive #' forecasts for non-seasonal time series, training set seasonal naive forecasts #' for seasonal time series and training set mean forecasts for non-time series data. #' If \code{f} is a numerical vector rather than a \code{forecast} object, the MASE #' will not be returned as the training data will not be available. #' #' See Hyndman and Koehler (2006) and Hyndman and Athanasopoulos (2018, Section #' 3.4) for further details. #' #' @param object An object of class \dQuote{\code{forecast}}, or a numerical vector #' containing forecasts. It will also work with \code{Arima}, \code{ets} and #' \code{lm} objects if \code{x} is omitted -- in which case training set accuracy #' measures are returned. #' @param x An optional numerical vector containing actual values of the same #' length as object, or a time series overlapping with the times of \code{f}. #' @param test Indicator of which elements of \code{x} and \code{f} to test. If #' \code{test} is \code{NULL}, all elements are used. Otherwise test is a #' numeric vector containing the indices of the elements to use in the test. #' @param d An integer indicating the number of lag-1 differences to be used #' for the denominator in MASE calculation. Default value is 1 for non-seasonal #' series and 0 for seasonal series.
#' @param D An integer indicating the number of seasonal differences to be used #' for the denominator in MASE calculation. Default value is 0 for non-seasonal #' series and 1 for seasonal series. #' @param ... Additional arguments depending on the specific method. #' @param f Deprecated. Please use `object` instead. #' @return Matrix giving forecast accuracy measures. #' @author Rob J Hyndman #' @references Hyndman, R.J. and Koehler, A.B. (2006) "Another look at measures #' of forecast accuracy". \emph{International Journal of Forecasting}, #' \bold{22}(4), 679-688. Hyndman, R.J. and Athanasopoulos, G. (2018) #' "Forecasting: principles and practice", 2nd ed., OTexts, Melbourne, Australia. #' Section 3.4 "Evaluating forecast accuracy". #' \url{https://otexts.com/fpp2/accuracy.html}. #' @keywords ts #' @examples #' #' fit1 <- rwf(EuStockMarkets[1:200, 1], h = 100) #' fit2 <- meanf(EuStockMarkets[1:200, 1], h = 100) #' accuracy(fit1) #' accuracy(fit2) #' accuracy(fit1, EuStockMarkets[201:300, 1]) #' accuracy(fit2, EuStockMarkets[201:300, 1]) #' plot(fit1) #' lines(EuStockMarkets[1:300, 1]) #' @export accuracy.default <- function(object, x, test = NULL, d = NULL, D = NULL, f = NULL, ...) { if (!is.null(f)) { warning("Using `f` as the argument for `accuracy()` is deprecated. Please use `object` instead.") object <- f } if (!any(is.element(class(object), c( "ARFIMA", "mforecast", "forecast", "ts", "integer", "numeric", "Arima", "ets", "lm", "bats", "tbats", "nnetar", "stlm", "baggedModel" )))) { stop(paste("No accuracy method found for an object of class",class(object))) } if (is.element("mforecast", class(object))) { return(accuracy.mforecast(object, x, test, d, D)) } trainset <- (is.list(object)) testset <- (!missing(x)) if (testset && !is.null(test)) { trainset <- FALSE } if (!trainset && !testset) { stop("Unable to compute forecast accuracy measures") } # Find d and D if (is.null(D) && is.null(d)) { if (testset) { d <- as.numeric(frequency(x) == 1) D <- as.numeric(frequency(x) > 1) } else if (trainset) { if (!is.null(object$mean)) { d <- as.numeric(frequency(object$mean) == 1) D <- as.numeric(frequency(object$mean) > 1) } else { d <- as.numeric(frequency(object[["x"]]) == 1) D <- as.numeric(frequency(object[["x"]]) > 1) } } else { d <- as.numeric(frequency(object) == 1) D <- as.numeric(frequency(object) > 1) } } if (trainset) { trainout <- trainingaccuracy(object, test, d, D) trainnames <- names(trainout) } else { trainnames <- NULL } if (testset) { testout <- testaccuracy(object, x, test, d, D) testnames <- names(testout) } else { testnames <- NULL } outnames <- unique(c(trainnames, testnames)) out <- matrix(NA, nrow = 2, ncol = length(outnames)) colnames(out) <- outnames rownames(out) <- c("Training set", "Test set") if (trainset) { out[1, names(trainout)] <- trainout } if (testset) { out[2, names(testout)] <- testout } if (!testset) { out <- out[1, , drop = FALSE] } if (!trainset) { out <- out[2, , drop = FALSE] } return(out) } # Compute accuracy for an mforecast object #' @export accuracy.mforecast <- function(object, x, test = NULL, d, D, f = NULL, ...) { if (!is.null(f)) { warning("Using `f` as the argument for `accuracy()` is deprecated. 
Please use `object` instead.") object <- f } out <- NULL nox <- missing(x) i <- 1 for (fcast in object$forecast) { if (nox) { out1 <- accuracy(fcast, test = test, d = d, D = D) } else { out1 <- accuracy(fcast, x[, i], test, d, D) } rownames(out1) <- paste(fcast$series, rownames(out1)) out <- rbind(out, out1) i <- i + 1 } return(out) } forecast/R/modelAR.R0000644000176200001440000004173714323125536013747 0ustar liggesusers# Defaults: # For non-seasonal data, p chosen using AIC from linear AR(p) model # For seasonal data, p chosen using AIC from linear AR(p) model after # seasonally adjusting with STL decomposition, and P=1 #' Time Series Forecasts with a user-defined model #' #' Experimental function to forecast univariate time series with a #' user-defined model #' #' This is an experimental function and only recommended for advanced users. #' The selected model is fitted with lagged values of \code{y} as #' inputs. The inputs are for #' lags 1 to \code{p}, and lags \code{m} to \code{mP} where #' \code{m=frequency(y)}. If \code{xreg} is provided, its columns are also #' used as inputs. If there are missing values in \code{y} or #' \code{xreg}, the corresponding rows (and any others which depend on them as #' lags) are omitted from the fit. The model is trained for one-step #' forecasting. Multi-step forecasts are computed recursively. #' #' @aliases print.modelAR #' #' @param y A numeric vector or time series of class \code{ts}. #' @param p Embedding dimension for non-seasonal time series. Number of #' non-seasonal lags used as inputs. For non-seasonal time series, the default #' is the optimal number of lags (according to the AIC) for a linear AR(p) #' model. For seasonal time series, the same method is used but applied to #' seasonally adjusted data (from an stl decomposition). #' @param P Number of seasonal lags used as inputs. #' @param FUN Function used for model fitting. Must accept arguments \code{x} #' and \code{y} for the predictors and response, respectively (\code{formula} #' object not currently supported). #' @param predict.FUN Prediction function used to apply \code{FUN} to new data. #' Must accept an object of class \code{FUN} as its first argument, and a #' data frame or matrix of new data for its second argument. Additionally, #' it should return fitted values when new data is omitted. #' @param xreg Optionally, a vector or matrix of external regressors, which #' must have the same number of rows as \code{y}. Must be numeric. #' @param model Output from a previous call to \code{modelAR}. If model is #' passed, this same model is fitted to \code{y} without re-estimating any #' parameters. #' @param subset Optional vector specifying a subset of observations to be used #' in the fit. Can be an integer index vector or a logical vector the same #' length as \code{y}. All observations are used by default. #' @param scale.inputs If TRUE, inputs are scaled by subtracting the column #' means and dividing by their respective standard deviations. If \code{lambda} #' is not \code{NULL}, scaling is applied after Box-Cox transformation. #' @param x Deprecated. Included for backwards compatibility. #' @param \dots Other arguments passed to \code{FUN} for #' \code{modelAR}. #' @inheritParams forecast.ts #' #' @return Returns an object of class "\code{modelAR}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results.
#' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{modelAR}. #' #' \item{model}{A list containing information about the fitted model} #' \item{method}{The name of the forecasting method as a character string} #' \item{x}{The original time series.} #' \item{xreg}{The external regressors used in fitting (if given).} #' \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} #' \item{fitted}{Fitted values (one-step forecasts)} #' \item{...}{Other arguments} #' #' @author Rob J Hyndman and Gabriel Caceres #' @keywords ts #' #' @export modelAR <- function(y, p, P=1, FUN, predict.FUN, xreg=NULL, lambda=NULL, model=NULL, subset=NULL, scale.inputs=FALSE, x=y, ...) { useoldmodel <- FALSE yname <- deparse(substitute(y)) if (!is.null(model)) { # Use previously fitted model useoldmodel <- TRUE # Check for conflicts between new and old data: # Check model class if (!is.modelAR(model)) { stop("Model must be a modelAR object") } # Check new data m <- max(round(frequency(model$x)), 1L) minlength <- max(c(model$p, model$P * m)) + 1 if (length(x) < minlength) { stop(paste("Series must be at least of length", minlength, "to use fitted model")) } if (tsp(as.ts(x))[3] != m) { warning(paste("Data frequency doesn't match fitted model, coercing to frequency =", m)) x <- ts(x, frequency = m) } # Check xreg if (!is.null(model$xreg)) { if (is.null(xreg)) { stop("No external regressors provided") } if (NCOL(xreg) != NCOL(model$xreg)) { stop("Number of external regressors does not match fitted model") } } # Update parameters with previous model lambda <- model$lambda p <- model$p P <- model$P FUN <- model$FUN predict.FUN <- model$predict.FUN if (P > 0) { lags <- sort(unique(c(1:p, m * (1:P)))) } else { lags <- 1:p } if (!is.null(model$scalex)) { scale.inputs <- TRUE } } else { # when not using an old model if (length(y) < 3) { stop("Not enough data to fit a model") } # Check for constant data in time series constant_data <- is.constant(na.interp(x)) if (constant_data){ warning("Constant data, setting p=1, P=0, lambda=NULL, scale.inputs=FALSE") scale.inputs <- FALSE lambda <- NULL p <- 1 P <- 0 } ## Check for constant data in xreg if (!is.null(xreg)){ constant_xreg <- any(apply(as.matrix(xreg), 2, function(x) is.constant(na.interp(x)))) if (constant_xreg){ warning("Constant xreg column, setting scale.inputs=FALSE") scale.inputs <- FALSE } } } # Check for NAs in x if (any(is.na(x))) { warning("Missing values in x, omitting rows") } # Transform data if (!is.null(lambda)) { xx <- BoxCox(x, lambda) lambda <- attr(xx, "lambda") } else { xx <- x } ## Check whether to use a subset of the data xsub <- rep(TRUE, length(x)) if (is.numeric(subset)) { xsub[-subset] <- FALSE } if (is.logical(subset)) { xsub <- subset } # Scale series scalex <- NULL if (scale.inputs) { if (useoldmodel) { scalex <- model$scalex } else { tmpx <- scale(xx[xsub], center = TRUE, scale = TRUE) scalex <- list( center = attr(tmpx, "scaled:center"), scale = attr(tmpx, "scaled:scale") ) } xx <- scale(xx, center = scalex$center, scale = scalex$scale) xx <- xx[, 1] } # Check xreg class & dim xxreg <- NULL scalexreg <- NULL if (!is.null(xreg)) { xxreg <- xreg <- as.matrix(xreg) if (length(x) != NROW(xreg)) { stop("Number of rows in xreg does not match series length") } # Check for NAs in xreg if (any(is.na(xreg))) { warning("Missing values in xreg, omitting rows") } # Scale xreg if (scale.inputs) { if (useoldmodel) { scalexreg <- model$scalexreg } else
{ tmpx <- scale(xxreg[xsub, ], center = TRUE, scale = TRUE) scalexreg <- list( center = attr(tmpx, "scaled:center"), scale = attr(tmpx, "scaled:scale") ) } xxreg <- scale(xxreg, center = scalexreg$center, scale = scalexreg$scale) } } # Set up lagged matrix n <- length(xx) xx <- as.ts(xx) m <- max(round(frequency(xx)), 1L) if (!useoldmodel) { if (m == 1) { if (missing(p)) { p <- max(length(ar(na.interp(xx))$ar), 1) } if (p >= n) { warning("Reducing number of lagged inputs due to short series") p <- n - 1 } lags <- 1:p if (P > 1) { warning("Non-seasonal data, ignoring seasonal lags") } P <- 0 } else { if (missing(p)) { if (n >= 2 * m) { x.sa <- seasadj(mstl(na.interp(xx))) } else { x.sa <- na.interp(xx) } p <- max(length(ar(x.sa)$ar), 1) } if (p >= n) { warning("Reducing number of lagged inputs due to short series") p <- n - 1 } if (P > 0 && n >= m * P + 2) { lags <- sort(unique(c(1:p, m * (1:P)))) } else { lags <- 1:p if (P > 0) { warning("Series too short for seasonal lags") P <- 0 } } } } maxlag <- max(lags) nlag <- length(lags) y <- xx[-(1:maxlag)] lags.X <- matrix(NA_real_, ncol = nlag, nrow = n - maxlag) for (i in 1:nlag){ lags.X[, i] <- xx[(maxlag - lags[i] + 1):(n - lags[i])] } # Add xreg into lagged matrix lags.X <- cbind(lags.X, xxreg[-(1:maxlag), ]) # Remove missing values if present j <- complete.cases(lags.X, y) ## Remove values not in subset j <- j & xsub[-(1:maxlag)] ## Stop if there's no data to fit (e.g. due to NAs or NaNs) if (NROW(lags.X[j,, drop=FALSE]) == 0) { stop("No data to fit (possibly due to NA or NaN)") } ## Fit selected model if (useoldmodel) { fit <- model$model } else { fit <- FUN(x = lags.X[j,, drop=FALSE], y = y[j], ...) } # Return results out <- list() out$x <- as.ts(x) out$m <- m out$p <- p out$P <- P out$FUN <- FUN out$predict.FUN <- predict.FUN out$scalex <- scalex out$scalexreg <- scalexreg out$xreg <- xreg out$lambda <- lambda out$subset <- (1:length(x))[xsub] out$model <- fit out$modelargs <- list(...) if (useoldmodel) { out$modelargs <- model$modelargs fits <- c(rep(NA_real_, maxlag), predict.FUN(fit, lags.X[j,, drop=FALSE])) } else { fits <- c(rep(NA_real_, maxlag), predict.FUN(fit)) } if (scale.inputs) { fits <- fits * scalex$scale + scalex$center } fits <- ts(fits) if (!is.null(lambda)) { fits <- InvBoxCox(fits, lambda) } out$fitted <- ts(rep(NA_real_, length(out$x))) out$fitted[c(rep(TRUE, maxlag), j)] <- fits tsp(out$fitted) <- tsp(out$x) out$residuals <- out$x - out$fitted out$lags <- lags out$series <- yname out$method <- deparse(substitute(FUN)) out$method <- paste0(out$method, "-AR(", p) if (P > 0) out$method <- paste(out$method, ",", P, sep = "") out$method <- paste0(out$method, ")") if (P > 0) out$method <- paste(out$method, "[", m, "]", sep = "") out$call <- match.call() return(structure(out, class = c("modelAR"))) } #' Forecasting using user-defined model #' #' Returns forecasts and other information for user-defined #' models. #' #' Prediction intervals are calculated through simulations and can be slow. #' Note that if the model is too complex and overfits the data, the residuals #' can be arbitrarily small; if used for prediction interval calculations, they #' could lead to misleadingly small values. #' #' @param object An object of class "\code{modelAR}" resulting from a call to #' \code{\link{modelAR}}. #' @param h Number of periods for forecasting. If \code{xreg} is used, \code{h} #' is ignored and the number of forecast periods is set to the number of rows #' of \code{xreg}. 
#' @param PI If TRUE, prediction intervals are produced, otherwise only point #' forecasts are calculated. If \code{PI} is FALSE, then \code{level}, #' \code{fan}, \code{bootstrap} and \code{npaths} are all ignored. #' @param level Confidence level for prediction intervals. #' @param fan If \code{TRUE}, level is set to \code{seq(51,99,by=3)}. This is #' suitable for fan plots. #' @param xreg Future values of external regressor variables. #' @param bootstrap If \code{TRUE}, then prediction intervals computed using #' simulations with resampled residuals rather than normally distributed #' errors. Ignored if \code{innov} is not \code{NULL}. #' @param npaths Number of sample paths used in computing simulated prediction #' intervals. #' @param innov Values to use as innovations for prediction intervals. Must be #' a matrix with \code{h} rows and \code{npaths} columns (vectors are coerced #' into a matrix). If present, \code{bootstrap} is ignored. #' @param ... Additional arguments passed to \code{\link{simulate.nnetar}} #' @inheritParams forecast.ts #' #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{forecast.nnetar}. #' #' An object of class "\code{forecast}" is a list containing at least the #' following elements: #' \item{model}{A list containing information about the fitted model} #' \item{method}{The name of the forecasting method as a character string} #' \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} #' \item{upper}{Upper limits for prediction intervals} #' \item{level}{The confidence values associated with the prediction intervals} #' \item{x}{The original time series (either \code{object} itself or the time series #' used to create the model stored as \code{object}).} #' \item{xreg}{The external regressors used in fitting (if given).} #' \item{residuals}{Residuals from the fitted model. That is x minus fitted values.} #' \item{fitted}{Fitted values (one-step forecasts)} #' \item{...}{Other arguments} #' #' @author Rob J Hyndman and Gabriel Caceres #' @seealso \code{\link{nnetar}}. #' @keywords ts #' #' @export forecast.modelAR <- function(object, h=ifelse(object$m > 1, 2 * object$m, 10), PI=FALSE, level=c(80, 95), fan=FALSE, xreg=NULL, lambda=object$lambda, bootstrap=FALSE, npaths=1000, innov=NULL, ...) 
{ out <- object tspx <- tsp(out$x) # if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } # Check if xreg was used in fitted model if (is.null(object$xreg)) { if (!is.null(xreg)) { warning("External regressors were not used in fitted model, xreg will be ignored") } xreg <- NULL } else { if (is.null(xreg)) { stop("No external regressors provided") } xreg <- as.matrix(xreg) if (NCOL(xreg) != NCOL(object$xreg)) { stop("Number of external regressors does not match fitted model") } h <- NROW(xreg) } fcast <- numeric(h) xx <- object$x xxreg <- xreg if (!is.null(lambda)) { xx <- BoxCox(xx, lambda) lambda <- attr(xx, "lambda") } # Check and apply scaling of fitted model if (!is.null(object$scalex)) { xx <- scale(xx, center = object$scalex$center, scale = object$scalex$scale) if (!is.null(xreg)) { xxreg <- scale(xreg, center = object$scalexreg$center, scale = object$scalexreg$scale) } } # Get lags used in fitted model lags <- object$lags maxlag <- max(lags) flag <- rev(tail(xx, n = maxlag)) # Iterative 1-step forecast for (i in 1:h) { newdata <- c(flag[lags], xxreg[i, ]) if (any(is.na(newdata))) { stop("I can't forecast when there are missing values near the end of the series.") } fcast[i] <- object$predict.FUN(object$model, newdata) flag <- c(fcast[i], flag[-maxlag]) } # Re-scale point forecasts if (!is.null(object$scalex)) { fcast <- fcast * object$scalex$scale + object$scalex$center } # Add ts properties fcast <- ts(fcast, start = tspx[2] + 1 / tspx[3], frequency = tspx[3]) # Back-transform point forecasts if (!is.null(lambda)) { fcast <- InvBoxCox(fcast, lambda) } # Compute prediction intervals using simulations if (isTRUE(PI)) { nint <- length(level) sim <- matrix(NA, nrow = npaths, ncol = h) if (!is.null(innov)) { if (length(innov) != h * npaths) { stop("Incorrect number of innovations, need h*npaths values") } innov <- matrix(innov, nrow = h, ncol = npaths) bootstrap <- FALSE } for (i in 1:npaths) sim[i, ] <- simulate(object, nsim = h, bootstrap = bootstrap, xreg = xreg, lambda = lambda, innov = innov[, i], ...) lower <- apply(sim, 2, quantile, 0.5 - level / 200, type = 8) upper <- apply(sim, 2, quantile, 0.5 + level / 200, type = 8) if (nint > 1L) { lower <- ts(t(lower)) upper <- ts(t(upper)) } else { lower <- ts(matrix(lower, ncol = 1L)) upper <- ts(matrix(upper, ncol = 1L)) } tsp(lower) <- tsp(upper) <- tsp(fcast) } else { level <- NULL lower <- NULL upper <- NULL } out$mean <- fcast out$level <- level out$lower <- lower out$upper <- upper return(structure(out, class = "forecast")) } #' @rdname fitted.Arima #' @export fitted.modelAR <- function(object, h=1, ...) { if (h == 1) { return(object$fitted) } else { return(hfitted(object = object, h = h, FUN = "modelAR", ...)) } } #' @export print.modelAR <- function(x, digits = max(3, getOption("digits") - 3), ...) { cat("Series:", x$series, "\n") cat("Model: ", x$method, "\n") cat("Call: ") print(x$call) print(x$model) cat( "\nsigma^2 estimated as ", format(mean(residuals(x) ^ 2, na.rm = TRUE), digits = digits), "\n", sep = "" ) invisible(x) } #' @rdname is.ets #' @export is.modelAR <- function(x) { inherits(x, "modelAR") } forecast/R/ets.R0000644000176200001440000011767314323125536013222 0ustar liggesusers#' Exponential smoothing state space model #' #' Returns ets model applied to \code{y}. #' #' Based on the classification of methods as described in Hyndman et al (2008). 
#' #' The methodology is fully automatic. The only required argument for ets is #' the time series. The model is chosen automatically if not specified. This #' methodology performed extremely well on the M3-competition data. (See #' Hyndman, et al, 2002, below.) #' #' @aliases print.ets summary.ets as.character.ets coef.ets tsdiag.ets #' #' @param y a numeric vector or time series of class \code{ts} #' @param model Usually a three-character string identifying method using the #' framework terminology of Hyndman et al. (2002) and Hyndman et al. (2008). #' The first letter denotes the error type ("A", "M" or "Z"); the second letter #' denotes the trend type ("N","A","M" or "Z"); and the third letter denotes #' the season type ("N","A","M" or "Z"). In all cases, "N"=none, "A"=additive, #' "M"=multiplicative and "Z"=automatically selected. So, for example, "ANN" is #' simple exponential smoothing with additive errors, "MAM" is multiplicative #' Holt-Winters' method with multiplicative errors, and so on. #' #' It is also possible for the model to be of class \code{"ets"}, and equal to #' the output from a previous call to \code{ets}. In this case, the same model #' is fitted to \code{y} without re-estimating any smoothing parameters. See #' also the \code{use.initial.values} argument. #' @param damped If TRUE, use a damped trend (either additive or #' multiplicative). If NULL, both damped and non-damped trends will be tried #' and the best model (according to the information criterion \code{ic}) #' returned. #' @param alpha Value of alpha. If NULL, it is estimated. #' @param beta Value of beta. If NULL, it is estimated. #' @param gamma Value of gamma. If NULL, it is estimated. #' @param phi Value of phi. If NULL, it is estimated. #' @param additive.only If TRUE, will only consider additive models. Default is #' FALSE. #' @param lambda Box-Cox transformation parameter. If \code{lambda="auto"}, #' then a transformation is automatically selected using \code{BoxCox.lambda}. #' The transformation is ignored if NULL. Otherwise, #' data transformed before model is estimated. When \code{lambda} is specified, #' \code{additive.only} is set to \code{TRUE}. #' @param lower Lower bounds for the parameters (alpha, beta, gamma, phi). Ignored if \code{bounds=="admissible"}. #' @param upper Upper bounds for the parameters (alpha, beta, gamma, phi). Ignored if \code{bounds=="admissible"}. #' @param opt.crit Optimization criterion. One of "mse" (Mean Square Error), #' "amse" (Average MSE over first \code{nmse} forecast horizons), "sigma" #' (Standard deviation of residuals), "mae" (Mean of absolute residuals), or #' "lik" (Log-likelihood, the default). #' @param nmse Number of steps for average multistep MSE (1<=\code{nmse}<=30). #' @param bounds Type of parameter space to impose: \code{"usual" } indicates #' all parameters must lie between specified lower and upper bounds; #' \code{"admissible"} indicates parameters must lie in the admissible space; #' \code{"both"} (default) takes the intersection of these regions. #' @param ic Information criterion to be used in model selection. #' @param restrict If \code{TRUE} (default), the models with infinite variance #' will not be allowed. #' @param allow.multiplicative.trend If \code{TRUE}, models with multiplicative #' trend are allowed when searching for a model. Otherwise, the model space #' excludes them. This argument is ignored if a multiplicative trend model is #' explicitly requested (e.g., using \code{model="MMN"}). 
#' @param use.initial.values If \code{TRUE} and \code{model} is of class #' \code{"ets"}, then the initial values in the model are also not #' re-estimated. #' @param na.action A function which indicates what should happen when the data #' contains NA values. By default, the largest contiguous portion of the #' time-series will be used. #' @param ... Other undocumented arguments. #' @inheritParams forecast.ts #' #' @return An object of class "\code{ets}". #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{ets} and associated #' functions. #' @author Rob J Hyndman #' @seealso \code{\link[stats]{HoltWinters}}, \code{\link{rwf}}, #' \code{\link{Arima}}. #' @references Hyndman, R.J., Koehler, A.B., Snyder, R.D., and Grose, S. (2002) #' "A state space framework for automatic forecasting using exponential #' smoothing methods", \emph{International J. Forecasting}, \bold{18}(3), #' 439--454. #' #' Hyndman, R.J., Akram, Md., and Archibald, B. (2008) "The admissible #' parameter space for exponential smoothing models". \emph{Annals of #' Statistical Mathematics}, \bold{60}(2), 407--426. #' #' Hyndman, R.J., Koehler, A.B., Ord, J.K., and Snyder, R.D. (2008) #' \emph{Forecasting with exponential smoothing: the state space approach}, #' Springer-Verlag. \url{http://www.exponentialsmoothing.net}. #' @keywords ts #' @examples #' fit <- ets(USAccDeaths) #' plot(forecast(fit)) #' #' @export ets <- function(y, model="ZZZ", damped=NULL, alpha=NULL, beta=NULL, gamma=NULL, phi=NULL, additive.only=FALSE, lambda=NULL, biasadj=FALSE, lower=c(rep(0.0001, 3), 0.8), upper=c(rep(0.9999, 3), 0.98), opt.crit=c("lik", "amse", "mse", "sigma", "mae"), nmse=3, bounds=c("both", "usual", "admissible"), ic=c("aicc", "aic", "bic"), restrict=TRUE, allow.multiplicative.trend=FALSE, use.initial.values=FALSE, na.action = c("na.contiguous", "na.interp", "na.fail"), ...) { # dataname <- substitute(y) opt.crit <- match.arg(opt.crit) bounds <- match.arg(bounds) ic <- match.arg(ic) if(!is.function(na.action)){ na.fn_name <- match.arg(na.action) na.action <- get(na.fn_name) } seriesname <- deparse(substitute(y)) if (any(class(y) %in% c("data.frame", "list", "matrix", "mts"))) { stop("y should be a univariate time series") } y <- as.ts(y) # Check if data is constant if (missing(model) && is.constant(y)) { return(ses(y, alpha = 0.99999, initial = "simple")$model) } # Remove missing values near ends ny <- length(y) y <- na.action(y) if (ny != length(y) && na.fn_name == "na.contiguous") { warning("Missing values encountered. 
Using longest contiguous portion of time series") ny <- length(y) } orig.y <- y if (identical(class(model), "ets") && is.null(lambda)) { lambda <- model$lambda } if (!is.null(lambda)) { y <- BoxCox(y, lambda) lambda <- attr(y, "lambda") additive.only <- TRUE } if (nmse < 1 || nmse > 30) { stop("nmse out of range") } m <- frequency(y) if (any(upper < lower)) { stop("Lower limits must be less than upper limits") } # If model is an ets object, re-fit model to new data if ("ets" %in% class(model)) { # Prevent alpha being zero (to avoid divide by zero in the C code) alpha <- max(model$par["alpha"], 1e-10) beta <- model$par["beta"] if (is.na(beta)) { beta <- NULL } gamma <- model$par["gamma"] if (is.na(gamma)) { gamma <- NULL } phi <- model$par["phi"] if (is.na(phi)) { phi <- NULL } modelcomponents <- paste(model$components[1], model$components[2], model$components[3], sep = "") damped <- (model$components[4] == "TRUE") if (use.initial.values) { errortype <- substr(modelcomponents, 1, 1) trendtype <- substr(modelcomponents, 2, 2) seasontype <- substr(modelcomponents, 3, 3) # Recompute errors from pegelsresid.C e <- pegelsresid.C(y, m, model$initstate, errortype, trendtype, seasontype, damped, alpha, beta, gamma, phi, nmse) # Compute error measures np <- length(model$par) + 1 model$loglik <- -0.5 * e$lik model$aic <- e$lik + 2 * np model$bic <- e$lik + log(ny) * np model$aicc <- model$aic + 2 * np * (np + 1) / (ny - np - 1) model$mse <- e$amse[1] model$amse <- mean(e$amse) # Compute states, fitted values and residuals tsp.y <- tsp(y) model$states <- ts(e$states, frequency = tsp.y[3], start = tsp.y[1] - 1 / tsp.y[3]) colnames(model$states)[1] <- "l" if (trendtype != "N") { colnames(model$states)[2] <- "b" } if (seasontype != "N") { colnames(model$states)[(2 + (trendtype != "N")):ncol(model$states)] <- paste("s", 1:m, sep = "") } if (errortype == "A") { model$fitted <- ts(y - e$e, frequency = tsp.y[3], start = tsp.y[1]) } else { model$fitted <- ts(y / (1 + e$e), frequency = tsp.y[3], start = tsp.y[1]) } model$residuals <- ts(e$e, frequency = tsp.y[3], start = tsp.y[1]) model$sigma2 <- sum(model$residuals ^ 2, na.rm = TRUE) / (ny - np) model$x <- orig.y model$series <- seriesname if (!is.null(lambda)) { model$fitted <- InvBoxCox(model$fitted, lambda, biasadj, var(model$residuals)) attr(lambda, "biasadj") <- biasadj } model$lambda <- lambda # Return model object return(model) } else { model <- modelcomponents if (missing(use.initial.values)) { message("Model is being refit with current smoothing parameters but initial states are being re-estimated.\nSet 'use.initial.values=TRUE' if you want to re-use existing initial values.") } } } errortype <- substr(model, 1, 1) trendtype <- substr(model, 2, 2) seasontype <- substr(model, 3, 3) if (!is.element(errortype, c("M", "A", "Z"))) { stop("Invalid error type") } if (!is.element(trendtype, c("N", "A", "M", "Z"))) { stop("Invalid trend type") } if (!is.element(seasontype, c("N", "A", "M", "Z"))) { stop("Invalid season type") } if (m < 1 || length(y) <= m) { # warning("I can't handle data with frequency less than 1. Seasonality will be ignored.") seasontype <- "N" } if (m == 1) { if (seasontype == "A" || seasontype == "M") { stop("Nonseasonal data") } else { substr(model, 3, 3) <- seasontype <- "N" } } if (m > 24) { if (is.element(seasontype, c("A", "M"))) { stop("Frequency too high") } else if (seasontype == "Z") { warning("I can't handle data with frequency greater than 24. Seasonality will be ignored. 
Try stlf() if you need seasonal forecasts.") substr(model, 3, 3) <- seasontype <- "N" # m <- 1 } } # Check inputs if (restrict) { if ((errortype == "A" && (trendtype == "M" || seasontype == "M")) | (errortype == "M" && trendtype == "M" && seasontype == "A") || (additive.only && (errortype == "M" || trendtype == "M" || seasontype == "M"))) { stop("Forbidden model combination") } } data.positive <- (min(y) > 0) if (!data.positive && errortype == "M") { stop("Inappropriate model for data with negative or zero values") } if (!is.null(damped)) { if (damped && trendtype == "N") { stop("Forbidden model combination") } } n <- length(y) # Check we have enough data to fit a model npars <- 2L # alpha + l0 if (trendtype == "A" || trendtype == "M") { npars <- npars + 2L } # beta + b0 if (seasontype == "A" || seasontype == "M") { npars <- npars + m } # gamma + s if (!is.null(damped)) { npars <- npars + as.numeric(damped) } # Produce something non-optimized for tiny data sets if (n <= npars + 4L) { if (!is.null(damped)) { if (damped) { warning("Not enough data to use damping") } } if (seasontype == "A" || seasontype == "M") { fit <- try(HoltWintersZZ( orig.y, alpha = alpha, beta = beta, gamma = gamma, phi = phi, exponential = (trendtype == "M"), seasonal = ifelse(seasontype != "A", "multiplicative", "additive"), lambda = lambda, biasadj = biasadj, warnings = FALSE ), silent = TRUE) if (!("try-error" %in% class(fit))) { fit$call <- match.call() fit$method <- as.character(fit) fit$series <- deparse(substitute(y)) return(fit) } else { warning("Seasonal component could not be estimated") } } if (trendtype == "A" || trendtype == "M") { fit <- try(HoltWintersZZ( orig.y, alpha = alpha, beta = beta, gamma = FALSE, phi = phi, exponential = (trendtype == "M"), lambda = lambda, biasadj = biasadj, warnings = FALSE ), silent = TRUE) if (!("try-error" %in% class(fit))) { fit$call <- match.call() fit$method <- as.character(fit) fit$series <- deparse(substitute(y)) return(fit) } else { warning("Trend component could not be estimated") } } if (trendtype == "N" && seasontype == "N") { fit <- try(HoltWintersZZ( orig.y, alpha = alpha, beta = FALSE, gamma = FALSE, lambda = lambda, biasadj = biasadj, warnings = FALSE ), silent = TRUE) if (!("try-error" %in% class(fit))) { fit$call <- match.call() fit$method <- as.character(fit) fit$series <- deparse(substitute(y)) return(fit) } } # Try holt and ses and return best fit1 <- try(HoltWintersZZ( orig.y, alpha = alpha, beta = beta, gamma = FALSE, phi = phi, exponential = (trendtype == "M"), lambda = lambda, biasadj = biasadj, warnings = FALSE ), silent = TRUE) fit2 <- try(HoltWintersZZ( orig.y, alpha = alpha, beta = FALSE, gamma = FALSE, phi = phi, exponential = (trendtype == "M"), lambda = lambda, biasadj = biasadj, warnings = FALSE ), silent = TRUE) if ("try-error" %in% class(fit1)) { fit <- fit2 } else if (fit1$sigma2 < fit2$sigma2) { fit <- fit1 } else { fit <- fit2 } if("try-error" %in% class(fit)) stop("Unable to estimate a model.") fit$call <- match.call() fit$method <- as.character(fit) fit$series <- deparse(substitute(y)) return(fit) } # Fit model (assuming only one nonseasonal model) if (errortype == "Z") { errortype <- c("A", "M") } if (trendtype == "Z") { if (allow.multiplicative.trend) { trendtype <- c("N", "A", "M") } else { trendtype <- c("N", "A") } } if (seasontype == "Z") { seasontype <- c("N", "A", "M") } if (is.null(damped)) { damped <- c(TRUE, FALSE) } best.ic <- Inf for (i in 1:length(errortype)) { for (j in 1:length(trendtype)) { for (k in 
1:length(seasontype)) { for (l in 1:length(damped)) { if (trendtype[j] == "N" && damped[l]) { next } if (restrict) { if (errortype[i] == "A" && (trendtype[j] == "M" || seasontype[k] == "M")) { next } if (errortype[i] == "M" && trendtype[j] == "M" && seasontype[k] == "A") { next } if (additive.only && (errortype[i] == "M" || trendtype[j] == "M" || seasontype[k] == "M")) { next } } if (!data.positive && errortype[i] == "M") { next } fit <- try(etsmodel( y, errortype[i], trendtype[j], seasontype[k], damped[l], alpha, beta, gamma, phi, lower = lower, upper = upper, opt.crit = opt.crit, nmse = nmse, bounds = bounds, ... ), silent=TRUE) if(is.element("try-error", class(fit))) fit.ic <- Inf else fit.ic <- switch(ic, aic = fit$aic, bic = fit$bic, aicc = fit$aicc) if (!is.na(fit.ic)) { if (fit.ic < best.ic) { model <- fit best.ic <- fit.ic best.e <- errortype[i] best.t <- trendtype[j] best.s <- seasontype[k] best.d <- damped[l] } } } } } } if (best.ic == Inf) { stop("No model able to be fitted") } model$m <- m model$method <- paste("ETS(", best.e, ",", best.t, ifelse(best.d, "d", ""), ",", best.s, ")", sep = "") model$series <- seriesname model$components <- c(best.e, best.t, best.s, best.d) model$call <- match.call() model$initstate <- model$states[1, ] np <- length(model$par) model$sigma2 <- sum(model$residuals^2, na.rm = TRUE) / (ny - np) model$x <- orig.y if (!is.null(lambda)) { model$fitted <- InvBoxCox(model$fitted, lambda, biasadj, model$sigma2) attr(lambda, "biasadj") <- biasadj } model$lambda <- lambda # model$call$data <- dataname return(structure(model, class = "ets")) } #' @export as.character.ets <- function(x, ...) { paste( "ETS(", x$components[1], ",", x$components[2], ifelse(x$components[4], "d", ""), ",", x$components[3], ")", sep = "" ) } # myRequire <- function(libName) { # req.suc <- require(libName, quietly=TRUE, character.only=TRUE) # if(!req.suc) stop("The ",libName," package is not available.") # req.suc # } # getNewBounds <- function(par, lower, upper, nstate) { # myLower <- NULL # myUpper <- NULL # if("alpha" %in% names(par)) { # myLower <- c(myLower, lower[1]) # myUpper <- c(myUpper, upper[1]) # } # if("beta" %in% names(par)) { # myLower <- c(myLower, lower[2]) # myUpper <- c(myUpper, upper[2]) # } # if("gamma" %in% names(par)) { # myLower <- c(myLower, lower[3]) # myUpper <- c(myUpper, upper[3]) # } # if("phi" %in% names(par)) { # myLower <- c(myLower, lower[4]) # myUpper <- c(myUpper, upper[4]) # } # myLower <- c(myLower,rep(-1e8,nstate)) # myUpper <- c(myUpper,rep(1e8,nstate)) # list(lower=myLower, upper=myUpper) # } etsmodel <- function(y, errortype, trendtype, seasontype, damped, alpha=NULL, beta=NULL, gamma=NULL, phi=NULL, lower, upper, opt.crit, nmse, bounds, maxit=2000, control=NULL, seed=NULL, trace=FALSE) { tsp.y <- tsp(y) if (is.null(tsp.y)) { tsp.y <- c(1, length(y), 1) } if (seasontype != "N") { m <- tsp.y[3] } else { m <- 1 } # Modify limits if alpha, beta or gamma have been specified. 
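# The traditional parameter region requires beta < alpha and gamma < 1 - alpha
# (see check.param() below), so fixing one of these parameters tightens the
# feasible range of the others.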
if (!is.null(alpha)) { upper[2] <- min(alpha, upper[2]) upper[3] <- min(1 - alpha, upper[3]) } if (!is.null(beta)) { lower[1] <- max(beta, lower[1]) } if (!is.null(gamma)) { upper[1] <- min(1 - gamma, upper[1]) } # Initialize smoothing parameters par <- initparam(alpha, beta, gamma, phi, trendtype, seasontype, damped, lower, upper, m, bounds) names(alpha) <- names(beta) <- names(gamma) <- names(phi) <- NULL par.noopt <- c(alpha = alpha, beta = beta, gamma = gamma, phi = phi) if (!is.null(par.noopt)) { par.noopt <- c(na.omit(par.noopt)) } if (!is.na(par["alpha"])) { alpha <- par["alpha"] } if (!is.na(par["beta"])) { beta <- par["beta"] } if (!is.na(par["gamma"])) { gamma <- par["gamma"] } if (!is.na(par["phi"])) { phi <- par["phi"] } # if(errortype=="M" | trendtype=="M" | seasontype=="M") # bounds="usual" if (!check.param(alpha, beta, gamma, phi, lower, upper, bounds, m)) { print(paste("Model: ETS(", errortype, ",", trendtype, ifelse(damped, "d", ""), ",", seasontype, ")", sep = "")) stop("Parameters out of range") } # Initialize state init.state <- initstate(y, trendtype, seasontype) nstate <- length(init.state) par <- c(par, init.state) lower <- c(lower, rep(-Inf, nstate)) upper <- c(upper, rep(Inf, nstate)) np <- length(par) if (np >= length(y) - 1) { # Not enough data to continue return(list(aic = Inf, bic = Inf, aicc = Inf, mse = Inf, amse = Inf, fit = NULL, par = par, states = init.state)) } # ------------------------------------------------- # if(is.null(seed)) seed <- 1000*runif(1) # if(solver=="malschains" || solver=="malschains_c") { # malschains <- NULL # if(!myRequire("Rmalschains")) # stop("malschains optimizer unavailable") # func <- NULL # #env <- NULL # if(solver=="malschains") { # func <- function(myPar) { # names(myPar) <- names(par) # res <- lik(myPar,y=y,nstate=nstate, errortype=errortype, trendtype=trendtype, # seasontype=seasontype, damped=damped, par.noopt=par.noopt, lowerb=lower, upperb=upper, # opt.crit=opt.crit, nmse=nmse, bounds=bounds, m=m,pnames=names(par),pnames2=names(par.noopt)) # res # } # env <- new.env() # } else { # env <- etsTargetFunctionInit(par=par, y=y, nstate=nstate, errortype=errortype, trendtype=trendtype, # seasontype=seasontype, damped=damped, par.noopt=par.noopt, lowerb=lower, upperb=upper, # opt.crit=opt.crit, nmse=nmse, bounds=bounds, m=m,pnames=names(par),pnames2=names(par.noopt)) # func <- .Call("etsGetTargetFunctionRmalschainsPtr", PACKAGE="forecast") # } # myBounds <- getNewBounds(par, lower, upper, nstate) # if(is.null(control)) { # control <- Rmalschains::malschains.control(ls="simplex", lsOnly=TRUE) # } # control$optimum <- if(opt.crit=="lik") -1e12 else 0 # fredTmp <- Rmalschains::malschains(func, env=env, lower=myBounds$lower, upper=myBounds$upper, # maxEvals=maxit, seed=seed, initialpop=par, control=control) # fred <- NULL # fred$par <- fredTmp$sol # fit.par <- fred$par # names(fit.par) <- names(par) # } else if (solver=="Rdonlp2") { # # donlp2 <- NULL # myRequire("Rdonlp2") # # env <- etsTargetFunctionInit(par=par, y=y, nstate=nstate, errortype=errortype, trendtype=trendtype, # seasontype=seasontype, damped=damped, par.noopt=par.noopt, lowerb=lower, upperb=upper, # opt.crit=opt.crit, nmse=nmse, bounds=bounds, m=m,pnames=names(par),pnames2=names(par.noopt)) # # func <- .Call("etsGetTargetFunctionRdonlp2Ptr", PACKAGE="forecast") # # myBounds <- getNewBounds(par, lower, upper, nstate) # # fred <- donlp2(par, func, env=env, par.lower=myBounds$lower, par.upper=myBounds$upper)#, nlin.lower=c(-1), nlin.upper=c(1)) #nlin.lower=c(0,-Inf, 
-Inf, -Inf), nlin.upper=c(0,0,0,0)) # # fit.par <- fred$par # # names(fit.par) <- names(par) # } else if(solver=="optim_c"){ env <- etsTargetFunctionInit( par = par, y = y, nstate = nstate, errortype = errortype, trendtype = trendtype, seasontype = seasontype, damped = damped, par.noopt = par.noopt, lowerb = lower, upperb = upper, opt.crit = opt.crit, nmse = as.integer(nmse), bounds = bounds, m = m, pnames = names(par), pnames2 = names(par.noopt) ) fred <- .Call( "etsNelderMead", par, env, -Inf, sqrt(.Machine$double.eps), 1.0, 0.5, 2.0, trace, maxit, PACKAGE = "forecast" ) fit.par <- fred$par names(fit.par) <- names(par) # } else { #if(solver=="optim") # # Optimize parameters and state # if(length(par)==1) # method <- "Brent" # else # method <- "Nelder-Mead" # fred <- optim(par,lik,method=method,y=y,nstate=nstate, errortype=errortype, trendtype=trendtype, # seasontype=seasontype, damped=damped, par.noopt=par.noopt, lowerb=lower, upperb=upper, # opt.crit=opt.crit, nmse=nmse, bounds=bounds, m=m,pnames=names(par),pnames2=names(par.noopt), # control=list(maxit=maxit)) # fit.par <- fred$par # names(fit.par) <- names(par) # } # ------------------------------------------------- init.state <- fit.par[(np - nstate + 1):np] # Add extra state if (seasontype != "N") { init.state <- c(init.state, m * (seasontype == "M") - sum(init.state[(2 + (trendtype != "N")):nstate])) } if (!is.na(fit.par["alpha"])) { alpha <- fit.par["alpha"] } if (!is.na(fit.par["beta"])) { beta <- fit.par["beta"] } if (!is.na(fit.par["gamma"])) { gamma <- fit.par["gamma"] } if (!is.na(fit.par["phi"])) { phi <- fit.par["phi"] } e <- pegelsresid.C(y, m, init.state, errortype, trendtype, seasontype, damped, alpha, beta, gamma, phi, nmse) np <- np + 1 ny <- length(y) aic <- e$lik + 2 * np bic <- e$lik + log(ny) * np aicc <- aic + 2 * np * (np + 1) / (ny - np - 1) mse <- e$amse[1] amse <- mean(e$amse) states <- ts(e$states, frequency = tsp.y[3], start = tsp.y[1] - 1 / tsp.y[3]) colnames(states)[1] <- "l" if (trendtype != "N") { colnames(states)[2] <- "b" } if (seasontype != "N") { colnames(states)[(2 + (trendtype != "N")):ncol(states)] <- paste("s", 1:m, sep = "") } tmp <- c("alpha", rep("beta", trendtype != "N"), rep("gamma", seasontype != "N"), rep("phi", damped)) fit.par <- c(fit.par, par.noopt) # fit.par <- fit.par[order(names(fit.par))] if (errortype == "A") { fits <- y - e$e } else { fits <- y / (1 + e$e) } return(list( loglik = -0.5 * e$lik, aic = aic, bic = bic, aicc = aicc, mse = mse, amse = amse, fit = fred, residuals = ts(e$e, frequency = tsp.y[3], start = tsp.y[1]), fitted = ts(fits, frequency = tsp.y[3], start = tsp.y[1]), states = states, par = fit.par )) } etsTargetFunctionInit <- function(par, y, nstate, errortype, trendtype, seasontype, damped, par.noopt, lowerb, upperb, opt.crit, nmse, bounds, m, pnames, pnames2) { names(par) <- pnames names(par.noopt) <- pnames2 alpha <- c(par["alpha"], par.noopt["alpha"])["alpha"] if (is.na(alpha)) { stop("alpha problem!") } if (trendtype != "N") { beta <- c(par["beta"], par.noopt["beta"])["beta"] if (is.na(beta)) { stop("beta Problem!") } } else { beta <- NULL } if (seasontype != "N") { gamma <- c(par["gamma"], par.noopt["gamma"])["gamma"] if (is.na(gamma)) { stop("gamma Problem!") } } else { m <- 1 gamma <- NULL } if (damped) { phi <- c(par["phi"], par.noopt["phi"])["phi"] if (is.na(phi)) { stop("phi Problem!") } } else { phi <- NULL } # determine which values to optimize and which ones are given by the user/not needed optAlpha <- !is.null(alpha) optBeta <- !is.null(beta) 
optGamma <- !is.null(gamma) optPhi <- !is.null(phi) givenAlpha <- FALSE givenBeta <- FALSE givenGamma <- FALSE givenPhi <- FALSE if (!is.null(par.noopt["alpha"])) { if (!is.na(par.noopt["alpha"])) { optAlpha <- FALSE givenAlpha <- TRUE } } if (!is.null(par.noopt["beta"])) { if (!is.na(par.noopt["beta"])) { optBeta <- FALSE givenBeta <- TRUE } } if (!is.null(par.noopt["gamma"])) { if (!is.na(par.noopt["gamma"])) { optGamma <- FALSE givenGamma <- TRUE } } if (!is.null(par.noopt["phi"])) { if (!is.na(par.noopt["phi"])) { optPhi <- FALSE givenPhi <- TRUE } } if (!damped) { phi <- 1 } if (trendtype == "N") { beta <- 0 } if (seasontype == "N") { gamma <- 0 } # cat("alpha: ", alpha) # cat(" beta: ", beta) # cat(" gamma: ", gamma) # cat(" phi: ", phi, "\n") # # cat("useAlpha: ", useAlpha) # cat(" useBeta: ", useBeta) # cat(" useGamma: ", useGamma) # cat(" usePhi: ", usePhi, "\n") env <- new.env() res <- .Call( "etsTargetFunctionInit", y = y, nstate = nstate, errortype = switch(errortype, "A" = 1, "M" = 2), trendtype = switch(trendtype, "N" = 0, "A" = 1, "M" = 2), seasontype = switch(seasontype, "N" = 0, "A" = 1, "M" = 2), damped = damped, lowerb = lowerb, upperb = upperb, opt.crit = opt.crit, nmse = as.integer(nmse), bounds = bounds, m = m, optAlpha, optBeta, optGamma, optPhi, givenAlpha, givenBeta, givenGamma, givenPhi, alpha, beta, gamma, phi, env, PACKAGE = "forecast" ) res } initparam <- function(alpha, beta, gamma, phi, trendtype, seasontype, damped, lower, upper, m, bounds) { if(bounds == "admissible") { lower[1L:3L] <- lower[1L:3L]*0 upper[1L:3L] <- upper[1L:3L]*0 + 1e-3 } else if (any(lower > upper)) { stop("Inconsistent parameter boundaries") } # Select alpha if (is.null(alpha)) { alpha <- lower[1] + 0.2 * (upper[1] - lower[1]) / m if (alpha > 1 || alpha < 0) { alpha <- lower[1] + 2e-3 } par <- c(alpha = alpha) } else { par <- numeric(0) } # Select beta if (trendtype != "N" && is.null(beta)) { # Ensure beta < alpha upper[2] <- min(upper[2], alpha) beta <- lower[2] + 0.1 * (upper[2] - lower[2]) if (beta < 0 || beta > alpha) { beta <- alpha - 1e-3 } par <- c(par, beta = beta) } # Select gamma if (seasontype != "N" && is.null(gamma)) { # Ensure gamma < 1-alpha upper[3] <- min(upper[3], 1 - alpha) gamma <- lower[3] + 0.05 * (upper[3] - lower[3]) if (gamma < 0 || gamma > 1 - alpha) { gamma <- 1 - alpha - 1e-3 } par <- c(par, gamma = gamma) } # Select phi if (damped && is.null(phi)) { phi <- lower[4] + .99 * (upper[4] - lower[4]) if (phi < 0 || phi > 1) { phi <- upper[4] - 1e-3 } par <- c(par, phi = phi) } return(par) } check.param <- function(alpha, beta, gamma, phi, lower, upper, bounds, m) { if (bounds != "admissible") { if (!is.null(alpha)) { if (alpha < lower[1] || alpha > upper[1]) { return(0) } } if (!is.null(beta)) { if (beta < lower[2] || beta > alpha || beta > upper[2]) { return(0) } } if (!is.null(phi)) { if (phi < lower[4] || phi > upper[4]) { return(0) } } if (!is.null(gamma)) { if (gamma < lower[3] || gamma > 1 - alpha || gamma > upper[3]) { return(0) } } } if (bounds != "usual") { if (!admissible(alpha, beta, gamma, phi, m)) { return(0) } } return(1) } initstate <- function(y, trendtype, seasontype) { if (seasontype != "N") { # Do decomposition m <- frequency(y) n <- length(y) if (n < 4) { stop("You've got to be joking (not enough data).") } else if (n < 3 * m) # Fit simple Fourier model. { fouriery <- fourier(y, 1) fit <- tslm(y ~ trend + fouriery) if (seasontype == "A") { y.d <- list(seasonal = y - fit$coefficients[1] - fit$coefficients[2] * (1:n)) } else { # seasontype=="M". 
Biased method, but we only need a starting point y.d <- list(seasonal = y / (fit$coefficients[1] + fit$coefficients[2] * (1:n))) } } else { # n is large enough to do a decomposition y.d <- decompose(y, type = switch(seasontype, A = "additive", M = "multiplicative")) } init.seas <- rev(y.d$seasonal[2:m]) # initial seasonal component names(init.seas) <- paste("s", 0:(m - 2), sep = "") # Seasonally adjusted data if (seasontype == "A") { y.sa <- y - y.d$seasonal } else { init.seas <- pmax(init.seas, 1e-2) # We do not want negative seasonal indexes if (sum(init.seas) > m) { init.seas <- init.seas / sum(init.seas + 1e-2) } y.sa <- y / pmax(y.d$seasonal, 1e-2) } } else # non-seasonal model { m <- 1 init.seas <- NULL y.sa <- y } maxn <- min(max(10, 2 * m), length(y.sa)) if (trendtype == "N") { l0 <- mean(y.sa[1:maxn]) b0 <- NULL } else # Simple linear regression on seasonally adjusted data { fit <- lsfit(1:maxn, y.sa[1:maxn]) if (trendtype == "A") { l0 <- fit$coefficients[1] b0 <- fit$coefficients[2] # If error type is "M", then we don't want l0+b0=0. # So perturb just in case. if (abs(l0 + b0) < 1e-8) { l0 <- l0 * (1 + 1e-3) b0 <- b0 * (1 - 1e-3) } } else # if(trendtype=="M") { l0 <- fit$coefficients[1] + fit$coefficients[2] # First fitted value if (abs(l0) < 1e-8) { l0 <- 1e-7 } b0 <- (fit$coefficients[1] + 2 * fit$coefficients[2]) / l0 # Ratio of first two fitted values l0 <- l0 / b0 # First fitted value divided by b0 if (abs(b0) > 1e10) { # Avoid infinite slopes b0 <- sign(b0) * 1e10 } if (l0 < 1e-8 || b0 < 1e-8) # Simple linear approximation didn't work. { l0 <- max(y.sa[1], 1e-3) b0 <- max(y.sa[2] / y.sa[1], 1e-3) } } } names(l0) <- "l" if (!is.null(b0)) { names(b0) <- "b" } return(c(l0, b0, init.seas)) } lik <- function(par, y, nstate, errortype, trendtype, seasontype, damped, par.noopt, lowerb, upperb, opt.crit, nmse, bounds, m, pnames, pnames2) { # browser() # cat("par: ", par, "\n") names(par) <- pnames names(par.noopt) <- pnames2 alpha <- c(par["alpha"], par.noopt["alpha"])["alpha"] if (is.na(alpha)) { stop("alpha problem!") } if (trendtype != "N") { beta <- c(par["beta"], par.noopt["beta"])["beta"] if (is.na(beta)) { stop("beta Problem!") } } else { beta <- NULL } if (seasontype != "N") { gamma <- c(par["gamma"], par.noopt["gamma"])["gamma"] if (is.na(gamma)) { stop("gamma Problem!") } } else { m <- 1 gamma <- NULL } if (damped) { phi <- c(par["phi"], par.noopt["phi"])["phi"] if (is.na(phi)) { stop("phi Problem!") } } else { phi <- NULL } if (!check.param(alpha, beta, gamma, phi, lowerb, upperb, bounds, m)) { return(Inf) } np <- length(par) init.state <- par[(np - nstate + 1):np] # Add extra state if (seasontype != "N") { init.state <- c(init.state, m * (seasontype == "M") - sum(init.state[(2 + (trendtype != "N")):nstate])) } # Check states if (seasontype == "M") { seas.states <- init.state[-(1:(1 + (trendtype != "N")))] if (min(seas.states) < 0) { return(Inf) } } e <- pegelsresid.C(y, m, init.state, errortype, trendtype, seasontype, damped, alpha, beta, gamma, phi, nmse) if (is.na(e$lik)) { return(Inf) } if (e$lik < -1e10) { # Avoid perfect fits return(-1e10) } # cat("lik: ", e$lik, "\n") # points(alpha,e$lik,col=2) if (opt.crit == "lik") { return(e$lik) } else if (opt.crit == "mse") { return(e$amse[1]) } else if (opt.crit == "amse") { return(mean(e$amse)) } else if (opt.crit == "sigma") { return(mean(e$e ^ 2)) } else if (opt.crit == "mae") { return(mean(abs(e$e))) } } #' @export print.ets <- function(x, ...) 
{ cat(paste(x$method, "\n\n")) if(!is.null(x$call)) { cat(paste("Call:\n", deparse(x$call), "\n\n")) } ncoef <- length(x$initstate) if (!is.null(x$lambda)) { cat(" Box-Cox transformation: lambda=", round(x$lambda, 4), "\n\n") } cat(" Smoothing parameters:\n") cat(paste(" alpha =", round(x$par["alpha"], 4), "\n")) if (x$components[2] != "N") { cat(paste(" beta =", round(x$par["beta"], 4), "\n")) } if (x$components[3] != "N") { cat(paste(" gamma =", round(x$par["gamma"], 4), "\n")) } if (x$components[4] != "FALSE") { cat(paste(" phi =", round(x$par["phi"], 4), "\n")) } cat("\n Initial states:\n") cat(paste(" l =", round(x$initstate[1], 4), "\n")) if (x$components[2] != "N") { cat(paste(" b =", round(x$initstate[2], 4), "\n")) } else { x$initstate <- c(x$initstate[1], NA, x$initstate[2:ncoef]) ncoef <- ncoef + 1 } if (x$components[3] != "N") { cat(" s = ") if (ncoef <= 8) { cat(round(x$initstate[3:ncoef], 4)) } else { cat(round(x$initstate[3:8], 4)) cat("\n ") cat(round(x$initstate[9:ncoef], 4)) } cat("\n") } cat("\n sigma: ") cat(round(sqrt(x$sigma2), 4)) if (!is.null(x$aic)) { stats <- c(x$aic, x$aicc, x$bic) names(stats) <- c("AIC", "AICc", "BIC") cat("\n\n") print(stats) } # cat("\n AIC: ") # cat(round(x$aic,4)) # cat("\n AICc: ") # cat(round(x$aicc,4)) # cat("\n BIC: ") # cat(round(x$bic,4)) } pegelsresid.C <- function(y, m, init.state, errortype, trendtype, seasontype, damped, alpha, beta, gamma, phi, nmse) { n <- length(y) p <- length(init.state) x <- numeric(p * (n + 1)) x[1:p] <- init.state e <- numeric(n) lik <- 0 if (!damped) { phi <- 1 } if (trendtype == "N") { beta <- 0 } if (seasontype == "N") { gamma <- 0 } amse <- numeric(nmse) Cout <- .C( "etscalc", as.double(y), as.integer(n), as.double(x), as.integer(m), as.integer(switch(errortype, "A" = 1, "M" = 2)), as.integer(switch(trendtype, "N" = 0, "A" = 1, "M" = 2)), as.integer(switch(seasontype, "N" = 0, "A" = 1, "M" = 2)), as.double(alpha), as.double(beta), as.double(gamma), as.double(phi), as.double(e), as.double(lik), as.double(amse), as.integer(nmse), PACKAGE = "forecast" ) if (!is.na(Cout[[13]])) { if (abs(Cout[[13]] + 99999) < 1e-7) { Cout[[13]] <- NA } } tsp.y <- tsp(y) e <- ts(Cout[[12]]) tsp(e) <- tsp.y return(list(lik = Cout[[13]], amse = Cout[[14]], e = e, states = matrix(Cout[[3]], nrow = n + 1, ncol = p, byrow = TRUE))) } admissible <- function(alpha, beta, gamma, phi, m) { if (is.null(phi)) { phi <- 1 } if (phi < 0 || phi > 1 + 1e-8) { return(0) } if (is.null(gamma)) { if (alpha < 1 - 1 / phi || alpha > 1 + 1 / phi) { return(0) } if (!is.null(beta)) { if (beta < alpha * (phi - 1) || beta > (1 + phi) * (2 - alpha)) { return(0) } } } else if (m > 1) # Seasonal model { if (is.null(beta)) { beta <- 0 } if (gamma < max(1 - 1 / phi - alpha, 0) || gamma > 1 + 1 / phi - alpha) { return(0) } if (alpha < 1 - 1 / phi - gamma * (1 - m + phi + phi * m) / (2 * phi * m)) { return(0) } if (beta < -(1 - phi) * (gamma / m + alpha)) { return(0) } # End of easy tests. Now use characteristic equation P <- c(phi * (1 - alpha - gamma), alpha + beta - alpha * phi + gamma - 1, rep(alpha + beta - alpha * phi, m - 2), (alpha + beta - phi), 1) roots <- polyroot(P) # cat("maxpolyroots: ", max(abs(roots)), "\n") if (max(abs(roots)) > 1 + 1e-10) { return(0) } } # Passed all tests return(1) } ### PLOT COMPONENTS #' Plot components from ETS model #' #' Produces a plot of the level, slope and seasonal components from an ETS #' model. #' #' \code{autoplot} will produce an equivalent plot as a ggplot object. #' #' @param x Object of class \dQuote{ets}. 
#' @param object Object of class \dQuote{ets}. Used for ggplot graphics (S3 #' method consistency). #' @param range.bars Logical indicating if each plot should have a bar at its #' right side representing relative size. If NULL, automatic selection takes #' place. #' @param ... Other plotting parameters to affect the plot. #' @return None. Function produces a plot. #' @author Rob J Hyndman & Mitchell O'Hara-Wild #' @seealso \code{\link{ets}} #' @keywords hplot #' @examples #' #' fit <- ets(USAccDeaths) #' plot(fit) #' plot(fit,plot.type="single",ylab="",col=1:3) #' #' library(ggplot2) #' autoplot(fit) #' #' @export plot.ets <- function(x, ...) { if (!is.null(x$lambda)) { y <- BoxCox(x$x, x$lambda) } else { y <- x$x } if (x$components[3] == "N" && x$components[2] == "N") { plot( cbind(observed = y, level = x$states[, 1]), main = paste("Decomposition by", x$method, "method"), ... ) } else if (x$components[3] == "N") { plot( cbind(observed = y, level = x$states[, 1], slope = x$states[, "b"]), main = paste("Decomposition by", x$method, "method"), ... ) } else if (x$components[2] == "N") { plot( cbind(observed = y, level = x$states[, 1], season = x$states[, "s1"]), main = paste("Decomposition by", x$method, "method"), ... ) } else { plot( cbind( observed = y, level = x$states[, 1], slope = x$states[, "b"], season = x$states[, "s1"] ), main = paste("Decomposition by", x$method, "method"), ... ) } } #' @export summary.ets <- function(object, ...) { class(object) <- c("summary.ets", class(object)) object } #' @export print.summary.ets <- function(x, ...) { NextMethod() cat("\nTraining set error measures:\n") print(accuracy(x)) } #' @export coef.ets <- function(object, ...) { object$par } #' @rdname fitted.Arima #' @export fitted.ets <- function(object, h=1, ...) { if (h == 1) { return(object$fitted) } else { return(hfitted(object = object, h = h, FUN = "ets", ...)) } } #' @export logLik.ets <- function(object, ...) { structure(object$loglik, df = length(object$par) + 1, class = "logLik") } #' @export nobs.ets <- function(object, ...) { length(object$x) } #' Is an object a particular model type? #' #' Returns true if the model object is of a particular type #' #' @param x object to be tested #' @export is.ets <- function(x) { inherits(x, "ets") } forecast/R/spline.R0000644000176200001440000002010314323125536013676 0ustar liggesusers############################################### ##### Forecasting Using Smoothing Splines ##### ############################################### # Optimal smoothing parameter denoted by beta # lambda is Box-Cox parameter.
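# A quick orientation to make.Sigma() below (a clarifying sketch; the closed
# form is read off the loop): Sigma[i, j] = i^2 * (3j - i) / 6 for i <= j,
# scaled by n^3. This matches the covariance kernel
# Cov(X(s), X(t)) = s^2 * (3t - s) / 6 (for s <= t) of an integrated Wiener
# process, the stochastic process underlying the cubic smoothing spline.
# For example:
#   S <- make.Sigma(4)
#   S[2, 3] * 4^3       # 2^2 * (3*3 - 2) / 6 = 14/3
#   all.equal(S, t(S))  # TRUE: Sigma is symmetric by construction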
################# FUNCTIONS ################## ## Set up Sigma of order (n x n) make.Sigma <- function(n, n0=0) { nn <- n + n0 Sigma <- matrix(0, nrow = nn, ncol = nn) for (i in 1:nn) Sigma[i, i:nn] <- Sigma[i:nn, i] <- (i * i * (3 * (i:nn) - i)) / 6 return(Sigma / (n ^ 3)) } ## Compute spline matrices spline.matrices <- function(n, beta, cc=1e2, n0=0) { nn <- n + n0 Sigma <- make.Sigma(n, n0) s <- cbind(rep(1, nn), (1:nn) / n) Omega <- cc * s %*% t(s) + Sigma / beta + diag(nn) max.Omega <- max(Omega) inv.Omega <- solve(Omega / max.Omega, tol = 1e-10) / max.Omega P <- chol(inv.Omega) return(list(s = s, Sigma = Sigma, Omega = Omega, inv.Omega = inv.Omega, P = P)) } ## Compute smoothing splines ## Return -loglikelihood # beta multiplied by 1e6 to avoid numerical difficulties in optimization spline.loglik <- function(beta, y, cc=1e2) { n <- length(y) mat <- spline.matrices(n, beta / 1e6, cc = cc) y.star <- mat$P %*% matrix(y) return(-log(det(mat$P)) + 0.5 * n * log(sum(y.star ^ 2))) } # Spline forecasts #' Cubic Spline Forecast #' #' Returns local linear forecasts and prediction intervals using cubic #' smoothing splines. #' #' The cubic smoothing spline model is equivalent to an ARIMA(0,2,2) model but #' with a restricted parameter space. The advantage of the spline model over #' the full ARIMA model is that it provides a smooth historical trend as well #' as a linear forecast function. Hyndman, King, Pitrun, and Billah (2005) show #' that the forecast performance of the method is hardly affected by the #' restricted parameter space. #' #' @param y a numeric vector or time series of class \code{ts} #' @param h Number of periods for forecasting #' @param level Confidence level for prediction intervals. #' @param fan If TRUE, level is set to seq(51,99,by=3). This is suitable for #' fan plots. #' @param method Method for selecting the smoothing parameter. If #' \code{method="gcv"}, the generalized cross-validation method from #' \code{\link[stats]{smooth.spline}} is used. If \code{method="mle"}, the #' maximum likelihood method from Hyndman et al (2005) is used. #' @param x Deprecated. Included for backwards compatibility. #' @inheritParams forecast.ts #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{splinef}. #' #' An object of class \code{"forecast"} containing the following elements: #' \item{model}{A list containing information about the fitted model} #' \item{method}{The name of the forecasting method as a character string} #' \item{mean}{Point forecasts as a time series} \item{lower}{Lower limits for #' prediction intervals} \item{upper}{Upper limits for prediction intervals} #' \item{level}{The confidence values associated with the prediction intervals} #' \item{x}{The original time series (either \code{object} itself or the time #' series used to create the model stored as \code{object}).} #' \item{onestepf}{One-step forecasts from the fitted model.} #' \item{fitted}{Smooth estimates of the fitted trend using all data.} #' \item{residuals}{Residuals from the fitted model. That is x minus one-step #' forecasts.} #' @author Rob J Hyndman #' @seealso \code{\link[stats]{smooth.spline}}, \code{\link[stats]{arima}}, #' \code{\link{holt}}.
#' @references Hyndman, King, Pitrun and Billah (2005) Local linear forecasts #' using cubic smoothing splines. \emph{Australian and New Zealand Journal of #' Statistics}, \bold{47}(1), 87-99. #' \url{https://robjhyndman.com/publications/splinefcast/}. #' @keywords ts #' @examples #' fcast <- splinef(uspop,h=5) #' plot(fcast) #' summary(fcast) #' #' @export splinef <- function(y, h=10, level=c(80, 95), fan=FALSE, lambda=NULL, biasadj=FALSE, method=c("gcv", "mle"), x=y) { method <- match.arg(method) if (!is.ts(x)) { x <- ts(x) } n <- length(x) freq <- frequency(x) if (!is.null(lambda)) { origx <- x x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") } # Find optimal beta using likelihood approach in Hyndman et al paper. if (method == "mle") { if (n > 100) { # Use only last 100 observations to get beta xx <- x[(n - 99):n] } else { xx <- x } beta.est <- optimize(spline.loglik, interval = c(1e-6, 1e7), y = xx)$minimum / 1e6 # Compute spar which is equivalent to beta r <- 256 * smooth.spline(1:n, x, spar = 0)$lambda lss <- beta.est * n ^ 3 / (n - 1) ^ 3 spar <- (log(lss / r) / log(256) + 1) / 3 splinefit <- smooth.spline(1:n, x, spar = spar) sfits <- splinefit$y } else # Use GCV { splinefit <- smooth.spline(1:n, x, cv = FALSE, spar = NULL) sfits <- splinefit$y beta.est <- pmax(1e-7, splinefit$lambda * (n - 1) ^ 3 / n ^ 3) } # Compute matrices for optimal beta mat <- spline.matrices(n, beta.est) newmat <- spline.matrices(n, beta.est, n0 = h) # Get one-step predictors yfit <- e <- rep(NA, n) if (n > 1000) { warning("Series too long to compute training set fits and residuals") } else # This is probably grossly inefficient but I can't think of a better way right now { for (i in 1:(n - 1)) { U <- mat$Omega[1:i, i + 1] Oinv <- solve(mat$Omega[1:i, 1:i] / 1e6) / 1e6 yfit[i + 1] <- t(U) %*% Oinv %*% x[1:i] sd <- sqrt(mat$Omega[i + 1, i + 1] - t(U) %*% Oinv %*% U) e[i + 1] <- (x[i + 1] - yfit[i + 1]) / sd } } # Compute sigma^2 sigma2 <- mean(e ^ 2, na.rm = TRUE) # Compute mean and var of forecasts U <- newmat$Omega[1:n, n + (1:h)] Omega0 <- newmat$Omega[n + (1:h), n + (1:h)] Yhat <- t(U) %*% mat$inv.Omega %*% x sd <- sqrt(sigma2 * diag(Omega0 - t(U) %*% mat$inv.Omega %*% U)) # Compute prediction intervals. 
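# For a level L (as a percentage), the half-width below uses the normal
# quantile qnorm(0.5 + 0.005 * L); e.g. L = 95 gives qnorm(0.975), about
# 1.96, so the bounds are Yhat +/- 1.96 * sd.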
if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } nconf <- length(level) lower <- upper <- matrix(NA, nrow = h, ncol = nconf) for (i in 1:nconf) { conf.factor <- qnorm(0.5 + 0.005 * level[i]) upper[, i] <- Yhat + conf.factor * sd lower[, i] <- Yhat - conf.factor * sd } lower <- ts(lower, start = tsp(x)[2] + 1 / freq, frequency = freq) upper <- ts(upper, start = tsp(x)[2] + 1 / freq, frequency = freq) res <- ts(x - yfit, start = start(x), frequency = freq) if (!is.null(lambda)) { Yhat <- InvBoxCox(Yhat, lambda, biasadj, list(level = level, upper = upper, lower = lower)) upper <- InvBoxCox(upper, lambda) lower <- InvBoxCox(lower, lambda) yfit <- InvBoxCox(yfit, lambda) sfits <- InvBoxCox(sfits, lambda) x <- origx } return(structure( list( method = "Cubic Smoothing Spline", level = level, x = x, series = deparse(substitute(y)), mean = ts(Yhat, frequency = freq, start = tsp(x)[2] + 1 / freq), upper = ts(upper, start = tsp(x)[2] + 1 / freq, frequency = freq), lower = ts(lower, start = tsp(x)[2] + 1 / freq, frequency = freq), model = list(beta = beta.est * n ^ 3, call = match.call()), fitted = ts(sfits, start = start(x), frequency = freq), residuals = res, standardizedresiduals = ts(e, start = start(x), frequency = freq), onestepf = ts(yfit, start = start(x), frequency = freq) ), lambda = lambda, class = c("splineforecast", "forecast") )) } #' @rdname plot.forecast #' #' @examples #' fcast <- splinef(airmiles,h=5) #' plot(fcast) #' autoplot(fcast) #' #' @export plot.splineforecast <- function(x, fitcol=2, type="o", pch=19, ...) { plot.forecast(x, type = type, pch = pch, ...) lines(x$fitted, col = fitcol) } #' @rdname is.forecast #' @export is.splineforecast <- function(x) { inherits(x, "splineforecast") } forecast/R/whichmodels.R0000644000176200001440000000103714323125536014717 0ustar liggesusersWhichModels <- function(max.p, max.q, max.P, max.Q, maxK) { total.models <- (max.p + 1) * (max.q + 1) * (max.P + 1) * (max.Q + 1) * length(0:maxK) x <- numeric(total.models) i <- 1 for (x1 in 0:max.p) for (x2 in 0:max.q) { for (x3 in 0:max.P) for (x4 in 0:max.Q) { for (K in 0:maxK) { x[i] <- paste(x1, "f", x2, "f", x3, "f", x4, "f", K, sep = "") i <- i + 1 } } } return(x) } UndoWhichModels <- function(n) { as.numeric(unlist(strsplit(n, split = "f"))) } forecast/R/forecast2.R0000644000176200001440000005077714332530471014316 0ustar liggesusers# Mean forecast #' Mean Forecast #' #' Returns forecasts and prediction intervals for an iid model applied to y. #' #' The iid model is \deqn{Y_t=\mu + Z_t}{Y[t]=mu + Z[t]} where \eqn{Z_t}{Z[t]} #' is a normal iid error. Forecasts are given by \deqn{Y_n(h)=\mu}{Y[n+h]=mu} #' where \eqn{\mu}{mu} is estimated by the sample mean. #' #' @param y a numeric vector or time series of class \code{ts} #' @param h Number of periods for forecasting #' @param level Confidence levels for prediction intervals. #' @param fan If TRUE, level is set to seq(51,99,by=3). This is suitable for #' fan plots. #' @param bootstrap If TRUE, use a bootstrap method to compute prediction intervals. #' Otherwise, assume a normal distribution. #' @param npaths Number of bootstrapped sample paths to use if \code{bootstrap==TRUE}. #' @param x Deprecated. Included for backwards compatibility. #' @inheritParams forecast.ts #' #' @return An object of class "\code{forecast}". 
#' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{meanf}. #' #' An object of class \code{"forecast"} is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The original time series #' (either \code{object} itself or the time series used to create the model #' stored as \code{object}).} \item{residuals}{Residuals from the fitted model. #' That is x minus fitted values.} \item{fitted}{Fitted values (one-step #' forecasts)} #' @author Rob J Hyndman #' @seealso \code{\link{rwf}} #' @keywords ts #' @examples #' nile.fcast <- meanf(Nile, h=10) #' plot(nile.fcast) #' #' @export meanf <- function(y, h=10, level=c(80, 95), fan=FALSE, lambda=NULL, biasadj=FALSE, bootstrap=FALSE, npaths=5000, x=y) { n <- length(x) if (!is.null(lambda)) { origx <- x x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") } meanx <- mean(x, na.rm = TRUE) fits <- rep(meanx, length(x)) res <- x - fits f <- rep(meanx, h) if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } nconf <- length(level) s <- sd(x, na.rm = TRUE) if (bootstrap) { e <- na.omit(res) - mean(res, na.rm = TRUE) sim <- matrix(sample(e, size = npaths * h, replace = TRUE), ncol = npaths, nrow = h) sim <- sweep(sim, 1, f, "+") lower <- t(apply(sim, 1, quantile, prob = .5 - level / 200)) upper <- t(apply(sim, 1, quantile, prob = .5 + level / 200)) } else { lower <- upper <- matrix(NA, nrow = h, ncol = nconf) for (i in 1:nconf) { if (n > 1) { tfrac <- qt(0.5 - level[i] / 200, n - 1) } else { tfrac <- -Inf } w <- -tfrac * s * sqrt(1 + 1 / n) lower[, i] <- f - w upper[, i] <- f + w } } colnames(lower) <- colnames(upper) <- paste(level, "%", sep = "") if (is.ts(x)) { fits <- copy_msts(x, fits) res <- copy_msts(x, res) f <- future_msts(x, f) lower <- future_msts(x, lower) upper <- future_msts(x, upper) } if (!is.null(lambda)) { fits <- InvBoxCox(fits, lambda) x <- origx f <- InvBoxCox(f, lambda, biasadj, list(level = level, upper = upper, lower = lower)) lower <- InvBoxCox(lower, lambda) upper <- InvBoxCox(upper, lambda) } out <- list( method = "Mean", level = level, x = x, series = deparse(substitute(y)), mean = f, lower = lower, upper = upper, model = structure(list(mu = f[1], mu.se = s / sqrt(length(x)), sd = s, bootstrap = bootstrap), class = "meanf"), lambda = lambda, fitted = fits, residuals = res ) out$model$call <- match.call() return(structure(out, class = "forecast")) } #' Box Cox Transformation #' #' BoxCox() returns a transformation of the input variable using a Box-Cox #' transformation. InvBoxCox() reverses the transformation. #' #' The Box-Cox transformation (as given by Bickel & Doksum 1981) is given by \deqn{f_\lambda(x) =(sign(x)|x|^\lambda - #' 1)/\lambda}{f(x;lambda)=(sign(x)|x|^lambda - 1)/lambda} if \eqn{\lambda\ne0}{lambda #' is not equal to 0}. 
For \eqn{\lambda=0}{lambda=0}, #' \deqn{f_0(x)=\log(x)}{f(x;0)=log(x)}. #' #' @param x a numeric vector or time series of class \code{ts}. #' @param lambda transformation parameter. If \code{lambda = "auto"}, then #' the transformation parameter lambda is chosen using BoxCox.lambda (with a lower bound of -0.9) #' @param biasadj Use adjusted back-transformed mean for Box-Cox #' transformations. If transformed data is used to produce forecasts and fitted values, #' a regular back transformation will result in median forecasts. If biasadj is TRUE, #' an adjustment will be made to produce mean forecasts and fitted values. #' @param fvar Optional parameter required if biasadj=TRUE. Can either be the #' forecast variance, or a list containing the interval \code{level}, and the #' corresponding \code{upper} and \code{lower} intervals. #' @return a numeric vector of the same length as x. #' @author Rob J Hyndman & Mitchell O'Hara-Wild #' @seealso \code{\link{BoxCox.lambda}} #' @references Box, G. E. P. and Cox, D. R. (1964) An analysis of #' transformations. \emph{JRSS B} \bold{26} 211--246. #' Bickel, P. J. and Doksum K. A. (1981) An Analysis of Transformations Revisited. \emph{JASA} \bold{76} 296-311. #' @keywords ts #' @examples #' #' lambda <- BoxCox.lambda(lynx) #' lynx.fit <- ar(BoxCox(lynx,lambda)) #' plot(forecast(lynx.fit,h=20,lambda=lambda)) #' #' @export BoxCox <- function(x, lambda) { if (lambda == "auto") { lambda <- BoxCox.lambda(x, lower = -0.9) } if (lambda < 0) { x[x < 0] <- NA } if (lambda == 0) { out <- log(x) } else { out <- (sign(x) * abs(x) ^ lambda - 1) / lambda } if (!is.null(colnames(x))) { colnames(out) <- colnames(x) } attr(out, "lambda") <- lambda return(out) } #' @rdname BoxCox #' @export InvBoxCox <- function(x, lambda, biasadj=FALSE, fvar=NULL) { if (lambda < 0) { x[x > -1 / lambda] <- NA } if (lambda == 0) { out <- exp(x) } else { xx <- x * lambda + 1 out <- sign(xx) * abs(xx) ^ (1 / lambda) } if (!is.null(colnames(x))) { colnames(out) <- colnames(x) } if (is.null(biasadj)) { biasadj <- attr(lambda, "biasadj") } if (!is.logical(biasadj)) { warning("biasadj information not found, defaulting to FALSE.") biasadj <- FALSE } if (biasadj) { if (is.null(fvar)) { stop("fvar must be provided when biasadj=TRUE") } if (is.list(fvar)) { # Create fvar from forecast interval level <- max(fvar$level) if (NCOL(fvar$upper) > 1 && NCOL(fvar$lower)) { i <- match(level, fvar$level) fvar$upper <- fvar$upper[, i] fvar$lower <- fvar$lower[, i] } if (level > 1) { level <- level / 100 } level <- mean(c(level, 1)) # Note: Use BoxCox transformed upper and lower values fvar <- as.numeric((fvar$upper - fvar$lower) / stats::qnorm(level) / 2) ^ 2 } if (NCOL(fvar) > 1) { fvar <- diag(fvar) } out <- out * (1 + 0.5 * as.numeric(fvar) * (1 - lambda) / (out) ^ (2 * lambda)) } return(out) } # Deprecated InvBoxCoxf <- function(x=NULL, fvar=NULL, lambda=NULL) { message("Deprecated, use InvBoxCox instead") if (is.null(lambda)) { stop("Must specify lambda using lambda=numeric(1)") } if (is.null(fvar)) { level <- max(x$level) if (NCOL(x$upper) > 1 && NCOL(x$lower)) { i <- match(level, x$level) x$upper <- x$upper[, i] x$lower <- x$lower[, i] } if (level > 1) { level <- level / 100 } level <- mean(c(level, 1)) # Note: Use BoxCox transformed upper and lower values fvar <- ((x$upper - x$lower) / stats::qnorm(level) / 2) ^ 2 } else { x <- list(mean = x) } if ("matrix" %in% class(fvar)) { fvar <- diag(fvar) } return(x$mean * (1 + 0.5 * fvar * (1 - lambda) / (x$mean) ^ (2 * lambda))) } #' Forecasting using 
Structural Time Series models #' #' Returns forecasts and other information for univariate structural time #' series models. #' #' This function calls \code{predict.StructTS} and constructs an object of #' class "\code{forecast}" from the results. #' #' @param object An object of class "\code{StructTS}". Usually the result of a #' call to \code{\link[stats]{StructTS}}. #' @param h Number of periods for forecasting #' @param level Confidence level for prediction intervals. #' @param fan If TRUE, level is set to seq(51,99,by=3). This is suitable for #' fan plots. #' @param ... Other arguments. #' @inheritParams forecast.ts #' #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{forecast.StructTS}. #' #' An object of class \code{"forecast"} is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The original time series #' (either \code{object} itself or the time series used to create the model #' stored as \code{object}).} \item{residuals}{Residuals from the fitted model. #' That is x minus fitted values.} \item{fitted}{Fitted values (one-step #' forecasts)} #' @author Rob J Hyndman #' @seealso \code{\link[stats]{StructTS}}. #' @keywords ts #' @examples #' fit <- StructTS(WWWusage,"level") #' plot(forecast(fit)) #' #' @export forecast.StructTS <- function(object, h=ifelse(object$coef["epsilon"] > 1e-10, 2 * object$xtsp[3], 10), level=c(80, 95), fan=FALSE, lambda=NULL, biasadj=NULL, ...) 
{ x <- object$data pred <- predict(object, n.ahead = h) if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } nint <- length(level) upper <- lower <- matrix(NA, ncol = nint, nrow = length(pred$pred)) for (i in 1:nint) { qq <- qnorm(0.5 * (1 + level[i] / 100)) lower[, i] <- pred$pred - qq * pred$se upper[, i] <- pred$pred + qq * pred$se } colnames(lower) <- colnames(upper) <- paste(level, "%", sep = "") if (is.element("seas", names(object$coef))) { method <- "Basic structural model" } else if (is.element("slope", names(object$coef))) { method <- "Local linear structural model" } else { method <- "Local level structural model" } # Compute fitted values and residuals sigma2 <- c(predict(object, n.ahead=1)$se) res <- residuals(object) * sigma2 fits <- x - res if (!is.null(lambda)) { fits <- InvBoxCox(fits, lambda) x <- InvBoxCox(x, lambda) pred$pred <- InvBoxCox(pred$pred, lambda, biasadj, list(level = level, upper = upper, lower = lower)) lower <- InvBoxCox(lower, lambda) upper <- InvBoxCox(upper, lambda) } mean <- future_msts(x, pred$pred) lower <- future_msts(x, lower) upper <- future_msts(x, upper) fits <- copy_msts(x, fits) res <- copy_msts(x, res) return(structure( list( method = method, model = object, level = level, mean = pred$pred, lower = lower, upper = upper, x = x, series = object$series, fitted = fits, residuals = res ), class = "forecast" )) } #' Forecasting using Holt-Winters objects #' #' Returns forecasts and other information for univariate Holt-Winters time #' series models. #' #' This function calls \code{\link[stats]{predict.HoltWinters}} and constructs #' an object of class "\code{forecast}" from the results. #' #' It is included for completeness, but the \code{\link{ets}} is recommended #' for use instead of \code{\link[stats]{HoltWinters}}. #' #' @param object An object of class "\code{HoltWinters}". Usually the result of #' a call to \code{\link[stats]{HoltWinters}}. #' @param h Number of periods for forecasting #' @param level Confidence level for prediction intervals. #' @param fan If TRUE, level is set to seq(51,99,by=3). This is suitable for #' fan plots. #' @param ... Other arguments. #' @inheritParams forecast.ts #' #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by #' \code{forecast.HoltWinters}. 
#' #' An object of class \code{"forecast"} is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The original time series #' (either \code{object} itself or the time series used to create the model #' stored as \code{object}).} \item{residuals}{Residuals from the fitted #' model.} \item{fitted}{Fitted values (one-step forecasts)} #' @author Rob J Hyndman #' @seealso \code{\link[stats]{predict.HoltWinters}}, #' \code{\link[stats]{HoltWinters}}. #' @keywords ts #' @examples #' fit <- HoltWinters(WWWusage,gamma=FALSE) #' plot(forecast(fit)) #' #' @export forecast.HoltWinters <- function(object, h=ifelse(frequency(object$x) > 1, 2 * frequency(object$x), 10), level=c(80, 95), fan=FALSE, lambda=NULL, biasadj=NULL, ...) { x <- object$x if (!is.null(object$exponential)) { if (object$exponential) { stop("Forecasting for exponential trend not yet implemented.") } } if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } nint <- length(level) pred <- predict(object, n.ahead = h, prediction.interval = TRUE, level = level[1] / 100) pmean <- pred[, 1] upper <- lower <- matrix(NA, ncol = nint, nrow = length(pred[, 1])) se <- (pred[, 2] - pred[, 3]) / (2 * qnorm(0.5 * (1 + level[1] / 100))) for (i in 1:nint) { qq <- qnorm(0.5 * (1 + level[i] / 100)) lower[, i] <- pmean - qq * se upper[, i] <- pmean + qq * se } colnames(lower) <- colnames(upper) <- paste(level, "%", sep = "") if (!is.null(lambda)) { fitted <- InvBoxCox(object$fitted[, 1], lambda) x <- InvBoxCox(x, lambda) pmean <- InvBoxCox(pmean, lambda, biasadj, list(level = level, upper = upper, lower = lower)) lower <- InvBoxCox(lower, lambda) upper <- InvBoxCox(upper, lambda) } else { fitted <- object$fitted[, 1] } # Pad fitted values with NAs nf <- length(fitted) n <- length(x) fitted <- ts(c(rep(NA, n - nf), fitted)) fitted <- copy_msts(object$x, fitted) pmean <- future_msts(object$x, pmean) lower <- future_msts(object$x, lower) upper <- future_msts(object$x, upper) return(structure( list( method = "HoltWinters", model = object, level = level, mean = pmean, lower = lower, upper = upper, x = x, series = deparse(object$call$x), fitted = fitted, residuals = x - fitted ), class = "forecast" )) } ## CROSTON #' Forecasts for intermittent demand using Croston's method #' #' Returns forecasts and other information for Croston's forecasts applied to #' y. #' #' Based on Croston's (1972) method for intermittent demand forecasting, also #' described in Shenstone and Hyndman (2005). Croston's method involves using #' simple exponential smoothing (SES) on the non-zero elements of the time #' series and a separate application of SES to the times between non-zero #' elements of the time series. The smoothing parameters of the two #' applications of SES are assumed to be equal and are denoted by \code{alpha}. #' #' Note that prediction intervals are not computed as Croston's method has no #' underlying stochastic model. #' #' @param y a numeric vector or time series of class \code{ts} #' @param h Number of periods for forecasting. 
#' @param alpha Value of alpha. Default value is 0.1. #' @param x Deprecated. Included for backwards compatibility. #' @return An object of class \code{"forecast"} is a list containing at least #' the following elements: \item{model}{A list containing information about the #' fitted model. The first element gives the model used for non-zero demands. #' The second element gives the model used for times between non-zero demands. #' Both elements are of class \code{forecast}.} \item{method}{The name of the #' forecasting method as a character string} \item{mean}{Point forecasts as a #' time series} \item{x}{The original time series (either \code{object} itself #' or the time series used to create the model stored as \code{object}).} #' \item{residuals}{Residuals from the fitted model. That is y minus fitted #' values.} \item{fitted}{Fitted values (one-step forecasts)} #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{croston} and #' associated functions. #' @author Rob J Hyndman #' @seealso \code{\link{ses}}. #' @references Croston, J. (1972) "Forecasting and stock control for #' intermittent demands", \emph{Operational Research Quarterly}, \bold{23}(3), #' 289-303. #' #' Shenstone, L., and Hyndman, R.J. (2005) "Stochastic models underlying #' Croston's method for intermittent demand forecasting". \emph{Journal of #' Forecasting}, \bold{24}, 389-402. #' @keywords ts #' @examples #' y <- rpois(20,lambda=.3) #' fcast <- croston(y) #' plot(fcast) #' #' @export croston <- function(y, h=10, alpha=0.1, x=y) { if (sum(x < 0) > 0) { stop("Series should not contain negative values") } out <- croston2(x, h, alpha) out$x <- x if (!is.null(out$fitted)) { out$residuals <- x - out$fitted } out$method <- "Croston's method" out$series <- deparse(substitute(y)) return(structure(out, class = "forecast")) } croston2 <- function(x, h=10, alpha=0.1, nofits=FALSE) { x <- as.ts(x) y <- x[x > 0] tsp.x <- tsp(x) freq.x <- tsp.x[3] start.f <- tsp.x[2] + 1 / freq.x if (length(y) == 0) # All historical values are equal to zero { fc <- ts(rep(0, h), start = start.f, frequency = freq.x) if (nofits) { return(fc) } else { return(list(mean = fc, fitted = ts(x * 0, start = tsp.x[1], frequency = freq.x))) } } tt <- diff(c(0, (1:length(x))[x > 0])) # Times between non-zero observations if (length(y) == 1 && length(tt) == 1) # Only one non-zero observation { y.f <- list(mean = ts(rep(y, h), start = start.f, frequency = freq.x)) p.f <- list(mean = ts(rep(tt, h), start = start.f, frequency = freq.x)) } else if (length(y) <= 1 || length(tt) <= 1) { # length(tt)==0 but length(y)>0. How does that happen? 
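# (Answer: by construction length(tt) == length(y), since tt is the diff of
# c(0, positions of the non-zero observations) and so has one element per
# non-zero value. This branch therefore appears to be purely defensive.)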
return(list(mean = ts(rep(NA, h), start = start.f, frequency = freq.x))) } else { y.f <- ses(y, alpha = alpha, initial = "simple", h = h, PI = FALSE) p.f <- ses(tt, alpha = alpha, initial = "simple", h = h, PI = FALSE) } ratio <- ts(y.f$mean / p.f$mean, start = start.f, frequency = freq.x) if (nofits) { return(ratio) } else { n <- length(x) fits <- x * NA if (n > 1) { for (i in 1:(n - 1)) fits[i + 1] <- croston2(x[1:i], h = 1, alpha = alpha, nofits = TRUE) } ratio <- future_msts(x, ratio) fits <- copy_msts(x, fits) return(list(mean = ratio, fitted = fits, model = list(demand = y.f, period = p.f))) } } forecast/R/makeParamVector.R0000644000176200001440000001734514323125536015503 0ustar liggesusers# TODO: Add comment # # Author: srazbash ############################################################################### unParameteriseTBATS <- function(param.vector, control) { # print(control) if (control$use.box.cox) { lambda <- param.vector[1] alpha <- param.vector[2] if (control$use.beta) { if (control$use.damping) { small.phi <- param.vector[3] beta <- param.vector[4] gamma.start <- 5 } else { small.phi <- 1 beta <- param.vector[3] gamma.start <- 4 } } else { small.phi <- NULL beta <- NULL gamma.start <- 3 } if (control$length.gamma > 0) { gamma.one.vector <- param.vector[gamma.start:(gamma.start + (control$length.gamma / 2) - 1)] gamma.two.vector <- param.vector[(gamma.start + (control$length.gamma / 2)):(gamma.start + (control$length.gamma) - 1)] final.gamma.pos <- gamma.start + control$length.gamma - 1 } else { gamma.one.vector <- NULL gamma.two.vector <- NULL final.gamma.pos <- gamma.start - 1 } if (control$p != 0) { ar.coefs <- param.vector[(final.gamma.pos + 1):(final.gamma.pos + control$p)] } else { ar.coefs <- NULL } if (control$q != 0) { ma.coefs <- param.vector[(final.gamma.pos + control$p + 1):length(param.vector)] } else { ma.coefs <- NULL } } else { lambda <- NULL alpha <- param.vector[1] if (control$use.beta) { if (control$use.damping) { small.phi <- param.vector[2] beta <- param.vector[3] gamma.start <- 4 } else { small.phi <- 1 beta <- param.vector[2] gamma.start <- 3 } } else { small.phi <- NULL beta <- NULL gamma.start <- 2 } if (control$length.gamma > 0) { gamma.one.vector <- param.vector[gamma.start:(gamma.start + (control$length.gamma / 2) - 1)] gamma.two.vector <- param.vector[(gamma.start + (control$length.gamma / 2)):(gamma.start + (control$length.gamma) - 1)] final.gamma.pos <- gamma.start + control$length.gamma - 1 } else { gamma.one.vector <- NULL gamma.two.vector <- NULL final.gamma.pos <- gamma.start - 1 } if (control$p != 0) { ar.coefs <- param.vector[(final.gamma.pos + 1):(final.gamma.pos + control$p)] } else { ar.coefs <- NULL } if (control$q != 0) { ma.coefs <- param.vector[(final.gamma.pos + control$p + 1):length(param.vector)] } else { ma.coefs <- NULL } } return(list(lambda = lambda, alpha = alpha, beta = beta, small.phi = small.phi, gamma.one.v = gamma.one.vector, gamma.two.v = gamma.two.vector, ar.coefs = ar.coefs, ma.coefs = ma.coefs)) } makeParscale <- function(control) { # print(control) if (control$use.box.cox) { parscale <- c(.001, .01) } else { parscale <- .01 } if (control$use.beta) { if (control$use.damping) { parscale <- c(parscale, 1e-2, 1e-2) } else { parscale <- c(parscale, 1e-2) } } if (control$length.gamma > 0) { parscale <- c(parscale, rep(1e-5, control$length.gamma)) } if ((control$p != 0) | (control$q != 0)) { parscale <- c(parscale, rep(1e-1, (control$p + control$q))) } # print(parscale) return(parscale) } 
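# A minimal sketch of the flat parameter vector that unParameteriseTBATS()
# above decodes (hypothetical values, for illustration only). With a Box-Cox
# transformation and a damped trend, the layout is:
#   [lambda, alpha, small.phi, beta,
#    gamma.one (length.gamma/2 values), gamma.two (length.gamma/2 values),
#    ar.coefs (p values), ma.coefs (q values)]
# ctrl <- list(use.box.cox = TRUE, use.beta = TRUE, use.damping = TRUE,
#              length.gamma = 2, p = 1, q = 0)
# unParameteriseTBATS(c(0.5, 0.1, 0.98, 0.01, 1e-4, 1e-4, 0.3), ctrl)
# # -> lambda = 0.5, alpha = 0.1, small.phi = 0.98, beta = 0.01,
# #    gamma.one.v = 1e-4, gamma.two.v = 1e-4, ar.coefs = 0.3, ma.coefs = NULL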
############################################################################################################################################################################################## ## BATS related stuff below ######################################## makeParscaleBATS <- function(control) { # print(control) if (control$use.box.cox) { parscale <- c(.001, .1) } else { parscale <- .1 } if (control$use.beta) { if (control$use.damping) { parscale <- c(parscale, 1e-2, 1e-2) } else { parscale <- c(parscale, 1e-2) } } if (control$length.gamma > 0) { parscale <- c(parscale, rep(1e-2, control$length.gamma)) } if ((control$p != 0) | (control$q != 0)) { parscale <- c(parscale, rep(1e-1, (control$p + control$q))) } # print(parscale) return(parscale) } parameterise <- function(alpha, beta.v=NULL, small.phi=1, gamma.v=NULL, lambda=NULL, ar.coefs=NULL, ma.coefs=NULL) { # print("urg") # print(lambda) if (!is.null(lambda)) { param.vector <- cbind(lambda, alpha) use.box.cox <- TRUE } else { # print("hello") param.vector <- alpha use.box.cox <- FALSE # print(use.box.cox) } if (!is.null(beta.v)) { use.beta <- TRUE if (is.null(small.phi)) { use.damping <- FALSE } else if (small.phi != 1) { param.vector <- cbind(param.vector, small.phi) use.damping <- TRUE } else { use.damping <- FALSE } param.vector <- cbind(param.vector, beta.v) } else { use.beta <- FALSE use.damping <- FALSE } if (!is.null(gamma.v)) { gamma.v <- matrix(gamma.v, nrow = 1, ncol = length(gamma.v)) param.vector <- cbind(param.vector, gamma.v) length.gamma <- length(gamma.v) } else { length.gamma <- 0 } if (!is.null(ar.coefs)) { ar.coefs <- matrix(ar.coefs, nrow = 1, ncol = length(ar.coefs)) param.vector <- cbind(param.vector, ar.coefs) p <- length(ar.coefs) } else { p <- 0 } if (!is.null(ma.coefs)) { ma.coefs <- matrix(ma.coefs, nrow = 1, ncol = length(ma.coefs)) param.vector <- cbind(param.vector, ma.coefs) q <- length(ma.coefs) } else { q <- 0 } # print(use.box.cox) control <- list(use.beta = use.beta, use.box.cox = use.box.cox, use.damping = use.damping, length.gamma = length.gamma, p = p, q = q) return(list(vect = as.numeric(param.vector), control = control)) } unParameterise <- function(param.vector, control) { # print(control) if (control$use.box.cox) { lambda <- param.vector[1] alpha <- param.vector[2] if (control$use.beta) { if (control$use.damping) { small.phi <- param.vector[3] beta <- param.vector[4] gamma.start <- 5 } else { small.phi <- 1 beta <- param.vector[3] gamma.start <- 4 } } else { small.phi <- NULL beta <- NULL gamma.start <- 3 } if (control$length.gamma > 0) { gamma.vector <- param.vector[gamma.start:(gamma.start + control$length.gamma - 1)] final.gamma.pos <- gamma.start + control$length.gamma - 1 } else { gamma.vector <- NULL final.gamma.pos <- gamma.start - 1 } if (control$p != 0) { ar.coefs <- param.vector[(final.gamma.pos + 1):(final.gamma.pos + control$p)] } else { ar.coefs <- NULL } if (control$q != 0) { ma.coefs <- param.vector[(final.gamma.pos + control$p + 1):length(param.vector)] } else { ma.coefs <- NULL } } else { lambda <- NULL alpha <- param.vector[1] if (control$use.beta) { if (control$use.damping) { small.phi <- param.vector[2] beta <- param.vector[3] gamma.start <- 4 } else { small.phi <- 1 beta <- param.vector[2] gamma.start <- 3 } } else { small.phi <- NULL beta <- NULL gamma.start <- 2 } if (control$length.gamma > 0) { gamma.vector <- param.vector[gamma.start:(gamma.start + control$length.gamma - 1)] final.gamma.pos <- gamma.start + control$length.gamma - 1 } else { gamma.vector <- NULL 
final.gamma.pos <- gamma.start - 1 } if (control$p != 0) { ar.coefs <- param.vector[(final.gamma.pos + 1):(final.gamma.pos + control$p)] } else { ar.coefs <- NULL } if (control$q != 0) { ma.coefs <- param.vector[(final.gamma.pos + control$p + 1):length(param.vector)] } else { ma.coefs <- NULL } } return(list(lambda = lambda, alpha = alpha, beta = beta, small.phi = small.phi, gamma.v = gamma.vector, ar.coefs = ar.coefs, ma.coefs = ma.coefs)) } forecast/R/getResponse.R0000644000176200001440000000635514323125536014717 0ustar liggesusers# Functions to return the response variable for different models. # If a Box-Cox transformation is used, the series returned here should # be on the original scale, not the Box-Cox transformed scale. #' Get response variable from time series model. #' #' \code{getResponse} is a generic function for extracting the historical data #' from a time series model (including \code{Arima}, \code{ets}, \code{ar}, #' \code{fracdiff}), a linear model of class \code{lm}, or a forecast object. #' The function invokes particular \emph{methods} which depend on the class of #' the first argument. #' #' #' @param object a time series model or forecast object. #' @param ... Additional arguments that are ignored. #' @return A numerical vector or a time series object of class \code{ts}. #' @author Rob J Hyndman #' @keywords ts #' #' @export getResponse <- function(object, ...) UseMethod("getResponse") #' @rdname getResponse #' @export getResponse.default <- function(object, ...) { if (is.list(object)) { return(object$x) } else { return(NULL) } } #' @rdname getResponse #' @export getResponse.lm <- function(object, ...) { if(!is.null(object[["x"]])){ object[["x"]] } else{ responsevar <- deparse(formula(object)[[2]]) model.frame(object$model)[, responsevar] } } #' @rdname getResponse #' @export getResponse.Arima <- function(object, ...) { if (is.element("x", names(object))) { x <- object$x } else { series.name <- object$series if (is.null(series.name)) { return(NULL) } else { x <- try(eval.parent(parse(text = series.name)), silent = TRUE) if (is.element("try-error", class(x))) { # Try one level further up the chain x <- try(eval.parent(parse(text = series.name), 2), silent = TRUE) } if (is.element("try-error", class(x))) { # Give up return(NULL) } } } return(as.ts(x)) } #' @rdname getResponse #' @export getResponse.fracdiff <- function(object, ...) { if (is.element("x", names(object))) { x <- object$x } else { series.name <- as.character(object$call)[2] if (is.null(series.name)) { stop("missing original time series") } else { x <- try(eval.parent(parse(text = series.name)), silent = TRUE) if (is.element("try-error", class(x))) { # Try one level further up the chain x <- try(eval.parent(parse(text = series.name), 2), silent = TRUE) } if (is.element("try-error", class(x))) { # Give up return(NULL) } } } return(as.ts(x)) } #' @rdname getResponse #' @export getResponse.ar <- function(object, ...) { getResponse.Arima(object) } #' @rdname getResponse #' @export getResponse.tbats <- function(object, ...) { if (is.element("y", names(object))) { y <- object$y } else { return(NULL) } return(as.ts(y)) } #' @rdname getResponse #' @export getResponse.bats <- function(object, ...) { return(getResponse.tbats(object, ...)) } #' @rdname getResponse #' @export getResponse.mforecast <- function(object, ...) { return(do.call(cbind, lapply(object$forecast, function(x) x$x))) } #' @rdname getResponse #' @export getResponse.baggedModel <- function(object, ...) 
{ if (is.element("y", names(object))) { y <- object$y } else { return(NULL) } return(as.ts(y)) } forecast/R/forecast.R0000644000176200001440000005630614341272370014227 0ustar liggesusers#' Forecasting time series #' #' \code{forecast} is a generic function for forecasting from time series or #' time series models. The function invokes particular \emph{methods} which #' depend on the class of the first argument. #' #' For example, the function \code{\link{forecast.Arima}} makes forecasts based #' on the results produced by \code{\link[stats]{arima}}. #' #' If \code{model=NULL}, the function \code{\link{forecast.ts}} makes forecasts #' using \code{\link{ets}} models (if the data are non-seasonal or the seasonal #' period is 12 or less) or \code{\link{stlf}} (if the seasonal period is 13 or #' more). #' #' If \code{model} is not \code{NULL}, \code{forecast.ts} will apply the #' \code{model} to the \code{object} time series, and then generate forecasts #' accordingly. #' #' @aliases print.forecast summary.forecast as.data.frame.forecast as.ts.forecast #' #' @param object a time series or time series model for which forecasts are #' required #' @param h Number of periods for forecasting #' @param level Confidence level for prediction intervals. #' @param fan If TRUE, \code{level} is set to \code{seq(51,99,by=3)}. This is #' suitable for fan plots. #' @param robust If TRUE, the function is robust to missing values and outliers #' in \code{object}. This argument is only valid when \code{object} is of class #' \code{ts}. #' @param lambda Box-Cox transformation parameter. If \code{lambda="auto"}, #' then a transformation is automatically selected using \code{BoxCox.lambda}. #' The transformation is ignored if NULL. Otherwise, the #' data are transformed before the model is estimated. #' @param find.frequency If TRUE, the function determines the appropriate #' period, if the data is of unknown period. #' @param allow.multiplicative.trend If TRUE, then ETS models with #' multiplicative trends are allowed. Otherwise, only additive or no trend ETS #' models are permitted. #' @param model An object describing a time series model; e.g., one of class #' \code{ets}, \code{Arima}, \code{bats}, \code{tbats}, or \code{nnetar}. #' @param ... Additional arguments affecting the forecasts produced. If #' \code{model=NULL}, \code{forecast.ts} passes these to \code{\link{ets}} or #' \code{\link{stlf}} depending on the frequency of the time series. If #' \code{model} is not \code{NULL}, the arguments are passed to the relevant #' modelling function. #' @inheritParams BoxCox #' #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract various useful features of the value returned by #' \code{forecast$model}.
#' #' An object of class \code{"forecast"} is a list usually containing at least #' the following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The original time series #' (either \code{object} itself or the time series used to create the model #' stored as \code{object}).} \item{residuals}{Residuals from the fitted model. #' For models with additive errors, the residuals will be x minus the fitted #' values.} \item{fitted}{Fitted values (one-step forecasts)} #' @author Rob J Hyndman #' @seealso Other functions which return objects of class \code{"forecast"} are #' \code{\link{forecast.ets}}, \code{\link{forecast.Arima}}, #' \code{\link{forecast.HoltWinters}}, \code{\link{forecast.StructTS}}, #' \code{\link{meanf}}, \code{\link{rwf}}, \code{\link{splinef}}, #' \code{\link{thetaf}}, \code{\link{croston}}, \code{\link{ses}}, #' \code{\link{holt}}, \code{\link{hw}}. #' @keywords ts #' @examples #' #' WWWusage %>% forecast %>% plot #' fit <- ets(window(WWWusage, end=60)) #' fc <- forecast(WWWusage, model=fit) #' @export forecast.ts <- function(object, h=ifelse(frequency(object) > 1, 2 * frequency(object), 10), level=c(80, 95), fan=FALSE, robust=FALSE, lambda = NULL, biasadj = FALSE, find.frequency = FALSE, allow.multiplicative.trend=FALSE, model=NULL, ...) { n <- length(object) if (find.frequency) { object <- ts(object, frequency = findfrequency(object)) obj.freq <- frequency(object) } else { obj.freq <- frequency(object) } if (robust) { object <- tsclean(object, replace.missing = TRUE, lambda = lambda) } if (!is.null(model)) { if (inherits(model, "forecast")) { model <- model$model } if (inherits(model, "ets")) { fit <- ets(object, model = model, ...) } else if (inherits(model, "Arima")) { fit <- Arima(object, model = model, ...) } else if (inherits(model, "tbats")) { fit <- tbats(object, model = model, ...) } else if (inherits(model, "bats")) { fit <- bats(object, model = model, ...) } else if (inherits(model, "nnetar")) { fit <- nnetar(object, model = model, ...) } else { stop("Unknown model class") } return(forecast(fit, h = h, level = level, fan = fan)) } if (n > 3) { if (obj.freq < 13) { out <- forecast( ets(object, lambda = lambda, biasadj = biasadj, allow.multiplicative.trend = allow.multiplicative.trend, ...), h = h, level = level, fan = fan ) } else if (n > 2 * obj.freq) { out <- stlf( object, h = h, level = level, fan = fan, lambda = lambda, biasadj = biasadj, allow.multiplicative.trend = allow.multiplicative.trend, ... ) } else { out <- forecast( ets(object, model = "ZZN", lambda = lambda, biasadj = biasadj, allow.multiplicative.trend = allow.multiplicative.trend, ...), h = h, level = level, fan = fan ) } } else { out <- meanf(object, h = h, level = level, fan = fan, lambda = lambda, biasadj = biasadj, ...) } out$series <- deparse(substitute(object)) return(out) } #' @rdname forecast.ts #' @method forecast default #' @export forecast.default <- function(object, ...) forecast.ts(object, ...) #' @rdname forecast.ts #' @export print.forecast <- function(x, ...) { print(as.data.frame(x)) } #' @export summary.forecast <- function(object, ...) 
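# summary() only retags the object: the extra class makes print() dispatch to
# print.summary.forecast() below, which shows the model, accuracy() error
# measures, and the forecast table.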
{ class(object) <- c("summary.forecast", class(object)) object } #' @export print.summary.forecast <- function(x, ...) { cat(paste("\nForecast method:", x$method)) # cat(paste("\n\nCall:\n",deparse(x$call))) cat(paste("\n\nModel Information:\n")) print(x$model) cat("\nError measures:\n") print(accuracy(x)) if (is.null(x$mean)) { cat("\n No forecasts\n") } else { cat("\nForecasts:\n") NextMethod() } } plotlmforecast <- function(object, PI, shaded, shadecols, col, fcol, pi.col, pi.lty, xlim=NULL, ylim, main, ylab, xlab, ...) { xvar <- attributes(terms(object$model))$term.labels if (length(xvar) > 1) { stop("Forecast plot for regression models only available for a single predictor") } else if (ncol(object$newdata) == 1) { # Make sure column has correct name colnames(object$newdata) <- xvar } if (is.null(xlim)) { xlim <- range(object$newdata[, xvar], model.frame(object$model)[, xvar]) } if (is.null(ylim)) { ylim <- range(object$upper, object$lower, fitted(object$model) + residuals(object$model)) } plot( formula(object$model), data = model.frame(object$model), xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, main = main, col = col, ... ) abline(object$model) nf <- length(object$mean) if (PI) { nint <- length(object$level) idx <- rev(order(object$level)) if (is.null(shadecols)) { # require(colorspace) if (min(object$level) < 50) { # Using very small confidence levels. shadecols <- rev(colorspace::sequential_hcl(100)[object$level]) } else { # This should happen almost all the time. Colors mapped to levels. shadecols <- rev(colorspace::sequential_hcl(52)[object$level - 49]) } } if (length(shadecols) == 1) { if (shadecols == "oldstyle") { # Default behaviour up to v3.25. shadecols <- heat.colors(nint + 2)[switch(1 + (nint > 1), 2, nint:1) + 1] } } for (i in 1:nf) { for (j in 1:nint) { if (shaded) { lines(rep(object$newdata[i, xvar], 2), c(object$lower[i, idx[j]], object$upper[i, idx[j]]), col = shadecols[j], lwd = 6) } else { lines(rep(object$newdata[i, xvar], 2), c(object$lower[i, idx[j]], object$upper[i, idx[j]]), col = pi.col, lty = pi.lty) } } } } points(object$newdata[, xvar], object$mean, pch = 19, col = fcol) } #' Forecast plot #' #' Plots historical data with forecasts and prediction intervals. #' #' \code{autoplot} will produce a ggplot object. #' #' @aliases plot.splineforecast autoplot.splineforecast #' @param x Forecast object produced by \code{\link{forecast}}. #' @param object Forecast object produced by \code{\link{forecast}}. Used for #' ggplot graphics (S3 method consistency). #' @param include number of values from time series to include in plot. Default #' is all values. #' @param PI Logical flag indicating whether to plot prediction intervals. #' @param showgap If \code{showgap=FALSE}, the gap between the historical #' observations and the forecasts is removed. #' @param shaded Logical flag indicating whether prediction intervals should be #' shaded (\code{TRUE}) or lines (\code{FALSE}) #' @param shadebars Logical flag indicating if prediction intervals should be #' plotted as shaded bars (if \code{TRUE}) or a shaded polygon (if #' \code{FALSE}). Ignored if \code{shaded=FALSE}. Bars are plotted by default #' if there are fewer than five forecast horizons. #' @param shadecols Colors for shaded prediction intervals. To get default #' colors used prior to v3.26, set \code{shadecols="oldstyle"}. #' @param col Colour for the data line. #' @param fcol Colour for the forecast line. #' @param flty Line type for the forecast line. #' @param flwd Line width for the forecast line.
#' @param pi.col If \code{shaded=FALSE} and \code{PI=TRUE}, the prediction #' intervals are plotted in this colour. #' @param pi.lty If \code{shaded=FALSE} and \code{PI=TRUE}, the prediction #' intervals are plotted using this line type. #' @param ylim Limits on y-axis. #' @param main Main title. #' @param xlab X-axis label. #' @param ylab Y-axis label. #' @param series Matches an unidentified forecast layer with a coloured object #' on the plot. #' @param fitcol Line colour for fitted values. #' @param type 1-character string giving the type of plot desired. As for #' \code{\link[graphics]{plot.default}}. #' @param pch Plotting character (if \code{type=="p"} or \code{type=="o"}). #' @param ... Other plotting parameters to affect the plot. #' @return None. #' @author Rob J Hyndman & Mitchell O'Hara-Wild #' @seealso \code{\link[stats]{plot.ts}} #' @references Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles #' and practice}, 2nd edition, OTexts: Melbourne, Australia. #' \url{https://otexts.com/fpp2/} #' @keywords ts #' @examples #' library(ggplot2) #' #' wine.fit <- hw(wineind,h=48) #' plot(wine.fit) #' autoplot(wine.fit) #' #' fit <- tslm(wineind ~ fourier(wineind,4)) #' fcast <- forecast(fit, newdata=data.frame(fourier(wineind,4,20))) #' autoplot(fcast) #' #' @export plot.forecast <- function(x, include, PI=TRUE, showgap = TRUE, shaded=TRUE, shadebars=(length(x$mean) < 5), shadecols=NULL, col=1, fcol=4, pi.col=1, pi.lty=2, ylim=NULL, main=NULL, xlab="", ylab="", type="l", flty = 1, flwd = 2, ...) { if (is.element("x", names(x))) { # Assume stored as x xx <- x$x } else { xx <- NULL } if (is.null(x$lower) || is.null(x$upper) || is.null(x$level)) { PI <- FALSE } else if (!is.finite(max(x$upper))) { PI <- FALSE } if (!shaded) { shadebars <- FALSE } if (is.null(main)) { main <- paste("Forecasts from ", x$method, sep = "") } if (PI) { x$upper <- as.matrix(x$upper) x$lower <- as.matrix(x$lower) } if (is.element("lm", class(x$model)) && !is.element("ts", class(x$mean))) # Non time series linear model { plotlmforecast( x, PI = PI, shaded = shaded, shadecols = shadecols, col = col, fcol = fcol, pi.col = pi.col, pi.lty = pi.lty, ylim = ylim, main = main, xlab = xlab, ylab = ylab, ... ) if (PI) { return(invisible(list(mean = x$mean, lower = as.matrix(x$lower), upper = as.matrix(x$upper)))) } else { return(invisible(list(mean = x$mean))) } } # Otherwise assume x is from a time series forecast n <- length(xx) if (n == 0) { include <- 0 } else if (missing(include)) { include <- length(xx) } # Check if all historical values are missing if (n > 0) { if (sum(is.na(xx)) == length(xx)) { n <- 0 } } if (n > 0) { xx <- as.ts(xx) freq <- frequency(xx) strt <- start(xx) nx <- max(which(!is.na(xx))) xxx <- xx[1:nx] include <- min(include, nx) if (!showgap) { lastObs <- x$x[length(x$x)] lastTime <- time(x$x)[length(x$x)] x$mean <- ts(c(lastObs, x$mean), start = lastTime, frequency = freq) x$upper <- ts(rbind(lastObs, x$upper), start = lastTime, frequency = freq) x$lower <- ts(rbind(lastObs, x$lower), start = lastTime, frequency = freq) } } else { freq <- frequency(x$mean) strt <- start(x$mean) nx <- include <- 1 xx <- xxx <- ts(NA, frequency = freq, end = tsp(x$mean)[1] - 1 / freq) if (!showgap) { warning("Removing the gap requires historical data, provide this via model$x. 
Defaulting showgap to TRUE.") } } pred.mean <- x$mean if (is.null(ylim)) { ylim <- range(c(xx[(n - include + 1):n], pred.mean), na.rm = TRUE) if (PI) { ylim <- range(ylim, x$lower, x$upper, na.rm = TRUE) } } npred <- length(pred.mean) tsx <- is.ts(pred.mean) if (!tsx) { pred.mean <- ts(pred.mean, start = nx + 1, frequency = 1) type <- "p" } plot( ts(c(xxx[(nx - include + 1):nx], rep(NA, npred)), end = tsp(xx)[2] + (nx - n) / freq + npred / freq, frequency = freq), xlab = xlab, ylim = ylim, ylab = ylab, main = main, col = col, type = type, ... ) if (PI) { if (is.ts(x$upper)) { xxx <- time(x$upper) } else { xxx <- tsp(pred.mean)[1] - 1 / freq + (1:npred) / freq } idx <- rev(order(x$level)) nint <- length(x$level) if (is.null(shadecols)) { # require(colorspace) if (min(x$level) < 50) { # Using very small confidence levels. shadecols <- rev(colorspace::sequential_hcl(100)[x$level]) } else { # This should happen almost all the time. Colors mapped to levels. shadecols <- rev(colorspace::sequential_hcl(52)[x$level - 49]) } } if (length(shadecols) == 1) { if (shadecols == "oldstyle") { # Default behaviour up to v3.25. shadecols <- heat.colors(nint + 2)[switch(1 + (nint > 1), 2, nint:1) + 1] } } for (i in 1:nint) { if (shadebars) { for (j in 1:npred) { polygon( xxx[j] + c(-0.5, 0.5, 0.5, -0.5) / freq, c(rep(x$lower[j, idx[i]], 2), rep(x$upper[j, idx[i]], 2)), col = shadecols[i], border = FALSE ) } } else if (shaded) { polygon( c(xxx, rev(xxx)), c(x$lower[, idx[i]], rev(x$upper[, idx[i]])), col = shadecols[i], border = FALSE ) } else if (npred == 1) { lines(c(xxx) + c(-0.5, 0.5) / freq, rep(x$lower[, idx[i]], 2), col = pi.col, lty = pi.lty) lines(c(xxx) + c(-0.5, 0.5) / freq, rep(x$upper[, idx[i]], 2), col = pi.col, lty = pi.lty) } else { lines(x$lower[, idx[i]], col = pi.col, lty = pi.lty) lines(x$upper[, idx[i]], col = pi.col, lty = pi.lty) } } } if (npred > 1 && !shadebars && tsx) { lines(pred.mean, lty = flty, lwd = flwd, col = fcol) } else { points(pred.mean, col = fcol, pch = 19) } if (PI) { invisible(list(mean = pred.mean, lower = x$lower, upper = x$upper)) } else { invisible(list(mean = pred.mean)) } } #' @export predict.default <- function(object, ...) { forecast(object, ...) } hfitted <- function(object, h=1, FUN=NULL, ...) 
{ if (h == 1) { return(fitted(object)) } # Attempt to get model function if (is.null(FUN)) { FUN <- class(object) for (i in FUN) { if (exists(i)) { if (typeof(eval(parse(text = i)[[1]])) == "closure") { FUN <- i i <- "Y" break } } } if (i != "Y") { stop("Could not find appropriate function to refit, specify FUN=function") } } x <- getResponse(object) tspx <- tsp(x) fits <- fitted(object) * NA n <- length(fits) refitarg <- list(x = NULL, model = object) names(refitarg)[1] <- names(formals(FUN))[1] fcarg <- list(h = h, biasadj=TRUE, lambda=object$lambda) if (FUN == "ets") { refitarg$use.initial.values <- TRUE } for (i in 1:(n - h)) { refitarg[[1]] <- ts(x[1:i], start = tspx[1], frequency = tspx[3]) if(!is.null(object$xreg) & any(colnames(object$xreg)!="drift")){ if(any(colnames(object$xreg)=="drift")){ idx <- which(colnames(object$xreg)=="drift") refitarg$xreg <- ts(object$xreg[1:i, -idx], start = tspx[1], frequency = tspx[3]) fcarg$xreg <- ts(object$xreg[(i + 1):(i + h), -idx], start = tspx[1] + i / tspx[3], frequency = tspx[3]) }else{ refitarg$xreg <- ts(object$xreg[1:i, ], start = tspx[1], frequency = tspx[3]) fcarg$xreg <- ts(object$xreg[(i + 1):(i + h), ], start = tspx[1] + i / tspx[3], frequency = tspx[3]) } } fcarg$object <- try(suppressWarnings(do.call(FUN, refitarg)), silent = TRUE) if (!is.element("try-error", class(fcarg$object))) { # Keep original variance estimate (for consistent bias adjustment) if(!is.null(object$sigma2)) fcarg$object$sigma2 <- object$sigma2 fits[i + h] <- suppressWarnings(do.call("forecast", fcarg)$mean[h]) } } return(fits) } # The following function is for when users don't realise they already have the forecasts. # e.g., with the dshw(), meanf() or rwf() functions. #' @export forecast.forecast <- function(object, ...) { input_names <- as.list(substitute(list(...))) # Read level argument if (is.element("level", names(input_names))) { level <- list(...)[["level"]] if (!identical(level, object$level)) { stop("Please set the level argument when the forecasts are first computed") } } # Read h argument if (is.element("h", names(input_names))) { h <- list(...)[["h"]] if (h > length(object$mean)) { stop("Please select a longer horizon when the forecasts are first computed") } tspf <- tsp(object$mean) object$mean <- ts(object$mean[1:h], start = tspf[1], frequency = tspf[3]) if (!is.null(object$upper)) { object$upper <- ts(object$upper[1:h, , drop = FALSE], start = tspf[1], frequency = tspf[3]) object$lower <- ts(object$lower[1:h, , drop = FALSE], start = tspf[1], frequency = tspf[3]) } } return(object) } subset.forecast <- function(x, ...) { tspx <- tsp(x$mean) x$mean <- subset(x$mean, ...) x$lower <- subset(ts(x$lower, start = tspx[1], frequency = tspx[3]), ...) x$upper <- subset(ts(x$upper, start = tspx[1], frequency = tspx[3]), ...) return(x) } #' Is an object a particular forecast type? #' #' Returns true if the forecast object is of a particular type #' #' @param x object to be tested #' @export is.forecast <- function(x) { inherits(x, "forecast") } #' @export as.ts.forecast <- function(x, ...) { df <- ts(as.matrix(as.data.frame.forecast(x))) tsp(df) <- tsp(x$mean) return(df) } #' @export as.data.frame.mforecast <- function(x, ...) 
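# Flattens a multivariate forecast into one long data frame, stacking the
# forecast table of each series and keying the rows by Time and Series.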
{ tmp <- lapply(x$forecast, as.data.frame) series <- names(tmp) times <- rownames(tmp[[1]]) h <- NROW(tmp[[1]]) output <- cbind(Time = times, Series = rep(series[1], h), tmp[[1]]) if (length(tmp) > 1) { for (i in 2:length(tmp)) output <- rbind( output, cbind(Time = times, Series = rep(series[i], h), tmp[[i]]) ) } rownames(output) <- NULL return(output) } #' @export as.data.frame.forecast <- function(x, ...) { nconf <- length(x$level) out <- matrix(x$mean, ncol = 1) ists <- is.ts(x$mean) fr.x <- frequency(x$mean) if (ists) { out <- ts(out) attributes(out)$tsp <- attributes(x$mean)$tsp } names <- c("Point Forecast") if (!is.null(x$lower) && !is.null(x$upper) && !is.null(x$level)) { x$upper <- as.matrix(x$upper) x$lower <- as.matrix(x$lower) for (i in 1:nconf) { out <- cbind(out, x$lower[, i, drop = FALSE], x$upper[, i, drop = FALSE]) names <- c(names, paste("Lo", x$level[i]), paste("Hi", x$level[i])) } } colnames(out) <- names tx <- time(x$mean) if (max(abs(tx - round(tx))) < 1e-11) { nd <- 0L } else { nd <- max(round(log10(fr.x) + 1), 2L) } if(nd == 0L) rownames(out) <- round(tx) else rownames(out) <- format(tx, nsmall = nd, digits = nd) # Rest of function borrowed from print.ts(), but with header() omitted if (!ists) { return(as.data.frame(out)) } x <- as.ts(out) calendar <- any(fr.x == c(4, 12)) && length(start(x)) == 2L Tsp <- tsp(x) if (is.null(Tsp)) { warning("series is corrupt, with no 'tsp' attribute") print(unclass(x)) return(invisible(x)) } nn <- 1 + round((Tsp[2L] - Tsp[1L]) * Tsp[3L]) if (NROW(x) != nn) { warning(gettextf("series is corrupt: length %d with 'tsp' implying %d", NROW(x), nn), domain = NA, call. = FALSE) calendar <- FALSE } if (NCOL(x) == 1) { if (calendar) { if (fr.x > 1) { dn2 <- if (fr.x == 12) { month.abb } else if (fr.x == 4) { c("Qtr1", "Qtr2", "Qtr3", "Qtr4") } else { paste("p", 1L:fr.x, sep = "") } if (NROW(x) <= fr.x && start(x)[1L] == end(x)[1L]) { dn1 <- start(x)[1L] dn2 <- dn2[1 + (start(x)[2L] - 2 + seq_along(x)) %% fr.x] x <- matrix( format(x, ...), nrow = 1L, byrow = TRUE, dimnames = list(dn1, dn2) ) } else { start.pad <- start(x)[2L] - 1 end.pad <- fr.x - end(x)[2L] dn1 <- start(x)[1L]:end(x)[1L] x <- matrix( c(rep.int("", start.pad), format(x, ...), rep.int("", end.pad)), ncol = fr.x, byrow = TRUE, dimnames = list(dn1, dn2) ) } } else { attributes(x) <- NULL names(x) <- tx } } else { attr(x, "class") <- attr(x, "tsp") <- attr(x, "na.action") <- NULL } } else { if (calendar && fr.x > 1) { tm <- time(x) t2 <- cycle(x) p1 <- format(floor(tm + 1e-8)) rownames(x) <- if (fr.x == 12) { paste(month.abb[t2], p1, sep = " ") } else { paste( p1, if (fr.x == 4) { c("Q1", "Q2", "Q3", "Q4")[t2] } else { format(t2) }, sep = " " ) } } else { rownames(x) <- format(time(x), nsmall = nd) } attr(x, "class") <- attr(x, "tsp") <- attr(x, "na.action") <- NULL } return(as.data.frame(x)) } forecast/R/unitRoot.R0000644000176200001440000004056414150370574014246 0ustar liggesusers#' Number of differences required for a stationary series #' #' Functions to estimate the number of differences required to make a given #' time series stationary. \code{ndiffs} estimates the number of first #' differences necessary. #' #' \code{ndiffs} uses a unit root test to determine the number of differences #' required for time series \code{x} to be made stationary. If #' \code{test="kpss"}, the KPSS test is used with the null hypothesis that #' \code{x} has a stationary root against a unit-root alternative. 
Then the #' test returns the least number of differences required to pass the test at #' the level \code{alpha}. If \code{test="adf"}, the Augmented Dickey-Fuller #' test is used and if \code{test="pp"} the Phillips-Perron test is used. In #' both of these cases, the null hypothesis is that \code{x} has a unit root #' against a stationary root alternative. Then the test returns the least #' number of differences required to fail the test at the level \code{alpha}. #' #' @param x A univariate time series #' @param alpha Level of the test, possible values range from 0.01 to 0.1. #' @param test Type of unit root test to use #' @param type Specification of the deterministic component in the regression #' @param max.d Maximum number of non-seasonal differences allowed #' @param ... Additional arguments to be passed on to the unit root test #' @return An integer indicating the number of differences required for stationarity. #' @author Rob J Hyndman, Slava Razbash & Mitchell O'Hara-Wild #' @seealso \code{\link{auto.arima}} and \code{\link{nsdiffs}} #' @references #' Dickey DA and Fuller WA (1979), "Distribution of the Estimators for #' Autoregressive Time Series with a Unit Root", \emph{Journal of the American #' Statistical Association} \bold{74}:427-431. #' #' Kwiatkowski D, Phillips PCB, Schmidt P and Shin Y (1992) "Testing the Null #' Hypothesis of Stationarity against the Alternative of a Unit Root", #' \emph{Journal of Econometrics} \bold{54}:159-178. #' #' Osborn, D.R. (1990) "A survey of seasonality in UK macroeconomic variables", #' \emph{International Journal of Forecasting}, \bold{6}:327-336. #' #' Phillips, P.C.B. and Perron, P. (1988) "Testing for a unit root in time series regression", #' \emph{Biometrika}, \bold{75}(2), 335-346. #' #' Said E and Dickey DA (1984), "Testing for Unit Roots in Autoregressive #' Moving Average Models of Unknown Order", \emph{Biometrika} #' \bold{71}:599-607. #' @keywords ts #' @examples #' ndiffs(WWWusage) #' ndiffs(diff(log(AirPassengers), 12)) #' @importFrom urca ur.kpss ur.df ur.pp #' @export ndiffs <- function(x, alpha = 0.05, test = c("kpss", "adf", "pp"), type = c("level", "trend"), max.d = 2, ...) { test <- match.arg(test) type <- match(match.arg(type), c("level", "trend")) x <- c(na.omit(c(x))) d <- 0 if (alpha < 0.01) { warning("Specified alpha value is less than the minimum, setting alpha=0.01") alpha <- 0.01 } else if (alpha > 0.1) { warning("Specified alpha value is larger than the maximum, setting alpha=0.1") alpha <- 0.1 } if (is.constant(x)) { return(d) } urca_pval <- function(urca_test) { approx(urca_test@cval[1, ], as.numeric(sub("pct", "", colnames(urca_test@cval))) / 100, xout = urca_test@teststat[1], rule = 2)$y } kpss_wrap <- function(..., use.lag = trunc(3 * sqrt(length(x)) / 13)) { ur.kpss(..., use.lag = use.lag) } runTests <- function(x, test, alpha) { tryCatch( { suppressWarnings( diff <- switch(test, kpss = urca_pval(kpss_wrap(x, type = c("mu", "tau")[type], ...)) < alpha, adf = urca_pval(ur.df(x, type = c("drift", "trend")[type], ...)) > alpha, pp = urca_pval(ur.pp(x, type = "Z-tau", model = c("constant", "trend")[type], ...)) > alpha, stop("This shouldn't happen") ) ) diff }, error = function(e) { warning( call. = FALSE, sprintf( "The chosen unit root test encountered an error when testing for the %s difference. From %s(): %s %i differences will be used. 
Consider using a different unit root test.", switch(as.character(d), `0` = "first", `1` = "second", `2` = "third", paste0(d + 1, "th")), deparse(e$call[[1]]), e$message, d ) ) FALSE } ) } dodiff <- runTests(x, test, alpha) if (is.na(dodiff)) { return(d) } while (dodiff && d < max.d) { d <- d + 1 x <- diff(x) if (is.constant(x)) { return(d) } dodiff <- runTests(x, test, alpha) if (is.na(dodiff)) { return(d - 1) } } return(d) } # Number of seasonal differences #' Number of differences required for a seasonally stationary series #' #' Functions to estimate the number of differences required to make a given #' time series stationary. \code{nsdiffs} estimates the number of seasonal differences #' necessary. #' #' \code{nsdiffs} uses seasonal unit root tests to determine the number of #' seasonal differences required for time series \code{x} to be made stationary #' (possibly with some lag-one differencing as well). #' #' Several different tests are available: #' * If \code{test="seas"} (default), a measure of seasonal strength is used, where differencing is #' selected if the seasonal strength (Wang, Smith & Hyndman, 2006) exceeds 0.64 #' (based on minimizing MASE when forecasting using auto.arima on M3 and M4 data). #' * If \code{test="ch"}, the Canova-Hansen (1995) test is used #' (with null hypothesis of deterministic seasonality) #' * If \code{test="hegy"}, the Hylleberg, Engle, Granger & Yoo (1990) test is used. #' * If \code{test="ocsb"}, the Osborn-Chui-Smith-Birchenhall #' (1988) test is used (with null hypothesis that a seasonal unit root exists). #' #' @md #' #' @inheritParams ndiffs #' @param x A univariate time series #' @param alpha Level of the test, possible values range from 0.01 to 0.1. #' @param test Type of unit root test to use #' @param m Deprecated. Length of seasonal period #' @param max.D Maximum number of seasonal differences allowed #' #' @return An integer indicating the number of differences required for stationarity. #' #' @references #' #' Wang, X, Smith, KA, Hyndman, RJ (2006) "Characteristic-based clustering #' for time series data", \emph{Data Mining and Knowledge Discovery}, #' \bold{13}(3), 335-364. #' #' Osborn DR, Chui APL, Smith J, and Birchenhall CR (1988) "Seasonality and the #' order of integration for consumption", \emph{Oxford Bulletin of Economics #' and Statistics} \bold{50}(4):361-377. #' #' Canova F and Hansen BE (1995) "Are Seasonal Patterns Constant #' over Time? A Test for Seasonal Stability", \emph{Journal of Business and #' Economic Statistics} \bold{13}(3):237-252. #' #' Hylleberg S, Engle R, Granger C and Yoo B (1990) "Seasonal integration #' and cointegration.", \emph{Journal of Econometrics} \bold{44}(1), pp. 215-238. #' #' @author Rob J Hyndman, Slava Razbash and Mitchell O'Hara-Wild #' #' @seealso \code{\link{auto.arima}}, \code{\link{ndiffs}}, \code{\link{ocsb.test}}, \code{\link[uroot]{hegy.test}}, and \code{\link[uroot]{ch.test}} #' #' @examples #' nsdiffs(AirPassengers) #' @export nsdiffs <- function(x, alpha = 0.05, m = frequency(x), test = c("seas", "ocsb", "hegy", "ch"), max.D = 1, ...) 
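# A usage sketch (an assumed workflow mirroring the order used by auto.arima():
# seasonal differences are chosen first, then ndiffs() is applied to the
# seasonally differenced series):
#   D <- nsdiffs(AirPassengers)
#   y <- if (D > 0) diff(AirPassengers, lag = frequency(AirPassengers)) else AirPassengers
#   d <- ndiffs(y)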
{ test <- match.arg(test) D <- 0 if (alpha < 0.01) { warning("Specified alpha value is less than the minimum, setting alpha=0.01") alpha <- 0.01 } else if (alpha > 0.1) { warning("Specified alpha value is larger than the maximum, setting alpha=0.1") alpha <- 0.1 } if (test == "ocsb" && alpha != 0.05) { warning("Significance levels other than 5% are not currently supported by test='ocsb', defaulting to alpha = 0.05.") alpha <- 0.05 } if (test %in% c("hegy", "ch")) { if (!requireNamespace("uroot", quietly = TRUE)) { stop(paste0("Using a ", test, ' test requires the uroot package. Please install it using `install.packages("uroot")`')) } } if (is.constant(x)) { return(D) } if (!missing(m)) { warning("argument m is deprecated; please set the frequency in the ts object.", call. = FALSE ) x <- ts(x, frequency = m) } if (frequency(x) == 1) { stop("Non seasonal data") } else if (frequency(x) < 1) { warning("I can't handle data with frequency less than 1. Seasonality will be ignored.") return(0) } if (frequency(x) >= length(x)) { return(0) } # Can't take differences runTests <- function(x, test, alpha) { tryCatch( { suppressWarnings( diff <- switch(test, seas = seas.heuristic(x, ...) > 0.64, # Threshold chosen based on seasonal M3 auto.arima accuracy. ocsb = with(ocsb.test(x, maxlag = 3, lag.method = "AIC", ...), statistics > critical), hegy = tail(uroot::hegy.test(x, deterministic = c(1, 1, 0), maxlag = 3, lag.method = "AIC", ...)$pvalues, 2)[-2] > alpha, ch = uroot::ch.test(x, type = "trig", ...)$pvalues["joint"] < alpha ) ) stopifnot(diff %in% c(0, 1)) diff }, error = function(e) { warning( call. = FALSE, sprintf( "The chosen seasonal unit root test encountered an error when testing for the %s difference. From %s(): %s %i seasonal differences will be used. Consider using a different unit root test.", switch(as.character(D), `0` = "first", `1` = "second", `2` = "third", paste0(D + 1, "th")), deparse(e$call[[1]]), e$message, D ) ) 0 } ) } dodiff <- runTests(x, test, alpha) if (dodiff && frequency(x) %% 1 != 0) { warning("The time series frequency has been rounded to support seasonal differencing.", call. = FALSE) x <- ts(x, frequency = round(frequency(x))) } while (dodiff && D < max.D) { D <- D + 1 x <- diff(x, lag = frequency(x)) if (is.constant(x)) { return(D) } if (length(x) >= 2 * frequency(x) & D < max.D) { dodiff <- runTests(x, test, alpha) } else { dodiff <- FALSE } } return(D) } # Adjusted from robjhyndman/tsfeatures seas.heuristic <- function(x) { if ("msts" %in% class(x)) { msts <- attributes(x)$msts nperiods <- length(msts) } else if ("ts" %in% class(x)) { msts <- frequency(x) nperiods <- msts > 1 season <- 0 } else { stop("The object provided must be a time-series object (`msts` or `ts`)") } season <- NA stlfit <- mstl(x) remainder <- stlfit[, "Remainder"] seasonal <- stlfit[, grep("Season", colnames(stlfit)), drop = FALSE] vare <- var(remainder, na.rm = TRUE) nseas <- NCOL(seasonal) if (nseas > 0) { season <- numeric(nseas) for (i in seq(nseas)) { season[i] <- max(0, min(1, 1-vare/var(remainder+seasonal[, i], na.rm = TRUE))) } } return(season) } # Model specification from Osborn DR, Chui APL, Smith J, and Birchenhall CR (1988) "Seasonality and the order of integration for consumption", Oxford Bulletin of Economics and Statistics 50(4):361-377. 
# # $\Delta\Delta_m X_t = \beta_1Z_{4,t-1} + \beta_2Z_{5,t-m} + \alpha_1\Delta\Delta_mX_{t-1} + \ldots + \alpha_p\Delta\Delta_mX_{t-p}$ # Where $Z_{4,t} = \hat{\lambda}(B)\Delta_mX_t$, $Z_{5,t} = \hat{\lambda}(B)\Delta X_t$, and $\hat{\lambda}(B)$ is an AR(p) lag operator with coefficients from an estimated AR(p) process of $\Delta\Delta_m X_t$. #' Osborn, Chui, Smith, and Birchenhall Test for Seasonal Unit Roots #' #' An implementation of the Osborn, Chui, Smith, and Birchenhall (OCSB) test. #' #' @inheritParams uroot::hegy.test #' @aliases print.OCSBtest #' @details #' The regression equation may include lags of the dependent variable. When lag.method = "fixed", the lag order is fixed to maxlag; otherwise, maxlag is the maximum number of lags considered in a lag selection procedure that minimises the lag.method criterion, which can be AIC, BIC or corrected AIC (AICc), obtained as AIC + (2k(k+1))/(n-k-1), where k is the number of parameters and n is the number of available observations in the model. #' #' Critical values for the test are based on simulations, which have been smoothed to produce critical values for all seasonal periods. #' #' @return #' ocsb.test returns a list of class "OCSBtest" with the following components: #' * statistics the value of the test statistic. #' * pvalues the p-values for each test statistic. #' * method a character string describing the type of test. #' * data.name a character string giving the name of the data. #' * fitted.model the fitted regression model. #' #' @references #' Osborn DR, Chui APL, Smith J, and Birchenhall CR (1988) "Seasonality and the #' order of integration for consumption", \emph{Oxford Bulletin of Economics #' and Statistics} \bold{50}(4):361-377. #' #' @seealso \code{\link{nsdiffs}} #' #' @examples #' ocsb.test(AirPassengers) #' @importFrom stats AIC BIC #' #' @export ocsb.test <- function(x, lag.method = c("fixed", "AIC", "BIC", "AICc"), maxlag = 0) { lag.method <- match.arg(lag.method) sname <- deparse(substitute(x)) period <- round(frequency(x)) # Avoid non-integer seasonal period if (period == 1) { stop("Data must be seasonal to use `ocsb.test`. 
Check your ts frequency.") } genLags <- function(y, maxlag) { if (maxlag == 0) { return(ts(numeric(NROW(y)), start = start(y), frequency = frequency(y))) } out <- do.call(cbind, lapply(seq_len(maxlag), function(k) stats::lag(y, -k))) if (NCOL(out) > 1) { colnames(out) <- paste0("lag_", seq_len(maxlag)) } return(out) } fitOCSB <- function(x, lag, maxlag) { period <- round(frequency(x)) # Avoid non-integer seasonal period # Compute (1-B)(1-B^m)y_t y <- diff(diff(x, period)) ylag <- genLags(y, lag) if (maxlag > 0) { # Ensure models are fitted on same length for lag order selection via lag.method y <- tail(y, -maxlag) } mf <- na.omit(cbind(y = y, x = ylag)) # Estimate lambda(B) coefficients ar.fit <- lm(y ~ 0 + ., data = mf) # Compute lambda(B)(1-B^m)y_{t-1} Z4_frame <- na.omit(cbind(y = diff(x, period), x = genLags(diff(x, period), lag))) Z4 <- Z4_frame[, "y"] - suppressWarnings(predict(ar.fit, Z4_frame)) # Compute lambda(B)(1-B)y_{t-m} Z5_frame <- na.omit(cbind(y = diff(x), x = genLags(diff(x), lag))) Z5 <- Z5_frame[, "y"] - suppressWarnings(predict(ar.fit, Z5_frame)) # Combine regressors data <- na.omit(cbind(mf, Z4 = stats::lag(Z4, -1), Z5 = stats::lag(Z5, -period))) y <- data[, 1] xreg <- data[, -1] lm(y ~ 0 + xreg) } # Estimate maxlag if (maxlag > 0) { if (lag.method != "fixed") { tmp <- vector("list", maxlag + 1) fits <- lapply(seq_len(maxlag), function(lag) fitOCSB(x, lag, maxlag)) icvals <- unlist(switch(lag.method, AIC = lapply(fits, AIC), BIC = lapply(fits, BIC), AICc = lapply( fits, function(x) { k <- x$rank + 1 -2 * logLik(x) + 2 * k + (2 * k * (k + 1)) / (length(residuals(x)) - k - 1) } ) )) id <- which.min(icvals) maxlag <- id - 1 } } regression <- fitOCSB(x, maxlag, maxlag) # if(any(is.na(regression$coefficients))) # stop("Model did not reach a solution. Check the time series data.") stat <- summary(regression)$coefficients[c("xregZ4", "xregZ5"), "t value"] if (any(is.na(stat))) { stop("Model did not reach a solution. Consider using a longer series or a different test.") } structure(list( statistics = stat[2], critical = calcOCSBCritVal(period), method = "OCSB test", lag.method = lag.method, lag.order = maxlag, fitted.model = regression, data.name = sname ), class = "OCSBtest" ) } # Return critical values for OCSB test at 5% level # Approximation based on extensive simulations. calcOCSBCritVal <- function(seasonal.period) { log.m <- log(seasonal.period) return(-0.2937411 * exp(-0.2850853 * (log.m - 0.7656451) + (-0.05983644) * ((log.m - 0.7656451)^2)) - 1.652202) } #' @export print.OCSBtest <- function(x, ...) 
{ cat("\n") cat(strwrap(x$method, prefix = "\t"), sep = "\n") cat("\n") cat("data: ", x$data.name, "\n\n", sep = "") cat(paste0("Test statistic: ", round(x$statistics, 4), ", 5% critical value: ", round(x$critical, 4))) cat("\n") cat("alternative hypothesis: stationary") cat("\n\n") cat(paste0("Lag order ", x$lag.order, " was selected using ", x$lag.method)) } forecast/R/fitBATS.R0000644000176200001440000005332114323125536013650 0ustar liggesusers# TODO: # # Author: srazbash ############################################################################### fitPreviousBATSModel <- function(y, model, biasadj=FALSE) { seasonal.periods <- model$seasonal.periods if (is.null(seasonal.periods) == FALSE) { seasonal.periods <- as.integer(sort(seasonal.periods)) } paramz <- unParameterise(model$parameters$vect, model$parameters$control) lambda <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta small.phi <- paramz$small.phi gamma.v <- paramz$gamma.v ar.coefs <- paramz$ar.coefs ma.coefs <- paramz$ma.coefs p <- length(ar.coefs) q <- length(ma.coefs) ## Calculate the variance: # 1. Re-set up the matrices w <- .Call("makeBATSWMatrix", smallPhi_s = small.phi, sPeriods_s = seasonal.periods, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, PACKAGE = "forecast") g <- .Call("makeBATSGMatrix", as.numeric(alpha), beta.v, gamma.v, seasonal.periods, as.integer(p), as.integer(q), PACKAGE = "forecast") F <- makeFMatrix(alpha = alpha, beta = beta.v, small.phi <- small.phi, seasonal.periods = seasonal.periods, gamma.bold.matrix = g$gamma.bold.matrix, ar.coefs = ar.coefs, ma.coefs = ma.coefs) # 2. Calculate! y.touse <- y if (!is.null(lambda)) { y.touse <- BoxCox(y, lambda = lambda) lambda <- attr(y.touse, "lambda") } fitted.values.and.errors <- calcModel(y.touse, model$seed.states, F, g$g, w) e <- fitted.values.and.errors$e fitted.values <- fitted.values.and.errors$y.hat variance <- sum((e * e)) / length(y) if (!is.null(lambda)) { fitted.values <- InvBoxCox(fitted.values, lambda = lambda, biasadj, variance) } model.for.output <- model model.for.output$variance <- variance model.for.output$fitted.values <- c(fitted.values) model.for.output$errors <- c(e) model.for.output$x <- fitted.values.and.errors$x model.for.output$y <- y attributes(model.for.output$fitted.values) <- attributes(model.for.output$errors) <- attributes(y) return(model.for.output) } fitSpecificBATS <- function(y, use.box.cox, use.beta, use.damping, seasonal.periods=NULL, starting.params=NULL, x.nought=NULL, ar.coefs=NULL, ma.coefs=NULL, init.box.cox=NULL, bc.lower=0, bc.upper=1, biasadj=FALSE) { if (!is.null(seasonal.periods)) { seasonal.periods <- as.integer(sort(seasonal.periods)) } ## Meaning/purpose of the first if() statement: If this is the first pass, then use default starting values. Else if it is the second pass, then use the values form the first pass as starting values. 
if (is.null(starting.params)) { ## Check for the existence of ARMA() coefficients if (!is.null(ar.coefs)) { p <- length(ar.coefs) } else { p <- 0 } if (!is.null(ma.coefs)) { q <- length(ma.coefs) } else { q <- 0 } # Calculate starting values: if (sum(seasonal.periods) > 16) { alpha <- (1e-6) } else { alpha <- .09 } if (use.beta) { if (sum(seasonal.periods) > 16) { beta.v <- (5e-7) } else { beta.v <- .05 } b <- 0.00 if (use.damping) { small.phi <- .999 } else { small.phi <- 1 } } else { beta.v <- NULL b <- NULL small.phi <- NULL use.damping <- FALSE } if (!is.null(seasonal.periods)) { gamma.v <- rep(.001, length(seasonal.periods)) s.vector <- numeric(sum(seasonal.periods)) # for(s in seasonal.periods) { # s.vector <- cbind(s.vector, numeric(s)) # } } else { gamma.v <- NULL s.vector <- NULL } if (use.box.cox) { if (!is.null(init.box.cox)) { lambda <- init.box.cox } else { lambda <- BoxCox.lambda(y, lower = 0, upper = 1.5) } y.transformed <- BoxCox(y, lambda = lambda) lambda <- attr(y.transformed, "lambda") } else { # the "else" is not needed at the moment lambda <- NULL } } else { paramz <- unParameterise(starting.params$vect, starting.params$control) lambda <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta b <- 0 small.phi <- paramz$small.phi gamma.v <- paramz$gamma.v if (!is.null(seasonal.periods)) { s.vector <- numeric(sum(seasonal.periods)) } else { s.vector <- NULL } # ar.coefs <- paramz$ar.coefs # ma.coefs <- paramz$ma.coefs ## Check for the existence of ARMA() coefficients if (!is.null(ar.coefs)) { p <- length(ar.coefs) } else { p <- 0 } if (!is.null(ma.coefs)) { q <- length(ma.coefs) } else { q <- 0 } } if (is.null(x.nought)) { # Start with the seed states equal to zero if (!is.null(ar.coefs)) { d.vector <- numeric(length(ar.coefs)) } else { d.vector <- NULL } if (!is.null(ma.coefs)) { epsilon.vector <- numeric(length(ma.coefs)) } else { epsilon.vector <- NULL } x.nought <- makeXMatrix(l = 0, b = b, s.vector = s.vector, d.vector = d.vector, epsilon.vector = epsilon.vector)$x } ## Optimise the starting values: # Make the parameter vector parameterise param.vector <- parameterise(alpha = alpha, beta.v = beta.v, small.phi = small.phi, gamma.v = gamma.v, lambda = lambda, ar.coefs = ar.coefs, ma.coefs = ma.coefs) par.scale <- makeParscaleBATS(param.vector$control) # w <- makeWMatrix(small.phi=small.phi, seasonal.periods=seasonal.periods, ar.coefs=ar.coefs, ma.coefs=ma.coefs) w <- .Call("makeBATSWMatrix", smallPhi_s = small.phi, sPeriods_s = seasonal.periods, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, PACKAGE = "forecast") # g <- makeGMatrix(alpha=alpha, beta=beta.v, gamma.vector=gamma, seasonal.periods=seasonal.periods, p=p, q=q) g <- .Call("makeBATSGMatrix", as.numeric(alpha), beta.v, gamma.v, seasonal.periods, as.integer(p), as.integer(q), PACKAGE = "forecast") F <- makeFMatrix(alpha = alpha, beta = beta.v, small.phi = small.phi, seasonal.periods = seasonal.periods, gamma.bold.matrix = g$gamma.bold.matrix, ar.coefs = ar.coefs, ma.coefs = ma.coefs) D <- F - g$g %*% w$w.transpose ## Set up matrices to find the seed states if (use.box.cox) { y.transformed <- BoxCox(y, lambda = lambda) lambda <- attr(y.transformed, "lambda") # x.nought <- BoxCox(x.nought, lambda=lambda) y.tilda <- calcModel(y.transformed, x.nought, F, g$g, w)$e } else { y.tilda <- calcModel(y, x.nought, F, g$g, w)$e } w.tilda.transpose <- matrix(0, nrow = length(y), ncol = ncol(w$w.transpose)) w.tilda.transpose[1, ] <- w$w.transpose # for(i in 2:length(y)) { # w.tilda.transpose[i,] <- w.tilda.transpose[(i-1),] 
%*% D # } w.tilda.transpose <- .Call( "calcWTilda", wTildaTransposes = w.tilda.transpose, Ds = D, PACKAGE = "forecast" ) ## If there is a seasonal component in the model, then the follow adjustment need to be made so that the seed states can be found if (!is.null(seasonal.periods)) { # drop the lines from w.tilda.transpose that correspond to the last seasonal value of each seasonal period list.cut.w <- cutW(use.beta = use.beta, w.tilda.transpose = w.tilda.transpose, seasonal.periods = seasonal.periods, p = p, q = q) w.tilda.transpose <- list.cut.w$matrix mask.vector <- list.cut.w$mask.vector ## Run the regression to find the SEED STATES coefs <- lm(t(y.tilda) ~ w.tilda.transpose - 1)$coefficients ## Find the ACTUAL SEASONAL seed states x.nought <- calcSeasonalSeeds(use.beta = use.beta, coefs = coefs, seasonal.periods = seasonal.periods, mask.vector = mask.vector, p = p, q = q) } else { # Remove the AR() and MA() bits if they exist if ((p != 0) | (q != 0)) { end.cut <- ncol(w.tilda.transpose) start.cut <- end.cut - (p + q) + 1 w.tilda.transpose <- w.tilda.transpose[, -c(start.cut:end.cut)] } x.nought <- lm(t(y.tilda) ~ w.tilda.transpose - 1)$coefficients x.nought <- matrix(x.nought, nrow = length(x.nought), ncol = 1) ## Replace the AR() and MA() bits if they exist if ((p != 0) | (q != 0)) { arma.seed.states <- numeric((p + q)) arma.seed.states <- matrix(arma.seed.states, nrow = length(arma.seed.states), ncol = 1) x.nought <- rbind(x.nought, arma.seed.states) } } #### # Set up environment opt.env <- new.env() assign("F", F, envir = opt.env) assign("w.transpose", w$w.transpose, envir = opt.env) assign("g", g$g, envir = opt.env) assign("gamma.bold.matrix", g$gamma.bold.matrix, envir = opt.env) assign("y", matrix(y, nrow = 1, ncol = length(y)), envir = opt.env) assign("y.hat", matrix(0, nrow = 1, ncol = length(y)), envir = opt.env) assign("e", matrix(0, nrow = 1, ncol = length(y)), envir = opt.env) assign("x", matrix(0, nrow = length(x.nought), ncol = length(y)), envir = opt.env) if (!is.null(seasonal.periods)) { tau <- sum(seasonal.periods) } else { tau <- 0 } ## Second pass of optimisation if (use.box.cox) { # Un-transform the seed states # x.nought.untransformed <- InvBoxCox(x.nought, lambda=lambda) assign("x.nought.untransformed", InvBoxCox(x.nought, lambda = lambda), envir = opt.env) # Optimise the likelihood function optim.like <- optim(par = param.vector$vect, fn = calcLikelihood, method = "Nelder-Mead", opt.env = opt.env, use.beta = use.beta, use.small.phi = use.damping, seasonal.periods = seasonal.periods, p = p, q = q, tau = tau, bc.lower = bc.lower, bc.upper = bc.upper, control = list(maxit = (100 * length(param.vector$vect) ^ 2), parscale = par.scale)) # Get the parameters out of the param.vector paramz <- unParameterise(optim.like$par, param.vector$control) lambda <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta small.phi <- paramz$small.phi gamma.v <- paramz$gamma.v ar.coefs <- paramz$ar.coefs ma.coefs <- paramz$ma.coefs # Transform the seed states x.nought <- BoxCox(opt.env$x.nought.untransformed, lambda = lambda) lambda <- attr(x.nought, "lambda") ## Calculate the variance: # 1. 
Re-set up the matrices # w <- makeWMatrix(small.phi=small.phi, seasonal.periods=seasonal.periods, ar.coefs=ar.coefs, ma.coefs=ma.coefs) w <- .Call("makeBATSWMatrix", smallPhi_s = small.phi, sPeriods_s = seasonal.periods, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, PACKAGE = "forecast") # g <- makeGMatrix(alpha=alpha, beta=beta.v, gamma.vector=gamma, seasonal.periods=seasonal.periods, p=p, q=q) g <- .Call("makeBATSGMatrix", as.numeric(alpha), beta.v, gamma.v, seasonal.periods, as.integer(p), as.integer(q), PACKAGE = "forecast") F <- makeFMatrix(alpha = alpha, beta = beta.v, small.phi = small.phi, seasonal.periods = seasonal.periods, gamma.bold.matrix = g$gamma.bold.matrix, ar.coefs = ar.coefs, ma.coefs = ma.coefs) # 2. Calculate! y.transformed <- BoxCox(y, lambda = lambda) lambda <- attr(y.transformed, "lambda") fitted.values.and.errors <- calcModel(y.transformed, x.nought, F, g$g, w) e <- fitted.values.and.errors$e variance <- sum((e * e)) / length(y) fitted.values <- InvBoxCox(fitted.values.and.errors$y.hat, lambda = lambda, biasadj, variance) attr(lambda, "biasadj") <- biasadj # e <- InvBoxCox(e, lambda=lambda) # ee <- y-fitted.values } else { # else if we are not using the Box-Cox transformation # Optimise the likelihood function if (length(param.vector$vect) > 1) { optim.like <- optim(par = param.vector$vect, fn = calcLikelihoodNOTransformed, method = "Nelder-Mead", opt.env = opt.env, x.nought = x.nought, use.beta = use.beta, use.small.phi = use.damping, seasonal.periods = seasonal.periods, p = p, q = q, tau = tau, control = list(maxit = (100 * length(param.vector$vect) ^ 2), parscale = par.scale)) } else { optim.like <- optim(par = param.vector$vect, fn = calcLikelihoodNOTransformed, method = "BFGS", opt.env = opt.env, x.nought = x.nought, use.beta = use.beta, use.small.phi = use.damping, seasonal.periods = seasonal.periods, p = p, q = q, tau = tau, control = list(parscale = par.scale)) } # Get the parameters out of the param.vector paramz <- unParameterise(optim.like$par, param.vector$control) lambda <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta small.phi <- paramz$small.phi gamma.v <- paramz$gamma.v ar.coefs <- paramz$ar.coefs ma.coefs <- paramz$ma.coefs ## Calculate the variance: # 1. Re-set up the matrices # w <- makeWMatrix(small.phi=small.phi, seasonal.periods=seasonal.periods, ar.coefs=ar.coefs, ma.coefs=ma.coefs) w <- .Call("makeBATSWMatrix", smallPhi_s = small.phi, sPeriods_s = seasonal.periods, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, PACKAGE = "forecast") # g <- makeGMatrix(alpha=alpha, beta=beta.v, gamma.vector=gamma, seasonal.periods=seasonal.periods, p=p, q=q) g <- .Call("makeBATSGMatrix", as.numeric(alpha), beta.v, gamma.v, seasonal.periods, as.integer(p), as.integer(q), PACKAGE = "forecast") F <- makeFMatrix(alpha = alpha, beta = beta.v, small.phi = small.phi, seasonal.periods = seasonal.periods, gamma.bold.matrix = g$gamma.bold.matrix, ar.coefs = ar.coefs, ma.coefs = ma.coefs) # 2. Calculate! 
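# (The variance computed below is the Gaussian MLE, sigma^2 = sum(e^2)/n,
# based on the one-step-ahead errors of the refitted recursion.)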
fitted.values.and.errors <- calcModel(y, x.nought, F, g$g, w) e <- fitted.values.and.errors$e fitted.values <- fitted.values.and.errors$y.hat variance <- sum((e * e)) / length(y) } # Get the likelihood likelihood <- optim.like$value # Calculate the AIC aic <- likelihood + 2 * (length(param.vector$vect) + nrow(x.nought)) # Make a list object model.for.output <- list(lambda = lambda, alpha = alpha, beta = beta.v, damping.parameter = small.phi, gamma.values = gamma.v, ar.coefficients = ar.coefs, ma.coefficients = ma.coefs, likelihood = likelihood, optim.return.code = optim.like$convergence, variance = variance, AIC = aic, parameters = list(vect = optim.like$par, control = param.vector$control), seed.states = x.nought, fitted.values = c(fitted.values), errors = c(e), x = fitted.values.and.errors$x, seasonal.periods = seasonal.periods, y = y) class(model.for.output) <- "bats" #### return(model.for.output) } calcModel <- function(y, x.nought, F, g, w) { # w is passed as a list length.ts <- length(y) x <- matrix(0, nrow = length(x.nought), ncol = length.ts) y.hat <- matrix(0, nrow = 1, ncol = length.ts) e <- matrix(0, nrow = 1, ncol = length.ts) y.hat[, 1] <- w$w.transpose %*% x.nought e[, 1] <- y[1] - y.hat[, 1] x[, 1] <- F %*% x.nought + g %*% e[, 1] y <- matrix(y, nrow = 1, ncol = length.ts) loop <- .Call("calcBATS", ys = y, yHats = y.hat, wTransposes = w$w.transpose, Fs = F, xs = x, gs = g, es = e, PACKAGE = "forecast") return(list(y.hat = loop$y.hat, e = loop$e, x = loop$x)) } calcLikelihood <- function(param.vector, opt.env, use.beta, use.small.phi, seasonal.periods, p=0, q=0, tau=0, bc.lower=0, bc.upper=1) { # param vector should be as follows: Box-Cox.parameter, alpha, beta, small.phi, gamma.vector, ar.coefs, ma.coefs # Put the components of the param.vector into meaningful individual variables box.cox.parameter <- param.vector[1] alpha <- param.vector[2] if (use.beta) { if (use.small.phi) { small.phi <- param.vector[3] beta.v <- param.vector[4] gamma.start <- 5 } else { small.phi <- 1 beta.v <- param.vector[3] gamma.start <- 4 } } else { small.phi <- NULL beta.v <- NULL gamma.start <- 3 } if (!is.null(seasonal.periods)) { gamma.vector <- param.vector[gamma.start:(gamma.start + length(seasonal.periods) - 1)] final.gamma.pos <- gamma.start + length(gamma.vector) - 1 } else { gamma.vector <- NULL final.gamma.pos <- gamma.start - 1 } if (p != 0) { ar.coefs <- matrix(param.vector[(final.gamma.pos + 1):(final.gamma.pos + p)], nrow = 1, ncol = p) } else { ar.coefs <- NULL } if (q != 0) { ma.coefs <- matrix(param.vector[(final.gamma.pos + p + 1):length(param.vector)], nrow = 1, ncol = q) } else { ma.coefs <- NULL } x.nought <- BoxCox(opt.env$x.nought.untransformed, lambda = box.cox.parameter) lambda <- attr(x.nought, "lambda") # w <- makeWMatrix(small.phi=small.phi, seasonal.periods=seasonal.periods, ar.coefs=ar.coefs, ma.coefs=ma.coefs) # w <- .Call("makeBATSWMatrix", smallPhi_s = small.phi, sPeriods_s = seasonal.periods, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, PACKAGE = "forecast") .Call("updateWtransposeMatrix", wTranspose_s = opt.env$w.transpose, smallPhi_s = small.phi, tau_s = as.integer(tau), arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, p_s = as.integer(p), q_s = as.integer(q), PACKAGE = "forecast") # g <- makeGMatrix(alpha=alpha, beta=beta, gamma.vector=gamma.vector, seasonal.periods=seasonal.periods, p=p, q=q) # g <- .Call("makeBATSGMatrix", as.numeric(alpha), beta.v, gamma.vector, seasonal.periods, as.integer(p), as.integer(q), PACKAGE="forecast") .Call("updateGMatrix", g_s = 
opt.env$g, gammaBold_s = opt.env$gamma.bold.matrix, alpha_s = alpha, beta_s = beta.v, gammaVector_s = gamma.vector, seasonalPeriods_s = seasonal.periods, PACKAGE = "forecast") # F <- makeFMatrix(alpha=alpha, beta=beta.v, small.phi=small.phi, seasonal.periods=seasonal.periods, gamma.bold.matrix=g$gamma.bold.matrix, ar.coefs=ar.coefs, ma.coefs=ma.coefs) .Call("updateFMatrix", opt.env$F, small.phi, alpha, beta.v, opt.env$gamma.bold.matrix, ar.coefs, ma.coefs, tau, PACKAGE = "forecast") mat.transformed.y <- BoxCox(opt.env$y, box.cox.parameter) lambda <- attr(mat.transformed.y, "lambda") n <- ncol(opt.env$y) .Call("calcBATSFaster", ys = mat.transformed.y, yHats = opt.env$y.hat, wTransposes = opt.env$w.transpose, Fs = opt.env$F, xs = opt.env$x, gs = opt.env$g, es = opt.env$e, xNought_s = x.nought, sPeriods_s = seasonal.periods, betaV = beta.v, tau_s = as.integer(tau), p_s = as.integer(p), q_s = as.integer(q), PACKAGE = "forecast") log.likelihood <- n * log(sum(opt.env$e ^ 2)) - 2 * (box.cox.parameter - 1) * sum(log(opt.env$y)) assign("D", (opt.env$F - opt.env$g %*% opt.env$w.transpose), envir = opt.env) if (checkAdmissibility(opt.env, box.cox = box.cox.parameter, small.phi = small.phi, ar.coefs = ar.coefs, ma.coefs = ma.coefs, tau = tau, bc.lower = bc.lower, bc.upper = bc.upper)) { return(log.likelihood) } else { return(10 ^ 20) } } calcLikelihoodNOTransformed <- function(param.vector, opt.env, x.nought, use.beta, use.small.phi, seasonal.periods, p=0, q=0, tau=0) { # The likelihood function without the Box-Cox Transformation # param vector should be as follows: alpha, beta, small.phi, gamma.vector, ar.coefs, ma.coefs # Put the components of the param.vector into meaningful individual variables alpha <- param.vector[1] if (use.beta) { if (use.small.phi) { small.phi <- param.vector[2] beta.v <- param.vector[3] gamma.start <- 4 } else { small.phi <- 1 beta.v <- param.vector[2] gamma.start <- 3 } } else { small.phi <- NULL beta.v <- NULL gamma.start <- 2 } if (!is.null(seasonal.periods)) { gamma.vector <- param.vector[gamma.start:(gamma.start + length(seasonal.periods) - 1)] final.gamma.pos <- gamma.start + length(gamma.vector) - 1 } else { gamma.vector <- NULL final.gamma.pos <- gamma.start - 1 } if (p != 0) { ar.coefs <- matrix(param.vector[(final.gamma.pos + 1):(final.gamma.pos + p)], nrow = 1, ncol = p) } else { ar.coefs <- NULL } if (q != 0) { ma.coefs <- matrix(param.vector[(final.gamma.pos + p + 1):length(param.vector)], nrow = 1, ncol = q) } else { ma.coefs <- NULL } # w <- makeWMatrix(small.phi=small.phi, seasonal.periods=seasonal.periods, ar.coefs=ar.coefs, ma.coefs=ma.coefs) # w <- .Call("makeBATSWMatrix", smallPhi_s = small.phi, sPeriods_s = seasonal.periods, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, PACKAGE = "forecast") .Call("updateWtransposeMatrix", wTranspose_s = opt.env$w.transpose, smallPhi_s = small.phi, tau_s = as.integer(tau), arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, p_s = as.integer(p), q_s = as.integer(q), PACKAGE = "forecast") # g <- makeGMatrix(alpha=alpha, beta=beta, gamma.vector=gamma.vector, seasonal.periods=seasonal.periods, p=p, q=q) # g <- .Call("makeBATSGMatrix", alpha, beta.v, gamma.vector, seasonal.periods, as.integer(p), as.integer(q), PACKAGE="forecast") .Call("updateGMatrix", g_s = opt.env$g, gammaBold_s = opt.env$gamma.bold.matrix, alpha_s = alpha, beta_s = beta.v, gammaVector_s = gamma.vector, seasonalPeriods_s = seasonal.periods, PACKAGE = "forecast") # F <- makeFMatrix(alpha=alpha, beta=beta.v, small.phi=small.phi, seasonal.periods=seasonal.periods, 
gamma.bold.matrix=g$gamma.bold.matrix, ar.coefs=ar.coefs, ma.coefs=ma.coefs) .Call("updateFMatrix", opt.env$F, small.phi, alpha, beta.v, opt.env$gamma.bold.matrix, ar.coefs, ma.coefs, tau, PACKAGE = "forecast") n <- ncol(opt.env$y) ######################################################################################### # e <- calcModel(y=y, x.nought=x.nought, F=F, g=g$g, w=w)$e ###################### #### calcModel() code: ## # x <- matrix(0, nrow=length(x.nought), ncol=n) # y.hat <- matrix(0,nrow=1, ncol=n) # e <- matrix(0, nrow=1, ncol=n) # opt.env$y.hat[,1] <- w$w.transpose %*% x.nought # opt.env$e[,1] <- opt.env$y[,1]-opt.env$y.hat[,1] # opt.env$x[,1] <- opt.env$F %*% x.nought + g$g %*% opt.env$e[,1] # mat.y <- matrix(opt.env$y, nrow=1, ncol=n) .Call("calcBATSFaster", ys = opt.env$y, yHats = opt.env$y.hat, wTransposes = opt.env$w.transpose, Fs = opt.env$F, xs = opt.env$x, gs = opt.env$g, es = opt.env$e, xNought_s = x.nought, sPeriods_s = seasonal.periods, betaV = beta.v, tau_s = as.integer(tau), p_s = as.integer(p), q_s = as.integer(q), PACKAGE = "forecast") ## #### #################################################################### log.likelihood <- n * log(sum(opt.env$e * opt.env$e)) # D <- opt.env$F - g$g %*% w$w.transpose assign("D", (opt.env$F - opt.env$g %*% opt.env$w.transpose), envir = opt.env) if (checkAdmissibility(opt.env = opt.env, box.cox = NULL, small.phi = small.phi, ar.coefs = ar.coefs, ma.coefs = ma.coefs, tau = tau)) { return(log.likelihood) } else { return(10 ^ 20) } } forecast/R/simulate_tbats.R0000644000176200001440000000302514207263356015434 0ustar liggesusers#' @rdname simulate.ets #' @export simulate.tbats <- function(object, nsim=length(object$y), seed = NULL, future=TRUE, bootstrap=FALSE, innov = NULL, ...) { if (is.null(innov)) { if (!exists(".Random.seed", envir = .GlobalEnv)) { runif(1) } if (is.null(seed)) { RNGstate <- .Random.seed } else { R.seed <- .Random.seed set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } } else { nsim <- length(innov) } if (bootstrap) { res <- residuals(object) res <- na.omit(res - mean(res, na.rm = TRUE)) e <- sample(res, nsim, replace = TRUE) } else if (is.null(innov)) { e <- rnorm(nsim, 0, sqrt(object$variance)) } else { e <- innov } x <- getResponse(object) y <- numeric(nsim) if(future) { dataplusy <- x } else { # Start somewhere in the original series dataplusy <- ts(sample(x, 1), start=-1/frequency(x), frequency = frequency(x)) } fitplus <- object for(i in seq_along(y)) { fc <- forecast(fitplus, h=1, biasadj=FALSE)$mean if(is.null(object$lambda)) { y[i] <- fc + e[i] } else { y[i] <- InvBoxCox(BoxCox(fc, object$lambda) + e[i], object$lambda) } dataplusy <- ts(c(dataplusy, y[i]), start=start(dataplusy), frequency=frequency(dataplusy)) fitplus <- tbats(dataplusy, model=fitplus) } return(tail(dataplusy, nsim)) } forecast/R/tscv.R0000644000176200001440000002133114456202551013367 0ustar liggesusers# Time series cross-validation # y is a time series # forecastfunction must return an object of class forecast # h is number of steps ahead to forecast # ... are passed to forecastfunction #' Time series cross-validation #' #' \code{tsCV} computes the forecast errors obtained by applying #' \code{forecastfunction} to subsets of the time series \code{y} using a #' rolling forecast origin. #' #' Let \code{y} contain the time series \eqn{y_1,\dots,y_T}{y[1:T]}. 
Then #' \code{forecastfunction} is applied successively to the time series #' \eqn{y_1,\dots,y_t}{y[1:t]}, for \eqn{t=1,\dots,T-h}, making predictions #' \eqn{\hat{y}_{t+h|t}}{f[t+h]}. The errors are given by \eqn{e_{t+h} = #' y_{t+h}-\hat{y}_{t+h|t}}{e[t+h] = y[t+h]-f[t+h]}. If h=1, these are returned as a #' vector, \eqn{e_1,\dots,e_T}{e[1:T]}. For h>1, they are returned as a matrix with #' the hth column containing errors for forecast horizon h. #' The first few errors may be missing as #' it may not be possible to apply \code{forecastfunction} to very short time #' series. #' #' @param y Univariate time series #' @param forecastfunction Function to return an object of class #' \code{forecast}. Its first argument must be a univariate time series, and it #' must have an argument \code{h} for the forecast horizon. If exogenous predictors are used, #' then it must also have \code{xreg} and \code{newxreg} arguments corresponding to the #' training and test periods. #' @param h Forecast horizon #' @param window Length of the rolling window. If NULL, a rolling window will not be used. #' @param xreg Exogenous predictor variables passed to the forecast function if required. #' @param initial Initial period of the time series where no cross-validation is performed. #' @param ... Other arguments are passed to \code{forecastfunction}. #' @return Numerical time series object containing the forecast errors as a vector (if h=1) #' and a matrix otherwise. The time index corresponds to the last period of the training #' data. The columns correspond to the forecast horizons. #' @author Rob J Hyndman #' @seealso \link{CV}, \link{CVar}, \link{residuals.Arima}, \url{https://robjhyndman.com/hyndsight/tscv/}. #' #' @keywords ts #' @examples #' #' #Fit an AR(2) model to each rolling origin subset #' far2 <- function(x, h){forecast(Arima(x, order=c(2,0,0)), h=h)} #' e <- tsCV(lynx, far2, h=1) #' #' #Fit the same model with a rolling window of length 30 #' e <- tsCV(lynx, far2, h=1, window=30) #' #' #Example with exogenous predictors #' far2_xreg <- function(x, h, xreg, newxreg) { #' forecast(Arima(x, order=c(2,0,0), xreg=xreg), xreg=newxreg) #' } #' #' y <- ts(rnorm(50)) #' xreg <- matrix(rnorm(100),ncol=2) #' e <- tsCV(y, far2_xreg, h=3, xreg=xreg) #' #' @export tsCV <- function(y, forecastfunction, h=1, window=NULL, xreg=NULL, initial=0, ...) { y <- as.ts(y) n <- length(y) e <- ts(matrix(NA_real_, nrow = n, ncol = h)) if(initial >= n) stop("initial period too long") tsp(e) <- tsp(y) if (!is.null(xreg)) { # Make xreg a ts object to allow easy subsetting later xreg <- ts(as.matrix(xreg)) if(NROW(xreg) != length(y)) stop("xreg must be of the same size as y") # Pad xreg with NAs xreg <- ts(rbind(xreg, matrix(NA, nrow=h, ncol=NCOL(xreg))), start = start(y), frequency = frequency(y)) } if (is.null(window)) indx <- seq(1+initial, n - 1L) else indx <- seq(window+initial, n - 1L, by = 1L) for (i in indx) { y_subset <- subset( y, start = ifelse(is.null(window), 1L, ifelse(i - window >= 0L, i - window + 1L, stop("small window"))), end = i) if (is.null(xreg)) { fc <- try(suppressWarnings( forecastfunction(y_subset, h = h, ...) ), silent = TRUE) } else { xreg_subset <- subset( xreg, start = ifelse(is.null(window), 1L, ifelse(i - window >= 0L, i - window + 1L, stop("small window"))), end = i) xreg_future <- subset( xreg, start = i+1, end = i+h) fc <- try(suppressWarnings( forecastfunction(y_subset, h = h, xreg = xreg_subset, newxreg=xreg_future, ...)
), silent = TRUE) } if (!is.element("try-error", class(fc))) { e[i, ] <- y[i + seq(h)] - fc$mean[seq(h)] } } if (h == 1) { return(e[, 1L]) } else { colnames(e) <- paste("h=", 1:h, sep = "") return(e) } } # Cross-validation for AR models # By Gabriel Caceres ## Note: arguments to pass must be named #' k-fold Cross-Validation applied to an autoregressive model #' #' \code{CVar} computes the errors obtained by applying an autoregressive #' modelling function to subsets of the time series \code{y} using k-fold #' cross-validation as described in Bergmeir, Hyndman and Koo (2018). It also #' applies a Ljung-Box test to the residuals. If this test is significant #' (see the returned p-value), there is serial correlation in the residuals and the #' model can be considered to be underfitting the data. In this case, the #' cross-validated errors can underestimate the generalization error and should #' not be used. #' #' @aliases print.CVar #' #' @param y Univariate time series #' @param k Number of folds to use for cross-validation. #' @param FUN Function to fit an autoregressive model. Currently, it only works #' with the \code{\link{nnetar}} function. #' @param cvtrace If TRUE, provide progress information. #' @param blocked If TRUE, the folds are chosen as contiguous blocks; otherwise they are chosen at random. #' @param LBlags Lags to use in the Ljung-Box test; defaults to 24. For yearly series it can be set to 20. #' @param ... Other arguments are passed to \code{FUN}. #' @return A list containing information about the model and accuracy for each #' fold, plus other summary information computed across folds. #' @author Gabriel Caceres and Rob J Hyndman #' @seealso \link{CV}, \link{tsCV}. #' @references Bergmeir, C., Hyndman, R.J., Koo, B. (2018) A note on the #' validity of cross-validation for evaluating time series prediction. #' \emph{Computational Statistics & Data Analysis}, \bold{120}, 70-83. #' \url{https://robjhyndman.com/publications/cv-time-series/}. #' @keywords ts #' @examples #' #' modelcv <- CVar(lynx, k=5, lambda=0.15) #' print(modelcv) #' print(modelcv$fold1) #' #' library(ggplot2) #' autoplot(lynx, series="Data") + #' autolayer(modelcv$testfit, series="Fits") + #' autolayer(modelcv$residuals, series="Residuals") #' ggAcf(modelcv$residuals) #' #' @export CVar <- function(y, k=10, FUN=nnetar, cvtrace=FALSE, blocked=FALSE, LBlags=24, ...) { nx <- length(y) # Number of folds can be at most the number of data points k <- min(as.integer(k), nx) if (k <= 1L) { stop("k must be at least 2") } # Set up folds ind <- seq_len(nx) fold <- if (blocked) { sort(rep(1:k, length.out = nx)) } else { sample(rep(1:k, length.out = nx)) } cvacc <- matrix(NA_real_, nrow = k, ncol = 7) out <- list() alltestfit <- rep(NA, length.out = nx) for (i in 1:k) { out[[paste0("fold", i)]] <- list() testset <- ind[fold == i] trainset <- ind[fold != i] trainmodel <- FUN(y, subset = trainset, ...)
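# Re-apply the model estimated on the training folds to the full series without
# re-estimating its parameters (FUN is called with model = trainmodel), so that
# fitted values are available at the held-out time points; accuracy() below is
# then restricted to the test fold via test = testset.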
testmodel <- FUN(y, model = trainmodel, xreg = trainmodel$xreg) testfit <- fitted(testmodel) acc <- accuracy(y, testfit, test = testset) cvacc[i, ] <- acc out[[paste0("fold", i)]]$model <- trainmodel out[[paste0("fold", i)]]$accuracy <- acc out[[paste0("fold", i)]]$testfit <- testfit out[[paste0("fold", i)]]$testset <- testset alltestfit[testset] <- testfit[testset] if (isTRUE(cvtrace)) { cat("Fold", i, "\n") print(acc) cat("\n") } } out$testfit <- ts(alltestfit) tsp(out$testfit) <- tsp(y) out$residuals <- out$testfit - y out$LBpvalue <- Box.test(out$residuals, type = "Ljung", lag = LBlags)$p.value out$k <- k # calculate mean accuracy across all folds CVmean <- matrix(apply(cvacc, 2, FUN = mean, na.rm = TRUE), dimnames = list(colnames(acc), "Mean")) # calculate accuracy sd across all folds --- include? CVsd <- matrix(apply(cvacc, 2, FUN = sd, na.rm = TRUE), dimnames = list(colnames(acc), "SD")) out$CVsummary <- cbind(CVmean, CVsd) out$series <- deparse(substitute(y)) out$call <- match.call() return(structure(out, class = c("CVar", class(trainmodel)))) } #' @export print.CVar <- function(x, ...) { cat("Series:", x$series, "\n") cat("Call: ") print(x$call) # Add info about series, function, and parameters # Add note about any NA/NaN in folds? # # Print number of folds cat("\n", x$k, "-fold cross-validation\n", sep = "") # Print mean & sd accuracy() results print(x$CVsummary) cat("\n") cat("p-value of Ljung-Box test of residuals is ", x$LBpvalue, "\n") cat("if this value is significant (<0.05),\n") cat("the result of the cross-validation should not be used\n") cat("as the model is underfitting the data.\n") invisible(x) } forecast/R/season.R0000644000176200001440000002531314150370574013706 0ustar liggesusers### Functions to handle seasonality #' Number of days in each season #' #' Returns the number of days in each month or quarter of the observed time period. #' #' Useful for month length adjustments. #' #' @param x time series #' @return Time series #' @author Rob J Hyndman #' @seealso \code{\link[forecast]{bizdays}} #' @keywords ts #' @examples #' #' par(mfrow=c(2,1)) #' plot(ldeaths,xlab="Year",ylab="pounds", #' main="Monthly deaths from lung disease (UK)") #' ldeaths.adj <- ldeaths/monthdays(ldeaths)*365.25/12 #' plot(ldeaths.adj,xlab="Year",ylab="pounds", #' main="Adjusted monthly deaths from lung disease (UK)") #' #' @export monthdays <- function(x) { if (!is.ts(x)) { stop("Not a time series") } f <- frequency(x) if (f == 12) { days <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) } else if (f == 4) { days <- c(90, 91, 92, 92) } else { stop("Not monthly or quarterly data") } nyears <- round(length(x) / f + 1) + 1 years <- (1:nyears) + (start(x)[1] - 1) leap.years <- ((years %% 4 == 0) & !(years %% 100 == 0 & years %% 400 != 0))[1:nyears] dummy <- t(matrix(rep(days, nyears), nrow = f)) if (f == 12) { dummy[leap.years, 2] <- 29 } else { dummy[leap.years, 1] <- 91 } xx <- c(t(dummy))[start(x)[2] - 1 + (1:length(x))] return(ts(xx, start = start(x), frequency = f)) } #' Forecast seasonal index #' #' Returns a vector containing the seasonal index for \code{h} future periods. If #' the seasonal index is non-periodic, it uses the last values of the index. #' #' #' @param object Output from \code{\link[stats]{decompose}} or #' \link[stats]{stl}.
#' @param h Number of periods ahead to forecast #' @return Time series #' @author Rob J Hyndman #' @keywords ts #' @examples #' uk.stl <- stl(UKDriverDeaths,"periodic") #' uk.sa <- seasadj(uk.stl) #' uk.fcast <- holt(uk.sa,36) #' seasf <- sindexf(uk.stl,36) #' uk.fcast$mean <- uk.fcast$mean + seasf #' uk.fcast$lower <- uk.fcast$lower + cbind(seasf,seasf) #' uk.fcast$upper <- uk.fcast$upper + cbind(seasf,seasf) #' uk.fcast$x <- UKDriverDeaths #' plot(uk.fcast,main="Forecasts from Holt's method with seasonal adjustment") #' #' @export sindexf <- function(object, h) { if ("stl" %in% class(object)) { ss <- object$time.series[, 1] m <- frequency(ss) ss <- ss[length(ss) - (m:1) + 1] tsp.x <- tsp(object$time.series) } else if ("decomposed.ts" %in% class(object)) { ss <- object$figure m <- frequency(object$seasonal) n <- length(object$trend) ss <- rep(ss, n / m + 1)[1:n] ss <- ss[n - (m:1) + 1] tsp.x <- tsp(object$seasonal) } else { stop("Object of unknown class") } out <- ts(rep(ss, h / m + 1)[1:h], frequency = m, start = tsp.x[2] + 1 / m) return(out) } #' Seasonal dummy variables #' #' \code{seasonaldummy} returns a matrix of dummy variables suitable for use in #' \code{\link{Arima}}, \code{\link{auto.arima}} or \code{\link{tslm}}. The #' last season is omitted and used as the control. #' #' \code{seasonaldummyf} is deprecated; instead use the \code{h} argument in #' \code{seasonaldummy}. #' #' The number of dummy variables is determined from the time series #' characteristics of \code{x}. When \code{h} is missing, the length of #' \code{x} also determines the number of rows for the matrix returned by #' \code{seasonaldummy}. Otherwise, the value of \code{h} determines the number of rows #' for the matrix returned by \code{seasonaldummy}, typically used for #' forecasting. The values within \code{x} are not used. #' #' @param x Seasonal time series: a \code{ts} or a \code{msts} object #' @param h Number of periods ahead to forecast (optional) #' @return Numerical matrix.
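#' The matrix has one column for each season except the last, so there are
#' \code{frequency(x) - 1} columns, and one row per observation (or \code{h}
#' rows when \code{h} is supplied).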
#' @author Rob J Hyndman #' @seealso \code{\link{fourier}} #' @keywords ts #' @examples #' #' plot(ldeaths) #' #' # Using seasonal dummy variables #' month <- seasonaldummy(ldeaths) #' deaths.lm <- tslm(ldeaths ~ month) #' tsdisplay(residuals(deaths.lm)) #' ldeaths.fcast <- forecast(deaths.lm, #' data.frame(month=I(seasonaldummy(ldeaths,36)))) #' plot(ldeaths.fcast) #' #' # A simpler approach to seasonal dummy variables #' deaths.lm <- tslm(ldeaths ~ season) #' ldeaths.fcast <- forecast(deaths.lm, h=36) #' plot(ldeaths.fcast) #' #' @export seasonaldummy <- function(x, h=NULL) { if (!is.ts(x)) { stop("Not a time series") } else { fr.x <- frequency(x) } if (is.null(h)) { if (fr.x == 1) { stop("Non-seasonal time series") } dummy <- as.factor(cycle(x)) dummy.mat <- matrix(0, ncol = frequency(x) - 1, nrow = length(x)) for (i in 1:(frequency(x) - 1)) dummy.mat[dummy == paste(i), i] <- 1 colnames(dummy.mat) <- if (fr.x == 12) { month.abb[1:11] } else if (fr.x == 4) { c("Q1", "Q2", "Q3") } else { paste("S", 1:(fr.x - 1), sep = "") } return(dummy.mat) } else { return(seasonaldummy(ts(rep(0, h), start = tsp(x)[2] + 1 / fr.x, frequency = fr.x))) } } #' @rdname seasonaldummy #' @export seasonaldummyf <- function(x, h) { warning("seasonaldummyf() is deprecated, please use seasonaldummy()") if (!is.ts(x)) { stop("Not a time series") } f <- frequency(x) return(seasonaldummy(ts(rep(0, h), start = tsp(x)[2] + 1 / f, frequency = f))) } #' Fourier terms for modelling seasonality #' #' \code{fourier} returns a matrix containing terms from a Fourier series, up #' to order \code{K}, suitable for use in \code{\link{Arima}}, #' \code{\link{auto.arima}}, or \code{\link{tslm}}. #' #' \code{fourierf} is deprecated; instead use the \code{h} argument in #' \code{fourier}. #' #' The period of the Fourier terms is determined from the time series #' characteristics of \code{x}. When \code{h} is missing, the length of #' \code{x} also determines the number of rows for the matrix returned by #' \code{fourier}. Otherwise, the value of \code{h} determines the number of #' rows for the matrix returned by \code{fourier}, typically used for #' forecasting. The values within \code{x} are not used. #' #' Typical use would omit \code{h} when generating Fourier terms for training a model #' and include \code{h} when generating Fourier terms for forecasting. #' #' When \code{x} is a \code{ts} object, the value of \code{K} should be an #' integer and specifies the number of sine and cosine terms to return. Thus, #' the matrix returned has \code{2*K} columns. #' #' When \code{x} is a \code{msts} object, then \code{K} should be a vector of #' integers specifying the number of sine and cosine terms for each of the #' seasonal periods. Then the matrix returned will have \code{2*sum(K)} #' columns. #' #' @param x Seasonal time series: a \code{ts} or a \code{msts} object #' @param K Maximum order(s) of Fourier terms #' @param h Number of periods ahead to forecast (optional) #' @return Numerical matrix.
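#' Columns come in sine/cosine pairs and are labelled by the order of the term
#' and the (rounded) seasonal period it belongs to, e.g., \code{S1-12} and
#' \code{C1-12} for monthly data.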
#' @author Rob J Hyndman #' @seealso \code{\link{seasonaldummy}} #' @keywords ts #' @examples #' #' library(ggplot2) #' #' # Using Fourier series for a "ts" object #' # K is chosen to minimize the AICc #' deaths.model <- auto.arima(USAccDeaths, xreg=fourier(USAccDeaths,K=5), seasonal=FALSE) #' deaths.fcast <- forecast(deaths.model, xreg=fourier(USAccDeaths, K=5, h=36)) #' autoplot(deaths.fcast) + xlab("Year") #' #' # Using Fourier series for a "msts" object #' taylor.lm <- tslm(taylor ~ fourier(taylor, K = c(3, 3))) #' taylor.fcast <- forecast(taylor.lm, #' data.frame(fourier(taylor, K = c(3, 3), h = 270))) #' autoplot(taylor.fcast) #' #' @export fourier <- function(x, K, h=NULL) { if (is.null(h)) { return(...fourier(x, K, 1:NROW(x))) } else { return(...fourier(x, K, NROW(x) + (1:h))) } } #' @rdname fourier #' @export fourierf <- function(x, K, h) { warning("fourierf() is deprecated, please use fourier()") return(...fourier(x, K, length(x) + (1:h))) } # Function to do the work. ...fourier <- function(x, K, times) { if (any(class(x) == "msts")) { period <- attr(x, "msts") } else { period <- frequency(x) } # Patch for older versions of R that do not have sinpi and cospi functions. if (!exists("sinpi")) { sinpi <- function(x) { sin(pi * x) } cospi <- function(x) { cos(pi * x) } } if (length(period) != length(K)) { stop("Number of periods does not match number of orders") } if (any(2 * K > period)) { stop("K must not be greater than period/2") } # Compute periods of all Fourier terms p <- numeric(0) labels <- character(0) for (j in seq_along(period)) { if (K[j] > 0) { p <- c(p, (1:K[j]) / period[j]) labels <- c(labels, paste( paste0(c("S", "C"), rep(1:K[j], rep(2, K[j]))), round(period[j]), sep = "-" )) } } # Remove equivalent seasonal periods due to multiple seasonality k <- duplicated(p) p <- p[!k] labels <- labels[!rep(k, rep(2, length(k)))] # Remove columns where sinpi=0 k <- abs(2 * p - round(2 * p)) > .Machine$double.eps # Compute matrix of Fourier terms X <- matrix(NA_real_, nrow = length(times), ncol = 2L * length(p)) for (j in seq_along(p)) { if (k[j]) { X[, 2L * j - 1L] <- sinpi(2 * p[j] * times) } X[, 2L * j] <- cospi(2 * p[j] * times) } colnames(X) <- labels # Remove missing columns X <- X[, !is.na(colSums(X)), drop = FALSE] return(X) } #' Moving-average smoothing #' #' \code{ma} computes a simple moving average smoother of a given time series. #' #' The moving average smoother averages the nearest \code{order} periods of #' each observation. As neighbouring observations of a time series are likely #' to be similar in value, averaging eliminates some of the randomness in the #' data, leaving a smooth trend-cycle component. \deqn{\hat{T}_{t} = #' \frac{1}{m} \sum_{j=-k}^k #' y_{t+j}}{T[t]=1/m(y[t-k]+y[t-k+1]+\ldots+y[t]+\ldots+y[t+k-1]+y[t+k])} where #' \eqn{k=\frac{m-1}{2}}{k=(m-1)/2}. #' #' When an even \code{order} is specified, the observations averaged will #' include one more observation from the future than the past (k is rounded #' up). If \code{centre} is \code{TRUE}, the values from two moving averages (where k is #' rounded up and down respectively) are averaged, centering the moving #' average. #' #' @param x Univariate time series #' @param order Order of moving average smoother #' @param centre If TRUE, then the moving average is centred for even orders. #' @return Numerical time series object containing the simple moving average #' smoothed values.
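#' As with \code{\link[stats]{filter}}, on which \code{ma} is based,
#' observations near the ends of the series, where the averaging window is
#' incomplete, are returned as \code{NA}.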
#' @author Rob J Hyndman #' @seealso \code{\link[stats]{decompose}} #' @keywords ts #' @examples #' #' plot(wineind) #' sm <- ma(wineind,order=12) #' lines(sm,col="red") #' #' @export ma <- function(x, order, centre=TRUE) { if (abs(order - round(order)) > 1e-8) { stop("order must be an integer") } if (order %% 2 == 0 && centre) { # centred and even w <- c(0.5, rep(1, order - 1), 0.5) / order } else { # odd or not centred w <- rep(1, order) / order } return(filter(x, w)) } forecast/R/makeMatrices.R0000644000176200001440000002253014323125536015017 0ustar liggesusers# These functions make the w, F, x and g matrices # # # Author: srazbash ############################################################################### makeTBATSFMatrix <- function(alpha, beta=NULL, small.phi=NULL, seasonal.periods=NULL, k.vector=NULL, gamma.bold.matrix=NULL, ar.coefs=NULL, ma.coefs=NULL) { # 1. Alpha Row F <- matrix(1, nrow = 1, ncol = 1) if (!is.null(beta)) { F <- cbind(F, matrix(small.phi, nrow = 1, ncol = 1)) } if (!is.null(seasonal.periods)) { tau <- sum(k.vector) * 2 zero.tau <- matrix(0, nrow = 1, ncol = tau) F <- cbind(F, zero.tau) } if (!is.null(ar.coefs)) { p <- length(ar.coefs) ar.coefs <- matrix(ar.coefs, nrow = 1, ncol = p) alpha.phi <- alpha * ar.coefs F <- cbind(F, alpha.phi) } if (!is.null(ma.coefs)) { q <- length(ma.coefs) ma.coefs <- matrix(ma.coefs, nrow = 1, ncol = q) alpha.theta <- alpha * ma.coefs F <- cbind(F, alpha.theta) } # 2. Beta Row if (!is.null(beta)) { beta.row <- matrix(c(0, small.phi), nrow = 1, ncol = 2) if (!is.null(seasonal.periods)) { beta.row <- cbind(beta.row, zero.tau) } if (!is.null(ar.coefs)) { beta.phi <- beta * ar.coefs beta.row <- cbind(beta.row, beta.phi) } if (!is.null(ma.coefs)) { beta.theta <- beta * ma.coefs beta.row <- cbind(beta.row, beta.theta) } F <- rbind(F, beta.row) } # 3. Seasonal Row if (!is.null(seasonal.periods)) { seasonal.row <- t(zero.tau) if (!is.null(beta)) { seasonal.row <- cbind(seasonal.row, seasonal.row) } # Make the A matrix A <- matrix(0, tau, tau) last.pos <- 0 for (i in 1:length(k.vector)) { if (seasonal.periods[i] != 2) { C <- .Call("makeCIMatrix", k_s = as.integer(k.vector[i]), m_s = as.double(seasonal.periods[i]), PACKAGE = "forecast") } else { C <- matrix(0, 1, 1) } S <- .Call("makeSIMatrix", k_s = as.integer(k.vector[i]), m_s = as.double(seasonal.periods[i]), PACKAGE = "forecast") # C <- matrix(0,k.vector[i],k.vector[i]) # for(j in 1:k.vector[i]) { # l <- round((2*pi*j/seasonal.periods[i]), digits=15) # C[j,j] <- cos(l) # } # S <- matrix(0,k.vector[i],k.vector[i]) # for(j in 1:k.vector[i]) { # S[j,j] <- sin(2*pi*j/seasonal.periods[i]) # } # print(C) # print(S) Ai <- .Call("makeAIMatrix", C_s = C, S_s = S, k_s = as.integer(k.vector[i]), PACKAGE = "forecast") A[(last.pos + 1):(last.pos + (2 * k.vector[i])), (last.pos + 1):(last.pos + (2 * k.vector[i]))] <- Ai last.pos <- last.pos + (2 * k.vector[i]) } seasonal.row <- cbind(seasonal.row, A) if (!is.null(ar.coefs)) { B <- t(gamma.bold.matrix) %*% ar.coefs seasonal.row <- cbind(seasonal.row, B) } if (!is.null(ma.coefs)) { C <- t(gamma.bold.matrix) %*% ma.coefs seasonal.row <- cbind(seasonal.row, C) } F <- rbind(F, seasonal.row) } # 4. 
AR() Rows if (!is.null(ar.coefs)) { # p <- length(ar.coefs) ar.rows <- matrix(0, nrow = p, ncol = 1) if (!is.null(beta)) { ar.rows <- cbind(ar.rows, ar.rows) } if (!is.null(seasonal.periods)) { ar.seasonal.zeros <- matrix(0, nrow = p, ncol = tau) ar.rows <- cbind(ar.rows, ar.seasonal.zeros) } ident <- diag((p - 1)) ident <- cbind(ident, matrix(0, nrow = (p - 1), ncol = 1)) ar.part <- rbind(ar.coefs, ident) ar.rows <- cbind(ar.rows, ar.part) if (!is.null(ma.coefs)) { ma.in.ar <- matrix(0, nrow = p, ncol = q) ma.in.ar[1, ] <- ma.coefs ar.rows <- cbind(ar.rows, ma.in.ar) } F <- rbind(F, ar.rows) } # 5. MA() Rows if (!is.null(ma.coefs)) { ma.rows <- matrix(0, nrow = q, ncol = 1) if (!is.null(beta)) { ma.rows <- cbind(ma.rows, ma.rows) } if (!is.null(seasonal.periods)) { ma.seasonal <- matrix(0, nrow = q, ncol = tau) ma.rows <- cbind(ma.rows, ma.seasonal) } if (!is.null(ar.coefs)) { ar.in.ma <- matrix(0, nrow = q, ncol = p) ma.rows <- cbind(ma.rows, ar.in.ma) } ident <- diag((q - 1)) ident <- cbind(ident, matrix(0, nrow = (q - 1), ncol = 1)) ma.part <- rbind(matrix(0, nrow = 1, ncol = q), ident) ma.rows <- cbind(ma.rows, ma.part) F <- rbind(F, ma.rows) } return(F) } # makeWMatrix <- function(small.phi=NULL, seasonal.periods=NULL, ar.coefs=NULL, ma.coefs=NULL) { # # the.list <- .Call("makeBATSWMatrix", smallPhi_s = small.phi, sPeriods_s = as.integer(seasonal.periods), arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, PACKAGE = "forecast") # # # return(the.list) # # } # makeGMatrix <- function(alpha, beta=NULL, gamma.vector=NULL, seasonal.periods=NULL, p=0, q=0) { # li <- .Call("makeBATSGMatrix", alpha, beta, gamma.vector, as.integer(seasonal.periods), as.integer(p), as.integer(q), PACKAGE="forecast") # # return(li) # } makeFMatrix <- function(alpha, beta=NULL, small.phi=NULL, seasonal.periods=NULL, gamma.bold.matrix=NULL, ar.coefs=NULL, ma.coefs=NULL) { # 1. Alpha Row F <- matrix(1, nrow = 1, ncol = 1) if (!is.null(beta)) { F <- cbind(F, matrix(small.phi, nrow = 1, ncol = 1)) } if (!is.null(seasonal.periods)) { tau <- sum(seasonal.periods) zero.tau <- matrix(0, nrow = 1, ncol = tau) F <- cbind(F, zero.tau) } if (!is.null(ar.coefs)) { p <- length(ar.coefs) ar.coefs <- matrix(ar.coefs, nrow = 1, ncol = p) alpha.phi <- alpha * ar.coefs F <- cbind(F, alpha.phi) } if (!is.null(ma.coefs)) { q <- length(ma.coefs) ma.coefs <- matrix(ma.coefs, nrow = 1, ncol = q) alpha.theta <- alpha * ma.coefs F <- cbind(F, alpha.theta) } # 2. Beta Row if (!is.null(beta)) { beta.row <- matrix(c(0, small.phi), nrow = 1, ncol = 2) if (!is.null(seasonal.periods)) { beta.row <- cbind(beta.row, zero.tau) } if (!is.null(ar.coefs)) { beta.phi <- beta * ar.coefs beta.row <- cbind(beta.row, beta.phi) } if (!is.null(ma.coefs)) { beta.theta <- beta * ma.coefs beta.row <- cbind(beta.row, beta.theta) } F <- rbind(F, beta.row) } # 3. 
Seasonal Row if (!is.null(seasonal.periods)) { seasonal.row <- t(zero.tau) if (!is.null(beta)) { seasonal.row <- cbind(seasonal.row, seasonal.row) } # Make the A matrix for (i in seasonal.periods) { if (i == seasonal.periods[1]) { a.row.one <- matrix(0, nrow = 1, ncol = i) a.row.one[i] <- 1 a.row.two <- cbind(diag((i - 1)), matrix(0, nrow = (i - 1), ncol = 1)) A <- rbind(a.row.one, a.row.two) } else { old.A.rows <- dim(A)[1] old.A.columns <- dim(A)[2] a.row.one <- matrix(0, nrow = 1, ncol = i) a.row.one[i] <- 1 a.row.two <- cbind(diag((i - 1)), matrix(0, nrow = (i - 1), ncol = 1)) Ai <- rbind(a.row.one, a.row.two) A <- rbind(A, matrix(0, nrow = dim(Ai)[1], ncol = old.A.columns)) A <- cbind(A, matrix(0, nrow = dim(A)[1], ncol = dim(Ai)[2])) A[((old.A.rows + 1):(old.A.rows + dim(Ai)[1])), ((old.A.columns + 1):(old.A.columns + dim(Ai)[2]))] <- Ai } } seasonal.row <- cbind(seasonal.row, A) if (!is.null(ar.coefs)) { B <- t(gamma.bold.matrix) %*% ar.coefs seasonal.row <- cbind(seasonal.row, B) } if (!is.null(ma.coefs)) { C <- t(gamma.bold.matrix) %*% ma.coefs seasonal.row <- cbind(seasonal.row, C) } F <- rbind(F, seasonal.row) } # 4. AR() Rows if (!is.null(ar.coefs)) { # p <- length(ar.coefs) ar.rows <- matrix(0, nrow = p, ncol = 1) if (!is.null(beta)) { ar.rows <- cbind(ar.rows, ar.rows) } if (!is.null(seasonal.periods)) { ar.seasonal.zeros <- matrix(0, nrow = p, ncol = tau) ar.rows <- cbind(ar.rows, ar.seasonal.zeros) } ident <- diag((p - 1)) ident <- cbind(ident, matrix(0, nrow = (p - 1), ncol = 1)) ar.part <- rbind(ar.coefs, ident) ar.rows <- cbind(ar.rows, ar.part) if (!is.null(ma.coefs)) { ma.in.ar <- matrix(0, nrow = p, ncol = q) ma.in.ar[1, ] <- ma.coefs ar.rows <- cbind(ar.rows, ma.in.ar) } F <- rbind(F, ar.rows) } # 5. MA() Rows if (!is.null(ma.coefs)) { ma.rows <- matrix(0, nrow = q, ncol = 1) if (!is.null(beta)) { ma.rows <- cbind(ma.rows, ma.rows) } if (!is.null(seasonal.periods)) { ma.seasonal <- matrix(0, nrow = q, ncol = tau) ma.rows <- cbind(ma.rows, ma.seasonal) } if (!is.null(ar.coefs)) { ar.in.ma <- matrix(0, nrow = q, ncol = p) ma.rows <- cbind(ma.rows, ar.in.ma) } ident <- diag((q - 1)) ident <- cbind(ident, matrix(0, nrow = (q - 1), ncol = 1)) ma.part <- rbind(matrix(0, nrow = 1, ncol = q), ident) ma.rows <- cbind(ma.rows, ma.part) F <- rbind(F, ma.rows) } return(F) } makeXMatrix <- function(l, b=NULL, s.vector=NULL, d.vector=NULL, epsilon.vector=NULL) { x.transpose <- matrix(l, nrow = 1, ncol = 1) if (!is.null(b)) { x.transpose <- cbind(x.transpose, matrix(b, nrow = 1, ncol = 1)) } if (!is.null(s.vector)) { x.transpose <- cbind(x.transpose, matrix(s.vector, nrow = 1, ncol = length(s.vector))) } if (!is.null(d.vector)) { x.transpose <- cbind(x.transpose, matrix(d.vector, nrow = 1, ncol = length(d.vector))) } if (!is.null(epsilon.vector)) { x.transpose <- cbind(x.transpose, matrix(epsilon.vector, nrow = 1, ncol = length(epsilon.vector))) } x <- t(x.transpose) return(list(x = x, x.transpose = x.transpose)) } forecast/R/guerrero.R0000644000176200001440000000765014323125536014252 0ustar liggesusers# This R script contains code for extracting the Box-Cox # parameter, lambda, using Guerrero's method (1993). 
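# In outline: the series is split into subseries of length max(2, frequency(x));
# for each subseries the mean m_i and standard deviation s_i are computed, and
# lambda is chosen to minimise the coefficient of variation of the ratios
# s_i / m_i^(1 - lambda), so that the spread of the transformed series is
# roughly independent of its level. A typical call through the exported
# wrapper defined below:
# BoxCox.lambda(AirPassengers, method = "guerrero")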
# Written by Leanne Chhay # guer.cv computes the coefficient of variation # Input: # lam = lambda # x = original time series as a time series object # Output: coefficient of variation guer.cv <- function(lam, x, nonseasonal.length=2) { period <- round(max(nonseasonal.length, frequency(x))) nobsf <- length(x) nyr <- floor(nobsf / period) nobst <- floor(nyr * period) x.mat <- matrix(x[(nobsf - nobst + 1):nobsf], period, nyr) x.mean <- apply(x.mat, 2, mean, na.rm = TRUE) x.sd <- apply(x.mat, 2, sd, na.rm = TRUE) x.rat <- x.sd / x.mean ^ (1 - lam) return(sd(x.rat, na.rm = TRUE) / mean(x.rat, na.rm = TRUE)) } # guerrero extracts the required lambda # Input: x = original time series as a time series object # Output: lambda that minimises the coefficient of variation guerrero <- function(x, lower=-1, upper=2, nonseasonal.length=2) { if(any(x <= 0, na.rm = TRUE)) warning("Guerrero's method for selecting a Box-Cox parameter (lambda) is given for strictly positive data.") return(optimize( guer.cv, c(lower, upper), x = x, nonseasonal.length = nonseasonal.length )$minimum) } # Modified version of boxcox from MASS package bcloglik <- function(x, lower=-1, upper=2) { n <- length(x) if (any(x <= 0, na.rm = TRUE)) { stop("x must be positive") } logx <- log(na.omit(c(x))) xdot <- exp(mean(logx)) if (all(class(x) != "ts")) { fit <- lm(x ~ 1, data = data.frame(x = x), na.action = na.exclude) } else if (frequency(x) > 1) { fit <- tslm(x ~ trend + season, data = data.frame(x = x)) } else { fit <- tslm(x ~ trend, data = data.frame(x = x)) } xqr <- fit$qr lambda <- seq(lower, upper, by = .05) xl <- loglik <- as.vector(lambda) m <- length(xl) x <- na.omit(c(x)) for (i in 1L:m) { if (abs(la <- xl[i]) > 0.02) { xt <- (x ^ la - 1) / la } else { xt <- logx * (1 + (la * logx) / 2 * (1 + (la * logx) / 3 * (1 + (la * logx) / 4))) } loglik[i] <- -n / 2 * log(sum(qr.resid(xqr, xt / xdot ^ (la - 1)) ^ 2)) } return(xl[which.max(loglik)]) } #' Automatic selection of Box Cox transformation parameter #' #' If \code{method=="guerrero"}, Guerrero's (1993) method is used, where lambda #' minimizes the coefficient of variation for subseries of \code{x}. #' #' If \code{method=="loglik"}, the value of lambda is chosen to maximize the #' profile log likelihood of a linear model fitted to \code{x}. For #' non-seasonal data, a linear time trend is fitted while for seasonal data, a #' linear time trend with seasonal dummy variables is used. #' #' #' @param x a numeric vector or time series of class \code{ts} #' @param method Choose method to be used in calculating lambda. #' @param lower Lower limit for possible lambda values. #' @param upper Upper limit for possible lambda values. #' @return a number indicating the Box-Cox transformation parameter. #' @author Leanne Chhay and Rob J Hyndman #' @seealso \code{\link{BoxCox}} #' @references Box, G. E. P. and Cox, D. R. (1964) An analysis of #' transformations. \emph{JRSS B} \bold{26} 211--246. #' #' Guerrero, V.M. (1993) Time-series analysis supported by power #' transformations. \emph{Journal of Forecasting}, \bold{12}, 37--48. 
#' @keywords ts #' @examples #' #' lambda <- BoxCox.lambda(AirPassengers,lower=0) #' air.fit <- Arima(AirPassengers, order=c(0,1,1), #' seasonal=list(order=c(0,1,1),period=12), lambda=lambda) #' plot(forecast(air.fit)) #' #' @export BoxCox.lambda <- function(x, method=c("guerrero", "loglik"), lower=-1, upper=2) { if (any(x <= 0, na.rm = TRUE)) { lower <- max(lower, 0) } if (length(x) <= 2 * frequency(x)) { return(1) } # Not enough data to do much more than this # stop("All values must be positive") method <- match.arg(method) if (method == "loglik") { return(bcloglik(x, lower, upper)) } else { return(guerrero(x, lower, upper)) } } forecast/R/fitTBATS.R0000644000176200001440000005051714323125536014000 0ustar liggesusersfitPreviousTBATSModel <- function(y, model, biasadj=FALSE) { seasonal.periods <- model$seasonal.periods if (is.null(seasonal.periods) == FALSE) { seasonal.periods <- sort(seasonal.periods) } # Get the parameters out of the param.vector paramz <- unParameteriseTBATS(model$parameters$vect, model$parameters$control) lambda <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta if (!is.null(beta.v)) { adj.beta <- 1 } else { adj.beta <- 0 } small.phi <- paramz$small.phi gamma.one.v <- paramz$gamma.one.v gamma.two.v <- paramz$gamma.two.v if (!is.null(paramz$ar.coefs)) { p <- length(paramz$ar.coefs) ar.coefs <- matrix(paramz$ar.coefs, nrow = 1, ncol = p) } else { ar.coefs <- NULL p <- 0 } if (!is.null(paramz$ma.coefs)) { q <- length(paramz$ma.coefs) ma.coefs <- matrix(paramz$ma.coefs, nrow = 1, ncol = q) } else { ma.coefs <- NULL q <- 0 } if (!is.null(seasonal.periods)) { tau <- as.integer(2 * sum(model$k.vector)) gamma.bold <- matrix(0, nrow = 1, ncol = (2 * sum(model$k.vector))) } else { tau <- as.integer(0) gamma.bold <- NULL } g <- matrix(0, nrow = ((2 * sum(model$k.vector)) + 1 + adj.beta + p + q), ncol = 1) if (p != 0) { g[(1 + adj.beta + tau + 1), 1] <- 1 } if (q != 0) { g[(1 + adj.beta + tau + p + 1), 1] <- 1 } y.touse <- y if (is.null(lambda) == FALSE) { y.touse <- BoxCox(y, lambda = lambda) lambda <- attr(y.touse, "lambda") } ## Calculate the variance: # 1. Re-set up the matrices w <- .Call("makeTBATSWMatrix", smallPhi_s = small.phi, kVector_s = model$k.vector, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, tau_s = tau, PACKAGE = "forecast") if (!is.null(gamma.bold)) { .Call("updateTBATSGammaBold", gammaBold_s = gamma.bold, kVector_s = model$k.vector, gammaOne_s = gamma.one.v, gammaTwo_s = gamma.two.v, PACKAGE = "forecast") } .Call("updateTBATSGMatrix", g_s = g, gammaBold_s = gamma.bold, alpha_s = alpha, beta_s = beta.v, PACKAGE = "forecast") F <- makeTBATSFMatrix(alpha = alpha, beta = beta.v, small.phi = small.phi, seasonal.periods = seasonal.periods, k.vector = model$k.vector, gamma.bold.matrix = gamma.bold, ar.coefs = ar.coefs, ma.coefs = ma.coefs) .Call("updateFMatrix", F, small.phi, alpha, beta.v, gamma.bold, ar.coefs, ma.coefs, tau, PACKAGE = "forecast") # 2. Calculate! 
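# (calcModel() runs the linear innovations state space recursions over the
# whole series, returning the one-step-ahead fits y.hat, the residuals e,
# and the state matrix x.)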
fitted.values.and.errors <- calcModel(y.touse, model$seed.states, F, g, w) e <- fitted.values.and.errors$e fitted.values <- fitted.values.and.errors$y.hat variance <- sum((e * e)) / length(y) if (!is.null(lambda)) { fitted.values <- InvBoxCox(fitted.values, lambda = lambda, biasadj, variance) } model.for.output <- model model.for.output$variance <- variance model.for.output$fitted.values <- ts(c(fitted.values)) model.for.output$errors <- ts(c(e)) tsp(model.for.output$fitted.values) <- tsp(model.for.output$errors) <- tsp(y) model.for.output$x <- fitted.values.and.errors$x model.for.output$y <- y return(model.for.output) } fitSpecificTBATS <- function(y, use.box.cox, use.beta, use.damping, seasonal.periods=NULL, k.vector=NULL, starting.params=NULL, x.nought=NULL, ar.coefs=NULL, ma.coefs=NULL, init.box.cox=NULL, bc.lower=0, bc.upper=1, biasadj=FALSE) { if (!is.null(seasonal.periods)) { seasonal.periods <- sort(seasonal.periods) } ## Meaning/purpose of the first if() statement: If this is the first pass, then use default starting values. Else if it is the second pass, then use the values form the first pass as starting values. if (is.null(starting.params)) { ## Check for the existence of ARMA() coefficients if (!is.null(ar.coefs)) { p <- length(ar.coefs) } else { p <- 0 } if (!is.null(ma.coefs)) { q <- length(ma.coefs) } else { q <- 0 } # Calculate starting values: alpha <- 0.09 if (use.beta) { adj.beta <- 1 beta.v <- 0.05 b <- 0.00 if (use.damping) { small.phi <- .999 } else { small.phi <- 1 } } else { adj.beta <- 0 beta.v <- NULL b <- NULL small.phi <- NULL use.damping <- FALSE } if (!is.null(seasonal.periods)) { gamma.one.v <- rep(0, length(k.vector)) gamma.two.v <- rep(0, length(k.vector)) s.vector <- numeric(2 * sum(k.vector)) k.vector <- as.integer(k.vector) } else { gamma.one.v <- NULL gamma.two.v <- NULL s.vector <- NULL } if (use.box.cox) { if (!is.null(init.box.cox)) { lambda <- init.box.cox } else { lambda <- BoxCox.lambda(y, lower = 0, upper = 1.5) } y.transformed <- BoxCox(y, lambda = lambda) lambda <- attr(y.transformed, "lambda") } else { # the "else" is not needed at the moment lambda <- NULL } } else { paramz <- unParameteriseTBATS(starting.params$vect, starting.params$control) lambda <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta if (!is.null(beta.v)) { adj.beta <- 1 } else { adj.beta <- 0 } b <- 0 small.phi <- paramz$small.phi gamma.one.v <- paramz$gamma.one.v gamma.two.v <- paramz$gamma.two.v if (!is.null(seasonal.periods)) { s.vector <- numeric(2 * sum(k.vector)) } else { s.vector <- NULL } # ar.coefs <- paramz$ar.coefs # ma.coefs <- paramz$ma.coefs ## Check for the existence of ARMA() coefficients if (!is.null(ar.coefs)) { p <- length(ar.coefs) } else { p <- 0 } if (!is.null(ma.coefs)) { q <- length(ma.coefs) } else { q <- 0 } } if (is.null(x.nought)) { # Start with the seed states equal to zero if (!is.null(ar.coefs)) { d.vector <- numeric(length(ar.coefs)) } else { d.vector <- NULL } if (!is.null(ma.coefs)) { epsilon.vector <- numeric(length(ma.coefs)) } else { epsilon.vector <- NULL } x.nought <- makeXMatrix(l = 0, b = b, s.vector = s.vector, d.vector = d.vector, epsilon.vector = epsilon.vector)$x } # Make the parameter vector parameterise param.vector <- parameterise(alpha = alpha, beta.v = beta.v, small.phi = small.phi, gamma.v = cbind(gamma.one.v, gamma.two.v), lambda = lambda, ar.coefs = ar.coefs, ma.coefs = ma.coefs) par.scale <- makeParscale(param.vector$control) if (!is.null(seasonal.periods)) { tau <- as.integer(2 * sum(k.vector)) } else { tau <- 
as.integer(0) } w <- .Call("makeTBATSWMatrix", smallPhi_s = small.phi, kVector_s = k.vector, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, tau_s = tau, PACKAGE = "forecast") if (!is.null(seasonal.periods)) { gamma.bold <- matrix(0, nrow = 1, ncol = (2 * sum(k.vector))) .Call("updateTBATSGammaBold", gammaBold_s = gamma.bold, kVector_s = k.vector, gammaOne_s = gamma.one.v, gammaTwo_s = gamma.two.v, PACKAGE = "forecast") } else { gamma.bold <- NULL } g <- matrix(0, nrow = ((2 * sum(k.vector)) + 1 + adj.beta + p + q), ncol = 1) if (p != 0) { g[(1 + adj.beta + tau + 1), 1] <- 1 } if (q != 0) { g[(1 + adj.beta + tau + p + 1), 1] <- 1 } .Call("updateTBATSGMatrix", g_s = g, gammaBold_s = gamma.bold, alpha_s = alpha, beta_s = beta.v, PACKAGE = "forecast") F <- makeTBATSFMatrix(alpha = alpha, beta = beta.v, small.phi = small.phi, seasonal.periods = seasonal.periods, k.vector = k.vector, gamma.bold.matrix = gamma.bold, ar.coefs = ar.coefs, ma.coefs = ma.coefs) D <- F - g %*% w$w.transpose #### # Set up environment opt.env <- new.env() assign("F", F, envir = opt.env) assign("w.transpose", w$w.transpose, envir = opt.env) assign("g", g, envir = opt.env) assign("gamma.bold", gamma.bold, envir = opt.env) assign("k.vector", k.vector, envir = opt.env) assign("y", matrix(y, nrow = 1, ncol = length(y)), envir = opt.env) assign("y.hat", matrix(0, nrow = 1, ncol = length(y)), envir = opt.env) assign("e", matrix(0, nrow = 1, ncol = length(y)), envir = opt.env) assign("x", matrix(0, nrow = length(x.nought), ncol = length(y)), envir = opt.env) ## Set up matrices to find the seed states if (use.box.cox) { y.transformed <- BoxCox(y, lambda = lambda) lambda <- attr(y.transformed, "lambda") .Call("calcTBATSFaster", ys = matrix(y.transformed, nrow = 1, ncol = length(y.transformed)), yHats = opt.env$y.hat, wTransposes = opt.env$w.transpose, Fs = opt.env$F, xs = opt.env$x, gs = opt.env$g, es = opt.env$e, xNought_s = x.nought, PACKAGE = "forecast") y.tilda <- opt.env$e } else { .Call("calcTBATSFaster", ys = opt.env$y, yHats = opt.env$y.hat, wTransposes = opt.env$w.transpose, Fs = opt.env$F, xs = opt.env$x, gs = opt.env$g, es = opt.env$e, xNought_s = x.nought, PACKAGE = "forecast") y.tilda <- opt.env$e } w.tilda.transpose <- matrix(0, nrow = length(y), ncol = ncol(w$w.transpose)) w.tilda.transpose[1, ] <- w$w.transpose w.tilda.transpose <- .Call("calcWTilda", wTildaTransposes = w.tilda.transpose, Ds = D, PACKAGE = "forecast") # Remove the AR() and MA() bits if they exist if ((p != 0) | (q != 0)) { end.cut <- ncol(w.tilda.transpose) start.cut <- end.cut - (p + q) + 1 w.tilda.transpose <- w.tilda.transpose[, -c(start.cut:end.cut)] } x.nought <- lm(t(y.tilda) ~ w.tilda.transpose - 1)$coefficients x.nought <- matrix(x.nought, nrow = length(x.nought), ncol = 1) ## Replace the AR() and MA() bits if they exist if ((p != 0) | (q != 0)) { arma.seed.states <- numeric((p + q)) arma.seed.states <- matrix(arma.seed.states, nrow = length(arma.seed.states), ncol = 1) x.nought <- rbind(x.nought, arma.seed.states) } ## Optimisation if (use.box.cox) { # Un-transform the seed states assign("x.nought.untransformed", InvBoxCox(x.nought, lambda = lambda), envir = opt.env) # Optimise the likelihood function optim.like <- optim( par = param.vector$vect, fn = calcLikelihoodTBATS, method = "Nelder-Mead", opt.env = opt.env, use.beta = use.beta, use.small.phi = use.damping, seasonal.periods = seasonal.periods, param.control = param.vector$control, p = p, q = q, tau = tau, bc.lower = bc.lower, bc.upper = bc.upper, control = list(maxit = (100 * 
length(param.vector$vect) ^ 2), parscale = par.scale) ) # Get the parameters out of the param.vector paramz <- unParameteriseTBATS(optim.like$par, param.vector$control) lambda <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta small.phi <- paramz$small.phi gamma.one.v <- paramz$gamma.one.v gamma.two.v <- paramz$gamma.two.v if (!is.null(paramz$ar.coefs)) { p <- length(paramz$ar.coefs) ar.coefs <- matrix(paramz$ar.coefs, nrow = 1, ncol = p) } else { ar.coefs <- NULL p <- 0 } if (!is.null(paramz$ma.coefs)) { q <- length(paramz$ma.coefs) ma.coefs <- matrix(paramz$ma.coefs, nrow = 1, ncol = q) } else { ma.coefs <- NULL q <- 0 } # Transform the seed states x.nought <- BoxCox(opt.env$x.nought.untransformed, lambda = lambda) lambda <- attr(x.nought, "lambda") ## Calculate the variance: # 1. Re-set up the matrices w <- .Call("makeTBATSWMatrix", smallPhi_s = small.phi, kVector_s = k.vector, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, tau_s = tau, PACKAGE = "forecast") if (!is.null(gamma.bold)) { .Call("updateTBATSGammaBold", gammaBold_s = gamma.bold, kVector_s = k.vector, gammaOne_s = gamma.one.v, gammaTwo_s = gamma.two.v, PACKAGE = "forecast") } .Call("updateTBATSGMatrix", g_s = g, gammaBold_s = gamma.bold, alpha_s = alpha, beta_s = beta.v, PACKAGE = "forecast") .Call("updateFMatrix", F, small.phi, alpha, beta.v, gamma.bold, ar.coefs, ma.coefs, tau, PACKAGE = "forecast") # 2. Calculate! y.transformed <- BoxCox(y, lambda = lambda) lambda <- attr(y.transformed, "lambda") fitted.values.and.errors <- calcModel(y.transformed, x.nought, F, g, w) e <- fitted.values.and.errors$e variance <- sum((e * e)) / length(y) fitted.values <- InvBoxCox(fitted.values.and.errors$y.hat, lambda = lambda, biasadj, variance) attr(lambda, "biasadj") <- biasadj # e <- InvBoxCox(e, lambda=lambda) ee <- y - fitted.values } else { # else if we are not using the Box-Cox transformation # Optimise the likelihood function if (length(param.vector$vect) > 1) { optim.like <- optim(par = param.vector$vect, fn = calcLikelihoodNOTransformedTBATS, method = "Nelder-Mead", opt.env = opt.env, x.nought = x.nought, use.beta = use.beta, use.small.phi = use.damping, seasonal.periods = seasonal.periods, param.control = param.vector$control, p = p, q = q, tau = tau, control = list(maxit = (100 * length(param.vector$vect) ^ 2), parscale = par.scale)) } else { optim.like <- optim(par = param.vector$vect, fn = calcLikelihoodNOTransformedTBATS, method = "BFGS", opt.env = opt.env, x.nought = x.nought, use.beta = use.beta, use.small.phi = use.damping, seasonal.periods = seasonal.periods, param.control = param.vector$control, p = p, q = q, tau = tau, control = list(parscale = par.scale)) } # Get the parameters out of the param.vector paramz <- unParameteriseTBATS(optim.like$par, param.vector$control) lambda <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta small.phi <- paramz$small.phi gamma.one.v <- paramz$gamma.one.v gamma.two.v <- paramz$gamma.two.v if (!is.null(paramz$ar.coefs)) { p <- length(paramz$ar.coefs) ar.coefs <- matrix(paramz$ar.coefs, nrow = 1, ncol = p) } else { ar.coefs <- NULL p <- 0 } if (!is.null(paramz$ma.coefs)) { q <- length(paramz$ma.coefs) ma.coefs <- matrix(paramz$ma.coefs, nrow = 1, ncol = q) } else { ma.coefs <- NULL q <- 0 } ## Calculate the variance: # 1. 
Re-set up the matrices w <- .Call("makeTBATSWMatrix", smallPhi_s = small.phi, kVector_s = k.vector, arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, tau_s = tau, PACKAGE = "forecast") if (!is.null(gamma.bold)) { .Call("updateTBATSGammaBold", gammaBold_s = gamma.bold, kVector_s = k.vector, gammaOne_s = gamma.one.v, gammaTwo_s = gamma.two.v, PACKAGE = "forecast") } .Call("updateTBATSGMatrix", g_s = g, gammaBold_s = gamma.bold, alpha_s = alpha, beta_s = beta.v, PACKAGE = "forecast") .Call("updateFMatrix", F, small.phi, alpha, beta.v, gamma.bold, ar.coefs, ma.coefs, tau, PACKAGE = "forecast") # 2. Calculate! fitted.values.and.errors <- calcModel(y, x.nought, F, g, w) e <- fitted.values.and.errors$e fitted.values <- fitted.values.and.errors$y.hat variance <- sum((e * e)) / length(y) } # Get the likelihood likelihood <- optim.like$value # Calculate the AIC aic <- likelihood + 2 * (length(param.vector$vect) + nrow(x.nought)) # Make a list object fits <- ts(c(fitted.values)) e <- ts(c(e)) tsp(fits) <- tsp(e) <- tsp(y) model.for.output <- list( lambda = lambda, alpha = alpha, beta = beta.v, damping.parameter = small.phi, gamma.one.values = gamma.one.v, gamma.two.values = gamma.two.v, ar.coefficients = ar.coefs, ma.coefficients = ma.coefs, likelihood = likelihood, optim.return.code = optim.like$convergence, variance = variance, AIC = aic, parameters = list(vect = optim.like$par, control = param.vector$control), seed.states = x.nought, fitted.values = fits, errors = e, x = fitted.values.and.errors$x, seasonal.periods = seasonal.periods, k.vector = k.vector, y = y, p = p, q = q ) class(model.for.output) <- c("tbats", "bats") return(model.for.output) } calcLikelihoodTBATS <- function(param.vector, opt.env, use.beta, use.small.phi, seasonal.periods, param.control, p=0, q=0, tau=0, bc.lower=0, bc.upper=1) { # param vector should be as follows: Box-Cox.parameter, alpha, beta, small.phi, gamma.vector, ar.coefs, ma.coefs # Put the components of the param.vector into meaningful individual variables paramz <- unParameteriseTBATS(param.vector, param.control) box.cox.parameter <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta small.phi <- paramz$small.phi gamma.one.v <- paramz$gamma.one.v gamma.two.v <- paramz$gamma.two.v ar.coefs <- paramz$ar.coefs ma.coefs <- paramz$ma.coefs if (!is.null(paramz$ar.coefs)) { p <- length(paramz$ar.coefs) ar.coefs <- matrix(paramz$ar.coefs, nrow = 1, ncol = p) } else { ar.coefs <- NULL p <- 0 } if (!is.null(paramz$ma.coefs)) { q <- length(paramz$ma.coefs) ma.coefs <- matrix(paramz$ma.coefs, nrow = 1, ncol = q) } else { ma.coefs <- NULL q <- 0 } x.nought <- BoxCox(opt.env$x.nought.untransformed, lambda = box.cox.parameter) lambda <- attr(x.nought, "lambda") .Call("updateWtransposeMatrix", wTranspose_s = opt.env$w.transpose, smallPhi_s = small.phi, tau_s = as.integer(tau), arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, p_s = as.integer(p), q_s = as.integer(q), PACKAGE = "forecast") if (!is.null(opt.env$gamma.bold)) { .Call("updateTBATSGammaBold", gammaBold_s = opt.env$gamma.bold, kVector_s = opt.env$k.vector, gammaOne_s = gamma.one.v, gammaTwo_s = gamma.two.v) } .Call("updateTBATSGMatrix", g_s = opt.env$g, gammaBold_s = opt.env$gamma.bold, alpha_s = alpha, beta_s = beta.v, PACKAGE = "forecast") .Call("updateFMatrix", opt.env$F, small.phi, alpha, beta.v, opt.env$gamma.bold, ar.coefs, ma.coefs, tau, PACKAGE = "forecast") mat.transformed.y <- BoxCox(opt.env$y, box.cox.parameter) lambda <- attr(mat.transformed.y, "lambda") n <- ncol(opt.env$y) .Call("calcTBATSFaster", ys = 
mat.transformed.y, yHats = opt.env$y.hat, wTransposes = opt.env$w.transpose, Fs = opt.env$F, xs = opt.env$x, gs = opt.env$g, es = opt.env$e, xNought_s = x.nought, PACKAGE = "forecast") ## #### #################################################################### log.likelihood <- n * log(sum(opt.env$e ^ 2)) - 2 * (box.cox.parameter - 1) * sum(log(opt.env$y)) if (is.na(log.likelihood)) { # Not sure why this would occur return(Inf) } assign("D", (opt.env$F - opt.env$g %*% opt.env$w.transpose), envir = opt.env) if (checkAdmissibility(opt.env, box.cox = box.cox.parameter, small.phi = small.phi, ar.coefs = ar.coefs, ma.coefs = ma.coefs, tau = sum(seasonal.periods), bc.lower = bc.lower, bc.upper = bc.upper)) { return(log.likelihood) } else { return(Inf) } } calcLikelihoodNOTransformedTBATS <- function(param.vector, opt.env, x.nought, use.beta, use.small.phi, seasonal.periods, param.control, p=0, q=0, tau=0) { # The likelihood function without the Box-Cox Transformation # param vector should be as follows: alpha, beta, small.phi, gamma.vector, ar.coefs, ma.coefs # Put the components of the param.vector into meaningful individual variables paramz <- unParameteriseTBATS(param.vector, param.control) box.cox.parameter <- paramz$lambda alpha <- paramz$alpha beta.v <- paramz$beta small.phi <- paramz$small.phi gamma.one.v <- paramz$gamma.one.v gamma.two.v <- paramz$gamma.two.v if (!is.null(paramz$ar.coefs)) { p <- length(paramz$ar.coefs) ar.coefs <- matrix(paramz$ar.coefs, nrow = 1, ncol = p) } else { ar.coefs <- NULL p <- 0 } if (!is.null(paramz$ma.coefs)) { q <- length(paramz$ma.coefs) ma.coefs <- matrix(paramz$ma.coefs, nrow = 1, ncol = q) } else { ma.coefs <- NULL q <- 0 } .Call("updateWtransposeMatrix", wTranspose_s = opt.env$w.transpose, smallPhi_s = small.phi, tau_s = as.integer(tau), arCoefs_s = ar.coefs, maCoefs_s = ma.coefs, p_s = as.integer(p), q_s = as.integer(q), PACKAGE = "forecast") if (!is.null(opt.env$gamma.bold)) { .Call("updateTBATSGammaBold", gammaBold_s = opt.env$gamma.bold, kVector_s = opt.env$k.vector, gammaOne_s = gamma.one.v, gammaTwo_s = gamma.two.v) } .Call("updateTBATSGMatrix", g_s = opt.env$g, gammaBold_s = opt.env$gamma.bold, alpha_s = alpha, beta_s = beta.v, PACKAGE = "forecast") .Call("updateFMatrix", opt.env$F, small.phi, alpha, beta.v, opt.env$gamma.bold, ar.coefs, ma.coefs, tau, PACKAGE = "forecast") n <- ncol(opt.env$y) .Call("calcTBATSFaster", ys = opt.env$y, yHats = opt.env$y.hat, wTransposes = opt.env$w.transpose, Fs = opt.env$F, xs = opt.env$x, gs = opt.env$g, es = opt.env$e, xNought_s = x.nought, PACKAGE = "forecast") ## #### #################################################################### log.likelihood <- n * log(sum(opt.env$e * opt.env$e)) if (is.na(log.likelihood)) { # Not sure why this would occur return(Inf) } assign("D", (opt.env$F - opt.env$g %*% opt.env$w.transpose), envir = opt.env) if (checkAdmissibility(opt.env = opt.env, box.cox = NULL, small.phi = small.phi, ar.coefs = ar.coefs, ma.coefs = ma.coefs, tau = tau)) { return(log.likelihood) } else { return(Inf) } } forecast/R/attach.R0000644000176200001440000000552314207263356013665 0ustar liggesusers.onAttach <- function(...) 
{ if (!interactive() || stats::runif(1) > 0.2) return() tips <- c( "Use suppressPackageStartupMessages() to eliminate package startup messages.", "Stackoverflow is a great place to get help on R issues:\n http://stackoverflow.com/tags/forecasting+r.", "Crossvalidated is a great place to get help on forecasting issues:\n http://stats.stackexchange.com/tags/forecasting.", "Need help getting started? Try the online textbook FPP:\n http://otexts.com/fpp2/", "Want to stay up-to-date? Read the Hyndsight blog:\n https://robjhyndman.com/hyndsight/", "Want to meet other forecasters? Join the International Institute of Forecasters:\n http://forecasters.org/" ) tip <- sample(tips, 1) msg <- paste("This is forecast", packageVersion("forecast"), "\n ", tip) packageStartupMessage(msg) } register_s3_method <- function(pkg, generic, class, fun = NULL) { stopifnot(is.character(pkg), length(pkg) == 1) stopifnot(is.character(generic), length(generic) == 1) stopifnot(is.character(class), length(class) == 1) if (is.null(fun)) { fun <- get(paste0(generic, ".", class), envir = parent.frame()) } else { stopifnot(is.function(fun)) } if (pkg %in% loadedNamespaces()) { registerS3method(generic, class, fun, envir = asNamespace(pkg)) } # Always register hook in case package is later unloaded & reloaded setHook( packageEvent(pkg, "onLoad"), function(...) { registerS3method(generic, class, fun, envir = asNamespace(pkg)) } ) } overwrite_s3_generic <- function(pkg, generic){ if (pkg %in% loadedNamespaces()) { assign(generic, get(generic, asNamespace(pkg)), envir = asNamespace("forecast")) } # Always register hook in case package is later unloaded & reloaded # setHook( # packageEvent(pkg, "onLoad"), # function(...) { # pkg_env <- asNamespace("forecast") # unlockBinding(generic, pkg_env) # assign(generic, get(generic, asNamespace(pkg)), envir = pkg_env) # lockBinding(generic, pkg_env) # } # ) } #' @importFrom utils methods .onLoad <- function(...) { overwrite_s3_generic("ggplot2", "autolayer") register_s3_method("ggplot2", "autolayer", "ts") register_s3_method("ggplot2", "autolayer", "mts") register_s3_method("ggplot2", "autolayer", "msts") register_s3_method("ggplot2", "autolayer", "forecast") register_s3_method("ggplot2", "autolayer", "mforecast") # methods <- strsplit(utils::.S3methods(forecast), ".", fixed = TRUE) # overwrite_s3_generic("fabletools", "forecast") # for(method in methods){ # register_s3_method("fabletools", method[1], method[2]) # } # methods <- strsplit(utils::.S3methods(accuracy), ".", fixed = TRUE) # overwrite_s3_generic("fabletools", "accuracy") # for(method in methods){ # register_s3_method("fabletools", method[1], method[2]) # } invisible() } forecast/R/naive.R0000644000176200001440000002115014456202551013511 0ustar liggesusers# Random walk related forecasts # Based on lagged walks # lag=1 corresponds to standard random walk (i.e., naive forecast) # lag=m corresponds to seasonal naive method lagwalk <- function(y, lag=1, drift=FALSE, lambda=NULL, biasadj=FALSE) { if(!is.ts(y)){ y <- as.ts(y) } dimy <- dim(y) if(!is.null(dimy)) { if(dimy[2] > 1) stop("Multivariate time series detected. 
This function is designed for univariate time series only.") } origy <- y if (!is.null(lambda)) { y <- BoxCox(y, lambda) lambda <- attr(y, "lambda") } m <- frequency(y) # Complete missing values with lagged values y_na <- which(is.na(y)) y_na <- y_na[y_na>lag] fits <- stats::lag(y, -lag) for(i in y_na){ if(is.na(fits)[i]){ fits[i] <- fits[i-lag] } } fitted <- ts(c(rep(NA, lag), head(fits, -lag)), start = start(y), frequency = m) fitted <- copy_msts(y, fitted) if(drift){ fit <- summary(lm(y-fitted ~ 1, na.action=na.exclude)) b <- fit$coefficients[1,1] b.se <- fit$coefficients[1,2] sigma <- fit$sigma fitted <- fitted + b res <- y - fitted method <- "Lag walk with drift" } else{ res <- y - fitted b <- b.se <- 0 sigma <- sqrt(mean(res^2, na.rm=TRUE)) method <- "Lag walk" } if (!is.null(lambda)) { fitted <- InvBoxCox(fitted, lambda, biasadj, var(res)) attr(lambda, "biasadj") <- biasadj } model <- structure( list( x = origy, fitted = fitted, future = tail(fits, lag), residuals = res, method = method, series = deparse(substitute(y)), sigma2 = sigma^2, par = list(includedrift = drift, drift = b, drift.se = b.se, lag = lag), lambda = lambda, call = match.call() ), class = "lagwalk" ) } #' @export forecast.lagwalk <- function(object, h=10, level=c(80, 95), fan=FALSE, lambda=NULL, simulate=FALSE, bootstrap=FALSE, npaths=5000, biasadj=FALSE, ...) { lag <- object$par$lag fullperiods <- (h-1)/lag+1 steps <- rep(1:fullperiods, rep(lag,fullperiods))[1:h] # Point forecasts fc <- rep(object$future, fullperiods)[1:h] + steps*object$par$drift # Intervals # Adjust prediction intervals to allow for drift coefficient standard error mse <- sum(object$residuals^2, na.rm = TRUE)/(sum(!is.na(object$residuals)) - (object$par$drift != 0)) se <- sqrt(mse*steps + (steps*object$par$drift.se)^2) if(fan) level <- seq(51,99,by=3) else { if(min(level) > 0 & max(level) < 1) level <- 100*level else if(min(level) < 0 | max(level) > 99.99) stop("Confidence limit out of range") } nconf <- length(level) if (simulate | bootstrap) # Compute prediction intervals using simulations { sim <- matrix(NA, nrow = npaths, ncol = h) for (i in 1:npaths) sim[i, ] <- simulate(object, nsim = h, bootstrap = bootstrap, lambda = lambda) lower <- apply(sim, 2, quantile, 0.5 - level / 200, type = 8) upper <- apply(sim, 2, quantile, 0.5 + level / 200, type = 8) if (nconf > 1L) { lower <- t(lower) upper <- t(upper) } else { lower <- matrix(lower, ncol = 1) upper <- matrix(upper, ncol = 1) } } else { z <- qnorm(.5 + level/200) lower <- upper <- matrix(NA,nrow=h,ncol=nconf) for(i in 1:nconf) { lower[,i] <- fc - z[i]*se upper[,i] <- fc + z[i]*se } } if (!is.null(lambda)) { fc <- InvBoxCox(fc, lambda, biasadj, se^2) if(!bootstrap){ # Bootstrap intervals are already backtransformed upper <- InvBoxCox(upper, lambda) lower <- InvBoxCox(lower, lambda) } } # Set attributes fc <- future_msts(object$x, fc) lower <- future_msts(object$x, lower) upper <- future_msts(object$x, upper) colnames(lower) <- colnames(upper) <- paste(level,"%",sep="") return(structure( list( method = object$method, model = object, lambda = lambda, x = object$x, fitted = fitted(object), residuals = residuals(object), series = object$series, mean = fc, level = level, lower = lower, upper = upper ), class = "forecast") ) } #' @export print.lagwalk <- function(x, ...) 
{ cat(paste("Call:", deparse(x$call), "\n\n")) if (x$par$includedrift) { cat(paste("Drift: ", round(x$par$drift, 4), " (se ", round(x$par$drift.se, 4), ")\n", sep = "")) } cat(paste("Residual sd:", round(sqrt(x$sigma2), 4), "\n")) } #' @export fitted.lagwalk <- function(object, ...){ object$fitted } # Random walk #' @rdname naive #' #' @examples #' #' gold.fcast <- rwf(gold[1:60], h=50) #' plot(gold.fcast) #' #' @export rwf <- function(y, h=10, drift=FALSE, level=c(80, 95), fan=FALSE, lambda=NULL, biasadj=FALSE, ..., x=y) { fit <- lagwalk( x, lag = 1, drift = drift, lambda = lambda, biasadj = biasadj ) fc <- forecast(fit, h = h, level = level, fan = fan, lambda = fit$lambda, biasadj = biasadj, ...) fc$model$call <- match.call() fc$series <- deparse(substitute(y)) if (drift) { fc$method <- "Random walk with drift" } else { fc$method <- "Random walk" } return(fc) } #' Naive and Random Walk Forecasts #' #' \code{rwf()} returns forecasts and prediction intervals for a random walk #' with drift model applied to \code{y}. This is equivalent to an ARIMA(0,1,0) #' model with an optional drift coefficient. \code{naive()} is simply a wrapper #' to \code{rwf()} for simplicity. \code{snaive()} returns forecasts and #' prediction intervals from an ARIMA(0,0,0)(0,1,0)m model where m is the #' seasonal period. #' #' The random walk with drift model is \deqn{Y_t=c + Y_{t-1} + Z_t}{Y[t]=c + #' Y[t-1] + Z[t]} where \eqn{Z_t}{Z[t]} is a normal iid error. Forecasts are #' given by \deqn{Y_n(h)=ch+Y_n}{Y[n+h]=ch+Y[n]}. If there is no drift (as in #' \code{naive}), the drift parameter c=0. Forecast standard errors allow for #' uncertainty in estimating the drift parameter (unlike the corresponding #' forecasts obtained by fitting an ARIMA model directly). #' #' The seasonal naive model is \deqn{Y_t= Y_{t-m} + Z_t}{Y[t]=Y[t-m] + Z[t]} #' where \eqn{Z_t}{Z[t]} is a normal iid error. #' #' @aliases print.naive #' #' @param y a numeric vector or time series of class \code{ts} #' @param h Number of periods for forecasting #' @param drift Logical flag. If TRUE, fits a random walk with drift model. #' @param level Confidence levels for prediction intervals. #' @param fan If TRUE, level is set to seq(51,99,by=3). This is suitable for #' fan plots. #' @param x Deprecated. Included for backwards compatibility. #' @inheritParams forecast.ts #' #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{naive} or #' \code{snaive}. #' #' An object of class \code{"forecast"} is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The original time series #' (either \code{object} itself or the time series used to create the model #' stored as \code{object}).} \item{residuals}{Residuals from the fitted model. 
#' That is x minus fitted values.} \item{fitted}{Fitted values (one-step #' forecasts)} #' @author Rob J Hyndman #' @seealso \code{\link{Arima}} #' @keywords ts #' @examples #' #' plot(naive(gold,h=50),include=200) #' #' @export naive <- function(y, h=10, level=c(80, 95), fan=FALSE, lambda=NULL, biasadj=FALSE, ..., x=y) { fc <- rwf( x, h = h, level = level, fan = fan, lambda = lambda, drift = FALSE, biasadj = biasadj, ... ) fc$model$call <- match.call() fc$series <- deparse(substitute(y)) fc$method <- "Naive method" return(fc) } #' @rdname naive #' #' @examples #' #' plot(snaive(wineind)) #' #' @export snaive <- function(y, h=2 * frequency(x), level=c(80, 95), fan=FALSE, lambda=NULL, biasadj=FALSE, ..., x=y) { fit <- lagwalk( x, lag = frequency(x), drift = FALSE, lambda = lambda, biasadj = biasadj ) fc <- forecast(fit, h = h, level = level, fan = fan, lambda = fit$lambda, biasadj = biasadj, ...) fc$model$call <- match.call() fc$series <- deparse(substitute(y)) fc$method <- "Seasonal naive method" return(fc) } forecast/R/mstl.R0000644000176200001440000005322714323125536013400 0ustar liggesusers#' Multiple seasonal decomposition #' #' Decompose a time series into seasonal, trend and remainder components. #' Seasonal components are estimated iteratively using STL. Multiple seasonal periods are #' allowed. The trend component is computed for the last iteration of STL. #' Non-seasonal time series are decomposed into trend and remainder only. #' In this case, \code{\link[stats]{supsmu}} is used to estimate the trend. #' Optionally, the time series may be Box-Cox transformed before decomposition. #' Unlike \code{\link[stats]{stl}}, \code{mstl} is completely automated. #' @param x Univariate time series of class \code{msts} or \code{ts}. #' @param iterate Number of iterations to use to refine the seasonal component. #' @param s.window Seasonal windows to be used in the decompositions. If scalar, #' the same value is used for all seasonal components. Otherwise, it should be a vector #' of the same length as the number of seasonal components (or longer). #' @param ... Other arguments are passed to \code{\link[stats]{stl}}. #' @inheritParams forecast.ts #' #' @seealso \code{\link[stats]{stl}}, \code{\link[stats]{supsmu}} #' @examples #' library(ggplot2) #' mstl(taylor) %>% autoplot() #' mstl(AirPassengers, lambda = "auto") %>% autoplot() #' @export mstl <- function(x, lambda = NULL, iterate = 2, s.window = 7+4*seq(6), ...) { # What is x? 
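# Classify the input: an msts object may carry several seasonal periods; a
# plain ts has a single period given by frequency(), so one STL pass is
# enough (iterate is forced to 1 below); anything else is coerced to a ts
# with frequency 1 and treated as non-seasonal.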
origx <- x n <- length(x) if ("msts" %in% class(x)) { msts <- attributes(x)$msts if (any(msts >= n / 2)) { warning("Dropping seasonal components with fewer than two full periods.") msts <- msts[msts < n / 2] x <- msts(x, seasonal.periods = msts) } msts <- sort(msts, decreasing = FALSE) } else if ("ts" %in% class(x)) { msts <- frequency(x) iterate <- 1L } else { x <- as.ts(x) msts <- 1L } # Check dimension if (!is.null(dim(x))) { if (NCOL(x) == 1L) { x <- x[, 1] } } # Replace missing values if necessary if (anyNA(x)) { x <- na.interp(x, lambda = lambda) } # Transform if necessary if (!is.null(lambda)) { x <- BoxCox(x, lambda = lambda) lambda <- attr(x, "lambda") } tt <- seq_len(n) # Now fit stl models with only one type of seasonality at a time if (msts[1L] > 1) { seas <- as.list(rep(0, length(msts))) deseas <- x if (length(s.window) == 1L) { s.window <- rep(s.window, length(msts)) } iterate <- pmax(1L, iterate) for (j in seq_len(iterate)) { for (i in seq_along(msts)) { deseas <- deseas + seas[[i]] fit <- stl(ts(deseas, frequency = msts[i]), s.window = s.window[i], ...) seas[[i]] <- msts(seasonal(fit), seasonal.periods = msts) attributes(seas[[i]]) <- attributes(x) deseas <- deseas - seas[[i]] } } trend <- msts(trendcycle(fit), seasonal.periods = msts) } else { msts <- NULL deseas <- x trend <- ts(stats::supsmu(seq_len(n), x)$y) } attributes(trend) <- attributes(x) # Put back NAs deseas[is.na(origx)] <- NA # Estimate remainder remainder <- deseas - trend # Package into matrix output <- cbind(c(origx), c(trend)) if (!is.null(msts)) { for (i in seq_along(msts)) { output <- cbind(output, c(seas[[i]])) } } output <- cbind(output, c(remainder)) colnames(output) <- paste0("V",seq(NCOL(output))) colnames(output)[1L:2L] <- c("Data", "Trend") if (!is.null(msts)) { colnames(output)[2L + seq_along(msts)] <- paste0("Seasonal", round(msts, 2)) } colnames(output)[NCOL(output)] <- "Remainder" output <- copy_msts(origx, output) class(output) <- c("mstl", class(output)) return(output) } #' @rdname autoplot.seas #' @export autoplot.mstl <- function(object, ...) { autoplot.mts(object, facets = TRUE, ylab = "", ...) } #' Forecasting using stl objects #' #' Forecasts of STL objects are obtained by applying a non-seasonal forecasting #' method to the seasonally adjusted data and re-seasonalizing using the last #' year of the seasonal component. #' #' \code{stlm} takes a time series \code{y}, applies an STL decomposition, and #' models the seasonally adjusted data using the model passed as #' \code{modelfunction} or specified using \code{method}. It returns an object #' that includes the original STL decomposition and a time series model fitted #' to the seasonally adjusted data. This object can be passed to the #' \code{forecast.stlm} for forecasting. #' #' \code{forecast.stlm} forecasts the seasonally adjusted data, then #' re-seasonalizes the results by adding back the last year of the estimated #' seasonal component. #' #' \code{stlf} combines \code{stlm} and \code{forecast.stlm}. It takes a #' \code{ts} argument, applies an STL decomposition, models the seasonally #' adjusted data, reseasonalizes, and returns the forecasts. However, it allows #' more general forecasting methods to be specified via #' \code{forecastfunction}. #' #' \code{forecast.stl} is similar to \code{stlf} except that it takes the STL #' decomposition as the first argument, instead of the time series. #' #' Note that the prediction intervals ignore the uncertainty associated with #' the seasonal component. 
They are computed using the prediction intervals #' from the seasonally adjusted series, which are then reseasonalized using the #' last year of the seasonal component. #' #' The time series model for the seasonally adjusted data can be specified in #' \code{stlm} using either \code{method} or \code{modelfunction}. The #' \code{method} argument provides a shorthand way of specifying #' \code{modelfunction} for a few special cases. More generally, #' \code{modelfunction} can be any function whose first argument is a \code{ts} #' object and which returns an object that can be passed to \code{\link{forecast}}. #' For example, \code{modelfunction=ar} uses the \code{\link{ar}} function #' for modelling the seasonally adjusted series. #' #' The forecasting method for the seasonally adjusted data can be specified in #' \code{stlf} and \code{forecast.stl} using either \code{method} or #' \code{forecastfunction}. The \code{method} argument provides a shorthand way #' of specifying \code{forecastfunction} for a few special cases. More #' generally, \code{forecastfunction} can be any function whose first argument is a #' \code{ts} object, with additional arguments \code{h} and \code{level}, and which returns an #' object of class \code{\link{forecast}}. For example, #' \code{forecastfunction=thetaf} uses the \code{\link{thetaf}} function for #' forecasting the seasonally adjusted series. #' #' @param y A univariate numeric time series of class \code{ts} #' @param object An object of class \code{stl} or \code{stlm}. Usually the #' result of a call to \code{\link[stats]{stl}} or \code{stlm}. #' @param method Method to use for forecasting the seasonally adjusted series. #' @param modelfunction An alternative way of specifying the function for #' modelling the seasonally adjusted series. If \code{modelfunction} is not #' \code{NULL}, then \code{method} is ignored. Otherwise \code{method} is used #' to specify the time series model to be used. #' @param model Output from a previous call to \code{stlm}. If a \code{stlm} #' model is passed, this same model is fitted to \code{y} without re-estimating any #' parameters. #' @param forecastfunction An alternative way of specifying the function for #' forecasting the seasonally adjusted series. If \code{forecastfunction} is #' not \code{NULL}, then \code{method} is ignored. Otherwise \code{method} is #' used to specify the forecasting method to be used. #' @param etsmodel The ets model specification passed to #' \code{\link[forecast]{ets}}. By default it allows any non-seasonal model. If #' \code{method!="ets"}, this argument is ignored. #' @param xreg Historical regressors to be used in #' \code{\link[forecast]{auto.arima}()} when \code{method=="arima"}. #' @param newxreg Future regressors to be used in #' \code{\link[forecast]{forecast.Arima}()}. #' @param h Number of periods for forecasting. #' @param level Confidence level for prediction intervals. #' @param fan If \code{TRUE}, level is set to seq(51,99,by=3). This is suitable #' for fan plots. #' @param s.window Either the character string ``periodic'' or the span (in #' lags) of the loess window for seasonal extraction. #' @param t.window A number to control the smoothness of the trend. See #' \code{\link[stats]{stl}} for details. #' @param robust If \code{TRUE}, robust fitting will be used in the loess #' procedure within \code{\link[stats]{stl}}. #' @param allow.multiplicative.trend If TRUE, then ETS models with #' multiplicative trends are allowed.
Otherwise, only additive or no trend ETS #' models are permitted. #' @param x Deprecated. Included for backwards compatibility. #' @param ... Other arguments passed to \code{forecast.stl}, #' \code{modelfunction} or \code{forecastfunction}. #' @inheritParams forecast.ts #' #' @return \code{stlm} returns an object of class \code{stlm}. The other #' functions return objects of class \code{forecast}. #' #' There are many methods for working with \code{\link{forecast}} objects #' including \code{summary} to obtain and print a summary of the results, while #' \code{plot} produces a plot of the forecasts and prediction intervals. The #' generic accessor functions \code{fitted.values} and \code{residuals} extract #' useful features. #' @author Rob J Hyndman #' @seealso \code{\link[stats]{stl}}, \code{\link{forecast.ets}}, #' \code{\link{forecast.Arima}}. #' @keywords ts #' @examples #' #' tsmod <- stlm(USAccDeaths, modelfunction = ar) #' plot(forecast(tsmod, h = 36)) #' #' decomp <- stl(USAccDeaths, s.window = "periodic") #' plot(forecast(decomp)) #' @export forecast.stl <- function(object, method = c("ets", "arima", "naive", "rwdrift"), etsmodel = "ZZN", forecastfunction = NULL, h = frequency(object$time.series) * 2, level = c(80, 95), fan = FALSE, lambda = NULL, biasadj = NULL, xreg = NULL, newxreg = NULL, allow.multiplicative.trend = FALSE, ...) { method <- match.arg(method) if (is.null(forecastfunction)) { if (method != "arima" && (!is.null(xreg) || !is.null(newxreg))) { stop("xreg and newxreg arguments can only be used with ARIMA models") } if (method == "ets") { # Ensure non-seasonal model if (substr(etsmodel, 3, 3) != "N") { warning("The ETS model must be non-seasonal. I'm ignoring the seasonal component specified.") substr(etsmodel, 3, 3) <- "N" } forecastfunction <- function(x, h, level, ...) { fit <- ets(na.interp(x), model = etsmodel, allow.multiplicative.trend = allow.multiplicative.trend, ...) return(forecast(fit, h = h, level = level)) } } else if (method == "arima") { forecastfunction <- function(x, h, level, ...) { fit <- auto.arima(x, xreg = xreg, seasonal = FALSE, ...) return(forecast(fit, h = h, level = level, xreg = newxreg)) } } else if (method == "naive") { forecastfunction <- function(x, h, level, ...) { rwf(x, drift = FALSE, h = h, level = level, ...) } } else if (method == "rwdrift") { forecastfunction <- function(x, h, level, ...) { rwf(x, drift = TRUE, h = h, level = level, ...) 
} } } if (is.null(xreg) != is.null(newxreg)) { stop("xreg and newxreg arguments must both be supplied") } if (!is.null(newxreg)) { if (NROW(as.matrix(newxreg)) != h) { stop("newxreg should have the same number of rows as the forecast horizon h") } } if (fan) { level <- seq(51, 99, by = 3) } if ("mstl" %in% class(object)) { seasoncolumns <- which(grepl("Season", colnames(object))) nseasons <- length(seasoncolumns) seascomp <- matrix(0, ncol = nseasons, nrow = h) seasonal.periods <- as.numeric(sub("Seasonal","", colnames(object)[seasoncolumns])) n <- NROW(object) for (i in seq(nseasons)) { mp <- seasonal.periods[i] colname <- colnames(object)[seasoncolumns[i]] seascomp[, i] <- rep(object[n - rev(seq_len(mp)) + 1, colname], trunc(1 + (h - 1) / mp))[seq_len(h)] } lastseas <- rowSums(seascomp) xdata <- object[, "Data"] seascols <- grep("Seasonal", colnames(object)) allseas <- rowSumsTS(object[, seascols, drop = FALSE]) series <- NULL } else if ("stl" %in% class(object)) { m <- frequency(object$time.series) n <- NROW(object$time.series) lastseas <- rep(seasonal(object)[n - (m:1) + 1], trunc(1 + (h - 1) / m))[1:h] xdata <- ts(rowSums(object$time.series)) tsp(xdata) <- tsp(object$time.series) allseas <- seasonal(object) series <- deparse(object$call$x) } else { stop("Unknown object class") } # De-seasonalize x.sa <- seasadj(object) # Forecast fcast <- forecastfunction(x.sa, h = h, level = level, ...) # Reseasonalize fcast$mean <- future_msts(xdata, fcast$mean + lastseas) fcast$upper <- future_msts(xdata, fcast$upper + lastseas) fcast$lower <- future_msts(xdata, fcast$lower + lastseas) fcast$x <- xdata fcast$method <- paste("STL + ", fcast$method) fcast$series <- series fcast$fitted <- copy_msts(xdata, fitted(fcast) + allseas) fcast$residuals <- copy_msts(xdata, fcast$x - fcast$fitted) if (!is.null(lambda)) { fcast$x <- InvBoxCox(fcast$x, lambda) fcast$fitted <- InvBoxCox(fcast$fitted, lambda) fcast$mean <- InvBoxCox(fcast$mean, lambda, biasadj, fcast) fcast$lower <- InvBoxCox(fcast$lower, lambda) fcast$upper <- InvBoxCox(fcast$upper, lambda) attr(lambda, "biasadj") <- biasadj fcast$lambda <- lambda } return(fcast) } #' @export forecast.mstl <- function(object, method = c("ets", "arima", "naive", "rwdrift"), etsmodel = "ZZN", forecastfunction = NULL, h = frequency(object) * 2, level = c(80, 95), fan = FALSE, lambda = NULL, biasadj = NULL, xreg = NULL, newxreg = NULL, allow.multiplicative.trend = FALSE, ...) { forecast.stl( object, method = method, etsmodel = etsmodel, forecastfunction = forecastfunction, h = h, level = level, fan = fan, lambda = lambda, biasadj = biasadj, xreg = xreg, newxreg = newxreg, allow.multiplicative.trend = allow.multiplicative.trend, ... ) } # rowSums for mts objects # # Applies rowSums and returns ts with same tsp attributes as input. This # allows the result to be added to other time series with different lengths # but overlapping time indexes. # param mts a matrix or multivariate time series # return a vector of rowsums which is a ts if the \code{mts} is a ts rowSumsTS <- function (mts) { the_tsp <- tsp(mts) ret <- rowSums(mts) if (is.null(the_tsp)){ ret } else { tsp(ret) <- the_tsp as.ts(ret) } } # Function takes time series, does STL decomposition, and fits a model to seasonally adjusted series # But it does not forecast. Instead, the result can be passed to forecast(). 
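# A minimal usage sketch of the stlm()/forecast() workflow described above.
# It is wrapped in if (FALSE) so it never runs when the package is built;
# the dataset and the 1975 window are illustrative choices only.
if (FALSE) {
  fit <- stlm(USAccDeaths, method = "ets")
  # Re-apply the same fitted model to a subset without re-estimating parameters:
  refit <- stlm(window(USAccDeaths, start = 1975), model = fit)
  plot(forecast(refit, h = 24))
}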
#' @rdname forecast.stl #' @export stlm <- function(y, s.window = 7+4*seq(6), robust = FALSE, method = c("ets", "arima"), modelfunction = NULL, model = NULL, etsmodel = "ZZN", lambda = NULL, biasadj = FALSE, xreg = NULL, allow.multiplicative.trend = FALSE, x = y, ...) { method <- match.arg(method) # Check univariate if (NCOL(x) > 1L) { stop("y must be a univariate time series") } else { if (!is.null(ncol(x))) { if (ncol(x) == 1L) { # Probably redundant check x <- x[, 1L] } } } # Check x is a seasonal time series tspx <- tsp(x) if (is.null(tspx)) { stop("y is not a seasonal ts object") } else if (tspx[3] <= 1L) { stop("y is not a seasonal ts object") } # Transform data if necessary origx <- x if (!is.null(lambda)) { x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") } # Do STL decomposition stld <- mstl(x, s.window = s.window, robust = robust) if (!is.null(model)) { if (inherits(model$model, "ets")) { modelfunction <- function(x, ...) { return(ets(x, model = model$model, use.initial.values = TRUE, ...)) } } else if (inherits(model$model, "Arima")) { modelfunction <- function(x, ...) { return(Arima(x, model = model$model, xreg = xreg, ...)) } } else if (!is.null(model$modelfunction)) { if ("model" %in% names(formals(model$modelfunction))) { modelfunction <- function(x, ...) { return(model$modelfunction(x, model = model$model, ...)) } } } if (is.null(modelfunction)) { stop("Unknown model type") } } # Construct modelfunction if not passed as an argument else if (is.null(modelfunction)) { if (method != "arima" && !is.null(xreg)) { stop("xreg arguments can only be used with ARIMA models") } if (method == "ets") { # Ensure non-seasonal model if (substr(etsmodel, 3, 3) != "N") { warning("The ETS model must be non-seasonal. I'm ignoring the seasonal component specified.") substr(etsmodel, 3, 3) <- "N" } modelfunction <- function(x, ...) { return(ets( x, model = etsmodel, allow.multiplicative.trend = allow.multiplicative.trend, ... )) } } else if (method == "arima") { modelfunction <- function(x, ...) { return(auto.arima(x, xreg = xreg, seasonal = FALSE, ...)) } } } # De-seasonalize x.sa <- seasadj(stld) # Model seasonally adjusted data fit <- modelfunction(x.sa, ...) fit$x <- x.sa # Fitted values and residuals seascols <- grep("Seasonal", colnames(stld)) allseas <- rowSumsTS(stld[, seascols, drop = FALSE]) fits <- fitted(fit) + allseas res <- residuals(fit) if (!is.null(lambda)) { fits <- InvBoxCox(fits, lambda, biasadj, var(res)) attr(lambda, "biasadj") <- biasadj } return(structure(list( stl = stld, model = fit, modelfunction = modelfunction, lambda = lambda, x = origx, series = deparse(substitute(y)), m = frequency(origx), fitted = fits, residuals = res ), class = "stlm")) } #' @rdname forecast.stl #' @export forecast.stlm <- function(object, h = 2 * object$m, level = c(80, 95), fan = FALSE, lambda = object$lambda, biasadj = NULL, newxreg = NULL, allow.multiplicative.trend = FALSE, ...) { if (!is.null(newxreg)) { if (nrow(as.matrix(newxreg)) != h) { stop("newxreg should have the same number of rows as the forecast horizon h") } } if (fan) { level <- seq(51, 99, by = 3) } # Forecast seasonally adjusted series if (is.element("Arima", class(object$model)) && !is.null(newxreg)) { fcast <- forecast(object$model, h = h, level = level, xreg = newxreg, ...) } else if (is.element("ets", class(object$model))) { fcast <- forecast( object$model, h = h, level = level, allow.multiplicative.trend = allow.multiplicative.trend, ... ) } else { fcast <- forecast(object$model, h = h, level = level, ...) 
} # In-case forecast method uses different horizon length (such as using xregs) h <- NROW(fcast$mean) # Forecast seasonal series with seasonal naive seasonal.periods <- attributes(object$stl)$msts if (is.null(seasonal.periods)) { seasonal.periods <- frequency(object$stl) } seascomp <- matrix(0, ncol = length(seasonal.periods), nrow = h) for (i in seq_along(seasonal.periods)) { mp <- seasonal.periods[i] n <- NROW(object$stl) colname <- paste0("Seasonal", round(mp, 2)) seascomp[, i] <- rep(object$stl[n - rev(seq_len(mp)) + 1, colname], trunc(1 + (h - 1) / mp))[seq_len(h)] } lastseas <- rowSums(seascomp) xdata <- object$stl[, "Data"] seascols <- grep("Seasonal", colnames(object$stl)) allseas <- rowSumsTS(object$stl[, seascols, drop = FALSE]) series <- NULL # m <- frequency(object$stl$time.series) n <- NROW(xdata) # Reseasonalize fcast$mean <- fcast$mean + lastseas fcast$upper <- fcast$upper + lastseas fcast$lower <- fcast$lower + lastseas fcast$method <- paste("STL + ", fcast$method) fcast$series <- object$series # fcast$seasonal <- ts(lastseas[1:m],frequency=m,start=tsp(object$stl$time.series)[2]-1+1/m) # fcast$residuals <- residuals() fcast$fitted <- fitted(fcast) + allseas fcast$residuals <- residuals(fcast) if (!is.null(lambda)) { fcast$fitted <- InvBoxCox(fcast$fitted, lambda) fcast$mean <- InvBoxCox(fcast$mean, lambda, biasadj, fcast) fcast$lower <- InvBoxCox(fcast$lower, lambda) fcast$upper <- InvBoxCox(fcast$upper, lambda) attr(lambda, "biasadj") <- biasadj fcast$lambda <- lambda } fcast$x <- object$x return(fcast) } #' @rdname forecast.stl #' #' @examples #' #' plot(stlf(AirPassengers, lambda = 0)) #' @export stlf <- function(y, h = frequency(x) * 2, s.window = 7+4*seq(6), t.window = NULL, robust = FALSE, lambda = NULL, biasadj = FALSE, x = y, ...) { seriesname <- deparse(substitute(y)) # Check univariate if (NCOL(x) > 1L) { stop("y must be a univariate time series") } else { if (!is.null(ncol(x))) { if (ncol(x) == 1L) { # Probably redundant check x <- x[, 1L] } } } # Check x is a seasonal time series tspx <- tsp(x) if (is.null(tspx)) { stop("y is not a seasonal ts object") } else if (tspx[3] <= 1L) { stop("y is not a seasonal ts object") } if (!is.null(lambda)) { x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") } fit <- mstl(x, s.window = s.window, t.window = t.window, robust = robust) fcast <- forecast(fit, h = h, lambda = lambda, biasadj = biasadj, ...) 
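# Note: the commented-out block below appears superseded; the Box-Cox
# back-transformation is now handled inside forecast.stl().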
# if (!is.null(lambda)) # { # fcast$x <- origx # fcast$fitted <- InvBoxCox(fcast$fitted, lambda) # fcast$mean <- InvBoxCox(fcast$mean, lambda) # fcast$lower <- InvBoxCox(fcast$lower, lambda) # fcast$upper <- InvBoxCox(fcast$upper, lambda) # fcast$lambda <- lambda # } fcast$series <- seriesname return(fcast) } #' @rdname is.ets #' @export is.stlm <- function(x) { inherits(x, "stlm") } forecast/R/checkAdmissibility.R0000644000176200001440000000235114150370574016217 0ustar liggesusers# Author: srazbash and Rob J Hyndman ############################################################################### checkAdmissibility <- function(opt.env, box.cox=NULL, small.phi=NULL, ar.coefs=NULL, ma.coefs=NULL, tau=0, bc.lower=0, bc.upper=1) { # Check the range of the Box-Cox parameter if (!is.null(box.cox)) { if ((box.cox <= bc.lower) | (box.cox >= bc.upper)) { return(FALSE) } } # Check the range of small.phi if (!is.null(small.phi)) { if (((small.phi < .8) | (small.phi > 1))) { return(FALSE) } } # Check AR part for stationarity if (!is.null(ar.coefs)) { arlags <- which(abs(ar.coefs) > 1e-08) if (length(arlags) > 0L) { p <- max(arlags) if (min(Mod(polyroot(c(1, -ar.coefs[1L:p])))) < 1 + 1e-2) { return(FALSE) } } } # Check MA part for invertibility if (!is.null(ma.coefs)) { malags <- which(abs(ma.coefs) > 1e-08) if (length(malags) > 0L) { q <- max(malags) if (min(Mod(polyroot(c(1, ma.coefs[1L:q])))) < 1 + 1e-2) { return(FALSE) } } } # Check the eigen values of the D matrix D.eigen.values <- eigen(opt.env$D, symmetric = FALSE, only.values = TRUE)$values return(all(abs(D.eigen.values) < 1 + 1e-2)) } forecast/R/graph.R0000644000176200001440000001715614323125536013523 0ustar liggesusers### Time series graphics and transformations #' Time series display #' #' Plots a time series along with its acf and either its pacf, lagged #' scatterplot or spectrum. #' #' \code{ggtsdisplay} will produce the equivalent plot using ggplot graphics. #' #' @param x a numeric vector or time series of class \code{ts}. #' @param plot.type type of plot to include in lower right corner. #' @param points logical flag indicating whether to show the individual points #' or not in the time plot. #' @param smooth logical flag indicating whether to show a smooth loess curve #' superimposed on the time plot. #' @param ci.type type of confidence limits for ACF that is passed to #' \code{\link[stats]{acf}}. Should the confidence limits assume a white noise #' input or for lag \eqn{k} an MA(\eqn{k-1}) input? #' @param lag.max the maximum lag to plot for the acf and pacf. A suitable #' value is selected by default if the argument is missing. #' @param na.action function to handle missing values in acf, pacf and spectrum #' calculations. The default is \code{\link[stats]{na.contiguous}}. Useful #' alternatives are \code{\link[stats]{na.pass}} and \code{\link{na.interp}}. #' @param theme Adds a ggplot element to each plot, typically a theme. #' @param main Main title. #' @param xlab X-axis label. #' @param ylab Y-axis label. #' @param pch Plotting character. #' @param cex Character size. #' @param \dots additional arguments to \code{\link[stats]{acf}}. #' @return None. #' @author Rob J Hyndman #' @seealso \code{\link[stats]{plot.ts}}, \code{\link{Acf}}, #' \code{\link[stats]{spec.ar}} #' @references Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles #' and practice}, 2nd edition, OTexts: Melbourne, Australia. 
#' \url{https://otexts.com/fpp2/} #' @keywords ts #' @examples #' tsdisplay(diff(WWWusage)) #' ggtsdisplay(USAccDeaths, plot.type="scatter") #' #' @export tsdisplay <- function(x, plot.type=c("partial", "histogram", "scatter", "spectrum"), points=TRUE, ci.type=c("white", "ma"), lag.max, na.action=na.contiguous, main=NULL, xlab="", ylab="", pch=1, cex=0.5, ...) { plot.type <- match.arg(plot.type) ci.type <- match.arg(ci.type) def.par <- par(no.readonly = TRUE) # save default, for resetting... nf <- layout(matrix(c(1, 1, 2, 3), 2, 2, byrow = TRUE)) if (is.null(main)) { main <- deparse(substitute(x)) } if (!is.ts(x)) { x <- ts(x) } if (missing(lag.max)) { lag.max <- round(min(max(10 * log10(length(x)), 3 * frequency(x)), length(x) / 3)) } plot.ts(x, main = main, ylab = ylab, xlab = xlab, ylim = range(x, na.rm = TRUE), ...) if (points) { points(x, pch = pch, cex = cex, ...) } ylim <- c(-1, 1) * 3 / sqrt(length(x)) junk1 <- stats::acf(c(x), lag.max = lag.max, plot = FALSE, na.action = na.action) junk1$acf[1, 1, 1] <- 0 if (ci.type == "ma") { ylim <- range(ylim, 0.66 * ylim * max(sqrt(cumsum(c(1, 2 * junk1$acf[-1, 1, 1] ^ 2))))) } ylim <- range(ylim, junk1$acf) if (plot.type == "partial") { junk2 <- stats::pacf(c(x), lag.max = lag.max, plot = FALSE, na.action = na.action) ylim <- range(ylim, junk2$acf) } oldpar <- par(mar = c(5, 4.1, 1.5, 2)) plot(junk1, ylim = ylim, xlim = c(1, lag.max), ylab = "ACF", main = "", ci.type = ci.type, ...) if (plot.type == "scatter") { n <- length(x) plot(x[1:(n - 1)], x[2:n], xlab = expression(Y[t - 1]), ylab = expression(Y[t]), ...) } else if (plot.type == "spectrum") { spec.ar(x, main = "", na.action = na.action) } else if (plot.type == "histogram") { graphics::hist(x, breaks = "FD", main = "", xlab = main) } else { plot(junk2, ylim = ylim, xlim = c(1, lag.max), ylab = "PACF", main = "", ...) } par(def.par) layout(1) invisible() } #' Seasonal plot #' #' Plots a seasonal plot as described in Hyndman and Athanasopoulos (2014, #' chapter 2). This is like a time plot except that the data are plotted #' against the seasons in separate years. #' #' @param x a numeric vector or time series of class \code{ts}. #' @param s seasonal frequency of x #' @param season.labels Labels for each season in the "year" #' @param year.labels Logical flag indicating whether labels for each year of #' data should be plotted on the right. #' @param year.labels.left Logical flag indicating whether labels for each year #' of data should be plotted on the left. #' @param type plot type (as for \code{\link[graphics]{plot}}). Not yet #' supported for ggseasonplot. #' @param main Main title. #' @param xlab X-axis label. #' @param ylab Y-axis label. #' @param col Colour #' @param labelgap Distance between year labels and plotted lines #' @param \dots additional arguments to \code{\link[graphics]{plot}}. #' @return None. #' @author Rob J Hyndman & Mitchell O'Hara-Wild #' @seealso \code{\link[stats]{monthplot}} #' @references Hyndman and Athanasopoulos (2018) \emph{Forecasting: principles #' and practice}, 2nd edition, OTexts: Melbourne, Australia. #' \url{https://otexts.com/fpp2/} #' @keywords ts #' @examples #' seasonplot(AirPassengers, col=rainbow(12), year.labels=TRUE) #' #' @export seasonplot <- function(x, s, season.labels=NULL, year.labels=FALSE, year.labels.left=FALSE, type="o", main, xlab=NULL, ylab="", col=1, labelgap=0.1, ...) 
{ if (missing(main)) { main <- paste("Seasonal plot:", deparse(substitute(x))) } # Check data are seasonal and convert to integer seasonality if (missing(s)) { s <- round(frequency(x)) } if (s <= 1) { stop("Data are not seasonal") } tspx <- tsp(x) x <- ts(x, start = tspx[1], frequency = s) # Pad series tsx <- x startperiod <- round(cycle(x)[1]) if (startperiod > 1) { x <- c(rep(NA, startperiod - 1), x) } x <- c(x, rep(NA, s - length(x) %% s)) Season <- rep(c(1:s, NA), length(x) / s) xnew <- rep(NA, length(x)) xnew[!is.na(Season)] <- x if (s == 12) { labs <- month.abb xLab <- "Month" } else if (s == 4) { labs <- paste("Q", 1:4, sep = "") xLab <- "Quarter" } else if (s == 7) { labs <- c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat") xLab <- "Day" } else if (s == 52) { labs <- 1:s xLab <- "Week" } else if (s == 24) { labs <- 0:(s - 1) xLab <- "Hour" } else if (s == 48) { labs <- seq(0, 23.5, by = 0.5) xLab <- "Half-hour" } else { if (s < 20) { labs <- 1:s } else { labs <- NULL } xLab <- "Season" } if (is.null(xlab)) { xlab <- xLab } if (is.null(season.labels)) { season.labels <- labs } if (year.labels) { xlim <- c(1 - labelgap, s + 0.4 + labelgap) } else { xlim <- c(1 - labelgap, s) } if (year.labels.left) { xlim[1] <- 0.4 - labelgap } plot(Season, xnew, xaxt = "n", xlab = xlab, type = type, ylab = ylab, main = main, xlim = xlim, col = 0, ...) nn <- length(Season) / s col <- rep(col, nn)[1:nn] for (i in 0:(nn - 1)) lines(Season[(i * (s + 1) + 1):((s + 1) * (i + 1))], xnew[(i * (s + 1) + 1):((s + 1) * (i + 1))], type = type, col = col[i + 1], ...) if (year.labels) { idx <- which(Season[!is.na(xnew)] == s) year <- round(time(tsx)[idx], nchar(s)) text(x = rep(s + labelgap, length(year)), y = tsx[idx], labels = paste(c(trunc(year))), adj = 0, ..., col = col[1:length(idx)]) } if (year.labels.left) { idx <- which(Season[!is.na(xnew)] == 1) year <- round(time(tsx)[idx], nchar(s)) if (min(idx) > 1) { # First year starts after season 1 col <- col[-1] } text(x = rep(1 - labelgap, length(year)), y = tsx[idx], labels = paste(c(trunc(year))), adj = 1, ..., col = col[1:length(idx)]) } if (is.null(labs)) { axis(1, ...) } else { axis(1, labels = season.labels, at = 1:s, ...) } } forecast/R/calendar.R0000644000176200001440000001336114323125536014165 0ustar liggesusers## Add as.Date.timeDate to S3 method table #' @export as.Date.timeDate <- timeDate::as.Date.timeDate #' Number of trading days in each season #' #' Returns the number of trading days in each month or quarter of the observed time #' period in a major financial center. #' #' Useful for trading-day length adjustments. For details on how "business #' days" are defined, see \code{\link[timeDate]{isBizday}}. #' #' @param x Monthly or quarterly time series #' @param FinCenter Major financial center.
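#' One of "New York", "London", "NERC", "Toronto" or "Zurich".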
#' @return Time series #' @author Earo Wang #' @seealso \code{\link[forecast]{monthdays}} #' @keywords ts #' @examples #' #' x <- ts(rnorm(30), start = c(2013, 2), frequency = 12) #' bizdays(x, FinCenter = "New York") #' @export bizdays <- function(x, FinCenter = c( "New York", "London", "NERC", "Toronto", "Zurich" )) { # Return the number of trading days corresponding to the input ts # # Args: # x: a ts object # FinCenter: inherits holiday calendar from "timeDate" package # # Returns: # A matrix contains the number of trading days if (is.null(tsp(x))) { stop("We cannot handle a time series without time attributes.") } # Convert tsp to date freq <- frequency(x) years <- start(x)[1L]:end(x)[1L] # Grab the holidays from years and financial center FinCenter <- match.arg(FinCenter) if (FinCenter == "New York") { holidays <- timeDate::holidayNYSE(years) } else if (FinCenter == "London") { holidays <- timeDate::holidayLONDON(years) } else if (FinCenter == "NERC") { holidays <- timeDate::holidayNERC(years) } else if (FinCenter == "Toronto") { holidays <- timeDate::holidayTSX(years) } else if (FinCenter == "Zurich") { holidays <- timeDate::holidayZURICH(years) } if (freq == 12L) { # monthly data date <- zoo::as.Date(time(x)) start <- date[1L] end <- seq(date[length(date)], length.out = 2L, by = "month")[2L] - 1L days.len <- timeDate::timeSequence(from = start, to = end) # Grab business days biz <- days.len[timeDate::isBizday(days.len, holidays = holidays)] bizdays <- format(biz, format = "%Y-%m") } else if (freq == 4L) { # Quarterly data date <- zoo::as.Date(time(x)) start <- date[1L] end <- seq(date[length(date)], length.out = 2L, by = "3 month")[2L] - 1L days.len <- timeDate::timeSequence(from = start, to = end) # Grab business days biz <- days.len[timeDate::isBizday(days.len, holidays = holidays)] bizdays <- format(zoo::as.yearqtr(biz), format = "%Y Qtr%q") } # else if (freq == 52L) { # Weekly data # start <- paste0(start(x)[1L], "-01-01") # start <- as.Date(start) + start(x)[2L] * 7L # end <- start + length(time(x)) * 7L # days.len <- as.timeDate(seq(start, end, by = "days"), FinCenter = FinCenter) # biz <- days.len[isBizday(days.len, # holidays = unique(format(days.len, "%Y")))] # bizdays <- format(biz, format = "%Y Wk%W") # } num.days <- table(bizdays) out <- ts(num.days, start = tsp(x)[1L], frequency = freq) return(out) } #' Easter holidays in each season #' #' Returns a vector of 0's and 1's or fractional results if Easter spans March #' and April in the observed time period. Easter is defined as the days from #' Good Friday to Easter Sunday inclusively, plus optionally Easter Monday if #' \code{easter.mon=TRUE}. #' #' Useful for adjusting calendar effects. #' #' @param x Monthly or quarterly time series #' @param easter.mon If TRUE, the length of Easter holidays includes Easter #' Monday. 
#' @return Time series #' @author Earo Wang #' @keywords ts #' @examples #' #' easter(wineind, easter.mon = TRUE) #' @export easter <- function(x, easter.mon = FALSE) { # Return a vector of 0's and 1's for easter holidays # # Args: # x: monthly, quarterly or weekly data # easter.mon: An option including easter.mon if (is.null(tsp(x))) { stop("We cannot handle a time series without time attributes.") } freq <- frequency(x) date <- zoo::as.Date(time(x)) start.yr <- start(x)[1L] end.yr <- end(x)[1L] yr.span <- seq(start.yr, end.yr) gd.fri0 <- Easter(yr.span, -2L) if (easter.mon) { easter0 <- Easter(yr.span, 1L) } else { easter0 <- Easter(yr.span) } if (freq == 12L) { fmat <- "%Y-%m" yr.mon <- format(date, format = fmat) gd.fri <- format(gd.fri0, format = fmat) # good fri easter <- format(easter0, format = fmat) # easter mon } else if (freq == 4L) { fmat <- "%Y-%q" yr.mon <- format(zoo::as.yearqtr(date), format = fmat) # yr.qtr gd.fri <- format(zoo::as.yearqtr(gd.fri0), format = fmat) easter <- format(zoo::as.yearqtr(easter0), format = fmat) } span <- cbind(gd.fri, easter) # the span of easter holidays hdays <- unlist(apply(span, 1, unique)) dummies <- ifelse(yr.mon %in% hdays, 1L, 0L) # Allow fractional results denominator <- (easter0 - gd.fri0 + 1L)[1L] last.mar <- as.timeDate(paste0(yr.span, "-03-31")) dif <- difftimeDate(last.mar, gd.fri0, units = "days") + 1L # Remove easter out of date range if (date[1L] > as.character(last.mar[1L])) { dif <- dif[-1L] } if (date[length(yr.mon)] < as.character(last.mar[length(last.mar)])) { dif <- dif[-length(dif)] } replace <- dif > denominator | dif <= 0L dif[replace] <- denominator # Easter in the same month # Start to insert the remaining part falling in Apr index <- which(dif != denominator) if (length(index) != 0L) { values <- denominator - dif[index] new.index <- index[1L] for (i in 1L:length(index)) { dif <- append(dif, values = values[i], new.index) new.index <- index[i + 1L] + i } dummies[dummies == 1L] <- round(dif / unclass(denominator), digits = 2) } out <- ts(dummies, start = tsp(x)[1L], frequency = freq) return(out) } forecast/R/forecast.varest.R0000644000176200001440000000355314150370574015531 0ustar liggesusers# forecast function for varest, just a wrapper for predict.varest #' @export forecast.varest <- function(object, h=10, level=c(80, 95), fan=FALSE, ...) { out <- list(model = object, forecast = vector("list", object$K)) # Get residuals and fitted values and fix the times tspx <- tsp(object$y) vres <- residuals(object) vfits <- fitted(object) method <- paste0("VAR(", object$p, ")") # Add forecasts with prediction intervals # out$mean <- out$lower <- out$upper <- vector("list",object$K) for (i in seq_along(level)) { pr <- predict(object, n.ahead = h, ci = level[i] / 100, ...) 
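# predict.varest() from the vars package accepts a single confidence level per
# call, so the bounds for each requested level are accumulated as extra
# columns, one pair of lower/upper matrices per series.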
for (j in 1:object$K) { out$forecast[[j]]$lower <- cbind(out$forecast[[j]]$lower, pr$fcst[[j]][, "lower"]) out$forecast[[j]]$upper <- cbind(out$forecast[[j]]$upper, pr$fcst[[j]][, "upper"]) } } j <- 1 for (fcast in out$forecast) { fcast$mean <- ts(pr$fcst[[j]][, "fcst"], frequency = tspx[3], start = tspx[2] + 1 / tspx[3]) fcast$lower <- ts(fcast$lower, frequency = tspx[3], start = tspx[2] + 1 / tspx[3]) fcast$upper <- ts(fcast$upper, frequency = tspx[3], start = tspx[2] + 1 / tspx[3]) colnames(fcast$lower) <- colnames(fcast$upper) <- paste0(level, "%") fcast$residuals <- fcast$fitted <- ts(rep(NA, nrow(object$y))) fcast$residuals[((1 - nrow(vres)):0) + length(fcast$residuals)] <- vres[, j] fcast$fitted[((1 - nrow(vfits)):0) + length(fcast$fitted)] <- vfits[, j] fcast$method <- method fcast$level <- level fcast$x <- object$y[, j] fcast$series <- colnames(object$y)[j] tsp(fcast$residuals) <- tsp(fcast$fitted) <- tspx fcast <- structure(fcast, class = "forecast") out$forecast[[j]] <- fcast j <- j + 1 } names(out$forecast) <- names(pr$fcst) out$method <- rep(method, object$K) names(out$forecast) <- names(out$method) <- names(pr$fcst) return(structure(out, class = "mforecast")) } forecast/R/armaroots.R0000644000176200001440000001110714323125536014417 0ustar liggesusers# Functions to plot the roots of an ARIMA model # Compute AR roots arroots <- function(object) { if (!any(is.element(class(object), c("Arima", "ar")))) { stop("object must be of class Arima or ar") } if (is.element("Arima", class(object))) { parvec <- object$model$phi } else { parvec <- object$ar } if (length(parvec) > 0) { last.nonzero <- max(which(abs(parvec) > 1e-08)) if (last.nonzero > 0) { return(structure(list( roots = polyroot(c(1, -parvec[1:last.nonzero])), type = "AR" ), class = "armaroots")) } } return(structure(list(roots = numeric(0), type = "AR"), class = "armaroots")) } # Compute MA roots maroots <- function(object) { if (!is.element("Arima", class(object))) { stop("object must be of class Arima") } parvec <- object$model$theta if (length(parvec) > 0) { last.nonzero <- max(which(abs(parvec) > 1e-08)) if (last.nonzero > 0) { return(structure(list( roots = polyroot(c(1, parvec[1:last.nonzero])), type = "MA" ), class = "armaroots")) } } return(structure(list(roots = numeric(0), type = "MA"), class = "armaroots")) } plot.armaroots <- function(x, xlab, ylab, main, ...) { if (missing(main)) { main <- paste("Inverse", x$type, "roots") } oldpar <- par(pty = "s") on.exit(par(oldpar)) plot( c(-1, 1), c(-1, 1), xlab = xlab, ylab = ylab, type = "n", bty = "n", xaxt = "n", yaxt = "n", main = main, ... ) axis(1, at = c(-1, 0, 1), line = 0.5, tck = -0.025) axis(2, at = c(-1, 0, 1), labels = c("-i", "0", "i"), line = 0.5, tck = -0.025) circx <- seq(-1, 1, length.out = 501) circy <- sqrt(1 - circx^2) lines(c(circx, circx), c(circy, -circy), col = "gray") lines(c(-2, 2), c(0, 0), col = "gray") lines(c(0, 0), c(-2, 2), col = "gray") if (length(x$roots) > 0) { inside <- abs(x$roots) > 1 points(1 / x$roots[inside], pch = 19, col = "black") if (sum(!inside) > 0) { points(1 / x$roots[!inside], pch = 19, col = "red") } } } #' Plot characteristic roots from ARIMA model #' #' Produces a plot of the inverse AR and MA roots of an ARIMA model. Inverse #' roots outside the unit circle are shown in red. #' #' \code{autoplot} will produce an equivalent plot as a ggplot object. #' #' @param x Object of class \dQuote{Arima} or \dQuote{ar}. #' @param object Object of class \dQuote{Arima} or \dQuote{ar}. 
Used for ggplot #' graphics (S3 method consistency). #' @param type Determines if both AR and MA roots are plotted, of if just one #' set is plotted. #' @param main Main title. Default is "Inverse AR roots" or "Inverse MA roots". #' @param xlab X-axis label. #' @param ylab Y-axis label. #' @param ... Other plotting parameters passed to \code{\link[graphics]{par}}. #' @return None. Function produces a plot #' @author Rob J Hyndman & Mitchell O'Hara-Wild #' @seealso \code{\link{Arima}}, \code{\link[stats]{ar}} #' @keywords hplot #' @examples #' #' library(ggplot2) #' #' fit <- Arima(WWWusage, order = c(3, 1, 0)) #' plot(fit) #' autoplot(fit) #' #' fit <- Arima(woolyrnq, order = c(2, 0, 0), seasonal = c(2, 1, 1)) #' plot(fit) #' autoplot(fit) #' #' plot(ar.ols(gold[1:61])) #' autoplot(ar.ols(gold[1:61])) #' @export plot.Arima <- function(x, type = c("both", "ar", "ma"), main, xlab = "Real", ylab = "Imaginary", ...) { type <- match.arg(type) if (!is.element("Arima", class(x))) { stop("This function is for objects of class 'Arima'.") } q <- p <- 0 # AR component if (length(x$model$phi) > 0) { test <- abs(x$model$phi) > 1e-09 if (any(test)) { p <- max(which(test)) } } # MA component if (length(x$model$theta) > 0) { test <- abs(x$model$theta) > 1e-09 if (any(test)) { q <- max(which(test)) } } # Check for MA parts if (type == "both") { if (p == 0) { type <- "ma" } else if (q == 0) { type <- "ar" } } if ((type == "ar" && (p == 0)) || (type == "ma" && (q == 0)) || (p == 0 && q == 0)) { warning("No roots to plot") if (missing(main)) { main <- "No AR or MA roots" } } if (type == "both") { oldpar <- par(mfrow = c(1, 2)) on.exit(par(oldpar)) } if (type != "ma") { plot(arroots(x), main = main, xlab = xlab, ylab = ylab, ...) } if (type != "ar") { plot(maroots(x), main = main, xlab = xlab, ylab = ylab, ...) } } #' @rdname plot.Arima #' @export plot.ar <- function(x, main, xlab = "Real", ylab = "Imaginary", ...) { if (!is.element("ar", class(x))) { stop("This function is for objects of class 'ar'.") } plot(arroots(x), main = main, xlab = xlab, ylab = ylab, ...) } forecast/R/theta.R0000644000176200001440000001155314150370574013524 0ustar liggesusers# Implement standard Theta method of Assimakopoulos and Nikolopoulos (2000) # More general methods are available in the forecTheta package # Author: RJH #' Theta method forecast #' #' Returns forecasts and prediction intervals for a theta method forecast. #' #' The theta method of Assimakopoulos and Nikolopoulos (2000) is equivalent to #' simple exponential smoothing with drift. This is demonstrated in Hyndman and #' Billah (2003). #' #' The series is tested for seasonality using the test outlined in A&N. If #' deemed seasonal, the series is seasonally adjusted using a classical #' multiplicative decomposition before applying the theta method. The resulting #' forecasts are then reseasonalized. #' #' Prediction intervals are computed using the underlying state space model. #' #' More general theta methods are available in the #' \code{\link[forecTheta]{forecTheta}} package. #' #' @param y a numeric vector or time series of class \code{ts} #' @param h Number of periods for forecasting #' @param level Confidence levels for prediction intervals. #' @param fan If TRUE, level is set to seq(51,99,by=3). This is suitable for #' fan plots. #' @param x Deprecated. Included for backwards compatibility. #' @return An object of class "\code{forecast}". 
#' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{rwf}. #' #' An object of class \code{"forecast"} is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The original time series #' (either \code{object} itself or the time series used to create the model #' stored as \code{object}).} \item{residuals}{Residuals from the fitted model. #' That is x minus fitted values.} \item{fitted}{Fitted values (one-step #' forecasts)} #' @author Rob J Hyndman #' @seealso \code{\link[stats]{arima}}, \code{\link{meanf}}, \code{\link{rwf}}, #' \code{\link{ses}} #' @references Assimakopoulos, V. and Nikolopoulos, K. (2000). The theta model: #' a decomposition approach to forecasting. \emph{International Journal of #' Forecasting} \bold{16}, 521-530. #' #' Hyndman, R.J., and Billah, B. (2003) Unmasking the Theta method. #' \emph{International J. Forecasting}, \bold{19}, 287-290. #' @keywords ts #' @examples #' nile.fcast <- thetaf(Nile) #' plot(nile.fcast) #' @export thetaf <- function(y, h = ifelse(frequency(y) > 1, 2 * frequency(y), 10), level = c(80, 95), fan = FALSE, x = y) { # Check inputs if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 && max(level) < 1) { level <- 100 * level } else if (min(level) < 0 || max(level) > 99.99) { stop("Confidence limit out of range") } } # Check seasonality n <- length(x) x <- as.ts(x) m <- frequency(x) if (m > 1 && !is.constant(x) && n > 2 * m) { r <- as.numeric(acf(x, lag.max = m, plot = FALSE)$acf)[-1] stat <- sqrt((1 + 2 * sum(r[-m]^2)) / n) seasonal <- (abs(r[m]) / stat > qnorm(0.95)) } else { seasonal <- FALSE } # Seasonal decomposition origx <- x if (seasonal) { decomp <- decompose(x, type = "multiplicative") if (any(abs(seasonal(decomp)) < 1e-4)) { warning("Seasonal indexes close to zero. 
Using non-seasonal Theta method") } else { x <- seasadj(decomp) } } # Find theta lines fcast <- ses(x, h = h) tmp2 <- lsfit(0:(n - 1), x)$coefficients[2] / 2 alpha <- pmax(1e-10, fcast$model$par["alpha"]) fcast$mean <- fcast$mean + tmp2 * (0:(h - 1) + (1 - (1 - alpha)^n) / alpha) # Reseasonalize if (seasonal) { fcast$mean <- fcast$mean * rep(tail(decomp$seasonal, m), trunc(1 + h / m))[1:h] fcast$fitted <- fcast$fitted * decomp$seasonal } fcast$residuals <- origx - fcast$fitted # Find prediction intervals fcast.se <- sqrt(fcast$model$sigma2) * sqrt((0:(h - 1)) * alpha^2 + 1) nconf <- length(level) fcast$lower <- fcast$upper <- ts(matrix(NA, nrow = h, ncol = nconf)) tsp(fcast$lower) <- tsp(fcast$upper) <- tsp(fcast$mean) for (i in 1:nconf) { zt <- -qnorm(0.5 - level[i] / 200) fcast$lower[, i] <- fcast$mean - zt * fcast.se fcast$upper[, i] <- fcast$mean + zt * fcast.se } # Return results fcast$x <- origx fcast$level <- level fcast$method <- "Theta" fcast$model <- list(alpha = alpha, drift = tmp2, sigma = fcast$model$sigma2) fcast$model$call <- match.call() return(fcast) } forecast/R/newarima2.R0000644000176200001440000010012714323125536014276 0ustar liggesusers#' Fit best ARIMA model to univariate time series #' #' Returns best ARIMA model according to either AIC, AICc or BIC value. The #' function conducts a search over possible model within the order constraints #' provided. #' #' The default arguments are designed for rapid estimation of models for many time series. #' If you are analysing just one time series, and can afford to take some more time, it #' is recommended that you set \code{stepwise=FALSE} and \code{approximation=FALSE}. #' #' Non-stepwise selection can be slow, especially for seasonal data. The stepwise #' algorithm outlined in Hyndman & Khandakar (2008) is used except that the default #' method for selecting seasonal differences is now based on an estimate of seasonal #' strength (Wang, Smith & Hyndman, 2006) rather than the Canova-Hansen test. #' There are also some other minor variations to the algorithm described in #' Hyndman and Khandakar (2008). #' #' @inheritParams stats::arima #' @param y a univariate time series #' @param d Order of first-differencing. If missing, will choose a value based #' on \code{test}. #' @param D Order of seasonal-differencing. If missing, will choose a value #' based on \code{season.test}. #' @param max.p Maximum value of p #' @param max.q Maximum value of q #' @param max.P Maximum value of P #' @param max.Q Maximum value of Q #' @param max.order Maximum value of p+q+P+Q if model selection is not #' stepwise. #' @param max.d Maximum number of non-seasonal differences #' @param max.D Maximum number of seasonal differences #' @param start.p Starting value of p in stepwise procedure. #' @param start.q Starting value of q in stepwise procedure. #' @param start.P Starting value of P in stepwise procedure. #' @param start.Q Starting value of Q in stepwise procedure. #' @param stationary If \code{TRUE}, restricts search to stationary models. #' @param seasonal If \code{FALSE}, restricts search to non-seasonal models. #' @param ic Information criterion to be used in model selection. #' @param stepwise If \code{TRUE}, will do stepwise selection (faster). #' Otherwise, it searches over all models. Non-stepwise selection can be very #' slow, especially for seasonal models. #' @param nmodels Maximum number of models considered in the stepwise search. #' @param trace If \code{TRUE}, the list of ARIMA models considered will be #' reported. 
#' @param approximation If \code{TRUE}, estimation is via conditional sums of #' squares and the information criteria used for model selection are #' approximated. The final model is still computed using maximum likelihood #' estimation. Approximation should be used for long time series or a high #' seasonal period to avoid excessive computation times. #' @param truncate An integer value indicating how many observations to use in #' model selection. The last \code{truncate} values of the series are used to #' select a model when \code{truncate} is not \code{NULL} and #' \code{approximation=TRUE}. All observations are used if either #' \code{truncate=NULL} or \code{approximation=FALSE}. #' @param xreg Optionally, a numerical vector or matrix of external regressors, which #' must have the same number of rows as \code{y}. (It should not be a data frame.) #' @param test Type of unit root test to use. See \code{\link{ndiffs}} for #' details. #' @param test.args Additional arguments to be passed to the unit root test. #' @param seasonal.test This determines which method is used to select the number of seasonal differences. #' The default method is to use a measure of seasonal strength computed from an STL decomposition. #' Other possibilities involve seasonal unit root tests. #' @param seasonal.test.args Additional arguments to be passed to the seasonal #' unit root test. #' See \code{\link{nsdiffs}} for details. #' @param allowdrift If \code{TRUE}, models with drift terms are considered. #' @param allowmean If \code{TRUE}, models with a non-zero mean are considered. #' @param parallel If \code{TRUE} and \code{stepwise = FALSE}, then the #' specification search is done in parallel. This can give a significant #' speedup on multicore machines. #' @param num.cores Allows the user to specify the amount of parallel processes #' to be used if \code{parallel = TRUE} and \code{stepwise = FALSE}. If #' \code{NULL}, then the number of logical cores is automatically detected and #' all available cores are used. #' @param x Deprecated. Included for backwards compatibility. #' @param ... Additional arguments to be passed to \code{\link[stats]{arima}}. #' @inheritParams forecast.ts #' #' @return Same as for \code{\link{Arima}} #' @author Rob J Hyndman #' @seealso \code{\link{Arima}} #' @references Hyndman, RJ and Khandakar, Y (2008) "Automatic time series #' forecasting: The forecast package for R", \emph{Journal of Statistical #' Software}, \bold{26}(3). #' #' Wang, X, Smith, KA, Hyndman, RJ (2006) "Characteristic-based clustering #' for time series data", \emph{Data Mining and Knowledge Discovery}, #' \bold{13}(3), 335-364. #' @keywords ts #' @examples #' fit <- auto.arima(WWWusage) #' plot(forecast(fit,h=20)) #' #' @export auto.arima <- function(y, d=NA, D=NA, max.p=5, max.q=5, max.P=2, max.Q=2, max.order=5, max.d=2, max.D=1, start.p=2, start.q=2, start.P=1, start.Q=1, stationary=FALSE, seasonal=TRUE, ic=c("aicc", "aic", "bic"), stepwise=TRUE, nmodels = 94, trace=FALSE, approximation=(length(x) > 150 | frequency(x) > 12), method = NULL, truncate=NULL, xreg=NULL, test=c("kpss", "adf", "pp"), test.args = list(), seasonal.test=c("seas", "ocsb", "hegy", "ch"), seasonal.test.args = list(), allowdrift=TRUE, allowmean=TRUE, lambda=NULL, biasadj=FALSE, parallel=FALSE, num.cores=2, x=y, ...) { # Only non-stepwise parallel implemented so far. 
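# The stepwise search is inherently sequential: each candidate model depends
# on the best model found so far, so only the exhaustive (stepwise = FALSE)
# search can be run in parallel.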
if (stepwise && parallel) { warning("Parallel computation is only implemented when stepwise=FALSE; the model will be fitted in serial.") parallel <- FALSE } if (trace && parallel) { message("Tracing the model search in parallel is not supported.") trace <- FALSE } series <- deparse(substitute(y)) x <- as.ts(x) if (NCOL(x) > 1) { stop("auto.arima can only handle univariate time series") } # Trim leading NAs and find length of non-missing data orig.x <- x missing <- is.na(x) firstnonmiss <- head(which(!missing),1) lastnonmiss <- tail(which(!missing),1) serieslength <- sum(!missing[firstnonmiss:lastnonmiss]) # Trim initial missing values x <- subset(x, start=firstnonmiss) if(!is.null(xreg)) { if(!is.numeric(xreg)) stop("xreg should be a numeric matrix or a numeric vector") xreg <- as.matrix(xreg) xreg <- xreg[firstnonmiss:NROW(xreg),,drop=FALSE] } # Check for constant data if (is.constant(x)) { if(all(is.na(x))) stop("All data are missing") if (allowmean) { fit <- Arima(x, order = c(0, 0, 0), fixed = mean(x, na.rm = TRUE), ...) } else { fit <- Arima(x, order = c(0, 0, 0), include.mean = FALSE, ...) } fit$x <- orig.x fit$series <- series fit$call <- match.call() fit$call$x <- data.frame(x = x) fit$constant <- TRUE return(fit) } ic <- match.arg(ic) test <- match.arg(test) seasonal.test <- match.arg(seasonal.test) # Only consider non-seasonal models if (seasonal) { m <- frequency(x) } else { m <- 1 } if (m < 1) { # warning("I can't handle data with frequency less than 1. Seasonality will be ignored.") m <- 1 } else { m <- round(m) } # Avoid non-integer seasonal periods # Cap the maximum orders at one third of the series length max.p <- min(max.p, floor(serieslength / 3)) max.q <- min(max.q, floor(serieslength / 3)) max.P <- min(max.P, floor(serieslength / 3 / m)) max.Q <- min(max.Q, floor(serieslength / 3 / m)) # Use AIC if npar <= 3 # AICc won't work for tiny samples.
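# (The AICc correction term 2*k*(k+1)/(n-k-1) is undefined or negative when n
# is this small, so plain AIC is used instead.)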
if (serieslength <= 3L) { ic <- "aic" } # Transform data if requested if (!is.null(lambda)) { x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") attr(lambda, "biasadj") <- biasadj } # Check xreg and do regression if necessary if (!is.null(xreg)) { if (is.null(colnames(xreg))) { colnames(xreg) <- if (ncol(xreg) == 1) "xreg" else paste("xreg", 1:ncol(xreg), sep = "") } xregg <- xreg xx <- x # Check that xreg is not rank deficient # First check if any columns are constant constant_columns <- apply(xregg, 2, is.constant) if (all(constant_columns)) { xregg <- NULL } else{ if (any(constant_columns)) { xregg <- xregg[, -which(constant_columns), drop = FALSE] } # Now check if it is rank deficient sv <- svd(na.omit(cbind(rep(1, NROW(xregg)), xregg)))$d if (min(sv) / sum(sv) < .Machine$double.eps) { stop("xreg is rank deficient") } # Finally find residuals from regression in order # to estimate appropriate level of differencing j <- !is.na(x) & !is.na(rowSums(xregg)) xx[j] <- residuals(lm(x ~ xregg)) } } else { xx <- x xregg <- NULL } # Choose order of differencing if (stationary) { d <- D <- 0 } if (m == 1) { D <- max.P <- max.Q <- 0 } else if(is.na(D) & length(xx) <= 2*m) { D <- 0 } else if(is.na(D)) { D <- do.call("nsdiffs", c(list(xx, test=seasonal.test, max.D=max.D), seasonal.test.args)) # Make sure xreg is not null after differencing if (D > 0 && !is.null(xregg)) { diffxreg <- diff(xregg, differences = D, lag = m) if (any(apply(diffxreg, 2, is.constant))) { D <- D - 1 } } # Make sure xx is not all missing after differencing if (D > 0) { dx <- diff(xx, differences = D, lag = m) if (all(is.na(dx))) D <- D - 1 } } if (D > 0) { dx <- diff(xx, differences = D, lag = m) } else { dx <- xx } if (!is.null(xregg)) { if (D > 0) { diffxreg <- diff(xregg, differences = D, lag = m) } else { diffxreg <- xregg } } if (is.na(d)) { d <- do.call("ndiffs", c(list(dx, test = test, max.d = max.d), test.args)) # Make sure xreg is not null after differencing if (d > 0 && !is.null(xregg)) { diffxreg <- diff(diffxreg, differences = d, lag = 1) if (any(apply(diffxreg, 2, is.constant))) { d <- d - 1 } } # Make sure dx is not all missing after differencing if (d > 0) { diffdx <- diff(dx, differences=d, lag=1) if(all(is.na(diffdx))) d <- d - 1 } } # Check number of differences selected if (D >= 2) { warning("Having more than one seasonal differences is not recommended. Please consider using only one seasonal difference.") } else if (D + d > 2) { warning("Having 3 or more differencing operations is not recommended. Please consider reducing the total number of differences.") } if (d > 0) { dx <- diff(dx, differences = d, lag = 1) } if(length(dx) == 0L) stop("Not enough data to proceed") else if (is.constant(dx)) { if (is.null(xreg)) { if (D > 0 && d == 0) { fit <- Arima(x, order = c(0, d, 0), seasonal = list(order = c(0, D, 0), period = m), include.constant = TRUE, fixed = mean(dx/m, na.rm = TRUE), method = method, ...) } else if (D > 0 && d > 0) { fit <- Arima(x, order = c(0, d, 0), seasonal = list(order = c(0, D, 0), period = m), method = method, ...) } else if (d == 2) { fit <- Arima(x, order = c(0, d, 0), method = method, ...) } else if (d < 2) { fit <- Arima(x, order = c(0, d, 0), include.constant = TRUE, fixed = mean(dx, na.rm = TRUE), method = method, ...) } else { stop("Data follow a simple polynomial and are not suitable for ARIMA modelling.") } } else # Perfect regression { if (D > 0) { fit <- Arima(x, order = c(0, d, 0), seasonal = list(order = c(0, D, 0), period = m), xreg = xreg, method = method, ...) 
} else { fit <- Arima(x, order = c(0, d, 0), xreg = xreg, method = method, ...) } } fit$x <- orig.x fit$series <- series fit$call <- match.call() fit$call$x <- data.frame(x = x) return(fit) } if (m > 1) { if (max.P > 0) { max.p <- min(max.p, m - 1) } if (max.Q > 0) { max.q <- min(max.q, m - 1) } } # Find constant offset for AIC calculation using white noise model if (approximation) { if (!is.null(truncate)) { tspx <- tsp(x) if (length(x) > truncate) { x <- ts(tail(x, truncate), end = tspx[2], frequency = tspx[3]) } } if (D == 0) { fit <- try(stats::arima(x, order = c(0, d, 0), xreg = xreg, ...), silent = TRUE) } else { fit <- try(stats::arima( x, order = c(0, d, 0), seasonal = list(order = c(0, D, 0), period = m), xreg = xreg, ... ), silent = TRUE) } if (!is.element("try-error", class(fit))) { offset <- -2 * fit$loglik - serieslength * log(fit$sigma2) } else # Not sure this should ever happen { # warning("Unable to calculate AIC offset") offset <- 0 } } else { offset <- 0 } allowdrift <- allowdrift & (d + D) == 1 allowmean <- allowmean & (d + D) == 0 constant <- allowdrift | allowmean if (approximation && trace) { cat("\n Fitting models using approximations to speed things up...\n") } if (!stepwise) { bestfit <- search.arima( x, d, D, max.p, max.q, max.P, max.Q, max.order, stationary, ic, trace, approximation, method = method, xreg = xreg, offset = offset, allowdrift = allowdrift, allowmean = allowmean, parallel = parallel, num.cores = num.cores, ... ) bestfit$call <- match.call() bestfit$call$x <- data.frame(x = x) bestfit$lambda <- lambda bestfit$x <- orig.x bestfit$series <- series bestfit$fitted <- fitted.Arima(bestfit) if (trace) { cat("\n\n Best model:", arima.string(bestfit, padding = TRUE), "\n\n") } return(bestfit) } # Starting model if (length(x) < 10L) { start.p <- min(start.p, 1L) start.q <- min(start.q, 1L) start.P <- 0L start.Q <- 0L } p <- start.p <- min(start.p, max.p) q <- start.q <- min(start.q, max.q) P <- start.P <- min(start.P, max.P) Q <- start.Q <- min(start.Q, max.Q) results <- matrix(NA, nrow = nmodels, ncol = 8) bestfit <- myarima(x, order = c(p, d, q), seasonal = c(P, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[1, ] <- c(p, d, q, P, D, Q, constant, bestfit$ic) # Null model with possible constant fit <- myarima(x, order = c(0, d, 0), seasonal = c(0, D, 0), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[2, ] <- c(0, d, 0, 0, D, 0, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit p <- q <- P <- Q <- 0 } k <- 2 # Basic AR model if (max.p > 0 || max.P > 0) { fit <- myarima(x, order = c(max.p > 0, d, 0), seasonal = c((m > 1) & (max.P > 0), D, 0), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k+1, ] <- c(max.p > 0, d, 0, (m > 1) & (max.P > 0), D, 0, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit p <- (max.p > 0) P <- (m > 1) & (max.P > 0) q <- Q <- 0 } k <- k + 1 } # Basic MA model if (max.q > 0 || max.Q > 0) { fit <- myarima(x, order = c(0, d, max.q > 0), seasonal = c(0, D, (m > 1) & (max.Q > 0)), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) 
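# --- Editor's note: illustrative sketch, not package code. -------------------
# The `offset` computed above (from the white noise fit) makes information
# criteria comparable when the fast CSS approximation is used: myarima(),
# defined later in this file, sets
#   aic = offset + nstar * log(sigma2) + 2 * npar
# when method == "CSS". To compare the two modes in practice (never executed):
if (FALSE) {
  auto.arima(AirPassengers, approximation = TRUE)   # CSS-based IC, faster
  auto.arima(AirPassengers, approximation = FALSE)  # full likelihood IC
}
# -----------------------------------------------------------------------------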
results[k+1, ] <- c(0, d, max.q > 0, 0, D, (m > 1) & (max.Q > 0), constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit p <- P <- 0 Q <- (m > 1) & (max.Q > 0) q <- (max.q > 0) } k <- k + 1 } # Null model with no constant if (constant) { fit <- myarima(x, order = c(0, d, 0), seasonal = c(0, D, 0), constant = FALSE, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k+1, ] <- c(0, d, 0, 0, D, 0, 0, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit p <- q <- P <- Q <- 0 } k <- k + 1 } startk <- 0 while (startk < k && k < nmodels) { startk <- k if (P > 0 && newmodel(p, d, q, P - 1, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q), seasonal = c(P - 1, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q, P - 1, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit P <- (P - 1) next } } if (Q > 0 && newmodel(p, d, q, P, D, Q - 1, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q), seasonal = c(P, D, Q - 1), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q, P, D, Q - 1, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit Q <- (Q - 1) next } } if (P < max.P && newmodel(p, d, q, P + 1, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q), seasonal = c(P + 1, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q, P + 1, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit P <- (P + 1) next } } if (Q < max.Q && newmodel(p, d, q, P, D, Q + 1, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q), seasonal = c(P, D, Q + 1), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q, P, D, Q + 1, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit Q <- (Q + 1) next } } if (Q > 0 && P > 0 && newmodel(p, d, q, P - 1, D, Q - 1, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q), seasonal = c(P - 1, D, Q - 1), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q, P - 1, D, Q - 1, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit Q <- (Q - 1) P <- (P - 1) next } } if (Q < max.Q && P > 0 && newmodel(p, d, q, P - 1, D, Q + 1, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q), seasonal = c(P - 1, D, Q + 1), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q, P - 1, D, Q + 1, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit Q <- (Q + 1) P <- (P - 1) next } } if (Q > 0 && P < max.P && newmodel(p, d, q, P + 1, D, Q - 1, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q), seasonal = c(P + 1, D, Q - 1), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) 
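# --- Editor's note: illustrative sketch, not package code. -------------------
# Each block in this while loop tries one neighbour of the current best model,
# varying P and Q (and further below, p and q) by +/- 1 and toggling the
# constant; newmodel(), defined later in this file, skips any
# (p,d,q)(P,D,Q) + constant combination already recorded in `results`.
# The search can be watched via the trace argument (never executed):
if (FALSE) {
  auto.arima(USAccDeaths, stepwise = TRUE, trace = TRUE)
}
# -----------------------------------------------------------------------------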
results[k, ] <- c(p, d, q, P + 1, D, Q - 1, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit Q <- (Q - 1) P <- (P + 1) next } } if (Q < max.Q && P < max.P && newmodel(p, d, q, P + 1, D, Q + 1, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q), seasonal = c(P + 1, D, Q + 1), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q, P + 1, D, Q + 1, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit Q <- (Q + 1) P <- (P + 1) next } } if (p > 0 && newmodel(p - 1, d, q, P, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p - 1, d, q), seasonal = c(P, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p - 1, d, q, P, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit p <- (p - 1) next } } if (q > 0 && newmodel(p, d, q - 1, P, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q - 1), seasonal = c(P, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q - 1, P, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit q <- (q - 1) next } } if (p < max.p && newmodel(p + 1, d, q, P, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p + 1, d, q), seasonal = c(P, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p + 1, d, q, P, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit p <- (p + 1) next } } if (q < max.q && newmodel(p, d, q + 1, P, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q + 1), seasonal = c(P, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q + 1, P, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit q <- (q + 1) next } } if (q > 0 && p > 0 && newmodel(p - 1, d, q - 1, P, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p - 1, d, q - 1), seasonal = c(P, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p - 1, d, q - 1, P, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit q <- (q - 1) p <- (p - 1) next } } if (q < max.q && p > 0 && newmodel(p - 1, d, q + 1, P, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p - 1, d, q + 1), seasonal = c(P, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p - 1, d, q + 1, P, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit q <- (q + 1) p <- (p - 1) next } } if (q > 0 && p < max.p && newmodel(p + 1, d, q - 1, P, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p + 1, d, q - 1), seasonal = c(P, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) 
results[k, ] <- c(p + 1, d, q - 1, P, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit q <- (q - 1) p <- (p + 1) next } } if (q < max.q && p < max.p && newmodel(p + 1, d, q + 1, P, D, Q, constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p + 1, d, q + 1), seasonal = c(P, D, Q), constant = constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p + 1, d, q + 1, P, D, Q, constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit q <- (q + 1) p <- (p + 1) next } } if (allowdrift || allowmean) { if (newmodel(p, d, q, P, D, Q, !constant, results[1:k, ])) { k <- k + 1; if(k>nmodels) next fit <- myarima(x, order = c(p, d, q), seasonal = c(P, D, Q), constant = !constant, ic, trace, approximation, method = method, offset = offset, xreg = xreg, ...) results[k, ] <- c(p, d, q, P, D, Q, !constant, fit$ic) if (fit$ic < bestfit$ic) { bestfit <- fit constant <- !constant } } } } if(k > nmodels){ warning(sprintf("Stepwise search was stopped early due to reaching the model number limit: `nmodels = %i`", nmodels)) } # Refit using ML if approximation used for IC if (approximation && !is.null(bestfit$arma)) { if (trace) { cat("\n\n Now re-fitting the best model(s) without approximations...\n") } icorder <- order(results[, 8]) nmodels <- sum(!is.na(results[, 8])) for (i in seq(nmodels)) { k <- icorder[i] fit <- myarima( x, order = c(results[k, 1], d, results[k, 3]), seasonal = c(results[k, 4], D, results[k, 6]), constant = results[k, 7] == 1, ic, trace, approximation = FALSE, method = method, xreg = xreg, ... ) if (fit$ic < Inf) { bestfit <- fit break } } } # Nothing fitted if (bestfit$ic == Inf && !isTRUE(method=="CSS")) { if (trace) { cat("\n") } stop("No suitable ARIMA model found") } # Return best fit bestfit$x <- orig.x bestfit$series <- series bestfit$ic <- NULL bestfit$call <- match.call() bestfit$call$x <- data.frame(x = x) bestfit$lambda <- lambda bestfit$fitted <- fitted.Arima(bestfit) if (trace) { cat("\n\n Best model:", arima.string(bestfit, padding = TRUE), "\n\n") } return(bestfit) } # Calls arima from stats package and adds data to the returned object # Also allows refitting to new data # and drift terms to be included. myarima <- function(x, order = c(0, 0, 0), seasonal = c(0, 0, 0), constant=TRUE, ic="aic", trace=FALSE, approximation=FALSE, offset=0, xreg=NULL, method = NULL, ...) 
{ # Length of non-missing interior missing <- is.na(x) firstnonmiss <- head(which(!missing),1) lastnonmiss <- tail(which(!missing),1) n <- sum(!missing[firstnonmiss:lastnonmiss]) m <- frequency(x) use.season <- (sum(seasonal) > 0) & m > 0 diffs <- order[2] + seasonal[2] if(is.null(method)){ if (approximation) { method <- "CSS" } else { method <- "CSS-ML" } } if (diffs == 1 && constant) { xreg <- `colnames<-`(cbind(drift = 1:length(x), xreg), make.unique(c("drift", if(is.null(colnames(xreg)) && !is.null(xreg)) rep("", NCOL(xreg)) else colnames(xreg)))) if (use.season) { suppressWarnings(fit <- try(stats::arima(x = x, order = order, seasonal = list(order = seasonal, period = m), xreg = xreg, method = method, ...), silent = TRUE)) } else { suppressWarnings(fit <- try(stats::arima(x = x, order = order, xreg = xreg, method = method, ...), silent = TRUE)) } } else { if (use.season) { suppressWarnings(fit <- try(stats::arima(x = x, order = order, seasonal = list(order = seasonal, period = m), include.mean = constant, method = method, xreg = xreg, ...), silent = TRUE)) } else { suppressWarnings(fit <- try(stats::arima(x = x, order = order, include.mean = constant, method = method, xreg = xreg, ...), silent = TRUE)) } } if (is.null(xreg)) { nxreg <- 0 } else { nxreg <- ncol(as.matrix(xreg)) } if (!is.element("try-error", class(fit))) { nstar <- n - order[2] - seasonal[2] * m if (diffs == 1 && constant) { # fitnames <- names(fit$coef) # fitnames[length(fitnames)-nxreg] <- "drift" # names(fit$coef) <- fitnames fit$xreg <- xreg } npar <- length(fit$coef[fit$mask]) + 1 if (method == "CSS") { fit$aic <- offset + nstar * log(fit$sigma2) + 2 * npar } if (!is.na(fit$aic)) { fit$bic <- fit$aic + npar * (log(nstar) - 2) fit$aicc <- fit$aic + 2 * npar * (npar + 1) / (nstar - npar - 1) fit$ic <- switch(ic, bic = fit$bic, aic = fit$aic, aicc = fit$aicc) } else { fit$aic <- fit$bic <- fit$aicc <- fit$ic <- Inf } # Adjust residual variance to be unbiased fit$sigma2 <- sum(fit$residuals ^ 2, na.rm = TRUE) / (nstar - npar + 1) # Check for unit roots minroot <- 2 if (order[1] + seasonal[1] > 0) { testvec <- fit$model$phi k <- abs(testvec) > 1e-8 if (sum(k) > 0) { last.nonzero <- max(which(k)) } else { last.nonzero <- 0 } if (last.nonzero > 0) { testvec <- testvec[1:last.nonzero] proots <- try(polyroot(c(1,-testvec))) if (!is.element("try-error", class(proots))) { minroot <- min(minroot, abs(proots)) } else fit$ic <- Inf } } if (order[3] + seasonal[3] > 0 & fit$ic < Inf) { testvec <- fit$model$theta k <- abs(testvec) > 1e-8 if (sum(k) > 0) { last.nonzero <- max(which(k)) } else { last.nonzero <- 0 } if (last.nonzero > 0) { testvec <- testvec[1:last.nonzero] proots <- try(polyroot(c(1,testvec))) if (!is.element("try-error", class(proots))) { minroot <- min(minroot, abs(proots)) } else fit$ic <- Inf } } # Avoid bad models if (minroot < 1 + 1e-2 | checkarima(fit)) { fit$ic <- Inf } fit$xreg <- xreg if (trace) { cat("\n", arima.string(fit, padding = TRUE), ":", fit$ic) } return(structure(fit, class = c("forecast_ARIMA", "ARIMA", "Arima"))) } else { # Catch errors due to unused arguments if (length(grep("unused argument", fit)) > 0L) { stop(fit[1]) } if (trace) { cat("\n ARIMA(", order[1], ",", order[2], ",", order[3], ")", sep = "") if (use.season) { cat("(", seasonal[1], ",", seasonal[2], ",", seasonal[3], ")[", m, "]", sep = "") } if (constant && (order[2] + seasonal[2] == 0)) { cat(" with non-zero mean") } else if (constant && (order[2] + seasonal[2] == 1)) { cat(" with drift ") } else if (!constant && (order[2] + 
seasonal[2] == 0)) { cat(" with zero mean ") } else { cat(" ") } cat(" :", Inf) } return(list(ic = Inf)) } } newmodel <- function(p, d, q, P, D, Q, constant, results) { n <- nrow(results) for (i in 1:n) { if(!all(is.na(results[i, seq(7)]))) { if (all(c(p, d, q, P, D, Q, constant) == results[i, 1:7])) { return(FALSE) } } } return(TRUE) } arima.string <- function(object, padding=FALSE) { order <- object$arma[c(1, 6, 2, 3, 7, 4, 5)] m <- order[7] result <- paste("ARIMA(", order[1], ",", order[2], ",", order[3], ")", sep = "") if (m > 1 && sum(order[4:6]) > 0) { result <- paste(result, "(", order[4], ",", order[5], ",", order[6], ")[", m, "]", sep = "") } if (padding && m > 1 && sum(order[4:6]) == 0) { result <- paste(result, " ", sep = "") if (m <= 9) { result <- paste(result, " ", sep = "") } else if (m <= 99) { result <- paste(result, " ", sep = "") } else { result <- paste(result, " ", sep = "") } } if (!is.null(object$xreg)) { if (NCOL(object$xreg) == 1 && is.element("drift", names(object$coef))) { result <- paste(result, "with drift ") } else { result <- paste("Regression with", result, "errors") } } else { if (is.element("constant", names(object$coef)) || is.element("intercept", names(object$coef))) { result <- paste(result, "with non-zero mean") } else if (order[2] == 0 && order[5] == 0) { result <- paste(result, "with zero mean ") } else { result <- paste(result, " ") } } if (!padding) { # Strip trailing spaces result <- gsub("[ ]*$", "", result) } return(result) } #' @export summary.Arima <- function(object, ...) { class(object) <- c("summary.Arima", class(object)) object } #' @export print.summary.Arima <- function(x, ...) { NextMethod() cat("\nTraining set error measures:\n") print(accuracy(x)) } # Check that Arima object has positive coefficient variances without returning warnings checkarima <- function(object) { suppressWarnings(test <- any(is.nan(sqrt(diag(object$var.coef))))) return(test) } #' Is an object constant? #' #' Returns true if the object's numerical values do not vary. #' #' #' @param x object to be tested #' @export is.constant <- function(x) { x <- as.numeric(x) y <- rep(x[1], length(x)) return(isTRUE(all.equal(x, y))) } forecast/R/arima.R0000644000176200001440000010155214323125536013505 0ustar liggesuserssearch.arima <- function(x, d=NA, D=NA, max.p=5, max.q=5, max.P=2, max.Q=2, max.order=5, stationary=FALSE, ic=c("aic", "aicc", "bic"), trace=FALSE, approximation=FALSE, xreg=NULL, offset=offset, allowdrift=TRUE, allowmean=TRUE, parallel=FALSE, num.cores=2, ...) { # dataname <- substitute(x) ic <- match.arg(ic) m <- frequency(x) allowdrift <- allowdrift & (d + D) == 1 allowmean <- allowmean & (d + D) == 0 maxK <- (allowdrift | allowmean) # Choose model orders # Serial - technically could be combined with the code below if (parallel == FALSE) { best.ic <- Inf for (i in 0:max.p) { for (j in 0:max.q) { for (I in 0:max.P) { for (J in 0:max.Q) { if (i + j + I + J <= max.order) { for (K in 0:maxK) { fit <- myarima( x, order = c(i, d, j), seasonal = c(I, D, J), constant = (K == 1), trace = trace, ic = ic, approximation = approximation, offset = offset, xreg = xreg, ... 
) if (fit$ic < best.ic) { best.ic <- fit$ic bestfit <- fit constant <- (K == 1) } } } } } } } } else if (parallel == TRUE) { to.check <- WhichModels(max.p, max.q, max.P, max.Q, maxK) par.all.arima <- function(l, max.order) { .tmp <- UndoWhichModels(l) i <- .tmp[1] j <- .tmp[2] I <- .tmp[3] J <- .tmp[4] K <- .tmp[5] == 1 if (i + j + I + J <= max.order) { fit <- myarima( x, order = c(i, d, j), seasonal = c(I, D, J), constant = (K == 1), trace = trace, ic = ic, approximation = approximation, offset = offset, xreg = xreg, ... ) } if (exists("fit")) { return(cbind(fit, K)) } else { return(NULL) } } if (is.null(num.cores)) { num.cores <- detectCores() } all.models <- mclapply(X = to.check, FUN = par.all.arima, max.order=max.order, mc.cores = num.cores) # Remove null elements all.models <- all.models[!sapply(all.models, is.null)] # Choose the best model for (i in 1:length(all.models)) { if (!is.null(all.models[[i]][, 1]$ic) && all.models[[i]][, 1]$ic < best.ic) { bestfit <- all.models[[i]][, 1] best.ic <- bestfit$ic constant <- unlist(all.models[[i]][1, 2]) } } class(bestfit) <- c("forecast_ARIMA", "ARIMA", "Arima") } if (exists("bestfit")) { # Refit using ML if approximation used for IC if (approximation) { if (trace) { cat("\n\n Now re-fitting the best model(s) without approximations...\n") } # constant <- length(bestfit$coef) - ncol(xreg) > sum(bestfit$arma[1:4]) newbestfit <- myarima( x, order = bestfit$arma[c(1, 6, 2)], seasonal = bestfit$arma[c(3, 7, 4)], constant = constant, ic, trace = FALSE, approximation = FALSE, xreg = xreg, ... ) if (newbestfit$ic == Inf) { # Final model is lousy. Better try again without approximation # warning("Unable to fit final model using maximum likelihood. AIC value approximated") bestfit <- search.arima( x, d = d, D = D, max.p = max.p, max.q = max.q, max.P = max.P, max.Q = max.Q, max.order = max.order, stationary = stationary, ic = ic, trace = trace, approximation = FALSE, xreg = xreg, offset = offset, allowdrift = allowdrift, allowmean = allowmean, parallel = parallel, num.cores = num.cores, ... ) bestfit$ic <- switch(ic, bic = bestfit$bic, aic = bestfit$aic, aicc = bestfit$aicc) } else { bestfit <- newbestfit } } } else { stop("No ARIMA model could be estimated") } bestfit$x <- x bestfit$series <- deparse(substitute(x)) bestfit$ic <- NULL bestfit$call <- match.call() if (trace) { cat("\n\n") } return(bestfit) } # Set up seasonal dummies using Fourier series SeasDummy <- function(x) { n <- length(x) m <- frequency(x) if (m == 1) { stop("Non-seasonal data") } tt <- 1:n fmat <- matrix(NA, nrow = n, ncol = 2 * m) for (i in 1:m) { fmat[, 2 * i] <- sin(2 * pi * i * tt / m) fmat[, 2 * (i - 1) + 1] <- cos(2 * pi * i * tt / m) } return(fmat[, 1:(m - 1)]) } # CANOVA-HANSEN TEST # Largely based on uroot package code for CH.test() SD.test <- function(wts, s=frequency(wts)) { if (any(is.na(wts))) { stop("Series contains missing values.
Please choose order of seasonal differencing manually.") } if (s == 1) { stop("Not seasonal data") } t0 <- start(wts) N <- length(wts) if (N <= s) { stop("Insufficient data") } frec <- rep(1, as.integer((s + 1) / 2)) ltrunc <- round(s * (N / 100) ^ 0.25) R1 <- as.matrix(SeasDummy(wts)) lmch <- lm(wts ~ R1, na.action = na.exclude) # run the regression : y(i)=mu+f(i)'gamma(i)+e(i) Fhat <- Fhataux <- matrix(nrow = N, ncol = s - 1) for (i in 1:(s - 1)) Fhataux[, i] <- R1[, i] * residuals(lmch) for (i in 1:N) { for (n in 1:(s - 1)) Fhat[i, n] <- sum(Fhataux[1:i, n]) } wnw <- 1 - seq(1, ltrunc, 1) / (ltrunc + 1) Ne <- nrow(Fhataux) Omnw <- 0 for (k in 1:ltrunc) Omnw <- Omnw + (t(Fhataux)[, (k + 1):Ne] %*% Fhataux[1:(Ne - k), ]) * wnw[k] Omfhat <- (crossprod(Fhataux) + Omnw + t(Omnw)) / Ne sq <- seq(1, s - 1, 2) frecob <- rep(0, s - 1) for (i in 1:length(frec)) { if (frec[i] == 1 && i == as.integer(s / 2)) { frecob[sq[i]] <- 1 } if (frec[i] == 1 && i < as.integer(s / 2)) { frecob[sq[i]] <- frecob[sq[i] + 1] <- 1 } } a <- length(which(frecob == 1)) A <- matrix(0, nrow = s - 1, ncol = a) j <- 1 for (i in 1:(s - 1)) { if (frecob[i] == 1) { A[i, j] <- 1 ifelse(frecob[i] == 1, j <- j + 1, j <- j) } } tmp <- t(A) %*% Omfhat %*% A problems <- (min(svd(tmp)$d) < .Machine$double.eps) if (problems) { stL <- 0 } else { stL <- (1 / N ^ 2) * sum(diag(solve(tmp, tol = 1e-25) %*% t(A) %*% t(Fhat) %*% Fhat %*% A)) } return(stL) } #' Forecasting using ARIMA or ARFIMA models #' #' Returns forecasts and other information for univariate ARIMA models. #' #' For \code{Arima} or \code{ar} objects, the function calls #' \code{\link[stats]{predict.Arima}} or \code{\link[stats]{predict.ar}} and #' constructs an object of class "\code{forecast}" from the results. For #' \code{fracdiff} objects, the calculations are all done within #' \code{\link{forecast.fracdiff}} using the equations given by Peiris and #' Perera (1988). #' #' @param object An object of class "\code{Arima}", "\code{ar}" or #' "\code{fracdiff}". Usually the result of a call to #' \code{\link[stats]{arima}}, \code{\link{auto.arima}}, #' \code{\link[stats]{ar}}, \code{\link{arfima}} or #' \code{\link[fracdiff]{fracdiff}}. #' @param h Number of periods for forecasting. If \code{xreg} is used, \code{h} #' is ignored and the number of forecast periods is set to the number of rows #' of \code{xreg}. #' @param level Confidence level for prediction intervals. #' @param fan If \code{TRUE}, level is set to \code{seq(51,99,by=3)}. This is #' suitable for fan plots. #' @param xreg Future values of any regression variables (for class \code{Arima} #' objects only). A numerical vector or matrix of external regressors; it should not be a data frame. #' @param bootstrap If \code{TRUE}, then prediction intervals are computed using #' simulation with resampled errors. #' @param npaths Number of sample paths used in computing simulated prediction #' intervals when \code{bootstrap=TRUE}. #' @param ... Other arguments. #' @inheritParams forecast.ts #' #' @return An object of class "\code{forecast}". #' #' The function \code{summary} is used to obtain and print a summary of the #' results, while the function \code{plot} produces a plot of the forecasts and #' prediction intervals. #' #' The generic accessor functions \code{fitted.values} and \code{residuals} #' extract useful features of the value returned by \code{forecast.Arima}.
#' #' An object of class "\code{forecast}" is a list containing at least the #' following elements: \item{model}{A list containing information about the #' fitted model} \item{method}{The name of the forecasting method as a #' character string} \item{mean}{Point forecasts as a time series} #' \item{lower}{Lower limits for prediction intervals} \item{upper}{Upper #' limits for prediction intervals} \item{level}{The confidence values #' associated with the prediction intervals} \item{x}{The original time series #' (either \code{object} itself or the time series used to create the model #' stored as \code{object}).} \item{residuals}{Residuals from the fitted model. #' That is x minus fitted values.} \item{fitted}{Fitted values (one-step #' forecasts)} #' @author Rob J Hyndman #' @seealso \code{\link[stats]{predict.Arima}}, #' \code{\link[stats]{predict.ar}}, \code{\link{auto.arima}}, #' \code{\link{Arima}}, \code{\link[stats]{arima}}, \code{\link[stats]{ar}}, #' \code{\link{arfima}}. #' @references Peiris, M. & Perera, B. (1988), On prediction with fractionally #' differenced ARIMA models, \emph{Journal of Time Series Analysis}, #' \bold{9}(3), 215-220. #' @keywords ts #' @aliases forecast.forecast_ARIMA #' @examples #' fit <- Arima(WWWusage,c(3,1,0)) #' plot(forecast(fit)) #' #' library(fracdiff) #' x <- fracdiff.sim( 100, ma=-.4, d=.3)$series #' fit <- arfima(x) #' plot(forecast(fit,h=30)) #' #' @export forecast.Arima <- function(object, h=ifelse(object$arma[5] > 1, 2 * object$arma[5], 10), level=c(80, 95), fan=FALSE, xreg=NULL, lambda=object$lambda, bootstrap=FALSE, npaths=5000, biasadj=NULL, ...) { # Check whether there are non-existent arguments all.args <- names(formals()) user.args <- names(match.call())[-1L] # including arguments passed to 3 dots check <- user.args %in% all.args if (!all(check)) { error.args <- user.args[!check] warning(sprintf("The non-existent %s arguments will be ignored.", error.args)) } use.drift <- is.element("drift", names(object$coef)) x <- object$x <- getResponse(object) usexreg <- (use.drift | is.element("xreg", names(object))) # | use.constant) if (!is.null(xreg) && usexreg) { if(!is.numeric(xreg)) stop("xreg should be a numeric matrix or a numeric vector") xreg <- as.matrix(xreg) if (is.null(colnames(xreg))) { colnames(xreg) <- if (ncol(xreg) == 1) "xreg" else paste("xreg", 1:ncol(xreg), sep = "") } origxreg <- xreg <- as.matrix(xreg) h <- nrow(xreg) } else { if(!is.null(xreg)){ warning("xreg not required by this model, ignoring the provided regressors") xreg <- NULL } origxreg <- NULL } if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 & max(level) < 1) { level <- 100 * level } else if (min(level) < 0 | max(level) > 99.99) { stop("Confidence limit out of range") } } level <- sort(level) if (use.drift) { n <- length(x) #missing <- is.na(x) #firstnonmiss <- head(which(!missing),1) #n <- length(x) - firstnonmiss + 1 if (!is.null(xreg)) { xreg <- `colnames<-`(cbind(drift = (1:h) + n, xreg), make.unique(c("drift", if(is.null(colnames(xreg)) && !is.null(xreg)) rep("", NCOL(xreg)) else colnames(xreg)))) } else { xreg <- `colnames<-`(as.matrix((1:h) + n), "drift") } } # Check if data is constant if (!is.null(object$constant)) { if (object$constant) { pred <- list(pred = rep(x[1], h), se = rep(0, h)) } else { stop("Strange value of object$constant") } } else if (usexreg) { if (is.null(xreg)) { stop("No regressors provided") } object$call$xreg <- getxreg(object) if (NCOL(xreg) != NCOL(object$call$xreg)) { stop("Number of regressors does not match 
fitted model") } if(!identical(colnames(xreg), colnames(object$call$xreg))){ warning("xreg contains different column names from the xreg used in training. Please check that the regressors are in the same order.") } pred <- predict(object, n.ahead = h, newxreg = xreg) } else { pred <- predict(object, n.ahead = h) } # Fix time series characteristics if there are missing values at end of series, or if tsp is missing from pred if (!is.null(x)) { tspx <- tsp(x) nx <- max(which(!is.na(x))) if (nx != length(x) | is.null(tsp(pred$pred)) | is.null(tsp(pred$se))) { tspx[2] <- time(x)[nx] start.f <- tspx[2] + 1 / tspx[3] pred$pred <- ts(pred$pred, frequency = tspx[3], start = start.f) pred$se <- ts(pred$se, frequency = tspx[3], start = start.f) } } # Compute prediction intervals nint <- length(level) if (bootstrap) # Compute prediction intervals using simulations { sim <- matrix(NA, nrow = npaths, ncol = h) for (i in 1:npaths) sim[i, ] <- simulate(object, nsim = h, bootstrap = TRUE, xreg = origxreg, lambda = lambda) lower <- apply(sim, 2, quantile, 0.5 - level / 200, type = 8) upper <- apply(sim, 2, quantile, 0.5 + level / 200, type = 8) if (nint > 1L) { lower <- t(lower) upper <- t(upper) } else { lower <- matrix(lower, ncol = 1) upper <- matrix(upper, ncol = 1) } } else { # Compute prediction intervals via the normal distribution lower <- matrix(NA, ncol = nint, nrow = length(pred$pred)) upper <- lower for (i in 1:nint) { qq <- qnorm(0.5 * (1 + level[i] / 100)) lower[, i] <- pred$pred - qq * pred$se upper[, i] <- pred$pred + qq * pred$se } if (!is.finite(max(upper))) { warning("Upper prediction intervals are not finite.") } } colnames(lower) <- colnames(upper) <- paste(level, "%", sep = "") lower <- ts(lower) upper <- ts(upper) tsp(lower) <- tsp(upper) <- tsp(pred$pred) method <- arima.string(object, padding = FALSE) seriesname <- if (!is.null(object$series)) { object$series } else if (!is.null(object$call$x)) { object$call$x } else { object$call$y } fits <- fitted.Arima(object) if (!is.null(lambda) & is.null(object$constant)) { # Back-transform point forecasts and prediction intervals pred$pred <- InvBoxCox(pred$pred, lambda, biasadj, pred$se^2) if (!bootstrap) { # Bootstrapped intervals already back-transformed lower <- InvBoxCox(lower, lambda) upper <- InvBoxCox(upper, lambda) } } return(structure( list( method = method, model = object, level = level, mean = future_msts(x, pred$pred), lower = future_msts(x, lower), upper = future_msts(x, upper), x = x, series = seriesname, fitted = copy_msts(x, fits), residuals = copy_msts(x, residuals.Arima(object)) ), class = "forecast" )) } #' @export forecast.forecast_ARIMA <- forecast.Arima #' @rdname forecast.Arima #' @export forecast.ar <- function(object, h=10, level=c(80, 95), fan=FALSE, lambda=NULL, bootstrap=FALSE, npaths=5000, biasadj=FALSE, ...) 
{ x <- getResponse(object) pred <- predict(object, newdata = x, n.ahead = h) if (bootstrap) # Recompute se using simulations { sim <- matrix(NA, nrow = npaths, ncol = h) for (i in 1:npaths) sim[i, ] <- simulate(object, nsim = h, bootstrap = TRUE) pred$se <- apply(sim, 2, sd) } if (fan) { level <- seq(51, 99, by = 3) } else { if (min(level) > 0 & max(level) < 1) { level <- 100 * level } else if (min(level) < 0 | max(level) > 99.99) { stop("Confidence limit out of range") } } nint <- length(level) lower <- matrix(NA, ncol = nint, nrow = length(pred$pred)) upper <- lower for (i in 1:nint) { qq <- qnorm(0.5 * (1 + level[i] / 100)) lower[, i] <- pred$pred - qq * pred$se upper[, i] <- pred$pred + qq * pred$se } colnames(lower) <- colnames(upper) <- paste(level, "%", sep = "") method <- paste("AR(", object$order, ")", sep = "") f <- frequency(x) res <- residuals.ar(object) fits <- fitted.ar(object) if (!is.null(lambda)) { pred$pred <- InvBoxCox(pred$pred, lambda, biasadj, list(level = level, upper = upper, lower = lower)) lower <- InvBoxCox(lower, lambda) upper <- InvBoxCox(upper, lambda) fits <- InvBoxCox(fits, lambda) x <- InvBoxCox(x, lambda) } return(structure( list( method = method, model = object, level = level, mean = future_msts(x, pred$pred), lower = future_msts(x, lower), upper = future_msts(x, upper), x = x, series = deparse(object$call$x), fitted = copy_msts(x, fits), residuals = copy_msts(x, res) ) , class = "forecast" )) } # Find xreg matrix in an Arima object getxreg <- function(z) { # Look in the obvious place first if (is.element("xreg", names(z))) { return(z$xreg) } # Next most obvious place else if (is.element("xreg", names(z$coef))) { return(eval.parent(z$coef$xreg)) } # Now check under call else if (is.element("xreg", names(z$call))) { return(eval.parent(z$call$xreg)) } # Otherwise check if it exists else { armapar <- sum(z$arma[1:4]) + is.element("intercept", names(z$coef)) npar <- length(z$coef) if (npar > armapar) { stop("It looks like you have an xreg component but I don't know what it is.\n Please use Arima() or auto.arima() rather than arima().") } else { # No xreg used return(NULL) } } } #' Errors from a regression model with ARIMA errors #' #' Returns time series of the regression residuals from a fitted ARIMA model. #' #' This is a deprecated function #' which is identical to \code{\link{residuals.Arima}(object, type="regression")} #' Regression residuals are equal to the original data #' minus the effect of any regression variables. If there are no regression #' variables, the errors will be identical to the original series (possibly #' adjusted to have zero mean). #' #' @param object An object containing a time series model of class \code{Arima}. #' @return A \code{ts} object #' @author Rob J Hyndman #' @seealso \code{\link{residuals.Arima}}. #' @keywords ts #' #' @export arima.errors <- function(object) { message("Deprecated, use residuals.Arima(object, type='regression') instead") residuals.Arima(object, type = "regression") } # Return one-step fits #' h-step in-sample forecasts for time series models. #' #' Returns h-step forecasts for the data used in fitting the model. #' #' @param object An object of class "\code{Arima}", "\code{bats}", #' "\code{tbats}", "\code{ets}" or "\code{nnetar}". #' @param h The number of steps to forecast ahead. #' @param ... Other arguments. #' @return A time series of the h-step forecasts. 
#' @author Rob J Hyndman & Mitchell O'Hara-Wild #' @seealso \code{\link{forecast.Arima}}, \code{\link{forecast.bats}}, #' \code{\link{forecast.tbats}}, \code{\link{forecast.ets}}, #' \code{\link{forecast.nnetar}}, \code{\link{residuals.Arima}}, #' \code{\link{residuals.bats}}, \code{\link{residuals.tbats}}, #' \code{\link{residuals.ets}}, \code{\link{residuals.nnetar}}. #' @keywords ts #' @aliases fitted.forecast_ARIMA #' @examples #' fit <- ets(WWWusage) #' plot(WWWusage) #' lines(fitted(fit), col='red') #' lines(fitted(fit, h=2), col='green') #' lines(fitted(fit, h=3), col='blue') #' legend("topleft", legend=paste("h =",1:3), col=2:4, lty=1) #' #' @export fitted.Arima <- function(object, h = 1, ...) { if (h == 1) { x <- getResponse(object) if (!is.null(object$fitted)) { return(object$fitted) } else if (is.null(x)) { # warning("Fitted values are unavailable due to missing historical data") return(NULL) } else if (is.null(object$lambda)) { return(x - object$residuals) } else { fits <- InvBoxCox(BoxCox(x, object$lambda) - object$residuals, object$lambda, NULL, object$sigma2) return(fits) } } else { return(hfitted(object = object, h = h, FUN = "Arima", ...)) } } #' @export fitted.forecast_ARIMA <- fitted.Arima # Calls arima from stats package and adds data to the returned object # Also allows refitting to new data # and drift terms to be included. #' Fit ARIMA model to univariate time series #' #' Largely a wrapper for the \code{\link[stats]{arima}} function in the stats #' package. The main difference is that this function allows a drift term. It #' is also possible to take an ARIMA model from a previous call to \code{Arima} #' and re-apply it to the data \code{y}. #' #' See the \code{\link[stats]{arima}} function in the stats package. #' #' @aliases print.ARIMA summary.Arima as.character.Arima #' #' @param y a univariate time series of class \code{ts}. #' @param order A specification of the non-seasonal part of the ARIMA model: #' the three components (p, d, q) are the AR order, the degree of differencing, #' and the MA order. #' @param seasonal A specification of the seasonal part of the ARIMA model, #' plus the period (which defaults to frequency(y)). This should be a list with #' components order and period, but a specification of just a numeric vector of #' length 3 will be turned into a suitable list with the specification as the #' order. #' @param xreg Optionally, a numerical vector or matrix of external regressors, which #' must have the same number of rows as y. It should not be a data frame. #' @param include.mean Should the ARIMA model include a mean term? The default #' is \code{TRUE} for undifferenced series, \code{FALSE} for differenced ones #' (where a mean would not affect the fit nor predictions). #' @param include.drift Should the ARIMA model include a linear drift term? #' (i.e., a linear regression with ARIMA errors is fitted.) The default is #' \code{FALSE}. #' @param include.constant If \code{TRUE}, then \code{include.mean} is set to #' be \code{TRUE} for undifferenced series and \code{include.drift} is set to #' be \code{TRUE} for differenced series. Note that if there is more than one #' difference taken, no constant is included regardless of the value of this #' argument. This is deliberate as otherwise quadratic and higher order #' polynomial trends would be induced. #' @param method Fitting method: maximum likelihood or minimize conditional #' sum-of-squares. 
The default (unless there are missing values) is to use #' conditional-sum-of-squares to find starting values, then maximum likelihood. #' @param model Output from a previous call to \code{Arima}. If model is #' passed, this same model is fitted to \code{y} without re-estimating any #' parameters. #' @param x Deprecated. Included for backwards compatibility. #' @param ... Additional arguments to be passed to \code{\link[stats]{arima}}. #' @inheritParams forecast.ts #' @return See the \code{\link[stats]{arima}} function in the stats package. #' The additional objects returned are \item{x}{The time series data} #' \item{xreg}{The regressors used in fitting (when relevant).} #' \item{sigma2}{The bias adjusted MLE of the innovations variance.} #' #' @export #' #' @author Rob J Hyndman #' @seealso \code{\link{auto.arima}}, \code{\link{forecast.Arima}}. #' @keywords ts #' @examples #' library(ggplot2) #' WWWusage %>% #' Arima(order=c(3,1,0)) %>% #' forecast(h=20) %>% #' autoplot #' #' # Fit model to first few years of AirPassengers data #' air.model <- Arima(window(AirPassengers,end=1956+11/12),order=c(0,1,1), #' seasonal=list(order=c(0,1,1),period=12),lambda=0) #' plot(forecast(air.model,h=48)) #' lines(AirPassengers) #' #' # Apply fitted model to later data #' air.model2 <- Arima(window(AirPassengers,start=1957),model=air.model) #' #' # Forecast accuracy measures on the log scale. #' # in-sample one-step forecasts. #' accuracy(air.model) #' # out-of-sample one-step forecasts. #' accuracy(air.model2) #' # out-of-sample multi-step forecasts #' accuracy(forecast(air.model,h=48,lambda=NULL), #' log(window(AirPassengers,start=1957))) #' Arima <- function(y, order=c(0, 0, 0), seasonal=c(0, 0, 0), xreg=NULL, include.mean=TRUE, include.drift=FALSE, include.constant, lambda=model$lambda, biasadj=FALSE, method=c("CSS-ML", "ML", "CSS"), model=NULL, x=y, ...) { # Remove outliers near ends # j <- time(x) # x <- na.contiguous(x) # if(length(j) != length(x)) # warning("Missing values encountered. 
Using longest contiguous portion of time series") series <- deparse(substitute(y)) origx <- y if (!is.null(lambda)) { x <- BoxCox(x, lambda) lambda <- attr(x, "lambda") if (is.null(attr(lambda, "biasadj"))) { attr(lambda, "biasadj") <- biasadj } } if (!is.null(xreg)) { if(!is.numeric(xreg)) stop("xreg should be a numeric matrix or a numeric vector") xreg <- as.matrix(xreg) if (is.null(colnames(xreg))) { colnames(xreg) <- if (ncol(xreg) == 1) "xreg" else paste("xreg", 1:ncol(xreg), sep = "") } } if (!is.list(seasonal)) { if (frequency(x) <= 1) { seasonal <- list(order = c(0, 0, 0), period = NA) if(length(x) <= order[2L]) stop("Not enough data to fit the model") } else { seasonal <- list(order = seasonal, period = frequency(x)) if(length(x) <= order[2L] + seasonal$order[2L] * seasonal$period) stop("Not enough data to fit the model") } } if (!missing(include.constant)) { if (include.constant) { include.mean <- TRUE if ((order[2] + seasonal$order[2]) == 1) { include.drift <- TRUE } } else { include.mean <- include.drift <- FALSE } } if ((order[2] + seasonal$order[2]) > 1 & include.drift) { warning("No drift term fitted as the order of difference is 2 or more.") include.drift <- FALSE } if (!is.null(model)) { tmp <- arima2(x, model, xreg = xreg, method = method) xreg <- tmp$xreg tmp$fitted <- NULL tmp$lambda <- model$lambda } else { if (include.drift) { xreg <- `colnames<-`(cbind(drift = 1:length(x), xreg), make.unique(c("drift", if(is.null(colnames(xreg)) && !is.null(xreg)) rep("", NCOL(xreg)) else colnames(xreg)))) } if (is.null(xreg)) { suppressWarnings(tmp <- stats::arima(x = x, order = order, seasonal = seasonal, include.mean = include.mean, method = method, ...)) } else { suppressWarnings(tmp <- stats::arima(x = x, order = order, seasonal = seasonal, xreg = xreg, include.mean = include.mean, method = method, ...)) } } # Calculate aicc & bic based on tmp$aic npar <- length(tmp$coef[tmp$mask]) + 1 missing <- is.na(tmp$residuals) firstnonmiss <- head(which(!missing),1) lastnonmiss <- tail(which(!missing),1) n <- sum(!missing[firstnonmiss:lastnonmiss]) nstar <- n - tmp$arma[6] - tmp$arma[7] * tmp$arma[5] tmp$aicc <- tmp$aic + 2 * npar * (nstar / (nstar - npar - 1) - 1) tmp$bic <- tmp$aic + npar * (log(nstar) - 2) tmp$series <- series tmp$xreg <- xreg tmp$call <- match.call() tmp$lambda <- lambda tmp$x <- origx # Adjust residual variance to be unbiased if (is.null(model)) { tmp$sigma2 <- sum(tmp$residuals ^ 2, na.rm = TRUE) / (nstar - npar + 1) } out <- structure(tmp, class = c("forecast_ARIMA", "ARIMA", "Arima")) out$fitted <- fitted.Arima(out) out$series <- series return(out) } # Refits the model to new data x arima2 <- function(x, model, xreg, method) { use.drift <- is.element("drift", names(model$coef)) use.intercept <- is.element("intercept", names(model$coef)) use.xreg <- is.element("xreg", names(model$call)) sigma2 <- model$sigma2 if (use.drift) { driftmod <- lm(model$xreg[, "drift"] ~ I(time(as.ts(model$x)))) newxreg <- driftmod$coefficients[1] + driftmod$coefficients[2] * time(as.ts(x)) if (!is.null(xreg)) { origColNames <- colnames(xreg) xreg <- cbind(newxreg, xreg) colnames(xreg) <- c("drift", origColNames) } else { xreg <- as.matrix(data.frame(drift = newxreg)) } use.xreg <- TRUE } if (!is.null(model$xreg)) { if (is.null(xreg)) { stop("No regressors provided") } if (ncol(xreg) != ncol(model$xreg)) { stop("Number of regressors does not match fitted model") } } if (model$arma[5] > 1 & sum(abs(model$arma[c(3, 4, 7)])) > 0) # Seasonal model { if (use.xreg) { refit <- Arima( x, order = 
model$arma[c(1, 6, 2)], seasonal = list(order = model$arma[c(3, 7, 4)], period = model$arma[5]), include.mean = use.intercept, xreg = xreg, method = method, fixed = model$coef ) } else { refit <- Arima( x, order = model$arma[c(1, 6, 2)], seasonal = list(order = model$arma[c(3, 7, 4)], period = model$arma[5]), include.mean = use.intercept, method = method, fixed = model$coef ) } } else if (length(model$coef) > 0) # Nonseasonal model with some parameters { if (use.xreg) { refit <- Arima(x, order = model$arma[c(1, 6, 2)], xreg = xreg, include.mean = use.intercept, method = method, fixed = model$coef) } else { refit <- Arima(x, order = model$arma[c(1, 6, 2)], include.mean = use.intercept, method = method, fixed = model$coef) } } else { # No parameters refit <- Arima(x, order = model$arma[c(1, 6, 2)], include.mean = FALSE, method = method) } refit$var.coef <- matrix(0, length(refit$coef), length(refit$coef)) if (use.xreg) { # Why is this needed? refit$xreg <- xreg } refit$sigma2 <- sigma2 return(refit) } # Modified version of function print.Arima from stats package #' @export print.forecast_ARIMA <- function(x, digits=max(3, getOption("digits") - 3), se=TRUE, ...) { cat("Series:", x$series, "\n") cat(arima.string(x, padding = FALSE), "\n") if (!is.null(x$lambda)) { cat("Box-Cox transformation: lambda=", x$lambda, "\n") } # cat("\nCall:", deparse(x$call, width.cutoff=75), "\n", sep=" ") # if(!is.null(x$xreg)) # { # cat("\nRegression variables fitted:\n") # xreg <- as.matrix(x$xreg) # for(i in 1:3) # cat(" ",xreg[i,],"\n") # cat(" . . .\n") # for(i in 1:3) # cat(" ",xreg[nrow(xreg)-3+i,],"\n") # } if (length(x$coef) > 0) { cat("\nCoefficients:\n") coef <- round(x$coef, digits = digits) if (se && NROW(x$var.coef)) { ses <- rep.int(0, length(coef)) ses[x$mask] <- round(sqrt(diag(x$var.coef)), digits = digits) coef <- matrix(coef, 1L, dimnames = list(NULL, names(coef))) coef <- rbind(coef, s.e. = ses) } # Change intercept to mean if no regression variables j <- match("intercept", colnames(coef)) if (is.null(x$xreg) & !is.na(j)) { colnames(coef)[j] <- "mean" } print.default(coef, print.gap = 2) } cm <- x$call$method cat("\nsigma^2 = ", format(x$sigma2, digits = digits), sep="") if(!is.na(x$loglik)) cat(": log likelihood = ", format(round(x$loglik, 2L)), sep = "") cat("\n") if (is.null(cm) || cm != "CSS") { if(!is.na(x$aic)) { npar <- length(x$coef[x$mask]) + 1 missing <- is.na(x$residuals) firstnonmiss <- head(which(!missing),1) lastnonmiss <- tail(which(!missing),1) n <- lastnonmiss - firstnonmiss + 1 nstar <- n - x$arma[6] - x$arma[7] * x$arma[5] bic <- x$aic + npar * (log(nstar) - 2) aicc <- x$aic + 2 * npar * (nstar / (nstar - npar - 1) - 1) cat("AIC=", format(round(x$aic, 2L)), sep = "") cat(" AICc=", format(round(aicc, 2L)), sep = "") cat(" BIC=", format(round(bic, 2L)), "\n", sep = "") } } invisible(x) } #' Return the order of an ARIMA or ARFIMA model #' #' Returns the order of a univariate ARIMA or ARFIMA model. #' #' #' @param object An object of class \dQuote{\code{Arima}}, \dQuote{\code{ar}} or #' \dQuote{\code{fracdiff}}. Usually the result of a call to #' \code{\link[stats]{arima}}, \code{\link{Arima}}, \code{\link{auto.arima}}, #' \code{\link[stats]{ar}}, \code{\link{arfima}} or #' \code{\link[fracdiff]{fracdiff}}. #' @return A numerical vector giving the values \eqn{p}, \eqn{d} and \eqn{q} of #' the ARIMA or ARFIMA model.
For a seasonal ARIMA model, the returned vector #' contains the values \eqn{p}, \eqn{d}, \eqn{q}, \eqn{P}, \eqn{D}, \eqn{Q} and #' \eqn{m}, where \eqn{m} is the period of seasonality. #' @author Rob J Hyndman #' @seealso \code{\link[stats]{ar}}, \code{\link{auto.arima}}, #' \code{\link{Arima}}, \code{\link[stats]{arima}}, \code{\link{arfima}}. #' @keywords ts #' @examples #' WWWusage %>% auto.arima %>% arimaorder #' #' @export arimaorder <- function(object) { if (is.element("Arima", class(object))) { order <- object$arma[c(1, 6, 2, 3, 7, 4, 5)] names(order) <- c("p", "d", "q", "P", "D", "Q", "Frequency") seasonal <- (order[7] > 1 & sum(order[4:6]) > 0) if (seasonal) { return(order) } else { return(order[1:3]) } } else if (is.element("ar", class(object))) { return(c("p" = object$order, "d" = 0, "q" = 0)) } else if (is.element("fracdiff", class(object))) { return(c("p" = length(object$ar), "d" = object$d, "q" = length(object$ma))) } else { stop("object not of class Arima, ar or fracdiff") } } #' @export as.character.Arima <- function(x, ...) { arima.string(x, padding = FALSE) } #' @rdname is.ets #' @export is.Arima <- function(x) { inherits(x, "Arima") } #' @rdname fitted.Arima #' @export fitted.ar <- function(object, ...) { getResponse(object) - residuals(object) } forecast/R/bootstrap.R0000644000176200001440000000720114323125536014425 0ustar liggesusers# Bootstrap functions # Trend estimation like STL without seasonality. # Non-robust version tl <- function(x, ...) { x <- as.ts(x) tspx <- tsp(x) n <- length(x) tt <- 1:n fit <- supsmu(tt, x) out <- ts(cbind(trend = fit$y, remainder = x - fit$y)) tsp(out) <- tsp(x) out <- structure(list(time.series = out), class = "stl") return(out) } # Function to return some bootstrap samples of x # based on LPB lpb <- function(x, nsim=100) { n <- length(x) meanx <- mean(x) y <- x - meanx gamma <- wacf(y, lag.max = n)$acf[, , 1] s <- length(gamma) Gamma <- matrix(1, s, s) d <- row(Gamma) - col(Gamma) for (i in 1:(s - 1)) Gamma[d == i | d == (-i)] <- gamma[i + 1] L <- t(chol(Gamma)) W <- solve(L) %*% matrix(y, ncol = 1) out <- ts(L %*% matrix(sample(W, n * nsim, replace = TRUE), nrow = n, ncol = nsim) + meanx) tsp(out) <- tsp(x) return(out) } # Bootstrapping time series (based on Bergmeir et al., 2016, IJF paper) # Author: Fotios Petropoulos MBB <- function(x, window_size) { bx <- array(0, (floor(length(x) / window_size) + 2) * window_size) for (i in 1:(floor(length(x) / window_size) + 2)) { c <- sample(1:(length(x) - window_size + 1), 1) bx[((i - 1) * window_size + 1):(i * window_size)] <- x[c:(c + window_size - 1)] } start_from <- sample(0:(window_size - 1), 1) + 1 bx[start_from:(start_from + length(x) - 1)] } #' Box-Cox and Loess-based decomposition bootstrap. #' #' Generates bootstrapped versions of a time series using the Box-Cox and #' Loess-based decomposition bootstrap. #' #' The procedure is described in Bergmeir et al. Box-Cox decomposition is #' applied, together with STL or Loess (for non-seasonal time series), and the #' remainder is bootstrapped using a moving block bootstrap. #' #' @param x Original time series. #' @param num Number of bootstrapped versions to generate. #' @param block_size Block size for the moving block bootstrap. #' @return A list with bootstrapped versions of the series. The first series in #' the list is the original series. #' @author Christoph Bergmeir, Fotios Petropoulos #' @seealso \code{\link{baggedETS}}. #' @references Bergmeir, C., R. J. Hyndman, and J. M. Benitez (2016). 
Bagging #' Exponential Smoothing Methods using STL Decomposition and Box-Cox #' Transformation. International Journal of Forecasting 32, 303-312. #' @keywords ts #' @examples #' bootstrapped_series <- bld.mbb.bootstrap(WWWusage, 100) #' #' @export bld.mbb.bootstrap <- function(x, num, block_size=NULL) { if(length(x) <= 1L) return(rep(list(x), num)) freq <- frequency(x) if(length(x) <= 2*freq) freq <- 1L if (is.null(block_size)) { block_size <- ifelse(freq > 1, 2 * freq, min(8, floor(length(x) / 2))) } xs <- list() xs[[1]] <- x # the first series is the original one if (num > 1) { # Box-Cox transformation if (min(x) > 1e-6) { lambda <- BoxCox.lambda(x, lower = 0, upper = 1) } else { lambda <- 1 } x.bc <- BoxCox(x, lambda) lambda <- attr(x.bc, "lambda") if (freq > 1) { # STL decomposition x.stl <- stl(ts(x.bc, frequency = freq), "per")$time.series seasonal <- x.stl[, 1] trend <- x.stl[, 2] remainder <- x.stl[, 3] } else { # Loess trend <- 1:length(x) suppressWarnings( x.loess <- loess(ts(x.bc, frequency = 1) ~ trend, span = 6 / length(x), degree = 1) ) seasonal <- rep(0, length(x)) trend <- x.loess$fitted remainder <- x.loess$residuals } } # Bootstrap some series, using MBB for (i in 2:num) { xs[[i]] <- ts(InvBoxCox(trend + seasonal + MBB(remainder, block_size), lambda)) tsp(xs[[i]]) <- tsp(x) } xs } forecast/NEWS.md0000644000176200001440000012524214474112007013164 0ustar liggesusers# forecast 8.21.1 * nnetar now allows p or P to be 0 * Bug fixes and improved docs # forecast 8.21 * Fixed df calculation for Ljung-Box tests in checkresiduals * Fixed some broken tests # forecast 8.20 * Improvements to unit tests, and migrate to testthat 3e * Prevent failure in C23 mode # forecast 8.19 * Bug fixes # forecast 8.18 * Updated RW forecasts to use an unbiased estimate of sigma2 * Bug fixes # forecast 8.17.0 * Updated dm.test() to add alternative variance estimators. (#898) * Added `simulate.tbats()` for simulating from TBATS models. * Added dependency on generics for accuracy() and forecast() (#902) * Bux fixes # forecast 8.16 * Fixed `tslm()` incorrectly applying Box-Cox transformations when an `mts` is provided to the `data` argument (#886). * Set D=0 when auto.arima applied to series with 2m observations or fewer. * Improved performance of parallel search of ARIMA models (jonlachmann, #891). * Fixed scoping of functions used in `ggAcf()` (#896). * Fixed checks on xreg in `simulate.Arima()` (#818) * Improved docs and bug fixes. # forecast 8.15 * Changed `summary()` methods to defer console output until `print()` * Changed default `s.window` values for `mstl()`, `stlf()` and `stlm()`. The new defaults are based on extensive empirical testing. # forecast 8.14 * Changed default `BoxCox(lambda = "auto")` lower bound to -0.9. * Use better variance estimates for `ets()` bias adjustments. * Improved robustness of `autoplot.seas()` for non-seasonal decomposition. * Fixed scoping of parameters in `auto.arima(parallel = TRUE)` (#874). * Fixed handling of `xreg` in `tsCV()`. # forecast 8.13 * Fixed forecasts from Arima with drift with initial NAs. * Fixed season colours in `gglagplot()` to match y-axis (original data). * Fixed facet order for classical decomposition `autoplot()` * Fixed `summary()` erroring for `tslm()` models containing NA values. # forecast 8.12 * Fixed bias adjusted forecast mean for ARIMA forecasts. * Improved naming of `accuracy()` generic formals. * Fix seasonal periods for `taylor` dataset. 
# forecast 8.11 * The axis for `gglagplot()` have been reversed for consistency with `stats::lag.plot()`. # forecast 8.10 * Updates to remove new CRAN errors * Bug fixes # forecast 8.9 * Updates for CRAN policies on Suggests packages * Bug fixes # forecast 8.8 * Updates for compatibility with fable * Bug fixes # forecast 8.7 * Documentation improvements * Bug fixes # forecast 8.6 * Reduced conflicts with tidy forecasting packages * Forecast autoplots now use same colour shading as autolayer() and geom_forecast * Documentation improvements * Bug fixes # forecast 8.5 * Updated tsCV() to handle exogenous regressors * Reimplemented lagwalk methods (naive, snaive, rwf) for speed improvements * Added support for passing arguments to auto.arima() unit root tests * Improved auto.arima() stepwise search algorithm * Documentation improvements * Bug fixes # forecast 8.4 * Added modelAR(), generalising nnetar() to support user-defined functions * Added na.action argument to ets * Documentation improvements * Bug fixes # forecast 8.3 * Added mstl() to handle multiple seasonal decomposition * stlf(), stlm(), tsoutliers() and tsclean() all now use mstl(). * Updated tsCV() to handle multiple horizons * Switched unit root tests in ndiffs() to use urca package * Added ocsb.test * Changed method for choosing D in auto.arima() to a measure of seasonal strength. * Added baggedModel() function to generalize baggedETS * Added bootstrapped PI to more functions * Allowed lambda='auto' for all functions with lambda argument. * Updated author list to include all major contributors * Documentation improvements * Bug fixes # forecast 8.2 * Added pkgdown site * Added rolling window option to tsCV * Improved robustness to short time series and missing values * Bug fixes # forecast 8.1 * Added as.character.ets, as.character.bats, as.character.tbats * Made gghistogram() and checkresiduals() robust to missing values * All documentation now generated using roxygen * Improved documentation for many functions * Added autoplot.msts() and autolayer.msts * Added as.character methods for many models to generate model names * Added as.ts.forecast * autoplot method for bats/tbats models * Better ARIMA trace output * Made accuracy an S3 method * Bug fixes # forecast 8.0 * Added tips to start up message * Added pipe operator * Added tsCV() and CVar() functions * Added baggedETS * Added head.ts() and tail.ts(), so head and tail now work properly on ts objects. * Added gghistogram() and checkresiduals * Added ggseasonplot with polar coordinates * Modified defaults for gglagplot * Added autolayer.ts * Added type argument to residuals() for different types of residuals * Added support for seas objects from the seasonal package * Component extraction for seasonal decomposition methods * Range bars for decomposition autoplots * Added autoplot.StructTS * Added vignette based on 2008 JSS article by Hyndman and Khandakar * Improved ggplot functions * mforecast objects re-structured * Added as.data.frame.mforecast * autoplot functions now exported * Refit support for arfima() and stlm * Better bias adjustment support after Box-Cox transformation * print.ARIMA has better labelling of constants * Bug fixes * Removed fortify method for forecast objects # forecast 7.3 * Added prediction intervals and simulation for nnetar(). * Documentation improvement * Bug fixes # forecast 7.2 * Faceting for autoplot.mts * Box-Cox support for ses, holt, hw * ets() now works for tiny time series * Added h-step fitted values in fitted() function. 
* Seasonal adjustment added to thetaf()
* y is now the standard first argument in all modelling functions
* Added truncate argument to auto.arima()
* seasadj() is now an S3 method
* Series with frequency < 1 and non-integer seasonality are now handled better
* ggplot2 theme support
* Added gglagplot() and gglagchull()
* Arima() and auto.arima() now allow any argument to be passed to stats::arima().
* Bug fixes and speed improvements

# forecast 7.1

* Fixed bug in auto.arima() where the Box-Cox transformation was sometimes applied twice
* Improved axes for ggseasonplot()
* Improved tslm() to avoid some problems finding data
* nnetar() updated to allow subsets
* Modified initial values for ets()
* Improved unit tests to avoid deprecated functions and to avoid data from fpp
* Removed fpp from Suggests list

# forecast 7.0

* Added ggplot2 graphics
* Bias adjustment option added for all functions that allow Box-Cox transformations
* Added Ccf() function, and rewrote Acf() to handle multivariate series.
* tslm() completely rewritten to be more robust and to handle fourier terms more easily
* Support for multivariate linear models added
* subset.ts() more robust, and captures some errors.
* Added xreg argument to nnetar()
* Improved labels in seasonplot()
* More unit tests added
* Documentation improvements
* Bug fixes

# forecast 6.2

* Many unit tests added using testthat.
* Fixed bug in ets() when very short seasonal series were passed in a data frame.
* Fixed bug in nnetar() where the initial predictor vector was reversed.
* Corrected model name returned in nnetar().
* Fixed bug in accuracy() when non-integer seasonality is used.
* Made auto.arima() robust to non-integer seasonality.
* Fixed bug in auto.arima() where allowmean was ignored when stepwise=FALSE.
* Improved robustness of forecast.ets() for explosive models with multiplicative trends.
* Exogenous variables now passed to VAR forecasts
* Increased maximum nmse in ets() to 30.
* Made tsoutliers() more robust to weak seasonality
* Changed tsoutliers() to use supsmu on non-seasonal and seasonally adjusted data.
* Fixed bug in tbats() when seasonal period 1 is a small multiple of seasonal period 2.
* Other bug fixes

# forecast 6.1

* Made auto.arima() more robust

# forecast 6.0

* Modified dm.test() to give an error when the variance is zero
* Corrected help file for splinef().
* Fixed typo in the accuracy() help file regarding RMSE
* Fixed bug in accuracy() which occurred with Arima and ets objects.
* Fixed arima.errors() to handle Box-Cox transformed models.
* Modified auto.arima() to be stricter on near-unit-roots.
* Added allowmean argument in auto.arima().
* Improved handling of constant series in Arima() and forecast.Arima().
* Added plot.Arima() and plot.ar() functions.
* Added as.character.Arima
* Captured problem in bats/tbats where data are constant.
* Modified TBATS and BATS estimation to avoid occasional instabilities.
* Fixed bug in forecasts from bats() which labelled them as TBATS.
* Added allow.multiplicative.trend argument to ets().
* Set allow.multiplicative.trend=FALSE in stlf(), stlm() and forecast.ts().
* Simplified arguments in stlf().
* Added taperedacf() and taperedpacf() functions
* Added functions for bootstrapping time series

# forecast 5.9

* Improved documentation of the accuracy() function.
* Fixed occasional bug in accuracy() when the test set is a single observation.
* Improved Acf() to give better handling of the horizontal axis for seasonal data or when ... is passed (see the sketch below).
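  For example (a hedged sketch; `wineind` ships with the package):

  ```r
  library(forecast)
  # For seasonal ts objects, Acf() labels the lag axis in natural units,
  # which is the horizontal-axis handling improved here.
  Acf(wineind, lag.max = 48)
  ```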
* Removed print.Arima and predict.Arima, and added print.ARIMA
* The method argument is now passed when re-fitting an ARIMA model.
* Fixed error when the CH test is applied to short series

# forecast 5.8

* Fixed bug in versions of R before 3.1.0 when using fourier() and fourierf().
* Made BoxCox.lambda() robust to missing values.

# forecast 5.7

* Fixed bug in tbats/bats where optional arguments were not being passed to auto.arima().
* Revised fourier() and fourierf() to avoid large orders, and to avoid zero columns.
* Improved accuracy of fourier() and fourierf(), while simplifying the code.
* Removed duplicate columns returned by fourier/fourierf with multiple seasonal periods.
* Corrected some bugs in simulate.Arima() for models involving xreg.
* Centred simulations from simulate.Arima() for non-stationary models by conditioning on the first observation.
* Added findfrequency() function.
* Fixed error in computed residuals from forecast.stl().
* Improved handling of very short series in auto.arima().
* Fixed error in forecasting with additive damped models. Damping was previously applied only from the second forecast horizon.
* Fixed misuse of abs() in two places in the C code.
* Added na.action argument to Acf() and fixed the na.action argument in tsdisplay().

# forecast 5.6

* Improved tbats() and bats() by ensuring ARMA coefficients are not close to the boundary of invertibility and stationarity.
* Improved nsdiffs() handling of degenerate series (e.g., all zeros).
* Improved forecast.ar() when the function is buried within other functions.
* Improved handling of degenerate ARIMA models when xreg is used.
* More robust ets() initialization.
* Fixed problem in na.interp() with seasonal data having frequency <= 5.
* Removed undocumented option to use Rmalschains for optimization of ets().

# forecast 5.5

* Improved documentation for croston()
* Added stlm() and forecast.stlm() functions, and added a forecastfunction argument as a way of specifying a forecast method in stlf() and forecast.stl().
* Improved forecast.ar() so that it is more likely to work if ar() and forecast.ar() are embedded within other functions.
* Improved handling of ARIMA models with seasonality greater than 48
* Improved handling of some degenerate regression models in nsdiffs()
* Changed AIC for poor models from 1e20 to Inf.
* Updated fourier() and fourierf() to work with msts objects.
* Added a new argument find.frequency to forecast.ts().
* Added new arguments d and D to accuracy() for MASE.
* Corrected bugs in accuracy().
* Better handling of regression models with a perfect fit in auto.arima().
* Fixed bug in tbats.components() when there are no seasonal components.

# forecast 5.4

* Fixed bug in forecast.tbats() and forecast.bats() when ts.frequency does not match seasonal.periods.
* Fixed bug in getResponse.lm() when there's a logged dependent variable.
* Modified ets() to avoid problems when the data contain large numbers.
* Modified ets() to produce forecasts when the data are constant.
* Improved arima.errors() to find xreg more often, and to return an error if it can't be found.

# forecast 5.3

* Unit tests added
* Fixed bug in zzhw() which reversed the sign of the residuals.
* Updated help file for CV() to specify it is only leave-one-out.
* Fixed guer.cv() to allow non-integer periods without warning.
* Added use.initial.values argument in ets().
* Added arimaorder() function.
* Modified warning suppression by using suppressWarnings() throughout.

# forecast 5.2

* Changed the default number of cores to 2 for all functions that use parallel processing (see the sketch below).
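  A hedged usage sketch (the parallel search only applies when stepwise = FALSE):

  ```r
  library(forecast)
  fit <- auto.arima(USAccDeaths, stepwise = FALSE,
                    parallel = TRUE, num.cores = 2)
  ```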
* Removed the remaining call to bats() from examples that are run.

# forecast 5.1

* Fixed bug in tsoutliers() and tsclean() with very short seasonal series.
* Fixed bug in Arima() when the seasonal order is specified numerically instead of via a list.
* Removed dimension attribute from the output of arima.errors()
* Improved handling of "test" in accuracy()
* Changed parallel processing to parLapply for auto.arima()
* Added timeDate dependency to avoid errors in easter(), and link to Rcpp >= 0.11.0.

# forecast 5.0

* Added argument model to dshw().
* Added bizdays() and easter() for calendar variables.
* Added arguments max.D and max.d to auto.arima(), ndiffs() and nsdiffs().
* Made several functions more robust to zoo objects.
* Corrected an error in the calculation of AICc when using CV().
* Made the minimum default p in nnetar() equal to 1.
* Added tsoutliers() and tsclean() for identifying and replacing outliers
* Improved na.interp() to handle seasonality, and added a lambda argument to na.interp()
* Added robust option to forecast.ts() to allow outliers and missing values
* Improved output from snaive() and naive() to better reflect user expectations
* Allowed Acf() to handle missing values by using na.contiguous
* Changed the default information criterion in ets() to AICc.
* Removed the drift term in Arima() when d+D>1.
* Added bootstrap option to forecast.Arima()

# forecast 4.8

* Fixed bug in rwf() that was introduced in v4.7

# forecast 4.7

* Added forecast.forecast() to simply return the object that is passed.
* Removed the leading zero in the version number, i.e., 4.7 instead of 4.07.
* Better handling of nearly constant and nearly linear time series
* Improved handling of missing values in rwf()
* Corrected fitted values and residuals in meanf() for time series data
* bats() and tbats() now handle missing values in the same way as ets(), i.e., using the longest contiguous portion.
* Better handling of very short time series
* Initial states for ets() modified for very short time series (less than 3 years).
* nsdiffs() with the CH test now handles degenerate cases without returning an error.
* nnetar() now handles missing values
* Fixed bug in forecast.varest() so that residuals and fitted values are computed correctly.
* Added accuracy() calculation for VAR models
* Fixed a bug in simulate.fracdiff() when future=TRUE. Sometimes the future argument was being ignored.

# forecast 4.06

* accuracy() was returning MAPE and MPE values 100 times too large for in-sample errors.

# forecast 4.05

* Fixed bug in hw() so it works when initial="simple"
* Allowed bats() and tbats() to take non-positive values.
* ets() now calls optim directly via C code, making ets() run much faster.
* Added Rmalschains as a possible optimizer in ets(). Not documented.
* Modified forecast.lm() so it is more likely that the original data are stored in the returned object.
* Corrected bug in forecast.Arima() that occurred when a Box-Cox transformation was used with bootstrap=TRUE.
* accuracy() updated so that it gives more information, and returns a matrix of both test and training measures.
* Corrected training error measures for splinef() forecasts.

# forecast 4.04

* Added ylim argument to Acf()
* Avoided clash with the signal package when using auto.arima().
* Fixed problem in plot.forecast() when all historical data are NA or when there are no available historical data.
* forecast.Arima() is now a little more robust if a zoo object is passed instead of a ts object.
* CV() now handles missing values in the residuals (see the sketch below).
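  A minimal sketch of CV() on a time series regression (illustrative only):

  ```r
  library(forecast)
  fit <- tslm(USAccDeaths ~ trend + season)
  CV(fit)  # leave-one-out CV statistic plus AIC, AICc, BIC and adjusted R^2
  ```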
* Fixed bug in holt() and hw() so that the printed model no longer contains missing values.

# forecast 4.03

* forecast.lm() now guesses the variable name if there is only one predictor variable.
* Removed the error trap in forecast.lm() when no xreg variables are passed, as it was catching legitimate calls.

# forecast 4.02

* Fixed error in the prediction intervals returned by forecast.ets() when simulation was used and a Box-Cox transformation was specified.
* Fixed bug in accuracy() when a numerical f vector was passed.
* Fixed man file for the Diebold-Mariano test.
* Corrected references on the nsdiffs() help page.
* Added a warning to nsdiffs() when the series is too short for seasonal differencing.
* Fixed problem in getResponse.Arima() when the Arima object was created by stats::arima() from within a function.
* Added tbats.components() and extended seasadj() to allow tbats objects.
* Added undocumented functions for forecasting, printing and plotting output from vars::VAR.

# forecast 4.01

* An error is now trapped when newxreg variables are not passed to forecast.lm()
* Corrected the help file for dshw() to remove references to prediction intervals.
* Improved the help file for dm.test() to give more information about the alternative hypotheses.
* Improved dm.test() performance for small samples by using a t-distribution instead of a normal distribution.
* Modified bats() and tbats() examples to follow CRAN policies on parallel processing.
* Moved some packages from Depends to Imports.
* Added getResponse() function to return the historical time series from various time series model objects.
* Modified accuracy() to use getResponse().
* Allowed user-generated innovations in simulate.ets(), simulate.Arima(), etc.
* Allowed xreg argument in forecast.stl() and stlf() when an ARIMA model is used.
* Removed reliance on caret, and associated fitted and residuals functions.

# forecast 4.00

* More robust handling of degenerate ARIMA models.
* New defaults for the shaded colors used for prediction intervals in plots.
* auto.arima() now remembers the name of the series when a Box-Cox transformation is used.
* New function nnetar() for automatic neural network forecasting of time series.
* arfima() now tries harder to ensure the ARMA part is stationary.
* ts control added for forecasts of linear models in forecast.lm().
* Fixed bug in bats() which caused an error when use.box.cox=FALSE and use.trend=FALSE.
* Added residuals and fitted methods for train and avNNet objects from the caret package.
* accuracy() can now figure out overlapping times for x and f.
* rwf() now handles missing values.
* Revised ses(), holt() and hw() so that they can optionally use traditional initialization.

# forecast 3.25

* Fixed bug in simulate.Arima().
* Improved handling of short seasonal time series in auto.arima().
* Added seasonal argument to auto.arima().
* Fixed bug in splinef(), and added a GCV method for estimating the smoothing parameter.

# forecast 3.24 (23 July 2012)

* Fixed bug in auto.arima() introduced in v3.23, which meant an ARIMA(0,0,0) model was returned about half the time.

# forecast 3.23

* Fixed bug in arfima() which meant the drange argument was being ignored.
* Extended auto.arima() so it returns something sensible when the data are constant.

# forecast 3.22

* Increased the maximum forecast horizon for ets models from 2000 to unlimited.
* Corrected bug in Arima(). Previously include.constant=FALSE was ignored.
* Some corrections to bats() and tbats().
* Modified the parallel implementation in auto.arima() for Windows.

# forecast 3.21

* Fixed bug in auto.arima() when lambda is non-zero and stepwise is FALSE (see the sketch below).
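  The combination concerned, as a hedged sketch (not from the package documentation):

  ```r
  library(forecast)
  # A Box-Cox transformation together with a full (non-stepwise) search
  fit <- auto.arima(AirPassengers, lambda = 0, stepwise = FALSE)
  ```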
* Fixed bug in auto.arima() in selecting d when D>0.
* Fixed bug in ets() when the seasonal period is less than 1.
* Turned off warnings in auto.arima() and ets() when the seasonal period is less than 1.
* Added plotting methods for bats and tbats objects.
* Changed default forecast horizons for bats and tbats objects.
* Modified bats() and tbats() so they now use seasonal.periods when ts and msts objects are being modelled.

# forecast 3.20

* Fixed bugs in forecast.lm().
* Improved handling of newdata in forecast.lm() to provide more meaningful error messages.
* Fixed bug in dm.test() that occurred when errors were very small.

# forecast 3.19

* Improved plotting of forecast objects from lm models
* Added MASE for lm forecasts, using in-sample mean forecasts for scaling.
* Modified the definition of MASE for seasonal time series to use seasonal naive in-sample scaling.
* Modified meanf() to allow it to be used with cross-sectional data.
* Updated accuracy() to allow it to be used with cross-sectional data, lm forecasts and lm objects.

# forecast 3.18

* Added a method for plotting non-time-series forecasts to plot.forecast().
* Removed partial argument matching.
* Cleaned up some code, removing commented-out sections, etc.
* Added robust option to stlf().
* Added naive and rwdrift options to stlf() and forecast.stl().
* Improved handling of msts objects in BoxCox.lambda()
* Fixed some minor bugs in tbats() and bats()
* Improved the speed of bats() and tbats().

# forecast 3.17

* Improved forecast.lm() so it is more likely to find the original data from an lm object.
* Parallel processing now available in auto.arima() when stepwise=FALSE
* Default model selection in auto.arima() changed to AICc rather than AIC. This may affect model selection for very short time series.
* Maximum orders in auto.arima() now restricted to be less than 1/3 of the length of the data.

# forecast 3.16

* Corrected a problem with the AIC computation in bats() and tbats()
* Fixed handling of non-seasonal data in bats()
* Changed dependency to R >= 2.14.0 in order to ensure the parallel package is available.

# forecast 3.15

* New functions tbats() and forecast.tbats() for multiple seasonal time series modelling.
* bats() and tbats() use parallel processing when possible.
* Minor improvements to bats() and forecast.bats().
* decompose() removed, as the function in the stats package has now been fixed.

# forecast 3.14

* Improved documentation for forecast.ts()
* Corrected bug in dshw() when applied to a non-ts object.
* Added an error message when dshw() is applied to data containing zeros or negative values
* Added checks when dshw() is applied to time series with non-nested periods.
* Added the msts object class for multiple seasonal time series
* Made the taylor data set an msts object.
* Added the bats() function for multiple seasonal time series modelling
* Added the forecast.bats() function for forecasting BATS models
* Byte compilation turned on
* Now depends on Rcpp and RcppArmadillo to speed some code up.

# forecast 3.13

* Bug fix for forecast.StructTS() due to changes in the StructTS object. The default h was being set to 0. Thanks to Tarmo Leinonen for reporting this problem.
* Bug fix for forecast.stl(), where h longer than one seasonal period sometimes returned missing forecasts. Thanks to Kevin Burton for reporting this problem.
* forecast.stl() no longer allows a seasonal ETS model to be specified. Thanks to Stefano Birmani for the suggestion.

# forecast 3.12

* Added an option to control the ets model in stlf() and forecast.stl() (see the sketch below). Thanks to Stefano Birmani for the suggestion.
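  A hedged sketch using the current interface, where the `etsmodel` argument (the name used in recent releases) restricts the ETS model fitted to the seasonally adjusted series:

  ```r
  library(forecast)
  # "AAN" = additive errors, additive trend, no seasonality
  fc <- stlf(USAccDeaths, h = 24, etsmodel = "AAN")
  ```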
* Reordered the arguments for forecast.lm() and stlf() to be consistent with other forecast functions.
* Modified tslm() so that it is more likely to find the relevant data when it is not passed as an argument.
* Fixed bug in forecast.ets() which returned all-zero forecasts for some models when the seasonal period > 24.

# forecast 3.11

* Fixed bug in dshw() when the smallest period is odd

# forecast 3.10

* Added a lambda argument to naive() and snaive().
* Fixed bug in ets() with high-frequency data.
* Fixed bug in rwf() where incorrect fitted values and residuals were sometimes returned.
* Modified the number of lags displayed by default in tsdisplay().

# forecast 3.09

* Fixed bug causing occasional problems in simulate.Arima() when the MA order is greater than 2 and future=TRUE.

# forecast 3.08

* Bug fix in forecast.stl() which occurred when the forecast horizon is less than the seasonal period.
* Added a lambda argument to forecast.stl().

# forecast 3.07

* Bug fix in ets() concerning non-seasonal models and high-frequency data. It sometimes returned all forecasts equal to zero.

# forecast 3.06

* Switched to useDynLib in preparation for R v2.14.0.

# forecast 3.05

* Fixed bug in ets() which prevented non-seasonal models from being fitted to high-frequency data.

# forecast 3.04

* Fixed bug when drift and xreg are used together in auto.arima() or Arima().

# forecast 3.03

* Bug fix in dshw(), which was using slightly incorrect seasonal estimates for the forecasts
* Bug fix in forecast.StructTS() due to a change in the structure of the StructTS object.
* Better error capture in tslm() when seasonal dummies are specified for non-seasonal data.
* Re-formatted some help files to prevent viewing problems with the PDF manual.

# forecast 3.02

* Bug fixes

# forecast 3.00

* Added Box-Cox parameter as an argument to Arima(), ets(), arfima(), stlf(), rwf(), meanf() and splinef()
* Added Box-Cox parameter as an argument to forecast.Arima(), forecast.ets(), forecast.fracdiff(), forecast.ar(), forecast.StructTS() and forecast.HoltWinters().
* Removed the lambda argument from plot.forecast() and accuracy().
* Added BoxCox.lambda() function to allow automatic choice of the Box-Cox parameter using Guerrero's method or the profile log likelihood method.
* Modified BoxCox() and InvBoxCox() to return missing values when lambda < 0 and data < 0.
* Added nsdiffs() function for selecting the number of seasonal differences.
* Modified selection of seasonal differencing in auto.arima().
* Better error message if a seasonal factor is used in tslm() with non-seasonal data.
* Added PI argument to forecast.ets() to allow only point forecasts to be computed.
* Added include.constant argument to Arima().
* Added subset.ts() function.
* Upgraded the seasonplot() function to allow colors and to fix some bugs.
* Fixed the fitted values returned by forecast.HoltWinters()
* Modified simulate.Arima() because of undocumented changes in the filter() function in the stats package.
* Changed the residuals returned by splinef() to be ordinary residuals. The standardized residuals are now returned as standardizedresiduals.
* Added dshw() function for the double-seasonal Holt-Winters method based on Taylor (2003).
* Fixed further bugs in the decompose() function that caused the results to be incorrect with odd frequencies.

# forecast 2.19

* Added xreg information to the object returned by auto.arima().
* Added Acf(), Pacf(), ma() and CV() functions.
* Fixed bugs in re-fitting ARIMA models to new data.

# forecast 2.18 (19 May 2011)

* Fixed bug in seasonplot() where year labels were sometimes incorrect (see the sketch below).
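A hedged seasonplot() sketch showing the year labels mentioned above:

```r
library(forecast)
# year.labels = TRUE prints the year beside each line of the plot
seasonplot(AirPassengers, year.labels = TRUE, col = rainbow(12))
```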
# forecast 2.17

* Modified simulate.Arima() to handle seasonal ARIMA models.
* Modified ets() to handle missing values. The largest continuous section of data is now modelled.
* Improved plot.forecast() to handle missing values at the end of the observed series.
* Added a replacement decompose() to avoid truncation of the seasonal term and seasonally adjusted series.
* Fixed bug in seasadj() to handle multiplicative decomposition, and to avoid missing values at the ends.

# forecast 2.16

* Changed the way missing values are handled in tslm()

# forecast 2.15

* Added fourier(), fourierf() and tslm()
* Improved forecast.lm() to allow trend and seasonal terms.

# forecast 2.14

* Added forecast.lm()
* Modified accuracy() and print.forecast() to allow non-time-series forecasts.
* Fixed visibility of stlf().

# forecast 2.13

* Fixed bug in accuracy() when only 1 forecast is specified.
* Added forecast.stl() and stlf() functions
* Modified forecast.ts() to use stlf() if frequency > 12.
* Made BoxCox() and InvBoxCox() robust to negative values
* Fixed bug in simulate.Arima() when future=TRUE. There was a bias in the sample paths.

# forecast 2.12

* Added naive() and snaive() functions.
* Improved handling of seasonal data with frequency < 1.
* Added a lambda argument to accuracy().

# forecast 2.11

* If MLE in arfima() fails (usually because the series is non-stationary), the LS estimate is now returned.

# forecast 2.10

* Fixed bug in arfima() where the MA parameters were of the wrong sign if estim="mle" was chosen.
* arfima() is now allowed to have a sequence of missing values at the start and end of the series

# forecast 2.09

* Fixed bug in forecast.fracdiff() which caused an error when h=1.
* Added shadebars to plot.forecast().
* Fixed bug in plot.forecast() to allow plotting when h=1.

# forecast 2.08

* Added a PP test option for auto.arima() and ndiffs().
* Fixed bug in simulate.ets() which was causing problems when forecasting from some ETS models, including ETS(M,M,N).

# forecast 2.07

* Fixed bug in simulate.Arima(). Previous sample paths when d=2 and future=TRUE were incorrect.
* Changed the way colour is implemented in plot.forecast() to avoid colour changes when the graphics window is refreshed.

# forecast 2.06

* Added an MLE option for arfima().
* Added simulate.Arima(), simulate.ar() and simulate.fracdiff()

# forecast 2.05

* Added arfima() and a forecast method to handle ARFIMA models from arfima() and fracdiff().
* Added residuals and fitted methods for fracdiff objects.

# forecast 2.04

* Fixed bug in auto.arima() that occurred rarely.

# forecast 2.03

* Added an option to auto.arima() to allow drift terms to be excluded from the models considered.

# forecast 2.02

* Fixed bug in auto.arima() that occurred when there was an xreg but no drift, approximation=TRUE and stepwise=FALSE.

# forecast 2.01

* Fixed bug in the time index of croston() output.
* Added further explanation about models to the croston() help file.

# forecast 2.00

* Package removed from the forecasting bundle

# forecast 1.26 (29 August 2009)

* Added as.data.frame.forecast(). This allows write.table() to work for forecast objects.

# forecast 1.25 (22 July 2009)

* Added an argument to auto.arima() and ndiffs() to allow the ADF test to be used instead of the KPSS test in selecting the number of differences (see the sketch at the end of this section).
* Added an argument to plot.forecast() to allow different colors and line types when plotting prediction intervals.
* Modified forecast.ts() to give sensible results with a time series containing fewer than four observations.
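A minimal sketch of the unit-root test choice in ndiffs() (hedged; the argument name shown is the one used in current releases):

```r
library(forecast)
ndiffs(WWWusage, test = "kpss") # the default
ndiffs(WWWusage, test = "adf")  # use the ADF test instead
```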
# forecast 1.24 (9 April 2009)

* Fixed bug in dm.test() to avoid errors when there are missing values in the residuals.
* More informative error messages when auto.arima() fails to find a suitable model.

# forecast 1.23 (22 February 2009)

* Fixed bugs that meant xreg terms in auto.arima() sometimes caused errors when stepwise=FALSE.

# forecast 1.22 (30 January 2009)

* Fixed bug that meant regressor variables could not be used with seasonal time series in auto.arima().

# forecast 1.21 (16 December 2008)

* Fixed bugs introduced in v1.20.

# forecast 1.20 (14 December 2008)

* Updated auto.arima() to allow regression variables.
* Fixed a bug in print.Arima() which caused problems when the data were inside a data.frame.
* In forecast.Arima(), the argument h is now set to the length of the xreg argument if it is not null.

# forecast 1.19 (7 November 2008)

* Updated Arima() to allow regression variables when refitting an existing model to new data.

# forecast 1.18 (6 November 2008)

* Bug fix in ets(): models with frequency less than 1 would cause R to hang.
* Bug fix in ets(): models with frequency greater than 12 would not fit due to parameters being out of range.
* Default lower and upper bounds on the parameters α, β and γ in ets() changed to 0.0001 and 0.9999 (instead of 0.01 and 0.99).

# forecast 1.17 (10 October 2008)

* The calculation of BIC did not account for the reduction in the length of the series due to differencing. Now fixed in auto.arima() and in print.Arima().
* tsdiag() now works with ets objects.

# forecast 1.16 (29 September 2008)

* Another bug fix in auto.arima(). Occasionally the root checking would cause an error. The condition is now trapped.

# forecast 1.15 (16 September 2008)

* Bug fix in auto.arima(). The series wasn't always being stored as part of the return object when stepwise=FALSE.

# forecast 1.14 (1 August 2008)

* The time series stored in M3 in the Mcomp package did not contain all the components listed in the help file. This problem has now been fixed.

# forecast 1.13 (16 June 2008)

* Bug in plot.ets() fixed so that plots of non-seasonal models for seasonal data now work.
* Warning added to ets() if the time series contains very large numbers (which can cause numerical problems). Anything up to 1,000,000 should be OK, but any larger and it is best to scale the series first.
* Fixed problem in forecast.HoltWinters() where the lower and upper limits were interchanged.

# forecast 1.12 (22 April 2008)

* Objects are now coerced to class ts in ets(). This allows it to work with zoo objects.
* A new function dm.test() has been added. This implements the Diebold-Mariano test for predictive accuracy.
* Yet more bug fixes for auto.arima().

# forecast 1.11 (8 February 2008)

* Modifications to auto.arima() in the case where ML estimation does not work for the chosen model. Previously this would return no model. Now it returns the model estimated using CSS.
* AIC values reported in auto.arima() when trace=TRUE and approximation=TRUE are now comparable to the final AIC values.
* Addition of the expsmooth package.

# forecast 1.10 (21 January 2008)

* Fixed bug in seasadj() so it allows multiple seasonality
* Fixed another bug in print.Arima()
* Bug fixes in auto.arima(). It was sometimes returning a non-optimal model, and occasionally no model at all. Also, additional stationarity and invertibility testing is now done.

# forecast 1.09 (11 December 2007)

* A new argument 'restrict' has been added to ets() with default TRUE. If set to FALSE, then the unstable ETS models are also allowed (see the sketch below).
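  A hedged sketch (restrict remains an ets() argument in current releases):

  ```r
  library(forecast)
  fit <- ets(USAccDeaths, restrict = FALSE) # also consider the unstable models
  ```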
* A bug in the print.Arima() function was fixed.

# forecast 1.08 (21 November 2007)

* AICc and BIC corrected. Previously I had not taken account of the sigma^2 parameter when computing the number of parameters.
* The arima() function was changed to Arima() to avoid the clash with the arima() function in the stats package.
* auto.arima() now uses an approximation to the likelihood when selecting a model if the series has more than 100 observations or the seasonal period is greater than 12. This behaviour can be overridden via the approximation argument.
* A new function plot.ets() provides a decomposition plot of an ETS model.
* predict() is now an alias for forecast() wherever there is not an existing predict() method.
* The argument conf has been changed to level in all forecasting methods to be consistent with other R functions.
* The functions gof() and forecasterrors() have been replaced by accuracy(), which handles in-sample and out-of-sample forecast accuracy.
* The initialization method used for a non-seasonal ETS model applied to seasonal data was changed slightly.
* The following methods for ets objects were added: summary, coef and logLik.
* The following methods for Arima objects were added: summary.

# forecast 1.07 (25 July 2007)

* Bug fix in the summary of in-sample errors. For ets models with multiplicative errors, the reported in-sample values of MSE, MAPE, MASE, etc., in summary() and gof() were incorrect.
* ARIMA models with frequency greater than 49 are now allowed. But there is no unit-root testing if the frequency is 50 or more, so be careful!
* Improvements in documentation.

# forecast 1.06 (15 June 2007)

* Bug fix in auto.arima(). It would not always respect the stated values of max.p, max.q, max.P and max.Q.
* The tseries package is now installed automatically along with the forecasting bundle, whereas previously it was only suggested.

# forecast 1.05 (28 May 2007)

* Introduced auto.arima() to provide a stepwise approach to ARIMA modelling. This is much faster than the old best.arima().
* The old grid-search method used by best.arima() is still available by using stepwise=FALSE when calling auto.arima().
* Automated choice of seasonal differences introduced in auto.arima().
* Some small changes to the starting values of ets() models.
* Fixed a bug in applying ets() to new data using a previously fitted model.

# forecast 1.04 (30 January 2007)

* Added include.drift to arima()
* Fixed bug in seasonal forecasting with ets()

# forecast 1.03 (20 October 2006)

* Fixed some DOS line feed problems that were bothering Unix users.

# forecast 1.02 (12 October 2006)

* Added an AICc option to ets() and best.arima().
* Corrected a bug in the calculation of fitted values in ets models with multiplicative errors.

# forecast 1.01 (25 September 2006)

* Modified ndiffs() so that the maximum number of differences allowed is 2.

# forecast 1.0 (31 August 2006)

* Added MASE to gof().
* croston() now returns fitted values and residuals.
* arima() no longer allows a linear trend + ARMA errors by default. Also, drift in non-stationary models can be turned off.
* This version is the first to be uploaded to CRAN.

# forecast 0.99992 (8 August 2006)

* Corrections to help files. No changes to functionality.

# forecast 0.99991 (2 August 2006)

* More bug fixes. ets now converges to a good model more often.

# forecast 0.9999 (1 August 2006)

* Mostly bug fixes.
* A few data sets have been moved from fma to forecast as they are not used in my book.
* ets is now considerably slower but gives better results.
  Full optimization is now the only option (which is what slows it down). I had too many problems with poor models when partial optimization was used. I'll work on speeding it up sometime, but this is not a high priority. It is fast enough for most use. If you really need to forecast 1000 series, run it overnight.
* In ets, I've experimented with new starting conditions for the optimization, and it seems to be fairly robust now.
* Multiplicative error models can no longer be applied to series containing zeros or negative values. However, the forecasts from these models are not constrained to be positive.

# forecast 0.999 (27 July 2006)

* The package has been turned into three packages forming a bundle. The functions and a few datasets are still in the forecast package. The data from Makridakis, Wheelwright and Hyndman (1998) is now in the fma package. The M-competition data is now in the Mcomp package. Both fma and Mcomp automatically load forecast.
* This is the first version available on all operating systems (not just Windows).
* pegels has been replaced by ets. ets only fits the model; it doesn't produce forecasts. To get forecasts, apply the forecast function to the ets object.
* ets has been completely rewritten, which makes it slower but much easier to maintain. Different boundary conditions are used and a different optimizer is used, so don't expect the results to be identical to what was done by the old pegels function. To get something like the results from the old pegels function, use forecast(ets()).
* simulate.ets() added to simulate from an ets model.
* Changed the name of cars to auto to avoid a clash with the cars data in the datasets package.
* arima2 functionality is now handled by arima(), and pegels2 functionality is now handled by ets.
* best.arima now allows the option of BIC to be used for model selection.
* Croston's method added in the function croston().
* ts.display renamed as tsdisplay
* mean.f changed to meanf, theta.f changed to thetaf, rw.f changed to rwf, seasonaldummy.f to seasonaldummyf, sindex.f to sindexf, and spline.f to splinef. These changes are to avoid potential problems if anyone introduces an 'f' class.

# forecast 0.994 (4 October 2004)

* Fixed bug in arima which caused predict() to sometimes fail when there was no xreg term.
* More bug fixes in the handling of regression terms in arima models.
* New print.Arima function for more informative output.

# forecast 0.993 (20 July 2004)

* Added a forecast function for structural time series models obtained using StructTS().
* Changed the default parameter space for pegels() to force admissibility.
* Added an option to pegels() to allow restriction to models with finite forecast variance. This restriction is imposed by default.
* Fixed bug in arima.errors(). Changes made to arima() meant arima.errors() was often returning an error message.
* Added a namespace to the package, making fewer functions visible to the user.

# forecast 0.99 (21 May 2004)

* Added automatic selection of the order of differencing for best.arima.
* Added the possibility of a linear trend in arima models.
* In pegels(), an option was added to allow the parameters of an exponential smoothing model to be in the 'admissible' (or invertible) region rather than within the usual (0,1) region.
* Fixed some bugs in pegels.
* Included all M1 and M3 data and some functions to subset and plot them.
* Note: This package will only work in R 1.9 or later.

# forecast 0.98 (23 August 2003)

* Added facilities in pegels:
  - It is now possible to specify particular values of the smoothing parameters rather than always using the optimized values. If none are specified, the optimal values are still estimated as before.
  - It is also possible to specify upper and lower bounds for each parameter separately.
* New function: theta.f. This implements the Theta method, which did very well in the M3 competition.
* A few minor problems with pegels fixed, and a bug in forecast.plot that meant it didn't work when the series contained missing values.

# forecast 0.972 (11 July 2003)

* Small bug fix: pegels did not return the correct model when the model was partially specified.

# forecast 0.971 (10 July 2003)

* Minor fixes to make sure the package will work with R v1.6.x. No changes to functionality.

# forecast 0.97 (9 July 2003)

* Fully automatic forecasting based on the state space approach to exponential smoothing has now been added. For technical details, see Hyndman, Koehler, Snyder and Grose (2002).
* Local linear forecasting using cubic smoothing splines added. For technical details, see Hyndman, King, Pitrun and Billah (2002).

# forecast 0.96 (15 May 2003)

* Many functions rewritten to make use of methods and classes. Consequently, several functions have had their names changed and many arguments have been altered. Please see the help files for details.
* Added functions forecast.Arima and forecast.ar
* Added functions gof and seasadj
* Fixed bug in plot.forecast. The starting date for the plot was sometimes incorrect.
* Added residuals components to rw.f and mean.f.
* Made several changes to ensure compatibility with R v1.7.0.
* Removed a work-around to fix a bug in the monthplot command present in R v<=1.6.2.
* Fixed the motel data set (columns were swapped)
forecast/MD50000644000176200001440000002545214474126512012406 0ustar liggesusers
7a21637ee504819b91bd3ce7bef9261a *DESCRIPTION 5a6cad287439a3c64bc1766650d86faa *NAMESPACE bca90234bac56cb1881b3ec4165908f4 *NEWS.md 8635679a18d096d9e79886efa4d42274 *R/DM2.R d1e5423b18da656d6809572192149ee8 *R/HoltWintersNew.R 1012ef80bcd84169d6dc2deb48a963f8 *R/acf.R 2cb2e34bcc6071acc40b22e6c3ddcc24 *R/adjustSeasonalSeeds.R fb78073e67fe711b3941914d3282771d *R/arfima.R 186f8d3f866f36609848717b022bd616 *R/arima.R 253ffc0ebc22581cea9578227e9cef37 *R/armaroots.R 4b8878d0f00f9970035ffe574f82090b *R/attach.R e8dfa73402b7eae5fa670c4156cabcbe *R/baggedModel.R 90bd8fca0bfdc3fa6f31e5acc4d8e1b5 *R/bats.R c69f52ca63cefc8ec286095b01c490c8 *R/bootstrap.R 7f5e80ed72cf26c3a2386b7d404b6c82 *R/calendar.R 2c2afd2e69e126efdc41a43a633d18b0 *R/checkAdmissibility.R 03989934cde5321851546cffdc594fa6 *R/checkresiduals.R 53200cd8b991f2f2e544377700904741 *R/clean.R 53bd366467f8721c4b2082c60a5c95ea *R/components.R 55fe30a71cd7f5ae40f161978e6fdd97 *R/data.R bca8b27285a9249d7623917d8e2f3fca *R/dshw.r 4a801ec36eb9a67d3db94e61d3756414 *R/errors.R 61a41c576edf70545cbc9a3647d207ea *R/ets.R 3b3832d7c86c1f67879af315d49c5265 *R/etsforecast.R a49e4ecccdb8c0f70f70da4b1f8b6086 *R/findfrequency.R 3dd911673657a1e049a988b0d629e30f *R/fitBATS.R 4489632ce1bb96eaaafe51247e31eb4f *R/fitTBATS.R ef1376b4cc03cf23e66b9569117be3a8 *R/forecast-package.R b5caad6cba32a2527bd68d53d450f1a9 *R/forecast.R 87d693356c97be5cc411f98cb1adfe93 *R/forecast.varest.R 7f6028f3368a9d511f3a2ac8248dd1c7 *R/forecast2.R cb6d7bf60c5cc71b81270d2b0205bc19 *R/forecastBATS.R 4677125c3355f315250c5067e99557b3 *R/forecastTBATS.R f647e0b6d5a501facddd395bdb302346 *R/getResponse.R a49c19c9527504ae24d4f18836507f5d *R/ggplot.R e7f4efc9862f994b97d627d7b76e6ec3 *R/graph.R
f20ad130d6d6233d41d4e5acf82703ea *R/guerrero.R 90ad1f95e8d0f73c785736698d1b127e *R/lm.R 4dbc5927102faf07edc172b15ee24c4e *R/makeMatrices.R d2a82600e97e923595e3adc760b81288 *R/makeParamVector.R d4e1c72e929d802c63261350dad4bab6 *R/mforecast.R 474a536845da7b177a4d426d223e4908 *R/modelAR.R 333e564e346e4bc310a219ac08d7d781 *R/mstl.R 3a91d0c24a2601ca3a18fb60acccd08f *R/msts.R 2afd90efc04fd8d2bffece766fd4ca38 *R/naive.R c876f7b654137e8841dd2f1629230226 *R/newarima2.R 9a76ce60e9e7fdd39a318858f316c8b0 *R/nnetar.R 4ed698eabfe343e66ba3dc075ddc486e *R/residuals.R 48e2aee79cf37130ec860583a6d0af6d *R/seasadj.R 6e59c019d0589002ae9f1d224a488f42 *R/season.R 2fbf0f39f2012e5c4d5ead042215e66e *R/simulate.R 7cd92409b235cd14d40ee0701e0204a9 *R/simulate_tbats.R 3a8559406c735ff0f799388e55d77d5a *R/spline.R ba07e532a8b3adbd4a6c2fb7a195cc07 *R/subset.R ba9d6d3fbdb1c085de82e30b90b6c48f *R/tbats.R 1db4998c13a6a1da4e228ca7a612111a *R/theta.R df1a2b39917256fd4786e1992e4d2c04 *R/tscv.R 25430683535a3072c340c44c5d996c6a *R/unitRoot.R 10f32c6de9168ac9d066307d318bae91 *R/whichmodels.R 7238604557eca232bdfce0502ee4a2eb *R/wrangle.R 57d15099f8ff73632535706efd46709b *README.md 672e9efb78ac1c24461c8b9350766a7d *build/vignette.rds d83263b393c17189250711ff49f730b6 *data/gas.rda de9a9e1c277aa90de4c8ee718d4ef93f *data/gold.rda f0c82cb5de038d46b4489ef90769d58b *data/taylor.rda 38679e434ddf3856988e34aabbe662fc *data/wineind.rda 20dae67335336c52c4228859c36a22c3 *data/woolyrnq.rda 53e0549cb41cee239267f548c1ccf666 *inst/CITATION 74af0a2135e16058d635d5f3ef844ade *inst/doc/JSS2008.R c654c548be2e55c4284dab38657eeebe *inst/doc/JSS2008.Rmd f38a170bee8011bebd016e836f5f20b2 *inst/doc/JSS2008.pdf 83f91e71324a77f2e65abbb0a39dac82 *man/Acf.Rd 1233246bc7f32a0e669fe3670261dc78 *man/Arima.Rd b91d83371a8f3574234b6ba4beca570a *man/BoxCox.Rd 0390825433c5207e11a3aa2c9689bbc1 *man/BoxCox.lambda.Rd 4a846add965855d0144767d413211791 *man/CV.Rd 6ecc8c117a35ae3e4a8e20eb546f1856 *man/CVar.Rd 6aaaeb47ccdcde81fcb14a9d65329f44 *man/accuracy.default.Rd 5442830e86a1a0c1013e8d41157871db *man/arfima.Rd 9c2e1f1ef44dcd07a8db27af46c6273f *man/arima.errors.Rd 99d19c63d529f30309f5fa5f22b20d59 *man/arimaorder.Rd b77919994f730e51935edf1c014caa7d *man/auto.arima.Rd a5a62e15bc34c63075b2ee3d2110c7bf *man/autolayer.Rd f33ea9daffe72a6de6dc4e444c8bf564 *man/autoplot.acf.Rd 292043ae77bb3767640d3493a56424b8 *man/autoplot.seas.Rd d1f76dff11374046f0bdbb99749a765c *man/autoplot.ts.Rd 790787a95a2779ed85b870d9d774cc8b *man/baggedModel.Rd 0e974f0da71ec18800366d77440e1356 *man/bats.Rd e0142a3b240fcc9f6229defc4727c238 *man/bizdays.Rd 209c496a43538dfa3eb929a9a23933c3 *man/bld.mbb.bootstrap.Rd 9848999fddd6caa019b2a97e981b96ba *man/checkresiduals.Rd 3f93237aef8ee4696ddb0bca3f6e9a02 *man/croston.Rd 7558acd6622ed6f745cb4c4de74957f5 *man/dm.test.Rd fad85528e3c36a57426495d6c0be5ba8 *man/dshw.Rd 4c3a40f6807c40d497529da77186946e *man/easter.Rd d241ce7b381ed46cd972f6dc0565f830 *man/ets.Rd d2ccaa153869329ea005b8816b2e029f *man/figures/logo.png 30b384c8dd90a0f902218e76bba5f472 *man/findfrequency.Rd 871cc6cc555d50e6d4c82e3eef954207 *man/fitted.Arima.Rd d6af44d29bf7ba75aa55796f02bd729e *man/forecast-package.Rd 0f4856ac677c1f398d034a012f6a5b6a *man/forecast.Arima.Rd f77aeca83a063a307f911797e020d6df *man/forecast.HoltWinters.Rd dda85c94530c8b7978d0b9a49968d4c5 *man/forecast.StructTS.Rd a5ede17e227dab77b77e801aff71815f *man/forecast.baggedModel.Rd 3bd2f567500d1898d3d49dae8b120700 *man/forecast.bats.Rd 88a24a1870163ac29156ce1cc8056119 *man/forecast.ets.Rd bb17f9d40014a8987f4ed9f50a4c6f22 *man/forecast.lm.Rd 
76069224b3c79f9d51454611ab8ffe2e *man/forecast.mlm.Rd fb7fdcc24bb63ece5fb4e5484f78ae23 *man/forecast.modelAR.Rd 6bbb48242a147f43078a9c543c927e61 *man/forecast.mts.Rd 7b811361f9d9e69f6ef03b68a98f377a *man/forecast.nnetar.Rd e3f2afcecffefa3e11da29c3c7dfc082 *man/forecast.stl.Rd ca6e4c0080b53338eaa63e5388737c59 *man/forecast.ts.Rd 8536b20ad167f572a8f41ff7157276a4 *man/fourier.Rd 4c310ce65a57cac565c003ba8d1c706c *man/gas.Rd a04b42c97c1bbf9972e5c05fb5649954 *man/geom_forecast.Rd 04278fb50a27f247325113f626cd9214 *man/getResponse.Rd c6dbd99bebbefa49cb1cb4a012e66963 *man/gghistogram.Rd fe56843c030a399f300275aae6eee230 *man/gglagplot.Rd a655c9f394843bc1aec3af6deb67f9f6 *man/ggmonthplot.Rd 2f30c1139541d530824d307fd86a93e7 *man/gold.Rd 69c39a9661c43d0631c6a1ef4c3ffae3 *man/is.constant.Rd a1f193faca93a50f329bd52eafbe6d6e *man/is.ets.Rd 0fdb9a4ef0c3274a7a6a1135b5a2f591 *man/is.forecast.Rd c4419f85bca986614f9950fe954b6c86 *man/ma.Rd 4f946a31c5da15d9c1a2697a8144d2d4 *man/meanf.Rd fbb350863c6aecb2c95e3512a54801bb *man/modelAR.Rd ee39cdf50cad2d1aae445da601dc4ac9 *man/monthdays.Rd 35e54c362d1568b509e7606544d823f9 *man/mstl.Rd c1e2f69e0e056997a5dcf747fba66c1b *man/msts.Rd db892ad70cd382574ab7ab004c800e4d *man/na.interp.Rd b84cf459aa295fc8b8203b6ecc85c507 *man/naive.Rd f457cb0539e4dc98f37f10786af164ba *man/ndiffs.Rd 378ede105a593b204006701d6362fd93 *man/nnetar.Rd d8e464947377cc21729cc855499ab2c3 *man/nsdiffs.Rd e5efcb1e39e1e8816050e0a04f2de0dd *man/ocsb.test.Rd ca040caf6e8cb69a709f093640e05b19 *man/plot.Arima.Rd 05d3c713844dabe0e0ab1e7e877acc94 *man/plot.bats.Rd dda7462647917f639d1ed48e010c0874 *man/plot.ets.Rd 523b0e23da1fef3337e44c0095869c8c *man/plot.forecast.Rd bd4278fe0b6984e2f88c3e3e97f1fa48 *man/plot.mforecast.Rd 6c7f2c4386245076645be6f16737cdd7 *man/reexports.Rd 4348571330fa10db423a393eac8e8a72 *man/residuals.forecast.Rd 43698c8686becf342e68c02aa797cbc0 *man/seasadj.Rd c8a8a9bf21ea57bf9e1304698905cfd3 *man/seasonal.Rd 86a05976843a74991be96ac536fcdfee *man/seasonaldummy.Rd 38bc4dcc5611b22fe90b80617693aafd *man/seasonplot.Rd e31cfb0650c537e97c8e8c32e170461a *man/ses.Rd 21fe5bde767254014a7d7c4f00e258c6 *man/simulate.ets.Rd 59b2af1fb81a9088a5aa0e8e66507ae3 *man/sindexf.Rd 54be116966779434d622dddcd9eabb1d *man/splinef.Rd 2901ce6660f7b06d7ecaa226276052ae *man/subset.ts.Rd 580471f7024edd6c15f796caefa804a7 *man/taylor.Rd e2723ca1bd6e6df55bc698fd572de580 *man/tbats.Rd 0ae2d2dd61045aefec1202213a05e2f7 *man/tbats.components.Rd 04a2aa0f9f3f2e9314562247f444c302 *man/thetaf.Rd 137757f5d574ca7845b9a651e348d316 *man/tsCV.Rd a068b467a8b0b54c43771c998de15d0f *man/tsclean.Rd fe3f298d209fbf7769faac02f39733f6 *man/tsdisplay.Rd 3822a7637be4232e2d420c72a45871be *man/tslm.Rd 888eba3e26715d9861473ac27e9cbd1f *man/tsoutliers.Rd df48ac7208918eeecd7681e8578ba872 *man/wineind.Rd d456755f11d47e081631c0753c67ddde *man/woolyrnq.Rd c89d6b95d18721894ab45341d0fbc7a6 *src/Makevars c89d6b95d18721894ab45341d0fbc7a6 *src/Makevars.win f9ecc1f5c60cd3dc0308fb1a3d7330f9 *src/calcBATS.cpp 8a950e3ff166da855be2bac67fa699ef *src/calcBATS.h d0f166950dc9401c846838e18d237c52 *src/calcTBATS.cpp 3c53efc7fbf62345277457fce024695a *src/etsTargetFunction.cpp 1862021b4c660004e29148f2ab00c46a *src/etsTargetFunction.h 1c35039e6ec6da9e4bbb4c04b6235905 *src/etsTargetFunctionWrapper.cpp 1993b54b4241d9339a89e94deee49959 *src/etscalc.c 10a04e99372e13b619cb078598d72b19 *src/etspolyroot.c 7737b4b0565a59df9fdacdeab4234f8d *src/makeBATSMatrices.cpp f6e3a6eda213b1c0154adb7d2a638852 *src/makeTBATSMatrices.cpp 7c22b1b500cab25872924e218f1645f5 *src/registerDynamicSymbol.c 
cdb9079de1d58f886f769aa68f0f480e *src/updateMatrices.cpp 06adefbef775d3ddd511764f84d16f13 *src/updateTBATSMatrices.cpp 22708a41a2f33a810a8353ff0a62a1eb *tests/testthat.R 5ce26e5e558d08a2e0c411a36ad60f7b *tests/testthat/test-accuracy.R 3b580e8bd4b8da3bac0d6489a2ad2e64 *tests/testthat/test-acf.R 05607b4a3ee0b4bd6354a7c342bf748c *tests/testthat/test-arfima.R 9384d55845d8325a918ea6b3192f4c93 *tests/testthat/test-arima.R aafe5438a347abe8740fca4add616a2a *tests/testthat/test-armaroots.R bad4f6f86a7b3d74333cd6e42fd82912 *tests/testthat/test-bats.R cc07f49ef73d238837d4a0c6cee150b9 *tests/testthat/test-boxcox.R 1e11fa115e1e56b18afb3fd79e5f53c4 *tests/testthat/test-calendar.R d06215d364164219a5087baa8fc71968 *tests/testthat/test-clean.R e29ef19acf0b97b246205cae7a2a242b *tests/testthat/test-dshw.R 569467a4f0e400a68f047a0bb5c029cb *tests/testthat/test-ets.R 348765823673999adcf1e9d3321069c8 *tests/testthat/test-forecast.R a9bdc48ed3b761ff84658aa01d961795 *tests/testthat/test-forecast2.R aa707bfcff6a8d83a6658020105a78ef *tests/testthat/test-ggplot.R 60a12b281f2c89cb93382a3757087cd9 *tests/testthat/test-graph.R 7248fd1953fc5bdca50933fa5922fdec *tests/testthat/test-hfitted.R 2f8dac2d15acdae41651ccae610cc8b3 *tests/testthat/test-mforecast.R 89b6ecb622d8db706a41ad8efaa7e547 *tests/testthat/test-modelAR.R 57a3715236999a8c51c3d49ec103ca9a *tests/testthat/test-msts.R aa1f54d7fb4b0e7651081a622fcf9c5b *tests/testthat/test-newarima2.R 6ffa8156de363742c886dd577e45c419 *tests/testthat/test-nnetar.R 802309f8f3824c53438b1a3d22ed19be *tests/testthat/test-refit.R 165c2eefc684be35818d4bf75b446312 *tests/testthat/test-season.R f0e5201af3cd697d85c3e1bd82720ba0 *tests/testthat/test-spline.R cdeaa87e7bc3e478497824d70d1d3390 *tests/testthat/test-subset.R 697883c2fa3b300efcef42a4911cdfb5 *tests/testthat/test-tbats.R 8741ad0287cb8bbbb0d73fbd7bb630e3 *tests/testthat/test-thetaf.R 45254e5bb52856455e58d771ce5fca4c *tests/testthat/test-tslm.R 20c99ad040532709bc751d5f0cf90115 *tests/testthat/test-wrangle.R 2fa3082147c937669b5c1a44722eb85b *vignettes/JSS-paper.bib c654c548be2e55c4284dab38657eeebe *vignettes/JSS2008.Rmd 16e6ff1f952a8b8b4f77aa0adf736559 *vignettes/jsslogo.jpg 5476cea82c73c77a83ddb7e2619d3727 *vignettes/orcidlink.sty
forecast/inst/0000755000176200001440000000000014474112176013044 5ustar liggesusers
forecast/inst/doc/0000755000176200001440000000000014474112176013611 5ustar liggesusers
forecast/inst/doc/JSS2008.pdf0000644000176200001440000064345414474112177015266 0ustar liggesusers
[Binary PDF vignette omitted. Recoverable metadata: "Automatic Time Series Forecasting: the forecast Package for R" by Rob J Hyndman and Yeasmin Khandakar, Journal of Statistical Software; keywords: ARIMA models, automatic forecasting, exponential smoothing, prediction intervals, state space models, time series, R.]
d)S#jȎ/:ep${2?Ocm@7d7dGEl'O0F”s}\V,wMO߀f [{"-pϟ5lGF/\B:&`?sӕA,XjEtm8Mf?^*_KG\By6lUO(xb)*V %Dŵ)#0YZ[UvY/ "JxGy8OdUއ}^ qC9!WȬ?s]\=܁Ǚp:SwBgY[k.kR.LFNYRs9)M^ q[-j-5X8j C(:eg:43>݀#4Z⮫rqU4^em8Ka_ovYxDJxZ#b!.ms4*~ą4J)'-X kgqY衁JG^gJfi>$&'|2"ͼ]wJFRR#E+7`e=;o+sgԞx7^57EȚg (B.AKDV=]J6@=@Vu0:qE&+VIo39hٺTe&YMB#b ^bqQmA)K!Tpg/y%9?J2BAl"#{0@ yR~()oYk!c_R=G 1\69j*1Jl5{%?~ɔW!VвMhjj6ʾm7,Y2ȡcOyL5D"yTTR EۓZEWI{RSg}+S=axݾ<S^*PNZe= Acȵo3vX$U*M_1K2BxxPϰ/Vnc**J;?N)KauL`PXgTqRr96'_hXdU(/Đ|p33ӌ /F ϳ|Y'"=Q9m1d@sMY ™c*bt!Ou }s_A̤j,kg Z` b=$2҅K2 8,Gt0 Fuffn|쬭l.asoXֈ\đO8_lIHёԞHZ 刎ڋ k4Fe$ *Mڃ{v; &;"ǘUx>ڞs r aC, :C8BP(4.K!ȕ\7YY򻤓llŖz7c xtڛl׽>1ȉ ߥExbn5‰W=d\N?ciKvC.cfb|·4ڈNxNT 6Vy_%G['ēSQER֬cMUY(; ((7H+&{Xs1\jJD1$Đd, "A!θAq>qYܢChRύM{;^_HM[GW{Xbx3c2T<\#j j[JFI1zJ #Rshv')PUs I^V҂*iiMd2>%I}RTK,է?2I DVT3 [M)mՏpܻ<Ljm&WK -S_މk^ZVLS[XǤyX略sŪbfLhYڼ*Y%ED]IV~{DsR&̔NDNw С n3>NknԴU"HO.\%jT*oJ-W cZ1. w?pͿmoI2,xԌͽs:TN7& aHXLHI3:59{CQh)J7?(bCSRUTC툗ï=ۃ5V9P5ig2qgf~wm ]l=_Mvq"˿A^pwme 'UkHc_6#a-Mc7Y8%z~\UǗDVKF4:ʼvŮ7J![Ɗ~ˍ;5Ә"h_plQzN{AlJ?>sZ݋j:Q7n>ߝUP(쯭}sDr}# M$_P~}DDEċ8}2VGm.L*1܄L3 Ȁj_;2 H{H#[$^ @';#[ahCqaQ5D o.ܛAYN(D- N~ʼnk}--._UC??o/dӼ>97jޕзhۇZ$(\1Jn5K: 03oʖ?{}$?Kl\K钒0=t>\ʉbS*t*vp6XuL\9Qb$VHQ{V%bib @Lq{l+aȈ_[TBaumLݙ-YhjrKq {|sw^}9y Wnʽk |r B.Ѿfx7@ "נ#~,3)FktDO.MJL]佩sa7ST#Zk> D1aŤuՍ͕*r:r*<1u jNSHp)bL:~ }ok XKTz7djuos^"jb5sΆcտVq:<*w36B $c1Ð ̅%Z(Lݲ#>jKAI#mZ#>uoDkq(_OK(u(L@ee2[k̉N0$tTh(QS=K.Q"DOB>~f7*F"s #lRͬɈ>{Cgy+f:(qL 6V=I{}PD,vi5m,K5e2Kjr`)0_(>|S&BEx#Dkǫ|U-a㌏g )]q䑘ϡa)y._՛W֪}OڊBǿN>Fb6&|'I[G[]~|JBo]dHlErXiF jM0xP}vVvv)+?m9 M90kendstream endobj 352 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2932 >> stream x}VyPwa9"pGb2FEnDQD )}0or(0hLcRjIV+11#bUkQMvRSt}=ekCd2ץ+u#b6ϞYf}2Mlħ@V?>{9۞qƞ 럢2ٚ\_]JnZ|l\zάYt]UTFnLe'ƫ#Ձ3g鲥guꨘȤMj&PꀕBBkM&.^MO[18f6k(ʋZHPS4*N¨ՔKQ3%,*MNySsy2ʑ򔈡l lM=y6ضUa6t.})gXa8cws_n_m~4Q gˆ]2(Wh\(T7r߽,ߠOgWd@>Z_&+UU}(Wb6 e0Y"`,noׅ;98z`*Li42ypj^+ܫQ1&؝ %'ŧ֗s!ʓk׮1|H|ɾ(t->=&basza;N& /Io"rft"QwD|qJ .Bo]C5O\ꕠ5ћ2baĴU =~Awr]b]zV@ F/Ű-%ƼmEg3Kwǝ-p|t o[ɴ-5ce =P[TQQR̯X޲uEBH:>Op=DURϛ(ZfQ&$Z!kō{J܃J>+b!rk& bV:*ّ7ht6%J ,=ϫ+QY."Υs}=ߵ'kU ^pjC/vBǕa+tq͞A;{hvUJZ{S14M w[VT1ҩFߢus ,"NmtL-7ܦ%YA~P*^w$ 7!'/9+V /H?o~m rfX5tiPp@}7s:cxZ5ö[4wI*2C*UjƦ]K.5]p6skSBwlMT>N/ܨʪ-vW(.(jcNnjvUWboE! 
ِ7Y(/a0*"cru&({FYYUjgDlHk[R < c¶%o {xB8`1zZ9p4κOc,5o!hO;ߑ9i7.A|U`KO%R @Ӱ07nYRCI{ ۘ:N'1*ۓFt,kq8iW -F $SqѓhWF3U=e=q /v`=[,X}?J4^ DZ9/039*mнҊiTX*0Dѥ+*"ʐ SLZF;îB"x[>Ot!&[s$72*O]`jF[kI^&o .f\qZd)Qe" O籏QG-ݒ|<8]rRru1 $LlIc>ַp(11BaU'$9[D'[; qrV(/7l/K3K&Ԗۗg8Oin⋊ @[>Z q7ɳC/Kԉ @RF<'dY}؈ܸ]xsP L5Hsj*2S' `H׳\>3 J<%yM $f,Ȫ,1mx`5Wj{g T5U7b8SU [ٸiݸ,%w(QҜeye[e+6Kl'UWcɗ\?KWL>^e5?vR@סMͷ쉑+d0'ߏ0`MXfAK/UNϺ-=Ø$;IGIsnlJ_X5> stream xe Pg{nF{@<@<$ @ *(^BPkp7r*78#'j*k4k5.Ig۠IVWuU^K(#J"L7j_PBUQ{CF)a0o_HLM.Mb< ךSQ),vr˕UJ]QʠgJDq1\  ڻ[[1trwݦ~sWԟOE)VF p%,]5@QM*j5P%%(5l([ʎBML8xIơC&kMNil[3~qkOc[h xtNҤ4A8Qp(;'bNdḂN 5< +8X/8Hd/15 9 $CJ??S.W("RIhe-g(achtߛǞqϖk%ʽyh,@K@Z*#7{ )rL7{N6tP`-πZBq0\![Q~U ,ͬ厂`eq,JiY^GG7 ڱWӷKH:ܖVŊ_=J" ”6ˎx zW]t+C}/͠o3sͭʅ4`bA Y5%pkWG\:mEg@Q.|H-~LxZ>"Xj n4"BOEkgost0{D_~ 22Ě Ն}<;Nf!(sMtHg}tP FO$s|١ֶ3'56]0wA\8FCJb<|VvU@Gv [.}/R\:A2(kQszNfb?'h!ܛ>Z[]s>|vӓ)8 6ƄDȈwyBY?2 >j۩¤.Bha-&q4L1ۓC8'B_Tr)y Yv3L x$4aKO"S"3w;0HЪ0X5I{ /7E&lck<2hnUm=6x,I,˹.yUG;ɖwԷhZƼs8F4^ХjUw bzN\{p2-?ǵcgk΀ ^_=oWxS9(},ѐp$X `xp%pN6?Xc4?0gnP+g&0نIAZ bDDMZ5QGRP9x%K*.9ӜkHʺA7_xr NCwRm@nxAHA V`2Oe1yw&$A̛*©ނWpV^Afl+h[^ADZ^-T~JIZvpe|煞vF=Cz߈=Mm] .M-d[: {yz J?yhLsQzɵvisB.NnB#~a]a51O~wMtDV`^t-Lm^&yFr԰WEfl.5@HNH=* NWAs*DHHlLy_4GXʒi(cf-w_ Ef![9lpJ\cĿACt1LH ԇ=,65ra})/+Q6o1tNh/( u%%45T+V`: nendstream endobj 354 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 321 >> stream xcd`ab`dd N+64 JM/I, f!Cß^<<,$ݟ3#cxzs~AeQfzF.THTpSJL//THKQS/ f*h)$f$))F(+k; g``` b`0f`bdd ?SUe/>0oaWw[V}GoG~zw#/ KUؾs p+]#`-ýyVT#3z@| 饁endstream endobj 355 0 obj << /Filter /FlateDecode /Length 4076 >> stream x[MsS<9|QeK"YrTe- wʴf%i*)8;o~rXW^}xϻG7=?|KXSlx^Bq^c#Ŗ`THkhW(WryΞ:^',FZsQZ6wq|,j2 mp; χ>+q;0uQjP#31.H7 , 36a1@ ͨ9h;)Gƌ La8" m ٸ[gh٫Vr[q©>Qj+LHM8  K^q"asqA;nh@֟hv 1 jvĭ†::,r*4] $ dy(>~'ŢF@i3*LP%T 666l|tX ~t;޼7Ui*v `=S \oaAKPB&:wD ʽ $ykxFVh[G rr60y=? O"9KNw @xSETp@.:u!NjUt6OUd/ xf 3(eÀ5v@(\{7..J!L.Gȃ6a {%_7RűY wn_pXd=fD{Tkj,k#@[(_+W~'3#<]Bցq]B|:{ DE.>&'S ^//3ʥׅKٛA_U7|6Lf?eH2+{CgSRoϲRa#_'|a2O%~?OOqtiJrt8b(q8AH#l*~4m|Z%4QA'M6,34V%B p/ȇf.-&Jl3L*5Զ\ 38Y|'cPM"')ve‡* mi<# '$LQLO#C d \BjLL{dwIhS4%[lT1#p+,RQHt@w3\/ 8(@s"Mi#q ._3^|GM'6pOцVkGɹŷ͎Eoж)oJӁy;~nKJCiT%-V\mެ˰l8W[AnTX訒` kE_L<:j©2a!K UXj76Ͼj]f"!l* }Qp7T,'BCQkTԠ6欤 Pga#6.@շ:}Be'`[TJ3) 9bpЏuOvc* !h:smW,@ClI7d$e?tX "ȷ:Kdq90Ɉ-4&bd^nT"f:Q׶,?Naz/yMiw#鐍m> G)wz _!Ɲ7UBl Qw5nRf.<+(=>k4}u h[֔NfNF9z0`~)h}:_|\;ԕMh{Nc$Yd7wIV'rN>tYr(W1V03Ɲ>ݭ2i!?KM `F8/!}6n8Q(51}L"Eiv(uMd?H;=. R(jS Xr(VWlGQ&] 6tw*Q| BEt8ÄLR{+办)]wrO{]&Ӌ Q PNl(lzx>b_0«Aw 4&P΃cAS< dixUgH$pMK&vB7 <۵ ml8xUjBP{E{}C4*C1%v˹W^/}+:q_ 6'v|Ls׿9.+[9NJt>+:Q7쬻Gto%-> stream xmSkLSwעl^QL(0T @4~p\2!.Y"f;%>m_^爈$D==QQ~B>+ci<ƭq)1bq/ \uԯĊxT bP1[[glPWV526fd{Lٞ*)u5jDsٕ7٧ E50U%)(?x0=0/WR_"6K+#d!$VPH$<{_j"Nq,GE\j&{O^=܊4^<CF~Ee9Ԫn=^X Puq1E|WB_=_qW[,GeqBDN"pI$qtWIo Ƌ(UuSO4bQ̓ǓʢB=#n&1WJ'<OV45 Y8 g6]Olab )StW?]߾`1pG*aW,nm!2ZXԜ\2_4u5,#]@ݶF#/R: ]!A F<^;AxvA=fq'`:R@$~`]̅¦* $C;%S =m[TWv s~ uM-*2 8:Șp"3揞l>p-H)74iZ/zc &| )r",pXkNBtvL@%hOw\0]^]q Akq(/ݜ3 |/p)` \C+E?5|4n\(!ءvKz߾2%ĕ7eR|Ҋ; I_7c"`2?we1Yz%1m=օ^  $t^*suASj੖;oPEB4ts)#?9Xpdmm[? 
a2EY\G?@Eb7J X/fN8#,8{> stream xVyTSW~FJ'ZպuZqE+jH ($@6K^BC"TqCVUԩƅ.m}Lfdi?朜{>Cx-[dpI4{yaI) I{,ca \蛣q|sOM"QT;M!|y]keyl ϛk7':&oux,gɤℤxIX,HG o #|AC2Cي٤䔴I Al'vsDIDshb @l$Bb)Fp1?'Ix},|D~.>'g"{BΉY@%A=\h4'E6m'7 |F]]z(+: Ҡg@o0EA"6nfC&`^:Rdr Zn~aUc | (ہr0;PxGC+ ;a DZwu{F\d˹JcT V q ʏG>C_SdIP^f4X3Jp 8MUp{sx@Mڅ(ryPR)͈# qzu0Xek~s{(>3WSDy~ζ jvґs"MEv|SCc ӸU9jnӂj_ &ޠPjtFJkx GNxР')Y2q#;Ea H8(J͂TK4):*A7*L5LU~d , DP=_.(\>בNy WsO{kd"T&';CO~xҗʏWfv$'G:6WU\vw uPe8e6(N*ZI5)ڲZ5 u+זF' 2ȼm2 j} jXhlrpe]qp?{cV#Tn2zЖ@5{ȝ )B=u$B t(~f;<"w{Fm ۂ\{73e7 UѺUG2H{Wփf?Ef?)`Ɏk^C$EGct E{h.T[[ Ehl' bMbHf{_CY&D/uqꎘ[=>\Bu,IC.o n_oi*=v_ W K!ATꨃ}+2 _`yZ)mEEQ`2ZhKk4 3L8^)+/kU.ӊQhc{Gv~Th* ky5zD^jbT#Q鮪n#T@*Վq{K‹wfil҉l>ioz߾ڽVSmfn6p5XZLй}́E&y~~c"-m_ȯ٣'Hju!D TWDr)܂b@jwi?A+ޱ={݁?A!$~DF4Qh(=^U&C|-mCF΂2cPXWeijr۠,GH }9ܔg!Г0M}g þcnKMZ[4jB(ݐz$Wq1l!Q`\u>OǛ:{ry("_I"Z㥚}߃ V]87h_ 7 +7E>wM,J:`=̋ʛ*7ND1@@iH1 #(&G/x5Ο<45P.Vhswj&vxSm&eS`͘C1Cn"! |S,!S8ٓ@RF<Vl(ZER3T$WNAtMVbKjr81N'fZ( M ~ꖵ[{f_IemrhjĜt榆.Y _ת޷*z-mԩUeJa*ecH.#L\y2SןG,oM$e[ZNwEe[Q e'ND=gg, =_3`¥6 /=jd1r#h [}Sos30н =t X$2UeA[5%@.ɵά#6{ tރ^\mOhW6p=5*.V^z^,!5'u:Pf(QƆiYؿ );[,K&]ne W,=lW-=ƊR)7NM "wŔZ嫯RmAQ}(r0FGl%{$|-uu9X1̥+SPi@'b`AbR`0Ֆ.J63 Lu4S"qFy^BS^T>g=O>t>&|C"$ߗ r$ݵW>v/+)п'5=A5T-Ri!T+m8ԎANFr.I!(LX?e4&nb.kZ0w1 c'Ŀ+Eendstream endobj 358 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5449 >> stream xXXڞ̎Ƒwxk$* 4bAeeeEz/Ҕ`$VlD5ĘD&&5?YorsgvΞ~~7JOH$뜜d7s !NtĿfXwG}dN6D7<ҕHd+e{b~ _8oy;kW,:<(+t,|`9Kjk)tlu7ζm^S6Nrj}xHhuv;nw p t > -^ҷW̚m<7(j:;zzAR3)7j#5D͡ܩ͔ZIͣR(+j5eOޤRBʑZDG-QK('ʙZJS&KM8j eJQ5q 5@KM&Q+ޔHq9:su+UK_+mM7JI3̅qSƝ201QN}u7b/ J .?1ݨ^AZZBb3S3,(lQ1#= 0wJxC kuT431A ~yeGlp>iϩy)TCm1Ttב.Ā矀]z )krOn^[kas{Aij GQѼ=H0z4r/ޛǦIA(5:3Cu[Q1m 4sK"+S }kEC>飥ߋ]g:tL(5JJ}0 `]5S^cD zb3L# ntc[<0+LNu@F%ȭ- ^j's蟘k*ZSnZ}ȴvuMeHX9$0³9g'!Z*&7pvײ;r>6, 1p sHQZSi#2ːB>vc$q\C9e`e+S6A,:p_{:)h9:C#J鹪zxv{P^znA^ k 9 94!)Q-PC Li]&J4PJa]آ+fj ƎFc %pKM qcPu ŏQ2CMHGe}j#l׸pc)NiBuBϥZ'~]R#wa>+ɥ~@Ea dKx0 02? Jk-* <Ƚ+L+ET6^A ѻ49x"F1b5%x,~uk.!'G{+*P)b8J-h.&g7=4,W#6jw,VyN?^+|!L&a+vg:jGƓa10K0]4#jDJSNHEiU[YP-eh`)w_|Ӗ1IBRЁ;Iݒ}['=[Gx)#-vXΪ_rrCT.ְ!˥:1;@BdFNjߴ(ˣaPvun+2(/8$-ұK_\Jv{i1|捩2dlNi/x 6;O<ى-*:ŏ"b~奯2$֩~^WtqeXo9Vb3DX=0A+&|:Mm#yu"ph=}( lPGj{\[CysaE^}i*oZ˹xzo:qg?z̹o˜9s͑]]|Ku)AGGQv#uÊa/հ5!A奪''EbBr8:~7OjhP1O{ꔽ߽{xׁd Gc@(.&- b>i?WBu|xtöSxjGvIDnNu`x“GF-Q&#^j&g8N@P\1e`e*)YQk]kUӍN0N^y|AdYFhfqڙq!iwrX6˗=M=lU[7ы,wnX.Z?ܚڠӍ͆]wcqeW)8DWGtl`t5#PD{ AXx24Fq'O0q(()?!?-&*"2.gSke'>K4!3t`ZZ`'}Ѯ!tRju=̚h4̐h5lsw6#1lA߭eLf;cB7تZ__C0Z/3Q[*H&'廓᜖iKwMgÏRΎ᝽4 '~ӧ?<<m7ĞB >u=VMGݳ`7eĊaC&qX@/N-jQ QG29(vjf HF4e2kl`ަ8G+1BrAeziPI,JEȔ}$S_5˵ߥ9ʈI˸6ET,D,l3}AEuEKm\cPG_go5򩥨/(9IPq[Q iQQuެlUJH! N(\50:Xosgt@)X#o_Y` G0Y0C捫̑ $s9zXC"&#))Yd6Q~ztLL NirfPbp/: FTt[G3O_f%44մgVUOt~w,\}7aԴL0W?M"xH̨NVBeت49:~b TBе:?h;17>Rlz}`^`eƾ󚺯GHH!(L㒕P(R8{rzήzDPTTRzyDdBЂ'j`G#A)ſoc4 G8arP4XC 1*9uG8䰜9< ЩJ}䒐 ;`Ho\ ]iG#$:i,-,ڷ[{`~q٪oqo 9b{DuTBHV?y:7@lk/ ' p븻y{ʏw:(2P,^R_wn=p = Ce>e4Ҝߌ*KZ[ JQa7b7tFu,JCLFĿux:vԔENy6?0'% |RI~M/6|X:]rb[e^zoŜG L_ 9'O;=`tL/.T9{ץ<ԡ_ϯs!C UAUL+d`]XwR}9S{OV̚K//0ͱa踐Ua^V~mmRMZ#m$ݥ1,Ԝ%XXz\OHk<8ط#PKg'/;KpzDMn_/EPP_DIGQs0gh;$dKq1džWo=l ǫ4< [&`s|p| ^;Gǎok ]x)w+"$ȧ]~$/OpIt#*9F3 ?_ 5:ɟݭ5[7s/ RQz9~k\*s{:v'41ۆz&gr 5ё $>=>4تƒbݾ:ĕeV y=78ddx8O*/,62n/,ɫw>kܹendstream endobj 359 0 obj << /Filter /FlateDecode /Length 3272 >> stream xZKsxJx DZdI)ڪLHXwA`g@,AX`^=~p۞󓽧Z* N>.g\98Y1)''BcT\ypr}lN o=7_'p\%LZsŞM]e㶫E60R ^f1K?dﲖ|uSܤ.B/hyd<<LqKoO,[XK:\8PQ&(ݮ9sRp4[FU7mEL1g&B?On6߮t\u{1q ^i8 9,d?Ot`i9{3k/VܴEX`Y<@y=rI#:)-p*- U$.j[KJ WQ`Zʪ*c^9]a.^.Kao慗ݤygM17*lK6 d`]\^)0'ڕ#5ӷ'{?Pwm{ӧz <04FZ] U%m !.g?nF0L5EKz"_&]ĂyMCկӶZϟN/gכd@˸|K `8 wFܐaKʖ0vh ٳ_Mt*ᑺzv|H7l$PK|k@`X뒉v2JPҿ֩_VM&ytOhuTPzQt+;L ,sqac}I[8M͕"@a0@8δ_M۴S,}Xj /fkbh@k\X? 
YfE p&:(`Zv.q0[ 83'j O(HE$=-M}(>UܯW1eK@S|eXFft ~7N'y[$tV'dxC g K5~S+ܽ䢣?p|5 ^0'B\"Q ͈g'6NX6IDppIe@]ȹ쫉&m&Tr_[ b) .m y=u [kqA.^l&,}w̖=@=J;ᜁזŽ0rbJ}%LjbGS*TC=*&w쨀Bvrqu.ŷ#b Nc/nq2A6ć3f@ѕ.$@n8бgk>Q/ϯj $i%9 6Ԍc͕fj=$64(t~Cdqy [YP`F w([} ֫Bp`yIu ][C["Qx5IS3-T(Y, S]m1f8 ӜL2_i0r7ݨKyO9ƽD9[sLvq_{n+4J~JeӞe:.rO3hNA~ 2Г琑Ћ@+4|._Rւ hHi)\PWƢs-dv$ 2w*N𿞢!JSnveDq4{6hJ7D>a7@Gqs@,ˑ׈D3᳠,? A|jcnC dHƲ#$J;H!ӰJ.!Y]F̵4~?;5 {' Pi|4 rߑ= /zF:$N C='?7Xr C peR`G0-Bc>ۢF "d((FbtPNZ! N(`\cvJQF#+  ǘGLMΎ?chwi)p1 &H=Ÿ nMbe4Xsj-aZB .­i R?~\n&6ˋX|A]]#av(;KTgv\wby?)`啰%[\FYG:pJ 79VDmU^G.5,۫ld)t3,R)Ob z*M!rMHAQd 2MEySH|c=R.|%-{v9sb珹~ ~U dPت|N$ ]~-]GasX4cM[gۺ ϛԁM.O(|Bf*2_@Ruߗi:h)Fs"-yQڎP5;,J]sjP-yiev1< WKpoT)%F`1Q8^@N}nzL0oݨ > stream xZMsxe c![fȦhRv.RorHw 0]TJa`=aYW/|azuxrKNQ?z??ԇWL`cBt͋_YYZZ^V׳9vvsJډ8WR(KɎg%7=$nUm`j)%dWj~X&#& pž9QT82^} o^;䪐o;s gInٴ]}a&9l(V`[n<B(E3 ?ձ_rV{㯅-hvõHvrr]pm`0CU)k9# Օf'4xe_gEܭɜoۛm,ƱeE׬{euy dӳ ʶ&T3DL#dc2nbeo8sU ɢM/oU%goТ46&s8]5%:;e[k mToz>Ұұ3)FF/! nܱEPX>(c/[b[ܪ6rWvudq:ixY[Wce3&.\]0CjwɏzySNN=U g#ᩮ^ek;SV3v+|Y]n?!p9Z}~;DŌK5^8JogD_Bj^!k0}F ro-\ZR~=`i'rh:l庿"6&dG>ii ) bď0~hBXlܮgā(9Wl y&c= j>$l1)KQ6͝!a+ 6M%[Qq+˫1p,! P rd:L x~0k[WWg%;B7C|J7i@AHSt>%`,:H  m٧}%F轟n14& ]JvfQoN>/&dg!<|C3 aRZ-HCOZ4LW&۶[d^ؿɹÈgotڦf_OBmge?T`A ;# qiOu*=҅bł3'XBkB VI]1S*/)KV#-WYyX Cpsya7q`fsykh=osp, mH@dծ(]6_Kb+a40QH`*6H(:=0J`ew5Mh}s\D;g5= lxnpMjO m>/&"kW >(Eƒ"Mr~~RLua]vH'x'Ĉy2Io[tEļiz_G@L[ jxu&[:<U>sJcLa45Eo u4ZVۑ|LM0$y_km,u6 ǞzǬꇸO= Rqq]zۛ`I1<""&o7A)hZfFL([6[ߟP;f~! k_3OꁓM/d,nb>рuHhl \pf*c?K gG+\r=z`Ů v4G-1lxFuEEz9[X+ӫA N64! GC6ZJ5$I9pWW 8K PQ1.gM*AJ%"VX* WO+P}• 'X;UFW3MYJin^5ʄ4pQjr*D3Z4Y*N 7x UBl: ] ڸW ~F0^F;Hrʓo, wj@}+9BefѣRP-C vg}@Y#L&_gNՄd"H`R@zx* DP9fTI:kP{|(3fکTVgSwŊk$V - rdwA@ Okr}>4O>ӍZ 1iMZXփHFy*,.s4[80E[`PFX+vP"Deᄶcev ϏUMƁ龁<1+0#]~] oQQ@G]Aߴ0FQ`~M|T9G XIaiAU ӡP<&q©39̛s} `Ur?Y6qEu(뫬22lz4@\bKSZU$Dxm}>_%mbB_ +E2{R|Mĺ/Ӷ *[lW!G+i#tJ ὚IPuۧTީ}AQ_bx:P=A<"ޝ֏ F~;+rptNT05\ /_FJ9ޡ~.NU,,(`1Kb{Y㽄A~^:>B_`&ͬim:Ӥ??Z?%2xz.u}X-[VUH=Vozȉ)̅}~^g4; !X8\(hNW#1W(\*O3ڤ?0:_!+$7=?j GCa8v y_Mg_$rRs@G?Ϭ 0 CKu-b-/.#fza37INp) mij?B=^i%Ƴf';lEQ*H}3ܱ}r.?ltGUN/V*TBZCb!SzͶz$sԥ}Eg\]Ē;hq3+1]DELt;oY#B᫨4:;K2ގge&_. 
Do =pѨS5#b|G9X;]L <_Rte%nRs ۚY3T9$ J9AX~ǯ9@~&Ja{- R`zN;<ʿOjػ<]\*6>7 ؓ 8$X^~jhv>,pxWYD@|ߵS-!q68|lo?BĻs3IױO~fۅ\66xgz8qg4_H>mMUYWKymyZ|:gyx'K.A֓uh|^*k3'BT4Յ(",dυM{ڢ}t2mZtU?7[v;>$c8Dݬ0nO$]~SNߌa -@PT?Iī?;ґendstream endobj 361 0 obj << /Filter /FlateDecode /Length 419 >> stream x]n0D|0Lw"E$6ZmCSUdrCYzn\˵u|sUӒߤ5[uz5lmƁ26|ӟؚ>5ǥ|erهTO!yNUYqӡnl0Ct;`& @$0DiT*pIR&wR!O*2vg"P-$)U) 3.I@ƈ1IHIQQDըʑ{ +&zviXi5> stream xXgxTe>Cx  T "M@)BBꤗI&:={ILH!$ J.tU?r}+cy<}?=j ]jU(q(:E3czĺGO <M?C!,?iT`#荑05ٜZ U[!&$Ÿ>}fDyGhZ1daxtZlVM _.&o 'KNO%|֭_n~}7:%#1zO(:.#[(&)1Y EQS-ZjfV(Wg$&?vu\|dĤԗ̝?gް'3MQOPk'Dj@m6S[TzNF=G-R3erjZM<K 91p̱pԳ`1h KȦ:C<9]fc{r,sx.[Y=s> @T}Y[K[*7A#ڴ-@[=PȘm:p[]Ro!Ff1Y+.XZ 3up^[Gn*oDjpVQe.%"|cم[ISZh Ռxim@+5uZ=A33S{Qy2I(4 4|V{H;U\x3h U"P $2w:ߧ|x{?nG|Ҫmтݬ\/)-!]J]N?g`Jյ}}Oy^8Ae+t=FՖqZXb.yWUhGCЯxt7ũxCGtǽ]]Mz~ "Ё 3h `-o>ZrnT{DRCJ@)tT^_t{E XJ?QEη %˄ L%sSb"}6=Z| *J$ *wLeNB5A-ڔ mtF#YWmSS~ÝNЎr}r4]OZq7]LU:_z#AkP~$%B6gޞ'-%NELFDˎ2\efJj Q0wu"#ds_E%&5\szRWC5~xfM.׫ ]{JXh7lJ,SZZn-}.rve}  K6:8EYPԊD</~m9HuxϵfU߿NnWs/T1$`3FxsɶM¸((r@p3C32QR}g(1ȔdvzH`yVJB̝9Pʌz T**nW, yA*b_ TR}؏۔NAʮ5*|P6B@߬Enm&\& y: \E޽y[Y׍oT_nRP%8?;/xfWxO`ۉ~ iw]AVdO*|[<뉪#\)ʨrl<^d/q_JO  dA^EetĝP]gU0-kp]eNu+V[V fJ., iQEIe}o3b` yy@XX07X4tA]C^|:TX=Lh>=`vZ-b/&lݹF q-T-Qoƒ`?-_ to" `w7ٖ FґXC%OM:y.'k'aF.4ydHw4;\˥9ɒ=/#Wo1M1x*S'ȕH w;6p`/V:htc"vԡ@Si|!)MN1ٯJ$Av=w=H' O)8q~ ~=Ujvu2Ud0#4ZbLD<6ٵE@oڶuJ|<&| oף'n ڷw{͵yxPi2Jd8صAze?ݺ&7nJ~uïscn4 Ncr{JT$0a WYRť5 L4#6趭C;s$Х9(ސ׭ Ah>WJVZnBLWΑwz Z1,'tCد\H;5טY W6r+ZZjj< VzX QznV %6igh(ŜŌ0.6&2XhUa,%h2].#b77wCq902O3**@!wlٵ){Yɖ,V};'őOS>/d)l:ezsRG"Gа3BɜWo2f=dk57J?u4BW]a22f>4%[Sѱ#o1B8MߞZsٴ7^r/͜?_J՚ Gr=|D% P_;/b5>S,5sŧƎBy)hdb,崒?{Ws~W7=GvIh8 DžxCTԅDd8qpaQu['d4*wVĥqvttWêToEER3ixrfB+wi5,ƪ> xh/B<[$ccN xE|V *rnwmZx y3p{ojtDY}hHÉ[!CA>oh0f v a>drK e:C+]+YVcp?uJlendstream endobj 363 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1705 >> stream x]{PSwoblIn/`7ֶ]+R*,-TZD@ &jF143[g\Ib(fonŵ;g.@# =GAO7۞ެ]źil l8 k<(`D+sb Rz-+K<~y@ݔ{`bk;SS<⌥35n 3 4,hIHÓ$A^f.&wgiyYL_ZCC0PCۄ[KuVU eL,wX {n+Br^Cn}IQu룞)k[UthuK }JM=Q;TySՐqBza7.Fl$ӤO'ehoéEs]Sf°Π*8&) ղIwi՛+K*/δ:!k`؝HcW:MQSӐ'l97^D.h%JߘzDzh팔tqok=8cpST黠1l #̙Og M#dbft4jb<_ZjKpS W]WDxtpAHf fڄY@3e6CslhHX׼ mb6Xenu\J;9EK@RpK*D"nXV^hDendstream endobj 364 0 obj << /Filter /FlateDecode /Length 205 >> stream x] w7.m/p}vrp[yHA`y` [Gg׃"+O|W3ϪܯʽHKTNY+x=~V$ κ5aMJbQIh$ dIBĦ$!kC`yƜ VBsR᷵b(jendstream endobj 365 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1369 >> stream xkLSwϡPXm&ӝSu:.al*ʭЖB[޶ҋPnÂAuf2L̸,\;f,'9<{Xl {322AlayC@2>ЇSsqa'vfHXc,W+SuJB{s淒6a%o&^HQZ% jKy262ō̡A\;,Te\]9ޞ80lqzYiEeM eall? ۍ2eX<3%]31լ[UqʸnNxcZBQ*G_ƊFZ0 )&(2xZRSZ8[2zA"SUK [ڇ:Ht'rI C%%O)_7M0'ʂ_O .w7 G3?@8<}f c}\VSJ0>#3x>2!B۸$coAiM^P H} fK>޷:ies:ȑΝϫXJ~Og@nNbPI*'$.qkf.e}I[%]QLnCdrS_ $&r+q =vgT82v8P$5ɚjm2@[ZTK'3yU  X,T2ˍ몤Pña2AMtu>v(2>6WY %Cv:]>;smN,Ű?72(endstream endobj 366 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2554 >> stream xe PWǿC Pg( X<@y@( #"(H4PԊhթuuVneigwv̝Μs"ֆDNW$E&{{ye"S"8x1~ f``o5n 1m$$"њME)5 q //_a(z*FF+SiEdRRԂ3AᖒT*Rb1k> R, X2hUce|Bp|Lz$EQcRT^>g|g찹nC26VP%FS,5r\a{=XbEkD6#l|-i[(qh"i$mGgLfcVd2{^U,Aq$SG<{2nV q:#<#,ޅmt `fU0i`4iz%RUÿefs y|M`/E;1f%*AI:b/(QBSt>כS;`ҿtu{ ,{i] T!/_\OEM@$ u_{mf|^9sbl2$$tPik N0gM"'<n-ví:q^D:?-g>q3޹E6˓ByiEW_ []9r[K&q~km70Ӳ>9MuCý{ϹWT 5Cz͚'YCeǑ qE\JƟvNC1g vЋ,KQo{5'hdj/3sOR3 کi/3"l(y(g*$";>5i(oorőܫ.̗(=l׫W ÆOBam+e Zy{KRnwMk?B+45%`H"6n x:<#?w6ך2%e .3NA[d?D]o{YLNwqݔβAk,|ص!jne80>]Gz¿EFW’M3(~];XuYqIa9$)sn1Xa>~ht37 Z, f%REՅuL4DILRfWUe7[M\6v+;G+4Tt貫 Jyuけ&Q{gC';&=ZtGc77dW.)4g$nwnk)kU pc|%{-,n 'n~^xj ۯ}{i"ae_3{Ƣ.tÑs7OƾGW&Co! 
wqP- 'x&, K qy2^hk?g4v説%7r՚fd'mJC|Z;+a/'n6+M+tjoh=y0FPUoYfb:z%89(G 儒8!p=-@ !|$ ju[HA|l+iR=r#鞡3A&V-zzN?|x(ɪ%U BC]B`"[ɭ%6D d"!#o//-oG:9V` PT?q;K|+|HӖ\ALAiaYݯQv;6U(ן -/-"fcY˔-dZ5xłW(5(҄=PVfaL?ᠰza36ÅBƗZ2>S6f VLt2qz/x~%{Ľž\YQyBϨ釰%^Dbd-D'@;}&}h2vCn T3<'XV9\SLgDpBVAjF.qc^#0bmu?>kCX S[J2DH$^Kq*b7e op#oNDGՋq~,NHM =SAI2 E 5:fno ;Jv(/)/ۛ2wڏ8endstream endobj 367 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 346 >> stream xcd`ab`dd N+64O,,M f!CgO/nnC }_1<=9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C53000103012_*ÂO|qwaW[ߙV޽4w~+wsne׋!۹Y}} 5.v*T];G҅?|_8{! 'r\,!!<<܇zl^}?20lendstream endobj 368 0 obj << /Filter /FlateDecode /Length 6144 >> stream x]Y$qfXoc;jk C+eQ$˚=ޝn3& LU LSr8a{Q8/d`=_0p﮾t{?WBնR/nnl[i^9׭R;mϲw6rjoqg|[ 3eq)lھzϛ"!vN.o|uɵԽ3v4rƆYWvq\/a6 u0ߞIX*6U801]U_6 j>ꊁp@T.tw=~-ئa)&x6o8TUjn_o` Oxc).Z6V^,nnS2yL \@:"Y$PqdYδrzoBԲZ;??kֽE y0S:p]\%1T1CGm{_+p("4Z3iMn? 2kc] @}Lݧi+he_ks` {.LTYe$R!pkc \]1Kz \Lb.tDZ=ʌ0;er)_BDe:3¿$3GA@fd/HdMR?Qu%"DD߾ZI #ǦV/Fo'7ɠM6myӁLN~&mHeo+yx5sSCՋ^;p,9UΓY:ҘK[s}_(vEX@wR'Y}m } 6yPf)1>ShI]+ Q'aȶ8 FW{b'2*leqȮ 5+W( (veF*́wrGl&#? )[mCvP=rɿ*˹Dts6n>K6B"(/@&&)1w eh0_333@.OKԇn/K+?9Е'(ix+R."JG+72|x ,Թ~~~ۓ7itNH ЂqEV~؄;J@[ {I>f$(ƂM2#mƟL1V*mt"ߵn!wd>tYrțORekį'gOO| Y$OIr[MXlr^y{\NM^mK]Ѽ㹑6p m wɲ@m5/4EG4Z5^mgzX^m>Jygi@=} ]n"n ]M|G`nlrzti+J_/(/ ̫Ʈ4FJѹbz{U"]by?E]qrfߔ*{Ɖ|@%ybD'׍kcbД]?ƞ$:ll[xXdة,)Z6"J!~\X2Tw0 Ɗv[<FiA6Kđ 9;]69P !6}R#F. PvǢBu@k1y0;#2"|Z1$6[c܋6X (Uђ?P#^Jg_ 8AKv4"*gf'h ч85v&!&g: CqV( 1SSPt]\3L2%YhVXg|rw8,@Iqk8qz^ņ3cpY\M8E62ljo4Up58pY-Ra&:6G7D.,Zʫޥ9ynRVNN2z00Kbb3dDYC,;>i8/npb;]M8fm+L|;Ayt&SLX9w(sK/q9\qR2AcwS90:Nwnz* e:$i#l c/j%hT O ORC 晆m!_!]!tցek0t4`DI@]Agx[sӊZcqX3Mxڟѱ7LE0;0ڀx6|@*^:KN i@FvyjDZY>`΁NHxJ1!65vx$Yۥ񮳖uQv8z--)!G)"ːb(%ܙکi_o!׃w)EKK˙zUۯVrcJ~nRbv"lRn<$a%USZ8:D/2{xs4P>e `t9\8sV>|b .+~=F5aWL:_`A&;f.ai8JyAOS:!(=/Yt T'LZ! '!LvZD!,$(Z({'vv]7$rM-a,$isX8h<ET3&wᲆP\F7qw+Q~  بHtxOхKS?pm29]kua5ymǒ E79k7:3tX(v9V  jUg琢"~[S "*"+H Mͭh*$BKEF3cʾeÍ29Ɛdզr@EjyJpgW1 k@^BuNE"Y.'~)79 @qc,@Ļ_f!=/HHSxuRu lY}l_>3iun4)؋)XZk(g %8JWwjt%48f~Ί.-nezҢ/+{KkU·EOa dgd=]|FɟŽSf]Sq3GJ!sjxV1Q?kXeVXH̱ WU zk]u; ʔѺUFp#O +Ԙ-]Q X@[7<˸}|ݝu?>h7Ug+ӋPt*7*>.B\l䆱7cT2A>AJ$diWj[+&җ;J%0?>D~6CtVHG%InxeTM(UQϛʝgW%?{=Di쓞nGծkqi ɑUf< I}E!6)6ը*7mvGQݗPvܛ ^Jj1HCA SHĹyӽ`/,@}.>MѦ b0jȁ(Ii|fhRFW*ڦS!DM$Euu%Ga{Xp#SlMSL̈́acL+1U;ĖE39]PTH-(TA/ř!в9ְ($**5Pws҅Ovm,R]*?ĀLpR9IjQy.+k;NUHظ9`_Qn"Pή,y(9kcS4ָI3 T:),\oRa_][:dr+(n̮Q+ê×$X,1;mF+@ \7~0Ct0aT5=z7ҥޏ3xg?Ro_뗀eX*ρ1c%6hб8o]gM͑'o"M63IU>7l& I\c"Q6 k N^Q[ʈ>IRW: 3}/_QwLf!Ҹ2<'RV+i7!cA%Ă10M<&B y]]iwDdx 1  .S+n!W`L3?&MK'_u뾼8s+I:IW\C/^XlXq/6p]tw8քǹ O9J[ cx}w7=X!OW\":)* e%݉q}.dw*#Z`%r޼=%6 »է.,&%Ln1Eva;RqWBa.(LSkSx unB FY 0P^xP^\  =&^o_8ss+Ryh 24^3+nQGJ\"2I!.:g#@q?B~L?$) xn"I0G>Nns]W-Z ':D~߳QS3b8z} ?3f\#fZja@a:Q 5iAJ590" cn T}ulҰdIgݱqtc{"vsC'1Iendstream endobj 369 0 obj << /Filter /FlateDecode /Length 5185 >> stream x\K8rQ7jxv0؍;>H>pԭ.ML$dUWOwb@"/7M-6 _5=7uso?hOjxyM+6θ+_UBno^,Mݨ^o1ʉWݻ5~oEk۪Ҵ2Mku4v+]kټaߍ=LM_~޼˗?]~{8^8ﮅvsLZtV(pAꑤjT]n+>eٟ8轶C|U5~Hշ?D@?lTBH`nH]M{28xޡ٠'"'LTCxH|ȯHU-rb+Vmhak#:阅rs?W7~_o@I`ϭe> AdAJN%¬@J5neLuHz.˻n@/>~2= =nAIZ_hwݧl"Wsb_0%OIwL /" qg,'U=ZuP.EJyOA_"?FK56v]zߌA%\_Ơău٭h-R(<|$o-;D<\*xK9>Kjgjz]{'U JN+Vy- p4dtЅ܁ƒßef{x*BZHV!)|^4mM9o[0!m!PhJ ɶ-`[gx n/׏*sV^U~``Ngv*.78?hdd!Z8+XCqmni0-^c`6D7ϙvd|~(Bq$)j?> 03Ŵ.8gv3qcRM!eM)2%+vʌ7ّ<q&HȣK0œ-o 3cMXI`d1u-^ WBQ]ou(9Ӊȍ({*+²& cn#sY:>@]\7 " ϓ N;; ?R(Ə  fMA1~A4Bbg'A*Քx8ܘ uU s2 9]HbtAʧQ؍0/:\QhF`K HYl[†㦅vQC#b!∱~4dL:F#ԐVr}4w~(}$3|d>~ c[9_`i;`=:fr11|TtWhBЃ)]jxb2c ;#m\K)XK\/I<᳒ @#8v$1h JxGţdҮ)~WĨ%1M!h!-nqEYQCzsQ` iS 7 OJK{>=-ttܜ$ׁj 7kA^Ь]k$ s :2 pZ mBVs?r^~L+څ4!&]d~cH<ʆpS=~e6߫cq}&23YL&]ڑ7]!"х |ans|Á1L"Ñǡ ^,dR/rJvjqqwwc<0dc4I&%I,2b"h|̧6ц2m,C&sF'Y=Mm%C^ϯIgjF~‡3JG32 B1_d'/ 
VpȥrCuq .0c/qo8ɣ³9gU1-$ “Igq]Ѹ!(L #}V8 ;+G)𥘢F:4~)[(Ԟm cLx*JKqrWu ̠L-/kEa}.6p8~˾l^p9D΃s%EO tcX:d+@!H:^ZS9veB.ҧ}D,qj8Ԗ[Ц4aBJ\kHbt=S+N[c7t+V8.{*drkw2;Fcv>c ѷ{*Y.# I+V{mVǾheHB4Ҙ}Li$e9*}LfzB+AqI"A /h=/d~z[W˖{Y[Xbj7 ;!.闀HMXNݱ'XHا,㫦ʀt3(2'?2U;Bd6ݲyE-g=I _>"!`Ox3GYC| hHs]3F/sLPwHө&4|1`ل _037EsC;po䬬 eߪ~tiא1d]1ar'1t Y\*+:U)ttײWݏTVjEhQEbS03#]C_)7NטDR<:$XBDJ,!B[P~ԊCHU|1wn}d;p8Fω0s0jrDhLG/ j70Vh9T>UHj8eaJ&~!}> ̚ @JiWHy*395ќ(V<[czUE9pO;l6O$\ byvf k4L(.9}Dرa[O  prSfQ%"4C Kdt-;fJXziw{* (0E04ӧSn <_݉~ &֩&Dzi sF4SOt)_:I܈nS*nSfr5u!!G+)k{4|/2VMRᗧSjҝwNm6po)ߘgjJ&y/6IO)hHϧhcJ=G/`]@NiwzW;`u7ooxK؜4JviUR%xk.f: avy=7 ny fERbÞǑY9b7F('E`ˀ9*8^nK'/wk\y-+/l'уLC &u ]jf?:tuGOck{^-!rKWO{ ! b:kܖ/zŨA tr3dѣ'oq)X$;6zy|w5p?o~Vѷsp&NP$rendstream endobj 370 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 403 >> stream xcd`ab`dd M̳ JM/I, f!Cdž^=<<<,*=3#cxzs~AeQfzF.THTpSJL//THKQS/ f*h)$f$))F(+k꡻ g```La`pe`bdd} ߏ KZ{%k|O[UhS\Vc*vI{|f~~JKLql9~+h-^;̕f`.]QS!ܗ[,НpkcݫwM> stream x]O1 y?@.KdhU1!C_ MUu8Kg2ل-zu:귨O0[UjU 8U.@-&5A*̀zJEo@L00)2Y)`uˏ5NI|j\o%u> stream xmOqW@J4z` Pf6T]:< (>L.B<ď ,Xuںv>m]އ~c@ɩ)NcY~)|1],q]Ջ%h4FM9zzcOp-%Wb!O ]G;osqnn[~AƝSN򈏶Oe2qK'OȌ<~<}u*!x1ض?EHB9m ︟~2ejnF1i-`5b,CE "G"DUYSK7:hٓ͘N1PU=,Wj5:Ɛa3of?K.j]:$mUy@gS vU3X/$!֍NRۤfH֓jIʡ,k.+ceA8(H yy۞05U6s> stream x]Mn! F ?EMɢU cYAdkx2p:˲Ὥ727POxYNK&kƪk_5=-^q(Gu[ [,TGc3(,+򟧌s:fnȺtz9H=a@;=.#DH|9/yFӽ5,(q.KߌZJ:s!endstream endobj 374 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1129 >> stream x{L[UAEGE#j, msR*^T(}@}^h݀@Mc"`Ш3?j\0re2crrNwKOp-+/W6TjZ٢ M4==s;Ӹ]"*i؝ѹG0ǫufxXl2h 䞢B9_*lћe.c Q\w^|z.1b\["oD`h~:a(l7u3̾K `P#6X1~t^>R3V$ rgj% Y mdj 5 ỿݦH\w&">"Ѷ/0NO8``4AD]G`x#h,s QR5_BcZ\!̧Q.$Ӓi~ gP$;n vp:\Fy;>Gѐ/쇁T]%O%f=fpIw|C&WT-ɩ܍BK⮭̨*}˾|/^HŐ7Et<a͹3h.l(*By({_'AFyNj8 eP;{Vo P 3 ?"~7}wGnV|}"N9WcDf"K)q@ Y1&b7|la?iuendstream endobj 375 0 obj << /Filter /FlateDecode /Length 178 >> stream x]O1 y? U!bI  p1!C_ I>wcp=S/tXk;+Sߤ<$]gޔLiXTtU%:DAi3+yRf`HlkQvrpx4j l,{;]4|zZendstream endobj 376 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 834 >> stream xuR]hW>'IFm@cʰ[ZNzW휙]0m&_'5giBX?Xzӡ@/6BT2Z<_{ۉ*yyb v(ZK@ lijp{{PP]UQ7hLZ0fпjjTW̕3>>IԴ;װ(zC }F/r^A)vpw77;+^GrKm Ő;PotKle%޿BfcAppg(2"~,o5UaeqNU-f[69ҰLլhѲ3Gc.8?Eda1b0 aH! !#g;0D@mkh-41pŶS`ֽ{Jl;]? )nzkS|['l2g]fJӗ<71"`̅l:LCs( Še>e9qc4oRY^ZP 0Iwn:_"]Vϡ"B#EvfXZKYGfm;,b纊DO[_!}wwc^SgS|~k`4/yjj՟t<=g=9ǡO2NJ:]AK/A"?q8~W[XygrpBtWyuӨjͧzARVjRj<9̤ 7L&%ɎS(+Jtendstream endobj 377 0 obj << /Filter /FlateDecode /Length 4899 >> stream x]o$9-7yG$@ @.o9=6hm{ndr;ڇM&v\?k`D6›^}{Eܧߛo>ldBU÷Z"Zb 诿ibԘ}zvBn_lws1k~ a"e)Mp|C3E拀m8]w[Mi9IR>cӭWbAFaaBFW_\q5s! O,Ձ-5H*f;  NTm$f4a !aoN$b,u/ DHc(\oHLjEx'"_Π0ġ[̀?oD֋#fBN'!5H"1Z4!,j$P')Nނ( nS -a< ~ՑDw,sB5L0 4nw6_61 5?~Ɠ8zRmnⴛi (-D@;B%x"9FHfR s@N4X͘HY@kmgr)U ca!~֧<-@s~,RDA A)h*24òFW[oѶZ8+;7-#ov feOb޳Zn {Zr#hiyN|+C-oъZዃD_M*F,HS020' 0Mr`??&ny8N6ibʆQlQ5YtrG\XNJeɁ]S֣\8+Q8`B,g5,_lA1LDhUT1eRnյ9ZE!΅7`29gT|TALTe1dc6  ]Iωv̧T+XӼD w};ߓ+7 EL J6)܄nmL=5rXPb=`6?H}Jpl0ҨN!0# Nm82a22X&" ,;?W`,MT{m Y^A+U*i%;r+ݖIt*Wb_F? Pb"" ž(hŹ ` 3݇\p.dGHvO9>\;RfVH3ZWyީcXHbӄQ:5H'#zOc&@x/Xf\1sF!12cFu%3;=364cm Rt\nư#" +4s oe~=wǗn|o8ã7ᬆ!eT3gX3ە2%W6[\б U)u4]"p6 Uk6U h1~c31PтbΕ>3S7=;VT3ʀzb6JUq%1 l$YBuzՒ,K2п:;tc"ӰJn|\NvWOVS/8PorN˝{Ʈ.{ 2c'i{u;^cNJeHmI uGvGĄsv؆. 
a.i }^ef ZNÅ=XMЫ~)?~5 f9 LeejVնTk?; ':-~qL^KFA"Z7 a{JNԼU;9 ,P&'J,ٕɳEj XG Ey\F+ ZQ(^g.-/hpe؅׉B"yν’aC)ذKxAWT9nܮ-Ts;c'c]۝ծuɦ_Z q8C39Mk&/ٚ.yqC(+-њװrsy9mM0wHi彁+GUsp#G,B˥tm:W:ӗqZge)uH+riWX.[u^'u'Eљ:i˹<<hC0Wvn)\P@^D"PΕse:p%mVWuWt퓎W:Y2݈?κ=b1C0x20dQ{P`Fe`kL՗z`֌$-RK)Ҍtf_v G^R3K'lR67O%FURveZ<+KlR!6fع 1x1gBxJ=U<՚ 1l[aK+GpMrBl<^U2;eƶR,p]O MGTKXD45̦ʬnds9/XtR7V5, 7joWIN7Z`z{׬5kECP;+Py=13mwZRkf( >ϢvezʈfA((T$n>}]E3"P(8,@ļj+Nۯd֛o[+S TVsnт6߅O~lço/+hǧ VM7$h Fp织Onr ?do۱M[aԀvkyq@bڧ Y-NjIHGyF1_X}WTat\/Ka`ZH`7 A`G$?VV9xqwWUs脎x?>JpD@V烃dƂf~8  `X+=W: AR6@X!34C !òNƠLtmV߉6iھUٙK%wr)O} c'pHHDDõ<3Ƈo0lzTgקn(&JLѶSauLo ;L w˃p73iLƑ>ظ'Z+|X-3Qw;BEu6 $-x5vDT<ۖ)1M n7*IEYj gQۤ}eoոDFh>Mf ) _\FQmG]Aӏ{sC# Kx5R#apH 7C5;bAvxk$*- ndM8TN%PPyendstream endobj 378 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 908 >> stream x_lSum:t !&Ө$H4m7JXײҮk]vmsow;v[D0t*j'8MH4~&E_4xsߜ琄LB$xGgm=6m4=ѵaZ{ ȥ }n݊[adxqolV7޽65U狌Ǵ63Fs{O3͝Q*ژݎ~c5-zLFRkllVA2,M]($jdHG/P!Prz+rU]vZR.~՞wnxS9kzSU;O Oj<< BP{Io!FKr㺪Lˤ­J׈3#q)jx8 UKa2s'`T!Ȩ$I9.C4 Ӹ[1CYir/Nҩ_Nʼ_~n^;ẁoUÎxT3߽*_ ڙ tĿ &Z@3F=>ϚK&0tSK@1*TM]V[`u<-}xp p>_ dX+ hUF? !y:tsb_nK׶L*WPD! 6~ ? xpo4nRN-BT~t`v( /\Ԉ)ԁsx~>ب:Q1;#e2Y䜩-U6u2C7ʲ0oseZiN` O'endstream endobj 379 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 502 >> stream xmKoQFevH(4AqA2Z|0Vܸ:ʌXZzac W`\7q#ҥ/!<;sb!>.\8EQ/X w$ew,n戴eHe~=X{"GfYI[24DNiwJ^]@4ʸmSW7om1:H.Zfn+e̶ZmUI5 yy[ XMSSJ:C+ϔmdvv[k29'p:Ͷ갬տS_> f F~X,ՋrYJROAendstream endobj 380 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 402 >> stream xcd`ab`dd M3 JM/I, f!C7]?uXyyX|!=L{ w_fF|ʢ#c]] iTध_^竧_TSHJHISOSIP v Vp Cwd o5|, X{r +>]{ؗuyt%usHg_}r?سRپb*f7?0OtIIwcߏ^\\]}hQL@[]w%݋f}?݋p=olnqqpvp/;i {7O?cendstream endobj 381 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 329 >> stream xcd`ab`dd M3 JM/I, jf!C/=<<<,o&={3#cxzs~AeQfzF.THTpSJL//THKQS/ f*h)$f$))F(+k; g```f`a`bdd ? je?$,gQu>ur?|6{Y~~wA-#{]Aw~?V]|yr߁*g,^.Wy&.d[%$${Uo_Nxgɓ&O^r> stream xcd`ab`ddM,,IL6 JM/I,ɨf!CAk7s7BS'``fd ϩt/,L(Q0200 I Nz ^ٙ y) ^zz ~@L<ԌĜ44`נ` Ѐ`M=NCd```e`dc`k3[14!gwљUU]rU!qA%-Οbbi˶g>q!fFOVgvvOS]Ijvyݝutt7wwtwUGdn g-(7s/H:ݻ|ۖ7//!8A6C&#|o-a9ӰL@ |TdžGC|ꔅs6}j=d5SO+P];{œމ3g]ttk7Ie%FE msZ8y4y7^p ${_ٳm̭@_ژܜ#۠)Eݽ3]𝯛cςD :A[iӾ/d[u[%$${MOoo__ϴygl3wľ^{-endstream endobj 383 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 588 >> stream xcd`ab`ddM,,IL64uIf!CLJA<<,~.={ #cxVEs~AeQfzF.THTpSJL//THKQS/ f*h)$f$))F(+kaqPIFjI"SC$c3G , 4y}3k[ s9R+^0 :nպn ݋פ7Vfvq)^nÊ=%Tm [7V)'u_-VfWԆ4gKl..~ܵK8V-Hˋ ;qȆmsg-I<\qkn7<tݻ^ϸߘlQ'Eyzoݜ2VU{>=x r/n}onߩl3.}r-[:7>Ԥ!Q>ZU˺7wwwYb69?"za[[{}oOO_ gN5sմ {Me`,endstream endobj 384 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 283 >> stream xcd`ab`dd M34 JM/I,f!Cܬ<<,7 }_1<9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C50\``````(a`bdd?3qG߷0~բxy؜u̙[r| 87mBU\XBBy8yL;ódKމ}}}&201g>endstream endobj 385 0 obj << /Filter /FlateDecode /Length 10740 >> stream x}msdu?oR% vٮ#WJ HX@2ysNYRI;sow>o};߷ׯWz^ͫ]~ B Ǽ{so~8s}oɗSk1Coq~ w}~yw5aͷ2=7C1Yh|tb5H~F"p7߼x__W% C?)Nyr9v{*'߿J 1]k !J-)U!{Tl!VzH-D Vy(Tq'k4 b &PC|*%T}ȇ`CgKsW.Y/rh] @!8$P Z )X;|")B0ȁCCSGR@6n(TP E'[i  ]Rf$@^ѮϽkؿ']5:4 @`[i,5>'+yC6xsn@p *)PV1#A(ZafjpD=ԁk3:zh a [:S8@rG<ik ƆEV^ҁ+Q͐ $ɧ)DRKD545?V:\r| 8 v Ș<Ȝ"\d J&DɋQ*0!Q@rV](iXyZO 5cO3j[0c? A/CSX} XgY=R`QeA6Jˋ%T P w!H)!¥YPB*Ot@"B4HP++HА\@H-&HВUJ "ڵ Q-O eQ[8ВhBU=%Éb$M+Z2$&`@KVcUM >黎RBV@SV!K ΂ӒPƹ T[,@CV\Y}ڱJzvQXqQ.eD3VBˀ4jM˽20+8 x(Sx;>Db5Re @*8Dg۲NIl u$6cB45 l[.VPdXْܻm;4jb B<@Bw*^_:j@Q\G 4j1d5l:j@Q& c`=a~9"gze3t'd/[o4a4ͷ6*kPWőQfTg >2fe(ӨOF-G:ұrٌ2Q%U{R:$:a+x>11 |^tT~-w[h _^t熸1*q\%q$w}b1IQ#AT Ox#Nq&sx%q &9Ja$B-:-]b1ԚyLX=m:)yV9I<et߬ 9SC! 
A1OUKJT`pN1h>A*O)xM#13Sv0r7Ձ"i`%* QFTHHLVӊ  A0&|/7 Bpᳵ2 mȆuY0qWy1 hYc%XCcTh+XCaOFdHfcU̙[*ɲ˶u6Fot* }ssma#;eYr&Cr6㿨\Fܠj9T. .Ps4iV fn\0KQ4*ZBo8殏:ƘLM4?16yrW7DXlfi1.F\ |Uyü 8D7NgtIa+m,8sl( ƴM})wo/}(αW.n/oO?;S嘑T#X>{yuv黥Pɠ*$H_$Z Sd1WG iԢd5b5 !G={ǥbuy:KV6ߪ~>*o}|#U4~ λ5 q_/x5}QJT`:A~`352/gy2hK71gNOܫJͷoTj^LqpQYoRrIJMUG3-f8&C^,/SV;Ѯ70Β8o2&֫ju0Rշ9tD^tʚeM0vTeFWsQ'{! zh j2R Wqugs:#ۙdnn}(NDo YjPU?Bv0rKv ٽޖu82 nf!;X^L&C9x1^&>enÂz+3V|`gf--H&n)}`NF$G^Ji2#kahdڊ*At&n3k "9fs}ql4p$< `xgVv$iɁmCb/AL*[EPPG f,I_0m£H.0.-6g*pĬma4y&ڗv;&|m& X";줅Fcpo1 2A! P2~י}zg :K 3 5Rĉe߽MHM;_ %aƨrnLۓ@,iQ;:5+3 JA!AjHGF2I؜f9 dh @w@Dq MSs,8ꮈ[(Cglh$kk <[Y,5i.4xi6诳5X>jUhրRf,?lhad>[ -g1-;ǒx??xg vwo<˂.v7tNi~q k6A 7pwF?ޞ}pCgv0i@WѨ2弝F-u824L4jLLiQ duԇ@ :gY=I "pjeF_ӷ7h-?yH!ܵj>ZGiGǀ̣9ZOiT4 Q \AٌI(lFӨڳ"0ڳ"(:/K6 s^oMiN0`G|"J5Z/qUioȗX iH[P4vQKWj ًg@' 7ZCla<ڋl=e C ecmc iZUPmS7 VϚN R7Wq8hyXub6Lبjl9q/ƻ[k!Yd1Emϯ /d212Iܧ$8,yCFVfBv2cD[&?(0J'M7I3A;  vk[ARpnRh=2:VR#h7 oa=<34үZe>cXuRb0lߩ|Rw ţOYVLY\m٭_)qi#ʤC&c>9úJOGb]S9o]NٛH# a'>ե*)i 2)̮+.Kx$1^WעkQbfg4EU#k ^]lV 5d #vu!d+)",6erWBD#t|*?.f] n0 uQs2MWorg-3+t^ 5VF`VX$OZX[ ҕ,\$iӠv/Z$xe-\GdEA"ALX8fD~ Ң Jw%36dSdtnMqdi k.du~2NSG6*G:!h02 }D?OєUf^~Y֨SG,Y+DI9in62J.< jhBg!9dQ]DރIFMI3 Y(Bf;ŁJQ 4͐lXW^!#{f[`lI*6ˉyD4\2jCs휰Irr:ezJDdGEJQNhz#R؀I—.GSbGUO,f]}iܐJ D`V$&MԥΕ59آ!LCNdpduH=AR Є<ӽ/vs!&< Mj" ȦUz>0Ϸ"bvӓjOӔ Nc0#Zz!F xZ J,l*qT1F͸٫ "{ jf(gI@CӐؚOl|`nczU KMIL,^3ƛאㄠhVR bӆDrG=B㘲2ڂ U Ӹ1qUSu*6iTSD]X]LJ9݋f^iʥ5ǁm4]$c̖o^u7,N^b^bÉHRP!j{=\IROYɳ"*FoILgyuv-:qA ֩ 'RkdOIZHG d+.y=wM8&83rZFPuOZQ  Qx*WVYC"픪fS'UuS3`hV^z޼NfGE>U{1vREʑ!Ʊc˺{T("w5ڞoaQ# 3U#+%gR>cGܮ]ƉtzpT´'YoU`mc l2cIAnBM!ωYfonS=cݧtd*{TNbEwiPl)_B[ҳCe (35}): aJ\"nir[YUMٛ%8R֢d&=="hyAF R4=52.k&EgIr]4/TᢍAL@aҵ#giD~mi5y+*7mU۶1z >N͘4{-FilU1fm`4C,irC* )/XgfJ[6Y$uvQ ,I.u8Ή!-RXb!11$q덚ukBZ>Կ(zg]Ѿ+᭺,U/hlMnyěe4l U#hGD 䱦&!6] bs"HLRW$ JE e%MwWQa?@6)+¼,ӧi;rβs97ˌf Йe&RJ\AHfX&n4~Cb`Z[Kd|޸Wh"D_8 Z*K #9c ,/zYG>cG1ݐJҙXfYՌVcOT 5KveyUl`Y3w @,]CbΑ /[%vS|҂*YTYE]SPM|Lid(]Yu7-E˲U -|eXcJ WQZ P[!f2vknbW^Ʒ1S!g*އ EcTgjo67 ,Tږ_SԌ22Xi22CYTY8hyQ4vSnĚnzLM؄Y$vM?zyT{*g+#@^omվ%^ykAIkeAڰz_svxCEW|p:Ѿ1hØY"= <:R䫽 hDo"wcȖ| O_874_`N6PఴIutRGԏql}vVː_]ߘ1pcsË<a p'p`>ͅ1\Cw~t&h PdPbg7 ߃pkO"PqCᯏ#̋S 8+=WgYb?OX;^3QKk^EmOLQOGiQqq^2\Z Y(P/@í·CuE} ϟ}֎>?e4[N\r".?.b&`{i7xc ۲?ؖ-%$1+sمM_<ƿ8p;zD[|3]U~]C݆_"[- }v%W1BBQ(p<B/l:IENS S(|N(3)4NOч=>u19vL7G}jOԲI(;,^'c}7T3b1#OH ^bjOY'il홎tt=fulG%>/eޫlŪ JN:wpΞNpO ?rk !FLqO'-ϮI Xs\`?C jpD #qK߃.<1@u|ʮ=J {8q<?FoQ>+S|g 21?:>_?IO-KNm;v^DRót\(lB'^oE\^ G3R-=L_>]myxy uuv<$ynSyi?/}Z8M<#n-<m5{Se/[G$0Տ<];P?dQqyo߮ |z}j/):zl*SmH-pՊ `>aŖw2[AP5?f$T%رmМ >ܒٟn$eǩA75y1V!QGZ%;uS~o/k4a<~M^6|_ﭛ G:|M̍a 鹖p4*NT-?c6?ltg4[5Dy2^칽?r=釿;󮍾楗H_7\݉/RV&]1X:~4=}&S+<>v8jÎm}(sze혹߉:K}s34 A=rc؈D^,Qn/FJeYp=,'  s"jk/d4x<3zxD-L*cXͱlj˱l3/@Sn#([qܜL[\6#1vE|[# _b50 NumWW7K=> stream xViTW~ESե %"X. 
b(BEPQAٕQ@11 # *`+naQ0 #"3љ3Ƙ/zΙ`L}λ{n s30e@|ʆ7]JUc${3i ˘Ja +̏X +g #9_T(eҤtiY2ԟxLTRGv )4m|jFHvuf:,&5]mw!$MեO:+6.>41)9,h2#_4@x +h9Fliz(P: ` h}Bt=Q̼y7E\z!1.G&>n6Opy&Tǀjf\//a|[a>ގ^l3xfL}a1%)̄M%'ck,8||SYl1>$gk6%>%o/^Mva?L|_.mm=܂[踒{q%V}<}yh?dV @AKM?S]{ħ-;,|B^q ?$h$?(@7I'GI H<*F9Q+D檿5ʥ&:FAd`3G~"LgXo`$b%O`JX>LH?Ufs%*O;C|ѬĬ<զkröVgE,ƮkWг#[h$Xk sEbp֩^n|ɠ lC<H}t,GpbW"\"^j5vv)/]pS> )<1nn+<q:-Z$7O4nK7W Dň.W`+؍+i*F1I ^z񦩘XN ?ߛΚs 0baa&՘5?7%YSg靛9F<Ėcn'E/>xp6y&y me>&^1.α*HrI qdJj95^2 } {0F: B: h0gAOcp0% y'Y}OBgL ;PCa=lBp endstream endobj 387 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2679 >> stream xUiTW骊TʲUADAH"D¨q\&N5(FeQ(5 (vt21&8=3Ր̜;ԩS}{KIc<33c+KބM xS)Sq'B Og Max@HmEUf϶Z>5;-1a]f<&[{|zbB|>Uᗨ٘._ߍ_( Rևgl̊_.1()Yi3aqAD$VAD0B&wBA,#Y^QRVX"ml^R1 g}%jط jj?`3xҵ®eSXE!+mSʮUOCEﻫ#E8*JEhN#| -̰;VhaX[r/\d,I : 5KxVY `<' .Ii>uߊ &a{^ puB7v9/ZkhJRa}큔,h*T}qJm9${2s(s J)\xJW$~IYGNmTd調_E{dpƸizvW^eԈyr}}l#c9{oV ɑȃjt}s:r0gIHLkb/))8*/8T|N-Y}. J`C]`;;`Ȓ&1A0$1ny#*}Zh|NBV;|ZWX^XчI0|sݼ1shn"l,c/{\:Z *σ}ksybs,!9X%f`zZ*pf,HD!(ʬ.2ZCzkP &HP>G瑩B3b4P tH>pl.*(ZyM UxPR!|!t]sH1  |UWU֔`|FC#L0kEvE˫Bd5i*laRi,[eit>wKPH~<{Zf!7&t_N[O%'}?WЌl W.A#حl\(=2钨aRHAHyoһ@P|, tE=vXihK M±A>ln]`2(|7| >Wi(p%k/i8M Qޕ)[_Z]h7%Ʀ[1bíQE Lˁ1c E|w&;%X^p?Ypf0z y/1/ Eэc Fܳ:6^ |^ 4g~R)ЧbϳvayICV?{HdF?q 7 ;ěUKP+}'6aEJw cb)CD?^FRv3Pu܊ZdqxU{56:qok?!"쵨<ԉ*+*" 0VE{Ef˙X́Ikܩbϕ3/ZsmK;\esm}4~-38IǨcǪד7c7endstream endobj 388 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3244 >> stream xW X>/7%#iWzQizbUE୴*r@ B$$| w"7AֵjՖmp]o\vծv~{g;B׍燇y}| /$'ϕ˒r \15},Id/zy+H1ՍX!{{g[Dx,kfg#o`dt6?_M[D=+SN*INIMۘ)ˉ|ɧ-}fG_^z ţ mFQEZ֠ :DH`$E]= f)[uNx>&|AxYY;`qט=~wu  H .w%}. uP*`w0qY7;>!Ѱ5sw1˹did^h5f `Sj!ҡWO%JIk=-|N;,w׈AX *"ñ:]['.XYx^?zӱe`DĿpu)\]:w4و0+ KRd,0BSSLh}?-Рf,5;k8vBṈkpQ1堼b?xAiީEx=Ii.S0ëa+Dļ`pAkU}x1en<9Y7{#鋸i$ʀ% UȅïpXWź6XeNo[N:e֘FI%wtK#8rLեAK%m@FݮP d/ѧ~mUʏh7vfEKc!2ȨT=*1ѣ駫/h?> JMk+U"ҙY`(2j6T pWVSz<=m m0eBHtYD=L^"Fc "\9|&N"Gt`.o[A v-֡pnGw Gh>ή6ŝsNtPP@kՙb\e0섹PeiWuuڦ9H$TfTߤp ˭7Q#IbcaUQR X*q1l;8Q<)b:,+xIu@ hn?,5ݹ} AnXT-)&YrAh3 {n4] Dp>HEMh-B9*d& st"5!PKD=6{-4%U':%% fdrJn>wQ^MKJKKJ60asp%q숴 J5)D-)sG/4_lwgqv: oe8xy&Y+#@YF7$l]a;ax'.8H?I}*EyPO)|ʷ={H1Tʷ,ڤQVPe*6ȫfZNz~LW9AR^3S=x;;-ghSBhJ}]7nl]B0niӫ#b01 ,߄B?E9]K}W7[Y5- خ۾{o`dZ!{iK VG_ p J^tqlVGگl) "f>k7^g/hP477R@B]M\ahX z^O/hVR|R{Q $ lO&ף{܁,~WA,䜨pmݵM7>Kjg=" G\7ߛ #3tiCpm(s]c#8Fb:@6!FhhRiɿIB_Vi,c8t$I֚ .w,YEbH#5>Cq3pl#l> n)á>Ð1i > vA[GNTIXW*uHYSs *l-o##,5|< ^+7kU#\CڱOu'?y[R\c7uC.""㝒 >ۑ⻁GGSk>ׁd影BXmJSeuCg5yDPNĖ_O'FVh t<Kb;b獇r7O5p{xHg:fՄR1L v%=zg C.GAY{m~6UyXl0*K[a~S{ {'1{[ =fpx!!9}=9uO7[> stream x][uv^`# c<'tݫlA(yX:ȐKcXC̯9KA1ٺw.Uvѵlߗ۫n+F.?/~i]+߅-8w-|aiP]evI]/Ikfy#ok,Z'Yj# \sXaryYfueMaߒ۶h-iVW-lBt( jOW7Z׻qD#hz o;hn]C'-}J'Mlzl`wyq~h;}o&kJN)D3 ~=5oDJGΰIkfarXjN*||y& kLai6z皭~7zXA:iթ\k=IFr $4I7/ht|q8-ZNkϚE5Ï]@?l|>;X}A"]YFuYg*ș4խpnOg|XZOt\JBpEN:-؉ZiLA|w@,?~]w"e5%@*h@<&U(z Dq|+)a^+Q2^?̋}B9:f޼8$P2ǧDX{;q=Kv]{J8uB̗Qzd.һ֐@K"uv ['*s!$Jެ^`` xg6u6RpLq {&r]}Cƴ.dd7v@sHp~n"Q[ƃE ϔ"UN9zt<0RP)Щvw: }v*FnoB/Qp"BJ:v@X0 $Sb I<&kovKw/k8Jr#bmXYWy]C؊:aRU:9TVD`*7cApf 35# AX* @YR}Q uPmv90l=,sdkYoI["19SȤ#V]>|Lr ZIz['L{j'2=n<b 9 Z}ZQ5,`` mX0*y,h$rS}Xlv\]IW` j l{3ӄM NsUE`OP(߭tXEW7'~F hig ?ɍ̱Y͒D>Ky^R:i2DJ"5uTEo"z~@@Q‰ l#UITEK+p0Pe &BiZ 7>u8{ Y!_'}r xiD2=saccc43#shUCz cDƼ& ^6K_I 'D(3) }Vut ].*%zy8[fg<~vvJT]g+Z3*iRQ@M`nf7 {D4>GJ?r(e]@ŸR ")at|+$vt:A- K/v/ߘ<Cyi0359iKRA +FZdޮ7&UӞ%7/ϛ/ Nx` wVJ昤0=KKٝY$ /I,:E˭9 ՠ

5;ZҸ 7g/먐PѰ.YL?䡂]\0!@VZ֣̠p !Α*0$XWY^*]_pq|Oi>@s94 }EJcrpy@|~wh*SܭLERN_!cAgZ% coSt7m2͚2p<$?$TRwXeAڂsDcCLnc3;`L 0KZflO YV`49F%, 6ؤB7єl-jV(Th2%+0"]CKi2$jqBX%$>VZL+hÌ2ttxZ%&44V\8#\-Җ* |#?|Lv<+v#Y+:$&_E % &je9[ezݶyBTX2Tk"h{ګGA7pf> H(|*TuO׵VGe$VwS06Od3A*IE}Zmy'ΘֶgN{w444Y-j~TjqBi_u et MU5|Ȼ X5 \9=X b.O.֨A W ] T%M*3(`Y8oi2E&R M]Iv`τ;KoI~Zp0O@~eeDtUZQzC}(IGh)sD0-2YcRMGh^X@ò蛎IL4KMoRao~kb?_K A%g`#/%o8JM4i$? -OWF+HCl;WKԃDZT:B.auih+)p;qr<ԍxv |Š1e,^WF G Iן(@e&cߡGx>/١^X"_BUC4/)N: | -4t 6TRR+MSƧeeQsNTFrJ o~~P( aVQT9̈́KZb錣]ԧ?gFQІ3Rn]a_ e#wfű)C҇=1xYYcXq%eZKŦ̂S_Y8WzU\䡱`م EaQ <YetzCy"y>1ioqMa] i-ƝL: UyRFe]F:8WKP \=êNsTG֑z/%"w42 38'6X69 HʄQuAlơ+TTGPWdϗUoԈi}C[|\u=E${\%`-IM~e8{"xy/`1ǝ?All]YQs x%1nCxc&M rrU|7`Jk2=ziZc6?6p2)maN+%'(!"|~7xMݪo0f)Y_`@i2AxJ>4zE xK++{Cqg~7ũIkjBa6PT6Ĉid9-Qn9.X@<<fDŽzo,0PNԿ\qw͏ź/f&oY;)ُR1tq9ӞGh8?8!ImIQ!.v]UFM.*yU6+;6SSy:c?E$yНt@b.m5q+p*d0N.U{  u'bK^֮$:/5XI^Hgh$edTӯuu+t!|c>{N}kp#EL}YUPʋ3Βl520;V]+nW"*IKu$:ҟ'9\Vs{}8f*yXds R7O$ɭ/|3=(in#GsIL&,)ۇ(y30!T$u&X8 Jf*Z 3ANoTk9<*f_-o1^"\L/ĖC yfI[irn[qpЃ0NE!aq. t˟|hF:|:X&'gsbkx޷<+$+g{8܁+b1qjP;Fs1ցZK9S-!15[ㄧÔ4ph O`Y51)҃ygNf68S%ǖ/n =,ȟ6&?ǥe¿#J.fsF)&Zm*\kxLkN!M 6i?wqsNqϪa*&W0K[GYk+o  Nx4e5H q1aKo-i؏lokV |Gm"_e^ʴ7BN:mӍEsN=IajϬTR3r*)ȂӃ^#@p@ 11rAT#UNab$-+ئI࿭ڥ_?Wm=o93@v(KO=> stream x֥)>͕R-cy5eܘWZ|[Rmz_hF>o6U3'Z ·wO܏q/ܸk[̹6|Υn1aOQfZÚV5o~ Cl6zxw':ds$$W/\Zܼ `2Uk8G‘yJ缵^J[FNkgw͟žE<$4޴`2އW_!53v=,>qf46;Z/EAkK<V2l&).ox`)hr\E>dVrY~Hx lM!uq8+L Kanj,XPV/"-T;cR]Y0iaqx9y|(s k (`@HB r IsG?[ִʩe˽VGُ[WB oŸE᱕K[Ε` DZ0;eڋH,~r($k:$c!dt`eiFa0 y4l4gd?"FG~^/kZq"dmQh?o֋74/h@z?.}KGh#a~5 s1fCi!z+ׂT|EG$S(ʘRdzʎ1[~Lu4)6љ(Gd#(AQcP(a~ hXXB0gGb[-^O&%@&Q[ĉx6쪵R߁\7'GD*QdIL4rB<9.F8J'#Jgc|zݕfs-ty괭&R%Ejý><:GGzIDKAN%N?%~ c1l8~A|9ex\yaCsex5>3\g0PBkc8ѯ,iٜ@ (_d>H|0%Y`awc[:t-=O!nTLIy\O#{ .8U#}vqqd~}I @}%(GZR^Bm(nAښAJhΘބB#@/Gmalvj߳Ď݂Ϗ34R ` ipGZ ('82Qېj(s#OLm}:gȱ(2k~( ϤDRL!hmGiax\, r;ױ0i?7ddyo EqGui(8_z |OSiq$Pl'%RSI=-m > FldG ZJ\'3au!uYvOe- uo»[DmvcmZQlTf}i! 
ae׼#[] o1r.}\~C 9 v*Աt4Ee!,;_&p>Jǫ_5€3":LgbɤR-N\NWev:nxhR#1)!\qʡW(riC-PbJt 6?CmcGp8epcDre !H˞g wobe$tDğوVUQ,-aRV ^O%L+:afGͮ쮒a^KX)>6̜DD c$h ct=Kj'Q6׋p4foC_@`jJXp+C.Pr͵)&Y[ )}bÿl2 mIsoF֭  d_v\Q"x EI5i\{]' 8al\WFڲTd@&wofR)' y`,r~8!OƐG|mSd,]NcO$sQѭj⠯bȲIs/|E xX7 N H.G"o1uh5kJanXQՎȌMlqEV&*ykgr6\Fy70Ҽ!J+@Mod$><+J̈+ 򩚤)vdNuXД69)`Xtjd Q'07=oΚ6QγTI(H HfswT3e&`TeDk(͋8YDP4ˋu/dIS^h9ӳ%{yi"u)f&o=V<_R~_m T 4s{9YQ?ED=1xZxÀ.DWk_q"mOO5 <=Ui.~MMk$X Oel.2sFz ~E/k0щ3 {XXd2bO}(bR0te} 2vnvg|ͯc: 9uN*s>a:{T>҃P(gyU0X03"X,N\\2MH{)Rs;s2t.^?9oU9D_oν1Dpnh[!Ӟ=&V|3"VKoݮԈAՕm?t"Ul5/ØԨ 0 e+`fd"QQXzΖ8{n Ψ_Q`9^03ExV<"֝wQ3S8އ2pkJ+dQsFaʵ4̈p0UZTüZu?Eq MtT/zVv%2XO˪r4t*gQ`1"IB<C1@Ed eN/ɬG h z%-ݦdxRqyN*7"^,n v?eP('#^vO;6~s*T_ɪUbEpl(>eIlN{xPt, pR}27^tmemY^f*?p8{I;^'*1rc USjb)mo~))H'QYE++$xqhIHHNJ+3;؛hb&M7 85t}x}^Q^.'A^$ASd|Lktsb2ы< MK(0J("!16Ԑ2J JZpiPʔhfw@8 雾"vI83$p#qx-gbAă2fqm]&B7x TB(f(<҅*Q󖇞 ʨ woeW(橁Gs#vFsb.4OmKSk;y"˨%!v@r)<EϖmweJںji\o4E7Y-.HD^pMeDԡ9!M74bd}5A| n1s4C |uA0E1;[4!;g[8Pzjq''"yNh=_^t$d pnV)nd>(y"iH;DrM zV bIˈ>@2 v 9d@g-Q " >Qd6=y}<|,j׫Pendstream endobj 391 0 obj << /Filter /FlateDecode /Length 6549 >> stream x]ݏ$q7ීS ,Xi~AAX 4#͌SU$Yl=3b,Vꃼ^\ pu'J^^ M y_WR^jy2W׷Oudw}٭J>xg-6ʉ!ti{žxޭ>\t6H}Ib`7 BW{Bqu+6uG m_L=<•bXE3Tw!N1a:NA6OsFK:5Ʉ:Llwh%lsWka-h+On(4́uyz Gj1>ϧ 80Ҝ.[s7 _cK>>u5@9fͫ^T:J:[9p] n@ML?x'8 t_QZ |ZH#6vA.@HyU 8ݰjTk9 SŖD% >LbINisN'S7.6r\~vu_ M*Z $ }|vDaFs3 TӦSdՀ[GrL6#~20)sT-nZuC${u<[.AnWxB_8fqf/|lp1ۅ=ͺ8z3@ARCXY0=#`"ɇ9{=xyUv";"qQ= oJ ;0nsM?Eo:,"xhv+vqHCĮ0U\mqq V=%C:aL`Zy-,Pk= Z d?3t?Ԧַ%˖sA_kZ)xO']% 5Eޣc3c?W;z4 rXG3(iB_M;ACHY5GKTĢ)i9Tq[S )-3jch)Ba[WX^ttou}W`)E)֣OҺ%Gcg ).kMכ> nPB<#=yx'shKTCH̷jm*K!( WwSD xLq{})H _FnK-?47xz[ 79 D ܶ)͗TrA]n{h*d7m:qcc-Qd0bb4l~1inD([RTjaG-7 4`EKCMT&x;mCmm!86fHI6t2 J,:T11d~3 ƸآMEf{&>8l#PB¦zf/pіؔha:ҜqGқ9'Q!ө.p4|x^/KxD;pJXZþʒoAݡ{K]zm#.71nJa)*X"T,h!_JT`L3Sq%y@#4Ūy>:#xy?yַĚ?a]0,!!EgygKMcL{8tJk} 3%v5is,~W!l4*S\h1E^y(R& pƫOŭ*1|P#Th]z߰uDY2=e1(Z/>FxdGjI6iQ.W|jc0pkZ76pS3H6[KC:0w"g<&dѦ̛^V3<(Q̊cyG28#dm ޟ7ԓΥe(rVXS+ J1ܡ『NO 12; ZU,[aszYRŹvpyAdf^Xvh!M/ARZ`(yt+*VdaAuI{*`쨦smɀ:Ww.Zk ΃E|L x5C,9[+)( "qa hxe]NMk:*#(d,ϟl$Qf6-3.瘺2C h@3-3AN|73aL n"bO9Xx"Sj)Uy㪑d!AM~"il}.tmFxiڟ1lN*R͕=Q2#bУC x}8)fСPuAOo]8y_H:YV-OLKji6IªxXHiƧQa)Ǹ.+"K`QÙQ)Ÿe7jctpuE#PP1lvh6^{hЏ9jzozpUmQa:,Yf{nwٻ@ʶ;U`kJם/C T7CY aJ?/s "O&\xvtYpeg:<h)d,KDQ09%'5O1zq)G+O%k8*vÔ߲ٝ5rǥYEBirMFG X: Y7 VTcxg-d0](S@"*X"4{3JCSDPn-MɴZiʾV*GRX[/"dmR0`S@m>x>#gR'x]Qʔl 66aލO\d.f]bCQ E0maU 9*]C@C|U M2sd.iim:V ! okfᇺ:m"#qg=mb1Gjh2ocA&wF25{b NJRM,C"πϭPzqR͓ViPnyԑLTKqRں*,ȪrQZWH{9'n)32;pΝFFO=;9 >Dc|{ȩܹ$ .?Bot#ML;<ǃRG!);'VI^|tNU-7&WOhnUkBCZw6c"$~LTjߎ9'$0#onx~^1ۺn|-}[* q: 16S\~ψoţ\炭ƻ2ƚׅd?]Be EaH쑮7 [gj\HN!3jOsSf.4aPV <rԞbxpQg>?=yT&-^堯7SlWٽwyܥǎ;ï|Ͽa:^8F@25޼.VC<[v6>%L>C& 0DW%t| ,8=\_b]*MC6U6elbWQ9<3^vO?i1\9;Q6Sϣf%1F3Cr$N-Z._BﴱS+_rL3%:G. tS_=.qTWQ)kәzEcư'I[F|!zϛ3 E{ʐc7jI̮]^^#ImYY #{kzH,s,Z Dh'otso/ p>9wkVuankO? Q~ˊO;߯燜lY٫:~e5T /IЫ14=8/ψ,˘ukU>$ݩteixeЛd|4@rGBZ bȐ'i5/>!udPv!ub6jޖz6mǹm&x$+A?}@V|. 0?\8p m]Q]"CyKujYD|SU^o̷3Y3iㆎ9p&jlכЦɩM_`[nxt8 ,:σG-9S2U iܥ0*lendstream endobj 392 0 obj << /Filter /FlateDecode /Length 6220 >> stream x]K7rv1'1jrǮ!DAPTlI>83T#Qh: G"_&.V\v_Ew A^=[_~z}~i/.'RʾZ^:^oW]kfX]-mUyϪhNm]r_>Zo^x}׳Eݕm ռϳo-E^xXE_]xdL_6%BB͏q^`ͳJHt:G2xx<,g{j{7[X&k?>\yh۫EkӷzK>ΨWlF0FIlT:o>v<ty7nHc!rÀ5Xbkh:5lmqཅ?Xu1؋wT}jWi+ ]g&$)~j蚇>z0%\ބxc>V|^QUQajƸf5LG$7%Z|q}"s;\d҂ve(:5*hDwo{:#O6X]EOAs%ʊehDe]¶swv&yV' 2Y6]9l1?, &6 en>Q/Jf;56}TؓɂQ# RHBQ!y1F׸*-0 C5<a( Pi @GqG@K'7\Q2W4wa3r'k)ߩ]=7+7]ùltQh?uʅqvg* Fv5Ԭ/ٖb R* v(sߏ2m@ 4x!yYqy*Ylq~ *ܓF)1>-v=|SҮv{Y[@&j4.cW׾Ci:nJJwۇ!a}90d<-җ9H+OQ{!ҭ2uFy#@%$Ew+ |i^ wbXk n[b_AV+ak\v w\-~i*ck<[XĐAC#3b耛NG|=N<( 5 }¡p»NGݓg,0A3}\skeߏAdi֯GXI@Zki$;:} ӄ劊đs;E! 
3NN)VCdCd0.Mr/dŁr -=Tm((oԘژ356 YTq[]AS`CK\+߅ RQJ8?,Df>{H߂vQ$fhFs;'W"tFB=9'AK ch*ٔ  529V;jeL)1r_6B|~eZKpaa2M8QO2f@]༕ @@Vu"ZѬ_ufFyQVr¢ l'|VbDt7s|  Ʀ_&ęr]l͘-?XgX6_Mbp;S eđj?RL&G0_%G&Y iqtyN@+RAX2F٩gSZjn+ugzL33|p9f ,'W aCq DɭS,Y$|~Z"BBYٜT( apʳT'2vf+JG|0g:+hղPEþ#:RI1X# ]@$f;%D~]-4n0uR#&bL AJ c[bkC_vN!N!yŲ݌1ֻƐPe؎ԕNѻ$꡻](ZBowAU"S}6$: rH9DotcX) KQX9_y8QKy΁E-2lj+VqtU &E/FvG.V GE06EC<BP"轛 ;M1f{|ځ ] 8}5P+ߧ@%B_X,9MM7}VUnHe]ZÔh,9Ġٽ",,Eo0ʹb mC4̃c 3j:)гK#$csiRW!㴌b{p XrHj^. 2}GMMYp2K)ډo>NFv߶WEdx"%ul4M@DXMu^7]}jC,r*yj#y?+@i20K|kFg&L$]j VKk}{wgh [wBT[xvXE2WO,]*gL=)lދ`Y%xp-9B|Y[,<:ۍr}WXո RF,ƯE(JMHECCr4'UbgvCjO~ :"WqC(pS=r70nN6Q:ٺ6dh[+%&Z|dm[tfYU3\=`VzdL/gX[Ve#dgrr1rGo ΰlX&9"+X58qa}U,t'kG]{)_ߏRUaIҩ[te"Cߌg7Lqņ GFwzhf*JSo6LVc?{bEK%}{G˗IEYә$\.7YҽpܛR%wj+jB貪±~+?LHE}yU/XF0>5PQT@T #vCGY78g hU1\V(Յ>ؽhbY1#shTiʛ,z.͸ddMu Pq=YB8=SuOl5'>),Ya%HterWYQN1*4/N]7R K-!7}4Wcðp|ܼVLgXDD+RfUK.NpǷ!O=jfjp4A ϧ9_>BON܍Md:+XQNLă4FIS ыfPdOTLP**J^D@- sXWu2kjB1(9..2c(`UBu2]r#ݫpOV\m%2fbA u/(] OIW$ eҒԑB:B=XyUbq!([:l\\E"?x;r'^oKG1+1f4}m]FdCrcn[ja2UBzJ'UуaNCsX>x-m?zs( "̽S*\=SVG}j[(X^bI}2w:h-r:Ë %9&s>Sٲ_]4 .x$zf)HOg%tOڵ:yEkCϷWtۃS={*f]qiL⋌X1wa[ !SQ_O>{-gSYwzފdG;R($!:pRn-`IC`)ouAt16Es]iNH%ȡ2dPaOoy׊-[%LBV[Mꔤ[NObىn^.{1{fP̊BtP tPoO&LQh We | Geu 4f wt&L,T1.LX#-Ö߭\cѓ=*Qendstream endobj 393 0 obj << /Filter /FlateDecode /Length 6868 >> stream x]IǕ4w;}Sa]3=DВlrkaAΡnaD H%*3 f3bbB\_{K~Zq7'͓"f{O~K .^}]ąθ6(sr?uEciNyɫk;B_Y]w/M8!BV/[]jۮSͳtm'm~=q C+z{8|Χ߯E+/hyzwMKIKae+TtEi6ݦz]go߹5\ʺS5us\(@Mř;ǃL^Qc)fU>@n,,fE]9hg%DM`J iv%7f*MOybc.}+Pf (3/eȷTM~;G/jYY֮n ͟ X( m'~~{ݗ0po$UAU8r P-D6GtDnUz$%@JN"mۜ7loH4p۱f)pSʉ\ XqrpLJmWO*/(Gܜef3XPf4Y_zONo3dZPܩ)¡뉘[cqh .Om[y.z(ORg (Ϸ<:A=P Mlf}\+X/Mm af63f(\gY2!_7?Y%y+f4OnN H iOWcX ?TC1nD"^~1/H#Ci6Pk ƌ?C~6}H 'Q;,~uuM.~j-ae\>W/Z ~,O]"wk'"P`zdR4LX5?T} Ѓ$7I:OIi|lyDe䜻迁*%& J+20IBӀ䅱uЄhOHЈ5AiN;蚥c`ʑVxPT)ڦstӘcɲD~ke叧LUlVoo}7D#UeOK }Us1D `1\Qcl+B ?8E':*LnP}|UdpZk૛ ;#H }A˛IPblv\9xJ /Yp>''q)#[+D1.8#\7<3Nx0,p-/h ~&.XY 8H;J«0E.N܎$<#iSH Ie ^Vk!-] 1Y mU4Evx,~Qt)~7&-96Q▣@[3{ӟn0i4s`*$%BO/9eWB99i 6\`HCM) :4h+4 VB#-,o jwn+7H-gCt"I}IinKP!WTgl }rq#i5G9K.soVCX \nY":-ϛez]"*TJa0#$3+69+fjOaR1[y6n~!ѴYp@(Y9ʒ |MHᮂZJ׷ +f)Ց832-D8LRq^?~36EY qvJ+}(@Fp0鰾),6}Yf%.4=ڜ2l` siج.|y,2{?W%w&>ä8 JAUjۿ_QJwFp!ge :jM#ODRrZ41',⻸&+~@hQ{' S鲬'z&g=qDc^JE%_/=Y$ {S٥/ڦ< vcѪ VQیIBcSy19"s8"b`X _FIk0q]w&kJQl݌l}"kd t| b+M! 2KѫXc0W8 \R g) VЩZ9͑i^C`J[2v$m'h87;4('yl VضjEl;(A](,gnQT`;ECV8ˈqqEbxl]u >jNcP8H*[c³h*Z:8E#1 8* upPĤ~eh#.$15ɦɠaΎXx2ҝ(5F Z1XOA+MCSVmPc\h70唁NZNUIg[9uC}q;MvNqAbKrb sYkS;1-V\p%$6-rzL;BDEqtC<7@>1 z$;v:3~>yYӰ~)9f:r2'<'TU[,s_Q}. <2 1v9\G%KFYZCl}ޏZw6b6l k鎡*/c."8n|QxJ|0voƅO(/|f*,΄5x 7F1nWT3, ˺.EVu~>+YHp'pJ-JDn :aAr ڜ3UqW;ՓXK~ywYflʰȁR -e|Wd'JceILa/cMr k|"xp\SaT&`m<}!F\w̿Ov`qU@~ YެH"WkyqߗA"aqY%s>Rú6~9Fp\XFEd$S鍗S-]VY* Qb,mO,Iњ埱qZW5; ]{)4]83pyҞ܉4L\QLw(bV?N8Vc\fQ|Nٟ$S4p }Û[ÚR.%6#GP>Vz8W@辣s1)<>܏ R3}y`q"uuFa*犲DP6SOGf-*`hg\0 gY!"?[{['*Lˣ!csW)1WD9HH %YeU4*v%c"tA>z5sw@t]܇>0 TL:O*kBnucm-O&>5<3|b_Vcr!(!X߬']Y+0dblbI݁q_uovZG)|V%w:҆V{Ueg=*p_k6'ܦצ _¬k N}IwJj‹BTvj ́+gc*4_:Ȩ{dm;Zv! 
CROg}*ii~i T׋Vv|wY^N7={מf/מxUWiT?/zdblWWn3[i3,3ݐkTԤ&˂̬؛Bᙡ(7\ueQf)1Kv5S$)CRJK\Njf@X`tml/@ghZ8_յʛ U+(jdlů@(H -kE0I\@)6{}\}Y:?s+07B0>a&.^ݦ &-Y% 8fono .z&eƄ?+0(~ͩ Uuh<[L7FWt ejw,E&?^^hp0j(XlAnf%!g( ҧ_ù-X^)}iQ=|ۃt4Mzheʖ~'=gFzC-gr3(ʧwX`;Ҽ]jbzաهԵ^!v,y( Bբp#Ռtua!a$5@k×q&/uoSl"a\szm>`\aeP,NQR.Ղ{ρqqw䀪 ۽ҍ>jS b<{^}hiGO]8'/a=`f$t78P :չA&'#Ɠɘd޲hC~ kͤf~\p( J$o2}rTS)[_+P3mM* ;)&S I?faa h}yW3k@[ ѽY !͉lƢYUЈjs6{IX)/4K"\E0̙ GH1R9oQl5ՖQcF7w2endstream endobj 394 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 331 >> stream xcd`ab`dd M3 JM/I, jf!C?Y{xyyX|(=L{0fF|ʢ#c]] iTध_^竧_TSHJHISOSIP v Vp Cw}  ʾ9?_]Z]+l3vy7~_g-.,\=S7j.^2Sg.^*WS'/`u[%${uOoO_O爛yx6n?w*/=endstream endobj 395 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 468 >> stream xcd`ab`ddM,,IL6 JM/I,ɨf!Cל=<<<,~ }O=N1<9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C5:E15|Y\04!竛̿|WYYV*WQ[;qIs,|ww}ڇxyɵ^ X]mvu^jwjwaIevabCXwGÔ){&͞ 759fTW7&˗۷GvJi;wYfXtι)kذArm+vs(nLnΖOmV֔B8֪*>.PlӦ}/XkKH>g7ޞޞYsL0kQOOϔ=}SxxTVendstream endobj 396 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 533 >> stream xcd`ab`dd N+64 JM/I, f!Cß^<<,4 }G1<=9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C5300810(1133}pQLV1,(p[9ߘ 1X}; ga]9M H,8 =lyrM `} g_>aOo!nߩߧwئwO̟l@ͭWANxÌ~u_Qo}ZyRwVߍؾZcwo.*g|Waq6E浟^3 1T%j[$ߡM]l͛35-#DBR.0{ Npb <zh*ϑ}} >Sqendstream endobj 397 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2886 >> stream xypWkcUb@ S̑@BҤ# !CKua7C!C! :29fI:6dͤhF+ͷy>2v WY4-?PRn[4{HIm(D!"P}(~B($gmfJ׬ZĊs4\1Z=PLQ+M+̐natP%eI3S{'H_۳+ayO˺ _m).-ɈMH/xfbbXJ${$"Hl&-JEb;$!bDN%#ZRacK!BBO%O~>>_"/Y7?" \B0E쩅}GC2I*TI_[>qT~tAm֙AEj~ `txեct w5QRײJ Jl} -41b>W硑#f]GBNzxƞwQ$AKpKLߜRdZ:ѬKsGv;{?֬\~"ƼA^h( \bLLu"x~9|zl`5oۢ YyMA^VoY[6ID)P^ȧEu 8^y"IՂGрTkD[VG*j*Y vBEiLh~԰6GIi.C,[gTes<A-_ȕq ^rR3˒(핃ER~F~Xf0Jj5->wkz5X@bn訪4*+7[T$~U% (DߠG.-fFmj3ohh,SQ{0ZZXUۨ,֖U ~u:ѽ?mG .~}-S\_quLȍ}q\O+sd@[g^5kqUt&D hՁ/|i^mpɱ}r ?`G c()T@Jəmý'G>ǡ-Ph^+%-6RSrsbebpi(_5 *éKzmr8ťlKޯ89E'mkj32sE l~tot=o,ǭ dl/ o&FϝcM-m ycƳ2?r;tNlꨤMd)@^kLW:8 eJ(!; .P鴡nxGOi"{24 idЯU& 4pט3[[;kh= z01z`Mu?SH siuX -gڲ~4V~ek -䎅S[$Xvg=7{2$n`>@30 nT&dF9fv~ S3Ȉ4z{Flņ\l1mTYkr˪,h5Fg6RxmUoq/B\[g!E! wp ȷY%2Q|Tbͬ$}?45Dq(c;[ akFsQM'Cap4m =+ ?۴z_stӃ Ѭc%c4t&r3Gɱ=a,D;;PisR]cjl9C_A\GpfHνX*o.u@(ՇrY@Wՠ>.k/ZnH\[U ګ[G;&WhJr>íC{D^nn"ɯbBp`()12ؙHsig,uԁw^r0œ|D%}0n/m1/:QՒU=ǿu[u ufF*0$Z7enj?}4ٹ(=@GN[F-cBQ.!?eƊgP‡Ղ ͲmN,Zщ/Vo63=iցff\Z%Ls7jD*5EBLōq#Q:=@buzam̴z;8,b1VX z2&m>U\Ա`XFkRC$?Tjkx#@#310)$+ZcZuzzmt{w2y1{)/nؗ4?^+M6u ;EvSR/͸FvVkHKG:KW>Z[lԂS0rgzɱ+Jwc-W dz㧞oT&UHJ}Ne\ݤJ h$$xbܻ|[O4izx s+UzMgC?rsT1 6|@u0:X98+kV笟p|Bϣo@IAV^=!eeV_6:qb(ETF%vQ |l(9enp}l3VeY[\9sendstream endobj 398 0 obj << /Filter /FlateDecode /Length 4145 >> stream x\Ko$r Gб'i7ߤX;9؈a{- UZnc̮٩*d?fZZNc;!b=?VϫӪd _\TW'N== ]\~zv3I*NϾ;S)䒟eJ'UUVI]IkzZ KgE?IV|Kca+훋Yfu2eCD2omYULw+n}Pd)( ]~8YKZE +[ ;6HWr`غ-|`=?8Rp+I\a?Nv$¹b==/s͋m*L<(՚tq, I [7oJ_[鶳+q(0y<G(}<e\Tq-^5@f;>ZRIO~rfOV18~$<3웕R u"$*\ђJ,.Eq8Fb v{Z]lwqadn@ŞӷXs%+ԛmd2YԻȥ(R+n,y }L TxΆ}ɔJfnx\*}6]ysڦro# @N[/( k^$FK/-چNga@E6ђ(eLW]Rs ל;Cn赆uE_>=/@2<UJ%E'5 +I(S0*oIF竨XE(S$톙I/ue:ƣj2T0^8N=e86QLkQkQI&l[MRfNLQWmT5  ŴFu:~JUzM/26ʢslF MCTҞA;yG2E5{±hi|ۨ6)?mT۰mc%jyQmq`$jr$j#QCN#fTwXZȑj Q1G7ʞRqLiYs ^?$+aNGG. 
j-+MZ<]zJb `qz ,,>H}6ҏr jiK|tq`˙0<-me;ߥ'EW% A~5i Ǘf %">YU )b,v) SWrp.9Kx_ʝe#XwEn9x1hp$$|guu!mQ Q3RH,}U<d#N,7DT8DFrnl2vɂqh&<6M㑄,]E>pfh~3{1g'?Ĭc]n$AdCjrI@'YH#vvG/< zVmè{]h"10Z!>+j:-TV- 5K+Ńm@IX*aNP`2m)Am+ K.A7>"8.D^t;SXT>-A01.5 K>!dzpq]2ܼRyFCXfqvo $s;|6]3a>("2.rfU|6Ex?plGN"Wl5SiZzlSpl&l挮c89ۣRUh8 !]P=/泗șOeس"O*F%[W0ɤS|5Mm~2_OJħtaWYY3Cw;x#6vEQ1%̤"3 0t?m2;)RLiYc=?XH2ňb<`1X צ4;?f1':P {pi0H_tVA^xBaҭffϪN=^im枈cҼ7S7sjӣk]撃Fbep;9\Bb)z HۃD0!'WQ[I bPWJ蘑[r%C)5]4iQ,*AxxU.%_wA޲!C*|(S׉;Xhc;0Ą~IXW¼##H4Q <\.kZlĽC(ذ;Š RlҾmdq<̴#;Ǥj[p}VFnW3& 6HeE -}|ʖ4~{ctqGQwD`41^~METnSOA]d9zBLPMT ^rf S^ZCc#x#fw3-eul/,OFc<ŒZiOwqGWjXB>6.^O#-oE܎yHUBOQ{1l?G8JjQmPf|pH`gZA&Hc':Nsp+tH J0ra/>,'t_ٮsIQimXG&éRM{"Z_!&X|52n =rݖ3'DWV8J0p˭X0OaKyKɇ=Ls~$Rq_zS$KW 7_)c]RfBbg5KezJ) Ha?!نͲp[G9(.z$e(¢M}w2LX7Y2ZsM7ÌS0L.ZE84< ֢P8 6dl']4 эȆ_YYG%yFy};Sc- +IﰀY\! XL.Y`$ˮ#d$4d1uq͐lUT€ k-k{Piqnދ &.g`ON̡9=49Vu}d 5|ŢX7". MI#Ț|n5@?+ qUH fhm֣Od-{h!|pJȺ?^`R>f~&̐h.45DOX,Y%r?#7K8'/toCn{{ A ڿ5]21rhaF,NTM0<̑NW R!:=¼+bHWuAx,ƽq_>d%qG@FvNc|pU4ztٚˋQvg%> stream x[Ks7"%p%SIYN\)B.e1>=3I*؝Wtukfwgqzmvo/ξ~m{Ӽͦ6{\3t[~?d˸i>d{Bcw!^BM:fx9nbg&&Ep0@S((nױM*⡘* S 5C0 pL!'ɼq[e1zXڭjmXvP.W([: aR\+>wCNΐ-$efJ [i7uQE§\]LmHG}jўxMh飇N t,~c-mSr ţma{Y' 0>e&Y +! ]:&XR4LP*ar~ c2 _S8HaN5h2h\Թj\z|oO K ӔTd:Cx)pѢmR(媠f>K4pP93oz_nH?\yQo ʇ}F_gW 90[Ol%M="Hr8: ]ܚtU&7%- g>Gg6~NJ/g|6EbZJv9ٮX@> O'G<_~΂!РQ[s^uw4tTypǝh/.h_ͫ[ؐ ~e2g&6rke8E'nWC@sų ?T0IDCT/c*ysw!6kQDg%ot=>s{?O;J !w(i®rWh2 49ZKS'$zQc0)qCh ZO4 ԣ$AEj5A ʟ~Z2'z#c<9:Fi~[+j*r*x[HWS<%䖻 4RqK eCmzqYȮtVڠU͂}bD̪*t~I,zyE^QSNIEwIj%N),A*i _+3-nԢ?]Q~}) ՂKsOE C?Ao 鳳'/~ ܤs|6N㪴ײLGDrc wN^ԾGgW`E {8ʊ eM4O|9 8H˂9cAHf%3t: .ovO :2U|:(d R +@Ү9%s4 פ\ j)D,{DV%4 AkռPAн>: 0ʓ n\714C:$KG\(4Bul,̻L%c\B$yhGcCfHC2 :Xmù\/x,?Vi9&BrY̵^"[|/nYQh͎~;B7c/w JC=O_?%j:"&I D, ˪vP5$)~4h5Q8$]-TB#[Z56ԫ6VU!Ćt,h@;Jyge'%9pm1Y!<!x`f]昩K!}gh|%_P²T82zޤu\ì9 nZ!#'G9y˕J>a{mS@c(%(eP/w|lZ\aeQE&h0E*YIg_NMN%I0$D/ZhU_\[fXK2"|cŚOZ"畻{ ,Q)HMy*PhXsW($}S@#p~SQ~x6To:( BI:R_<>oO. iYȥb.!:)1J̹W8J[լ/.A8aD65WrTBj,Tk^9=Tkʤh:ܦ )MLq,5f֘ͣ9Q),R"b#bG IL懙@W_Q,Lpeٌ/_5To(j[7!B4MY鋢i#Gsܐd`lZGx4Y06IיDqC`>˜4CX,q܆XVӛZfa:<ɜ6g‘Pa?(P" P"CfZ ԙgnZdF? f! BКzZy0 tidv:YH90,%o(ݗGW=<5+#!FpBAСNAB@5(GͭP+(Uv(TS9$%W38 N qV#b&w9HgGwx`pVOCABY\u% uyc:TD5R@/~uRgӲf҆) TkBL*q9s{Z{ 4\Ӝ@wޅ&x# ƪw`)|YPm,! 
^Q7f(Q|d4O'<$f&38ǜǣJhc+x}SoNw`,L 'XXE&O& W9/1K}YݸfeU賍G<݋wP!v"&@w9݌%hO9=kW34מ/~8vZĥUN/Vx_=&a夗76\HJFJwxbq hћ״:{*JgrNE&.N%[,0ht^Q͚VL;]2/شr s ,⥤xoB.LJ§wjĜO+%>)٦C;PUlͿ&eBlRhdq CRX8‘9DZklAy$9'LI ɻyCayY.ΌEq]ߥ2+ŇI01D / `pK|#ßM8k:q*Mdzqrendstream endobj 400 0 obj << /Filter /FlateDecode /Length 4518 >> stream x\KHrw#xX!¨w8|X;fbacwzcPYR_?ۙ* С YUY_~~454/;4_nv6~{/ ]&ta6F =Tnoڐ\ՍmmF WLJ-Vp.B+nwRʺiD-7uô>gC{`h!|Yv`xc!ؑ!va~ky휗8oRJ}NOq-tvL; U[Òn?rJz9Vi#ЊW~[ǭɇˇ9uǬ`7f0m*V+)]l&4ixhds~u4WkgDҪ6_yi8iYpkiV35jo)'' hp0V3hBrQY?ĝ0fp"C^S-T;Hs*awz|aEVFܨpIp[%ŀe -F*\"Mw1|O`Duav 4a7~ap6&kYow T@(0" y."\?y7e _7Y mfV\d/ MMmdLk?^.'Ճ0fB$XtM$Ǫ`%n?!d7iTi<< y!|@ρzJx5;Hhr;*;`a .;Tnꀕ +Fh⛶絛2 F1*XX#.Et5p h?mQJF &"v =j1}m}&qrIN6#QLs-`W,uh\ &,%Rv{[H%]Y^HBIMoȍ%MNBtꬓ>u8QIQx-6+L-^$-B3ҨQ'-i@}:\U "ljA3kO#y+'׃"g8w7 $9^@bA2bHu4{x|̬Q?p=RhV0GJNͯGAŰGpb,"!- 5q}:>wwZ ՗\vsMbBG.`*H9ME¼> IqoTeAHY$?D;6h0>VUHimn-^Oh^P6)>>ʐϱ/u :5'{L0m2YYku2Z()WARR=*p}4,zυNXCPL\L#FQ@!1O ̈Kt[Cc9~Lj6Sym!{c6RFG`ܖ҄P |_ jD&Y.ݢt pC1yFW0_U]juD%h-+7k]^Om:q1c PzaL#p<΀$_yE&|)({0{!=nkg~ąuXpgQ8 1g(^zy'1a T荏&LDז0\>(9;u$IOpR0$0m3w~cAG!} Ѡwi9BVEP-bp6ܹF(mySɀXh$J0&')AU_9sZT\A=O=~&J+OPj!ΒēM<"YؗFij/V\zqLAtsl \M2W3xT(>֊!'@ܨ>{pmaz0p7/!ˉ: }]n?>Tm5ohsjn4ۡ{o~;B˞:RaU ||IS*j= ie``.ϰtLE-J^(v*)DyLcU=Ib}(R慓ԦipaRv5%Ϟ[4u*2̡M pIL]$+QO%`11@HL a4rS07m=QnO=PݘKD-5dk#/޹%kJąOq?t9`Xު6"ƤHG=:ߧZrQw{ٜLZ,@k@$H>ǧx) xH{?[" osEjఀEl|X$ tBAh있\{%V4ډ91髀ͯuY]=,fH{M2b22Rd Wx( *F Kk #Mr-oP>%Jj ۔flr58(\ՊyAQu0'l>qx.ܷdkЭo5.OfjںH(ouȋ4l C s& \ʺ)}Bg ,L-~QsSknŘp]N0ec絑C I.Tkkfe'}$ #܇,HqDZOKQ ?+ WI Yū3~6ꋧX̾+k'Dꊺjy!w}0b<>Eo .!ܔK5Aթ V8WPbHfJ_>|6K! ~q2K|X "((S{6K2_S 3鷩Y`M(w{ :>Lb#jYJ^;Ɣ0텉蝇2兩WsćH7 1s:BZ*/g)ʣ}'CL J#3G_ 8"=~WΝrjn#^7{7Nh+˧v. `Θ"|scɒX.sT(R7ko N4Pf H<7ݡ+.vcxvj!ьz)BMW}tӇc|ǦJξ:0*b7L 6\ء9c]NLa!)T!(96z\*Iyia)SSs _5?PKF/j5Y{0_ <Y|Wi0*kR5r!zEh9R(TLkIy\e=,,@p/*5Z GoyqAp: _bl 0iVL;é/Hnwچ :VIQ. /(T/F-nop@R*MZ É|$SR ;C`^n!Ma0 ~7N'Ζyy̿~%Z ;/jV͡$ѾdʋB.q\K_08č9%yq%zt,Mɵ?ynx0g\Oxo N ?94.if fI&ea?NMR#m$V-RQR?kU>gW~7v\%R`&ҤwI=OKMhr L.^,D|6 YZa`f2:c1mG#/ı}UD?@VpbvA)uP܄f}%#,endstream endobj 401 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2429 >> stream xUkp^auyL$Ӑh4<3 y%KkYXd=lɲ]=dmْc0&ghH M1@$mI;m\Qu&]C;Ͻ{=9[|p*^jë_#%]Z|T],Hڜ +| yb}wx_ޟ{|YZ|ו(ULZ%~i&@|!TA|]4rTh7H֓DLN%JڻXsoTj(ebUTh':bB"ʔ b)HT-9*R(h5!RH%%VIbN~S*Jd Uh5bL%X;娭ip52M(ؓXT7>HZR]©% L ,.JMA)ߢ#ԤTIݸK]>⿪Trݻʻi5np?K%e}a+jVE$&d+`2{;möc;nl#hX3gڲ\^<\5+_ UAƘ>瓼dA>jvAQ(R u:[As01N pcY#`nٵRZV9 pXKꪷ w;aOSFPd7Ϣ۳-"beEicUZ|d~h(7#047yis_;].8T rڦ+/,@coqWxod2z#g4UzdRސ1ox;?+p. |P 8L&[ y ~zjmkFϞ OpeLILJG{VeF0>K#J߸KלV /ƥ"3c8ݞq+E1N; @16Kb1۴jU &ƁtWbq.o0w@cҷ6p8)tL{f(V8 ǝf95˹oH'3ק>}NTMNCjv h%(pegs'+A[NEgϒn^(.NCYfd0o0fԣ>-w@^蕦jtгT5*P;.1 *إ~vO}aSrpPevZ4\M'G2ՖN\Og._#+>On-ϫkV;8[mm-ڭxAys s漬=rΊ ueP0kw@-MFiHn6ՉDb،7gRU[%# nPN_ CxWZjhEf&==p9EzZͨ&0v\hZ!*t\yܹw0AL~kxi`H[*mZeYsA̹[? |s>>m}LIg7F g/II]^gdZ5d,-D‡S}$K|9s{DUsDHXj>8&..| ]C3C^+X\C {oS"+%6/8^8}wؽEv=: 馜s!-2UrCڪbvjR+ Sd$zƟ {:X1Y;ox;x5?ær9mnYsA]ؘiocfMDړpE&Np?IV.Y!( uԶ_X+a5cWb06>/Hv ÁH}5go.yżU?vbVw zZ [nbI4~兩nÞYr(z<wmU[Ue=*_ܲccMurD%hٯEzx&]RE9Y;*@e!~ bh0[RJl.ɽЬxxcBL ՟-lq =➻w_cؿ,endstream endobj 402 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 371 >> stream xcd`ab`dd N+64uIf!CO/nnC/ }S1<%9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C5P0000103012_i eO_^Pw*ˋ T\)S^m. 
forecast/inst/doc/JSS2008.R0000644000176200001440000001634414474112174014713 0ustar liggesusers## ----load_forecast, echo=FALSE, message=FALSE---------------------------------
library('forecast')

## ----load_expsmooth, echo=FALSE, message=FALSE, eval=FALSE--------------------
# library('expsmooth')

## ----expsmooth_datsets, echo=FALSE, message=FALSE-----------------------------
bonds <- structure(c(5.83, 6.06, 6.58, 7.09, 7.31, 7.23, 7.43, 7.37, 7.6,
  7.89, 8.12, 7.96, 7.93, 7.61, 7.33, 7.18, 6.74, 6.27, 6.38, 6.6, 6.3,
  6.13, 6.02, 5.79, 5.73, 5.89, 6.37, 6.62, 6.85, 7.03, 6.99, 6.75,
6.95, 6.64, 6.3, 6.4, 6.69, 6.52, 6.8, 7.01, 6.82, 6.6, 6.32, 6.4, 6.11, 5.82, 5.87, 5.89, 5.63, 5.65, 5.73, 5.72, 5.73, 5.58, 5.53, 5.41, 4.87, 4.58, 4.89, 4.69, 4.78, 4.99, 5.23, 5.18, 5.54, 5.9, 5.8, 5.94, 5.91, 6.1, 6.03, 6.26, 6.66, 6.52, 6.26, 6, 6.42, 6.1, 6.04, 5.83, 5.8, 5.74, 5.72, 5.23, 5.14, 5.1, 4.89, 5.13, 5.37, 5.26, 5.23, 4.97, 4.76, 4.55, 4.61, 5.07, 5, 4.9, 5.28, 5.21, 5.15, 4.9, 4.62, 4.24, 3.88, 3.91, 4.04, 4.03, 4.02, 3.9, 3.79, 3.94, 3.56, 3.32, 3.93, 4.44, 4.29, 4.27, 4.29, 4.26, 4.13, 4.06, 3.81, 4.32, 4.7), .Tsp = c(1994, 2004.33333333333, 12), class = "ts") usnetelec <- structure(c(296.1, 334.1, 375.3, 403.8, 447, 476.3, 550.3, 603.9, 634.6, 648.5, 713.4, 759.2, 797.1, 857.9, 920, 987.2, 1058.4, 1147.5, 1217.8, 1332.8, 1445.5, 1535.1, 1615.9, 1753, 1864.1, 1870.3, 1920.8, 2040.9, 2127.4, 2209.4, 2250.7, 2289.6, 2298, 2244.4, 2313.4, 2419.5, 2473, 2490.5, 2575.3, 2707.4, 2967.3, 3038, 3073.8, 3083.9, 3197.2, 3247.5, 3353.5, 3444.2, 3492.2, 3620.3, 3694.8, 3802.1, 3736.6, 3858.5, 3848), .Tsp = c(1949, 2003, 1), class = "ts") ukcars <- structure(c(330.371, 371.051, 270.67, 343.88, 358.491, 362.822, 261.281, 240.355, 325.382, 316.7, 171.153, 257.217, 298.127, 251.464, 181.555, 192.598, 245.652, 245.526, 225.261, 238.211, 257.385, 228.461, 175.371, 226.462, 266.15, 287.251, 225.883, 265.313, 272.759, 234.134, 196.462, 205.551, 291.283, 284.422, 221.571, 250.697, 253.757, 267.016, 220.388, 277.801, 283.233, 302.072, 259.72, 297.658, 306.129, 322.106, 256.723, 341.877, 356.004, 361.54, 270.433, 311.105, 326.688, 327.059, 274.257, 367.606, 346.163, 348.211, 250.008, 292.518, 343.318, 343.429, 275.386, 329.747, 364.521, 378.448, 300.798, 331.757, 362.536, 389.133, 323.322, 391.832, 421.646, 416.823, 311.713, 381.902, 422.982, 427.722, 376.85, 458.58, 436.225, 441.487, 369.566, 450.723, 462.442, 468.232, 403.636, 413.948, 460.496, 448.932, 407.787, 469.408, 494.311, 433.24, 335.106, 378.795, 387.1, 372.395, 335.79, 397.08, 449.755, 402.252, 391.847, 385.89, 424.325, 433.28, 391.213, 408.74, 445.458, 428.202, 379.048, 394.042, 432.796), .Tsp = c(1977, 2005, 4), class = "ts") visitors <- structure(c(75.7, 75.4, 83.1, 82.9, 77.3, 105.7, 121.9, 150, 98, 118, 129.5, 110.6, 91.7, 94.8, 109.5, 105.1, 95, 130.3, 156.7, 190.1, 139.7, 147.8, 145.2, 132.7, 120.7, 116.5, 142, 140.4, 128, 165.7, 183.1, 222.8, 161.3, 180.4, 185.2, 160.5, 157.1, 163.8, 203.3, 196.9, 179.6, 207.3, 208, 245.8, 168.9, 191.1, 180, 160.1, 136.6, 142.7, 175.4, 161.4, 149.9, 174.1, 192.7, 247.4, 176.2, 192.8, 189.1, 181.1, 149.9, 157.3, 185.3, 178.2, 162.7, 190.6, 198.6, 253.1, 177.4, 190.6, 189.2, 168, 161.4, 172.2, 208.3, 199.3, 197.4, 216, 223.9, 266.8, 196.1, 238.2, 217.8, 203.8, 175.2, 176.9, 219.3, 199.1, 190, 229.3, 255, 302.4, 242.8, 245.5, 257.9, 226.3, 213.4, 204.6, 244.6, 239.9, 224, 267.2, 285.9, 344, 250.5, 304.3, 307.4, 255.1, 214.9, 230.9, 282.5, 265.4, 254, 301.6, 311, 384, 303.8, 319.1, 313.5, 294.2, 244.8, 261.4, 329.7, 304.9, 268.6, 320.7, 342.9, 422.3, 317.2, 392.7, 365.6, 333.2, 261.5, 306.9, 358.2, 329.2, 309.2, 350.4, 375.6, 465.2, 342.9, 408, 390.9, 325.9, 289.1, 308.2, 397.4, 330.4, 330.9, 366.5, 379.5, 448.3, 346.2, 353.6, 338.6, 341.1, 283.4, 304.2, 372.3, 323.7, 323.9, 354.8, 367.9, 457.6, 351, 398.6, 389, 334.1, 298.1, 317.1, 388.5, 355.6, 353.1, 397, 416.7, 460.8, 360.8, 434.6, 411.9, 405.6, 319.3, 347.9, 429, 372.9, 403, 426.5, 459.9, 559.9, 416.6, 429.2, 428.7, 405.4, 330.2, 370, 446.9, 384.6, 366.3, 378.5, 376.2, 523.2, 379.3, 437.2, 446.5, 360.3, 329.9, 339.4, 418.2, 371.9, 
358.6, 428.9, 437, 534, 396.6, 427.5, 392.5, 321.5, 260.9, 308.3, 415.5, 362.2, 385.6, 435.3, 473.3, 566.6, 420.2, 454.8, 432.3, 402.8, 341.3, 367.3, 472, 405.8, 395.6, 449.9, 479.9, 593.1, 462.4, 501.6, 504.7, 409.5), .Tsp = c(1985.33333333333, 2005.25, 12), class = "ts") ## ----etsexamples, fig.height=7, fig.width=9, echo=FALSE, fig.cap="Four time series showing point forecasts and 80\\% \\& 95\\% prediction intervals obtained using exponential smoothing state space models."---- par(mfrow = c(2,2)) mod1 <- ets(bonds) mod2 <- ets(usnetelec) mod3 <- ets(ukcars) mod4 <- ets(visitors) plot(forecast(mod1), main="(a) US 10-year bonds yield", xlab="Year", ylab="Percentage per annum") plot(forecast(mod2), main="(b) US net electricity generation", xlab="Year", ylab="Billion kwh") plot(forecast(mod3), main="(c) UK passenger motor vehicle production", xlab="Year", ylab="Thousands of cars") plot(forecast(mod4), main="(d) Overseas visitors to Australia", xlab="Year", ylab="Thousands of people") ## ----etsnames, echo=FALSE----------------------------------------------------- etsnames <- c(mod1$method, mod2$method, mod3$method, mod4$method) etsnames <- gsub("Ad","A\\\\damped",etsnames) ## ----ets-usnetelec, echo=TRUE------------------------------------------------- etsfit <- ets(usnetelec) ## ----ets-usnetelec-print,echo=TRUE-------------------------------------------- etsfit ## ----ets-usnetelec-accuracy,eval=TRUE,echo=TRUE------------------------------- accuracy(etsfit) ## ----ets-usnetelec-fcast, fig.height=5, fig.width=8, message=FALSE, warning=FALSE, include=FALSE, output=FALSE---- fcast <- forecast(etsfit) plot(fcast) ## ----ets-usnetelec-fcast-print,eval=TRUE,echo=TRUE---------------------------- fcast ## ----ets-usnetelec-newdata,eval=FALSE,echo=TRUE------------------------------- # fit <- ets(usnetelec[1:45]) # test <- ets(usnetelec[46:55], model = fit) # accuracy(test) ## ----ets-usnetelec-fcast-accuracy,eval=FALSE,echo=TRUE------------------------ # accuracy(forecast(fit,10), usnetelec[46:55]) ## ----arimaexamples, fig.height=7, fig.width=9, echo=FALSE, fig.cap="Four time series showing point forecasts and 80\\% \\& 95\\% prediction intervals obtained using ARIMA models."---- mod1 <- auto.arima(bonds, seasonal=FALSE, approximation=FALSE) mod2 <- auto.arima(usnetelec) mod3 <- auto.arima(ukcars) mod4 <- auto.arima(visitors) par(mfrow = c(2,2)) plot(forecast(mod1), main="(a) US 10-year bonds yield", xlab="Year", ylab="Percentage per annum") plot(forecast(mod2), main="(b) US net electricity generation", xlab="Year", ylab="Billion kwh") plot(forecast(mod3), main="(c) UK passenger motor vehicle production", xlab="Year", ylab="Thousands of cars") plot(forecast(mod4), main="(d) Overseas visitors to Australia", xlab="Year", ylab="Thousands of people") ## ----arima-auto-fcast,eval=TRUE,echo=TRUE,fig.show="hide"--------------------- arimafit <- auto.arima(usnetelec) fcast <- forecast(arimafit) plot(fcast) ## ----arimanames, echo=FALSE--------------------------------------------------- # Convert character strings to latex arimanames <- c(as.character(mod1), as.character(mod2), as.character(mod3), as.character(mod4)) arimanames <- gsub("\\[([0-9]*)\\]", "$_{\\1}$", arimanames) ## ----arimafcastsummary, echo=TRUE, message=FALSE, warning=FALSE, as.is=TRUE---- summary(fcast) forecast/inst/doc/JSS2008.Rmd0000644000176200001440000017272514473635572015255 0ustar liggesusers--- author: - name: Rob J Hyndman affiliation: Monash University address: > Department of Econometrics \& Business Statistics Monash 
University Clayton VIC 3800, Australia email: Rob.Hyndman@monash.edu url: https://robjhyndman.com - name: Yeasmin Khandakar affiliation: Monash University address: > Department of Econometrics \& Business Statistics Monash University Clayton VIC 3800, Australia title: formatted: "Automatic Time Series Forecasting:\\newline the \\pkg{forecast} Package for \\proglang{R}" # If you use tex in the formatted title, also supply version without plain: "Automatic Time Series Forecasting: the forecast Package for R" # For running headers, if needed short: "\\pkg{forecast}: Automatic Time Series Forecasting" abstract: > This vignette to the \proglang{R} package \pkg{forecast} is an updated version of @HK2008, published in the *Journal of Statistical Software*. Automatic forecasts of large numbers of univariate time series are often needed in business and other contexts. We describe two automatic forecasting algorithms that have been implemented in the \pkg{forecast} package for \proglang{R}. The first is based on innovations state space models that underly exponential smoothing methods. The second is a step-wise algorithm for forecasting with ARIMA models. The algorithms are applicable to both seasonal and non-seasonal data, and are compared and illustrated using four real time series. We also briefly describe some of the other functionality available in the \pkg{forecast} package.} keywords: # at least one keyword must be supplied formatted: [ARIMA models, automatic forecasting, exponential smoothing, prediction intervals, state space models, time series, "\\proglang{R}"] plain: [ARIMA models, automatic forecasting, exponential smoothing, prediction intervals, state space models, time series, R] preamble: > \usepackage{amsmath,rotating,bm,fancyvrb,paralist,thumbpdf} \Volume{27} \Issue{3} \Month{July} \Year{2008} \Submitdate{2007-05-29} \Acceptdate{2008-03-22} \def\damped{$_{\mbox{\footnotesize d}}$} \let\var=\VAR \def\R{\proglang{R}} \def\dampfactor{\phi_h} \raggedbottom bibliography: JSS-paper.bib vignette: > %\VignetteIndexEntry{Automatic Time Series Forecasting: the forecast Package for R (Hyndman & Khandakar, JSS 2008)} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} documentclass: jss output: if (rmarkdown::pandoc_version() >= "2") rticles::jss_article else rmarkdown::html_vignette fig_width: 7 fig_height: 6 fig_caption: true --- ```{r load_forecast, echo=FALSE, message=FALSE} library('forecast') ``` ```{r load_expsmooth, echo=FALSE, message=FALSE, eval=FALSE} library('expsmooth') ``` ```{r expsmooth_datsets, echo=FALSE, message=FALSE} bonds <- structure(c(5.83, 6.06, 6.58, 7.09, 7.31, 7.23, 7.43, 7.37, 7.6, 7.89, 8.12, 7.96, 7.93, 7.61, 7.33, 7.18, 6.74, 6.27, 6.38, 6.6, 6.3, 6.13, 6.02, 5.79, 5.73, 5.89, 6.37, 6.62, 6.85, 7.03, 6.99, 6.75, 6.95, 6.64, 6.3, 6.4, 6.69, 6.52, 6.8, 7.01, 6.82, 6.6, 6.32, 6.4, 6.11, 5.82, 5.87, 5.89, 5.63, 5.65, 5.73, 5.72, 5.73, 5.58, 5.53, 5.41, 4.87, 4.58, 4.89, 4.69, 4.78, 4.99, 5.23, 5.18, 5.54, 5.9, 5.8, 5.94, 5.91, 6.1, 6.03, 6.26, 6.66, 6.52, 6.26, 6, 6.42, 6.1, 6.04, 5.83, 5.8, 5.74, 5.72, 5.23, 5.14, 5.1, 4.89, 5.13, 5.37, 5.26, 5.23, 4.97, 4.76, 4.55, 4.61, 5.07, 5, 4.9, 5.28, 5.21, 5.15, 4.9, 4.62, 4.24, 3.88, 3.91, 4.04, 4.03, 4.02, 3.9, 3.79, 3.94, 3.56, 3.32, 3.93, 4.44, 4.29, 4.27, 4.29, 4.26, 4.13, 4.06, 3.81, 4.32, 4.7), .Tsp = c(1994, 2004.33333333333, 12), class = "ts") usnetelec <- structure(c(296.1, 334.1, 375.3, 403.8, 447, 476.3, 550.3, 603.9, 634.6, 648.5, 713.4, 759.2, 797.1, 857.9, 920, 987.2, 1058.4, 1147.5, 1217.8, 1332.8, 
1445.5, 1535.1, 1615.9, 1753, 1864.1, 1870.3, 1920.8, 2040.9, 2127.4, 2209.4, 2250.7, 2289.6, 2298, 2244.4, 2313.4, 2419.5, 2473, 2490.5, 2575.3, 2707.4, 2967.3, 3038, 3073.8, 3083.9, 3197.2, 3247.5, 3353.5, 3444.2, 3492.2, 3620.3, 3694.8, 3802.1, 3736.6, 3858.5, 3848), .Tsp = c(1949, 2003, 1), class = "ts") ukcars <- structure(c(330.371, 371.051, 270.67, 343.88, 358.491, 362.822, 261.281, 240.355, 325.382, 316.7, 171.153, 257.217, 298.127, 251.464, 181.555, 192.598, 245.652, 245.526, 225.261, 238.211, 257.385, 228.461, 175.371, 226.462, 266.15, 287.251, 225.883, 265.313, 272.759, 234.134, 196.462, 205.551, 291.283, 284.422, 221.571, 250.697, 253.757, 267.016, 220.388, 277.801, 283.233, 302.072, 259.72, 297.658, 306.129, 322.106, 256.723, 341.877, 356.004, 361.54, 270.433, 311.105, 326.688, 327.059, 274.257, 367.606, 346.163, 348.211, 250.008, 292.518, 343.318, 343.429, 275.386, 329.747, 364.521, 378.448, 300.798, 331.757, 362.536, 389.133, 323.322, 391.832, 421.646, 416.823, 311.713, 381.902, 422.982, 427.722, 376.85, 458.58, 436.225, 441.487, 369.566, 450.723, 462.442, 468.232, 403.636, 413.948, 460.496, 448.932, 407.787, 469.408, 494.311, 433.24, 335.106, 378.795, 387.1, 372.395, 335.79, 397.08, 449.755, 402.252, 391.847, 385.89, 424.325, 433.28, 391.213, 408.74, 445.458, 428.202, 379.048, 394.042, 432.796), .Tsp = c(1977, 2005, 4), class = "ts") visitors <- structure(c(75.7, 75.4, 83.1, 82.9, 77.3, 105.7, 121.9, 150, 98, 118, 129.5, 110.6, 91.7, 94.8, 109.5, 105.1, 95, 130.3, 156.7, 190.1, 139.7, 147.8, 145.2, 132.7, 120.7, 116.5, 142, 140.4, 128, 165.7, 183.1, 222.8, 161.3, 180.4, 185.2, 160.5, 157.1, 163.8, 203.3, 196.9, 179.6, 207.3, 208, 245.8, 168.9, 191.1, 180, 160.1, 136.6, 142.7, 175.4, 161.4, 149.9, 174.1, 192.7, 247.4, 176.2, 192.8, 189.1, 181.1, 149.9, 157.3, 185.3, 178.2, 162.7, 190.6, 198.6, 253.1, 177.4, 190.6, 189.2, 168, 161.4, 172.2, 208.3, 199.3, 197.4, 216, 223.9, 266.8, 196.1, 238.2, 217.8, 203.8, 175.2, 176.9, 219.3, 199.1, 190, 229.3, 255, 302.4, 242.8, 245.5, 257.9, 226.3, 213.4, 204.6, 244.6, 239.9, 224, 267.2, 285.9, 344, 250.5, 304.3, 307.4, 255.1, 214.9, 230.9, 282.5, 265.4, 254, 301.6, 311, 384, 303.8, 319.1, 313.5, 294.2, 244.8, 261.4, 329.7, 304.9, 268.6, 320.7, 342.9, 422.3, 317.2, 392.7, 365.6, 333.2, 261.5, 306.9, 358.2, 329.2, 309.2, 350.4, 375.6, 465.2, 342.9, 408, 390.9, 325.9, 289.1, 308.2, 397.4, 330.4, 330.9, 366.5, 379.5, 448.3, 346.2, 353.6, 338.6, 341.1, 283.4, 304.2, 372.3, 323.7, 323.9, 354.8, 367.9, 457.6, 351, 398.6, 389, 334.1, 298.1, 317.1, 388.5, 355.6, 353.1, 397, 416.7, 460.8, 360.8, 434.6, 411.9, 405.6, 319.3, 347.9, 429, 372.9, 403, 426.5, 459.9, 559.9, 416.6, 429.2, 428.7, 405.4, 330.2, 370, 446.9, 384.6, 366.3, 378.5, 376.2, 523.2, 379.3, 437.2, 446.5, 360.3, 329.9, 339.4, 418.2, 371.9, 358.6, 428.9, 437, 534, 396.6, 427.5, 392.5, 321.5, 260.9, 308.3, 415.5, 362.2, 385.6, 435.3, 473.3, 566.6, 420.2, 454.8, 432.3, 402.8, 341.3, 367.3, 472, 405.8, 395.6, 449.9, 479.9, 593.1, 462.4, 501.6, 504.7, 409.5), .Tsp = c(1985.33333333333, 2005.25, 12), class = "ts") ``` # Introduction Automatic forecasts of large numbers of univariate time series are often needed in business. It is common to have over one thousand product lines that need forecasting at least monthly. Even when a smaller number of forecasts are required, there may be nobody suitably trained in the use of time series models to produce them. In these circumstances, an automatic forecasting algorithm is an essential tool. 
Automatic forecasting algorithms must determine an appropriate time series model, estimate the parameters and compute the forecasts. They must be robust to unusual time series patterns, and applicable to large numbers of series without user intervention. The most popular automatic forecasting algorithms are based on either exponential smoothing or ARIMA models. In this article, we discuss the implementation of two automatic univariate forecasting methods in the \pkg{forecast} package for \proglang{R}. We also briefly describe some univariate forecasting methods that are part of the \pkg{forecast} package. The \pkg{forecast} package for the \proglang{R} system for statistical computing [@R] is available from the Comprehensive \proglang{R} Archive Network at \url{https://CRAN.R-project.org/package=forecast}. Version `r packageVersion('forecast')` of the package was used for this paper. The \pkg{forecast} package contains functions for univariate forecasting and a few examples of real time series data. For more extensive testing of forecasting methods, the \pkg{fma} package contains the 90 data sets from @MWH3, the \pkg{expsmooth} package contains 24 data sets from @expsmooth08, and the \pkg{Mcomp} package contains the 1001 time series from the M-competition [@Mcomp82] and the 3003 time series from the M3-competition [@M3comp00]. The \pkg{forecast} package implements automatic forecasting using exponential smoothing, ARIMA models, the Theta method [@AN00], cubic splines [@HKPB05], as well as other common forecasting methods. In this article, we primarily discuss the exponential smoothing approach (in Section \ref{sec:expsmooth}) and the ARIMA modelling approach (in Section \ref{sec:arima}) to automatic forecasting. In Section \ref{sec:package}, we describe the implementation of these methods in the \pkg{forecast} package, along with other features of the package. # Exponential smoothing {#sec:expsmooth} Although exponential smoothing methods have been around since the 1950s, a modelling framework incorporating procedures for model selection was not developed until relatively recently. @OKS97, @HKSG02 and @HKOS05 have shown that all exponential smoothing methods (including non-linear methods) are optimal forecasts from innovations state space models. Exponential smoothing methods were originally classified by Pegels' (1969)\nocite{Pegels69} taxonomy. This was later extended by @Gardner85, modified by @HKSG02, and extended again by @Taylor03a, giving a total of fifteen methods seen in the following table. \begin{table}[!hbt] \begin{center}\vspace{0.2cm} \begin{tabular}{|ll|ccc|} \hline & &\multicolumn{3}{c|}{Seasonal Component} \\ \multicolumn{2}{|c|}{Trend}& N & A & M\\ \multicolumn{2}{|c|}{Component} & (None) & (Additive) & (Multiplicative)\\ \cline{3-5} &&&\\[-0.3cm] N & (None) & N,N & N,A & N,M\\ &&&&\\[-0.3cm] A & (Additive) & A,N & A,A & A,M\\ &&&&\\[-0.3cm] A\damped & (Additive damped) & A\damped,N & A\damped,A & A\damped,M\\ &&&&\\[-0.3cm] M & (Multiplicative) & M,N & M,A & M,M\\ &&&&\\[-0.3cm] M\damped & (Multiplicative damped) & M\damped,N & M\damped,A & M\damped,M\\ \hline \end{tabular}\vspace{0.2cm} \end{center} \caption{The fifteen exponential smoothing methods.} \end{table} Some of these methods are better known under other names. For example, cell (N,N) describes the simple exponential smoothing (or SES) method, cell (A,N) describes Holt's linear method, and cell (A\damped,N) describes the damped trend method. 
The additive Holt-Winters' method is given by cell (A,A) and the multiplicative Holt-Winters' method is given by cell (A,M). The other cells correspond to less commonly used but analogous methods. ## Point forecasts for all methods We denote the observed time series by $y_1,y_2,\dots,y_n$. A forecast of $y_{t+h}$ based on all of the data up to time $t$ is denoted by $\hat{y}_{t+h|t}$. To illustrate the method, we give the point forecasts and updating equations for method (A,A), the Holt-Winters' additive method: \begin{subequations}\label{eq:AMmethod} \begin{align} \mbox{Level:}\quad &\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha)(\ell_{t-1} + b_{t-1})\hspace*{1cm} \label{eq:3-44a}\\ \mbox{Growth:}\quad &b_t = \beta^*(\ell_t - \ell_{t-1}) + (1-\beta^*)b_{t-1} \label{eq:3-45a}\\ \mbox{Seasonal:}\quad &s_t = \gamma(y_t - \ell_{t-1} -b_{t-1}) + (1-\gamma)s_{t-m}\label{eq:3-46a}\\ \mbox{Forecast:}\quad &\hat{y}_{t+h|t} = \ell_t + b_th +s_{t-m+h_m^+}. \label{eq:3-47a} \end{align} \end{subequations} where $m$ is the length of seasonality (e.g., the number of months or quarters in a year), $\ell_t$ represents the level of the series, $b_t$ denotes the growth, $s_t$ is the seasonal component, $\hat{y}_{t+h|t}$ is the forecast for $h$ periods ahead, and $h_m^+ = \big[(h-1) \mbox{ mod } m\big] + 1$. To use method \eqref{eq:AMmethod}, we need values for the initial states $\ell_0$, $b_0$ and $s_{1-m},\dots,s_0$, and for the smoothing parameters $\alpha$, $\beta^*$ and $\gamma$. All of these will be estimated from the observed data. Equation \eqref{eq:3-46a} is slightly different from the usual Holt-Winters equations such as those in @MWH3 or @BOK05. These authors replace \eqref{eq:3-46a} with $$ s_t = \gamma^*(y_t - \ell_{t}) + (1-\gamma^*)s_{t-m}. $$ If $\ell_t$ is substituted using \eqref{eq:3-44a}, we obtain $$s_t = \gamma^*(1-\alpha)(y_t - \ell_{t-1}-b_{t-1}) + \{1-\gamma^*(1-\alpha)\}s_{t-m}. $$ Thus, we obtain identical forecasts using this approach by replacing $\gamma$ in \eqref{eq:3-46a} with $\gamma^*(1-\alpha)$. The modification given in \eqref{eq:3-46a} was proposed by @OKS97 to make the state space formulation simpler. It is equivalent to Archibald's (1990)\nocite{Archibald90} variation of the Holt-Winters' method. 
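To make the recursions in \eqref{eq:AMmethod} concrete, the following sketch computes the point forecasts of the (A,A) method directly. It is illustrative only: the helper \code{hw_additive()} is not part of the \pkg{forecast} package, the smoothing parameters and initial states are assumed known rather than estimated, and the argument \code{beta} plays the role of $\beta^*$.

```{r hw-additive-sketch, eval=FALSE, echo=TRUE}
# Point forecasts for the additive Holt-Winters' (A,A) method, iterating
# the level, growth and seasonal recursions with known parameter values.
hw_additive <- function(y, m, alpha, beta, gamma, l0, b0, s0, h) {
  l <- l0; b <- b0
  s <- s0  # seasonal states (s_{1-m}, ..., s_0), oldest first
  for (t in seq_along(y)) {
    s_new <- gamma * (y[t] - l - b) + (1 - gamma) * s[1]    # seasonal
    l_new <- alpha * (y[t] - s[1]) + (1 - alpha) * (l + b)  # level
    b <- beta * (l_new - l) + (1 - beta) * b                # growth
    l <- l_new
    s <- c(s[-1], s_new)  # roll the seasonal buffer forward one period
  }
  # Forecast: l_n + h * b_n + s_{n-m+h_m^+}, with h_m^+ = [(h-1) mod m] + 1
  l + seq_len(h) * b + s[((seq_len(h) - 1) %% m) + 1]
}
hw_additive(ukcars, m = 4, alpha = 0.6, beta = 0.1, gamma = 0.1,
            l0 = mean(ukcars[1:4]), b0 = 0, s0 = rep(0, 4), h = 8)
```

In practice, of course, these values are estimated rather than assumed; Section \ref{sec:estimation} describes how.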
\begin{sidewaystable} \begin{small} \begin{center} \begin{tabular}{|c|lll|} \hline & \multicolumn{3}{c|}{Seasonal} \\ {Trend} & \multicolumn{1}{c}{N} & \multicolumn{1}{c}{A} & \multicolumn{1}{c|}{M}\\ \cline{2-4}
& $\ell_t = \alpha y_t + (1-\alpha) \ell_{t-1}$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha) \ell_{t-1}$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha) \ell_{t-1}$\\ {N} & & $s_t = \gamma (y_t - \ell_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / \ell_{t-1}) + (1-\gamma) s_{t-m}$ \\ & $\hat{y}_{t+h|t} = \ell_t$ & $\hat{y}_{t+h|t} = \ell_t + s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= \ell_ts_{t-m+h_m^+}$ \\ \hline
& $\ell_t = \alpha y_t + (1-\alpha) (\ell_{t-1}+b_{t-1})$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha) (\ell_{t-1}+b_{t-1})$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha) (\ell_{t-1}+b_{t-1})$\\ {A} & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) b_{t-1}$ & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) b_{t-1}$ & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) b_{t-1}$\\ & & $s_t = \gamma (y_t - \ell_{t-1}-b_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / (\ell_{t-1}+b_{t-1})) + (1-\gamma) s_{t-m}$\\ & $\hat{y}_{t+h|t} = \ell_t+hb_t$ & $\hat{y}_{t+h|t} = \ell_t +hb_t +s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= (\ell_t+hb_t)s_{t-m+h_m^+}$ \\ \hline
& $\ell_t = \alpha y_t + (1-\alpha) (\ell_{t-1}+\phi b_{t-1})$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha) (\ell_{t-1}+\phi b_{t-1})$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha) (\ell_{t-1}+\phi b_{t-1})$\\ {A\damped } & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) \phi b_{t-1}$ & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) \phi b_{t-1}$ & $b_t = \beta^* (\ell_t-\ell_{t-1}) + (1-\beta^*) \phi b_{t-1}$\\ & & $s_t = \gamma (y_t - \ell_{t-1}-\phi b_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / (\ell_{t-1}+\phi b_{t-1})) + (1-\gamma) s_{t-m}$\\ & $\hat{y}_{t+h|t} = \ell_t+\dampfactor b_t$ & $\hat{y}_{t+h|t} = \ell_t+\dampfactor b_t+s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= (\ell_t+\dampfactor b_t)s_{t-m+h_m^+}$ \\ \hline
& $\ell_t = \alpha y_t + (1-\alpha) \ell_{t-1}b_{t-1}$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha) \ell_{t-1}b_{t-1}$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha) \ell_{t-1}b_{t-1}$\\ {M} & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b_{t-1}$ & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b_{t-1}$ & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b_{t-1}$\\ & & $s_t = \gamma (y_t - \ell_{t-1}b_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / (\ell_{t-1}b_{t-1})) + (1-\gamma) s_{t-m}$\\ & $\hat{y}_{t+h|t} = \ell_tb_t^h$ & $\hat{y}_{t+h|t} = \ell_tb_t^h + s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= \ell_tb_t^hs_{t-m+h_m^+}$ \\ \hline
& $\ell_t = \alpha y_t + (1-\alpha) \ell_{t-1}b^\phi_{t-1}$ & $\ell_t = \alpha (y_t - s_{t-m}) + (1-\alpha)\ell_{t-1}b^\phi_{t-1}$ & $\ell_t = \alpha (y_t / s_{t-m}) + (1-\alpha)\ell_{t-1}b^\phi_{t-1}$\\ {M\damped } & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b^\phi_{t-1}$ & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b^\phi_{t-1}$ & $b_t = \beta^* (\ell_t/\ell_{t-1}) + (1-\beta^*) b^\phi_{t-1}$\\ & & $s_t = \gamma (y_t - \ell_{t-1}b^\phi_{t-1}) + (1-\gamma) s_{t-m}$ & $s_t = \gamma (y_t / (\ell_{t-1}b^\phi_{t-1})) + (1-\gamma) s_{t-m}$\\ & $\hat{y}_{t+h|t} = \ell_tb_t^{\phi_h}$ & $\hat{y}_{t+h|t} = \ell_tb_t^{\phi_h} + s_{t-m+h_m^+}$ & $\hat{y}_{t+h|t}= \ell_tb_t^{\phi_h}s_{t-m+h_m^+}$ \\ \hline
\end{tabular} \end{center} \end{small} \caption{Formulae for recursive calculations and point forecasts.
In each case, $\ell_t$ denotes the series level at time $t$, $b_t$ denotes the slope at time $t$, $s_t$ denotes the seasonal component of the series at time $t$, and $m$ denotes the number of seasons in a year; $\alpha$, $\beta^*$, $\gamma$ and $\phi$ are constants, $\phi_h = \phi+\phi^2+\dots+\phi^{h}$ and $h_m^+ = \big[(h-1) \mbox{ mod } m\big] + 1$.}\label{table:pegels} \end{sidewaystable} Table \ref{table:pegels} gives recursive formulae for computing point forecasts $h$ periods ahead for all of the exponential smoothing methods. Some interesting special cases can be obtained by setting the smoothing parameters to extreme values. For example, if $\alpha=0$, the level is constant over time; if $\beta^*=0$, the slope is constant over time; and if $\gamma=0$, the seasonal pattern is constant over time. At the other extreme, naïve forecasts (i.e., $\hat{y}_{t+h|t}=y_t$ for all $h$) are obtained using the (N,N) method with $\alpha=1$. Finally, the additive and multiplicative trend methods are special cases of their damped counterparts obtained by letting $\phi=1$. ## Innovations state space models {#sec:statespace} For each exponential smoothing method in Table \ref{table:pegels}, @expsmooth08 describe two possible innovations state space models, one corresponding to a model with additive errors and the other to a model with multiplicative errors. If the same parameter values are used, these two models give equivalent point forecasts, although different prediction intervals. Thus there are 30 potential models described in this classification. Historically, the nature of the error component has often been ignored, because the distinction between additive and multiplicative errors makes no difference to point forecasts. We are careful to distinguish exponential smoothing \emph{methods} from the underlying state space \emph{models}. An exponential smoothing method is an algorithm for producing point forecasts only. The underlying stochastic state space model gives the same point forecasts, but also provides a framework for computing prediction intervals and other properties. To distinguish the models with additive and multiplicative errors, we add an extra letter to the front of the method notation. The triplet (E,T,S) refers to the three components: error, trend and seasonality. So the model ETS(A,A,N) has additive errors, additive trend and no seasonality---in other words, this is Holt's linear method with additive errors. Similarly, ETS(M,M\damped,M) refers to a model with multiplicative errors, a damped multiplicative trend and multiplicative seasonality. The notation ETS($\cdot$,$\cdot$,$\cdot$) helps in remembering the order in which the components are specified. Once a model is specified, we can study the probability distribution of future values of the series and find, for example, the conditional mean of a future observation given knowledge of the past. We denote this as $\mu_{t+h|t} = \E(y_{t+h} \mid \bm{x}_t)$, where $\bm{x}_t$ contains the unobserved components such as $\ell_t$, $b_t$ and $s_t$. For $h=1$ we use $\mu_t\equiv\mu_{t+1|t}$ as a shorthand notation. For many models, these conditional means will be identical to the point forecasts given in Table \ref{table:pegels}, so that $\mu_{t+h|t}=\hat{y}_{t+h|t}$. However, for other models (those with multiplicative trend or multiplicative seasonality), the conditional mean and the point forecast will differ slightly for $h\ge 2$. We illustrate these ideas using the damped trend method of @GM85. 
\subsubsection{Additive error model: ETS(A,A$_d$,N)}

Let $\mu_t = \hat{y}_t = \ell_{t-1}+\phi b_{t-1}$ denote the one-step forecast of $y_{t}$ assuming that we know the values of all parameters. Also, let $\varepsilon_t = y_t - \mu_t$ denote the one-step forecast error at time $t$. From the equations in Table \ref{table:pegels}, we find that \begin{align} \label{ss1} y_t &= \ell_{t-1} + \phi b_{t-1} + \varepsilon_t\\ \ell_t &= \ell_{t-1} + \phi b_{t-1} + \alpha \varepsilon_t \label{ss2}\\ b_t &= \phi b_{t-1} + \beta^*(\ell_t - \ell_{t-1}- \phi b_{t-1}) = \phi b_{t-1} + \alpha\beta^*\varepsilon_t. \label{ss3} \end{align} We simplify the last expression by setting $\beta=\alpha\beta^*$. The three equations above constitute a state space model underlying the damped Holt's method. Note that it is an \emph{innovations} state space model [@AM79;@Aoki87] because the same error term appears in each equation. We can write it in standard state space notation by defining the state vector as $\bm{x}_t = (\ell_t,b_t)'$ and expressing \eqref{ss1}--\eqref{ss3} as \begin{subequations} \begin{align} y_t &= \left[\begin{array}{ll} 1 & \phi \end{array}\right] \bm{x}_{t-1} + \varepsilon_t\label{obseq}\\ \bm{x}_t &= \left[\begin{array}{ll} 1 & \phi\\ 0 & \phi \end{array}\right]\bm{x}_{t-1} + \left[\begin{array}{l} \alpha\\ \beta \end{array}\right]\varepsilon_t.\label{stateeq} \end{align} \end{subequations} The model is fully specified once we state the distribution of the error term $\varepsilon_t$. Usually we assume that these are independent and identically distributed, following a normal distribution with mean 0 and variance $\sigma^2$, which we write as $\varepsilon_t \sim\mbox{NID}(0, \sigma^2)$.

\subsubsection{Multiplicative error model: ETS(M,A$_d$,N)}

A model with multiplicative error can be derived similarly, by first setting $\varepsilon_t = (y_t-\mu_t)/\mu_t$, so that $\varepsilon_t$ is the relative error. Then, following a similar approach to that for additive errors, we find \begin{align*} y_t &= (\ell_{t-1} + \phi b_{t-1})(1 + \varepsilon_t)\\ \ell_t &= (\ell_{t-1} + \phi b_{t-1})(1 + \alpha \varepsilon_t)\\ b_t &= \phi b_{t-1} + \beta(\ell_{t-1}+\phi b_{t-1})\varepsilon_t, \end{align*} or \begin{align*} y_t &= \left[\begin{array}{ll} 1 & \phi \end{array}\right] \bm{x}_{t-1}(1 + \varepsilon_t)\\ \bm{x}_t &= \left[\begin{array}{ll} 1 & \phi \\ 0 & \phi \end{array}\right]\bm{x}_{t-1} + \left[\begin{array}{ll} 1 & \phi \end{array}\right] \bm{x}_{t-1} \left[\begin{array}{l} \alpha\\ \beta \end{array}\right]\varepsilon_t. \end{align*} Again we assume that $\varepsilon_t \sim \mbox{NID}(0,\sigma^2)$. Of course, this is a nonlinear state space model, which is usually considered difficult to handle in estimating and forecasting. However, that is one of the many advantages of the innovations form of state space models --- we can still compute forecasts, the likelihood and prediction intervals for this nonlinear model with no more effort than is required for the additive error model.

## State space models for all exponential smoothing methods {#sec:ssmodels}

There are similar state space models for all 30 exponential smoothing variations.
The general model involves a state vector $\bm{x}_t = (\ell_t, b_t$, $s_t, s_{t-1}, \dots, s_{t-m+1})'$ and state space equations of the form \begin{subequations}\label{eq:ss} \begin{align} y_t &= w(\bm{x}_{t-1}) + r(\bm{x}_{t-1})\varepsilon_t \label{eq:ss1}\\ \bm{x}_t &= f(\bm{x}_{t-1}) + g(\bm{x}_{t-1})\varepsilon_t \label{eq:ss2} \end{align} \end{subequations} where $\{\varepsilon_t\}$ is a Gaussian white noise process with mean zero and variance $\sigma^2$, and $\mu_t = w(\bm{x}_{t-1})$. The model with additive errors has $r(\bm{x}_{t-1})=1$, so that $y_t = \mu_{t} + \varepsilon_t$. The model with multiplicative errors has $r(\bm{x}_{t-1})=\mu_t$, so that $y_t = \mu_{t}(1 + \varepsilon_t)$. Thus, $\varepsilon_t = (y_t - \mu_t)/\mu_t$ is the relative error for the multiplicative model. The models are not unique. Clearly, any value of $r(\bm{x}_{t-1})$ will lead to identical point forecasts for $y_t$. All of the methods in Table \ref{table:pegels} can be written in the form \eqref{eq:ss1} and \eqref{eq:ss2}. The specific form for each model is given in @expsmooth08. Some of the combinations of trend, seasonality and error can occasionally lead to numerical difficulties; specifically, any model equation that requires division by a state component could involve division by zero. This is a problem for models with additive errors and either multiplicative trend or multiplicative seasonality, as well as for the model with multiplicative errors, multiplicative trend and additive seasonality. These models should therefore be used with caution. The multiplicative error models are useful when the data are strictly positive, but are not numerically stable when the data contain zeros or negative values. So when the time series is not strictly positive, only the six fully additive models may be applied. The point forecasts given in Table \ref{table:pegels} are easily obtained from these models by iterating equations \eqref{eq:ss1} and \eqref{eq:ss2} for $t=n+1, n+2,\dots,n+h$, setting $\varepsilon_{n+j}=0$ for $j=1,\dots,h$. In most cases (notable exceptions being models with multiplicative seasonality or multiplicative trend for $h\ge2$), the point forecasts can be shown to be equal to $\mu_{t+h|t} = \E(y_{t+h} \mid \bm{x}_t)$, the conditional expectation of the corresponding state space model. The models also provide a means of obtaining prediction intervals. In the case of the linear models, where the forecast distributions are normal, we can derive the conditional variance $v_{t+h|t} = \var(y_{t+h} \mid \bm{x}_t)$ and obtain prediction intervals accordingly. This approach also works for many of the nonlinear models. Detailed derivations of the results for many models are given in @HKOS05. A more direct approach that works for all of the models is to simply simulate many future sample paths conditional on the last estimate of the state vector, $\bm{x}_t$. Then prediction intervals can be obtained from the percentiles of the simulated sample paths. Point forecasts can also be obtained in this way by taking the average of the simulated values at each future time period. An advantage of this approach is that we generate an estimate of the complete predictive distribution, which is especially useful in applications such as inventory planning, where expected costs depend on the whole distribution. ## Estimation {#sec:estimation} In order to use these models for forecasting, we need to know the values of $\bm{x}_0$ and the parameters $\alpha$, $\beta$, $\gamma$ and $\phi$. 
It is easy to compute the likelihood of the innovations state space model \eqref{eq:ss}, and so obtain maximum likelihood estimates. @OKS97 show that \begin{equation}\label{likelihood} L^*(\bm\theta,\bm{x}_0) = n\log\Big(\sum_{t=1}^n \varepsilon^2_t\Big) + 2\sum_{t=1}^n \log|r(\bm{x}_{t-1})| \end{equation} is equal to twice the negative logarithm of the likelihood function (with constant terms eliminated), conditional on the parameters $\bm\theta = (\alpha,\beta,\gamma,\phi)'$ and the initial states $\bm{x}_0 = (\ell_0,b_0,s_0,s_{-1},\dots,s_{-m+1})'$, where $n$ is the number of observations. This is easily computed by simply using the recursive equations in Table \ref{table:pegels}. Unlike state space models with multiple sources of error, we do not need to use the Kalman filter to compute the likelihood. The parameters $\bm\theta$ and the initial states $\bm{x}_0$ can be estimated by minimizing $L^*$. Most implementations of exponential smoothing use an ad hoc heuristic scheme to estimate $\bm{x}_0$. However, with modern computers, there is no reason why we cannot estimate $\bm{x}_0$ along with $\bm\theta$, and the resulting forecasts are often substantially better when we do. We constrain the initial states $\bm{x}_0$ so that the seasonal indices add to zero for additive seasonality, and add to $m$ for multiplicative seasonality. There have been several suggestions for restricting the parameter space for $\alpha$, $\beta$ and $\gamma$. The traditional approach is to ensure that the various equations can be interpreted as weighted averages, thus requiring $\alpha$, $\beta^*=\beta/\alpha$, $\gamma^*=\gamma/(1-\alpha)$ and $\phi$ to all lie within $(0,1)$. This suggests $$0<\alpha<1,\qquad 0<\beta<\alpha,\qquad 0<\gamma < 1-\alpha,\qquad\mbox{and}\qquad 0<\phi<1. $$ However, @HAA08 show that these restrictions are usually stricter than necessary (although in a few cases they are not restrictive enough). ## Model selection Forecast accuracy measures such as mean squared error (MSE) can be used for selecting a model for a given set of data, provided the errors are computed from data in a hold-out set and not from the same data as were used for model estimation. However, there are often too few out-of-sample errors to draw reliable conclusions. Consequently, a penalized method based on the in-sample fit is usually better. One such approach uses a penalized likelihood such as Akaike's Information Criterion: $$\mbox{AIC} = L^*(\hat{\bm\theta},\hat{\bm{x}}_0) + 2q, $$ where $q$ is the number of parameters in $\bm\theta$ plus the number of free states in $\bm{x}_0$, and $\hat{\bm\theta}$ and $\hat{\bm{x}}_0$ denote the estimates of $\bm\theta$ and $\bm{x}_0$. We select the model that minimizes the AIC amongst all of the models that are appropriate for the data. The AIC also provides a method for selecting between the additive and multiplicative error models. The point forecasts from the two models are identical so that standard forecast accuracy measures such as the MSE or mean absolute percentage error (MAPE) are unable to select between the error types. The AIC is able to select between the error types because it is based on likelihood rather than one-step forecasts. Obviously, other model selection criteria (such as the BIC) could also be used in a similar manner. ## Automatic forecasting {#sec:algorithm} We combine the preceding ideas to obtain a robust and widely applicable automatic forecasting algorithm. The steps involved are summarized below. 
\begin{compactenum} \item For each series, apply all models that are appropriate, optimizing the parameters (both smoothing parameters and the initial state variable) of the model in each case. \item Select the best of the models according to the AIC. \item Produce point forecasts using the best model (with optimized parameters) for as many steps ahead as required. \item Obtain prediction intervals for the best model either using the analytical results of Hyndman, Koehler, et al. (2005), or by simulating future sample paths for $\{y_{n+1},\dots,y_{n+h}\}$ and finding the $\alpha/2$ and $1-\alpha/2$ percentiles of the simulated data at each forecasting horizon. If simulation is used, the sample paths may be generated using the normal distribution for errors (parametric bootstrap) or using the resampled errors (ordinary bootstrap). \end{compactenum} @HKSG02 applied this automatic forecasting strategy to the M-competition data [@Mcomp82] and the IJF-M3 competition data [@M3comp00] using a restricted set of exponential smoothing models, and demonstrated that the methodology is particularly good at short term forecasts (up to about 6 periods ahead), and especially for seasonal short-term series (beating all other methods in the competitions for these series). # ARIMA models {#sec:arima} A common obstacle for many people in using Autoregressive Integrated Moving Average (ARIMA) models for forecasting is that the order selection process is usually considered subjective and difficult to apply. But it does not have to be. There have been several attempts to automate ARIMA modelling in the last 25 years. @HR82 proposed a method to identify the order of an ARMA model for a stationary series. In their method the innovations can be obtained by fitting a long autoregressive model to the data, and then the likelihood of potential models is computed via a series of standard regressions. They established the asymptotic properties of the procedure under very general conditions. @Gomez98 extended the Hannan-Rissanen identification method to include multiplicative seasonal ARIMA model identification. @TRAMOSEATS98 implemented this automatic identification procedure in the software \pkg{TRAMO} and \pkg{SEATS}. For a given series, the algorithm attempts to find the model with the minimum BIC. @Liu89 proposed a method for identification of seasonal ARIMA models using a filtering method and certain heuristic rules; this algorithm is used in the \pkg{SCA-Expert} software. Another approach is described by @MP00a whose algorithm for univariate ARIMA models also allows intervention analysis. It is implemented in the software package ``Time Series Expert'' (\pkg{TSE-AX}). Other algorithms are in use in commercial software, although they are not documented in the public domain literature. In particular, \pkg{Forecast Pro} [@ForecastPro00] is well-known for its excellent automatic ARIMA algorithm which was used in the M3-forecasting competition [@M3comp00]. Another proprietary algorithm is implemented in \pkg{Autobox} [@Reilly00]. @OL96 provide an early review of some of the commercial software that implement automatic ARIMA forecasting. ## Choosing the model order using unit root tests and the AIC A non-seasonal ARIMA($p,d,q$) process is given by $$ \phi(B)(1-B^d)y_{t} = c + \theta(B)\varepsilon_t $$ where $\{\varepsilon_t\}$ is a white noise process with mean zero and variance $\sigma^2$, $B$ is the backshift operator, and $\phi(z)$ and $\theta(z)$ are polynomials of order $p$ and $q$ respectively. 
To ensure causality and invertibility, it is assumed that $\phi(z)$ and $\theta(z)$ have no roots for $|z|<1$ [@BDbook91]. If $c\ne0$, there is an implied polynomial of order $d$ in the forecast function. The seasonal ARIMA$(p,d,q)(P,D,Q)_m$ process is given by $$ \Phi(B^m)\phi(B)(1-B^{m})^D(1-B)^dy_{t} = c + \Theta(B^m)\theta(B)\varepsilon_t $$ where $\Phi(z)$ and $\Theta(z)$ are polynomials of orders $P$ and $Q$ respectively, each containing no roots inside the unit circle. If $c\ne0$, there is an implied polynomial of order $d+D$ in the forecast function.

The main task in automatic ARIMA forecasting is selecting an appropriate model order, that is the values $p$, $q$, $P$, $Q$, $D$, $d$. If $d$ and $D$ are known, we can select the orders $p$, $q$, $P$ and $Q$ via an information criterion such as the AIC: $$\mbox{AIC} = -2\log(L) + 2(p+q+P+Q+k)$$ where $k=1$ if $c\ne0$ and 0 otherwise, and $L$ is the maximized likelihood of the model fitted to the \emph{differenced} data $(1-B^m)^D(1-B)^dy_t$. The likelihood of the full model for $y_t$ is not actually defined, and so the AIC values for different levels of differencing are not comparable.

One solution to this difficulty is the ``diffuse prior'' approach which is outlined in @DKbook01 and implemented in the \code{arima()} function [@Ripley:2002] in \R. In this approach, the initial values of the time series (before the observed values) are assumed to have mean zero and a large variance. However, choosing $d$ and $D$ by minimizing the AIC using this approach tends to lead to over-differencing. For forecasting purposes, we believe it is better to make as few differences as possible because over-differencing harms forecasts [@SY94] and widens prediction intervals. [Although, see @Hendry97 for a contrary view.]

Consequently, we need some other approach to choose $d$ and $D$. We prefer unit-root tests. However, most unit-root tests are based on a null hypothesis that a unit root exists, which biases results towards more differences rather than fewer differences. For example, variations on the Dickey-Fuller test [@DF81] all assume there is a unit root at lag 1, and the HEGY test of @HEGY90 is based on a null hypothesis that there is a seasonal unit root. Instead, we prefer unit-root tests based on a null hypothesis of no unit-root.

For non-seasonal data, we consider ARIMA($p,d,q$) models where $d$ is selected based on successive KPSS unit-root tests [@KPSS92]. That is, we test the data for a unit root; if the test result is significant, we test the differenced data for a unit root; and so on. We stop this procedure when we obtain our first insignificant result.

For seasonal data, we consider ARIMA$(p,d,q)(P,D,Q)_m$ models where $m$ is the seasonal frequency and $D=0$ or $D=1$ depending on an extended Canova-Hansen test [@CH95]. Canova and Hansen only provide critical values for $2<m<13$. Let $C_m$ be the critical value for seasonal period $m$. We plotted $C_m$ against $m$ for values of $m$ up to 365 and noted that they fit the line $C_m = 0.269 m^{0.928}$ almost exactly. So for $m>12$, we use this simple expression to obtain the critical value.

We note in passing that the null hypothesis for the Canova-Hansen test is not an ARIMA model as it includes seasonal dummy terms. It is a test for whether the seasonal pattern changes sufficiently over time to warrant a seasonal unit root, or whether a stable seasonal pattern modelled using fixed dummy variables is more appropriate.
Nevertheless, we have found that the test is still useful for choosing $D$ in a strictly ARIMA framework (i.e., without seasonal dummy variables). If a stable seasonal pattern is selected (i.e., the null hypothesis is not rejected), the seasonality is effectively handled by stationary seasonal AR and MA terms. After $D$ is selected, we choose $d$ by applying successive KPSS unit-root tests to the seasonally differenced data (if $D=1$) or the original data (if $D=0$). Once $d$ (and possibly $D$) are selected, we proceed to select the values of $p$, $q$, $P$ and $Q$ by minimizing the AIC. We allow $c\ne0$ for models where $d+D < 2$. ## A step-wise procedure for traversing the model space Suppose we have seasonal data and we consider ARIMA$(p,d,q)(P,D,Q)_m$ models where $p$ and $q$ can take values from 0 to 3, and $P$ and $Q$ can take values from 0 to 1. When $c=0$ there is a total of 288 possible models, and when $c\ne 0$ there is a total of 192 possible models, giving 480 models altogether. If the values of $p$, $d$, $q$, $P$, $D$ and $Q$ are allowed to range more widely, the number of possible models increases rapidly. Consequently, it is often not feasible to simply fit every potential model and choose the one with the lowest AIC. Instead, we need a way of traversing the space of models efficiently in order to arrive at the model with the lowest AIC value. We propose a step-wise algorithm as follows. \begin{description} \item[Step 1:] We try four possible models to start with. \begin{itemize} \item ARIMA($2,d,2$) if $m=1$ and ARIMA($2,d,2)(1,D,1)$ if $m>1$. \item ARIMA($0,d,0$) if $m=1$ and ARIMA($0,d,0)(0,D,0)$ if $m>1$. \item ARIMA($1,d,0$) if $m=1$ and ARIMA($1,d,0)(1,D,0)$ if $m>1$. \item ARIMA($0,d,1$) if $m=1$ and ARIMA($0,d,1)(0,D,1)$ if $m>1$. \end{itemize} If $d+D \le 1$, these models are fitted with $c\ne0$. Otherwise, we set $c=0$. Of these four models, we select the one with the smallest AIC value. This is called the ``current'' model and is denoted by ARIMA($p,d,q$) if $m=1$ or ARIMA($p,d,q)(P,D,Q)_m$ if $m>1$. \item[Step 2:] We consider up to seventeen variations on the current model: \begin{itemize} \item where one of $p$, $q$, $P$ and $Q$ is allowed to vary by $\pm1$ from the current model; \item where $p$ and $q$ both vary by $\pm1$ from the current model; \item where $P$ and $Q$ both vary by $\pm1$ from the current model; \item where the constant $c$ is included if the current model has $c=0$ or excluded if the current model has $c\ne0$. \end{itemize} Whenever a model with lower AIC is found, it becomes the new ``current'' model and the procedure is repeated. This process finishes when we cannot find a model close to the current model with lower AIC. \end{description} There are several constraints on the fitted models to avoid problems with convergence or near unit-roots. The constraints are outlined below. \begin{compactitem}\itemsep=8pt \item The values of $p$ and $q$ are not allowed to exceed specified upper bounds (with default values of 5 in each case). \item The values of $P$ and $Q$ are not allowed to exceed specified upper bounds (with default values of 2 in each case). \item We reject any model which is ``close'' to non-invertible or non-causal. Specifically, we compute the roots of $\phi(B)\Phi(B)$ and $\theta(B)\Theta(B)$. If either have a root that is smaller than 1.001 in absolute value, the model is rejected. \item If there are any errors arising in the non-linear optimization routine used for estimation, the model is rejected. 
The rationale here is that any model that is difficult to fit is probably not a good model for the data. \end{compactitem}

The algorithm is guaranteed to return a valid model because the model space is finite and at least one of the starting models will be accepted (the model with no AR or MA parameters). The selected model is used to produce forecasts.

## Comparisons with exponential smoothing

There is a widespread myth that ARIMA models are more general than exponential smoothing. This is not true. The two classes of models overlap. The linear exponential smoothing models are all special cases of ARIMA models---the equivalences are discussed in @HAA08. However, the non-linear exponential smoothing models have no equivalent ARIMA counterpart. On the other hand, there are many ARIMA models which have no exponential smoothing counterpart. Thus, the two model classes overlap and are complementary; each has its strengths and weaknesses.

The exponential smoothing state space models are all non-stationary. Models with seasonality or non-damped trend (or both) have two unit roots; all other models---that is, non-seasonal models with either no trend or damped trend---have one unit root. It is possible to define a stationary model with similar characteristics to exponential smoothing, but this is not normally done. The philosophy of exponential smoothing is that the world is non-stationary. So if a stationary model is required, ARIMA models are better.

One advantage of the exponential smoothing models is that they can be non-linear. So time series that exhibit non-linear characteristics including heteroscedasticity may be better modelled using exponential smoothing state space models.

For seasonal data, there are many more ARIMA models than the 30 possible models in the exponential smoothing class of Section \ref{sec:expsmooth}. It may be thought that the larger model class is advantageous. However, the results in @HKSG02 show that the exponential smoothing models performed better than the ARIMA models for the seasonal M3 competition data. (For the annual M3 data, the ARIMA models performed better.) In a discussion of these results, @Hyndman01 speculates that the larger model space of ARIMA models actually harms forecasting performance because it introduces additional uncertainty. The smaller exponential smoothing class is sufficiently rich to capture the dynamics of almost all real business and economic time series.

# The forecast package {#sec:package}

The algorithms and modelling frameworks for automatic univariate time series forecasting are implemented in the \pkg{forecast} package in \R. We illustrate the methods using the following four real time series shown in Figure \ref{fig:etsexamples}. \begin{compactitem} \item Figure \ref{fig:etsexamples}(a) shows 125 monthly US government bond yields (percent per annum) from January 1994 to May 2004. \item Figure \ref{fig:etsexamples}(b) displays 55 observations of annual US net electricity generation (billion kwh) for 1949 through 2003. \item Figure \ref{fig:etsexamples}(c) presents 113 quarterly observations of passenger motor vehicle production in the U.K. (thousands of cars) for the first quarter of 1977 through the first quarter of 2005. \item Figure \ref{fig:etsexamples}(d) shows 240 monthly observations of the number of short term overseas visitors to Australia from May 1985 to April 2005.
\end{compactitem} ```{r etsexamples, fig.height=7, fig.width=9, echo=FALSE, fig.cap="Four time series showing point forecasts and 80\\% \\& 95\\% prediction intervals obtained using exponential smoothing state space models."} par(mfrow = c(2,2)) mod1 <- ets(bonds) mod2 <- ets(usnetelec) mod3 <- ets(ukcars) mod4 <- ets(visitors) plot(forecast(mod1), main="(a) US 10-year bonds yield", xlab="Year", ylab="Percentage per annum") plot(forecast(mod2), main="(b) US net electricity generation", xlab="Year", ylab="Billion kwh") plot(forecast(mod3), main="(c) UK passenger motor vehicle production", xlab="Year", ylab="Thousands of cars") plot(forecast(mod4), main="(d) Overseas visitors to Australia", xlab="Year", ylab="Thousands of people") ``` ```{r etsnames, echo=FALSE} etsnames <- c(mod1$method, mod2$method, mod3$method, mod4$method) etsnames <- gsub("Ad","A\\\\damped",etsnames) ``` ## Implementation of the automatic exponential smoothing algorithm The innovations state space modelling framework described in Section \ref{sec:expsmooth} is implemented via the \code{ets()} function in the \pkg{forecast} package. (The default settings of \code{ets()} do not allow models with multiplicative trend, but they can be included using \code{allow.multiplicative.trend=TRUE}.) The models chosen via the algorithm for the four data sets were: \begin{compactitem} \item `r etsnames[1]` for monthly US 10-year bonds yield\\ ($\alpha=`r format(coef(mod1)['alpha'], digits=4, nsmall=4)`$, $\beta=`r format(coef(mod1)['beta'], digits=4, nsmall=4)`$, $\phi=`r format(coef(mod1)['phi'], digits=4, nsmall=4)`$, $\ell_0 = `r format(coef(mod1)['l'], digits=4, nsmall=4)`$, $b_0=`r format(coef(mod1)['b'], digits=4, nsmall=4)`$); \item `r etsnames[2]` for annual US net electricity generation\\ ($\alpha=`r format(coef(mod2)['alpha'], digits=4, nsmall=4)`$, $\beta=`r format(coef(mod2)['beta'], digits=4, nsmall=4)`$, $\ell_0 = `r format(coef(mod2)['l'], digits=4, nsmall=4)`$, $b_0=`r format(coef(mod2)['b'], digits=4, nsmall=4)`$); \item `r etsnames[3]` for quarterly UK motor vehicle production\\ ($\alpha=`r format(coef(mod3)['alpha'], digits=4, nsmall=4)`$, $\gamma=`r format(coef(mod3)['gamma'], digits=4, nsmall=4)`$, $\ell_0 = `r format(coef(mod3)['l'], digits=4, nsmall=4)`$, $s_{-3}=`r format(-sum(coef(mod3)[c('s0','s1','s2')]), digits=4, nsmall=4)`$, $s_{-2}=`r format(coef(mod3)['s2'], digits=4, nsmall=4)`$, $s_{-1}=`r format(coef(mod3)['s1'], digits=4, nsmall=4)`$, $s_0=`r format(coef(mod3)['s0'], digits=4, nsmall=4)`$); \item `r etsnames[4]` for monthly Australian overseas visitors\\ ($\alpha=`r format(coef(mod4)['alpha'], digits=4, nsmall=4)`$, $\beta=`r format(coef(mod4)['beta'], digits=2, nsmall=4)`$, $\gamma=`r format(coef(mod4)['gamma'], digits=4, nsmall=4)`$, $\ell_0 = `r format(coef(mod4)['l'], digits=4, nsmall=4)`$, $b_0 = `r format(coef(mod4)['b'], digits=4, nsmall=4)`$, $s_{-11}=`r format(12-sum(tail(coef(mod4),11)), digits=4, nsmall=4)`$, $s_{-10}=`r format(coef(mod4)['s10'], digits=4, nsmall=4)`$, $s_{-9}=`r format(coef(mod4)['s9'], digits=4, nsmall=4)`$, $s_{-8}=`r format(coef(mod4)['s8'], digits=4, nsmall=4)`$, $s_{-7}=`r format(coef(mod4)['s7'], digits=4, nsmall=4)`$, $s_{-6}=`r format(coef(mod4)['s6'], digits=4, nsmall=4)`$, $s_{-5}=`r format(coef(mod4)['s5'], digits=4, nsmall=4)`$, $s_{-4}=`r format(coef(mod4)['s4'], digits=4, nsmall=4)`$, $s_{-3}=`r format(coef(mod4)['s3'], digits=4, nsmall=4)`$, $s_{-2}=`r format(coef(mod4)['s2'], digits=4, nsmall=4)`$, $s_{-1}=`r format(coef(mod4)['s1'], digits=4, nsmall=4)`$, 
To apply the algorithm to the US net electricity generation time series \code{usnetelec}, we use the following command.

```{r ets-usnetelec, echo=TRUE}
etsfit <- ets(usnetelec)
```

The object \code{etsfit} is of class ``\code{ets}'' and contains all of the necessary information about the fitted model including model parameters, the value of the state vector $\bm{x}_t$ for all $t$, residuals and so on. Printing the \code{etsfit} object shows the main items of interest.

```{r ets-usnetelec-print, echo=TRUE}
etsfit
```

Some goodness-of-fit measures [defined in @HK06] are obtained using \code{accuracy()}.

```{r ets-usnetelec-accuracy, eval=TRUE, echo=TRUE}
accuracy(etsfit)
```

There are also \code{coef()}, \code{plot()}, \code{summary()}, \code{residuals()}, \code{fitted()} and \code{simulate()} methods for objects of class ``\code{ets}''. The \code{plot()} function shows time plots of the original time series along with the extracted components (level, growth and seasonal). The \code{forecast()} function computes the required forecasts, which are then plotted as in Figure \ref{fig:etsexamples}(b).

```{r ets-usnetelec-fcast, fig.height=5, fig.width=8, message=FALSE, warning=FALSE, include=FALSE}
fcast <- forecast(etsfit)
plot(fcast)
```

Printing the \code{fcast} object gives a table showing the prediction intervals.

```{r ets-usnetelec-fcast-print, eval=TRUE, echo=TRUE}
fcast
```

The \code{ets()} function also provides the useful feature of applying a fitted model to a new data set. For example, we could withhold 10 observations from the \code{usnetelec} data set when fitting, then compute the one-step forecast errors for the out-of-sample data.

```{r ets-usnetelec-newdata, eval=FALSE, echo=TRUE}
fit <- ets(usnetelec[1:45])
test <- ets(usnetelec[46:55], model = fit)
accuracy(test)
```

We can also look at the measures of forecast accuracy where the forecasts are based on only the fitting data.

```{r ets-usnetelec-fcast-accuracy, eval=FALSE, echo=TRUE}
accuracy(forecast(fit, 10), usnetelec[46:55])
```

## The HoltWinters() function

There is another implementation of exponential smoothing in \R\ via the \code{HoltWinters()} function [@Meyer:2002] in the \pkg{stats} package. It implements only the (N,N), (A,N), (A,A) and (A,M) methods. The initial states $\bm{x}_0$ are fixed using a heuristic algorithm. Because of the way the initial states are estimated, a full three years of seasonal data are required to implement the seasonal forecasts using \code{HoltWinters()}. (See @shortseasonal for the minimal sample size required.) The smoothing parameters are optimized by minimizing the average squared prediction errors, which is equivalent to minimizing \eqref{likelihood} in the case of additive errors. There is a \code{predict()} method for the resulting object which can produce point forecasts and prediction intervals. Although it is nowhere documented, it appears that the prediction intervals produced by \code{predict()} for an object of class \code{HoltWinters} are based on an equivalent ARIMA model in the case of the (N,N), (A,N) and (A,A) methods, assuming additive errors. These prediction intervals are equivalent to the prediction intervals that arise from the (A,N,N), (A,A,N) and (A,A,A) state space models. For the (A,M) method, the prediction interval provided by \code{predict()} appears to be based on @CY91, which is an approximation to the true prediction interval arising from the (A,A,M) model. Prediction intervals with multiplicative errors are not possible using the \code{HoltWinters()} function.
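To illustrate the relationship between the two implementations, the following sketch (our own example; the additive model is forced in \code{ets()} so that both functions fit comparable methods) applies each to the quarterly \code{ukcars} series.

```{r hwcompare, eval=FALSE, echo=TRUE}
# Illustrative comparison of the two implementations on ukcars.
# Estimates will differ because HoltWinters() fixes the initial states
# heuristically, whereas ets() estimates them.
hwfit  <- HoltWinters(ukcars)                         # additive Holt-Winters
etsfit <- ets(ukcars, model = "AAA", damped = FALSE)  # ETS(A,A,A)
predict(hwfit, n.ahead = 8, prediction.interval = TRUE)
forecast(etsfit, h = 8)
```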
## Implementation of the automatic ARIMA algorithm

```{r arimaexamples, fig.height=7, fig.width=9, echo=FALSE, fig.cap="Four time series showing point forecasts and 80\\% \\& 95\\% prediction intervals obtained using ARIMA models."}
mod1 <- auto.arima(bonds, seasonal=FALSE, approximation=FALSE)
mod2 <- auto.arima(usnetelec)
mod3 <- auto.arima(ukcars)
mod4 <- auto.arima(visitors, lambda=0)
par(mfrow = c(2, 2))
plot(forecast(mod1), main="(a) US 10-year bonds yield", xlab="Year", ylab="Percentage per annum")
plot(forecast(mod2), main="(b) US net electricity generation", xlab="Year", ylab="Billion kwh")
plot(forecast(mod3), main="(c) UK passenger motor vehicle production", xlab="Year", ylab="Thousands of cars")
plot(forecast(mod4), main="(d) Overseas visitors to Australia", xlab="Year", ylab="Thousands of people")
```

The algorithm of Section \ref{sec:arima} is applied to the same four time series. Unlike the exponential smoothing algorithm, the ARIMA class of models assumes homoscedasticity, which is not always appropriate. Consequently, transformations are sometimes necessary. For these four time series, we model the raw data for series (a)--(c), but the logged data for series (d). The prediction intervals are back-transformed with the point forecasts to preserve the probability coverage.
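The transformation for series (d) is specified via the Box-Cox parameter \code{lambda}, as in the following sketch (the forecast horizon is our own arbitrary choice).

```{r arima-lambda, eval=FALSE, echo=TRUE}
# lambda = 0 corresponds to a log transformation. Forecasts and
# prediction intervals are computed on the transformed scale and
# back-transformed automatically.
fit <- auto.arima(visitors, lambda = 0)
plot(forecast(fit, h = 24))
BoxCox.lambda(visitors)  # a data-driven choice of lambda, if preferred
```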
To apply this algorithm to the US net electricity generation time series \code{usnetelec}, we use the following commands.

```{r arima-auto-fcast, eval=TRUE, echo=TRUE, fig.show="hide"}
arimafit <- auto.arima(usnetelec)
fcast <- forecast(arimafit)
plot(fcast)
```

```{r arimanames, echo=FALSE}
# Convert character strings to latex
arimanames <- c(as.character(mod1), as.character(mod2),
                as.character(mod3), as.character(mod4))
arimanames <- gsub("\\[([0-9]*)\\]", "$_{\\1}$", arimanames)
```

The function \code{auto.arima()} implements the algorithm of Section \ref{sec:arima} and returns an object of class \code{Arima}. The resulting forecasts are shown in Figure \ref{fig:arimaexamples}. The fitted models are as follows:

\begin{compactitem}
\item `r arimanames[1]` for monthly US 10-year bonds yield\\
($\theta_1= `r format(coef(mod1)['ma1'], digits=4, nsmall=4)`$);
\item `r arimanames[2]` for annual US net electricity generation\\
($\phi_1= `r format(coef(mod2)['ar1'], digits=4, nsmall=4)`$;
$\phi_2= `r format(coef(mod2)['ar2'], digits=4, nsmall=4)`$;
$\theta_1= `r format(coef(mod2)['ma1'], digits=4, nsmall=4)`$;
$\theta_2= `r format(coef(mod2)['ma2'], digits=4, nsmall=4)`$;
$c= `r format(coef(mod2)['drift'], digits=4, nsmall=4)`$);
\item `r arimanames[3]` for quarterly UK motor vehicle production\\
($\phi_1= `r format(coef(mod3)['ar1'], digits=4, nsmall=4)`$;
$\phi_2= `r format(coef(mod3)['ar2'], digits=4, nsmall=4)`$;
$\Phi_1= `r format(coef(mod3)['sar1'], digits=4, nsmall=4)`$;
$\Phi_2= `r format(coef(mod3)['sar2'], digits=4, nsmall=4)`$);
\item `r arimanames[4]` for monthly Australian overseas visitors\\
($\phi_1= `r format(coef(mod4)['ar1'], digits=4, nsmall=4)`$;
$\theta_1= `r format(coef(mod4)['ma1'], digits=4, nsmall=4)`$;
$\Theta_1= `r format(coef(mod4)['sma1'], digits=4, nsmall=4)`$;
$\Theta_2= `r format(coef(mod4)['sma2'], digits=4, nsmall=4)`$;
$c= `r format(coef(mod4)['drift'], digits=4, nsmall=4)`$).
\end{compactitem}

Note that the \R\ parameterization has $\theta(B) = (1 + \theta_1 B + \dots + \theta_q B^q)$ and $\phi(B) = (1 - \phi_1 B - \dots - \phi_p B^p)$, and similarly for the seasonal terms.

A summary of the forecasts is available, part of which is shown below.

```
Forecast method: ARIMA(2,1,2) with drift

Series: usnetelec

Coefficients:
          ar1      ar2     ma1     ma2    drift
      -1.3032  -0.4332  1.5284  0.8340  66.1585
s.e.   0.2122   0.2084  0.1417  0.1185   7.5595

sigma^2 estimated as 2262:  log likelihood=-283.34
AIC=578.67   AICc=580.46   BIC=590.61

Error measures:
                   ME   RMSE    MAE      MPE   MAPE    MASE     ACF1
Training set 0.046402 44.894 32.333 -0.61771 2.1012 0.45813 0.022492

Forecasts:
     Point Forecast    Lo 80    Hi 80    Lo 95    Hi 95
2004       3968.957 3908.002 4029.912 3875.734 4062.180
2005       3970.350 3873.950 4066.751 3822.919 4117.782
2006       4097.171 3971.114 4223.228 3904.383 4289.959
2007       4112.332 3969.691 4254.973 3894.182 4330.482
2008       4218.671 4053.751 4383.591 3966.448 4470.894
2009       4254.559 4076.108 4433.010 3981.641 4527.476
2010       4342.760 4147.088 4538.431 4043.505 4642.014
2011       4393.306 4185.211 4601.401 4075.052 4711.560
2012       4470.261 4248.068 4692.455 4130.446 4810.077
2013       4529.113 4295.305 4762.920 4171.535 4886.690
```

The training set error measures for the ETS and ARIMA models are very similar. Note that the information criteria are not comparable.

The \pkg{forecast} package also contains the function \code{Arima()} which is largely a wrapper to the \code{arima()} function in the \pkg{stats} package. The \code{Arima()} function in the \pkg{forecast} package makes it easier to include a drift term when $d+D=1$. (Setting \code{include.mean=TRUE} in the \code{arima()} function from the \pkg{stats} package will only work when $d+D=0$.) It also provides the facility for fitting an existing ARIMA model to a new data set (as was demonstrated for the \code{ets()} function earlier). One-step forecasts for ARIMA models are now available via a \code{fitted()} function. We also provide a new function \code{arima.errors()} which returns the original time series after adjusting for regression variables. If there are no regression variables in the ARIMA model, then the errors will be identical to the original series. If there are regression variables in the ARIMA model, then the errors will be equal to the original series minus the effect of the regression variables, but leaving in the serial correlation that is modelled with the AR and MA terms. In contrast, \code{residuals()} provides true residuals, removing the AR and MA terms as well.

The generic functions \code{summary()}, \code{print()}, \code{fitted()} and \code{forecast()} apply to models obtained from either the \code{Arima()} or \code{arima()} functions.
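The refitting facility mirrors the earlier \code{ets()} example; a minimal sketch follows (the 45/10 split is chosen only for illustration).

```{r arima-newdata, eval=FALSE, echo=TRUE}
# Apply an ARIMA model selected on the first 45 observations
# to the withheld final 10 observations.
fit  <- auto.arima(usnetelec[1:45])
test <- Arima(usnetelec[46:55], model = fit)
accuracy(test)   # one-step forecast accuracy on the new data
fitted(test)     # one-step in-sample forecasts from the refitted model
```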
## The forecast() function

The \code{forecast()} function is generic and has S3 methods for a wide range of time series models. It computes point forecasts and prediction intervals from the time series model. Methods exist for models fitted using \code{ets()}, \code{auto.arima()}, \code{Arima()}, \code{arima()}, \code{ar()}, \code{HoltWinters()} and \code{StructTS()}.

There is also a method for a \code{ts} object. If a time series object is passed as the first argument to \code{forecast()}, the function will produce forecasts based on the exponential smoothing algorithm of Section \ref{sec:expsmooth}.

In most cases, there is an existing \code{predict()} function which is intended to do much the same thing. Unfortunately, the resulting objects from the \code{predict()} function contain different information in each case and so it is not possible to build generic functions (such as \code{plot()} and \code{summary()}) for the results. So, instead, \code{forecast()} acts as a wrapper to \code{predict()}, and packages the information obtained in a common format (the \code{forecast} class). We also define a default \code{predict()} method which is used when no existing \code{predict()} function exists, and calls the relevant \code{forecast()} function. Thus, \code{predict()} methods parallel \code{forecast()} methods, but the latter provide consistent output that is more usable.

\subsection[The forecast class]{The \code{forecast} class}

The output from the \code{forecast()} function is an object of class ``\code{forecast}'' and includes at least the following information:

\begin{compactitem}
\item the original series;
\item point forecasts;
\item prediction intervals of specified coverage;
\item the forecasting method used and information about the fitted model;
\item residuals from the fitted model;
\item one-step forecasts from the fitted model for the period of the observed data.
\end{compactitem}

There are \code{print()}, \code{plot()} and \code{summary()} methods for the ``\code{forecast}'' class. Figures \ref{fig:etsexamples} and \ref{fig:arimaexamples} were produced using the \code{plot()} method. The prediction intervals are, by default, computed for 80\% and 95\% coverage, although other values are possible if requested. Fan charts [@Wallis99] are possible using the combination \verb|plot(forecast(model.object, fan = TRUE))|.
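Because every method returns an object of the same class, downstream code can rely on a common set of components, as in the following sketch (our own example; not an exhaustive list of components).

```{r forecast-components, eval=FALSE, echo=TRUE}
# Accessing components of a "forecast" object, whichever model produced it.
fcast <- forecast(ets(usnetelec))
fcast$mean       # point forecasts
fcast$lower      # lower interval bounds (one column per coverage level)
fcast$upper      # upper interval bounds
fcast$residuals  # residuals from the fitted model
plot(forecast(ets(usnetelec), fan = TRUE))  # a fan chart
```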
## Other functions {#sec:other}

We now briefly describe some of the other features of the \pkg{forecast} package. Each of the following functions produces an object of class ``\code{forecast}''.

\code{croston()}
: implements the method of @Croston72 for intermittent demand forecasting. In this method, the time series is decomposed into two separate sequences: the non-zero values and the time intervals between non-zero values. These are then independently forecast using simple exponential smoothing and the forecasts of the original series are obtained as ratios of the two sets of forecasts. No prediction intervals are provided because there is no underlying stochastic model [@SH05].

\code{thetaf()}
: provides forecasts from the Theta method [@AN00]. @HB03 showed that these were equivalent to a special case of simple exponential smoothing with drift.

\code{splinef()}
: gives cubic-spline forecasts, based on fitting a cubic spline to the historical data and extrapolating it linearly. The details of this method, and the associated prediction intervals, are discussed in @HKPB05.

\code{meanf()}
: returns forecasts based on the historical mean.

\code{rwf()}
: gives ``naïve'' forecasts equal to the most recent observation assuming a random walk model. This function also allows forecasting using a random walk with drift.

In addition, there are some new plotting functions for time series.

\code{tsdisplay()}
: provides a time plot along with an ACF and PACF.

\code{seasonplot()}
: produces a seasonal plot as described in @MWH3.

\newpage

# Bibliography